ftrace: Decrement count for dyn_ftrace_total_info file
[linux-2.6-block.git] / kernel / trace / trace.c
1/*
2 * ring buffer based function tracer
3 *
2b6080f2 4 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
5 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
6 *
7 * Originally taken from the RT patch by:
8 * Arnaldo Carvalho de Melo <acme@redhat.com>
9 *
10 * Based on code from the latency_tracer, that is:
11 * Copyright (C) 2004-2006 Ingo Molnar
6d49e352 12 * Copyright (C) 2004 Nadia Yvette Chambers
bc0c38d1 13 */
2cadf913 14#include <linux/ring_buffer.h>
273b281f 15#include <generated/utsrelease.h>
16#include <linux/stacktrace.h>
17#include <linux/writeback.h>
18#include <linux/kallsyms.h>
19#include <linux/seq_file.h>
3f5a54e3 20#include <linux/notifier.h>
2cadf913 21#include <linux/irqflags.h>
bc0c38d1 22#include <linux/debugfs.h>
8434dc93 23#include <linux/tracefs.h>
4c11d7ae 24#include <linux/pagemap.h>
25#include <linux/hardirq.h>
26#include <linux/linkage.h>
27#include <linux/uaccess.h>
76c813e2 28#include <linux/vmalloc.h>
29#include <linux/ftrace.h>
30#include <linux/module.h>
31#include <linux/percpu.h>
2cadf913 32#include <linux/splice.h>
3f5a54e3 33#include <linux/kdebug.h>
5f0c6c03 34#include <linux/string.h>
f76180bc 35#include <linux/mount.h>
7e53bd42 36#include <linux/rwsem.h>
5a0e3ad6 37#include <linux/slab.h>
38#include <linux/ctype.h>
39#include <linux/init.h>
2a2cc8f7 40#include <linux/poll.h>
b892e5c8 41#include <linux/nmi.h>
bc0c38d1 42#include <linux/fs.h>
478409dd 43#include <linux/trace.h>
8bd75c77 44#include <linux/sched/rt.h>
86387f7e 45
bc0c38d1 46#include "trace.h"
f0868d1e 47#include "trace_output.h"
bc0c38d1 48
49/*
50 * On boot up, the ring buffer is set to the minimum size, so that
51 * we do not waste memory on systems that are not using tracing.
52 */
55034cd6 53bool ring_buffer_expanded;
73c5162a 54
 55/*
 56 * We need to change this state when a selftest is running.
 57 * A selftest will peek into the ring-buffer to count the
 58 * entries inserted during the selftest, although concurrent
 59 * insertions into the ring-buffer, such as trace_printk, could occur
 60 * at the same time, giving false positive or negative results.
 61 */
8e1b82e0 62static bool __read_mostly tracing_selftest_running;
ff32504f 63
64/*
65 * If a tracer is running, we do not want to run SELFTEST.
66 */
020e5f85 67bool __read_mostly tracing_selftest_disabled;
b2821ae6 68
69/* Pipe tracepoints to printk */
70struct trace_iterator *tracepoint_print_iter;
71int tracepoint_printk;
42391745 72static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);
0daa2302 73
74/* For tracers that don't implement custom flags */
75static struct tracer_opt dummy_tracer_opt[] = {
76 { }
77};
78
79static int
80dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
81{
82 return 0;
83}
0f048701 84
85/*
86 * To prevent the comm cache from being overwritten when no
87 * tracing is active, only save the comm when a trace event
88 * occurred.
89 */
90static DEFINE_PER_CPU(bool, trace_cmdline_save);
91
 92/*
 93 * Kill all tracing for good (never come back).
 94 * It is initialized to 1 but will be cleared to zero if the
 95 * initialization of the tracer is successful. That is the only
 96 * place that clears it.
 97 */
4fd27358 98static int tracing_disabled = 1;
0f048701 99
955b61e5 100cpumask_var_t __read_mostly tracing_buffer_mask;
ab46428c 101
 102/*
 103 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 104 *
 105 * If there is an oops (or kernel panic) and ftrace_dump_on_oops
 106 * is set, then ftrace_dump is called. This will output the contents
 107 * of the ftrace buffers to the console. This is very useful for
 108 * capturing traces that lead to crashes and outputting them to a
 109 * serial console.
 110 *
 111 * It is off by default, but you can enable it either by specifying
 112 * "ftrace_dump_on_oops" on the kernel command line, or by setting
 113 * /proc/sys/kernel/ftrace_dump_on_oops
 114 * Set it to 1 to dump the buffers of all CPUs
 115 * Set it to 2 to dump only the buffer of the CPU that triggered the oops
 116 */
117
118enum ftrace_dump_mode ftrace_dump_on_oops;
944ac425 119
120/* When set, tracing will stop when a WARN*() is hit */
121int __disable_trace_on_warning;
122
123#ifdef CONFIG_TRACE_EVAL_MAP_FILE
124/* Map of enums to their values, for "eval_map" file */
23bf8cb8 125struct trace_eval_map_head {
126 struct module *mod;
127 unsigned long length;
128};
129
23bf8cb8 130union trace_eval_map_item;
9828413d 131
23bf8cb8 132struct trace_eval_map_tail {
 133 /*
 134 * "end" is first and points to NULL as it must be different
 135 * from "mod" or "eval_string"
 136 */
 137 union trace_eval_map_item *next;
 138 const char *end; /* points to NULL */
 139};
140
1793ed93 141static DEFINE_MUTEX(trace_eval_mutex);
142
143/*
23bf8cb8 144 * The trace_eval_maps are saved in an array with two extra elements,
145 * one at the beginning, and one at the end. The beginning item contains
146 * the count of the saved maps (head.length), and the module they
147 * belong to if not built in (head.mod). The ending item contains a
681bec03 148 * pointer to the next array of saved eval_map items.
9828413d 149 */
23bf8cb8 150union trace_eval_map_item {
00f4b652 151 struct trace_eval_map map;
152 struct trace_eval_map_head head;
153 struct trace_eval_map_tail tail;
154};
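/*
 * Illustrative sketch (added for clarity, not used by the code): for a
 * module that saved N eval maps, the allocated array of
 * union trace_eval_map_item looks roughly like
 *
 *	[0]        head  (head.mod, head.length = N)
 *	[1 .. N]   map   (the N saved trace_eval_map entries)
 *	[N + 1]    tail  (tail.next -> next saved array, or NULL at the end)
 */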
155
23bf8cb8 156static union trace_eval_map_item *trace_eval_maps;
681bec03 157#endif /* CONFIG_TRACE_EVAL_MAP_FILE */
9828413d 158
607e2ea1 159static int tracing_set_tracer(struct trace_array *tr, const char *buf);
b2821ae6 160
161#define MAX_TRACER_SIZE 100
162static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
b2821ae6 163static char *default_bootup_tracer;
d9e54076 164
165static bool allocate_snapshot;
166
1beee96b 167static int __init set_cmdline_ftrace(char *str)
d9e54076 168{
67012ab1 169 strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
b2821ae6 170 default_bootup_tracer = bootup_tracer_buf;
73c5162a 171 /* We are using ftrace early, expand it */
55034cd6 172 ring_buffer_expanded = true;
173 return 1;
174}
1beee96b 175__setup("ftrace=", set_cmdline_ftrace);
d9e54076 176
177static int __init set_ftrace_dump_on_oops(char *str)
178{
179 if (*str++ != '=' || !*str) {
180 ftrace_dump_on_oops = DUMP_ALL;
181 return 1;
182 }
183
184 if (!strcmp("orig_cpu", str)) {
185 ftrace_dump_on_oops = DUMP_ORIG;
186 return 1;
187 }
188
189 return 0;
190}
191__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
60a11774 192
193static int __init stop_trace_on_warning(char *str)
194{
195 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
196 __disable_trace_on_warning = 1;
197 return 1;
198}
933ff9f2 199__setup("traceoff_on_warning", stop_trace_on_warning);
de7edd31 200
3209cff4 201static int __init boot_alloc_snapshot(char *str)
202{
203 allocate_snapshot = true;
204 /* We also need the main ring buffer expanded */
205 ring_buffer_expanded = true;
206 return 1;
207}
3209cff4 208__setup("alloc_snapshot", boot_alloc_snapshot);
55034cd6 209
210
211static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
212
213static int __init set_trace_boot_options(char *str)
214{
67012ab1 215 strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
216 return 0;
217}
218__setup("trace_options=", set_trace_boot_options);
219
220static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
221static char *trace_boot_clock __initdata;
222
223static int __init set_trace_boot_clock(char *str)
224{
225 strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
226 trace_boot_clock = trace_boot_clock_buf;
227 return 0;
228}
229__setup("trace_clock=", set_trace_boot_clock);
230
231static int __init set_tracepoint_printk(char *str)
232{
233 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
234 tracepoint_printk = 1;
235 return 1;
236}
237__setup("tp_printk", set_tracepoint_printk);
de7edd31 238
a5a1d1c2 239unsigned long long ns2usecs(u64 nsec)
240{
241 nsec += 500;
242 do_div(nsec, 1000);
243 return nsec;
244}
245
246/* trace_flags holds trace_options default values */
247#define TRACE_DEFAULT_FLAGS \
248 (FUNCTION_DEFAULT_FLAGS | \
249 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | \
250 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | \
251 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE | \
252 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)
253
254/* trace_options that are only supported by global_trace */
255#define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK | \
256 TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)
257
258/* trace_flags that are default zero for instances */
259#define ZEROED_TRACE_FLAGS \
1e10486f 260 (TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)
16270145 261
4fcdae83 262/*
263 * The global_trace is the descriptor that holds the top-level tracing
264 * buffers for the live tracing.
4fcdae83 265 */
266static struct trace_array global_trace = {
267 .trace_flags = TRACE_DEFAULT_FLAGS,
268};
bc0c38d1 269
ae63b31e 270LIST_HEAD(ftrace_trace_arrays);
bc0c38d1 271
272int trace_array_get(struct trace_array *this_tr)
273{
274 struct trace_array *tr;
275 int ret = -ENODEV;
276
277 mutex_lock(&trace_types_lock);
278 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
279 if (tr == this_tr) {
280 tr->ref++;
281 ret = 0;
282 break;
283 }
284 }
285 mutex_unlock(&trace_types_lock);
286
287 return ret;
288}
289
290static void __trace_array_put(struct trace_array *this_tr)
291{
292 WARN_ON(!this_tr->ref);
293 this_tr->ref--;
294}
295
296void trace_array_put(struct trace_array *this_tr)
297{
298 mutex_lock(&trace_types_lock);
299 __trace_array_put(this_tr);
300 mutex_unlock(&trace_types_lock);
301}
302
2425bcb9 303int call_filter_check_discard(struct trace_event_call *call, void *rec,
304 struct ring_buffer *buffer,
305 struct ring_buffer_event *event)
306{
307 if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
308 !filter_match_preds(call->filter, rec)) {
0fc1b09f 309 __trace_event_discard_commit(buffer, event);
310 return 1;
311 }
312
313 return 0;
314}
315
316void trace_free_pid_list(struct trace_pid_list *pid_list)
317{
318 vfree(pid_list->pids);
319 kfree(pid_list);
320}
321
322/**
323 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
324 * @filtered_pids: The list of pids to check
325 * @search_pid: The PID to find in @filtered_pids
326 *
 327 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
328 */
329bool
330trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
331{
332 /*
333 * If pid_max changed after filtered_pids was created, we
334 * by default ignore all pids greater than the previous pid_max.
335 */
336 if (search_pid >= filtered_pids->pid_max)
337 return false;
338
339 return test_bit(search_pid, filtered_pids->pids);
340}
341
342/**
343 * trace_ignore_this_task - should a task be ignored for tracing
344 * @filtered_pids: The list of pids to check
345 * @task: The task that should be ignored if not filtered
346 *
347 * Checks if @task should be traced or not from @filtered_pids.
348 * Returns true if @task should *NOT* be traced.
349 * Returns false if @task should be traced.
350 */
351bool
352trace_ignore_this_task(struct trace_pid_list *filtered_pids, struct task_struct *task)
353{
354 /*
355 * Return false, because if filtered_pids does not exist,
356 * all pids are good to trace.
357 */
358 if (!filtered_pids)
359 return false;
360
361 return !trace_find_filtered_pid(filtered_pids, task->pid);
362}
363
364/**
365 * trace_pid_filter_add_remove - Add or remove a task from a pid_list
366 * @pid_list: The list to modify
367 * @self: The current task for fork or NULL for exit
368 * @task: The task to add or remove
369 *
 370 * When adding a task, if @self is defined, the task is only added if @self
 371 * is also included in @pid_list. This happens on fork, and tasks should
 372 * only be added when the parent is listed. If @self is NULL, then the
 373 * @task pid will be removed from the list, which happens on exit
 374 * of a task.
375 */
376void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
377 struct task_struct *self,
378 struct task_struct *task)
379{
380 if (!pid_list)
381 return;
382
383 /* For forks, we only add if the forking task is listed */
384 if (self) {
385 if (!trace_find_filtered_pid(pid_list, self->pid))
386 return;
387 }
388
389 /* Sorry, but we don't support pid_max changing after setting */
390 if (task->pid >= pid_list->pid_max)
391 return;
392
393 /* "self" is set for forks, and NULL for exits */
394 if (self)
395 set_bit(task->pid, pid_list->pids);
396 else
397 clear_bit(task->pid, pid_list->pids);
398}
399
400/**
401 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
402 * @pid_list: The pid list to show
403 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
404 * @pos: The position of the file
405 *
406 * This is used by the seq_file "next" operation to iterate the pids
407 * listed in a trace_pid_list structure.
408 *
409 * Returns the pid+1 as we want to display pid of zero, but NULL would
410 * stop the iteration.
411 */
412void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
413{
414 unsigned long pid = (unsigned long)v;
415
416 (*pos)++;
417
 418 /* pid already is +1 of the actual previous bit */
419 pid = find_next_bit(pid_list->pids, pid_list->pid_max, pid);
420
421 /* Return pid + 1 to allow zero to be represented */
422 if (pid < pid_list->pid_max)
423 return (void *)(pid + 1);
424
425 return NULL;
426}
427
428/**
429 * trace_pid_start - Used for seq_file to start reading pid lists
430 * @pid_list: The pid list to show
431 * @pos: The position of the file
432 *
433 * This is used by seq_file "start" operation to start the iteration
434 * of listing pids.
435 *
436 * Returns the pid+1 as we want to display pid of zero, but NULL would
437 * stop the iteration.
438 */
439void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
440{
441 unsigned long pid;
442 loff_t l = 0;
443
444 pid = find_first_bit(pid_list->pids, pid_list->pid_max);
445 if (pid >= pid_list->pid_max)
446 return NULL;
447
448 /* Return pid + 1 so that zero can be the exit value */
449 for (pid++; pid && l < *pos;
450 pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
451 ;
452 return (void *)pid;
453}
454
455/**
456 * trace_pid_show - show the current pid in seq_file processing
457 * @m: The seq_file structure to write into
458 * @v: A void pointer of the pid (+1) value to display
459 *
460 * Can be directly used by seq_file operations to display the current
461 * pid value.
462 */
463int trace_pid_show(struct seq_file *m, void *v)
464{
465 unsigned long pid = (unsigned long)v - 1;
466
467 seq_printf(m, "%lu\n", pid);
468 return 0;
469}
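/*
 * Usage sketch (hypothetical names, for illustration only): callers normally
 * expose a trace_pid_list through seq_file by wrapping the helpers above:
 *
 *	static void *p_start(struct seq_file *m, loff_t *pos)
 *	{
 *		return trace_pid_start(my_pid_list, pos);
 *	}
 *
 *	static void *p_next(struct seq_file *m, void *v, loff_t *pos)
 *	{
 *		return trace_pid_next(my_pid_list, v, pos);
 *	}
 *
 *	static const struct seq_operations pid_sops = {
 *		.start = p_start,
 *		.next  = p_next,
 *		.stop  = p_stop,	// caller-defined cleanup
 *		.show  = trace_pid_show,
 *	};
 */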
470
471/* 128 should be much more than enough */
472#define PID_BUF_SIZE 127
473
474int trace_pid_write(struct trace_pid_list *filtered_pids,
475 struct trace_pid_list **new_pid_list,
476 const char __user *ubuf, size_t cnt)
477{
478 struct trace_pid_list *pid_list;
479 struct trace_parser parser;
480 unsigned long val;
481 int nr_pids = 0;
482 ssize_t read = 0;
483 ssize_t ret = 0;
484 loff_t pos;
485 pid_t pid;
486
487 if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
488 return -ENOMEM;
489
490 /*
491 * Always recreate a new array. The write is an all or nothing
492 * operation. Always create a new array when adding new pids by
493 * the user. If the operation fails, then the current list is
494 * not modified.
495 */
496 pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
497 if (!pid_list)
498 return -ENOMEM;
499
500 pid_list->pid_max = READ_ONCE(pid_max);
501
502 /* Only truncating will shrink pid_max */
503 if (filtered_pids && filtered_pids->pid_max > pid_list->pid_max)
504 pid_list->pid_max = filtered_pids->pid_max;
505
506 pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
507 if (!pid_list->pids) {
508 kfree(pid_list);
509 return -ENOMEM;
510 }
511
512 if (filtered_pids) {
513 /* copy the current bits to the new max */
514 for_each_set_bit(pid, filtered_pids->pids,
515 filtered_pids->pid_max) {
76c813e2 516 set_bit(pid, pid_list->pids);
517 nr_pids++;
518 }
519 }
520
521 while (cnt > 0) {
522
523 pos = 0;
524
525 ret = trace_get_user(&parser, ubuf, cnt, &pos);
526 if (ret < 0 || !trace_parser_loaded(&parser))
527 break;
528
529 read += ret;
530 ubuf += ret;
531 cnt -= ret;
532
533 parser.buffer[parser.idx] = 0;
534
535 ret = -EINVAL;
536 if (kstrtoul(parser.buffer, 0, &val))
537 break;
538 if (val >= pid_list->pid_max)
539 break;
540
541 pid = (pid_t)val;
542
543 set_bit(pid, pid_list->pids);
544 nr_pids++;
545
546 trace_parser_clear(&parser);
547 ret = 0;
548 }
549 trace_parser_put(&parser);
550
551 if (ret < 0) {
552 trace_free_pid_list(pid_list);
553 return ret;
554 }
555
556 if (!nr_pids) {
557 /* Cleared the list of pids */
558 trace_free_pid_list(pid_list);
559 read = ret;
560 pid_list = NULL;
561 }
562
563 *new_pid_list = pid_list;
564
565 return read;
566}
567
a5a1d1c2 568static u64 buffer_ftrace_now(struct trace_buffer *buf, int cpu)
569{
570 u64 ts;
571
572 /* Early boot up does not have a buffer yet */
9457158b 573 if (!buf->buffer)
574 return trace_clock_local();
575
576 ts = ring_buffer_time_stamp(buf->buffer, cpu);
577 ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
578
579 return ts;
580}
bc0c38d1 581
a5a1d1c2 582u64 ftrace_now(int cpu)
583{
584 return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
585}
586
587/**
588 * tracing_is_enabled - Show if global_trace has been disabled
589 *
590 * Shows if the global trace has been enabled or not. It uses the
591 * mirror flag "buffer_disabled" to be used in fast paths such as for
592 * the irqsoff tracer. But it may be inaccurate due to races. If you
593 * need to know the accurate state, use tracing_is_on() which is a little
594 * slower, but accurate.
595 */
596int tracing_is_enabled(void)
597{
598 /*
599 * For quick access (irqsoff uses this in fast path), just
600 * return the mirror variable of the state of the ring buffer.
601 * It's a little racy, but we don't really care.
602 */
603 smp_rmb();
604 return !global_trace.buffer_disabled;
605}
606
4fcdae83 607/*
608 * trace_buf_size is the size in bytes that is allocated
609 * for a buffer. Note, the number of bytes is always rounded
610 * to page size.
 611 *
 612 * This number is purposely set to a low value of 16384.
 613 * If a dump on oops happens, it is much appreciated not to have
 614 * to wait for all that output. In any case, this is configurable
 615 * at both boot time and run time.
4fcdae83 616 */
3928a8a2 617#define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */
3f5a54e3 618
3928a8a2 619static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
bc0c38d1 620
4fcdae83 621/* trace_types holds a linked list of available tracers. */
bc0c38d1 622static struct tracer *trace_types __read_mostly;
4fcdae83 623
624/*
625 * trace_types_lock is used to protect the trace_types list.
4fcdae83 626 */
a8227415 627DEFINE_MUTEX(trace_types_lock);
4fcdae83 628
629/*
630 * serialize the access of the ring buffer
631 *
 632 * The ring buffer serializes readers, but that is only low level protection.
 633 * The validity of the events (returned by ring_buffer_peek() etc.)
 634 * is not protected by the ring buffer.
 635 *
 636 * The content of events may become garbage if we allow other processes
 637 * to consume these events concurrently:
 638 * A) the page of the consumed events may become a normal page
 639 * (not a reader page) in the ring buffer, and this page will be rewritten
 640 * by the event producer.
 641 * B) The page of the consumed events may become a page for splice_read,
 642 * and this page will be returned to the system.
 643 *
 644 * These primitives allow multiple processes to access different cpu ring
 645 * buffers concurrently.
 646 *
 647 * These primitives don't distinguish read-only and read-consume access.
 648 * Multiple read-only accesses are also serialized.
649 */
650
651#ifdef CONFIG_SMP
652static DECLARE_RWSEM(all_cpu_access_lock);
653static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
654
655static inline void trace_access_lock(int cpu)
656{
ae3b5093 657 if (cpu == RING_BUFFER_ALL_CPUS) {
658 /* gain it for accessing the whole ring buffer. */
659 down_write(&all_cpu_access_lock);
660 } else {
661 /* gain it for accessing a cpu ring buffer. */
662
ae3b5093 663 /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
664 down_read(&all_cpu_access_lock);
665
666 /* Secondly block other access to this @cpu ring buffer. */
667 mutex_lock(&per_cpu(cpu_access_lock, cpu));
668 }
669}
670
671static inline void trace_access_unlock(int cpu)
672{
ae3b5093 673 if (cpu == RING_BUFFER_ALL_CPUS) {
674 up_write(&all_cpu_access_lock);
675 } else {
676 mutex_unlock(&per_cpu(cpu_access_lock, cpu));
677 up_read(&all_cpu_access_lock);
678 }
679}
680
681static inline void trace_access_lock_init(void)
682{
683 int cpu;
684
685 for_each_possible_cpu(cpu)
686 mutex_init(&per_cpu(cpu_access_lock, cpu));
687}
688
689#else
690
691static DEFINE_MUTEX(access_lock);
692
693static inline void trace_access_lock(int cpu)
694{
695 (void)cpu;
696 mutex_lock(&access_lock);
697}
698
699static inline void trace_access_unlock(int cpu)
700{
701 (void)cpu;
702 mutex_unlock(&access_lock);
703}
704
705static inline void trace_access_lock_init(void)
706{
707}
708
709#endif
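/*
 * Reader-side sketch (illustration only): a consumer of one cpu's ring
 * buffer is expected to bracket its accesses like
 *
 *	trace_access_lock(cpu);
 *	... ring_buffer_peek()/ring_buffer_consume() for that cpu ...
 *	trace_access_unlock(cpu);
 *
 * while passing RING_BUFFER_ALL_CPUS takes the exclusive (whole buffer) side.
 */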
710
711#ifdef CONFIG_STACKTRACE
712static void __ftrace_trace_stack(struct ring_buffer *buffer,
713 unsigned long flags,
714 int skip, int pc, struct pt_regs *regs);
715static inline void ftrace_trace_stack(struct trace_array *tr,
716 struct ring_buffer *buffer,
717 unsigned long flags,
718 int skip, int pc, struct pt_regs *regs);
ca475e83 719
720#else
721static inline void __ftrace_trace_stack(struct ring_buffer *buffer,
722 unsigned long flags,
723 int skip, int pc, struct pt_regs *regs)
724{
725}
726static inline void ftrace_trace_stack(struct trace_array *tr,
727 struct ring_buffer *buffer,
728 unsigned long flags,
729 int skip, int pc, struct pt_regs *regs)
730{
731}
732
733#endif
734
735static __always_inline void
736trace_event_setup(struct ring_buffer_event *event,
737 int type, unsigned long flags, int pc)
738{
739 struct trace_entry *ent = ring_buffer_event_data(event);
740
741 tracing_generic_entry_update(ent, flags, pc);
742 ent->type = type;
743}
744
745static __always_inline struct ring_buffer_event *
746__trace_buffer_lock_reserve(struct ring_buffer *buffer,
747 int type,
748 unsigned long len,
749 unsigned long flags, int pc)
750{
751 struct ring_buffer_event *event;
752
753 event = ring_buffer_lock_reserve(buffer, len);
754 if (event != NULL)
755 trace_event_setup(event, type, flags, pc);
756
757 return event;
758}
759
2290f2c5 760void tracer_tracing_on(struct trace_array *tr)
761{
762 if (tr->trace_buffer.buffer)
763 ring_buffer_record_on(tr->trace_buffer.buffer);
764 /*
765 * This flag is looked at when buffers haven't been allocated
 766 * yet, or by some tracers (like irqsoff) that just want to
 767 * know if the ring buffer has been disabled, but it can handle
 768 * races where it gets disabled while we still do a record.
769 * As the check is in the fast path of the tracers, it is more
770 * important to be fast than accurate.
771 */
772 tr->buffer_disabled = 0;
773 /* Make the flag seen by readers */
774 smp_wmb();
775}
776
777/**
778 * tracing_on - enable tracing buffers
779 *
780 * This function enables tracing buffers that may have been
781 * disabled with tracing_off.
782 */
783void tracing_on(void)
784{
10246fa3 785 tracer_tracing_on(&global_trace);
786}
787EXPORT_SYMBOL_GPL(tracing_on);
788
789
790static __always_inline void
791__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
792{
793 __this_cpu_write(trace_cmdline_save, true);
794
795 /* If this is the temp buffer, we need to commit fully */
796 if (this_cpu_read(trace_buffered_event) == event) {
797 /* Length is in event->array[0] */
798 ring_buffer_write(buffer, event->array[0], &event->array[1]);
799 /* Release the temp buffer */
800 this_cpu_dec(trace_buffered_event_cnt);
801 } else
802 ring_buffer_unlock_commit(buffer, event);
803}
804
805/**
806 * __trace_puts - write a constant string into the trace buffer.
807 * @ip: The address of the caller
808 * @str: The constant string to write
809 * @size: The size of the string.
810 */
811int __trace_puts(unsigned long ip, const char *str, int size)
812{
813 struct ring_buffer_event *event;
814 struct ring_buffer *buffer;
815 struct print_entry *entry;
816 unsigned long irq_flags;
817 int alloc;
818 int pc;
819
983f938a 820 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
821 return 0;
822
8abfb872 823 pc = preempt_count();
09ae7234 824
825 if (unlikely(tracing_selftest_running || tracing_disabled))
826 return 0;
827
828 alloc = sizeof(*entry) + size + 2; /* possible \n added */
829
830 local_save_flags(irq_flags);
831 buffer = global_trace.trace_buffer.buffer;
832 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
833 irq_flags, pc);
834 if (!event)
835 return 0;
836
837 entry = ring_buffer_event_data(event);
838 entry->ip = ip;
839
840 memcpy(&entry->buf, str, size);
841
842 /* Add a newline if necessary */
843 if (entry->buf[size - 1] != '\n') {
844 entry->buf[size] = '\n';
845 entry->buf[size + 1] = '\0';
846 } else
847 entry->buf[size] = '\0';
848
849 __buffer_unlock_commit(buffer, event);
2d34f489 850 ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
851
852 return size;
853}
854EXPORT_SYMBOL_GPL(__trace_puts);
855
856/**
857 * __trace_bputs - write the pointer to a constant string into trace buffer
858 * @ip: The address of the caller
859 * @str: The constant string to write to the buffer to
860 */
861int __trace_bputs(unsigned long ip, const char *str)
862{
863 struct ring_buffer_event *event;
864 struct ring_buffer *buffer;
865 struct bputs_entry *entry;
866 unsigned long irq_flags;
867 int size = sizeof(struct bputs_entry);
868 int pc;
869
983f938a 870 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
871 return 0;
872
8abfb872 873 pc = preempt_count();
09ae7234 874
875 if (unlikely(tracing_selftest_running || tracing_disabled))
876 return 0;
877
878 local_save_flags(irq_flags);
879 buffer = global_trace.trace_buffer.buffer;
880 event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
881 irq_flags, pc);
882 if (!event)
883 return 0;
884
885 entry = ring_buffer_event_data(event);
886 entry->ip = ip;
887 entry->str = str;
888
889 __buffer_unlock_commit(buffer, event);
2d34f489 890 ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
891
892 return 1;
893}
894EXPORT_SYMBOL_GPL(__trace_bputs);
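/*
 * Usage note (sketch, not a definition from this file): in-kernel callers do
 * not normally call __trace_puts()/__trace_bputs() directly; they use the
 * trace_puts() macro, which roughly expands to
 *
 *	if (__builtin_constant_p(str))
 *		__trace_bputs(_THIS_IP_, str);		// store only the pointer
 *	else
 *		__trace_puts(_THIS_IP_, str, strlen(str)); // copy the string
 */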
895
ad909e21 896#ifdef CONFIG_TRACER_SNAPSHOT
cab50379 897static void tracing_snapshot_instance(struct trace_array *tr)
ad909e21 898{
899 struct tracer *tracer = tr->current_trace;
900 unsigned long flags;
901
902 if (in_nmi()) {
903 internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
904 internal_trace_puts("*** snapshot is being ignored ***\n");
905 return;
906 }
907
ad909e21 908 if (!tr->allocated_snapshot) {
909 internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
910 internal_trace_puts("*** stopping trace here! ***\n");
911 tracing_off();
912 return;
913 }
914
915 /* Note, snapshot can not be used when the tracer uses it */
916 if (tracer->use_max_tr) {
917 internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
918 internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
919 return;
920 }
921
922 local_irq_save(flags);
923 update_max_tr(tr, current, smp_processor_id());
924 local_irq_restore(flags);
925}
926
927/**
928 * trace_snapshot - take a snapshot of the current buffer.
929 *
930 * This causes a swap between the snapshot buffer and the current live
931 * tracing buffer. You can use this to take snapshots of the live
932 * trace when some condition is triggered, but continue to trace.
933 *
934 * Note, make sure to allocate the snapshot with either
935 * a tracing_snapshot_alloc(), or by doing it manually
936 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
937 *
938 * If the snapshot buffer is not allocated, it will stop tracing.
939 * Basically making a permanent snapshot.
940 */
941void tracing_snapshot(void)
942{
943 struct trace_array *tr = &global_trace;
944
945 tracing_snapshot_instance(tr);
946}
1b22e382 947EXPORT_SYMBOL_GPL(tracing_snapshot);
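/*
 * Usage sketch (hypothetical condition, illustration only): a caller that
 * wants to capture the live trace when something goes wrong could do
 *
 *	if (tracing_alloc_snapshot() == 0)	// may sleep; call it early
 *		snapshot_ready = true;
 *	...
 *	if (snapshot_ready && my_error_condition)
 *		tracing_snapshot();	// does not sleep (but is ignored in NMI)
 *
 * The captured data is then readable from the tracefs "snapshot" file.
 */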
948
949static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
950 struct trace_buffer *size_buf, int cpu_id);
951static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);
952
953static int alloc_snapshot(struct trace_array *tr)
954{
955 int ret;
956
957 if (!tr->allocated_snapshot) {
958
959 /* allocate spare buffer */
960 ret = resize_buffer_duplicate_size(&tr->max_buffer,
961 &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
962 if (ret < 0)
963 return ret;
964
965 tr->allocated_snapshot = true;
966 }
967
968 return 0;
969}
970
ad1438a0 971static void free_snapshot(struct trace_array *tr)
972{
 973 /*
 974 * We don't free the ring buffer; instead, we resize it because
 975 * the max_tr ring buffer has some state (e.g. ring->clock) and
 976 * we want to preserve it.
 977 */
978 ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
979 set_buffer_entries(&tr->max_buffer, 1);
980 tracing_reset_online_cpus(&tr->max_buffer);
981 tr->allocated_snapshot = false;
982}
ad909e21 983
984/**
985 * tracing_alloc_snapshot - allocate snapshot buffer.
986 *
987 * This only allocates the snapshot buffer if it isn't already
988 * allocated - it doesn't also take a snapshot.
989 *
990 * This is meant to be used in cases where the snapshot buffer needs
991 * to be set up for events that can't sleep but need to be able to
992 * trigger a snapshot.
993 */
994int tracing_alloc_snapshot(void)
995{
996 struct trace_array *tr = &global_trace;
997 int ret;
998
999 ret = alloc_snapshot(tr);
1000 WARN_ON(ret < 0);
1001
1002 return ret;
1003}
1004EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1005
1006/**
1007 * trace_snapshot_alloc - allocate and take a snapshot of the current buffer.
1008 *
1009 * This is similar to trace_snapshot(), but it will allocate the
1010 * snapshot buffer if it isn't already allocated. Use this only
1011 * where it is safe to sleep, as the allocation may sleep.
1012 *
1013 * This causes a swap between the snapshot buffer and the current live
1014 * tracing buffer. You can use this to take snapshots of the live
1015 * trace when some condition is triggered, but continue to trace.
1016 */
1017void tracing_snapshot_alloc(void)
1018{
1019 int ret;
1020
1021 ret = tracing_alloc_snapshot();
1022 if (ret < 0)
3209cff4 1023 return;
1024
1025 tracing_snapshot();
1026}
1b22e382 1027EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
1028#else
1029void tracing_snapshot(void)
1030{
1031 WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
1032}
1b22e382 1033EXPORT_SYMBOL_GPL(tracing_snapshot);
1034int tracing_alloc_snapshot(void)
1035{
1036 WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
1037 return -ENODEV;
1038}
1039EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1040void tracing_snapshot_alloc(void)
1041{
1042 /* Give warning */
1043 tracing_snapshot();
1044}
1b22e382 1045EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
1046#endif /* CONFIG_TRACER_SNAPSHOT */
1047
2290f2c5 1048void tracer_tracing_off(struct trace_array *tr)
1049{
1050 if (tr->trace_buffer.buffer)
1051 ring_buffer_record_off(tr->trace_buffer.buffer);
1052 /*
1053 * This flag is looked at when buffers haven't been allocated
 1054 * yet, or by some tracers (like irqsoff) that just want to
 1055 * know if the ring buffer has been disabled, but it can handle
 1056 * races where it gets disabled while we still do a record.
1057 * As the check is in the fast path of the tracers, it is more
1058 * important to be fast than accurate.
1059 */
1060 tr->buffer_disabled = 1;
1061 /* Make the flag seen by readers */
1062 smp_wmb();
1063}
1064
1065/**
1066 * tracing_off - turn off tracing buffers
1067 *
1068 * This function stops the tracing buffers from recording data.
1069 * It does not disable any overhead the tracers themselves may
1070 * be causing. This function simply causes all recording to
1071 * the ring buffers to fail.
1072 */
1073void tracing_off(void)
1074{
10246fa3 1075 tracer_tracing_off(&global_trace);
1076}
1077EXPORT_SYMBOL_GPL(tracing_off);
1078
1079void disable_trace_on_warning(void)
1080{
1081 if (__disable_trace_on_warning)
1082 tracing_off();
1083}
1084
1085/**
1086 * tracer_tracing_is_on - show real state of ring buffer enabled
1087 * @tr : the trace array to know if ring buffer is enabled
1088 *
1089 * Shows real state of the ring buffer if it is enabled or not.
1090 */
e7c15cd8 1091int tracer_tracing_is_on(struct trace_array *tr)
1092{
1093 if (tr->trace_buffer.buffer)
1094 return ring_buffer_record_is_on(tr->trace_buffer.buffer);
1095 return !tr->buffer_disabled;
1096}
1097
1098/**
1099 * tracing_is_on - show state of ring buffers enabled
1100 */
1101int tracing_is_on(void)
1102{
10246fa3 1103 return tracer_tracing_is_on(&global_trace);
1104}
1105EXPORT_SYMBOL_GPL(tracing_is_on);
1106
3928a8a2 1107static int __init set_buf_size(char *str)
bc0c38d1 1108{
3928a8a2 1109 unsigned long buf_size;
c6caeeb1 1110
1111 if (!str)
1112 return 0;
9d612bef 1113 buf_size = memparse(str, &str);
c6caeeb1 1114 /* nr_entries can not be zero */
9d612bef 1115 if (buf_size == 0)
c6caeeb1 1116 return 0;
3928a8a2 1117 trace_buf_size = buf_size;
1118 return 1;
1119}
3928a8a2 1120__setup("trace_buf_size=", set_buf_size);
bc0c38d1 1121
1122static int __init set_tracing_thresh(char *str)
1123{
87abb3b1 1124 unsigned long threshold;
1125 int ret;
1126
1127 if (!str)
1128 return 0;
bcd83ea6 1129 ret = kstrtoul(str, 0, &threshold);
1130 if (ret < 0)
1131 return 0;
87abb3b1 1132 tracing_thresh = threshold * 1000;
1133 return 1;
1134}
1135__setup("tracing_thresh=", set_tracing_thresh);
1136
1137unsigned long nsecs_to_usecs(unsigned long nsecs)
1138{
1139 return nsecs / 1000;
1140}
1141
1142/*
1143 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
f57a4143 1144 * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
a3418a36 1145 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
f57a4143 1146 * of strings in the order that the evals (enum) were defined.
1147 */
1148#undef C
1149#define C(a, b) b
1150
4fcdae83 1151/* These must match the bit positions in trace_iterator_flags */
bc0c38d1 1152static const char *trace_options[] = {
a3418a36 1153 TRACE_FLAGS
1154 NULL
1155};
1156
1157static struct {
1158 u64 (*func)(void);
1159 const char *name;
8be0709f 1160 int in_ns; /* is this clock in nanoseconds? */
5079f326 1161} trace_clocks[] = {
1162 { trace_clock_local, "local", 1 },
1163 { trace_clock_global, "global", 1 },
1164 { trace_clock_counter, "counter", 0 },
e7fda6c4 1165 { trace_clock_jiffies, "uptime", 0 },
1166 { trace_clock, "perf", 1 },
1167 { ktime_get_mono_fast_ns, "mono", 1 },
aabfa5f2 1168 { ktime_get_raw_fast_ns, "mono_raw", 1 },
80ec3552 1169 { ktime_get_boot_fast_ns, "boot", 1 },
8cbd9cc6 1170 ARCH_TRACE_CLOCKS
1171};
1172
b63f39ea 1173/*
1174 * trace_parser_get_init - gets the buffer for trace parser
1175 */
1176int trace_parser_get_init(struct trace_parser *parser, int size)
1177{
1178 memset(parser, 0, sizeof(*parser));
1179
1180 parser->buffer = kmalloc(size, GFP_KERNEL);
1181 if (!parser->buffer)
1182 return 1;
1183
1184 parser->size = size;
1185 return 0;
1186}
1187
1188/*
1189 * trace_parser_put - frees the buffer for trace parser
1190 */
1191void trace_parser_put(struct trace_parser *parser)
1192{
1193 kfree(parser->buffer);
0e684b65 1194 parser->buffer = NULL;
b63f39ea 1195}
1196
1197/*
1198 * trace_get_user - reads the user input string separated by space
1199 * (matched by isspace(ch))
1200 *
1201 * For each string found the 'struct trace_parser' is updated,
1202 * and the function returns.
1203 *
1204 * Returns number of bytes read.
1205 *
1206 * See kernel/trace/trace.h for 'struct trace_parser' details.
1207 */
1208int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
1209 size_t cnt, loff_t *ppos)
1210{
1211 char ch;
1212 size_t read = 0;
1213 ssize_t ret;
1214
1215 if (!*ppos)
1216 trace_parser_clear(parser);
1217
1218 ret = get_user(ch, ubuf++);
1219 if (ret)
1220 goto out;
1221
1222 read++;
1223 cnt--;
1224
1225 /*
1226 * The parser is not finished with the last write,
1227 * continue reading the user input without skipping spaces.
1228 */
1229 if (!parser->cont) {
1230 /* skip white space */
1231 while (cnt && isspace(ch)) {
1232 ret = get_user(ch, ubuf++);
1233 if (ret)
1234 goto out;
1235 read++;
1236 cnt--;
1237 }
1238
1239 /* only spaces were written */
1240 if (isspace(ch)) {
1241 *ppos += read;
1242 ret = read;
1243 goto out;
1244 }
1245
1246 parser->idx = 0;
1247 }
1248
1249 /* read the non-space input */
1250 while (cnt && !isspace(ch)) {
3c235a33 1251 if (parser->idx < parser->size - 1)
b63f39ea 1252 parser->buffer[parser->idx++] = ch;
1253 else {
1254 ret = -EINVAL;
1255 goto out;
1256 }
1257 ret = get_user(ch, ubuf++);
1258 if (ret)
1259 goto out;
1260 read++;
1261 cnt--;
1262 }
1263
1264 /* We either got finished input or we have to wait for another call. */
1265 if (isspace(ch)) {
1266 parser->buffer[parser->idx] = 0;
1267 parser->cont = false;
057db848 1268 } else if (parser->idx < parser->size - 1) {
b63f39ea 1269 parser->cont = true;
1270 parser->buffer[parser->idx++] = ch;
057db848
SR
1271 } else {
1272 ret = -EINVAL;
1273 goto out;
b63f39ea 1274 }
1275
1276 *ppos += read;
1277 ret = read;
1278
1279out:
1280 return ret;
1281}
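/*
 * Typical read loop (sketch mirroring trace_pid_write() above; MAX_LEN is a
 * placeholder): callers pull whitespace-separated tokens from the user buffer:
 *
 *	trace_parser_get_init(&parser, MAX_LEN + 1);
 *	while (cnt > 0) {
 *		pos = 0;
 *		ret = trace_get_user(&parser, ubuf, cnt, &pos);
 *		if (ret < 0 || !trace_parser_loaded(&parser))
 *			break;
 *		ubuf += ret;
 *		cnt -= ret;
 *		... parser.buffer now holds one token ...
 *		trace_parser_clear(&parser);
 *	}
 *	trace_parser_put(&parser);
 */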
1282
3a161d99 1283/* TODO add a seq_buf_to_buffer() */
b8b94265 1284static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
1285{
1286 int len;
3c56819b 1287
5ac48378 1288 if (trace_seq_used(s) <= s->seq.readpos)
1289 return -EBUSY;
1290
5ac48378 1291 len = trace_seq_used(s) - s->seq.readpos;
1292 if (cnt > len)
1293 cnt = len;
3a161d99 1294 memcpy(buf, s->buffer + s->seq.readpos, cnt);
3c56819b 1295
3a161d99 1296 s->seq.readpos += cnt;
1297 return cnt;
1298}
1299
1300unsigned long __read_mostly tracing_thresh;
1301
5d4a9dba 1302#ifdef CONFIG_TRACER_MAX_TRACE
1303/*
1304 * Copy the new maximum trace into the separate maximum-trace
1305 * structure. (this way the maximum trace is permanently saved,
1306 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
1307 */
1308static void
1309__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1310{
1311 struct trace_buffer *trace_buf = &tr->trace_buffer;
1312 struct trace_buffer *max_buf = &tr->max_buffer;
1313 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
1314 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
5d4a9dba 1315
1316 max_buf->cpu = cpu;
1317 max_buf->time_start = data->preempt_timestamp;
5d4a9dba 1318
6d9b3fa5 1319 max_data->saved_latency = tr->max_latency;
1320 max_data->critical_start = data->critical_start;
1321 max_data->critical_end = data->critical_end;
5d4a9dba 1322
1acaa1b2 1323 memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
8248ac05 1324 max_data->pid = tsk->pid;
1325 /*
1326 * If tsk == current, then use current_uid(), as that does not use
1327 * RCU. The irq tracer can be called out of RCU scope.
1328 */
1329 if (tsk == current)
1330 max_data->uid = current_uid();
1331 else
1332 max_data->uid = task_uid(tsk);
1333
1334 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1335 max_data->policy = tsk->policy;
1336 max_data->rt_priority = tsk->rt_priority;
1337
1338 /* record this tasks comm */
1339 tracing_record_cmdline(tsk);
1340}
1341
1342/**
1343 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1344 * @tr: tracer
1345 * @tsk: the task with the latency
1346 * @cpu: The cpu that initiated the trace.
1347 *
1348 * Flip the buffers between the @tr and the max_tr and record information
1349 * about which task was the cause of this latency.
1350 */
e309b41d 1351void
1352update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1353{
2721e72d 1354 struct ring_buffer *buf;
bc0c38d1 1355
2b6080f2 1356 if (tr->stop_count)
1357 return;
1358
4c11d7ae 1359 WARN_ON_ONCE(!irqs_disabled());
34600f0e 1360
45ad21ca 1361 if (!tr->allocated_snapshot) {
debdd57f 1362 /* Only the nop tracer should hit this when disabling */
2b6080f2 1363 WARN_ON_ONCE(tr->current_trace != &nop_trace);
34600f0e 1364 return;
debdd57f 1365 }
34600f0e 1366
0b9b12c1 1367 arch_spin_lock(&tr->max_lock);
3928a8a2 1368
1369 buf = tr->trace_buffer.buffer;
1370 tr->trace_buffer.buffer = tr->max_buffer.buffer;
1371 tr->max_buffer.buffer = buf;
3928a8a2 1372
bc0c38d1 1373 __update_max_tr(tr, tsk, cpu);
0b9b12c1 1374 arch_spin_unlock(&tr->max_lock);
1375}
1376
1377/**
1378 * update_max_tr_single - only copy one trace over, and reset the rest
1379 * @tr - tracer
1380 * @tsk - task with the latency
1381 * @cpu - the cpu of the buffer to copy.
1382 *
1383 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
bc0c38d1 1384 */
e309b41d 1385void
1386update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1387{
3928a8a2 1388 int ret;
bc0c38d1 1389
2b6080f2 1390 if (tr->stop_count)
1391 return;
1392
4c11d7ae 1393 WARN_ON_ONCE(!irqs_disabled());
6c24499f 1394 if (!tr->allocated_snapshot) {
2930e04d 1395 /* Only the nop tracer should hit this when disabling */
9e8529af 1396 WARN_ON_ONCE(tr->current_trace != &nop_trace);
ef710e10 1397 return;
2930e04d 1398 }
ef710e10 1399
0b9b12c1 1400 arch_spin_lock(&tr->max_lock);
bc0c38d1 1401
12883efb 1402 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);
3928a8a2 1403
1404 if (ret == -EBUSY) {
1405 /*
1406 * We failed to swap the buffer due to a commit taking
1407 * place on this CPU. We fail to record, but we reset
1408 * the max trace buffer (no one writes directly to it)
1409 * and flag that it failed.
1410 */
12883efb 1411 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
1412 "Failed to swap buffers due to commit in progress\n");
1413 }
1414
e8165dbb 1415 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
1416
1417 __update_max_tr(tr, tsk, cpu);
0b9b12c1 1418 arch_spin_unlock(&tr->max_lock);
bc0c38d1 1419}
5d4a9dba 1420#endif /* CONFIG_TRACER_MAX_TRACE */
bc0c38d1 1421
e30f53aa 1422static int wait_on_pipe(struct trace_iterator *iter, bool full)
0d5c6e1c 1423{
1424 /* Iterators are static, they should be filled or empty */
1425 if (trace_buffer_iter(iter, iter->cpu_file))
8b8b3683 1426 return 0;
0d5c6e1c 1427
1428 return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
1429 full);
1430}
1431
f4e781c0 1432#ifdef CONFIG_FTRACE_STARTUP_TEST
1433static bool selftests_can_run;
1434
1435struct trace_selftests {
1436 struct list_head list;
1437 struct tracer *type;
1438};
1439
1440static LIST_HEAD(postponed_selftests);
1441
1442static int save_selftest(struct tracer *type)
1443{
1444 struct trace_selftests *selftest;
1445
1446 selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
1447 if (!selftest)
1448 return -ENOMEM;
1449
1450 selftest->type = type;
1451 list_add(&selftest->list, &postponed_selftests);
1452 return 0;
1453}
1454
1455static int run_tracer_selftest(struct tracer *type)
1456{
1457 struct trace_array *tr = &global_trace;
1458 struct tracer *saved_tracer = tr->current_trace;
1459 int ret;
0d5c6e1c 1460
1461 if (!type->selftest || tracing_selftest_disabled)
1462 return 0;
0d5c6e1c 1463
1464 /*
1465 * If a tracer registers early in boot up (before scheduling is
1466 * initialized and such), then do not run its selftests yet.
1467 * Instead, run it a little later in the boot process.
1468 */
1469 if (!selftests_can_run)
1470 return save_selftest(type);
1471
0d5c6e1c 1472 /*
1473 * Run a selftest on this tracer.
1474 * Here we reset the trace buffer, and set the current
1475 * tracer to be this tracer. The tracer can then run some
1476 * internal tracing to verify that everything is in order.
1477 * If we fail, we do not register this tracer.
0d5c6e1c 1478 */
f4e781c0 1479 tracing_reset_online_cpus(&tr->trace_buffer);
0d5c6e1c 1480
1481 tr->current_trace = type;
1482
1483#ifdef CONFIG_TRACER_MAX_TRACE
1484 if (type->use_max_tr) {
1485 /* If we expanded the buffers, make sure the max is expanded too */
1486 if (ring_buffer_expanded)
1487 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1488 RING_BUFFER_ALL_CPUS);
1489 tr->allocated_snapshot = true;
1490 }
1491#endif
1492
1493 /* the test is responsible for initializing and enabling */
1494 pr_info("Testing tracer %s: ", type->name);
1495 ret = type->selftest(type, tr);
1496 /* the test is responsible for resetting too */
1497 tr->current_trace = saved_tracer;
1498 if (ret) {
1499 printk(KERN_CONT "FAILED!\n");
1500 /* Add the warning after printing 'FAILED' */
1501 WARN_ON(1);
1502 return -1;
1503 }
1504 /* Only reset on passing, to avoid touching corrupted buffers */
1505 tracing_reset_online_cpus(&tr->trace_buffer);
1506
1507#ifdef CONFIG_TRACER_MAX_TRACE
1508 if (type->use_max_tr) {
1509 tr->allocated_snapshot = false;
0d5c6e1c 1510
1511 /* Shrink the max buffer again */
1512 if (ring_buffer_expanded)
1513 ring_buffer_resize(tr->max_buffer.buffer, 1,
1514 RING_BUFFER_ALL_CPUS);
1515 }
1516#endif
1517
1518 printk(KERN_CONT "PASSED\n");
1519 return 0;
1520}
1521
1522static __init int init_trace_selftests(void)
1523{
1524 struct trace_selftests *p, *n;
1525 struct tracer *t, **last;
1526 int ret;
1527
1528 selftests_can_run = true;
1529
1530 mutex_lock(&trace_types_lock);
1531
1532 if (list_empty(&postponed_selftests))
1533 goto out;
1534
1535 pr_info("Running postponed tracer tests:\n");
1536
1537 list_for_each_entry_safe(p, n, &postponed_selftests, list) {
1538 ret = run_tracer_selftest(p->type);
1539 /* If the test fails, then warn and remove from available_tracers */
1540 if (ret < 0) {
1541 WARN(1, "tracer: %s failed selftest, disabling\n",
1542 p->type->name);
1543 last = &trace_types;
1544 for (t = trace_types; t; t = t->next) {
1545 if (t == p->type) {
1546 *last = t->next;
1547 break;
1548 }
1549 last = &t->next;
1550 }
1551 }
1552 list_del(&p->list);
1553 kfree(p);
1554 }
1555
1556 out:
1557 mutex_unlock(&trace_types_lock);
1558
1559 return 0;
1560}
b9ef0326 1561core_initcall(init_trace_selftests);
1562#else
1563static inline int run_tracer_selftest(struct tracer *type)
1564{
1565 return 0;
0d5c6e1c 1566}
f4e781c0 1567#endif /* CONFIG_FTRACE_STARTUP_TEST */
0d5c6e1c 1568
1569static void add_tracer_options(struct trace_array *tr, struct tracer *t);
1570
1571static void __init apply_trace_boot_options(void);
1572
1573/**
1574 * register_tracer - register a tracer with the ftrace system.
1575 * @type - the plugin for the tracer
1576 *
1577 * Register a new plugin tracer.
1578 */
a4d1e688 1579int __init register_tracer(struct tracer *type)
1580{
1581 struct tracer *t;
1582 int ret = 0;
1583
1584 if (!type->name) {
1585 pr_info("Tracer must have a name\n");
1586 return -1;
1587 }
1588
24a461d5 1589 if (strlen(type->name) >= MAX_TRACER_SIZE) {
1590 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
1591 return -1;
1592 }
1593
bc0c38d1 1594 mutex_lock(&trace_types_lock);
86fa2f60 1595
1596 tracing_selftest_running = true;
1597
1598 for (t = trace_types; t; t = t->next) {
1599 if (strcmp(type->name, t->name) == 0) {
1600 /* already found */
ee6c2c1b 1601 pr_info("Tracer %s already registered\n",
1602 type->name);
1603 ret = -1;
1604 goto out;
1605 }
1606 }
1607
1608 if (!type->set_flag)
1609 type->set_flag = &dummy_set_flag;
1610 if (!type->flags) {
 1611 /* allocate a dummy tracer_flags */
1612 type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
1613 if (!type->flags) {
1614 ret = -ENOMEM;
1615 goto out;
1616 }
1617 type->flags->val = 0;
1618 type->flags->opts = dummy_tracer_opt;
1619 } else
1620 if (!type->flags->opts)
1621 type->flags->opts = dummy_tracer_opt;
6eaaa5d5 1622
1623 /* store the tracer for __set_tracer_option */
1624 type->flags->trace = type;
1625
1626 ret = run_tracer_selftest(type);
1627 if (ret < 0)
1628 goto out;
60a11774 1629
1630 type->next = trace_types;
1631 trace_types = type;
41d9c0be 1632 add_tracer_options(&global_trace, type);
60a11774 1633
bc0c38d1 1634 out:
8e1b82e0 1635 tracing_selftest_running = false;
1636 mutex_unlock(&trace_types_lock);
1637
1638 if (ret || !default_bootup_tracer)
1639 goto out_unlock;
1640
ee6c2c1b 1641 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
1642 goto out_unlock;
1643
1644 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
1645 /* Do we want this tracer to start on bootup? */
607e2ea1 1646 tracing_set_tracer(&global_trace, type->name);
dac74940 1647 default_bootup_tracer = NULL;
1648
1649 apply_trace_boot_options();
1650
dac74940 1651 /* disable other selftests, since this will break it. */
55034cd6 1652 tracing_selftest_disabled = true;
b2821ae6 1653#ifdef CONFIG_FTRACE_STARTUP_TEST
1654 printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
1655 type->name);
b2821ae6 1656#endif
b2821ae6 1657
dac74940 1658 out_unlock:
1659 return ret;
1660}
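/*
 * Registration sketch (hypothetical tracer, illustration only): a plugin
 * normally supplies at least a name plus init/reset callbacks and registers
 * itself from an initcall:
 *
 *	static struct tracer example_tracer __read_mostly = {
 *		.name	= "example",
 *		.init	= example_tracer_init,
 *		.reset	= example_tracer_reset,
 *	};
 *
 *	static int __init init_example_tracer(void)
 *	{
 *		return register_tracer(&example_tracer);
 *	}
 *	core_initcall(init_example_tracer);
 */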
1661
12883efb 1662void tracing_reset(struct trace_buffer *buf, int cpu)
f633903a 1663{
12883efb 1664 struct ring_buffer *buffer = buf->buffer;
f633903a 1665
1666 if (!buffer)
1667 return;
1668
1669 ring_buffer_record_disable(buffer);
1670
1671 /* Make sure all commits have finished */
1672 synchronize_sched();
68179686 1673 ring_buffer_reset_cpu(buffer, cpu);
1674
1675 ring_buffer_record_enable(buffer);
1676}
1677
12883efb 1678void tracing_reset_online_cpus(struct trace_buffer *buf)
213cc060 1679{
12883efb 1680 struct ring_buffer *buffer = buf->buffer;
1681 int cpu;
1682
1683 if (!buffer)
1684 return;
1685
1686 ring_buffer_record_disable(buffer);
1687
1688 /* Make sure all commits have finished */
1689 synchronize_sched();
1690
9457158b 1691 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
1692
1693 for_each_online_cpu(cpu)
68179686 1694 ring_buffer_reset_cpu(buffer, cpu);
1695
1696 ring_buffer_record_enable(buffer);
1697}
1698
09d8091c 1699/* Must have trace_types_lock held */
873c642f 1700void tracing_reset_all_online_cpus(void)
9456f0fa 1701{
1702 struct trace_array *tr;
1703
873c642f 1704 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
1705 tracing_reset_online_cpus(&tr->trace_buffer);
1706#ifdef CONFIG_TRACER_MAX_TRACE
1707 tracing_reset_online_cpus(&tr->max_buffer);
1708#endif
873c642f 1709 }
1710}
1711
939c7a4f 1712#define SAVED_CMDLINES_DEFAULT 128
2c7eea4c 1713#define NO_CMDLINE_MAP UINT_MAX
edc35bd7 1714static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
1715struct saved_cmdlines_buffer {
1716 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
1717 unsigned *map_cmdline_to_pid;
1718 unsigned cmdline_num;
1719 int cmdline_idx;
1720 char *saved_cmdlines;
1721};
1722static struct saved_cmdlines_buffer *savedcmd;
25b0b44a 1723
25b0b44a 1724/* temporarily disable recording */
4fd27358 1725static atomic_t trace_record_cmdline_disabled __read_mostly;
bc0c38d1 1726
1727static inline char *get_saved_cmdlines(int idx)
1728{
1729 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
1730}
1731
1732static inline void set_cmdline(int idx, const char *cmdline)
bc0c38d1 1733{
1734 memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
1735}
1736
1737static int allocate_cmdlines_buffer(unsigned int val,
1738 struct saved_cmdlines_buffer *s)
1739{
1740 s->map_cmdline_to_pid = kmalloc(val * sizeof(*s->map_cmdline_to_pid),
1741 GFP_KERNEL);
1742 if (!s->map_cmdline_to_pid)
1743 return -ENOMEM;
1744
1745 s->saved_cmdlines = kmalloc(val * TASK_COMM_LEN, GFP_KERNEL);
1746 if (!s->saved_cmdlines) {
1747 kfree(s->map_cmdline_to_pid);
1748 return -ENOMEM;
1749 }
1750
1751 s->cmdline_idx = 0;
1752 s->cmdline_num = val;
1753 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
1754 sizeof(s->map_pid_to_cmdline));
1755 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
1756 val * sizeof(*s->map_cmdline_to_pid));
1757
1758 return 0;
1759}
1760
1761static int trace_create_savedcmd(void)
1762{
1763 int ret;
1764
a6af8fbf 1765 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
1766 if (!savedcmd)
1767 return -ENOMEM;
1768
1769 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
1770 if (ret < 0) {
1771 kfree(savedcmd);
1772 savedcmd = NULL;
1773 return -ENOMEM;
1774 }
1775
1776 return 0;
1777}
1778
1779int is_tracing_stopped(void)
1780{
2b6080f2 1781 return global_trace.stop_count;
1782}
1783
1784/**
1785 * tracing_start - quick start of the tracer
1786 *
1787 * If tracing is enabled but was stopped by tracing_stop,
1788 * this will start the tracer back up.
1789 */
1790void tracing_start(void)
1791{
1792 struct ring_buffer *buffer;
1793 unsigned long flags;
1794
1795 if (tracing_disabled)
1796 return;
1797
1798 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1799 if (--global_trace.stop_count) {
1800 if (global_trace.stop_count < 0) {
1801 /* Someone screwed up their debugging */
1802 WARN_ON_ONCE(1);
2b6080f2 1803 global_trace.stop_count = 0;
b06a8301 1804 }
1805 goto out;
1806 }
1807
a2f80714 1808 /* Prevent the buffers from switching */
0b9b12c1 1809 arch_spin_lock(&global_trace.max_lock);
0f048701 1810
12883efb 1811 buffer = global_trace.trace_buffer.buffer;
1812 if (buffer)
1813 ring_buffer_record_enable(buffer);
1814
1815#ifdef CONFIG_TRACER_MAX_TRACE
1816 buffer = global_trace.max_buffer.buffer;
1817 if (buffer)
1818 ring_buffer_record_enable(buffer);
12883efb 1819#endif
0f048701 1820
0b9b12c1 1821 arch_spin_unlock(&global_trace.max_lock);
a2f80714 1822
0f048701 1823 out:
1824 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1825}
1826
1827static void tracing_start_tr(struct trace_array *tr)
1828{
1829 struct ring_buffer *buffer;
1830 unsigned long flags;
1831
1832 if (tracing_disabled)
1833 return;
1834
1835 /* If global, we need to also start the max tracer */
1836 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1837 return tracing_start();
1838
1839 raw_spin_lock_irqsave(&tr->start_lock, flags);
1840
1841 if (--tr->stop_count) {
1842 if (tr->stop_count < 0) {
1843 /* Someone screwed up their debugging */
1844 WARN_ON_ONCE(1);
1845 tr->stop_count = 0;
1846 }
1847 goto out;
1848 }
1849
12883efb 1850 buffer = tr->trace_buffer.buffer;
2b6080f2
SR
1851 if (buffer)
1852 ring_buffer_record_enable(buffer);
1853
1854 out:
1855 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
0f048701
SR
1856}
1857
1858/**
1859 * tracing_stop - quick stop of the tracer
1860 *
1861 * Light weight way to stop tracing. Use in conjunction with
1862 * tracing_start.
1863 */
1864void tracing_stop(void)
1865{
1866 struct ring_buffer *buffer;
1867 unsigned long flags;
1868
2b6080f2
SR
1869 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1870 if (global_trace.stop_count++)
0f048701
SR
1871 goto out;
1872
a2f80714 1873 /* Prevent the buffers from switching */
0b9b12c1 1874 arch_spin_lock(&global_trace.max_lock);
a2f80714 1875
12883efb 1876 buffer = global_trace.trace_buffer.buffer;
0f048701
SR
1877 if (buffer)
1878 ring_buffer_record_disable(buffer);
1879
12883efb
SRRH
1880#ifdef CONFIG_TRACER_MAX_TRACE
1881 buffer = global_trace.max_buffer.buffer;
0f048701
SR
1882 if (buffer)
1883 ring_buffer_record_disable(buffer);
12883efb 1884#endif
0f048701 1885
0b9b12c1 1886 arch_spin_unlock(&global_trace.max_lock);
a2f80714 1887
0f048701 1888 out:
2b6080f2
SR
1889 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1890}
1891
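/*
 * Illustrative sketch (hypothetical caller, not part of the original
 * file): tracing_stop()/tracing_start() nest via stop_count, so a
 * paired user can bracket a region without disturbing other stoppers.
 */
static __maybe_unused void example_pause_tracing(void)
{
	tracing_stop();
	/* ... inspect or copy out buffer contents while recording is off ... */
	tracing_start();
}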
1892static void tracing_stop_tr(struct trace_array *tr)
1893{
1894 struct ring_buffer *buffer;
1895 unsigned long flags;
1896
1897 /* If global, we need to also stop the max tracer */
1898 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1899 return tracing_stop();
1900
1901 raw_spin_lock_irqsave(&tr->start_lock, flags);
1902 if (tr->stop_count++)
1903 goto out;
1904
12883efb 1905 buffer = tr->trace_buffer.buffer;
2b6080f2
SR
1906 if (buffer)
1907 ring_buffer_record_disable(buffer);
1908
1909 out:
1910 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
0f048701
SR
1911}
1912
379cfdac 1913static int trace_save_cmdline(struct task_struct *tsk)
bc0c38d1 1914{
a635cf04 1915 unsigned pid, idx;
bc0c38d1
SR
1916
1917 if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
379cfdac 1918 return 0;
bc0c38d1
SR
1919
1920 /*
1921 * It's not the end of the world if we don't get
1922 * the lock, but we also don't want to spin
1923 * nor do we want to disable interrupts,
1924 * so if we miss here, then better luck next time.
1925 */
0199c4e6 1926 if (!arch_spin_trylock(&trace_cmdline_lock))
379cfdac 1927 return 0;
bc0c38d1 1928
939c7a4f 1929 idx = savedcmd->map_pid_to_cmdline[tsk->pid];
2c7eea4c 1930 if (idx == NO_CMDLINE_MAP) {
939c7a4f 1931 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
bc0c38d1 1932
a635cf04
CE
1933 /*
1934 * Check whether the cmdline buffer at idx has a pid
1935 * mapped. We are going to overwrite that entry so we
1936 * need to clear the map_pid_to_cmdline. Otherwise we
1937 * would read the new comm for the old pid.
1938 */
939c7a4f 1939 pid = savedcmd->map_cmdline_to_pid[idx];
a635cf04 1940 if (pid != NO_CMDLINE_MAP)
939c7a4f 1941 savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
bc0c38d1 1942
939c7a4f
YY
1943 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
1944 savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
bc0c38d1 1945
939c7a4f 1946 savedcmd->cmdline_idx = idx;
bc0c38d1
SR
1947 }
1948
939c7a4f 1949 set_cmdline(idx, tsk->comm);
bc0c38d1 1950
0199c4e6 1951 arch_spin_unlock(&trace_cmdline_lock);
379cfdac
SRRH
1952
1953 return 1;
bc0c38d1
SR
1954}
1955
4c27e756 1956static void __trace_find_cmdline(int pid, char comm[])
bc0c38d1 1957{
bc0c38d1
SR
1958 unsigned map;
1959
4ca53085
SR
1960 if (!pid) {
1961 strcpy(comm, "<idle>");
1962 return;
1963 }
bc0c38d1 1964
74bf4076
SR
1965 if (WARN_ON_ONCE(pid < 0)) {
1966 strcpy(comm, "<XXX>");
1967 return;
1968 }
1969
4ca53085
SR
1970 if (pid > PID_MAX_DEFAULT) {
1971 strcpy(comm, "<...>");
1972 return;
1973 }
bc0c38d1 1974
939c7a4f 1975 map = savedcmd->map_pid_to_cmdline[pid];
50d88758 1976 if (map != NO_CMDLINE_MAP)
e09e2867 1977 strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
50d88758
TG
1978 else
1979 strcpy(comm, "<...>");
4c27e756
SRRH
1980}
1981
1982void trace_find_cmdline(int pid, char comm[])
1983{
1984 preempt_disable();
1985 arch_spin_lock(&trace_cmdline_lock);
1986
1987 __trace_find_cmdline(pid, comm);
bc0c38d1 1988
0199c4e6 1989 arch_spin_unlock(&trace_cmdline_lock);
5b6045a9 1990 preempt_enable();
bc0c38d1
SR
1991}
1992
e309b41d 1993void tracing_record_cmdline(struct task_struct *tsk)
bc0c38d1 1994{
0fb9656d 1995 if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
bc0c38d1
SR
1996 return;
1997
7ffbd48d
SR
1998 if (!__this_cpu_read(trace_cmdline_save))
1999 return;
2000
379cfdac
SRRH
2001 if (trace_save_cmdline(tsk))
2002 __this_cpu_write(trace_cmdline_save, false);
bc0c38d1
SR
2003}
2004
af0009fc
SRV
2005/*
2006 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
2007 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
2008 * simplifies those functions and keeps them in sync.
2009 */
2010enum print_line_t trace_handle_return(struct trace_seq *s)
2011{
2012 return trace_seq_has_overflowed(s) ?
2013 TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
2014}
2015EXPORT_SYMBOL_GPL(trace_handle_return);
2016
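/*
 * Illustrative sketch (hypothetical callback, not part of the original
 * file): a typical output routine writes its line and then lets
 * trace_handle_return() turn the overflow state of the trace_seq into
 * the proper return value.
 */
static __maybe_unused enum print_line_t
example_print_line(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;

	trace_seq_printf(s, "example: cpu=%d ts=%llu\n", iter->cpu, iter->ts);

	/* TRACE_TYPE_PARTIAL_LINE on overflow, TRACE_TYPE_HANDLED otherwise */
	return trace_handle_return(s);
}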
45dcd8b8 2017void
38697053
SR
2018tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
2019 int pc)
bc0c38d1
SR
2020{
2021 struct task_struct *tsk = current;
bc0c38d1 2022
777e208d
SR
2023 entry->preempt_count = pc & 0xff;
2024 entry->pid = (tsk) ? tsk->pid : 0;
2025 entry->flags =
9244489a 2026#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
2e2ca155 2027 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
9244489a
SR
2028#else
2029 TRACE_FLAG_IRQS_NOSUPPORT |
2030#endif
7e6867bf 2031 ((pc & NMI_MASK ) ? TRACE_FLAG_NMI : 0) |
bc0c38d1 2032 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
c59f29cb 2033 ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
e5137b50
PZ
2034 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
2035 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
bc0c38d1 2036}
f413cdb8 2037EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
bc0c38d1 2038
e77405ad
SR
2039struct ring_buffer_event *
2040trace_buffer_lock_reserve(struct ring_buffer *buffer,
2041 int type,
2042 unsigned long len,
2043 unsigned long flags, int pc)
51a763dd 2044{
3e9a8aad 2045 return __trace_buffer_lock_reserve(buffer, type, len, flags, pc);
0fc1b09f
SRRH
2046}
2047
2048DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
2049DEFINE_PER_CPU(int, trace_buffered_event_cnt);
2050static int trace_buffered_event_ref;
2051
2052/**
2053 * trace_buffered_event_enable - enable buffering events
2054 *
2055 * When events are being filtered, it is quicker to use a temporary
2056 * buffer to write the event data into if there's a likely chance
2057 * that it will not be committed. The discard of the ring buffer
2058 * is not as fast as committing, and is much slower than copying
2059 * a commit.
2060 *
2061 * When an event is to be filtered, allocate per cpu buffers to
2062 * write the event data into, and if the event is filtered and discarded
2063 * it is simply dropped, otherwise, the entire data is to be committed
2064 * in one shot.
2065 */
2066void trace_buffered_event_enable(void)
2067{
2068 struct ring_buffer_event *event;
2069 struct page *page;
2070 int cpu;
51a763dd 2071
0fc1b09f
SRRH
2072 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2073
2074 if (trace_buffered_event_ref++)
2075 return;
2076
2077 for_each_tracing_cpu(cpu) {
2078 page = alloc_pages_node(cpu_to_node(cpu),
2079 GFP_KERNEL | __GFP_NORETRY, 0);
2080 if (!page)
2081 goto failed;
2082
2083 event = page_address(page);
2084 memset(event, 0, sizeof(*event));
2085
2086 per_cpu(trace_buffered_event, cpu) = event;
2087
2088 preempt_disable();
2089 if (cpu == smp_processor_id() &&
2090 this_cpu_read(trace_buffered_event) !=
2091 per_cpu(trace_buffered_event, cpu))
2092 WARN_ON_ONCE(1);
2093 preempt_enable();
51a763dd
ACM
2094 }
2095
0fc1b09f
SRRH
2096 return;
2097 failed:
2098 trace_buffered_event_disable();
2099}
2100
2101static void enable_trace_buffered_event(void *data)
2102{
2103 /* Probably not needed, but do it anyway */
2104 smp_rmb();
2105 this_cpu_dec(trace_buffered_event_cnt);
2106}
2107
2108static void disable_trace_buffered_event(void *data)
2109{
2110 this_cpu_inc(trace_buffered_event_cnt);
2111}
2112
2113/**
2114 * trace_buffered_event_disable - disable buffering events
2115 *
2116 * When a filter is removed, it is faster to not use the buffered
2117 * events, and to commit directly into the ring buffer. Free up
2118 * the temp buffers when there are no more users. This requires
2119 * special synchronization with current events.
2120 */
2121void trace_buffered_event_disable(void)
2122{
2123 int cpu;
2124
2125 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2126
2127 if (WARN_ON_ONCE(!trace_buffered_event_ref))
2128 return;
2129
2130 if (--trace_buffered_event_ref)
2131 return;
2132
2133 preempt_disable();
2134 /* For each CPU, set the buffer as used. */
2135 smp_call_function_many(tracing_buffer_mask,
2136 disable_trace_buffered_event, NULL, 1);
2137 preempt_enable();
2138
2139 /* Wait for all current users to finish */
2140 synchronize_sched();
2141
2142 for_each_tracing_cpu(cpu) {
2143 free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
2144 per_cpu(trace_buffered_event, cpu) = NULL;
2145 }
2146 /*
2147 * Make sure trace_buffered_event is NULL before clearing
2148 * trace_buffered_event_cnt.
2149 */
2150 smp_wmb();
2151
2152 preempt_disable();
2153 /* Do the work on each cpu */
2154 smp_call_function_many(tracing_buffer_mask,
2155 enable_trace_buffered_event, NULL, 1);
2156 preempt_enable();
51a763dd 2157}
51a763dd 2158
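/*
 * Illustrative sketch (hypothetical callers, not part of the original
 * file): users pair the enable/disable calls around the lifetime of an
 * event filter, holding event_mutex as the WARN_ON_ONCE() checks above
 * require.
 */
static __maybe_unused void example_filter_attach(void)
{
	mutex_lock(&event_mutex);
	trace_buffered_event_enable();	/* allocate the per-cpu pages */
	mutex_unlock(&event_mutex);
}

static __maybe_unused void example_filter_detach(void)
{
	mutex_lock(&event_mutex);
	trace_buffered_event_disable();	/* free them once unreferenced */
	mutex_unlock(&event_mutex);
}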
2c4a33ab
SRRH
2159static struct ring_buffer *temp_buffer;
2160
ccb469a1
SR
2161struct ring_buffer_event *
2162trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
7f1d2f82 2163 struct trace_event_file *trace_file,
ccb469a1
SR
2164 int type, unsigned long len,
2165 unsigned long flags, int pc)
2166{
2c4a33ab 2167 struct ring_buffer_event *entry;
0fc1b09f 2168 int val;
2c4a33ab 2169
7f1d2f82 2170 *current_rb = trace_file->tr->trace_buffer.buffer;
0fc1b09f
SRRH
2171
2172 if ((trace_file->flags &
2173 (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
2174 (entry = this_cpu_read(trace_buffered_event))) {
2175 /* Try to use the per cpu buffer first */
2176 val = this_cpu_inc_return(trace_buffered_event_cnt);
2177 if (val == 1) {
2178 trace_event_setup(entry, type, flags, pc);
2179 entry->array[0] = len;
2180 return entry;
2181 }
2182 this_cpu_dec(trace_buffered_event_cnt);
2183 }
2184
3e9a8aad
SRRH
2185 entry = __trace_buffer_lock_reserve(*current_rb,
2186 type, len, flags, pc);
2c4a33ab
SRRH
2187 /*
2188 * If tracing is off, but we have triggers enabled
2189 * we still need to look at the event data. Use the temp_buffer
 2190 * to store the trace event for the trigger to use. It's recursion
2191 * safe and will not be recorded anywhere.
2192 */
5d6ad960 2193 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
2c4a33ab 2194 *current_rb = temp_buffer;
3e9a8aad
SRRH
2195 entry = __trace_buffer_lock_reserve(*current_rb,
2196 type, len, flags, pc);
2c4a33ab
SRRH
2197 }
2198 return entry;
ccb469a1
SR
2199}
2200EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
2201
42391745
SRRH
2202static DEFINE_SPINLOCK(tracepoint_iter_lock);
2203static DEFINE_MUTEX(tracepoint_printk_mutex);
2204
2205static void output_printk(struct trace_event_buffer *fbuffer)
2206{
2207 struct trace_event_call *event_call;
2208 struct trace_event *event;
2209 unsigned long flags;
2210 struct trace_iterator *iter = tracepoint_print_iter;
2211
2212 /* We should never get here if iter is NULL */
2213 if (WARN_ON_ONCE(!iter))
2214 return;
2215
2216 event_call = fbuffer->trace_file->event_call;
2217 if (!event_call || !event_call->event.funcs ||
2218 !event_call->event.funcs->trace)
2219 return;
2220
2221 event = &fbuffer->trace_file->event_call->event;
2222
2223 spin_lock_irqsave(&tracepoint_iter_lock, flags);
2224 trace_seq_init(&iter->seq);
2225 iter->ent = fbuffer->entry;
2226 event_call->event.funcs->trace(iter, 0, event);
2227 trace_seq_putc(&iter->seq, 0);
2228 printk("%s", iter->seq.buffer);
2229
2230 spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
2231}
2232
2233int tracepoint_printk_sysctl(struct ctl_table *table, int write,
2234 void __user *buffer, size_t *lenp,
2235 loff_t *ppos)
2236{
2237 int save_tracepoint_printk;
2238 int ret;
2239
2240 mutex_lock(&tracepoint_printk_mutex);
2241 save_tracepoint_printk = tracepoint_printk;
2242
2243 ret = proc_dointvec(table, write, buffer, lenp, ppos);
2244
2245 /*
2246 * This will force exiting early, as tracepoint_printk
 2247 * is always zero when tracepoint_print_iter is not allocated
2248 */
2249 if (!tracepoint_print_iter)
2250 tracepoint_printk = 0;
2251
2252 if (save_tracepoint_printk == tracepoint_printk)
2253 goto out;
2254
2255 if (tracepoint_printk)
2256 static_key_enable(&tracepoint_printk_key.key);
2257 else
2258 static_key_disable(&tracepoint_printk_key.key);
2259
2260 out:
2261 mutex_unlock(&tracepoint_printk_mutex);
2262
2263 return ret;
2264}
2265
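/*
 * Illustrative sketch (assumption, not part of this file): the handler
 * above is wired up from a ctl_table entry roughly like the one below;
 * the real entry lives in kernel/sysctl.c, and defining it would also
 * need <linux/sysctl.h>.  Writing 0 or 1 to the sysctl then flips the
 * tracepoint_printk static key.
 */
static struct ctl_table example_tracepoint_printk_table[] __maybe_unused = {
	{
		.procname	= "tracepoint_printk",
		.data		= &tracepoint_printk,
		.maxlen		= sizeof(tracepoint_printk),
		.mode		= 0644,
		.proc_handler	= tracepoint_printk_sysctl,
	},
	{ }
};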
2266void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
2267{
2268 if (static_key_false(&tracepoint_printk_key.key))
2269 output_printk(fbuffer);
2270
2271 event_trigger_unlock_commit(fbuffer->trace_file, fbuffer->buffer,
2272 fbuffer->event, fbuffer->entry,
2273 fbuffer->flags, fbuffer->pc);
2274}
2275EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
2276
b7f0c959
SRRH
2277void trace_buffer_unlock_commit_regs(struct trace_array *tr,
2278 struct ring_buffer *buffer,
0d5c6e1c
SR
2279 struct ring_buffer_event *event,
2280 unsigned long flags, int pc,
2281 struct pt_regs *regs)
1fd8df2c 2282{
7ffbd48d 2283 __buffer_unlock_commit(buffer, event);
1fd8df2c 2284
be54f69c
SRRH
2285 /*
2286 * If regs is not set, then skip the following callers:
2287 * trace_buffer_unlock_commit_regs
2288 * event_trigger_unlock_commit
2289 * trace_event_buffer_commit
2290 * trace_event_raw_event_sched_switch
2291 * Note, we can still get here via blktrace, wakeup tracer
2292 * and mmiotrace, but that's ok if they lose a function or
 2293 * two. They are not that meaningful.
2294 */
2295 ftrace_trace_stack(tr, buffer, flags, regs ? 0 : 4, pc, regs);
1fd8df2c
MH
2296 ftrace_trace_userstack(buffer, flags, pc);
2297}
1fd8df2c 2298
52ffabe3
SRRH
2299/*
2300 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
2301 */
2302void
2303trace_buffer_unlock_commit_nostack(struct ring_buffer *buffer,
2304 struct ring_buffer_event *event)
2305{
2306 __buffer_unlock_commit(buffer, event);
2307}
2308
478409dd
CZ
2309static void
2310trace_process_export(struct trace_export *export,
2311 struct ring_buffer_event *event)
2312{
2313 struct trace_entry *entry;
2314 unsigned int size = 0;
2315
2316 entry = ring_buffer_event_data(event);
2317 size = ring_buffer_event_length(event);
2318 export->write(entry, size);
2319}
2320
2321static DEFINE_MUTEX(ftrace_export_lock);
2322
2323static struct trace_export __rcu *ftrace_exports_list __read_mostly;
2324
2325static DEFINE_STATIC_KEY_FALSE(ftrace_exports_enabled);
2326
2327static inline void ftrace_exports_enable(void)
2328{
2329 static_branch_enable(&ftrace_exports_enabled);
2330}
2331
2332static inline void ftrace_exports_disable(void)
2333{
2334 static_branch_disable(&ftrace_exports_enabled);
2335}
2336
2337void ftrace_exports(struct ring_buffer_event *event)
2338{
2339 struct trace_export *export;
2340
2341 preempt_disable_notrace();
2342
2343 export = rcu_dereference_raw_notrace(ftrace_exports_list);
2344 while (export) {
2345 trace_process_export(export, event);
2346 export = rcu_dereference_raw_notrace(export->next);
2347 }
2348
2349 preempt_enable_notrace();
2350}
2351
2352static inline void
2353add_trace_export(struct trace_export **list, struct trace_export *export)
2354{
2355 rcu_assign_pointer(export->next, *list);
2356 /*
2357 * We are entering export into the list but another
2358 * CPU might be walking that list. We need to make sure
2359 * the export->next pointer is valid before another CPU sees
 2360 * the export pointer inserted into the list.
2361 */
2362 rcu_assign_pointer(*list, export);
2363}
2364
2365static inline int
2366rm_trace_export(struct trace_export **list, struct trace_export *export)
2367{
2368 struct trace_export **p;
2369
2370 for (p = list; *p != NULL; p = &(*p)->next)
2371 if (*p == export)
2372 break;
2373
2374 if (*p != export)
2375 return -1;
2376
2377 rcu_assign_pointer(*p, (*p)->next);
2378
2379 return 0;
2380}
2381
2382static inline void
2383add_ftrace_export(struct trace_export **list, struct trace_export *export)
2384{
2385 if (*list == NULL)
2386 ftrace_exports_enable();
2387
2388 add_trace_export(list, export);
2389}
2390
2391static inline int
2392rm_ftrace_export(struct trace_export **list, struct trace_export *export)
2393{
2394 int ret;
2395
2396 ret = rm_trace_export(list, export);
2397 if (*list == NULL)
2398 ftrace_exports_disable();
2399
2400 return ret;
2401}
2402
2403int register_ftrace_export(struct trace_export *export)
2404{
2405 if (WARN_ON_ONCE(!export->write))
2406 return -1;
2407
2408 mutex_lock(&ftrace_export_lock);
2409
2410 add_ftrace_export(&ftrace_exports_list, export);
2411
2412 mutex_unlock(&ftrace_export_lock);
2413
2414 return 0;
2415}
2416EXPORT_SYMBOL_GPL(register_ftrace_export);
2417
2418int unregister_ftrace_export(struct trace_export *export)
2419{
2420 int ret;
2421
2422 mutex_lock(&ftrace_export_lock);
2423
2424 ret = rm_ftrace_export(&ftrace_exports_list, export);
2425
2426 mutex_unlock(&ftrace_export_lock);
2427
2428 return ret;
2429}
2430EXPORT_SYMBOL_GPL(unregister_ftrace_export);
2431
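/*
 * Illustrative sketch (hypothetical consumer, not part of the original
 * file): a minimal user of the export hook.  The ->write() prototype
 * is inferred from trace_process_export() above, which hands over the
 * raw trace entry and its length.
 */
static void example_export_write(const void *buf, unsigned int len)
{
	/* Push the raw function-trace event somewhere out of band. */
	pr_debug("ftrace export: %u byte event\n", len);
}

static struct trace_export example_export __maybe_unused = {
	.write	= example_export_write,
};

/*
 * register_ftrace_export(&example_export) would then see every function
 * trace event; unregister_ftrace_export(&example_export) detaches it.
 */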
e309b41d 2432void
7be42151 2433trace_function(struct trace_array *tr,
38697053
SR
2434 unsigned long ip, unsigned long parent_ip, unsigned long flags,
2435 int pc)
bc0c38d1 2436{
2425bcb9 2437 struct trace_event_call *call = &event_function;
12883efb 2438 struct ring_buffer *buffer = tr->trace_buffer.buffer;
3928a8a2 2439 struct ring_buffer_event *event;
777e208d 2440 struct ftrace_entry *entry;
bc0c38d1 2441
3e9a8aad
SRRH
2442 event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
2443 flags, pc);
3928a8a2
SR
2444 if (!event)
2445 return;
2446 entry = ring_buffer_event_data(event);
777e208d
SR
2447 entry->ip = ip;
2448 entry->parent_ip = parent_ip;
e1112b4d 2449
478409dd
CZ
2450 if (!call_filter_check_discard(call, entry, buffer, event)) {
2451 if (static_branch_unlikely(&ftrace_exports_enabled))
2452 ftrace_exports(event);
7ffbd48d 2453 __buffer_unlock_commit(buffer, event);
478409dd 2454 }
bc0c38d1
SR
2455}
2456
c0a0d0d3 2457#ifdef CONFIG_STACKTRACE
4a9bd3f1
SR
2458
2459#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
2460struct ftrace_stack {
2461 unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
2462};
2463
2464static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
2465static DEFINE_PER_CPU(int, ftrace_stack_reserve);
2466
e77405ad 2467static void __ftrace_trace_stack(struct ring_buffer *buffer,
53614991 2468 unsigned long flags,
1fd8df2c 2469 int skip, int pc, struct pt_regs *regs)
86387f7e 2470{
2425bcb9 2471 struct trace_event_call *call = &event_kernel_stack;
3928a8a2 2472 struct ring_buffer_event *event;
777e208d 2473 struct stack_entry *entry;
86387f7e 2474 struct stack_trace trace;
4a9bd3f1
SR
2475 int use_stack;
2476 int size = FTRACE_STACK_ENTRIES;
2477
2478 trace.nr_entries = 0;
2479 trace.skip = skip;
2480
be54f69c
SRRH
2481 /*
2482 * Add two, for this function and the call to save_stack_trace()
2483 * If regs is set, then these functions will not be in the way.
2484 */
2485 if (!regs)
2486 trace.skip += 2;
2487
4a9bd3f1
SR
2488 /*
2489 * Since events can happen in NMIs there's no safe way to
2490 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
2491 * or NMI comes in, it will just have to use the default
2492 * FTRACE_STACK_SIZE.
2493 */
2494 preempt_disable_notrace();
2495
82146529 2496 use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
4a9bd3f1
SR
2497 /*
2498 * We don't need any atomic variables, just a barrier.
2499 * If an interrupt comes in, we don't care, because it would
2500 * have exited and put the counter back to what we want.
2501 * We just need a barrier to keep gcc from moving things
2502 * around.
2503 */
2504 barrier();
2505 if (use_stack == 1) {
bdffd893 2506 trace.entries = this_cpu_ptr(ftrace_stack.calls);
4a9bd3f1
SR
2507 trace.max_entries = FTRACE_STACK_MAX_ENTRIES;
2508
2509 if (regs)
2510 save_stack_trace_regs(regs, &trace);
2511 else
2512 save_stack_trace(&trace);
2513
2514 if (trace.nr_entries > size)
2515 size = trace.nr_entries;
2516 } else
2517 /* From now on, use_stack is a boolean */
2518 use_stack = 0;
2519
2520 size *= sizeof(unsigned long);
86387f7e 2521
3e9a8aad
SRRH
2522 event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
2523 sizeof(*entry) + size, flags, pc);
3928a8a2 2524 if (!event)
4a9bd3f1
SR
2525 goto out;
2526 entry = ring_buffer_event_data(event);
86387f7e 2527
4a9bd3f1
SR
2528 memset(&entry->caller, 0, size);
2529
2530 if (use_stack)
2531 memcpy(&entry->caller, trace.entries,
2532 trace.nr_entries * sizeof(unsigned long));
2533 else {
2534 trace.max_entries = FTRACE_STACK_ENTRIES;
2535 trace.entries = entry->caller;
2536 if (regs)
2537 save_stack_trace_regs(regs, &trace);
2538 else
2539 save_stack_trace(&trace);
2540 }
2541
2542 entry->size = trace.nr_entries;
86387f7e 2543
f306cc82 2544 if (!call_filter_check_discard(call, entry, buffer, event))
7ffbd48d 2545 __buffer_unlock_commit(buffer, event);
4a9bd3f1
SR
2546
2547 out:
2548 /* Again, don't let gcc optimize things here */
2549 barrier();
82146529 2550 __this_cpu_dec(ftrace_stack_reserve);
4a9bd3f1
SR
2551 preempt_enable_notrace();
2552
f0a920d5
IM
2553}
2554
2d34f489
SRRH
2555static inline void ftrace_trace_stack(struct trace_array *tr,
2556 struct ring_buffer *buffer,
73dddbb5
SRRH
2557 unsigned long flags,
2558 int skip, int pc, struct pt_regs *regs)
53614991 2559{
2d34f489 2560 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
53614991
SR
2561 return;
2562
73dddbb5 2563 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
53614991
SR
2564}
2565
c0a0d0d3
FW
2566void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
2567 int pc)
38697053 2568{
a33d7d94
SRV
2569 struct ring_buffer *buffer = tr->trace_buffer.buffer;
2570
2571 if (rcu_is_watching()) {
2572 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
2573 return;
2574 }
2575
2576 /*
2577 * When an NMI triggers, RCU is enabled via rcu_nmi_enter(),
2578 * but if the above rcu_is_watching() failed, then the NMI
2579 * triggered someplace critical, and rcu_irq_enter() should
2580 * not be called from NMI.
2581 */
2582 if (unlikely(in_nmi()))
2583 return;
2584
2585 /*
2586 * It is possible that a function is being traced in a
2587 * location that RCU is not watching. A call to
 2588 * rcu_irq_enter() will make sure that it is, but there are
 2589 * a few internal rcu functions that could be traced
 2590 * where that won't work either. In those cases, we just
2591 * do nothing.
2592 */
2593 if (unlikely(rcu_irq_enter_disabled()))
2594 return;
2595
2596 rcu_irq_enter_irqson();
2597 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
2598 rcu_irq_exit_irqson();
38697053
SR
2599}
2600
03889384
SR
2601/**
2602 * trace_dump_stack - record a stack back trace in the trace buffer
c142be8e 2603 * @skip: Number of functions to skip (helper handlers)
03889384 2604 */
c142be8e 2605void trace_dump_stack(int skip)
03889384
SR
2606{
2607 unsigned long flags;
2608
2609 if (tracing_disabled || tracing_selftest_running)
e36c5458 2610 return;
03889384
SR
2611
2612 local_save_flags(flags);
2613
c142be8e
SRRH
2614 /*
2615 * Skip 3 more, seems to get us at the caller of
2616 * this function.
2617 */
2618 skip += 3;
2619 __ftrace_trace_stack(global_trace.trace_buffer.buffer,
2620 flags, skip, preempt_count(), NULL);
03889384
SR
2621}
2622
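/*
 * Illustrative sketch (hypothetical caller, not part of the original
 * file): drop a kernel stack trace into the trace buffer from a debug
 * path.  A skip of 0 starts at the immediate caller; a positive skip
 * hides that many intermediate helpers.
 */
static __maybe_unused void example_debug_path(void)
{
	trace_dump_stack(0);
}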
91e86e56
SR
2623static DEFINE_PER_CPU(int, user_stack_count);
2624
e77405ad
SR
2625void
2626ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
02b67518 2627{
2425bcb9 2628 struct trace_event_call *call = &event_user_stack;
8d7c6a96 2629 struct ring_buffer_event *event;
02b67518
TE
2630 struct userstack_entry *entry;
2631 struct stack_trace trace;
02b67518 2632
983f938a 2633 if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
02b67518
TE
2634 return;
2635
b6345879
SR
2636 /*
2637 * NMIs can not handle page faults, even with fix ups.
 2638 * Saving the user stack can (and often does) fault.
2639 */
2640 if (unlikely(in_nmi()))
2641 return;
02b67518 2642
91e86e56
SR
2643 /*
2644 * prevent recursion, since the user stack tracing may
2645 * trigger other kernel events.
2646 */
2647 preempt_disable();
2648 if (__this_cpu_read(user_stack_count))
2649 goto out;
2650
2651 __this_cpu_inc(user_stack_count);
2652
3e9a8aad
SRRH
2653 event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
2654 sizeof(*entry), flags, pc);
02b67518 2655 if (!event)
1dbd1951 2656 goto out_drop_count;
02b67518 2657 entry = ring_buffer_event_data(event);
02b67518 2658
48659d31 2659 entry->tgid = current->tgid;
02b67518
TE
2660 memset(&entry->caller, 0, sizeof(entry->caller));
2661
2662 trace.nr_entries = 0;
2663 trace.max_entries = FTRACE_STACK_ENTRIES;
2664 trace.skip = 0;
2665 trace.entries = entry->caller;
2666
2667 save_stack_trace_user(&trace);
f306cc82 2668 if (!call_filter_check_discard(call, entry, buffer, event))
7ffbd48d 2669 __buffer_unlock_commit(buffer, event);
91e86e56 2670
1dbd1951 2671 out_drop_count:
91e86e56 2672 __this_cpu_dec(user_stack_count);
91e86e56
SR
2673 out:
2674 preempt_enable();
02b67518
TE
2675}
2676
4fd27358
HE
2677#ifdef UNUSED
2678static void __trace_userstack(struct trace_array *tr, unsigned long flags)
02b67518 2679{
7be42151 2680 ftrace_trace_userstack(tr, flags, preempt_count());
02b67518 2681}
4fd27358 2682#endif /* UNUSED */
02b67518 2683
c0a0d0d3
FW
2684#endif /* CONFIG_STACKTRACE */
2685
07d777fe
SR
2686/* created for use with alloc_percpu */
2687struct trace_buffer_struct {
e2ace001
AL
2688 int nesting;
2689 char buffer[4][TRACE_BUF_SIZE];
07d777fe
SR
2690};
2691
2692static struct trace_buffer_struct *trace_percpu_buffer;
07d777fe
SR
2693
2694/*
e2ace001
AL
 2695 * This allows for lockless recording. If we're nested too deeply, then
2696 * this returns NULL.
07d777fe
SR
2697 */
2698static char *get_trace_buf(void)
2699{
e2ace001 2700 struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
07d777fe 2701
e2ace001 2702 if (!buffer || buffer->nesting >= 4)
07d777fe
SR
2703 return NULL;
2704
e2ace001
AL
2705 return &buffer->buffer[buffer->nesting++][0];
2706}
2707
2708static void put_trace_buf(void)
2709{
2710 this_cpu_dec(trace_percpu_buffer->nesting);
07d777fe
SR
2711}
2712
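/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * file): get_trace_buf()/put_trace_buf() must be strictly paired, with
 * preemption disabled across the pair, exactly as trace_vbprintk()
 * below does.
 */
static __maybe_unused int example_use_trace_buf(void)
{
	char *tbuffer;
	int len = 0;

	preempt_disable_notrace();

	tbuffer = get_trace_buf();
	if (tbuffer) {
		len = scnprintf(tbuffer, TRACE_BUF_SIZE, "scratch line");
		put_trace_buf();
	}

	preempt_enable_notrace();
	return len;
}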
2713static int alloc_percpu_trace_buffer(void)
2714{
2715 struct trace_buffer_struct *buffers;
07d777fe
SR
2716
2717 buffers = alloc_percpu(struct trace_buffer_struct);
e2ace001
AL
2718 if (WARN(!buffers, "Could not allocate percpu trace_printk buffer"))
2719 return -ENOMEM;
07d777fe
SR
2720
2721 trace_percpu_buffer = buffers;
07d777fe 2722 return 0;
07d777fe
SR
2723}
2724
81698831
SR
2725static int buffers_allocated;
2726
07d777fe
SR
2727void trace_printk_init_buffers(void)
2728{
07d777fe
SR
2729 if (buffers_allocated)
2730 return;
2731
2732 if (alloc_percpu_trace_buffer())
2733 return;
2734
2184db46
SR
2735 /* trace_printk() is for debug use only. Don't use it in production. */
2736
a395d6a7
JP
2737 pr_warn("\n");
2738 pr_warn("**********************************************************\n");
2739 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2740 pr_warn("** **\n");
2741 pr_warn("** trace_printk() being used. Allocating extra memory. **\n");
2742 pr_warn("** **\n");
2743 pr_warn("** This means that this is a DEBUG kernel and it is **\n");
2744 pr_warn("** unsafe for production use. **\n");
2745 pr_warn("** **\n");
2746 pr_warn("** If you see this message and you are not debugging **\n");
2747 pr_warn("** the kernel, report this immediately to your vendor! **\n");
2748 pr_warn("** **\n");
2749 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2750 pr_warn("**********************************************************\n");
07d777fe 2751
b382ede6
SR
2752 /* Expand the buffers to set size */
2753 tracing_update_buffers();
2754
07d777fe 2755 buffers_allocated = 1;
81698831
SR
2756
2757 /*
2758 * trace_printk_init_buffers() can be called by modules.
2759 * If that happens, then we need to start cmdline recording
 2760 * directly here. If global_trace.trace_buffer.buffer is already
2761 * allocated here, then this was called by module code.
2762 */
12883efb 2763 if (global_trace.trace_buffer.buffer)
81698831
SR
2764 tracing_start_cmdline_record();
2765}
2766
2767void trace_printk_start_comm(void)
2768{
2769 /* Start tracing comms if trace printk is set */
2770 if (!buffers_allocated)
2771 return;
2772 tracing_start_cmdline_record();
2773}
2774
2775static void trace_printk_start_stop_comm(int enabled)
2776{
2777 if (!buffers_allocated)
2778 return;
2779
2780 if (enabled)
2781 tracing_start_cmdline_record();
2782 else
2783 tracing_stop_cmdline_record();
07d777fe
SR
2784}
2785
769b0441 2786/**
48ead020 2787 * trace_vbprintk - write binary msg to tracing buffer
769b0441
FW
2788 *
2789 */
40ce74f1 2790int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
769b0441 2791{
2425bcb9 2792 struct trace_event_call *call = &event_bprint;
769b0441 2793 struct ring_buffer_event *event;
e77405ad 2794 struct ring_buffer *buffer;
769b0441 2795 struct trace_array *tr = &global_trace;
48ead020 2796 struct bprint_entry *entry;
769b0441 2797 unsigned long flags;
07d777fe
SR
2798 char *tbuffer;
2799 int len = 0, size, pc;
769b0441
FW
2800
2801 if (unlikely(tracing_selftest_running || tracing_disabled))
2802 return 0;
2803
2804 /* Don't pollute graph traces with trace_vprintk internals */
2805 pause_graph_tracing();
2806
2807 pc = preempt_count();
5168ae50 2808 preempt_disable_notrace();
769b0441 2809
07d777fe
SR
2810 tbuffer = get_trace_buf();
2811 if (!tbuffer) {
2812 len = 0;
e2ace001 2813 goto out_nobuffer;
07d777fe 2814 }
769b0441 2815
07d777fe 2816 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
769b0441 2817
07d777fe
SR
2818 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
2819 goto out;
769b0441 2820
07d777fe 2821 local_save_flags(flags);
769b0441 2822 size = sizeof(*entry) + sizeof(u32) * len;
12883efb 2823 buffer = tr->trace_buffer.buffer;
3e9a8aad
SRRH
2824 event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
2825 flags, pc);
769b0441 2826 if (!event)
07d777fe 2827 goto out;
769b0441
FW
2828 entry = ring_buffer_event_data(event);
2829 entry->ip = ip;
769b0441
FW
2830 entry->fmt = fmt;
2831
07d777fe 2832 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
f306cc82 2833 if (!call_filter_check_discard(call, entry, buffer, event)) {
7ffbd48d 2834 __buffer_unlock_commit(buffer, event);
2d34f489 2835 ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
d931369b 2836 }
769b0441 2837
769b0441 2838out:
e2ace001
AL
2839 put_trace_buf();
2840
2841out_nobuffer:
5168ae50 2842 preempt_enable_notrace();
769b0441
FW
2843 unpause_graph_tracing();
2844
2845 return len;
2846}
48ead020
FW
2847EXPORT_SYMBOL_GPL(trace_vbprintk);
2848
12883efb
SRRH
2849static int
2850__trace_array_vprintk(struct ring_buffer *buffer,
2851 unsigned long ip, const char *fmt, va_list args)
48ead020 2852{
2425bcb9 2853 struct trace_event_call *call = &event_print;
48ead020 2854 struct ring_buffer_event *event;
07d777fe 2855 int len = 0, size, pc;
48ead020 2856 struct print_entry *entry;
07d777fe
SR
2857 unsigned long flags;
2858 char *tbuffer;
48ead020
FW
2859
2860 if (tracing_disabled || tracing_selftest_running)
2861 return 0;
2862
07d777fe
SR
2863 /* Don't pollute graph traces with trace_vprintk internals */
2864 pause_graph_tracing();
2865
48ead020
FW
2866 pc = preempt_count();
2867 preempt_disable_notrace();
48ead020 2868
07d777fe
SR
2869
2870 tbuffer = get_trace_buf();
2871 if (!tbuffer) {
2872 len = 0;
e2ace001 2873 goto out_nobuffer;
07d777fe 2874 }
48ead020 2875
3558a5ac 2876 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
48ead020 2877
07d777fe 2878 local_save_flags(flags);
48ead020 2879 size = sizeof(*entry) + len + 1;
3e9a8aad
SRRH
2880 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
2881 flags, pc);
48ead020 2882 if (!event)
07d777fe 2883 goto out;
48ead020 2884 entry = ring_buffer_event_data(event);
c13d2f7c 2885 entry->ip = ip;
48ead020 2886
3558a5ac 2887 memcpy(&entry->buf, tbuffer, len + 1);
f306cc82 2888 if (!call_filter_check_discard(call, entry, buffer, event)) {
7ffbd48d 2889 __buffer_unlock_commit(buffer, event);
2d34f489 2890 ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
d931369b 2891 }
e2ace001
AL
2892
2893out:
2894 put_trace_buf();
2895
2896out_nobuffer:
48ead020 2897 preempt_enable_notrace();
07d777fe 2898 unpause_graph_tracing();
48ead020
FW
2899
2900 return len;
2901}
659372d3 2902
12883efb
SRRH
2903int trace_array_vprintk(struct trace_array *tr,
2904 unsigned long ip, const char *fmt, va_list args)
2905{
2906 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
2907}
2908
2909int trace_array_printk(struct trace_array *tr,
2910 unsigned long ip, const char *fmt, ...)
2911{
2912 int ret;
2913 va_list ap;
2914
983f938a 2915 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
12883efb
SRRH
2916 return 0;
2917
2918 va_start(ap, fmt);
2919 ret = trace_array_vprintk(tr, ip, fmt, ap);
2920 va_end(ap);
2921 return ret;
2922}
2923
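/*
 * Illustrative sketch (hypothetical caller, not part of the original
 * file): write into a specific instance's buffer rather than the
 * global one; _THIS_IP_ records the call site as the event's ip.
 */
static __maybe_unused void example_instance_printk(struct trace_array *tr)
{
	trace_array_printk(tr, _THIS_IP_, "widget count: %d\n", 42);
}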
2924int trace_array_printk_buf(struct ring_buffer *buffer,
2925 unsigned long ip, const char *fmt, ...)
2926{
2927 int ret;
2928 va_list ap;
2929
983f938a 2930 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
12883efb
SRRH
2931 return 0;
2932
2933 va_start(ap, fmt);
2934 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
2935 va_end(ap);
2936 return ret;
2937}
2938
659372d3
SR
2939int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2940{
a813a159 2941 return trace_array_vprintk(&global_trace, ip, fmt, args);
659372d3 2942}
769b0441
FW
2943EXPORT_SYMBOL_GPL(trace_vprintk);
2944
e2ac8ef5 2945static void trace_iterator_increment(struct trace_iterator *iter)
5a90f577 2946{
6d158a81
SR
2947 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
2948
5a90f577 2949 iter->idx++;
6d158a81
SR
2950 if (buf_iter)
2951 ring_buffer_read(buf_iter, NULL);
5a90f577
SR
2952}
2953
e309b41d 2954static struct trace_entry *
bc21b478
SR
2955peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
2956 unsigned long *lost_events)
dd0e545f 2957{
3928a8a2 2958 struct ring_buffer_event *event;
6d158a81 2959 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
dd0e545f 2960
d769041f
SR
2961 if (buf_iter)
2962 event = ring_buffer_iter_peek(buf_iter, ts);
2963 else
12883efb 2964 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
bc21b478 2965 lost_events);
d769041f 2966
4a9bd3f1
SR
2967 if (event) {
2968 iter->ent_size = ring_buffer_event_length(event);
2969 return ring_buffer_event_data(event);
2970 }
2971 iter->ent_size = 0;
2972 return NULL;
dd0e545f 2973}
d769041f 2974
dd0e545f 2975static struct trace_entry *
bc21b478
SR
2976__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
2977 unsigned long *missing_events, u64 *ent_ts)
bc0c38d1 2978{
12883efb 2979 struct ring_buffer *buffer = iter->trace_buffer->buffer;
bc0c38d1 2980 struct trace_entry *ent, *next = NULL;
aa27497c 2981 unsigned long lost_events = 0, next_lost = 0;
b04cc6b1 2982 int cpu_file = iter->cpu_file;
3928a8a2 2983 u64 next_ts = 0, ts;
bc0c38d1 2984 int next_cpu = -1;
12b5da34 2985 int next_size = 0;
bc0c38d1
SR
2986 int cpu;
2987
b04cc6b1
FW
2988 /*
 2989 * If we are in a per_cpu trace file, don't bother iterating over
 2990 * all CPUs; just peek at that one directly.
2991 */
ae3b5093 2992 if (cpu_file > RING_BUFFER_ALL_CPUS) {
b04cc6b1
FW
2993 if (ring_buffer_empty_cpu(buffer, cpu_file))
2994 return NULL;
bc21b478 2995 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
b04cc6b1
FW
2996 if (ent_cpu)
2997 *ent_cpu = cpu_file;
2998
2999 return ent;
3000 }
3001
ab46428c 3002 for_each_tracing_cpu(cpu) {
dd0e545f 3003
3928a8a2
SR
3004 if (ring_buffer_empty_cpu(buffer, cpu))
3005 continue;
dd0e545f 3006
bc21b478 3007 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
dd0e545f 3008
cdd31cd2
IM
3009 /*
3010 * Pick the entry with the smallest timestamp:
3011 */
3928a8a2 3012 if (ent && (!next || ts < next_ts)) {
bc0c38d1
SR
3013 next = ent;
3014 next_cpu = cpu;
3928a8a2 3015 next_ts = ts;
bc21b478 3016 next_lost = lost_events;
12b5da34 3017 next_size = iter->ent_size;
bc0c38d1
SR
3018 }
3019 }
3020
12b5da34
SR
3021 iter->ent_size = next_size;
3022
bc0c38d1
SR
3023 if (ent_cpu)
3024 *ent_cpu = next_cpu;
3025
3928a8a2
SR
3026 if (ent_ts)
3027 *ent_ts = next_ts;
3028
bc21b478
SR
3029 if (missing_events)
3030 *missing_events = next_lost;
3031
bc0c38d1
SR
3032 return next;
3033}
3034
dd0e545f 3035/* Find the next real entry, without updating the iterator itself */
c4a8e8be
FW
3036struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
3037 int *ent_cpu, u64 *ent_ts)
bc0c38d1 3038{
bc21b478 3039 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
dd0e545f
SR
3040}
3041
3042/* Find the next real entry, and increment the iterator to the next entry */
955b61e5 3043void *trace_find_next_entry_inc(struct trace_iterator *iter)
dd0e545f 3044{
bc21b478
SR
3045 iter->ent = __find_next_entry(iter, &iter->cpu,
3046 &iter->lost_events, &iter->ts);
dd0e545f 3047
3928a8a2 3048 if (iter->ent)
e2ac8ef5 3049 trace_iterator_increment(iter);
dd0e545f 3050
3928a8a2 3051 return iter->ent ? iter : NULL;
b3806b43 3052}
bc0c38d1 3053
e309b41d 3054static void trace_consume(struct trace_iterator *iter)
b3806b43 3055{
12883efb 3056 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
bc21b478 3057 &iter->lost_events);
bc0c38d1
SR
3058}
3059
e309b41d 3060static void *s_next(struct seq_file *m, void *v, loff_t *pos)
bc0c38d1
SR
3061{
3062 struct trace_iterator *iter = m->private;
bc0c38d1 3063 int i = (int)*pos;
4e3c3333 3064 void *ent;
bc0c38d1 3065
a63ce5b3
SR
3066 WARN_ON_ONCE(iter->leftover);
3067
bc0c38d1
SR
3068 (*pos)++;
3069
3070 /* can't go backwards */
3071 if (iter->idx > i)
3072 return NULL;
3073
3074 if (iter->idx < 0)
955b61e5 3075 ent = trace_find_next_entry_inc(iter);
bc0c38d1
SR
3076 else
3077 ent = iter;
3078
3079 while (ent && iter->idx < i)
955b61e5 3080 ent = trace_find_next_entry_inc(iter);
bc0c38d1
SR
3081
3082 iter->pos = *pos;
3083
bc0c38d1
SR
3084 return ent;
3085}
3086
955b61e5 3087void tracing_iter_reset(struct trace_iterator *iter, int cpu)
2f26ebd5 3088{
2f26ebd5
SR
3089 struct ring_buffer_event *event;
3090 struct ring_buffer_iter *buf_iter;
3091 unsigned long entries = 0;
3092 u64 ts;
3093
12883efb 3094 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
2f26ebd5 3095
6d158a81
SR
3096 buf_iter = trace_buffer_iter(iter, cpu);
3097 if (!buf_iter)
2f26ebd5
SR
3098 return;
3099
2f26ebd5
SR
3100 ring_buffer_iter_reset(buf_iter);
3101
3102 /*
3103 * We could have the case with the max latency tracers
3104 * that a reset never took place on a cpu. This is evident
3105 * by the timestamp being before the start of the buffer.
3106 */
3107 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
12883efb 3108 if (ts >= iter->trace_buffer->time_start)
2f26ebd5
SR
3109 break;
3110 entries++;
3111 ring_buffer_read(buf_iter, NULL);
3112 }
3113
12883efb 3114 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
2f26ebd5
SR
3115}
3116
d7350c3f 3117/*
d7350c3f
FW
 3118 * The current tracer is copied to avoid global locking
3119 * all around.
3120 */
bc0c38d1
SR
3121static void *s_start(struct seq_file *m, loff_t *pos)
3122{
3123 struct trace_iterator *iter = m->private;
2b6080f2 3124 struct trace_array *tr = iter->tr;
b04cc6b1 3125 int cpu_file = iter->cpu_file;
bc0c38d1
SR
3126 void *p = NULL;
3127 loff_t l = 0;
3928a8a2 3128 int cpu;
bc0c38d1 3129
2fd196ec
HT
3130 /*
3131 * copy the tracer to avoid using a global lock all around.
3132 * iter->trace is a copy of current_trace, the pointer to the
3133 * name may be used instead of a strcmp(), as iter->trace->name
3134 * will point to the same string as current_trace->name.
3135 */
bc0c38d1 3136 mutex_lock(&trace_types_lock);
2b6080f2
SR
3137 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
3138 *iter->trace = *tr->current_trace;
d7350c3f 3139 mutex_unlock(&trace_types_lock);
bc0c38d1 3140
12883efb 3141#ifdef CONFIG_TRACER_MAX_TRACE
debdd57f
HT
3142 if (iter->snapshot && iter->trace->use_max_tr)
3143 return ERR_PTR(-EBUSY);
12883efb 3144#endif
debdd57f
HT
3145
3146 if (!iter->snapshot)
3147 atomic_inc(&trace_record_cmdline_disabled);
bc0c38d1 3148
bc0c38d1
SR
3149 if (*pos != iter->pos) {
3150 iter->ent = NULL;
3151 iter->cpu = 0;
3152 iter->idx = -1;
3153
ae3b5093 3154 if (cpu_file == RING_BUFFER_ALL_CPUS) {
b04cc6b1 3155 for_each_tracing_cpu(cpu)
2f26ebd5 3156 tracing_iter_reset(iter, cpu);
b04cc6b1 3157 } else
2f26ebd5 3158 tracing_iter_reset(iter, cpu_file);
bc0c38d1 3159
ac91d854 3160 iter->leftover = 0;
bc0c38d1
SR
3161 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
3162 ;
3163
3164 } else {
a63ce5b3
SR
3165 /*
3166 * If we overflowed the seq_file before, then we want
3167 * to just reuse the trace_seq buffer again.
3168 */
3169 if (iter->leftover)
3170 p = iter;
3171 else {
3172 l = *pos - 1;
3173 p = s_next(m, p, &l);
3174 }
bc0c38d1
SR
3175 }
3176
4f535968 3177 trace_event_read_lock();
7e53bd42 3178 trace_access_lock(cpu_file);
bc0c38d1
SR
3179 return p;
3180}
3181
3182static void s_stop(struct seq_file *m, void *p)
3183{
7e53bd42
LJ
3184 struct trace_iterator *iter = m->private;
3185
12883efb 3186#ifdef CONFIG_TRACER_MAX_TRACE
debdd57f
HT
3187 if (iter->snapshot && iter->trace->use_max_tr)
3188 return;
12883efb 3189#endif
debdd57f
HT
3190
3191 if (!iter->snapshot)
3192 atomic_dec(&trace_record_cmdline_disabled);
12883efb 3193
7e53bd42 3194 trace_access_unlock(iter->cpu_file);
4f535968 3195 trace_event_read_unlock();
bc0c38d1
SR
3196}
3197
39eaf7ef 3198static void
12883efb
SRRH
3199get_total_entries(struct trace_buffer *buf,
3200 unsigned long *total, unsigned long *entries)
39eaf7ef
SR
3201{
3202 unsigned long count;
3203 int cpu;
3204
3205 *total = 0;
3206 *entries = 0;
3207
3208 for_each_tracing_cpu(cpu) {
12883efb 3209 count = ring_buffer_entries_cpu(buf->buffer, cpu);
39eaf7ef
SR
3210 /*
3211 * If this buffer has skipped entries, then we hold all
3212 * entries for the trace and we need to ignore the
3213 * ones before the time stamp.
3214 */
12883efb
SRRH
3215 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
3216 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
39eaf7ef
SR
3217 /* total is the same as the entries */
3218 *total += count;
3219 } else
3220 *total += count +
12883efb 3221 ring_buffer_overrun_cpu(buf->buffer, cpu);
39eaf7ef
SR
3222 *entries += count;
3223 }
3224}
3225
e309b41d 3226static void print_lat_help_header(struct seq_file *m)
bc0c38d1 3227{
d79ac28f
RV
3228 seq_puts(m, "# _------=> CPU# \n"
3229 "# / _-----=> irqs-off \n"
3230 "# | / _----=> need-resched \n"
3231 "# || / _---=> hardirq/softirq \n"
3232 "# ||| / _--=> preempt-depth \n"
3233 "# |||| / delay \n"
3234 "# cmd pid ||||| time | caller \n"
3235 "# \\ / ||||| \\ | / \n");
bc0c38d1
SR
3236}
3237
12883efb 3238static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
bc0c38d1 3239{
39eaf7ef
SR
3240 unsigned long total;
3241 unsigned long entries;
3242
12883efb 3243 get_total_entries(buf, &total, &entries);
39eaf7ef
SR
3244 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
3245 entries, total, num_online_cpus());
3246 seq_puts(m, "#\n");
3247}
3248
12883efb 3249static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
39eaf7ef 3250{
12883efb 3251 print_event_info(buf, m);
d79ac28f
RV
3252 seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n"
3253 "# | | | | |\n");
bc0c38d1
SR
3254}
3255
12883efb 3256static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
77271ce4 3257{
12883efb 3258 print_event_info(buf, m);
d79ac28f
RV
3259 seq_puts(m, "# _-----=> irqs-off\n"
3260 "# / _----=> need-resched\n"
3261 "# | / _---=> hardirq/softirq\n"
3262 "# || / _--=> preempt-depth\n"
3263 "# ||| / delay\n"
3264 "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n"
3265 "# | | | |||| | |\n");
77271ce4 3266}
bc0c38d1 3267
62b915f1 3268void
bc0c38d1
SR
3269print_trace_header(struct seq_file *m, struct trace_iterator *iter)
3270{
983f938a 3271 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
12883efb
SRRH
3272 struct trace_buffer *buf = iter->trace_buffer;
3273 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
2b6080f2 3274 struct tracer *type = iter->trace;
39eaf7ef
SR
3275 unsigned long entries;
3276 unsigned long total;
bc0c38d1
SR
3277 const char *name = "preemption";
3278
d840f718 3279 name = type->name;
bc0c38d1 3280
12883efb 3281 get_total_entries(buf, &total, &entries);
bc0c38d1 3282
888b55dc 3283 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
bc0c38d1 3284 name, UTS_RELEASE);
888b55dc 3285 seq_puts(m, "# -----------------------------------"
bc0c38d1 3286 "---------------------------------\n");
888b55dc 3287 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
bc0c38d1 3288 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
57f50be1 3289 nsecs_to_usecs(data->saved_latency),
bc0c38d1 3290 entries,
4c11d7ae 3291 total,
12883efb 3292 buf->cpu,
bc0c38d1
SR
3293#if defined(CONFIG_PREEMPT_NONE)
3294 "server",
3295#elif defined(CONFIG_PREEMPT_VOLUNTARY)
3296 "desktop",
b5c21b45 3297#elif defined(CONFIG_PREEMPT)
bc0c38d1
SR
3298 "preempt",
3299#else
3300 "unknown",
3301#endif
3302 /* These are reserved for later use */
3303 0, 0, 0, 0);
3304#ifdef CONFIG_SMP
3305 seq_printf(m, " #P:%d)\n", num_online_cpus());
3306#else
3307 seq_puts(m, ")\n");
3308#endif
888b55dc
KM
3309 seq_puts(m, "# -----------------\n");
3310 seq_printf(m, "# | task: %.16s-%d "
bc0c38d1 3311 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
d20b92ab
EB
3312 data->comm, data->pid,
3313 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
bc0c38d1 3314 data->policy, data->rt_priority);
888b55dc 3315 seq_puts(m, "# -----------------\n");
bc0c38d1
SR
3316
3317 if (data->critical_start) {
888b55dc 3318 seq_puts(m, "# => started at: ");
214023c3
SR
3319 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
3320 trace_print_seq(m, &iter->seq);
888b55dc 3321 seq_puts(m, "\n# => ended at: ");
214023c3
SR
3322 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
3323 trace_print_seq(m, &iter->seq);
8248ac05 3324 seq_puts(m, "\n#\n");
bc0c38d1
SR
3325 }
3326
888b55dc 3327 seq_puts(m, "#\n");
bc0c38d1
SR
3328}
3329
a309720c
SR
3330static void test_cpu_buff_start(struct trace_iterator *iter)
3331{
3332 struct trace_seq *s = &iter->seq;
983f938a 3333 struct trace_array *tr = iter->tr;
a309720c 3334
983f938a 3335 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
12ef7d44
SR
3336 return;
3337
3338 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
3339 return;
3340
4dbbe2d8
MK
3341 if (cpumask_available(iter->started) &&
3342 cpumask_test_cpu(iter->cpu, iter->started))
a309720c
SR
3343 return;
3344
12883efb 3345 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
2f26ebd5
SR
3346 return;
3347
4dbbe2d8 3348 if (cpumask_available(iter->started))
919cd979 3349 cpumask_set_cpu(iter->cpu, iter->started);
b0dfa978
FW
3350
3351 /* Don't print started cpu buffer for the first entry of the trace */
3352 if (iter->idx > 1)
3353 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
3354 iter->cpu);
a309720c
SR
3355}
3356
2c4f035f 3357static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
bc0c38d1 3358{
983f938a 3359 struct trace_array *tr = iter->tr;
214023c3 3360 struct trace_seq *s = &iter->seq;
983f938a 3361 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
4e3c3333 3362 struct trace_entry *entry;
f633cef0 3363 struct trace_event *event;
bc0c38d1 3364
4e3c3333 3365 entry = iter->ent;
dd0e545f 3366
a309720c
SR
3367 test_cpu_buff_start(iter);
3368
c4a8e8be 3369 event = ftrace_find_event(entry->type);
bc0c38d1 3370
983f938a 3371 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
19a7fe20
SRRH
3372 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
3373 trace_print_lat_context(iter);
3374 else
3375 trace_print_context(iter);
c4a8e8be 3376 }
bc0c38d1 3377
19a7fe20
SRRH
3378 if (trace_seq_has_overflowed(s))
3379 return TRACE_TYPE_PARTIAL_LINE;
3380
268ccda0 3381 if (event)
a9a57763 3382 return event->funcs->trace(iter, sym_flags, event);
d9793bd8 3383
19a7fe20 3384 trace_seq_printf(s, "Unknown type %d\n", entry->type);
02b67518 3385
19a7fe20 3386 return trace_handle_return(s);
bc0c38d1
SR
3387}
3388
2c4f035f 3389static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
f9896bf3 3390{
983f938a 3391 struct trace_array *tr = iter->tr;
f9896bf3
IM
3392 struct trace_seq *s = &iter->seq;
3393 struct trace_entry *entry;
f633cef0 3394 struct trace_event *event;
f9896bf3
IM
3395
3396 entry = iter->ent;
dd0e545f 3397
983f938a 3398 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
19a7fe20
SRRH
3399 trace_seq_printf(s, "%d %d %llu ",
3400 entry->pid, iter->cpu, iter->ts);
3401
3402 if (trace_seq_has_overflowed(s))
3403 return TRACE_TYPE_PARTIAL_LINE;
f9896bf3 3404
f633cef0 3405 event = ftrace_find_event(entry->type);
268ccda0 3406 if (event)
a9a57763 3407 return event->funcs->raw(iter, 0, event);
d9793bd8 3408
19a7fe20 3409 trace_seq_printf(s, "%d ?\n", entry->type);
777e208d 3410
19a7fe20 3411 return trace_handle_return(s);
f9896bf3
IM
3412}
3413
2c4f035f 3414static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
5e3ca0ec 3415{
983f938a 3416 struct trace_array *tr = iter->tr;
5e3ca0ec
IM
3417 struct trace_seq *s = &iter->seq;
3418 unsigned char newline = '\n';
3419 struct trace_entry *entry;
f633cef0 3420 struct trace_event *event;
5e3ca0ec
IM
3421
3422 entry = iter->ent;
dd0e545f 3423
983f938a 3424 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
19a7fe20
SRRH
3425 SEQ_PUT_HEX_FIELD(s, entry->pid);
3426 SEQ_PUT_HEX_FIELD(s, iter->cpu);
3427 SEQ_PUT_HEX_FIELD(s, iter->ts);
3428 if (trace_seq_has_overflowed(s))
3429 return TRACE_TYPE_PARTIAL_LINE;
c4a8e8be 3430 }
5e3ca0ec 3431
f633cef0 3432 event = ftrace_find_event(entry->type);
268ccda0 3433 if (event) {
a9a57763 3434 enum print_line_t ret = event->funcs->hex(iter, 0, event);
d9793bd8
ACM
3435 if (ret != TRACE_TYPE_HANDLED)
3436 return ret;
3437 }
7104f300 3438
19a7fe20 3439 SEQ_PUT_FIELD(s, newline);
5e3ca0ec 3440
19a7fe20 3441 return trace_handle_return(s);
5e3ca0ec
IM
3442}
3443
2c4f035f 3444static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
cb0f12aa 3445{
983f938a 3446 struct trace_array *tr = iter->tr;
cb0f12aa
IM
3447 struct trace_seq *s = &iter->seq;
3448 struct trace_entry *entry;
f633cef0 3449 struct trace_event *event;
cb0f12aa
IM
3450
3451 entry = iter->ent;
dd0e545f 3452
983f938a 3453 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
19a7fe20
SRRH
3454 SEQ_PUT_FIELD(s, entry->pid);
3455 SEQ_PUT_FIELD(s, iter->cpu);
3456 SEQ_PUT_FIELD(s, iter->ts);
3457 if (trace_seq_has_overflowed(s))
3458 return TRACE_TYPE_PARTIAL_LINE;
c4a8e8be 3459 }
cb0f12aa 3460
f633cef0 3461 event = ftrace_find_event(entry->type);
a9a57763
SR
3462 return event ? event->funcs->binary(iter, 0, event) :
3463 TRACE_TYPE_HANDLED;
cb0f12aa
IM
3464}
3465
62b915f1 3466int trace_empty(struct trace_iterator *iter)
bc0c38d1 3467{
6d158a81 3468 struct ring_buffer_iter *buf_iter;
bc0c38d1
SR
3469 int cpu;
3470
9aba60fe 3471 /* If we are looking at one CPU buffer, only check that one */
ae3b5093 3472 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
9aba60fe 3473 cpu = iter->cpu_file;
6d158a81
SR
3474 buf_iter = trace_buffer_iter(iter, cpu);
3475 if (buf_iter) {
3476 if (!ring_buffer_iter_empty(buf_iter))
9aba60fe
SR
3477 return 0;
3478 } else {
12883efb 3479 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
9aba60fe
SR
3480 return 0;
3481 }
3482 return 1;
3483 }
3484
ab46428c 3485 for_each_tracing_cpu(cpu) {
6d158a81
SR
3486 buf_iter = trace_buffer_iter(iter, cpu);
3487 if (buf_iter) {
3488 if (!ring_buffer_iter_empty(buf_iter))
d769041f
SR
3489 return 0;
3490 } else {
12883efb 3491 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
d769041f
SR
3492 return 0;
3493 }
bc0c38d1 3494 }
d769041f 3495
797d3712 3496 return 1;
bc0c38d1
SR
3497}
3498
4f535968 3499/* Called with trace_event_read_lock() held. */
955b61e5 3500enum print_line_t print_trace_line(struct trace_iterator *iter)
f9896bf3 3501{
983f938a
SRRH
3502 struct trace_array *tr = iter->tr;
3503 unsigned long trace_flags = tr->trace_flags;
2c4f035f
FW
3504 enum print_line_t ret;
3505
19a7fe20
SRRH
3506 if (iter->lost_events) {
3507 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
3508 iter->cpu, iter->lost_events);
3509 if (trace_seq_has_overflowed(&iter->seq))
3510 return TRACE_TYPE_PARTIAL_LINE;
3511 }
bc21b478 3512
2c4f035f
FW
3513 if (iter->trace && iter->trace->print_line) {
3514 ret = iter->trace->print_line(iter);
3515 if (ret != TRACE_TYPE_UNHANDLED)
3516 return ret;
3517 }
72829bc3 3518
09ae7234
SRRH
3519 if (iter->ent->type == TRACE_BPUTS &&
3520 trace_flags & TRACE_ITER_PRINTK &&
3521 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
3522 return trace_print_bputs_msg_only(iter);
3523
48ead020
FW
3524 if (iter->ent->type == TRACE_BPRINT &&
3525 trace_flags & TRACE_ITER_PRINTK &&
3526 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
5ef841f6 3527 return trace_print_bprintk_msg_only(iter);
48ead020 3528
66896a85
FW
3529 if (iter->ent->type == TRACE_PRINT &&
3530 trace_flags & TRACE_ITER_PRINTK &&
3531 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
5ef841f6 3532 return trace_print_printk_msg_only(iter);
66896a85 3533
cb0f12aa
IM
3534 if (trace_flags & TRACE_ITER_BIN)
3535 return print_bin_fmt(iter);
3536
5e3ca0ec
IM
3537 if (trace_flags & TRACE_ITER_HEX)
3538 return print_hex_fmt(iter);
3539
f9896bf3
IM
3540 if (trace_flags & TRACE_ITER_RAW)
3541 return print_raw_fmt(iter);
3542
f9896bf3
IM
3543 return print_trace_fmt(iter);
3544}
3545
7e9a49ef
JO
3546void trace_latency_header(struct seq_file *m)
3547{
3548 struct trace_iterator *iter = m->private;
983f938a 3549 struct trace_array *tr = iter->tr;
7e9a49ef
JO
3550
3551 /* print nothing if the buffers are empty */
3552 if (trace_empty(iter))
3553 return;
3554
3555 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
3556 print_trace_header(m, iter);
3557
983f938a 3558 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
7e9a49ef
JO
3559 print_lat_help_header(m);
3560}
3561
62b915f1
JO
3562void trace_default_header(struct seq_file *m)
3563{
3564 struct trace_iterator *iter = m->private;
983f938a
SRRH
3565 struct trace_array *tr = iter->tr;
3566 unsigned long trace_flags = tr->trace_flags;
62b915f1 3567
f56e7f8e
JO
3568 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
3569 return;
3570
62b915f1
JO
3571 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
3572 /* print nothing if the buffers are empty */
3573 if (trace_empty(iter))
3574 return;
3575 print_trace_header(m, iter);
3576 if (!(trace_flags & TRACE_ITER_VERBOSE))
3577 print_lat_help_header(m);
3578 } else {
77271ce4
SR
3579 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
3580 if (trace_flags & TRACE_ITER_IRQ_INFO)
12883efb 3581 print_func_help_header_irq(iter->trace_buffer, m);
77271ce4 3582 else
12883efb 3583 print_func_help_header(iter->trace_buffer, m);
77271ce4 3584 }
62b915f1
JO
3585 }
3586}
3587
e0a413f6
SR
3588static void test_ftrace_alive(struct seq_file *m)
3589{
3590 if (!ftrace_is_dead())
3591 return;
d79ac28f
RV
3592 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
3593 "# MAY BE MISSING FUNCTION EVENTS\n");
e0a413f6
SR
3594}
3595
d8741e2e 3596#ifdef CONFIG_TRACER_MAX_TRACE
f1affcaa 3597static void show_snapshot_main_help(struct seq_file *m)
d8741e2e 3598{
d79ac28f
RV
3599 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
3600 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
3601 "# Takes a snapshot of the main buffer.\n"
3602 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
3603 "# (Doesn't have to be '2' works with any number that\n"
3604 "# is not a '0' or '1')\n");
d8741e2e 3605}
f1affcaa
SRRH
3606
3607static void show_snapshot_percpu_help(struct seq_file *m)
3608{
fa6f0cc7 3609 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
f1affcaa 3610#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
d79ac28f
RV
3611 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
3612 "# Takes a snapshot of the main buffer for this cpu.\n");
f1affcaa 3613#else
d79ac28f
RV
3614 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
3615 "# Must use main snapshot file to allocate.\n");
f1affcaa 3616#endif
d79ac28f
RV
3617 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
3618 "# (Doesn't have to be '2' works with any number that\n"
3619 "# is not a '0' or '1')\n");
f1affcaa
SRRH
3620}
3621
d8741e2e
SRRH
3622static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
3623{
45ad21ca 3624 if (iter->tr->allocated_snapshot)
fa6f0cc7 3625 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
d8741e2e 3626 else
fa6f0cc7 3627 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
d8741e2e 3628
fa6f0cc7 3629 seq_puts(m, "# Snapshot commands:\n");
f1affcaa
SRRH
3630 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
3631 show_snapshot_main_help(m);
3632 else
3633 show_snapshot_percpu_help(m);
d8741e2e
SRRH
3634}
3635#else
3636/* Should never be called */
3637static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
3638#endif
3639
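The snapshot help text above spells out what the snapshot file accepts. Purely as a minimal user-space sketch of that sequence (assuming a kernel built with CONFIG_TRACER_MAX_TRACE, tracefs mounted at /sys/kernel/tracing, and root privileges; the path and error handling are illustrative, not part of this file):

/*
 * Hypothetical helper: take a snapshot ("echo 1 > snapshot"), then
 * clear it while keeping the buffer allocated ("echo 2 > snapshot").
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void write_snapshot(const char *val)
{
	int fd = open("/sys/kernel/tracing/snapshot", O_WRONLY);

	if (fd < 0) {
		perror("open snapshot");
		return;
	}
	if (write(fd, val, strlen(val)) < 0)
		perror("write snapshot");
	close(fd);
}

int main(void)
{
	write_snapshot("1");	/* allocate if needed and take a snapshot */
	write_snapshot("2");	/* clear the snapshot buffer, keep it allocated */
	return 0;
}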
bc0c38d1
SR
3640static int s_show(struct seq_file *m, void *v)
3641{
3642 struct trace_iterator *iter = v;
a63ce5b3 3643 int ret;
bc0c38d1
SR
3644
3645 if (iter->ent == NULL) {
3646 if (iter->tr) {
3647 seq_printf(m, "# tracer: %s\n", iter->trace->name);
3648 seq_puts(m, "#\n");
e0a413f6 3649 test_ftrace_alive(m);
bc0c38d1 3650 }
d8741e2e
SRRH
3651 if (iter->snapshot && trace_empty(iter))
3652 print_snapshot_help(m, iter);
3653 else if (iter->trace && iter->trace->print_header)
8bba1bf5 3654 iter->trace->print_header(m);
62b915f1
JO
3655 else
3656 trace_default_header(m);
3657
a63ce5b3
SR
3658 } else if (iter->leftover) {
3659 /*
3660 * If we filled the seq_file buffer earlier, we
3661 * want to just show it now.
3662 */
3663 ret = trace_print_seq(m, &iter->seq);
3664
3665 /* ret should this time be zero, but you never know */
3666 iter->leftover = ret;
3667
bc0c38d1 3668 } else {
f9896bf3 3669 print_trace_line(iter);
a63ce5b3
SR
3670 ret = trace_print_seq(m, &iter->seq);
3671 /*
3672 * If we overflow the seq_file buffer, then it will
3673 * ask us for this data again at start up.
3674 * Use that instead.
3675 * ret is 0 if seq_file write succeeded.
3676 * -1 otherwise.
3677 */
3678 iter->leftover = ret;
bc0c38d1
SR
3679 }
3680
3681 return 0;
3682}
3683
649e9c70
ON
3684/*
3685 * Should be used after trace_array_get(); trace_types_lock
3686 * ensures that i_cdev was already initialized.
3687 */
3688static inline int tracing_get_cpu(struct inode *inode)
3689{
3690 if (inode->i_cdev) /* See trace_create_cpu_file() */
3691 return (long)inode->i_cdev - 1;
3692 return RING_BUFFER_ALL_CPUS;
3693}
3694
88e9d34c 3695static const struct seq_operations tracer_seq_ops = {
4bf39a94
IM
3696 .start = s_start,
3697 .next = s_next,
3698 .stop = s_stop,
3699 .show = s_show,
bc0c38d1
SR
3700};
3701
e309b41d 3702static struct trace_iterator *
6484c71c 3703__tracing_open(struct inode *inode, struct file *file, bool snapshot)
bc0c38d1 3704{
6484c71c 3705 struct trace_array *tr = inode->i_private;
bc0c38d1 3706 struct trace_iterator *iter;
50e18b94 3707 int cpu;
bc0c38d1 3708
85a2f9b4
SR
3709 if (tracing_disabled)
3710 return ERR_PTR(-ENODEV);
60a11774 3711
50e18b94 3712 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
85a2f9b4
SR
3713 if (!iter)
3714 return ERR_PTR(-ENOMEM);
bc0c38d1 3715
72917235 3716 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
6d158a81 3717 GFP_KERNEL);
93574fcc
DC
3718 if (!iter->buffer_iter)
3719 goto release;
3720
d7350c3f
FW
3721 /*
3722 * We make a copy of the current tracer to avoid concurrent
3723 * changes on it while we are reading.
3724 */
bc0c38d1 3725 mutex_lock(&trace_types_lock);
d7350c3f 3726 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
85a2f9b4 3727 if (!iter->trace)
d7350c3f 3728 goto fail;
85a2f9b4 3729
2b6080f2 3730 *iter->trace = *tr->current_trace;
d7350c3f 3731
79f55997 3732 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
b0dfa978
FW
3733 goto fail;
3734
12883efb
SRRH
3735 iter->tr = tr;
3736
3737#ifdef CONFIG_TRACER_MAX_TRACE
2b6080f2
SR
3738 /* Currently only the top directory has a snapshot */
3739 if (tr->current_trace->print_max || snapshot)
12883efb 3740 iter->trace_buffer = &tr->max_buffer;
bc0c38d1 3741 else
12883efb
SRRH
3742#endif
3743 iter->trace_buffer = &tr->trace_buffer;
debdd57f 3744 iter->snapshot = snapshot;
bc0c38d1 3745 iter->pos = -1;
6484c71c 3746 iter->cpu_file = tracing_get_cpu(inode);
d7350c3f 3747 mutex_init(&iter->mutex);
bc0c38d1 3748
8bba1bf5
MM
3749 /* Notify the tracer early; before we stop tracing. */
3750 if (iter->trace && iter->trace->open)
a93751ca 3751 iter->trace->open(iter);
8bba1bf5 3752
12ef7d44 3753 /* Annotate start of buffers if we had overruns */
12883efb 3754 if (ring_buffer_overruns(iter->trace_buffer->buffer))
12ef7d44
SR
3755 iter->iter_flags |= TRACE_FILE_ANNOTATE;
3756
8be0709f 3757 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
58e8eedf 3758 if (trace_clocks[tr->clock_id].in_ns)
8be0709f
DS
3759 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
3760
debdd57f
HT
3761 /* stop the trace while dumping if we are not opening "snapshot" */
3762 if (!iter->snapshot)
2b6080f2 3763 tracing_stop_tr(tr);
2f26ebd5 3764
ae3b5093 3765 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
b04cc6b1 3766 for_each_tracing_cpu(cpu) {
b04cc6b1 3767 iter->buffer_iter[cpu] =
12883efb 3768 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
72c9ddfd
DM
3769 }
3770 ring_buffer_read_prepare_sync();
3771 for_each_tracing_cpu(cpu) {
3772 ring_buffer_read_start(iter->buffer_iter[cpu]);
2f26ebd5 3773 tracing_iter_reset(iter, cpu);
b04cc6b1
FW
3774 }
3775 } else {
3776 cpu = iter->cpu_file;
3928a8a2 3777 iter->buffer_iter[cpu] =
12883efb 3778 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
72c9ddfd
DM
3779 ring_buffer_read_prepare_sync();
3780 ring_buffer_read_start(iter->buffer_iter[cpu]);
2f26ebd5 3781 tracing_iter_reset(iter, cpu);
3928a8a2
SR
3782 }
3783
bc0c38d1
SR
3784 mutex_unlock(&trace_types_lock);
3785
bc0c38d1 3786 return iter;
3928a8a2 3787
d7350c3f 3788 fail:
3928a8a2 3789 mutex_unlock(&trace_types_lock);
d7350c3f 3790 kfree(iter->trace);
6d158a81 3791 kfree(iter->buffer_iter);
93574fcc 3792release:
50e18b94
JO
3793 seq_release_private(inode, file);
3794 return ERR_PTR(-ENOMEM);
bc0c38d1
SR
3795}
3796
3797int tracing_open_generic(struct inode *inode, struct file *filp)
3798{
60a11774
SR
3799 if (tracing_disabled)
3800 return -ENODEV;
3801
bc0c38d1
SR
3802 filp->private_data = inode->i_private;
3803 return 0;
3804}
3805
2e86421d
GB
3806bool tracing_is_disabled(void)
3807{
3808 return (tracing_disabled) ? true : false;
3809}
3810
7b85af63
SRRH
3811/*
3812 * Open and update trace_array ref count.
3813 * Must have the current trace_array passed to it.
3814 */
dcc30223 3815static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
7b85af63
SRRH
3816{
3817 struct trace_array *tr = inode->i_private;
3818
3819 if (tracing_disabled)
3820 return -ENODEV;
3821
3822 if (trace_array_get(tr) < 0)
3823 return -ENODEV;
3824
3825 filp->private_data = inode->i_private;
3826
3827 return 0;
7b85af63
SRRH
3828}
3829
4fd27358 3830static int tracing_release(struct inode *inode, struct file *file)
bc0c38d1 3831{
6484c71c 3832 struct trace_array *tr = inode->i_private;
907f2784 3833 struct seq_file *m = file->private_data;
4acd4d00 3834 struct trace_iterator *iter;
3928a8a2 3835 int cpu;
bc0c38d1 3836
ff451961 3837 if (!(file->f_mode & FMODE_READ)) {
6484c71c 3838 trace_array_put(tr);
4acd4d00 3839 return 0;
ff451961 3840 }
4acd4d00 3841
6484c71c 3842 /* Writes do not use seq_file */
4acd4d00 3843 iter = m->private;
bc0c38d1 3844 mutex_lock(&trace_types_lock);
a695cb58 3845
3928a8a2
SR
3846 for_each_tracing_cpu(cpu) {
3847 if (iter->buffer_iter[cpu])
3848 ring_buffer_read_finish(iter->buffer_iter[cpu]);
3849 }
3850
bc0c38d1
SR
3851 if (iter->trace && iter->trace->close)
3852 iter->trace->close(iter);
3853
debdd57f
HT
3854 if (!iter->snapshot)
3855 /* reenable tracing if it was previously enabled */
2b6080f2 3856 tracing_start_tr(tr);
f77d09a3
AL
3857
3858 __trace_array_put(tr);
3859
bc0c38d1
SR
3860 mutex_unlock(&trace_types_lock);
3861
d7350c3f 3862 mutex_destroy(&iter->mutex);
b0dfa978 3863 free_cpumask_var(iter->started);
d7350c3f 3864 kfree(iter->trace);
6d158a81 3865 kfree(iter->buffer_iter);
50e18b94 3866 seq_release_private(inode, file);
ff451961 3867
bc0c38d1
SR
3868 return 0;
3869}
3870
7b85af63
SRRH
3871static int tracing_release_generic_tr(struct inode *inode, struct file *file)
3872{
3873 struct trace_array *tr = inode->i_private;
3874
3875 trace_array_put(tr);
bc0c38d1
SR
3876 return 0;
3877}
3878
7b85af63
SRRH
3879static int tracing_single_release_tr(struct inode *inode, struct file *file)
3880{
3881 struct trace_array *tr = inode->i_private;
3882
3883 trace_array_put(tr);
3884
3885 return single_release(inode, file);
3886}
3887
bc0c38d1
SR
3888static int tracing_open(struct inode *inode, struct file *file)
3889{
6484c71c 3890 struct trace_array *tr = inode->i_private;
85a2f9b4
SR
3891 struct trace_iterator *iter;
3892 int ret = 0;
bc0c38d1 3893
ff451961
SRRH
3894 if (trace_array_get(tr) < 0)
3895 return -ENODEV;
3896
4acd4d00 3897 /* If this file was open for write, then erase contents */
6484c71c
ON
3898 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
3899 int cpu = tracing_get_cpu(inode);
3900
3901 if (cpu == RING_BUFFER_ALL_CPUS)
12883efb 3902 tracing_reset_online_cpus(&tr->trace_buffer);
4acd4d00 3903 else
6484c71c 3904 tracing_reset(&tr->trace_buffer, cpu);
4acd4d00 3905 }
bc0c38d1 3906
4acd4d00 3907 if (file->f_mode & FMODE_READ) {
6484c71c 3908 iter = __tracing_open(inode, file, false);
4acd4d00
SR
3909 if (IS_ERR(iter))
3910 ret = PTR_ERR(iter);
983f938a 3911 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
4acd4d00
SR
3912 iter->iter_flags |= TRACE_FILE_LAT_FMT;
3913 }
ff451961
SRRH
3914
3915 if (ret < 0)
3916 trace_array_put(tr);
3917
bc0c38d1
SR
3918 return ret;
3919}
3920
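tracing_open() above erases the buffer contents whenever the trace file is opened for writing with O_TRUNC, which is what `echo > trace` relies on. A minimal sketch of doing the same from C, assuming tracefs is mounted at /sys/kernel/tracing:

/* Clear the trace buffer the same way "echo > trace" does: an
 * O_TRUNC, write-mode open reaches tracing_reset_online_cpus().
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/kernel/tracing/trace", O_WRONLY | O_TRUNC);

	if (fd < 0) {
		perror("open trace");
		return 1;
	}
	close(fd);
	return 0;
}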
607e2ea1
SRRH
3921/*
3922 * Some tracers are not suitable for instance buffers.
3923 * A tracer is always available for the global array (toplevel)
3924 * or if it explicitly states that it is.
3925 */
3926static bool
3927trace_ok_for_array(struct tracer *t, struct trace_array *tr)
3928{
3929 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
3930}
3931
3932/* Find the next tracer that this trace array may use */
3933static struct tracer *
3934get_tracer_for_array(struct trace_array *tr, struct tracer *t)
3935{
3936 while (t && !trace_ok_for_array(t, tr))
3937 t = t->next;
3938
3939 return t;
3940}
3941
e309b41d 3942static void *
bc0c38d1
SR
3943t_next(struct seq_file *m, void *v, loff_t *pos)
3944{
607e2ea1 3945 struct trace_array *tr = m->private;
f129e965 3946 struct tracer *t = v;
bc0c38d1
SR
3947
3948 (*pos)++;
3949
3950 if (t)
607e2ea1 3951 t = get_tracer_for_array(tr, t->next);
bc0c38d1 3952
bc0c38d1
SR
3953 return t;
3954}
3955
3956static void *t_start(struct seq_file *m, loff_t *pos)
3957{
607e2ea1 3958 struct trace_array *tr = m->private;
f129e965 3959 struct tracer *t;
bc0c38d1
SR
3960 loff_t l = 0;
3961
3962 mutex_lock(&trace_types_lock);
607e2ea1
SRRH
3963
3964 t = get_tracer_for_array(tr, trace_types);
3965 for (; t && l < *pos; t = t_next(m, t, &l))
3966 ;
bc0c38d1
SR
3967
3968 return t;
3969}
3970
3971static void t_stop(struct seq_file *m, void *p)
3972{
3973 mutex_unlock(&trace_types_lock);
3974}
3975
3976static int t_show(struct seq_file *m, void *v)
3977{
3978 struct tracer *t = v;
3979
3980 if (!t)
3981 return 0;
3982
fa6f0cc7 3983 seq_puts(m, t->name);
bc0c38d1
SR
3984 if (t->next)
3985 seq_putc(m, ' ');
3986 else
3987 seq_putc(m, '\n');
3988
3989 return 0;
3990}
3991
88e9d34c 3992static const struct seq_operations show_traces_seq_ops = {
4bf39a94
IM
3993 .start = t_start,
3994 .next = t_next,
3995 .stop = t_stop,
3996 .show = t_show,
bc0c38d1
SR
3997};
3998
3999static int show_traces_open(struct inode *inode, struct file *file)
4000{
607e2ea1
SRRH
4001 struct trace_array *tr = inode->i_private;
4002 struct seq_file *m;
4003 int ret;
4004
60a11774
SR
4005 if (tracing_disabled)
4006 return -ENODEV;
4007
607e2ea1
SRRH
4008 ret = seq_open(file, &show_traces_seq_ops);
4009 if (ret)
4010 return ret;
4011
4012 m = file->private_data;
4013 m->private = tr;
4014
4015 return 0;
bc0c38d1
SR
4016}
4017
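t_show() prints each registered tracer name followed by a space, with a newline after the last one, and show_traces_open() backs the available_tracers listing described in the README further down. A small sketch reading that list from user space (mount point assumed):

/* Dump the space-separated tracer list produced by t_show(). */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd = open("/sys/kernel/tracing/available_tracers", O_RDONLY);

	if (fd < 0) {
		perror("open available_tracers");
		return 1;
	}
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);
	close(fd);
	return 0;
}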
4acd4d00
SR
4018static ssize_t
4019tracing_write_stub(struct file *filp, const char __user *ubuf,
4020 size_t count, loff_t *ppos)
4021{
4022 return count;
4023}
4024
098c879e 4025loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
364829b1 4026{
098c879e
SRRH
4027 int ret;
4028
364829b1 4029 if (file->f_mode & FMODE_READ)
098c879e 4030 ret = seq_lseek(file, offset, whence);
364829b1 4031 else
098c879e
SRRH
4032 file->f_pos = ret = 0;
4033
4034 return ret;
364829b1
SP
4035}
4036
5e2336a0 4037static const struct file_operations tracing_fops = {
4bf39a94
IM
4038 .open = tracing_open,
4039 .read = seq_read,
4acd4d00 4040 .write = tracing_write_stub,
098c879e 4041 .llseek = tracing_lseek,
4bf39a94 4042 .release = tracing_release,
bc0c38d1
SR
4043};
4044
5e2336a0 4045static const struct file_operations show_traces_fops = {
c7078de1
IM
4046 .open = show_traces_open,
4047 .read = seq_read,
4048 .release = seq_release,
b444786f 4049 .llseek = seq_lseek,
c7078de1
IM
4050};
4051
36dfe925
IM
4052/*
4053 * The tracer itself will not take this lock, but still we want
4054 * to provide a consistent cpumask to user-space:
4055 */
4056static DEFINE_MUTEX(tracing_cpumask_update_lock);
4057
4058/*
4059 * Temporary storage for the character representation of the
4060 * CPU bitmask (and one more byte for the newline):
4061 */
4062static char mask_str[NR_CPUS + 1];
4063
c7078de1
IM
4064static ssize_t
4065tracing_cpumask_read(struct file *filp, char __user *ubuf,
4066 size_t count, loff_t *ppos)
4067{
ccfe9e42 4068 struct trace_array *tr = file_inode(filp)->i_private;
36dfe925 4069 int len;
c7078de1
IM
4070
4071 mutex_lock(&tracing_cpumask_update_lock);
36dfe925 4072
1a40243b
TH
4073 len = snprintf(mask_str, count, "%*pb\n",
4074 cpumask_pr_args(tr->tracing_cpumask));
4075 if (len >= count) {
36dfe925
IM
4076 count = -EINVAL;
4077 goto out_err;
4078 }
36dfe925
IM
4079 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
4080
4081out_err:
c7078de1
IM
4082 mutex_unlock(&tracing_cpumask_update_lock);
4083
4084 return count;
4085}
4086
4087static ssize_t
4088tracing_cpumask_write(struct file *filp, const char __user *ubuf,
4089 size_t count, loff_t *ppos)
4090{
ccfe9e42 4091 struct trace_array *tr = file_inode(filp)->i_private;
9e01c1b7 4092 cpumask_var_t tracing_cpumask_new;
2b6080f2 4093 int err, cpu;
9e01c1b7
RR
4094
4095 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
4096 return -ENOMEM;
c7078de1 4097
9e01c1b7 4098 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
c7078de1 4099 if (err)
36dfe925
IM
4100 goto err_unlock;
4101
215368e8
LZ
4102 mutex_lock(&tracing_cpumask_update_lock);
4103
a5e25883 4104 local_irq_disable();
0b9b12c1 4105 arch_spin_lock(&tr->max_lock);
ab46428c 4106 for_each_tracing_cpu(cpu) {
36dfe925
IM
4107 /*
4108 * Increase/decrease the disabled counter if we are
4109 * about to flip a bit in the cpumask:
4110 */
ccfe9e42 4111 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
9e01c1b7 4112 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
12883efb
SRRH
4113 atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
4114 ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
36dfe925 4115 }
ccfe9e42 4116 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
9e01c1b7 4117 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
12883efb
SRRH
4118 atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
4119 ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
36dfe925
IM
4120 }
4121 }
0b9b12c1 4122 arch_spin_unlock(&tr->max_lock);
a5e25883 4123 local_irq_enable();
36dfe925 4124
ccfe9e42 4125 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
36dfe925
IM
4126
4127 mutex_unlock(&tracing_cpumask_update_lock);
9e01c1b7 4128 free_cpumask_var(tracing_cpumask_new);
c7078de1
IM
4129
4130 return count;
36dfe925
IM
4131
4132err_unlock:
215368e8 4133 free_cpumask_var(tracing_cpumask_new);
36dfe925
IM
4134
4135 return err;
c7078de1
IM
4136}
4137
5e2336a0 4138static const struct file_operations tracing_cpumask_fops = {
ccfe9e42 4139 .open = tracing_open_generic_tr,
c7078de1
IM
4140 .read = tracing_cpumask_read,
4141 .write = tracing_cpumask_write,
ccfe9e42 4142 .release = tracing_release_generic_tr,
b444786f 4143 .llseek = generic_file_llseek,
bc0c38d1
SR
4144};
4145
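tracing_cpumask_write() parses a hex CPU mask with cpumask_parse_user() and, for every bit that changes, flips the per-CPU disabled counter and the ring buffer's record-enable state. A hedged sketch that limits tracing to CPUs 0 and 1 by writing the mask "3" (path assumed, root required):

/* Restrict tracing to CPUs 0-1; bits cleared here stop those CPUs'
 * ring buffers from recording, as done in tracing_cpumask_write().
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *mask = "3\n";	/* hex mask: CPUs 0 and 1 */
	int fd = open("/sys/kernel/tracing/tracing_cpumask", O_WRONLY);

	if (fd < 0) {
		perror("open tracing_cpumask");
		return 1;
	}
	if (write(fd, mask, strlen(mask)) < 0)
		perror("write tracing_cpumask");
	close(fd);
	return 0;
}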
fdb372ed 4146static int tracing_trace_options_show(struct seq_file *m, void *v)
bc0c38d1 4147{
d8e83d26 4148 struct tracer_opt *trace_opts;
2b6080f2 4149 struct trace_array *tr = m->private;
d8e83d26 4150 u32 tracer_flags;
d8e83d26 4151 int i;
adf9f195 4152
d8e83d26 4153 mutex_lock(&trace_types_lock);
2b6080f2
SR
4154 tracer_flags = tr->current_trace->flags->val;
4155 trace_opts = tr->current_trace->flags->opts;
d8e83d26 4156
bc0c38d1 4157 for (i = 0; trace_options[i]; i++) {
983f938a 4158 if (tr->trace_flags & (1 << i))
fdb372ed 4159 seq_printf(m, "%s\n", trace_options[i]);
bc0c38d1 4160 else
fdb372ed 4161 seq_printf(m, "no%s\n", trace_options[i]);
bc0c38d1
SR
4162 }
4163
adf9f195
FW
4164 for (i = 0; trace_opts[i].name; i++) {
4165 if (tracer_flags & trace_opts[i].bit)
fdb372ed 4166 seq_printf(m, "%s\n", trace_opts[i].name);
adf9f195 4167 else
fdb372ed 4168 seq_printf(m, "no%s\n", trace_opts[i].name);
adf9f195 4169 }
d8e83d26 4170 mutex_unlock(&trace_types_lock);
adf9f195 4171
fdb372ed 4172 return 0;
bc0c38d1 4173}
bc0c38d1 4174
8c1a49ae 4175static int __set_tracer_option(struct trace_array *tr,
8d18eaaf
LZ
4176 struct tracer_flags *tracer_flags,
4177 struct tracer_opt *opts, int neg)
4178{
d39cdd20 4179 struct tracer *trace = tracer_flags->trace;
8d18eaaf 4180 int ret;
bc0c38d1 4181
8c1a49ae 4182 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
8d18eaaf
LZ
4183 if (ret)
4184 return ret;
4185
4186 if (neg)
4187 tracer_flags->val &= ~opts->bit;
4188 else
4189 tracer_flags->val |= opts->bit;
4190 return 0;
bc0c38d1
SR
4191}
4192
adf9f195 4193/* Try to assign a tracer specific option */
8c1a49ae 4194static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
adf9f195 4195{
8c1a49ae 4196 struct tracer *trace = tr->current_trace;
7770841e 4197 struct tracer_flags *tracer_flags = trace->flags;
adf9f195 4198 struct tracer_opt *opts = NULL;
8d18eaaf 4199 int i;
adf9f195 4200
7770841e
Z
4201 for (i = 0; tracer_flags->opts[i].name; i++) {
4202 opts = &tracer_flags->opts[i];
adf9f195 4203
8d18eaaf 4204 if (strcmp(cmp, opts->name) == 0)
8c1a49ae 4205 return __set_tracer_option(tr, trace->flags, opts, neg);
adf9f195 4206 }
adf9f195 4207
8d18eaaf 4208 return -EINVAL;
adf9f195
FW
4209}
4210
613f04a0
SRRH
4211/* Some tracers require overwrite to stay enabled */
4212int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
4213{
4214 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
4215 return -1;
4216
4217 return 0;
4218}
4219
2b6080f2 4220int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
af4617bd
SR
4221{
4222 /* do nothing if flag is already set */
983f938a 4223 if (!!(tr->trace_flags & mask) == !!enabled)
613f04a0
SRRH
4224 return 0;
4225
4226 /* Give the tracer a chance to approve the change */
2b6080f2 4227 if (tr->current_trace->flag_changed)
bf6065b5 4228 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
613f04a0 4229 return -EINVAL;
af4617bd
SR
4230
4231 if (enabled)
983f938a 4232 tr->trace_flags |= mask;
af4617bd 4233 else
983f938a 4234 tr->trace_flags &= ~mask;
e870e9a1
LZ
4235
4236 if (mask == TRACE_ITER_RECORD_CMD)
4237 trace_event_enable_cmd_record(enabled);
750912fa 4238
c37775d5
SR
4239 if (mask == TRACE_ITER_EVENT_FORK)
4240 trace_event_follow_fork(tr, enabled);
4241
1e10486f
NK
4242 if (mask == TRACE_ITER_FUNC_FORK)
4243 ftrace_pid_follow_fork(tr, enabled);
4244
80902822 4245 if (mask == TRACE_ITER_OVERWRITE) {
12883efb 4246 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
80902822 4247#ifdef CONFIG_TRACER_MAX_TRACE
12883efb 4248 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
80902822
SRRH
4249#endif
4250 }
81698831 4251
b9f9108c 4252 if (mask == TRACE_ITER_PRINTK) {
81698831 4253 trace_printk_start_stop_comm(enabled);
b9f9108c
SRRH
4254 trace_printk_control(enabled);
4255 }
613f04a0
SRRH
4256
4257 return 0;
af4617bd
SR
4258}
4259
2b6080f2 4260static int trace_set_options(struct trace_array *tr, char *option)
bc0c38d1 4261{
8d18eaaf 4262 char *cmp;
bc0c38d1 4263 int neg = 0;
613f04a0 4264 int ret = -ENODEV;
bc0c38d1 4265 int i;
a4d1e688 4266 size_t orig_len = strlen(option);
bc0c38d1 4267
7bcfaf54 4268 cmp = strstrip(option);
bc0c38d1 4269
8d18eaaf 4270 if (strncmp(cmp, "no", 2) == 0) {
bc0c38d1
SR
4271 neg = 1;
4272 cmp += 2;
4273 }
4274
69d34da2
SRRH
4275 mutex_lock(&trace_types_lock);
4276
bc0c38d1 4277 for (i = 0; trace_options[i]; i++) {
8d18eaaf 4278 if (strcmp(cmp, trace_options[i]) == 0) {
2b6080f2 4279 ret = set_tracer_flag(tr, 1 << i, !neg);
bc0c38d1
SR
4280 break;
4281 }
4282 }
adf9f195
FW
4283
4284 /* If no option could be set, test the specific tracer options */
69d34da2 4285 if (!trace_options[i])
8c1a49ae 4286 ret = set_tracer_option(tr, cmp, neg);
69d34da2
SRRH
4287
4288 mutex_unlock(&trace_types_lock);
bc0c38d1 4289
a4d1e688
JW
4290 /*
4291 * If the first trailing whitespace is replaced with '\0' by strstrip,
4292 * turn it back into a space.
4293 */
4294 if (orig_len > strlen(option))
4295 option[strlen(option)] = ' ';
4296
7bcfaf54
SR
4297 return ret;
4298}
4299
a4d1e688
JW
4300static void __init apply_trace_boot_options(void)
4301{
4302 char *buf = trace_boot_options_buf;
4303 char *option;
4304
4305 while (true) {
4306 option = strsep(&buf, ",");
4307
4308 if (!option)
4309 break;
a4d1e688 4310
43ed3843
SRRH
4311 if (*option)
4312 trace_set_options(&global_trace, option);
a4d1e688
JW
4313
4314 /* Put back the comma to allow this to be called again */
4315 if (buf)
4316 *(buf - 1) = ',';
4317 }
4318}
4319
7bcfaf54
SR
4320static ssize_t
4321tracing_trace_options_write(struct file *filp, const char __user *ubuf,
4322 size_t cnt, loff_t *ppos)
4323{
2b6080f2
SR
4324 struct seq_file *m = filp->private_data;
4325 struct trace_array *tr = m->private;
7bcfaf54 4326 char buf[64];
613f04a0 4327 int ret;
7bcfaf54
SR
4328
4329 if (cnt >= sizeof(buf))
4330 return -EINVAL;
4331
4afe6495 4332 if (copy_from_user(buf, ubuf, cnt))
7bcfaf54
SR
4333 return -EFAULT;
4334
a8dd2176
SR
4335 buf[cnt] = 0;
4336
2b6080f2 4337 ret = trace_set_options(tr, buf);
613f04a0
SRRH
4338 if (ret < 0)
4339 return ret;
7bcfaf54 4340
cf8517cf 4341 *ppos += cnt;
bc0c38d1
SR
4342
4343 return cnt;
4344}
4345
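trace_set_options() treats a leading "no" as a request to clear the named flag and otherwise sets it, one option per write; tracing_trace_options_write() above is the file-side entry point. A minimal sketch toggling the overwrite flag through the trace_options file (path assumed):

/* Toggle a trace option: a leading "no" clears the flag,
 * anything else sets it (see trace_set_options()).
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void set_option(const char *opt)
{
	int fd = open("/sys/kernel/tracing/trace_options", O_WRONLY);

	if (fd < 0) {
		perror("open trace_options");
		return;
	}
	if (write(fd, opt, strlen(opt)) < 0)
		perror("write trace_options");
	close(fd);
}

int main(void)
{
	set_option("nooverwrite");	/* keep old events, drop new ones when full */
	set_option("overwrite");	/* back to overwriting the oldest events */
	return 0;
}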
fdb372ed
LZ
4346static int tracing_trace_options_open(struct inode *inode, struct file *file)
4347{
7b85af63 4348 struct trace_array *tr = inode->i_private;
f77d09a3 4349 int ret;
7b85af63 4350
fdb372ed
LZ
4351 if (tracing_disabled)
4352 return -ENODEV;
2b6080f2 4353
7b85af63
SRRH
4354 if (trace_array_get(tr) < 0)
4355 return -ENODEV;
4356
f77d09a3
AL
4357 ret = single_open(file, tracing_trace_options_show, inode->i_private);
4358 if (ret < 0)
4359 trace_array_put(tr);
4360
4361 return ret;
fdb372ed
LZ
4362}
4363
5e2336a0 4364static const struct file_operations tracing_iter_fops = {
fdb372ed
LZ
4365 .open = tracing_trace_options_open,
4366 .read = seq_read,
4367 .llseek = seq_lseek,
7b85af63 4368 .release = tracing_single_release_tr,
ee6bce52 4369 .write = tracing_trace_options_write,
bc0c38d1
SR
4370};
4371
7bd2f24c
IM
4372static const char readme_msg[] =
4373 "tracing mini-HOWTO:\n\n"
22f45649
SRRH
4374 "# echo 0 > tracing_on : quick way to disable tracing\n"
4375 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
4376 " Important files:\n"
4377 " trace\t\t\t- The static contents of the buffer\n"
4378 "\t\t\t To clear the buffer write into this file: echo > trace\n"
4379 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
4380 " current_tracer\t- function and latency tracers\n"
4381 " available_tracers\t- list of configured tracers for current_tracer\n"
4382 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
4383 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
4384 " trace_clock\t\t-change the clock used to order events\n"
4385 " local: Per cpu clock but may not be synced across CPUs\n"
4386 " global: Synced across CPUs but slows tracing down.\n"
4387 " counter: Not a clock, but just an increment\n"
4388 " uptime: Jiffy counter from time of boot\n"
4389 " perf: Same clock that perf events use\n"
4390#ifdef CONFIG_X86_64
4391 " x86-tsc: TSC cycle counter\n"
4392#endif
4393 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
fa32e855 4394 "\n trace_marker_raw\t\t- Writes into this file writes binary data into the kernel buffer\n"
22f45649
SRRH
4395 " tracing_cpumask\t- Limit which CPUs to trace\n"
4396 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
4397 "\t\t\t Remove sub-buffer with rmdir\n"
4398 " trace_options\t\t- Set format or modify how tracing happens\n"
71485c45
SRRH
4399 "\t\t\t Disable an option by adding a suffix 'no' to the\n"
4400 "\t\t\t option name\n"
939c7a4f 4401 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
22f45649
SRRH
4402#ifdef CONFIG_DYNAMIC_FTRACE
4403 "\n available_filter_functions - list of functions that can be filtered on\n"
71485c45
SRRH
4404 " set_ftrace_filter\t- echo function name in here to only trace these\n"
4405 "\t\t\t functions\n"
60f1d5e3 4406 "\t accepts: func_full_name or glob-matching-pattern\n"
71485c45
SRRH
4407 "\t modules: Can select a group via module\n"
4408 "\t Format: :mod:<module-name>\n"
4409 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
4410 "\t triggers: a command to perform when function is hit\n"
4411 "\t Format: <function>:<trigger>[:count]\n"
4412 "\t trigger: traceon, traceoff\n"
4413 "\t\t enable_event:<system>:<event>\n"
4414 "\t\t disable_event:<system>:<event>\n"
22f45649 4415#ifdef CONFIG_STACKTRACE
71485c45 4416 "\t\t stacktrace\n"
22f45649
SRRH
4417#endif
4418#ifdef CONFIG_TRACER_SNAPSHOT
71485c45 4419 "\t\t snapshot\n"
22f45649 4420#endif
17a280ea
SRRH
4421 "\t\t dump\n"
4422 "\t\t cpudump\n"
71485c45
SRRH
4423 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
4424 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
4425 "\t The first one will disable tracing every time do_fault is hit\n"
4426 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
4427 "\t The first time do trap is hit and it disables tracing, the\n"
4428 "\t counter will decrement to 2. If tracing is already disabled,\n"
4429 "\t the counter will not decrement. It only decrements when the\n"
4430 "\t trigger did work\n"
4431 "\t To remove trigger without count:\n"
4432 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
4433 "\t To remove trigger with a count:\n"
4434 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
22f45649 4435 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
71485c45
SRRH
4436 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
4437 "\t modules: Can select a group via module command :mod:\n"
4438 "\t Does not accept triggers\n"
22f45649
SRRH
4439#endif /* CONFIG_DYNAMIC_FTRACE */
4440#ifdef CONFIG_FUNCTION_TRACER
71485c45
SRRH
4441 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
4442 "\t\t (function)\n"
22f45649
SRRH
4443#endif
4444#ifdef CONFIG_FUNCTION_GRAPH_TRACER
4445 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
d048a8c7 4446 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
22f45649
SRRH
4447 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
4448#endif
4449#ifdef CONFIG_TRACER_SNAPSHOT
71485c45
SRRH
4450 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
4451 "\t\t\t snapshot buffer. Read the contents for more\n"
4452 "\t\t\t information\n"
22f45649 4453#endif
991821c8 4454#ifdef CONFIG_STACK_TRACER
22f45649
SRRH
4455 " stack_trace\t\t- Shows the max stack trace when active\n"
4456 " stack_max_size\t- Shows current max stack size that was traced\n"
71485c45
SRRH
4457 "\t\t\t Write into this file to reset the max size (trigger a\n"
4458 "\t\t\t new trace)\n"
22f45649 4459#ifdef CONFIG_DYNAMIC_FTRACE
71485c45
SRRH
4460 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
4461 "\t\t\t traces\n"
22f45649 4462#endif
991821c8 4463#endif /* CONFIG_STACK_TRACER */
6b0b7551 4464#ifdef CONFIG_KPROBE_EVENTS
86425625
MH
4465 " kprobe_events\t\t- Add/remove/show the kernel dynamic events\n"
4466 "\t\t\t Write into this file to define/undefine new trace events.\n"
4467#endif
6b0b7551 4468#ifdef CONFIG_UPROBE_EVENTS
86425625
MH
4469 " uprobe_events\t\t- Add/remove/show the userspace dynamic events\n"
4470 "\t\t\t Write into this file to define/undefine new trace events.\n"
4471#endif
6b0b7551 4472#if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
86425625
MH
4473 "\t accepts: event-definitions (one definition per line)\n"
4474 "\t Format: p|r[:[<group>/]<event>] <place> [<args>]\n"
4475 "\t -:[<group>/]<event>\n"
6b0b7551 4476#ifdef CONFIG_KPROBE_EVENTS
86425625 4477 "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
35b6f55a 4478 "place (kretprobe): [<module>:]<symbol>[+<offset>]|<memaddr>\n"
86425625 4479#endif
6b0b7551 4480#ifdef CONFIG_UPROBE_EVENTS
86425625
MH
4481 "\t place: <path>:<offset>\n"
4482#endif
4483 "\t args: <name>=fetcharg[:type]\n"
4484 "\t fetcharg: %<register>, @<address>, @<symbol>[+|-<offset>],\n"
4485 "\t $stack<index>, $stack, $retval, $comm\n"
4486 "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string,\n"
4487 "\t b<bit-width>@<bit-offset>/<container-size>\n"
4488#endif
26f25564
TZ
4489 " events/\t\t- Directory containing all trace event subsystems:\n"
4490 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
4491 " events/<system>/\t- Directory containing all trace events for <system>:\n"
71485c45
SRRH
4492 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
4493 "\t\t\t events\n"
26f25564 4494 " filter\t\t- If set, only events passing filter are traced\n"
71485c45
SRRH
4495 " events/<system>/<event>/\t- Directory containing control files for\n"
4496 "\t\t\t <event>:\n"
26f25564
TZ
4497 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
4498 " filter\t\t- If set, only events passing filter are traced\n"
4499 " trigger\t\t- If set, a command to perform when event is hit\n"
71485c45
SRRH
4500 "\t Format: <trigger>[:count][if <filter>]\n"
4501 "\t trigger: traceon, traceoff\n"
4502 "\t enable_event:<system>:<event>\n"
4503 "\t disable_event:<system>:<event>\n"
d0bad49b
TZ
4504#ifdef CONFIG_HIST_TRIGGERS
4505 "\t enable_hist:<system>:<event>\n"
4506 "\t disable_hist:<system>:<event>\n"
4507#endif
26f25564 4508#ifdef CONFIG_STACKTRACE
71485c45 4509 "\t\t stacktrace\n"
26f25564
TZ
4510#endif
4511#ifdef CONFIG_TRACER_SNAPSHOT
71485c45 4512 "\t\t snapshot\n"
7ef224d1
TZ
4513#endif
4514#ifdef CONFIG_HIST_TRIGGERS
4515 "\t\t hist (see below)\n"
26f25564 4516#endif
71485c45
SRRH
4517 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
4518 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
4519 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
4520 "\t events/block/block_unplug/trigger\n"
4521 "\t The first disables tracing every time block_unplug is hit.\n"
4522 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
4523 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
4524 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
4525 "\t Like function triggers, the counter is only decremented if it\n"
4526 "\t enabled or disabled tracing.\n"
4527 "\t To remove a trigger without a count:\n"
4528 "\t echo '!<trigger> > <system>/<event>/trigger\n"
4529 "\t To remove a trigger with a count:\n"
4530 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
4531 "\t Filters can be ignored when removing a trigger.\n"
7ef224d1
TZ
4532#ifdef CONFIG_HIST_TRIGGERS
4533 " hist trigger\t- If set, event hits are aggregated into a hash table\n"
76a3b0c8 4534 "\t Format: hist:keys=<field1[,field2,...]>\n"
f2606835 4535 "\t [:values=<field1[,field2,...]>]\n"
e62347d2 4536 "\t [:sort=<field1[,field2,...]>]\n"
7ef224d1 4537 "\t [:size=#entries]\n"
e86ae9ba 4538 "\t [:pause][:continue][:clear]\n"
5463bfda 4539 "\t [:name=histname1]\n"
7ef224d1
TZ
4540 "\t [if <filter>]\n\n"
4541 "\t When a matching event is hit, an entry is added to a hash\n"
f2606835
TZ
4542 "\t table using the key(s) and value(s) named, and the value of a\n"
4543 "\t sum called 'hitcount' is incremented. Keys and values\n"
4544 "\t correspond to fields in the event's format description. Keys\n"
69a0200c
TZ
4545 "\t can be any field, or the special string 'stacktrace'.\n"
4546 "\t Compound keys consisting of up to two fields can be specified\n"
4547 "\t by the 'keys' keyword. Values must correspond to numeric\n"
4548 "\t fields. Sort keys consisting of up to two fields can be\n"
4549 "\t specified using the 'sort' keyword. The sort direction can\n"
4550 "\t be modified by appending '.descending' or '.ascending' to a\n"
4551 "\t sort field. The 'size' parameter can be used to specify more\n"
5463bfda
TZ
4552 "\t or fewer than the default 2048 entries for the hashtable size.\n"
4553 "\t If a hist trigger is given a name using the 'name' parameter,\n"
4554 "\t its histogram data will be shared with other triggers of the\n"
4555 "\t same name, and trigger hits will update this common data.\n\n"
7ef224d1 4556 "\t Reading the 'hist' file for the event will dump the hash\n"
52a7f16d
TZ
4557 "\t table in its entirety to stdout. If there are multiple hist\n"
4558 "\t triggers attached to an event, there will be a table for each\n"
5463bfda
TZ
4559 "\t trigger in the output. The table displayed for a named\n"
4560 "\t trigger will be the same as any other instance having the\n"
4561 "\t same name. The default format used to display a given field\n"
4562 "\t can be modified by appending any of the following modifiers\n"
4563 "\t to the field name, as applicable:\n\n"
c6afad49
TZ
4564 "\t .hex display a number as a hex value\n"
4565 "\t .sym display an address as a symbol\n"
6b4827ad 4566 "\t .sym-offset display an address as a symbol and offset\n"
31696198
TZ
4567 "\t .execname display a common_pid as a program name\n"
4568 "\t .syscall display a syscall id as a syscall name\n\n"
4b94f5b7 4569 "\t .log2 display log2 value rather than raw number\n\n"
83e99914
TZ
4570 "\t The 'pause' parameter can be used to pause an existing hist\n"
4571 "\t trigger or to start a hist trigger but not log any events\n"
4572 "\t until told to do so. 'continue' can be used to start or\n"
4573 "\t restart a paused hist trigger.\n\n"
e86ae9ba
TZ
4574 "\t The 'clear' parameter will clear the contents of a running\n"
4575 "\t hist trigger and leave its current paused/active state\n"
4576 "\t unchanged.\n\n"
d0bad49b
TZ
4577 "\t The enable_hist and disable_hist triggers can be used to\n"
4578 "\t have one event conditionally start and stop another event's\n"
4579 "\t already-attached hist trigger. The syntax is analagous to\n"
4580 "\t the enable_event and disable_event triggers.\n"
7ef224d1 4581#endif
7bd2f24c
IM
4582;
4583
4584static ssize_t
4585tracing_readme_read(struct file *filp, char __user *ubuf,
4586 size_t cnt, loff_t *ppos)
4587{
4588 return simple_read_from_buffer(ubuf, cnt, ppos,
4589 readme_msg, strlen(readme_msg));
4590}
4591
5e2336a0 4592static const struct file_operations tracing_readme_fops = {
c7078de1
IM
4593 .open = tracing_open_generic,
4594 .read = tracing_readme_read,
b444786f 4595 .llseek = generic_file_llseek,
7bd2f24c
IM
4596};
4597
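The README above documents function triggers of the form <function>:<trigger>[:count] written into set_ftrace_filter, and the '!' prefix that removes them. A hedged sketch installing and then removing the documented do_fault:traceoff example (requires CONFIG_DYNAMIC_FTRACE; path assumed):

/* Install, then remove, the "do_fault:traceoff" trigger described
 * in the README text above.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void filter_cmd(const char *cmd)
{
	int fd = open("/sys/kernel/tracing/set_ftrace_filter", O_WRONLY);

	if (fd < 0) {
		perror("open set_ftrace_filter");
		return;
	}
	if (write(fd, cmd, strlen(cmd)) < 0)
		perror("write set_ftrace_filter");
	close(fd);
}

int main(void)
{
	filter_cmd("do_fault:traceoff\n");	/* stop tracing whenever do_fault is hit */
	filter_cmd("!do_fault:traceoff\n");	/* remove the trigger again */
	return 0;
}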
42584c81
YY
4598static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
4599{
4600 unsigned int *ptr = v;
69abe6a5 4601
42584c81
YY
4602 if (*pos || m->count)
4603 ptr++;
69abe6a5 4604
42584c81 4605 (*pos)++;
69abe6a5 4606
939c7a4f
YY
4607 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
4608 ptr++) {
42584c81
YY
4609 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
4610 continue;
69abe6a5 4611
42584c81
YY
4612 return ptr;
4613 }
69abe6a5 4614
42584c81
YY
4615 return NULL;
4616}
4617
4618static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
4619{
4620 void *v;
4621 loff_t l = 0;
69abe6a5 4622
4c27e756
SRRH
4623 preempt_disable();
4624 arch_spin_lock(&trace_cmdline_lock);
4625
939c7a4f 4626 v = &savedcmd->map_cmdline_to_pid[0];
42584c81
YY
4627 while (l <= *pos) {
4628 v = saved_cmdlines_next(m, v, &l);
4629 if (!v)
4630 return NULL;
69abe6a5
AP
4631 }
4632
42584c81
YY
4633 return v;
4634}
4635
4636static void saved_cmdlines_stop(struct seq_file *m, void *v)
4637{
4c27e756
SRRH
4638 arch_spin_unlock(&trace_cmdline_lock);
4639 preempt_enable();
42584c81 4640}
69abe6a5 4641
42584c81
YY
4642static int saved_cmdlines_show(struct seq_file *m, void *v)
4643{
4644 char buf[TASK_COMM_LEN];
4645 unsigned int *pid = v;
69abe6a5 4646
4c27e756 4647 __trace_find_cmdline(*pid, buf);
42584c81
YY
4648 seq_printf(m, "%d %s\n", *pid, buf);
4649 return 0;
4650}
4651
4652static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
4653 .start = saved_cmdlines_start,
4654 .next = saved_cmdlines_next,
4655 .stop = saved_cmdlines_stop,
4656 .show = saved_cmdlines_show,
4657};
4658
4659static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
4660{
4661 if (tracing_disabled)
4662 return -ENODEV;
4663
4664 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
69abe6a5
AP
4665}
4666
4667static const struct file_operations tracing_saved_cmdlines_fops = {
42584c81
YY
4668 .open = tracing_saved_cmdlines_open,
4669 .read = seq_read,
4670 .llseek = seq_lseek,
4671 .release = seq_release,
69abe6a5
AP
4672};
4673
939c7a4f
YY
4674static ssize_t
4675tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
4676 size_t cnt, loff_t *ppos)
4677{
4678 char buf[64];
4679 int r;
4680
4681 arch_spin_lock(&trace_cmdline_lock);
a6af8fbf 4682 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
939c7a4f
YY
4683 arch_spin_unlock(&trace_cmdline_lock);
4684
4685 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4686}
4687
4688static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
4689{
4690 kfree(s->saved_cmdlines);
4691 kfree(s->map_cmdline_to_pid);
4692 kfree(s);
4693}
4694
4695static int tracing_resize_saved_cmdlines(unsigned int val)
4696{
4697 struct saved_cmdlines_buffer *s, *savedcmd_temp;
4698
a6af8fbf 4699 s = kmalloc(sizeof(*s), GFP_KERNEL);
939c7a4f
YY
4700 if (!s)
4701 return -ENOMEM;
4702
4703 if (allocate_cmdlines_buffer(val, s) < 0) {
4704 kfree(s);
4705 return -ENOMEM;
4706 }
4707
4708 arch_spin_lock(&trace_cmdline_lock);
4709 savedcmd_temp = savedcmd;
4710 savedcmd = s;
4711 arch_spin_unlock(&trace_cmdline_lock);
4712 free_saved_cmdlines_buffer(savedcmd_temp);
4713
4714 return 0;
4715}
4716
4717static ssize_t
4718tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
4719 size_t cnt, loff_t *ppos)
4720{
4721 unsigned long val;
4722 int ret;
4723
4724 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4725 if (ret)
4726 return ret;
4727
4728 /* must have at least 1 entry and at most PID_MAX_DEFAULT entries */
4729 if (!val || val > PID_MAX_DEFAULT)
4730 return -EINVAL;
4731
4732 ret = tracing_resize_saved_cmdlines((unsigned int)val);
4733 if (ret < 0)
4734 return ret;
4735
4736 *ppos += cnt;
4737
4738 return cnt;
4739}
4740
4741static const struct file_operations tracing_saved_cmdlines_size_fops = {
4742 .open = tracing_open_generic,
4743 .read = tracing_saved_cmdlines_size_read,
4744 .write = tracing_saved_cmdlines_size_write,
4745};
4746
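tracing_saved_cmdlines_size_write() accepts a value between 1 and PID_MAX_DEFAULT and swaps in a freshly allocated comm cache under trace_cmdline_lock. A minimal sketch growing the cache to 1024 entries through the saved_cmdlines_size file (path assumed):

/* Grow the saved comm<->pid cache to 1024 entries; the old buffer is
 * freed by tracing_resize_saved_cmdlines() after the swap.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *val = "1024\n";
	int fd = open("/sys/kernel/tracing/saved_cmdlines_size", O_WRONLY);

	if (fd < 0) {
		perror("open saved_cmdlines_size");
		return 1;
	}
	if (write(fd, val, strlen(val)) < 0)
		perror("write saved_cmdlines_size");
	close(fd);
	return 0;
}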
681bec03 4747#ifdef CONFIG_TRACE_EVAL_MAP_FILE
23bf8cb8 4748static union trace_eval_map_item *
f57a4143 4749update_eval_map(union trace_eval_map_item *ptr)
9828413d 4750{
00f4b652 4751 if (!ptr->map.eval_string) {
9828413d
SRRH
4752 if (ptr->tail.next) {
4753 ptr = ptr->tail.next;
4754 /* Set ptr to the next real item (skip head) */
4755 ptr++;
4756 } else
4757 return NULL;
4758 }
4759 return ptr;
4760}
4761
f57a4143 4762static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
9828413d 4763{
23bf8cb8 4764 union trace_eval_map_item *ptr = v;
9828413d
SRRH
4765
4766 /*
4767 * Paranoid! If ptr points to end, we don't want to increment past it.
4768 * This really should never happen.
4769 */
f57a4143 4770 ptr = update_eval_map(ptr);
9828413d
SRRH
4771 if (WARN_ON_ONCE(!ptr))
4772 return NULL;
4773
4774 ptr++;
4775
4776 (*pos)++;
4777
f57a4143 4778 ptr = update_eval_map(ptr);
9828413d
SRRH
4779
4780 return ptr;
4781}
4782
f57a4143 4783static void *eval_map_start(struct seq_file *m, loff_t *pos)
9828413d 4784{
23bf8cb8 4785 union trace_eval_map_item *v;
9828413d
SRRH
4786 loff_t l = 0;
4787
1793ed93 4788 mutex_lock(&trace_eval_mutex);
9828413d 4789
23bf8cb8 4790 v = trace_eval_maps;
9828413d
SRRH
4791 if (v)
4792 v++;
4793
4794 while (v && l < *pos) {
f57a4143 4795 v = eval_map_next(m, v, &l);
9828413d
SRRH
4796 }
4797
4798 return v;
4799}
4800
f57a4143 4801static void eval_map_stop(struct seq_file *m, void *v)
9828413d 4802{
1793ed93 4803 mutex_unlock(&trace_eval_mutex);
9828413d
SRRH
4804}
4805
f57a4143 4806static int eval_map_show(struct seq_file *m, void *v)
9828413d 4807{
23bf8cb8 4808 union trace_eval_map_item *ptr = v;
9828413d
SRRH
4809
4810 seq_printf(m, "%s %ld (%s)\n",
00f4b652 4811 ptr->map.eval_string, ptr->map.eval_value,
9828413d
SRRH
4812 ptr->map.system);
4813
4814 return 0;
4815}
4816
f57a4143
JL
4817static const struct seq_operations tracing_eval_map_seq_ops = {
4818 .start = eval_map_start,
4819 .next = eval_map_next,
4820 .stop = eval_map_stop,
4821 .show = eval_map_show,
9828413d
SRRH
4822};
4823
f57a4143 4824static int tracing_eval_map_open(struct inode *inode, struct file *filp)
9828413d
SRRH
4825{
4826 if (tracing_disabled)
4827 return -ENODEV;
4828
f57a4143 4829 return seq_open(filp, &tracing_eval_map_seq_ops);
9828413d
SRRH
4830}
4831
f57a4143
JL
4832static const struct file_operations tracing_eval_map_fops = {
4833 .open = tracing_eval_map_open,
9828413d
SRRH
4834 .read = seq_read,
4835 .llseek = seq_lseek,
4836 .release = seq_release,
4837};
4838
23bf8cb8 4839static inline union trace_eval_map_item *
5f60b351 4840trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
9828413d
SRRH
4841{
4842 /* Return tail of array given the head */
4843 return ptr + ptr->head.length + 1;
4844}
4845
4846static void
f57a4143 4847trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
9828413d
SRRH
4848 int len)
4849{
00f4b652
JL
4850 struct trace_eval_map **stop;
4851 struct trace_eval_map **map;
23bf8cb8
JL
4852 union trace_eval_map_item *map_array;
4853 union trace_eval_map_item *ptr;
9828413d
SRRH
4854
4855 stop = start + len;
4856
4857 /*
23bf8cb8 4858 * The trace_eval_maps contains the map plus a head and tail item,
9828413d
SRRH
4859 * where the head holds the module and length of array, and the
4860 * tail holds a pointer to the next list.
4861 */
4862 map_array = kmalloc(sizeof(*map_array) * (len + 2), GFP_KERNEL);
4863 if (!map_array) {
f57a4143 4864 pr_warn("Unable to allocate trace eval mapping\n");
9828413d
SRRH
4865 return;
4866 }
4867
1793ed93 4868 mutex_lock(&trace_eval_mutex);
9828413d 4869
23bf8cb8
JL
4870 if (!trace_eval_maps)
4871 trace_eval_maps = map_array;
9828413d 4872 else {
23bf8cb8 4873 ptr = trace_eval_maps;
9828413d 4874 for (;;) {
5f60b351 4875 ptr = trace_eval_jmp_to_tail(ptr);
9828413d
SRRH
4876 if (!ptr->tail.next)
4877 break;
4878 ptr = ptr->tail.next;
4879
4880 }
4881 ptr->tail.next = map_array;
4882 }
4883 map_array->head.mod = mod;
4884 map_array->head.length = len;
4885 map_array++;
4886
4887 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
4888 map_array->map = **map;
4889 map_array++;
4890 }
4891 memset(map_array, 0, sizeof(*map_array));
4892
1793ed93 4893 mutex_unlock(&trace_eval_mutex);
9828413d
SRRH
4894}
4895
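The comment in trace_insert_eval_map_file() describes each block of trace_eval_maps as a head item (module pointer and map count), the copied maps themselves, and a tail item whose next pointer chains to the following block. The snippet below is a simplified, purely illustrative model of that layout using hypothetical stand-in types, not the kernel's definitions:

/* Illustrative model of one trace_eval_maps block:
 * [head][map 0]...[map len-1][tail], with tail.next chaining blocks.
 */
#include <stddef.h>
#include <stdio.h>

struct demo_map  { const char *eval_string; long eval_value; };
struct demo_head { void *mod; int length; };
struct demo_tail { union demo_item *next; };

union demo_item {
	struct demo_map  map;
	struct demo_head head;
	struct demo_tail tail;
};

/* Same jump as trace_eval_jmp_to_tail(): skip the head plus the maps. */
static union demo_item *demo_jmp_to_tail(union demo_item *head)
{
	return head + head->head.length + 1;
}

int main(void)
{
	/* One block holding two maps (the zeroed terminator is omitted). */
	union demo_item block[4] = {
		{ .head = { .mod = NULL, .length = 2 } },
		{ .map  = { "STATE_A", 1 } },
		{ .map  = { "STATE_B", 2 } },
		{ .tail = { .next = NULL } },
	};
	union demo_item *tail = demo_jmp_to_tail(block);

	printf("tail->next = %p (end of chain)\n", (void *)tail->tail.next);
	return 0;
}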
f57a4143 4896static void trace_create_eval_file(struct dentry *d_tracer)
9828413d 4897{
681bec03 4898 trace_create_file("eval_map", 0444, d_tracer,
f57a4143 4899 NULL, &tracing_eval_map_fops);
9828413d
SRRH
4900}
4901
681bec03 4902#else /* CONFIG_TRACE_EVAL_MAP_FILE */
f57a4143
JL
4903static inline void trace_create_eval_file(struct dentry *d_tracer) { }
4904static inline void trace_insert_eval_map_file(struct module *mod,
00f4b652 4905 struct trace_eval_map **start, int len) { }
681bec03 4906#endif /* !CONFIG_TRACE_EVAL_MAP_FILE */
9828413d 4907
f57a4143 4908static void trace_insert_eval_map(struct module *mod,
00f4b652 4909 struct trace_eval_map **start, int len)
0c564a53 4910{
00f4b652 4911 struct trace_eval_map **map;
0c564a53
SRRH
4912
4913 if (len <= 0)
4914 return;
4915
4916 map = start;
4917
f57a4143 4918 trace_event_eval_update(map, len);
9828413d 4919
f57a4143 4920 trace_insert_eval_map_file(mod, start, len);
0c564a53
SRRH
4921}
4922
bc0c38d1
SR
4923static ssize_t
4924tracing_set_trace_read(struct file *filp, char __user *ubuf,
4925 size_t cnt, loff_t *ppos)
4926{
2b6080f2 4927 struct trace_array *tr = filp->private_data;
ee6c2c1b 4928 char buf[MAX_TRACER_SIZE+2];
bc0c38d1
SR
4929 int r;
4930
4931 mutex_lock(&trace_types_lock);
2b6080f2 4932 r = sprintf(buf, "%s\n", tr->current_trace->name);
bc0c38d1
SR
4933 mutex_unlock(&trace_types_lock);
4934
4bf39a94 4935 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
bc0c38d1
SR
4936}
4937
b6f11df2
ACM
4938int tracer_init(struct tracer *t, struct trace_array *tr)
4939{
12883efb 4940 tracing_reset_online_cpus(&tr->trace_buffer);
b6f11df2
ACM
4941 return t->init(tr);
4942}
4943
12883efb 4944static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
438ced17
VN
4945{
4946 int cpu;
737223fb 4947
438ced17 4948 for_each_tracing_cpu(cpu)
12883efb 4949 per_cpu_ptr(buf->data, cpu)->entries = val;
438ced17
VN
4950}
4951
12883efb 4952#ifdef CONFIG_TRACER_MAX_TRACE
d60da506 4953/* resize @tr's buffer to the size of @size_tr's entries */
12883efb
SRRH
4954static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
4955 struct trace_buffer *size_buf, int cpu_id)
d60da506
HT
4956{
4957 int cpu, ret = 0;
4958
4959 if (cpu_id == RING_BUFFER_ALL_CPUS) {
4960 for_each_tracing_cpu(cpu) {
12883efb
SRRH
4961 ret = ring_buffer_resize(trace_buf->buffer,
4962 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
d60da506
HT
4963 if (ret < 0)
4964 break;
12883efb
SRRH
4965 per_cpu_ptr(trace_buf->data, cpu)->entries =
4966 per_cpu_ptr(size_buf->data, cpu)->entries;
d60da506
HT
4967 }
4968 } else {
12883efb
SRRH
4969 ret = ring_buffer_resize(trace_buf->buffer,
4970 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
d60da506 4971 if (ret == 0)
12883efb
SRRH
4972 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
4973 per_cpu_ptr(size_buf->data, cpu_id)->entries;
d60da506
HT
4974 }
4975
4976 return ret;
4977}
12883efb 4978#endif /* CONFIG_TRACER_MAX_TRACE */
d60da506 4979
2b6080f2
SR
4980static int __tracing_resize_ring_buffer(struct trace_array *tr,
4981 unsigned long size, int cpu)
73c5162a
SR
4982{
4983 int ret;
4984
4985 /*
4986 * If kernel or user changes the size of the ring buffer
a123c52b
SR
4987 * we use the size that was given, and we can forget about
4988 * expanding it later.
73c5162a 4989 */
55034cd6 4990 ring_buffer_expanded = true;
73c5162a 4991
b382ede6 4992 /* May be called before buffers are initialized */
12883efb 4993 if (!tr->trace_buffer.buffer)
b382ede6
SR
4994 return 0;
4995
12883efb 4996 ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
73c5162a
SR
4997 if (ret < 0)
4998 return ret;
4999
12883efb 5000#ifdef CONFIG_TRACER_MAX_TRACE
2b6080f2
SR
5001 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
5002 !tr->current_trace->use_max_tr)
ef710e10
KM
5003 goto out;
5004
12883efb 5005 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
73c5162a 5006 if (ret < 0) {
12883efb
SRRH
5007 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
5008 &tr->trace_buffer, cpu);
73c5162a 5009 if (r < 0) {
a123c52b
SR
5010 /*
5011 * AARGH! We are left with different
5012 * size max buffer!!!!
5013 * The max buffer is our "snapshot" buffer.
5014 * When a tracer needs a snapshot (one of the
5015 * latency tracers), it swaps the max buffer
5016 * with the saved snapshot. We succeeded in updating
5017 * the size of the main buffer, but failed to
5018 * update the size of the max buffer. But when we tried
5019 * to reset the main buffer to the original size, we
5020 * failed there too. This is very unlikely to
5021 * happen, but if it does, warn and kill all
5022 * tracing.
5023 */
73c5162a
SR
5024 WARN_ON(1);
5025 tracing_disabled = 1;
5026 }
5027 return ret;
5028 }
5029
438ced17 5030 if (cpu == RING_BUFFER_ALL_CPUS)
12883efb 5031 set_buffer_entries(&tr->max_buffer, size);
438ced17 5032 else
12883efb 5033 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
438ced17 5034
ef710e10 5035 out:
12883efb
SRRH
5036#endif /* CONFIG_TRACER_MAX_TRACE */
5037
438ced17 5038 if (cpu == RING_BUFFER_ALL_CPUS)
12883efb 5039 set_buffer_entries(&tr->trace_buffer, size);
438ced17 5040 else
12883efb 5041 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
73c5162a
SR
5042
5043 return ret;
5044}
5045
2b6080f2
SR
5046static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
5047 unsigned long size, int cpu_id)
4f271a2a 5048{
83f40318 5049 int ret = size;
4f271a2a
VN
5050
5051 mutex_lock(&trace_types_lock);
5052
438ced17
VN
5053 if (cpu_id != RING_BUFFER_ALL_CPUS) {
5054 /* make sure, this cpu is enabled in the mask */
5055 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
5056 ret = -EINVAL;
5057 goto out;
5058 }
5059 }
4f271a2a 5060
2b6080f2 5061 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
4f271a2a
VN
5062 if (ret < 0)
5063 ret = -ENOMEM;
5064
438ced17 5065out:
4f271a2a
VN
5066 mutex_unlock(&trace_types_lock);
5067
5068 return ret;
5069}
5070
ef710e10 5071
1852fcce
SR
5072/**
5073 * tracing_update_buffers - used by tracing facility to expand ring buffers
5074 *
5075 * To save memory when tracing is never used on a system that has it
5076 * configured in, the ring buffers are set to a minimum size. But once
5077 * a user starts to use the tracing facility, they need to grow
5078 * to their default size.
5079 *
5080 * This function is to be called when a tracer is about to be used.
5081 */
5082int tracing_update_buffers(void)
5083{
5084 int ret = 0;
5085
1027fcb2 5086 mutex_lock(&trace_types_lock);
1852fcce 5087 if (!ring_buffer_expanded)
2b6080f2 5088 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
438ced17 5089 RING_BUFFER_ALL_CPUS);
1027fcb2 5090 mutex_unlock(&trace_types_lock);
1852fcce
SR
5091
5092 return ret;
5093}
5094
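tracing_update_buffers() exists because the ring buffers start at a minimum size and only grow to their default size on first use. The buffer_size_kb file listed in the README above allows an explicit resize; its write handler (defined elsewhere in this file) is assumed here to end up in tracing_resize_ring_buffer(). A minimal sketch (path assumed):

/* Request a 4096 kB per-CPU ring buffer via buffer_size_kb. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *kb = "4096\n";
	int fd = open("/sys/kernel/tracing/buffer_size_kb", O_WRONLY);

	if (fd < 0) {
		perror("open buffer_size_kb");
		return 1;
	}
	if (write(fd, kb, strlen(kb)) < 0)
		perror("write buffer_size_kb");
	close(fd);
	return 0;
}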
577b785f
SR
5095struct trace_option_dentry;
5096
37aea98b 5097static void
2b6080f2 5098create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
577b785f 5099
6b450d25
SRRH
5100/*
5101 * Used to clear out the tracer before deletion of an instance.
5102 * Must have trace_types_lock held.
5103 */
5104static void tracing_set_nop(struct trace_array *tr)
5105{
5106 if (tr->current_trace == &nop_trace)
5107 return;
5108
50512ab5 5109 tr->current_trace->enabled--;
6b450d25
SRRH
5110
5111 if (tr->current_trace->reset)
5112 tr->current_trace->reset(tr);
5113
5114 tr->current_trace = &nop_trace;
5115}
5116
41d9c0be 5117static void add_tracer_options(struct trace_array *tr, struct tracer *t)
bc0c38d1 5118{
09d23a1d
SRRH
5119 /* Only enable if the directory has been created already. */
5120 if (!tr->dir)
5121 return;
5122
37aea98b 5123 create_trace_option_files(tr, t);
09d23a1d
SRRH
5124}
5125
5126static int tracing_set_tracer(struct trace_array *tr, const char *buf)
5127{
bc0c38d1 5128 struct tracer *t;
12883efb 5129#ifdef CONFIG_TRACER_MAX_TRACE
34600f0e 5130 bool had_max_tr;
12883efb 5131#endif
d9e54076 5132 int ret = 0;
bc0c38d1 5133
1027fcb2
SR
5134 mutex_lock(&trace_types_lock);
5135
73c5162a 5136 if (!ring_buffer_expanded) {
2b6080f2 5137 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
438ced17 5138 RING_BUFFER_ALL_CPUS);
73c5162a 5139 if (ret < 0)
59f586db 5140 goto out;
73c5162a
SR
5141 ret = 0;
5142 }
5143
bc0c38d1
SR
5144 for (t = trace_types; t; t = t->next) {
5145 if (strcmp(t->name, buf) == 0)
5146 break;
5147 }
c2931e05
FW
5148 if (!t) {
5149 ret = -EINVAL;
5150 goto out;
5151 }
2b6080f2 5152 if (t == tr->current_trace)
bc0c38d1
SR
5153 goto out;
5154
607e2ea1
SRRH
5155 /* Some tracers are only allowed for the top level buffer */
5156 if (!trace_ok_for_array(t, tr)) {
5157 ret = -EINVAL;
5158 goto out;
5159 }
5160
cf6ab6d9
SRRH
5161 /* If trace pipe files are being read, we can't change the tracer */
5162 if (tr->current_trace->ref) {
5163 ret = -EBUSY;
5164 goto out;
5165 }
5166
9f029e83 5167 trace_branch_disable();
613f04a0 5168
50512ab5 5169 tr->current_trace->enabled--;
613f04a0 5170
2b6080f2
SR
5171 if (tr->current_trace->reset)
5172 tr->current_trace->reset(tr);
34600f0e 5173
12883efb 5174 /* Current trace needs to be nop_trace before synchronize_sched */
2b6080f2 5175 tr->current_trace = &nop_trace;
34600f0e 5176
45ad21ca
SRRH
5177#ifdef CONFIG_TRACER_MAX_TRACE
5178 had_max_tr = tr->allocated_snapshot;
34600f0e
SR
5179
5180 if (had_max_tr && !t->use_max_tr) {
5181 /*
5182 * We need to make sure that the update_max_tr sees that
5183 * current_trace changed to nop_trace to keep it from
5184 * swapping the buffers after we resize it.
5185 * The update_max_tr is called with interrupts disabled
5186 * so a synchronize_sched() is sufficient.
5187 */
5188 synchronize_sched();
3209cff4 5189 free_snapshot(tr);
ef710e10 5190 }
12883efb 5191#endif
12883efb
SRRH
5192
5193#ifdef CONFIG_TRACER_MAX_TRACE
34600f0e 5194 if (t->use_max_tr && !had_max_tr) {
3209cff4 5195 ret = alloc_snapshot(tr);
d60da506
HT
5196 if (ret < 0)
5197 goto out;
ef710e10 5198 }
12883efb 5199#endif
577b785f 5200
1c80025a 5201 if (t->init) {
b6f11df2 5202 ret = tracer_init(t, tr);
1c80025a
FW
5203 if (ret)
5204 goto out;
5205 }
bc0c38d1 5206
2b6080f2 5207 tr->current_trace = t;
50512ab5 5208 tr->current_trace->enabled++;
9f029e83 5209 trace_branch_enable(tr);
bc0c38d1
SR
5210 out:
5211 mutex_unlock(&trace_types_lock);
5212
d9e54076
PZ
5213 return ret;
5214}
5215
5216static ssize_t
5217tracing_set_trace_write(struct file *filp, const char __user *ubuf,
5218 size_t cnt, loff_t *ppos)
5219{
607e2ea1 5220 struct trace_array *tr = filp->private_data;
ee6c2c1b 5221 char buf[MAX_TRACER_SIZE+1];
d9e54076
PZ
5222 int i;
5223 size_t ret;
e6e7a65a
FW
5224 int err;
5225
5226 ret = cnt;
d9e54076 5227
ee6c2c1b
LZ
5228 if (cnt > MAX_TRACER_SIZE)
5229 cnt = MAX_TRACER_SIZE;
d9e54076 5230
4afe6495 5231 if (copy_from_user(buf, ubuf, cnt))
d9e54076
PZ
5232 return -EFAULT;
5233
5234 buf[cnt] = 0;
5235
5236 /* strip ending whitespace. */
5237 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
5238 buf[i] = 0;
5239
607e2ea1 5240 err = tracing_set_tracer(tr, buf);
e6e7a65a
FW
5241 if (err)
5242 return err;
d9e54076 5243
cf8517cf 5244 *ppos += ret;
bc0c38d1 5245
c2931e05 5246 return ret;
bc0c38d1
SR
5247}
5248
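tracing_set_trace_write() strips trailing whitespace from the written name and hands it to tracing_set_tracer(), which also expands the ring buffer on first use and refuses to switch while trace_pipe readers hold a reference. A minimal sketch selecting the nop tracer through current_tracer and reading the name back (path assumed):

/* Select the "nop" tracer, then read current_tracer to confirm. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	char buf[128];
	ssize_t n;
	int fd;

	fd = open("/sys/kernel/tracing/current_tracer", O_WRONLY);
	if (fd < 0) {
		perror("open current_tracer");
		return 1;
	}
	if (write(fd, "nop\n", 4) < 0)
		perror("write current_tracer");
	close(fd);

	fd = open("/sys/kernel/tracing/current_tracer", O_RDONLY);
	if (fd < 0) {
		perror("open current_tracer");
		return 1;
	}
	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		printf("current_tracer is now: %s", buf);
	}
	close(fd);
	return 0;
}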
5249static ssize_t
6508fa76
SF
5250tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
5251 size_t cnt, loff_t *ppos)
bc0c38d1 5252{
bc0c38d1
SR
5253 char buf[64];
5254 int r;
5255
cffae437 5256 r = snprintf(buf, sizeof(buf), "%ld\n",
bc0c38d1 5257 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
cffae437
SR
5258 if (r > sizeof(buf))
5259 r = sizeof(buf);
4bf39a94 5260 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
bc0c38d1
SR
5261}
5262
5263static ssize_t
6508fa76
SF
5264tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
5265 size_t cnt, loff_t *ppos)
bc0c38d1 5266{
5e39841c 5267 unsigned long val;
c6caeeb1 5268 int ret;
bc0c38d1 5269
22fe9b54
PH
5270 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5271 if (ret)
c6caeeb1 5272 return ret;
bc0c38d1
SR
5273
5274 *ptr = val * 1000;
5275
5276 return cnt;
5277}
5278
6508fa76
SF
5279static ssize_t
5280tracing_thresh_read(struct file *filp, char __user *ubuf,
5281 size_t cnt, loff_t *ppos)
5282{
5283 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
5284}
5285
5286static ssize_t
5287tracing_thresh_write(struct file *filp, const char __user *ubuf,
5288 size_t cnt, loff_t *ppos)
5289{
5290 struct trace_array *tr = filp->private_data;
5291 int ret;
5292
5293 mutex_lock(&trace_types_lock);
5294 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
5295 if (ret < 0)
5296 goto out;
5297
5298 if (tr->current_trace->update_thresh) {
5299 ret = tr->current_trace->update_thresh(tr);
5300 if (ret < 0)
5301 goto out;
5302 }
5303
5304 ret = cnt;
5305out:
5306 mutex_unlock(&trace_types_lock);
5307
5308 return ret;
5309}
5310
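/*
 * The "tracing_thresh" file is built on the *_nsecs helpers above:
 * values are written and read in microseconds but stored in
 * nanoseconds (note the "val * 1000" in tracing_nsecs_write()). A
 * sketch, assuming tracefs is mounted at /sys/kernel/tracing:
 *
 *	# echo 100 > /sys/kernel/tracing/tracing_thresh
 *	# cat /sys/kernel/tracing/tracing_thresh
 *	100
 */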
f971cc9a 5311#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
e428abbb 5312
6508fa76
SF
5313static ssize_t
5314tracing_max_lat_read(struct file *filp, char __user *ubuf,
5315 size_t cnt, loff_t *ppos)
5316{
5317 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
5318}
5319
5320static ssize_t
5321tracing_max_lat_write(struct file *filp, const char __user *ubuf,
5322 size_t cnt, loff_t *ppos)
5323{
5324 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
5325}
5326
e428abbb
CG
5327#endif
5328
b3806b43
SR
5329static int tracing_open_pipe(struct inode *inode, struct file *filp)
5330{
15544209 5331 struct trace_array *tr = inode->i_private;
b3806b43 5332 struct trace_iterator *iter;
b04cc6b1 5333 int ret = 0;
b3806b43
SR
5334
5335 if (tracing_disabled)
5336 return -ENODEV;
5337
7b85af63
SRRH
5338 if (trace_array_get(tr) < 0)
5339 return -ENODEV;
5340
b04cc6b1
FW
5341 mutex_lock(&trace_types_lock);
5342
b3806b43
SR
5343 /* create a buffer to store the information to pass to userspace */
5344 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
b04cc6b1
FW
5345 if (!iter) {
5346 ret = -ENOMEM;
f77d09a3 5347 __trace_array_put(tr);
b04cc6b1
FW
5348 goto out;
5349 }
b3806b43 5350
3a161d99 5351 trace_seq_init(&iter->seq);
d716ff71 5352 iter->trace = tr->current_trace;
d7350c3f 5353
4462344e 5354 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
b04cc6b1 5355 ret = -ENOMEM;
d7350c3f 5356 goto fail;
4462344e
RR
5357 }
5358
a309720c 5359 /* trace pipe does not show start of buffer */
4462344e 5360 cpumask_setall(iter->started);
a309720c 5361
983f938a 5362 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
112f38a7
SR
5363 iter->iter_flags |= TRACE_FILE_LAT_FMT;
5364
8be0709f 5365 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
58e8eedf 5366 if (trace_clocks[tr->clock_id].in_ns)
8be0709f
DS
5367 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
5368
15544209
ON
5369 iter->tr = tr;
5370 iter->trace_buffer = &tr->trace_buffer;
5371 iter->cpu_file = tracing_get_cpu(inode);
d7350c3f 5372 mutex_init(&iter->mutex);
b3806b43
SR
5373 filp->private_data = iter;
5374
107bad8b
SR
5375 if (iter->trace->pipe_open)
5376 iter->trace->pipe_open(iter);
107bad8b 5377
b444786f 5378 nonseekable_open(inode, filp);
cf6ab6d9
SRRH
5379
5380 tr->current_trace->ref++;
b04cc6b1
FW
5381out:
5382 mutex_unlock(&trace_types_lock);
5383 return ret;
d7350c3f
FW
5384
5385fail:
5386 kfree(iter->trace);
5387 kfree(iter);
7b85af63 5388 __trace_array_put(tr);
d7350c3f
FW
5389 mutex_unlock(&trace_types_lock);
5390 return ret;
b3806b43
SR
5391}
5392
5393static int tracing_release_pipe(struct inode *inode, struct file *file)
5394{
5395 struct trace_iterator *iter = file->private_data;
15544209 5396 struct trace_array *tr = inode->i_private;
b3806b43 5397
b04cc6b1
FW
5398 mutex_lock(&trace_types_lock);
5399
cf6ab6d9
SRRH
5400 tr->current_trace->ref--;
5401
29bf4a5e 5402 if (iter->trace->pipe_close)
c521efd1
SR
5403 iter->trace->pipe_close(iter);
5404
b04cc6b1
FW
5405 mutex_unlock(&trace_types_lock);
5406
4462344e 5407 free_cpumask_var(iter->started);
d7350c3f 5408 mutex_destroy(&iter->mutex);
b3806b43 5409 kfree(iter);
b3806b43 5410
7b85af63
SRRH
5411 trace_array_put(tr);
5412
b3806b43
SR
5413 return 0;
5414}
5415
2a2cc8f7 5416static unsigned int
cc60cdc9 5417trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
2a2cc8f7 5418{
983f938a
SRRH
5419 struct trace_array *tr = iter->tr;
5420
15693458
SRRH
 5421	 /* Iterators are static; they are either filled or empty */
5422 if (trace_buffer_iter(iter, iter->cpu_file))
5423 return POLLIN | POLLRDNORM;
2a2cc8f7 5424
983f938a 5425 if (tr->trace_flags & TRACE_ITER_BLOCK)
2a2cc8f7
SSP
5426 /*
5427 * Always select as readable when in blocking mode
5428 */
5429 return POLLIN | POLLRDNORM;
15693458 5430 else
12883efb 5431 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
15693458 5432 filp, poll_table);
2a2cc8f7 5433}
2a2cc8f7 5434
cc60cdc9
SR
5435static unsigned int
5436tracing_poll_pipe(struct file *filp, poll_table *poll_table)
5437{
5438 struct trace_iterator *iter = filp->private_data;
5439
5440 return trace_poll(iter, filp, poll_table);
2a2cc8f7
SSP
5441}
5442
d716ff71 5443/* Must be called with iter->mutex held. */
ff98781b 5444static int tracing_wait_pipe(struct file *filp)
b3806b43
SR
5445{
5446 struct trace_iterator *iter = filp->private_data;
8b8b3683 5447 int ret;
b3806b43 5448
b3806b43 5449 while (trace_empty(iter)) {
2dc8f095 5450
107bad8b 5451 if ((filp->f_flags & O_NONBLOCK)) {
ff98781b 5452 return -EAGAIN;
107bad8b 5453 }
2dc8f095 5454
b3806b43 5455 /*
250bfd3d 5456 * We block until we read something and tracing is disabled.
b3806b43
SR
5457 * We still block if tracing is disabled, but we have never
5458 * read anything. This allows a user to cat this file, and
5459 * then enable tracing. But after we have read something,
5460 * we give an EOF when tracing is again disabled.
5461 *
5462 * iter->pos will be 0 if we haven't read anything.
5463 */
10246fa3 5464 if (!tracing_is_on() && iter->pos)
b3806b43 5465 break;
f4874261
SRRH
5466
5467 mutex_unlock(&iter->mutex);
5468
e30f53aa 5469 ret = wait_on_pipe(iter, false);
f4874261
SRRH
5470
5471 mutex_lock(&iter->mutex);
5472
8b8b3683
SRRH
5473 if (ret)
5474 return ret;
b3806b43
SR
5475 }
5476
ff98781b
EGM
5477 return 1;
5478}
5479
5480/*
5481 * Consumer reader.
5482 */
5483static ssize_t
5484tracing_read_pipe(struct file *filp, char __user *ubuf,
5485 size_t cnt, loff_t *ppos)
5486{
5487 struct trace_iterator *iter = filp->private_data;
5488 ssize_t sret;
5489
d7350c3f
FW
5490 /*
 5491	 * Avoid more than one consumer on a single file descriptor.
 5492	 * This is just a matter of trace coherency; the ring buffer itself
5493 * is protected.
5494 */
5495 mutex_lock(&iter->mutex);
1245800c
SRRH
5496
5497 /* return any leftover data */
5498 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
5499 if (sret != -EBUSY)
5500 goto out;
5501
5502 trace_seq_init(&iter->seq);
5503
ff98781b
EGM
5504 if (iter->trace->read) {
5505 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
5506 if (sret)
5507 goto out;
5508 }
5509
5510waitagain:
5511 sret = tracing_wait_pipe(filp);
5512 if (sret <= 0)
5513 goto out;
5514
b3806b43 5515 /* stop when tracing is finished */
ff98781b
EGM
5516 if (trace_empty(iter)) {
5517 sret = 0;
107bad8b 5518 goto out;
ff98781b 5519 }
b3806b43
SR
5520
5521 if (cnt >= PAGE_SIZE)
5522 cnt = PAGE_SIZE - 1;
5523
53d0aa77 5524 /* reset all but tr, trace, and overruns */
53d0aa77
SR
5525 memset(&iter->seq, 0,
5526 sizeof(struct trace_iterator) -
5527 offsetof(struct trace_iterator, seq));
ed5467da 5528 cpumask_clear(iter->started);
4823ed7e 5529 iter->pos = -1;
b3806b43 5530
4f535968 5531 trace_event_read_lock();
7e53bd42 5532 trace_access_lock(iter->cpu_file);
955b61e5 5533 while (trace_find_next_entry_inc(iter) != NULL) {
2c4f035f 5534 enum print_line_t ret;
5ac48378 5535 int save_len = iter->seq.seq.len;
088b1e42 5536
f9896bf3 5537 ret = print_trace_line(iter);
2c4f035f 5538 if (ret == TRACE_TYPE_PARTIAL_LINE) {
088b1e42 5539 /* don't print partial lines */
5ac48378 5540 iter->seq.seq.len = save_len;
b3806b43 5541 break;
088b1e42 5542 }
b91facc3
FW
5543 if (ret != TRACE_TYPE_NO_CONSUME)
5544 trace_consume(iter);
b3806b43 5545
5ac48378 5546 if (trace_seq_used(&iter->seq) >= cnt)
b3806b43 5547 break;
ee5e51f5
JO
5548
5549 /*
5550 * Setting the full flag means we reached the trace_seq buffer
 5551	 * size and should have exited via the partial-line check above.
5552 * One of the trace_seq_* functions is not used properly.
5553 */
5554 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
5555 iter->ent->type);
b3806b43 5556 }
7e53bd42 5557 trace_access_unlock(iter->cpu_file);
4f535968 5558 trace_event_read_unlock();
b3806b43 5559
b3806b43 5560 /* Now copy what we have to the user */
6c6c2796 5561 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
5ac48378 5562 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
f9520750 5563 trace_seq_init(&iter->seq);
9ff4b974
PP
5564
5565 /*
25985edc 5566 * If there was nothing to send to user, in spite of consuming trace
9ff4b974
PP
5567 * entries, go back to wait for more entries.
5568 */
6c6c2796 5569 if (sret == -EBUSY)
9ff4b974 5570 goto waitagain;
b3806b43 5571
107bad8b 5572out:
d7350c3f 5573 mutex_unlock(&iter->mutex);
107bad8b 5574
6c6c2796 5575 return sret;
b3806b43
SR
5576}
5577
3c56819b
EGM
5578static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
5579 unsigned int idx)
5580{
5581 __free_page(spd->pages[idx]);
5582}
5583
28dfef8f 5584static const struct pipe_buf_operations tracing_pipe_buf_ops = {
34cd4998 5585 .can_merge = 0,
34cd4998 5586 .confirm = generic_pipe_buf_confirm,
92fdd98c 5587 .release = generic_pipe_buf_release,
34cd4998
SR
5588 .steal = generic_pipe_buf_steal,
5589 .get = generic_pipe_buf_get,
3c56819b
EGM
5590};
5591
34cd4998 5592static size_t
fa7c7f6e 5593tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
34cd4998
SR
5594{
5595 size_t count;
74f06bb7 5596 int save_len;
34cd4998
SR
5597 int ret;
5598
5599 /* Seq buffer is page-sized, exactly what we need. */
5600 for (;;) {
74f06bb7 5601 save_len = iter->seq.seq.len;
34cd4998 5602 ret = print_trace_line(iter);
74f06bb7
SRRH
5603
5604 if (trace_seq_has_overflowed(&iter->seq)) {
5605 iter->seq.seq.len = save_len;
34cd4998
SR
5606 break;
5607 }
74f06bb7
SRRH
5608
5609 /*
5610 * This should not be hit, because it should only
5611 * be set if the iter->seq overflowed. But check it
5612 * anyway to be safe.
5613 */
34cd4998 5614 if (ret == TRACE_TYPE_PARTIAL_LINE) {
74f06bb7
SRRH
5615 iter->seq.seq.len = save_len;
5616 break;
5617 }
5618
5ac48378 5619 count = trace_seq_used(&iter->seq) - save_len;
74f06bb7
SRRH
5620 if (rem < count) {
5621 rem = 0;
5622 iter->seq.seq.len = save_len;
34cd4998
SR
5623 break;
5624 }
5625
74e7ff8c
LJ
5626 if (ret != TRACE_TYPE_NO_CONSUME)
5627 trace_consume(iter);
34cd4998 5628 rem -= count;
955b61e5 5629 if (!trace_find_next_entry_inc(iter)) {
34cd4998
SR
5630 rem = 0;
5631 iter->ent = NULL;
5632 break;
5633 }
5634 }
5635
5636 return rem;
5637}
5638
3c56819b
EGM
5639static ssize_t tracing_splice_read_pipe(struct file *filp,
5640 loff_t *ppos,
5641 struct pipe_inode_info *pipe,
5642 size_t len,
5643 unsigned int flags)
5644{
35f3d14d
JA
5645 struct page *pages_def[PIPE_DEF_BUFFERS];
5646 struct partial_page partial_def[PIPE_DEF_BUFFERS];
3c56819b
EGM
5647 struct trace_iterator *iter = filp->private_data;
5648 struct splice_pipe_desc spd = {
35f3d14d
JA
5649 .pages = pages_def,
5650 .partial = partial_def,
34cd4998 5651 .nr_pages = 0, /* This gets updated below. */
047fe360 5652 .nr_pages_max = PIPE_DEF_BUFFERS,
34cd4998
SR
5653 .ops = &tracing_pipe_buf_ops,
5654 .spd_release = tracing_spd_release_pipe,
3c56819b
EGM
5655 };
5656 ssize_t ret;
34cd4998 5657 size_t rem;
3c56819b
EGM
5658 unsigned int i;
5659
35f3d14d
JA
5660 if (splice_grow_spd(pipe, &spd))
5661 return -ENOMEM;
5662
d7350c3f 5663 mutex_lock(&iter->mutex);
3c56819b
EGM
5664
5665 if (iter->trace->splice_read) {
5666 ret = iter->trace->splice_read(iter, filp,
5667 ppos, pipe, len, flags);
5668 if (ret)
34cd4998 5669 goto out_err;
3c56819b
EGM
5670 }
5671
5672 ret = tracing_wait_pipe(filp);
5673 if (ret <= 0)
34cd4998 5674 goto out_err;
3c56819b 5675
955b61e5 5676 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
3c56819b 5677 ret = -EFAULT;
34cd4998 5678 goto out_err;
3c56819b
EGM
5679 }
5680
4f535968 5681 trace_event_read_lock();
7e53bd42 5682 trace_access_lock(iter->cpu_file);
4f535968 5683
3c56819b 5684 /* Fill as many pages as possible. */
a786c06d 5685 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
35f3d14d
JA
5686 spd.pages[i] = alloc_page(GFP_KERNEL);
5687 if (!spd.pages[i])
34cd4998 5688 break;
3c56819b 5689
fa7c7f6e 5690 rem = tracing_fill_pipe_page(rem, iter);
3c56819b
EGM
5691
5692 /* Copy the data into the page, so we can start over. */
5693 ret = trace_seq_to_buffer(&iter->seq,
35f3d14d 5694 page_address(spd.pages[i]),
5ac48378 5695 trace_seq_used(&iter->seq));
3c56819b 5696 if (ret < 0) {
35f3d14d 5697 __free_page(spd.pages[i]);
3c56819b
EGM
5698 break;
5699 }
35f3d14d 5700 spd.partial[i].offset = 0;
5ac48378 5701 spd.partial[i].len = trace_seq_used(&iter->seq);
3c56819b 5702
f9520750 5703 trace_seq_init(&iter->seq);
3c56819b
EGM
5704 }
5705
7e53bd42 5706 trace_access_unlock(iter->cpu_file);
4f535968 5707 trace_event_read_unlock();
d7350c3f 5708 mutex_unlock(&iter->mutex);
3c56819b
EGM
5709
5710 spd.nr_pages = i;
5711
a29054d9
SRRH
5712 if (i)
5713 ret = splice_to_pipe(pipe, &spd);
5714 else
5715 ret = 0;
35f3d14d 5716out:
047fe360 5717 splice_shrink_spd(&spd);
35f3d14d 5718 return ret;
3c56819b 5719
34cd4998 5720out_err:
d7350c3f 5721 mutex_unlock(&iter->mutex);
35f3d14d 5722 goto out;
3c56819b
EGM
5723}
5724
a98a3c3f
SR
5725static ssize_t
5726tracing_entries_read(struct file *filp, char __user *ubuf,
5727 size_t cnt, loff_t *ppos)
5728{
0bc392ee
ON
5729 struct inode *inode = file_inode(filp);
5730 struct trace_array *tr = inode->i_private;
5731 int cpu = tracing_get_cpu(inode);
438ced17
VN
5732 char buf[64];
5733 int r = 0;
5734 ssize_t ret;
a98a3c3f 5735
db526ca3 5736 mutex_lock(&trace_types_lock);
438ced17 5737
0bc392ee 5738 if (cpu == RING_BUFFER_ALL_CPUS) {
438ced17
VN
5739 int cpu, buf_size_same;
5740 unsigned long size;
5741
5742 size = 0;
5743 buf_size_same = 1;
5744 /* check if all cpu sizes are same */
5745 for_each_tracing_cpu(cpu) {
5746 /* fill in the size from first enabled cpu */
5747 if (size == 0)
12883efb
SRRH
5748 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
5749 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
438ced17
VN
5750 buf_size_same = 0;
5751 break;
5752 }
5753 }
5754
5755 if (buf_size_same) {
5756 if (!ring_buffer_expanded)
5757 r = sprintf(buf, "%lu (expanded: %lu)\n",
5758 size >> 10,
5759 trace_buf_size >> 10);
5760 else
5761 r = sprintf(buf, "%lu\n", size >> 10);
5762 } else
5763 r = sprintf(buf, "X\n");
5764 } else
0bc392ee 5765 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
438ced17 5766
db526ca3
SR
5767 mutex_unlock(&trace_types_lock);
5768
438ced17
VN
5769 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5770 return ret;
a98a3c3f
SR
5771}
5772
5773static ssize_t
5774tracing_entries_write(struct file *filp, const char __user *ubuf,
5775 size_t cnt, loff_t *ppos)
5776{
0bc392ee
ON
5777 struct inode *inode = file_inode(filp);
5778 struct trace_array *tr = inode->i_private;
a98a3c3f 5779 unsigned long val;
4f271a2a 5780 int ret;
a98a3c3f 5781
22fe9b54
PH
5782 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5783 if (ret)
c6caeeb1 5784 return ret;
a98a3c3f
SR
5785
5786 /* must have at least 1 entry */
5787 if (!val)
5788 return -EINVAL;
5789
1696b2b0
SR
5790 /* value is in KB */
5791 val <<= 10;
0bc392ee 5792 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
4f271a2a
VN
5793 if (ret < 0)
5794 return ret;
a98a3c3f 5795
cf8517cf 5796 *ppos += cnt;
a98a3c3f 5797
4f271a2a
VN
5798 return cnt;
5799}
bf5e6519 5800
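/*
 * The two handlers above back the "buffer_size_kb" files (top level and
 * per_cpu/cpuN). Writes are taken in kilobytes (note the "val <<= 10").
 * A sketch, assuming tracefs is mounted at /sys/kernel/tracing:
 *
 *	# echo 4096 > /sys/kernel/tracing/buffer_size_kb
 *	# cat /sys/kernel/tracing/per_cpu/cpu0/buffer_size_kb
 *	4096
 *
 * If the per-cpu buffers end up with different sizes, the top-level
 * read reports "X" instead of a number.
 */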
f81ab074
VN
5801static ssize_t
5802tracing_total_entries_read(struct file *filp, char __user *ubuf,
5803 size_t cnt, loff_t *ppos)
5804{
5805 struct trace_array *tr = filp->private_data;
5806 char buf[64];
5807 int r, cpu;
5808 unsigned long size = 0, expanded_size = 0;
5809
5810 mutex_lock(&trace_types_lock);
5811 for_each_tracing_cpu(cpu) {
12883efb 5812 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
f81ab074
VN
5813 if (!ring_buffer_expanded)
5814 expanded_size += trace_buf_size >> 10;
5815 }
5816 if (ring_buffer_expanded)
5817 r = sprintf(buf, "%lu\n", size);
5818 else
5819 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
5820 mutex_unlock(&trace_types_lock);
5821
5822 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5823}
5824
4f271a2a
VN
5825static ssize_t
5826tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
5827 size_t cnt, loff_t *ppos)
5828{
5829 /*
 5830	 * There is no need to read what the user has written; this function
 5831	 * only exists so that "echo" into the file does not return an error.
5832 */
5833
5834 *ppos += cnt;
a98a3c3f
SR
5835
5836 return cnt;
5837}
5838
4f271a2a
VN
5839static int
5840tracing_free_buffer_release(struct inode *inode, struct file *filp)
5841{
2b6080f2
SR
5842 struct trace_array *tr = inode->i_private;
5843
cf30cf67 5844 /* disable tracing ? */
983f938a 5845 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
711e1243 5846 tracer_tracing_off(tr);
4f271a2a 5847 /* resize the ring buffer to 0 */
2b6080f2 5848 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
4f271a2a 5849
7b85af63
SRRH
5850 trace_array_put(tr);
5851
4f271a2a
VN
5852 return 0;
5853}
5854
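/*
 * "free_buffer" accepts any write; the real work happens on release:
 * if TRACE_ITER_STOP_ON_FREE is set tracing is turned off, and the
 * ring buffer is resized to zero. So a plain
 *
 *	# echo > /sys/kernel/tracing/free_buffer
 *
 * frees the buffer as soon as the file is closed, while a task that
 * keeps the fd open defers the free until it closes it or exits.
 */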
5bf9a1ee
PP
5855static ssize_t
5856tracing_mark_write(struct file *filp, const char __user *ubuf,
5857 size_t cnt, loff_t *fpos)
5858{
2d71619c 5859 struct trace_array *tr = filp->private_data;
d696b58c
SR
5860 struct ring_buffer_event *event;
5861 struct ring_buffer *buffer;
5862 struct print_entry *entry;
5863 unsigned long irq_flags;
656c7f0d 5864 const char faulted[] = "<faulted>";
d696b58c 5865 ssize_t written;
d696b58c
SR
5866 int size;
5867 int len;
fa32e855 5868
656c7f0d
SRRH
5869/* Used in tracing_mark_raw_write() as well */
5870#define FAULTED_SIZE (sizeof(faulted) - 1) /* '\0' is already accounted for */
5bf9a1ee 5871
c76f0694 5872 if (tracing_disabled)
5bf9a1ee
PP
5873 return -EINVAL;
5874
983f938a 5875 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
5224c3a3
MSB
5876 return -EINVAL;
5877
5bf9a1ee
PP
5878 if (cnt > TRACE_BUF_SIZE)
5879 cnt = TRACE_BUF_SIZE;
5880
d696b58c 5881 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
5bf9a1ee 5882
d696b58c 5883 local_save_flags(irq_flags);
656c7f0d 5884 size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */
d696b58c 5885
656c7f0d
SRRH
 5886	 /* If the write is shorter than "<faulted>", make sure we can still fit that string */
5887 if (cnt < FAULTED_SIZE)
5888 size += FAULTED_SIZE - cnt;
d696b58c 5889
2d71619c 5890 buffer = tr->trace_buffer.buffer;
3e9a8aad
SRRH
5891 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
5892 irq_flags, preempt_count());
656c7f0d 5893 if (unlikely(!event))
d696b58c 5894 /* Ring buffer disabled, return as if not open for write */
656c7f0d 5895 return -EBADF;
d696b58c
SR
5896
5897 entry = ring_buffer_event_data(event);
5898 entry->ip = _THIS_IP_;
5899
656c7f0d
SRRH
5900 len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
5901 if (len) {
5902 memcpy(&entry->buf, faulted, FAULTED_SIZE);
5903 cnt = FAULTED_SIZE;
5904 written = -EFAULT;
c13d2f7c 5905 } else
656c7f0d
SRRH
5906 written = cnt;
5907 len = cnt;
5bf9a1ee 5908
d696b58c
SR
5909 if (entry->buf[cnt - 1] != '\n') {
5910 entry->buf[cnt] = '\n';
5911 entry->buf[cnt + 1] = '\0';
5912 } else
5913 entry->buf[cnt] = '\0';
5914
7ffbd48d 5915 __buffer_unlock_commit(buffer, event);
5bf9a1ee 5916
656c7f0d
SRRH
5917 if (written > 0)
5918 *fpos += written;
5bf9a1ee 5919
fa32e855
SR
5920 return written;
5921}
5922
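/*
 * tracing_mark_write() backs the "trace_marker" file, which lets user
 * space inject annotations into the trace. A sketch, assuming tracefs
 * is mounted at /sys/kernel/tracing:
 *
 *	# echo "hit the slow path" > /sys/kernel/tracing/trace_marker
 *
 * The string shows up as a print event in "trace". If the copy from
 * user space faults, "<faulted>" is logged instead and -EFAULT is
 * returned to the writer.
 */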
5923/* Limit it for now to 3K (including tag) */
5924#define RAW_DATA_MAX_SIZE (1024*3)
5925
5926static ssize_t
5927tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
5928 size_t cnt, loff_t *fpos)
5929{
5930 struct trace_array *tr = filp->private_data;
5931 struct ring_buffer_event *event;
5932 struct ring_buffer *buffer;
5933 struct raw_data_entry *entry;
656c7f0d 5934 const char faulted[] = "<faulted>";
fa32e855 5935 unsigned long irq_flags;
fa32e855 5936 ssize_t written;
fa32e855
SR
5937 int size;
5938 int len;
5939
656c7f0d
SRRH
5940#define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
5941
fa32e855
SR
5942 if (tracing_disabled)
5943 return -EINVAL;
5944
5945 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
5946 return -EINVAL;
5947
5948 /* The marker must at least have a tag id */
5949 if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
5950 return -EINVAL;
5951
5952 if (cnt > TRACE_BUF_SIZE)
5953 cnt = TRACE_BUF_SIZE;
5954
5955 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
5956
fa32e855
SR
5957 local_save_flags(irq_flags);
5958 size = sizeof(*entry) + cnt;
656c7f0d
SRRH
5959 if (cnt < FAULT_SIZE_ID)
5960 size += FAULT_SIZE_ID - cnt;
5961
fa32e855 5962 buffer = tr->trace_buffer.buffer;
3e9a8aad
SRRH
5963 event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
5964 irq_flags, preempt_count());
656c7f0d 5965 if (!event)
fa32e855 5966 /* Ring buffer disabled, return as if not open for write */
656c7f0d 5967 return -EBADF;
fa32e855
SR
5968
5969 entry = ring_buffer_event_data(event);
5970
656c7f0d
SRRH
5971 len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
5972 if (len) {
5973 entry->id = -1;
5974 memcpy(&entry->buf, faulted, FAULTED_SIZE);
5975 written = -EFAULT;
fa32e855 5976 } else
656c7f0d 5977 written = cnt;
fa32e855
SR
5978
5979 __buffer_unlock_commit(buffer, event);
5980
656c7f0d
SRRH
5981 if (written > 0)
5982 *fpos += written;
1aa54bca
MS
5983
5984 return written;
5bf9a1ee
PP
5985}
5986
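/*
 * tracing_mark_raw_write() backs "trace_marker_raw": the payload is
 * binary, must start with an int-sized tag id, and is capped at
 * RAW_DATA_MAX_SIZE (3K including the tag). A hypothetical user-space
 * sketch (the struct layout is chosen only for illustration):
 *
 *	struct { unsigned int id; char data[8]; } rec = { 42, "payload" };
 *	write(fd, &rec, sizeof(rec));	(fd opened on trace_marker_raw)
 */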
13f16d20 5987static int tracing_clock_show(struct seq_file *m, void *v)
5079f326 5988{
2b6080f2 5989 struct trace_array *tr = m->private;
5079f326
Z
5990 int i;
5991
5992 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
13f16d20 5993 seq_printf(m,
5079f326 5994 "%s%s%s%s", i ? " " : "",
2b6080f2
SR
5995 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
5996 i == tr->clock_id ? "]" : "");
13f16d20 5997 seq_putc(m, '\n');
5079f326 5998
13f16d20 5999 return 0;
5079f326
Z
6000}
6001
e1e232ca 6002static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
5079f326 6003{
5079f326
Z
6004 int i;
6005
5079f326
Z
6006 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
6007 if (strcmp(trace_clocks[i].name, clockstr) == 0)
6008 break;
6009 }
6010 if (i == ARRAY_SIZE(trace_clocks))
6011 return -EINVAL;
6012
5079f326
Z
6013 mutex_lock(&trace_types_lock);
6014
2b6080f2
SR
6015 tr->clock_id = i;
6016
12883efb 6017 ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
5079f326 6018
60303ed3
DS
6019 /*
6020 * New clock may not be consistent with the previous clock.
6021 * Reset the buffer so that it doesn't have incomparable timestamps.
6022 */
9457158b 6023 tracing_reset_online_cpus(&tr->trace_buffer);
12883efb
SRRH
6024
6025#ifdef CONFIG_TRACER_MAX_TRACE
6026 if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
6027 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
9457158b 6028 tracing_reset_online_cpus(&tr->max_buffer);
12883efb 6029#endif
60303ed3 6030
5079f326
Z
6031 mutex_unlock(&trace_types_lock);
6032
e1e232ca
SR
6033 return 0;
6034}
6035
6036static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
6037 size_t cnt, loff_t *fpos)
6038{
6039 struct seq_file *m = filp->private_data;
6040 struct trace_array *tr = m->private;
6041 char buf[64];
6042 const char *clockstr;
6043 int ret;
6044
6045 if (cnt >= sizeof(buf))
6046 return -EINVAL;
6047
4afe6495 6048 if (copy_from_user(buf, ubuf, cnt))
e1e232ca
SR
6049 return -EFAULT;
6050
6051 buf[cnt] = 0;
6052
6053 clockstr = strstrip(buf);
6054
6055 ret = tracing_set_clock(tr, clockstr);
6056 if (ret)
6057 return ret;
6058
5079f326
Z
6059 *fpos += cnt;
6060
6061 return cnt;
6062}
6063
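/*
 * The "trace_clock" file lists the available clocks with the current
 * one in brackets; writing a name switches clocks and resets the
 * buffers, since timestamps from different clocks are not comparable:
 *
 *	# cat /sys/kernel/tracing/trace_clock
 *	[local] global counter ...
 *	# echo global > /sys/kernel/tracing/trace_clock
 */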
13f16d20
LZ
6064static int tracing_clock_open(struct inode *inode, struct file *file)
6065{
7b85af63
SRRH
6066 struct trace_array *tr = inode->i_private;
6067 int ret;
6068
13f16d20
LZ
6069 if (tracing_disabled)
6070 return -ENODEV;
2b6080f2 6071
7b85af63
SRRH
6072 if (trace_array_get(tr))
6073 return -ENODEV;
6074
6075 ret = single_open(file, tracing_clock_show, inode->i_private);
6076 if (ret < 0)
6077 trace_array_put(tr);
6078
6079 return ret;
13f16d20
LZ
6080}
6081
6de58e62
SRRH
6082struct ftrace_buffer_info {
6083 struct trace_iterator iter;
6084 void *spare;
73a757e6 6085 unsigned int spare_cpu;
6de58e62
SRRH
6086 unsigned int read;
6087};
6088
debdd57f
HT
6089#ifdef CONFIG_TRACER_SNAPSHOT
6090static int tracing_snapshot_open(struct inode *inode, struct file *file)
6091{
6484c71c 6092 struct trace_array *tr = inode->i_private;
debdd57f 6093 struct trace_iterator *iter;
2b6080f2 6094 struct seq_file *m;
debdd57f
HT
6095 int ret = 0;
6096
ff451961
SRRH
6097 if (trace_array_get(tr) < 0)
6098 return -ENODEV;
6099
debdd57f 6100 if (file->f_mode & FMODE_READ) {
6484c71c 6101 iter = __tracing_open(inode, file, true);
debdd57f
HT
6102 if (IS_ERR(iter))
6103 ret = PTR_ERR(iter);
2b6080f2
SR
6104 } else {
6105 /* Writes still need the seq_file to hold the private data */
f77d09a3 6106 ret = -ENOMEM;
2b6080f2
SR
6107 m = kzalloc(sizeof(*m), GFP_KERNEL);
6108 if (!m)
f77d09a3 6109 goto out;
2b6080f2
SR
6110 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
6111 if (!iter) {
6112 kfree(m);
f77d09a3 6113 goto out;
2b6080f2 6114 }
f77d09a3
AL
6115 ret = 0;
6116
ff451961 6117 iter->tr = tr;
6484c71c
ON
6118 iter->trace_buffer = &tr->max_buffer;
6119 iter->cpu_file = tracing_get_cpu(inode);
2b6080f2
SR
6120 m->private = iter;
6121 file->private_data = m;
debdd57f 6122 }
f77d09a3 6123out:
ff451961
SRRH
6124 if (ret < 0)
6125 trace_array_put(tr);
6126
debdd57f
HT
6127 return ret;
6128}
6129
6130static ssize_t
6131tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
6132 loff_t *ppos)
6133{
2b6080f2
SR
6134 struct seq_file *m = filp->private_data;
6135 struct trace_iterator *iter = m->private;
6136 struct trace_array *tr = iter->tr;
debdd57f
HT
6137 unsigned long val;
6138 int ret;
6139
6140 ret = tracing_update_buffers();
6141 if (ret < 0)
6142 return ret;
6143
6144 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6145 if (ret)
6146 return ret;
6147
6148 mutex_lock(&trace_types_lock);
6149
2b6080f2 6150 if (tr->current_trace->use_max_tr) {
debdd57f
HT
6151 ret = -EBUSY;
6152 goto out;
6153 }
6154
6155 switch (val) {
6156 case 0:
f1affcaa
SRRH
6157 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
6158 ret = -EINVAL;
6159 break;
debdd57f 6160 }
3209cff4
SRRH
6161 if (tr->allocated_snapshot)
6162 free_snapshot(tr);
debdd57f
HT
6163 break;
6164 case 1:
f1affcaa
SRRH
6165/* Only allow per-cpu swap if the ring buffer supports it */
6166#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
6167 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
6168 ret = -EINVAL;
6169 break;
6170 }
6171#endif
45ad21ca 6172 if (!tr->allocated_snapshot) {
3209cff4 6173 ret = alloc_snapshot(tr);
debdd57f
HT
6174 if (ret < 0)
6175 break;
debdd57f 6176 }
debdd57f
HT
6177 local_irq_disable();
6178 /* Now, we're going to swap */
f1affcaa 6179 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
ce9bae55 6180 update_max_tr(tr, current, smp_processor_id());
f1affcaa 6181 else
ce9bae55 6182 update_max_tr_single(tr, current, iter->cpu_file);
debdd57f
HT
6183 local_irq_enable();
6184 break;
6185 default:
45ad21ca 6186 if (tr->allocated_snapshot) {
f1affcaa
SRRH
6187 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
6188 tracing_reset_online_cpus(&tr->max_buffer);
6189 else
6190 tracing_reset(&tr->max_buffer, iter->cpu_file);
6191 }
debdd57f
HT
6192 break;
6193 }
6194
6195 if (ret >= 0) {
6196 *ppos += cnt;
6197 ret = cnt;
6198 }
6199out:
6200 mutex_unlock(&trace_types_lock);
6201 return ret;
6202}
2b6080f2
SR
6203
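/*
 * The "snapshot" file drives the switch statement above: writing 0
 * frees the snapshot buffer, writing 1 allocates it (if needed) and
 * swaps it with the live buffer, and any other value just clears the
 * snapshot. A sketch, assuming tracefs is mounted at /sys/kernel/tracing:
 *
 *	# echo 1 > /sys/kernel/tracing/snapshot		(take a snapshot)
 *	# cat /sys/kernel/tracing/snapshot		(read it back)
 *	# echo 0 > /sys/kernel/tracing/snapshot		(free the buffer)
 */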
6204static int tracing_snapshot_release(struct inode *inode, struct file *file)
6205{
6206 struct seq_file *m = file->private_data;
ff451961
SRRH
6207 int ret;
6208
6209 ret = tracing_release(inode, file);
2b6080f2
SR
6210
6211 if (file->f_mode & FMODE_READ)
ff451961 6212 return ret;
2b6080f2
SR
6213
6214 /* If write only, the seq_file is just a stub */
6215 if (m)
6216 kfree(m->private);
6217 kfree(m);
6218
6219 return 0;
6220}
6221
6de58e62
SRRH
6222static int tracing_buffers_open(struct inode *inode, struct file *filp);
6223static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
6224 size_t count, loff_t *ppos);
6225static int tracing_buffers_release(struct inode *inode, struct file *file);
6226static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
6227 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
6228
6229static int snapshot_raw_open(struct inode *inode, struct file *filp)
6230{
6231 struct ftrace_buffer_info *info;
6232 int ret;
6233
6234 ret = tracing_buffers_open(inode, filp);
6235 if (ret < 0)
6236 return ret;
6237
6238 info = filp->private_data;
6239
6240 if (info->iter.trace->use_max_tr) {
6241 tracing_buffers_release(inode, filp);
6242 return -EBUSY;
6243 }
6244
6245 info->iter.snapshot = true;
6246 info->iter.trace_buffer = &info->iter.tr->max_buffer;
6247
6248 return ret;
6249}
6250
debdd57f
HT
6251#endif /* CONFIG_TRACER_SNAPSHOT */
6252
6253
6508fa76
SF
6254static const struct file_operations tracing_thresh_fops = {
6255 .open = tracing_open_generic,
6256 .read = tracing_thresh_read,
6257 .write = tracing_thresh_write,
6258 .llseek = generic_file_llseek,
6259};
6260
f971cc9a 6261#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
5e2336a0 6262static const struct file_operations tracing_max_lat_fops = {
4bf39a94
IM
6263 .open = tracing_open_generic,
6264 .read = tracing_max_lat_read,
6265 .write = tracing_max_lat_write,
b444786f 6266 .llseek = generic_file_llseek,
bc0c38d1 6267};
e428abbb 6268#endif
bc0c38d1 6269
5e2336a0 6270static const struct file_operations set_tracer_fops = {
4bf39a94
IM
6271 .open = tracing_open_generic,
6272 .read = tracing_set_trace_read,
6273 .write = tracing_set_trace_write,
b444786f 6274 .llseek = generic_file_llseek,
bc0c38d1
SR
6275};
6276
5e2336a0 6277static const struct file_operations tracing_pipe_fops = {
4bf39a94 6278 .open = tracing_open_pipe,
2a2cc8f7 6279 .poll = tracing_poll_pipe,
4bf39a94 6280 .read = tracing_read_pipe,
3c56819b 6281 .splice_read = tracing_splice_read_pipe,
4bf39a94 6282 .release = tracing_release_pipe,
b444786f 6283 .llseek = no_llseek,
b3806b43
SR
6284};
6285
5e2336a0 6286static const struct file_operations tracing_entries_fops = {
0bc392ee 6287 .open = tracing_open_generic_tr,
a98a3c3f
SR
6288 .read = tracing_entries_read,
6289 .write = tracing_entries_write,
b444786f 6290 .llseek = generic_file_llseek,
0bc392ee 6291 .release = tracing_release_generic_tr,
a98a3c3f
SR
6292};
6293
f81ab074 6294static const struct file_operations tracing_total_entries_fops = {
7b85af63 6295 .open = tracing_open_generic_tr,
f81ab074
VN
6296 .read = tracing_total_entries_read,
6297 .llseek = generic_file_llseek,
7b85af63 6298 .release = tracing_release_generic_tr,
f81ab074
VN
6299};
6300
4f271a2a 6301static const struct file_operations tracing_free_buffer_fops = {
7b85af63 6302 .open = tracing_open_generic_tr,
4f271a2a
VN
6303 .write = tracing_free_buffer_write,
6304 .release = tracing_free_buffer_release,
6305};
6306
5e2336a0 6307static const struct file_operations tracing_mark_fops = {
7b85af63 6308 .open = tracing_open_generic_tr,
5bf9a1ee 6309 .write = tracing_mark_write,
b444786f 6310 .llseek = generic_file_llseek,
7b85af63 6311 .release = tracing_release_generic_tr,
5bf9a1ee
PP
6312};
6313
fa32e855
SR
6314static const struct file_operations tracing_mark_raw_fops = {
6315 .open = tracing_open_generic_tr,
6316 .write = tracing_mark_raw_write,
6317 .llseek = generic_file_llseek,
6318 .release = tracing_release_generic_tr,
6319};
6320
5079f326 6321static const struct file_operations trace_clock_fops = {
13f16d20
LZ
6322 .open = tracing_clock_open,
6323 .read = seq_read,
6324 .llseek = seq_lseek,
7b85af63 6325 .release = tracing_single_release_tr,
5079f326
Z
6326 .write = tracing_clock_write,
6327};
6328
debdd57f
HT
6329#ifdef CONFIG_TRACER_SNAPSHOT
6330static const struct file_operations snapshot_fops = {
6331 .open = tracing_snapshot_open,
6332 .read = seq_read,
6333 .write = tracing_snapshot_write,
098c879e 6334 .llseek = tracing_lseek,
2b6080f2 6335 .release = tracing_snapshot_release,
debdd57f 6336};
debdd57f 6337
6de58e62
SRRH
6338static const struct file_operations snapshot_raw_fops = {
6339 .open = snapshot_raw_open,
6340 .read = tracing_buffers_read,
6341 .release = tracing_buffers_release,
6342 .splice_read = tracing_buffers_splice_read,
6343 .llseek = no_llseek,
2cadf913
SR
6344};
6345
6de58e62
SRRH
6346#endif /* CONFIG_TRACER_SNAPSHOT */
6347
2cadf913
SR
6348static int tracing_buffers_open(struct inode *inode, struct file *filp)
6349{
46ef2be0 6350 struct trace_array *tr = inode->i_private;
2cadf913 6351 struct ftrace_buffer_info *info;
7b85af63 6352 int ret;
2cadf913
SR
6353
6354 if (tracing_disabled)
6355 return -ENODEV;
6356
7b85af63
SRRH
6357 if (trace_array_get(tr) < 0)
6358 return -ENODEV;
6359
2cadf913 6360 info = kzalloc(sizeof(*info), GFP_KERNEL);
7b85af63
SRRH
6361 if (!info) {
6362 trace_array_put(tr);
2cadf913 6363 return -ENOMEM;
7b85af63 6364 }
2cadf913 6365
a695cb58
SRRH
6366 mutex_lock(&trace_types_lock);
6367
cc60cdc9 6368 info->iter.tr = tr;
46ef2be0 6369 info->iter.cpu_file = tracing_get_cpu(inode);
b627344f 6370 info->iter.trace = tr->current_trace;
12883efb 6371 info->iter.trace_buffer = &tr->trace_buffer;
cc60cdc9 6372 info->spare = NULL;
2cadf913 6373 /* Force reading ring buffer for first read */
cc60cdc9 6374 info->read = (unsigned int)-1;
2cadf913
SR
6375
6376 filp->private_data = info;
6377
cf6ab6d9
SRRH
6378 tr->current_trace->ref++;
6379
a695cb58
SRRH
6380 mutex_unlock(&trace_types_lock);
6381
7b85af63
SRRH
6382 ret = nonseekable_open(inode, filp);
6383 if (ret < 0)
6384 trace_array_put(tr);
6385
6386 return ret;
2cadf913
SR
6387}
6388
cc60cdc9
SR
6389static unsigned int
6390tracing_buffers_poll(struct file *filp, poll_table *poll_table)
6391{
6392 struct ftrace_buffer_info *info = filp->private_data;
6393 struct trace_iterator *iter = &info->iter;
6394
6395 return trace_poll(iter, filp, poll_table);
6396}
6397
2cadf913
SR
6398static ssize_t
6399tracing_buffers_read(struct file *filp, char __user *ubuf,
6400 size_t count, loff_t *ppos)
6401{
6402 struct ftrace_buffer_info *info = filp->private_data;
cc60cdc9 6403 struct trace_iterator *iter = &info->iter;
2cadf913 6404 ssize_t ret;
6de58e62 6405 ssize_t size;
2cadf913 6406
2dc5d12b
SR
6407 if (!count)
6408 return 0;
6409
6de58e62 6410#ifdef CONFIG_TRACER_MAX_TRACE
d716ff71
SRRH
6411 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
6412 return -EBUSY;
6de58e62
SRRH
6413#endif
6414
73a757e6 6415 if (!info->spare) {
12883efb
SRRH
6416 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
6417 iter->cpu_file);
73a757e6
SRV
6418 info->spare_cpu = iter->cpu_file;
6419 }
ddd538f3 6420 if (!info->spare)
d716ff71 6421 return -ENOMEM;
ddd538f3 6422
2cadf913
SR
6423 /* Do we have previous read data to read? */
6424 if (info->read < PAGE_SIZE)
6425 goto read;
6426
b627344f 6427 again:
cc60cdc9 6428 trace_access_lock(iter->cpu_file);
12883efb 6429 ret = ring_buffer_read_page(iter->trace_buffer->buffer,
2cadf913
SR
6430 &info->spare,
6431 count,
cc60cdc9
SR
6432 iter->cpu_file, 0);
6433 trace_access_unlock(iter->cpu_file);
2cadf913 6434
b627344f
SR
6435 if (ret < 0) {
6436 if (trace_empty(iter)) {
d716ff71
SRRH
6437 if ((filp->f_flags & O_NONBLOCK))
6438 return -EAGAIN;
6439
e30f53aa 6440 ret = wait_on_pipe(iter, false);
d716ff71
SRRH
6441 if (ret)
6442 return ret;
6443
b627344f
SR
6444 goto again;
6445 }
d716ff71 6446 return 0;
b627344f 6447 }
436fc280 6448
436fc280 6449 info->read = 0;
b627344f 6450 read:
2cadf913
SR
6451 size = PAGE_SIZE - info->read;
6452 if (size > count)
6453 size = count;
6454
6455 ret = copy_to_user(ubuf, info->spare + info->read, size);
d716ff71
SRRH
6456 if (ret == size)
6457 return -EFAULT;
6458
2dc5d12b
SR
6459 size -= ret;
6460
2cadf913
SR
6461 *ppos += size;
6462 info->read += size;
6463
6464 return size;
6465}
6466
6467static int tracing_buffers_release(struct inode *inode, struct file *file)
6468{
6469 struct ftrace_buffer_info *info = file->private_data;
cc60cdc9 6470 struct trace_iterator *iter = &info->iter;
2cadf913 6471
a695cb58
SRRH
6472 mutex_lock(&trace_types_lock);
6473
cf6ab6d9
SRRH
6474 iter->tr->current_trace->ref--;
6475
ff451961 6476 __trace_array_put(iter->tr);
2cadf913 6477
ddd538f3 6478 if (info->spare)
73a757e6
SRV
6479 ring_buffer_free_read_page(iter->trace_buffer->buffer,
6480 info->spare_cpu, info->spare);
2cadf913
SR
6481 kfree(info);
6482
a695cb58
SRRH
6483 mutex_unlock(&trace_types_lock);
6484
2cadf913
SR
6485 return 0;
6486}
6487
6488struct buffer_ref {
6489 struct ring_buffer *buffer;
6490 void *page;
73a757e6 6491 int cpu;
2cadf913
SR
6492 int ref;
6493};
6494
6495static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
6496 struct pipe_buffer *buf)
6497{
6498 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
6499
6500 if (--ref->ref)
6501 return;
6502
73a757e6 6503 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
2cadf913
SR
6504 kfree(ref);
6505 buf->private = 0;
6506}
6507
2cadf913
SR
6508static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
6509 struct pipe_buffer *buf)
6510{
6511 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
6512
6513 ref->ref++;
6514}
6515
6516/* Pipe buffer operations for a buffer. */
28dfef8f 6517static const struct pipe_buf_operations buffer_pipe_buf_ops = {
2cadf913 6518 .can_merge = 0,
2cadf913
SR
6519 .confirm = generic_pipe_buf_confirm,
6520 .release = buffer_pipe_buf_release,
d55cb6cf 6521 .steal = generic_pipe_buf_steal,
2cadf913
SR
6522 .get = buffer_pipe_buf_get,
6523};
6524
6525/*
 6526	 * Callback from splice_to_pipe(): release any pages left at the end
 6527	 * of the spd if we errored out while filling the pipe.
6528 */
6529static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
6530{
6531 struct buffer_ref *ref =
6532 (struct buffer_ref *)spd->partial[i].private;
6533
6534 if (--ref->ref)
6535 return;
6536
73a757e6 6537 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
2cadf913
SR
6538 kfree(ref);
6539 spd->partial[i].private = 0;
6540}
6541
6542static ssize_t
6543tracing_buffers_splice_read(struct file *file, loff_t *ppos,
6544 struct pipe_inode_info *pipe, size_t len,
6545 unsigned int flags)
6546{
6547 struct ftrace_buffer_info *info = file->private_data;
cc60cdc9 6548 struct trace_iterator *iter = &info->iter;
35f3d14d
JA
6549 struct partial_page partial_def[PIPE_DEF_BUFFERS];
6550 struct page *pages_def[PIPE_DEF_BUFFERS];
2cadf913 6551 struct splice_pipe_desc spd = {
35f3d14d
JA
6552 .pages = pages_def,
6553 .partial = partial_def,
047fe360 6554 .nr_pages_max = PIPE_DEF_BUFFERS,
2cadf913
SR
6555 .ops = &buffer_pipe_buf_ops,
6556 .spd_release = buffer_spd_release,
6557 };
6558 struct buffer_ref *ref;
93459c6c 6559 int entries, size, i;
07906da7 6560 ssize_t ret = 0;
2cadf913 6561
6de58e62 6562#ifdef CONFIG_TRACER_MAX_TRACE
d716ff71
SRRH
6563 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
6564 return -EBUSY;
6de58e62
SRRH
6565#endif
6566
d716ff71
SRRH
6567 if (*ppos & (PAGE_SIZE - 1))
6568 return -EINVAL;
93cfb3c9
LJ
6569
6570 if (len & (PAGE_SIZE - 1)) {
d716ff71
SRRH
6571 if (len < PAGE_SIZE)
6572 return -EINVAL;
93cfb3c9
LJ
6573 len &= PAGE_MASK;
6574 }
6575
1ae2293d
AV
6576 if (splice_grow_spd(pipe, &spd))
6577 return -ENOMEM;
6578
cc60cdc9
SR
6579 again:
6580 trace_access_lock(iter->cpu_file);
12883efb 6581 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
93459c6c 6582
a786c06d 6583 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
2cadf913
SR
6584 struct page *page;
6585 int r;
6586
6587 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
07906da7
RV
6588 if (!ref) {
6589 ret = -ENOMEM;
2cadf913 6590 break;
07906da7 6591 }
2cadf913 6592
7267fa68 6593 ref->ref = 1;
12883efb 6594 ref->buffer = iter->trace_buffer->buffer;
cc60cdc9 6595 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
2cadf913 6596 if (!ref->page) {
07906da7 6597 ret = -ENOMEM;
2cadf913
SR
6598 kfree(ref);
6599 break;
6600 }
73a757e6 6601 ref->cpu = iter->cpu_file;
2cadf913
SR
6602
6603 r = ring_buffer_read_page(ref->buffer, &ref->page,
cc60cdc9 6604 len, iter->cpu_file, 1);
2cadf913 6605 if (r < 0) {
73a757e6
SRV
6606 ring_buffer_free_read_page(ref->buffer, ref->cpu,
6607 ref->page);
2cadf913
SR
6608 kfree(ref);
6609 break;
6610 }
6611
6612 /*
 6613	 * Zero out any leftover data, since this page is
 6614	 * going to user land.
6615 */
6616 size = ring_buffer_page_len(ref->page);
6617 if (size < PAGE_SIZE)
6618 memset(ref->page + size, 0, PAGE_SIZE - size);
6619
6620 page = virt_to_page(ref->page);
6621
6622 spd.pages[i] = page;
6623 spd.partial[i].len = PAGE_SIZE;
6624 spd.partial[i].offset = 0;
6625 spd.partial[i].private = (unsigned long)ref;
6626 spd.nr_pages++;
93cfb3c9 6627 *ppos += PAGE_SIZE;
93459c6c 6628
12883efb 6629 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
2cadf913
SR
6630 }
6631
cc60cdc9 6632 trace_access_unlock(iter->cpu_file);
2cadf913
SR
6633 spd.nr_pages = i;
6634
6635 /* did we read anything? */
6636 if (!spd.nr_pages) {
07906da7 6637 if (ret)
1ae2293d 6638 goto out;
d716ff71 6639
1ae2293d 6640 ret = -EAGAIN;
d716ff71 6641 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
1ae2293d 6642 goto out;
07906da7 6643
e30f53aa 6644 ret = wait_on_pipe(iter, true);
8b8b3683 6645 if (ret)
1ae2293d 6646 goto out;
e30f53aa 6647
cc60cdc9 6648 goto again;
2cadf913
SR
6649 }
6650
6651 ret = splice_to_pipe(pipe, &spd);
1ae2293d 6652out:
047fe360 6653 splice_shrink_spd(&spd);
6de58e62 6654
2cadf913
SR
6655 return ret;
6656}
6657
6658static const struct file_operations tracing_buffers_fops = {
6659 .open = tracing_buffers_open,
6660 .read = tracing_buffers_read,
cc60cdc9 6661 .poll = tracing_buffers_poll,
2cadf913
SR
6662 .release = tracing_buffers_release,
6663 .splice_read = tracing_buffers_splice_read,
6664 .llseek = no_llseek,
6665};
6666
c8d77183
SR
6667static ssize_t
6668tracing_stats_read(struct file *filp, char __user *ubuf,
6669 size_t count, loff_t *ppos)
6670{
4d3435b8
ON
6671 struct inode *inode = file_inode(filp);
6672 struct trace_array *tr = inode->i_private;
12883efb 6673 struct trace_buffer *trace_buf = &tr->trace_buffer;
4d3435b8 6674 int cpu = tracing_get_cpu(inode);
c8d77183
SR
6675 struct trace_seq *s;
6676 unsigned long cnt;
c64e148a
VN
6677 unsigned long long t;
6678 unsigned long usec_rem;
c8d77183 6679
e4f2d10f 6680 s = kmalloc(sizeof(*s), GFP_KERNEL);
c8d77183 6681 if (!s)
a646365c 6682 return -ENOMEM;
c8d77183
SR
6683
6684 trace_seq_init(s);
6685
12883efb 6686 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
c8d77183
SR
6687 trace_seq_printf(s, "entries: %ld\n", cnt);
6688
12883efb 6689 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
c8d77183
SR
6690 trace_seq_printf(s, "overrun: %ld\n", cnt);
6691
12883efb 6692 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
c8d77183
SR
6693 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
6694
12883efb 6695 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
c64e148a
VN
6696 trace_seq_printf(s, "bytes: %ld\n", cnt);
6697
58e8eedf 6698 if (trace_clocks[tr->clock_id].in_ns) {
11043d8b 6699 /* local or global for trace_clock */
12883efb 6700 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
11043d8b
YY
6701 usec_rem = do_div(t, USEC_PER_SEC);
6702 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
6703 t, usec_rem);
6704
12883efb 6705 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
11043d8b
YY
6706 usec_rem = do_div(t, USEC_PER_SEC);
6707 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
6708 } else {
6709 /* counter or tsc mode for trace_clock */
6710 trace_seq_printf(s, "oldest event ts: %llu\n",
12883efb 6711 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
c64e148a 6712
11043d8b 6713 trace_seq_printf(s, "now ts: %llu\n",
12883efb 6714 ring_buffer_time_stamp(trace_buf->buffer, cpu));
11043d8b 6715 }
c64e148a 6716
12883efb 6717 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
884bfe89
SP
6718 trace_seq_printf(s, "dropped events: %ld\n", cnt);
6719
12883efb 6720 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
ad964704
SRRH
6721 trace_seq_printf(s, "read events: %ld\n", cnt);
6722
5ac48378
SRRH
6723 count = simple_read_from_buffer(ubuf, count, ppos,
6724 s->buffer, trace_seq_used(s));
c8d77183
SR
6725
6726 kfree(s);
6727
6728 return count;
6729}
6730
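/*
 * Example of what per_cpu/cpuN/stats produces (field names taken from
 * the trace_seq_printf() calls above; the values are illustrative):
 *
 *	entries: 1024
 *	overrun: 0
 *	commit overrun: 0
 *	bytes: 53248
 *	oldest event ts:  5281.906942
 *	now ts:  5282.000314
 *	dropped events: 0
 *	read events: 512
 */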
6731static const struct file_operations tracing_stats_fops = {
4d3435b8 6732 .open = tracing_open_generic_tr,
c8d77183 6733 .read = tracing_stats_read,
b444786f 6734 .llseek = generic_file_llseek,
4d3435b8 6735 .release = tracing_release_generic_tr,
c8d77183
SR
6736};
6737
bc0c38d1
SR
6738#ifdef CONFIG_DYNAMIC_FTRACE
6739
6740static ssize_t
b807c3d0 6741tracing_read_dyn_info(struct file *filp, char __user *ubuf,
bc0c38d1
SR
6742 size_t cnt, loff_t *ppos)
6743{
6744 unsigned long *p = filp->private_data;
6a9c981b 6745 char buf[64]; /* Not too big for a shallow stack */
bc0c38d1
SR
6746 int r;
6747
6a9c981b 6748 r = scnprintf(buf, 63, "%ld", *p);
b807c3d0
SR
6749 buf[r++] = '\n';
6750
6a9c981b 6751 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
bc0c38d1
SR
6752}
6753
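/*
 * tracing_read_dyn_info() prints the counter it is handed as file data
 * as a single decimal number; it backs "dyn_ftrace_total_info", the
 * running count of functions recorded by dynamic ftrace (the value
 * below is illustrative):
 *
 *	# cat /sys/kernel/tracing/dyn_ftrace_total_info
 *	45678
 */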
5e2336a0 6754static const struct file_operations tracing_dyn_info_fops = {
4bf39a94 6755 .open = tracing_open_generic,
b807c3d0 6756 .read = tracing_read_dyn_info,
b444786f 6757 .llseek = generic_file_llseek,
bc0c38d1 6758};
77fd5c15 6759#endif /* CONFIG_DYNAMIC_FTRACE */
bc0c38d1 6760
77fd5c15
SRRH
6761#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
6762static void
bca6c8d0 6763ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
b5f081b5 6764 struct trace_array *tr, struct ftrace_probe_ops *ops,
6e444319 6765 void *data)
77fd5c15 6766{
cab50379 6767 tracing_snapshot_instance(tr);
77fd5c15 6768}
bc0c38d1 6769
77fd5c15 6770static void
bca6c8d0 6771ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
b5f081b5 6772 struct trace_array *tr, struct ftrace_probe_ops *ops,
6e444319 6773 void *data)
bc0c38d1 6774{
6e444319 6775 struct ftrace_func_mapper *mapper = data;
1a93f8bd 6776 long *count = NULL;
77fd5c15 6777
1a93f8bd
SRV
6778 if (mapper)
6779 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
6780
6781 if (count) {
6782
6783 if (*count <= 0)
6784 return;
bc0c38d1 6785
77fd5c15 6786 (*count)--;
1a93f8bd 6787 }
77fd5c15 6788
cab50379 6789 tracing_snapshot_instance(tr);
77fd5c15
SRRH
6790}
6791
6792static int
6793ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
6794 struct ftrace_probe_ops *ops, void *data)
6795{
6e444319 6796 struct ftrace_func_mapper *mapper = data;
1a93f8bd 6797 long *count = NULL;
77fd5c15
SRRH
6798
6799 seq_printf(m, "%ps:", (void *)ip);
6800
fa6f0cc7 6801 seq_puts(m, "snapshot");
77fd5c15 6802
1a93f8bd
SRV
6803 if (mapper)
6804 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
6805
6806 if (count)
6807 seq_printf(m, ":count=%ld\n", *count);
77fd5c15 6808 else
1a93f8bd 6809 seq_puts(m, ":unlimited\n");
77fd5c15
SRRH
6810
6811 return 0;
6812}
6813
1a93f8bd 6814static int
b5f081b5 6815ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
6e444319 6816 unsigned long ip, void *init_data, void **data)
1a93f8bd 6817{
6e444319
SRV
6818 struct ftrace_func_mapper *mapper = *data;
6819
6820 if (!mapper) {
6821 mapper = allocate_ftrace_func_mapper();
6822 if (!mapper)
6823 return -ENOMEM;
6824 *data = mapper;
6825 }
1a93f8bd 6826
6e444319 6827 return ftrace_func_mapper_add_ip(mapper, ip, init_data);
1a93f8bd
SRV
6828}
6829
6830static void
b5f081b5 6831ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
6e444319 6832 unsigned long ip, void *data)
1a93f8bd 6833{
6e444319
SRV
6834 struct ftrace_func_mapper *mapper = data;
6835
6836 if (!ip) {
6837 if (!mapper)
6838 return;
6839 free_ftrace_func_mapper(mapper, NULL);
6840 return;
6841 }
1a93f8bd
SRV
6842
6843 ftrace_func_mapper_remove_ip(mapper, ip);
6844}
6845
77fd5c15
SRRH
6846static struct ftrace_probe_ops snapshot_probe_ops = {
6847 .func = ftrace_snapshot,
6848 .print = ftrace_snapshot_print,
6849};
6850
6851static struct ftrace_probe_ops snapshot_count_probe_ops = {
6852 .func = ftrace_count_snapshot,
6853 .print = ftrace_snapshot_print,
1a93f8bd
SRV
6854 .init = ftrace_snapshot_init,
6855 .free = ftrace_snapshot_free,
77fd5c15
SRRH
6856};
6857
6858static int
04ec7bb6 6859ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
77fd5c15
SRRH
6860 char *glob, char *cmd, char *param, int enable)
6861{
6862 struct ftrace_probe_ops *ops;
6863 void *count = (void *)-1;
6864 char *number;
6865 int ret;
6866
6867 /* hash funcs only work with set_ftrace_filter */
6868 if (!enable)
6869 return -EINVAL;
6870
6871 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
6872
d3d532d7 6873 if (glob[0] == '!')
7b60f3d8 6874 return unregister_ftrace_function_probe_func(glob+1, tr, ops);
77fd5c15
SRRH
6875
6876 if (!param)
6877 goto out_reg;
6878
6879 number = strsep(&param, ":");
6880
6881 if (!strlen(number))
6882 goto out_reg;
6883
6884 /*
6885 * We use the callback data field (which is a pointer)
6886 * as our counter.
6887 */
6888 ret = kstrtoul(number, 0, (unsigned long *)&count);
6889 if (ret)
6890 return ret;
6891
6892 out_reg:
4c174688 6893 ret = alloc_snapshot(tr);
df62db5b
SRV
6894 if (ret < 0)
6895 goto out;
77fd5c15 6896
4c174688 6897 ret = register_ftrace_function_probe(glob, tr, ops, count);
77fd5c15 6898
df62db5b 6899 out:
77fd5c15
SRRH
6900 return ret < 0 ? ret : 0;
6901}
6902
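/*
 * register_snapshot_cmd() below wires this callback up as the
 * "snapshot" command of set_ftrace_filter, so a snapshot can be taken
 * when a given function is hit, optionally a limited number of times
 * (the function name is only an example; run from the tracefs dir):
 *
 *	# echo 'do_page_fault:snapshot' > set_ftrace_filter
 *	# echo 'do_page_fault:snapshot:5' > set_ftrace_filter
 *	# echo '!do_page_fault:snapshot' > set_ftrace_filter
 *
 * The optional ":count" is parsed above and stored as the probe data.
 */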
6903static struct ftrace_func_command ftrace_snapshot_cmd = {
6904 .name = "snapshot",
6905 .func = ftrace_trace_snapshot_callback,
6906};
6907
38de93ab 6908static __init int register_snapshot_cmd(void)
77fd5c15
SRRH
6909{
6910 return register_ftrace_command(&ftrace_snapshot_cmd);
6911}
6912#else
38de93ab 6913static inline __init int register_snapshot_cmd(void) { return 0; }
77fd5c15 6914#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
bc0c38d1 6915
7eeafbca 6916static struct dentry *tracing_get_dentry(struct trace_array *tr)
bc0c38d1 6917{
8434dc93
SRRH
6918 if (WARN_ON(!tr->dir))
6919 return ERR_PTR(-ENODEV);
6920
6921 /* Top directory uses NULL as the parent */
6922 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
6923 return NULL;
6924
6925 /* All sub buffers have a descriptor */
2b6080f2 6926 return tr->dir;
bc0c38d1
SR
6927}
6928
2b6080f2 6929static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
b04cc6b1 6930{
b04cc6b1
FW
6931 struct dentry *d_tracer;
6932
2b6080f2
SR
6933 if (tr->percpu_dir)
6934 return tr->percpu_dir;
b04cc6b1 6935
7eeafbca 6936 d_tracer = tracing_get_dentry(tr);
14a5ae40 6937 if (IS_ERR(d_tracer))
b04cc6b1
FW
6938 return NULL;
6939
8434dc93 6940 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
b04cc6b1 6941
2b6080f2 6942 WARN_ONCE(!tr->percpu_dir,
8434dc93 6943 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
b04cc6b1 6944
2b6080f2 6945 return tr->percpu_dir;
b04cc6b1
FW
6946}
6947
649e9c70
ON
6948static struct dentry *
6949trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
6950 void *data, long cpu, const struct file_operations *fops)
6951{
6952 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
6953
6954 if (ret) /* See tracing_get_cpu() */
7682c918 6955 d_inode(ret)->i_cdev = (void *)(cpu + 1);
649e9c70
ON
6956 return ret;
6957}
6958
2b6080f2 6959static void
8434dc93 6960tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
b04cc6b1 6961{
2b6080f2 6962 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
5452af66 6963 struct dentry *d_cpu;
dd49a38c 6964 char cpu_dir[30]; /* 30 characters should be more than enough */
b04cc6b1 6965
0a3d7ce7
NK
6966 if (!d_percpu)
6967 return;
6968
dd49a38c 6969 snprintf(cpu_dir, 30, "cpu%ld", cpu);
8434dc93 6970 d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
8656e7a2 6971 if (!d_cpu) {
a395d6a7 6972 pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
8656e7a2
FW
6973 return;
6974 }
b04cc6b1 6975
8656e7a2 6976 /* per cpu trace_pipe */
649e9c70 6977 trace_create_cpu_file("trace_pipe", 0444, d_cpu,
15544209 6978 tr, cpu, &tracing_pipe_fops);
b04cc6b1
FW
6979
6980 /* per cpu trace */
649e9c70 6981 trace_create_cpu_file("trace", 0644, d_cpu,
6484c71c 6982 tr, cpu, &tracing_fops);
7f96f93f 6983
649e9c70 6984 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
46ef2be0 6985 tr, cpu, &tracing_buffers_fops);
7f96f93f 6986
649e9c70 6987 trace_create_cpu_file("stats", 0444, d_cpu,
4d3435b8 6988 tr, cpu, &tracing_stats_fops);
438ced17 6989
649e9c70 6990 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
0bc392ee 6991 tr, cpu, &tracing_entries_fops);
f1affcaa
SRRH
6992
6993#ifdef CONFIG_TRACER_SNAPSHOT
649e9c70 6994 trace_create_cpu_file("snapshot", 0644, d_cpu,
6484c71c 6995 tr, cpu, &snapshot_fops);
6de58e62 6996
649e9c70 6997 trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
46ef2be0 6998 tr, cpu, &snapshot_raw_fops);
f1affcaa 6999#endif
b04cc6b1
FW
7000}
7001
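/*
 * The resulting per-CPU layout mirrors the top-level files, e.g.
 * (assuming tracefs is mounted at /sys/kernel/tracing):
 *
 *	/sys/kernel/tracing/per_cpu/cpu0/trace
 *	/sys/kernel/tracing/per_cpu/cpu0/trace_pipe
 *	/sys/kernel/tracing/per_cpu/cpu0/trace_pipe_raw
 *	/sys/kernel/tracing/per_cpu/cpu0/stats
 *	/sys/kernel/tracing/per_cpu/cpu0/buffer_size_kb
 */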
60a11774
SR
7002#ifdef CONFIG_FTRACE_SELFTEST
7003/* Let selftest have access to static functions in this file */
7004#include "trace_selftest.c"
7005#endif
7006
577b785f
SR
7007static ssize_t
7008trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
7009 loff_t *ppos)
7010{
7011 struct trace_option_dentry *topt = filp->private_data;
7012 char *buf;
7013
7014 if (topt->flags->val & topt->opt->bit)
7015 buf = "1\n";
7016 else
7017 buf = "0\n";
7018
7019 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
7020}
7021
7022static ssize_t
7023trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
7024 loff_t *ppos)
7025{
7026 struct trace_option_dentry *topt = filp->private_data;
7027 unsigned long val;
577b785f
SR
7028 int ret;
7029
22fe9b54
PH
7030 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7031 if (ret)
577b785f
SR
7032 return ret;
7033
8d18eaaf
LZ
7034 if (val != 0 && val != 1)
7035 return -EINVAL;
577b785f 7036
8d18eaaf 7037 if (!!(topt->flags->val & topt->opt->bit) != val) {
577b785f 7038 mutex_lock(&trace_types_lock);
8c1a49ae 7039 ret = __set_tracer_option(topt->tr, topt->flags,
c757bea9 7040 topt->opt, !val);
577b785f
SR
7041 mutex_unlock(&trace_types_lock);
7042 if (ret)
7043 return ret;
577b785f
SR
7044 }
7045
7046 *ppos += cnt;
7047
7048 return cnt;
7049}
7050
7051
7052static const struct file_operations trace_options_fops = {
7053 .open = tracing_open_generic,
7054 .read = trace_options_read,
7055 .write = trace_options_write,
b444786f 7056 .llseek = generic_file_llseek,
577b785f
SR
7057};
7058
9a38a885
SRRH
7059/*
7060 * In order to pass in both the trace_array descriptor as well as the index
7061 * to the flag that the trace option file represents, the trace_array
7062 * has a character array of trace_flags_index[], which holds the index
7063 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
7064 * The address of this character array is passed to the flag option file
7065 * read/write callbacks.
7066 *
7067 * In order to extract both the index and the trace_array descriptor,
7068 * get_tr_index() uses the following algorithm.
7069 *
7070 * idx = *ptr;
7071 *
7072 * As the pointer itself contains the address of the index (remember
7073 * index[1] == 1).
7074 *
7075 * Then to get the trace_array descriptor, by subtracting that index
7076 * from the ptr, we get to the start of the index itself.
7077 *
7078 * ptr - idx == &index[0]
7079 *
7080 * Then a simple container_of() from that pointer gets us to the
7081 * trace_array descriptor.
7082 */
7083static void get_tr_index(void *data, struct trace_array **ptr,
7084 unsigned int *pindex)
7085{
7086 *pindex = *(unsigned char *)data;
7087
7088 *ptr = container_of(data - *pindex, struct trace_array,
7089 trace_flags_index);
7090}
7091
a8259075
SR
7092static ssize_t
7093trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
7094 loff_t *ppos)
7095{
9a38a885
SRRH
7096 void *tr_index = filp->private_data;
7097 struct trace_array *tr;
7098 unsigned int index;
a8259075
SR
7099 char *buf;
7100
9a38a885
SRRH
7101 get_tr_index(tr_index, &tr, &index);
7102
7103 if (tr->trace_flags & (1 << index))
a8259075
SR
7104 buf = "1\n";
7105 else
7106 buf = "0\n";
7107
7108 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
7109}
7110
7111static ssize_t
7112trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
7113 loff_t *ppos)
7114{
9a38a885
SRRH
7115 void *tr_index = filp->private_data;
7116 struct trace_array *tr;
7117 unsigned int index;
a8259075
SR
7118 unsigned long val;
7119 int ret;
7120
9a38a885
SRRH
7121 get_tr_index(tr_index, &tr, &index);
7122
22fe9b54
PH
7123 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7124 if (ret)
a8259075
SR
7125 return ret;
7126
f2d84b65 7127 if (val != 0 && val != 1)
a8259075 7128 return -EINVAL;
69d34da2
SRRH
7129
7130 mutex_lock(&trace_types_lock);
2b6080f2 7131 ret = set_tracer_flag(tr, 1 << index, val);
69d34da2 7132 mutex_unlock(&trace_types_lock);
a8259075 7133
613f04a0
SRRH
7134 if (ret < 0)
7135 return ret;
7136
a8259075
SR
7137 *ppos += cnt;
7138
7139 return cnt;
7140}
7141
a8259075
SR
7142static const struct file_operations trace_options_core_fops = {
7143 .open = tracing_open_generic,
7144 .read = trace_options_core_read,
7145 .write = trace_options_core_write,
b444786f 7146 .llseek = generic_file_llseek,
a8259075
SR
7147};
7148
5452af66 7149struct dentry *trace_create_file(const char *name,
f4ae40a6 7150 umode_t mode,
5452af66
FW
7151 struct dentry *parent,
7152 void *data,
7153 const struct file_operations *fops)
7154{
7155 struct dentry *ret;
7156
8434dc93 7157 ret = tracefs_create_file(name, mode, parent, data, fops);
5452af66 7158 if (!ret)
a395d6a7 7159 pr_warn("Could not create tracefs '%s' entry\n", name);
5452af66
FW
7160
7161 return ret;
7162}
7163
7164
2b6080f2 7165static struct dentry *trace_options_init_dentry(struct trace_array *tr)
a8259075
SR
7166{
7167 struct dentry *d_tracer;
a8259075 7168
2b6080f2
SR
7169 if (tr->options)
7170 return tr->options;
a8259075 7171
7eeafbca 7172 d_tracer = tracing_get_dentry(tr);
14a5ae40 7173 if (IS_ERR(d_tracer))
a8259075
SR
7174 return NULL;
7175
8434dc93 7176 tr->options = tracefs_create_dir("options", d_tracer);
2b6080f2 7177 if (!tr->options) {
a395d6a7 7178 pr_warn("Could not create tracefs directory 'options'\n");
a8259075
SR
7179 return NULL;
7180 }
7181
2b6080f2 7182 return tr->options;
a8259075
SR
7183}
7184
577b785f 7185static void
2b6080f2
SR
7186create_trace_option_file(struct trace_array *tr,
7187 struct trace_option_dentry *topt,
577b785f
SR
7188 struct tracer_flags *flags,
7189 struct tracer_opt *opt)
7190{
7191 struct dentry *t_options;
577b785f 7192
2b6080f2 7193 t_options = trace_options_init_dentry(tr);
577b785f
SR
7194 if (!t_options)
7195 return;
7196
7197 topt->flags = flags;
7198 topt->opt = opt;
2b6080f2 7199 topt->tr = tr;
577b785f 7200
5452af66 7201 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
577b785f
SR
7202 &trace_options_fops);
7203
577b785f
SR
7204}
7205
37aea98b 7206static void
2b6080f2 7207create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
577b785f
SR
7208{
7209 struct trace_option_dentry *topts;
37aea98b 7210 struct trace_options *tr_topts;
577b785f
SR
7211 struct tracer_flags *flags;
7212 struct tracer_opt *opts;
7213 int cnt;
37aea98b 7214 int i;
577b785f
SR
7215
7216 if (!tracer)
37aea98b 7217 return;
577b785f
SR
7218
7219 flags = tracer->flags;
7220
7221 if (!flags || !flags->opts)
37aea98b
SRRH
7222 return;
7223
7224 /*
7225 * If this is an instance, only create flags for tracers
7226 * the instance may have.
7227 */
7228 if (!trace_ok_for_array(tracer, tr))
7229 return;
7230
7231 for (i = 0; i < tr->nr_topts; i++) {
d39cdd20
CH
7232 /* Make sure there are no duplicate flags. */
7233 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
37aea98b
SRRH
7234 return;
7235 }
577b785f
SR
7236
7237 opts = flags->opts;
7238
7239 for (cnt = 0; opts[cnt].name; cnt++)
7240 ;
7241
0cfe8245 7242 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
577b785f 7243 if (!topts)
37aea98b
SRRH
7244 return;
7245
7246 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
7247 GFP_KERNEL);
7248 if (!tr_topts) {
7249 kfree(topts);
7250 return;
7251 }
7252
7253 tr->topts = tr_topts;
7254 tr->topts[tr->nr_topts].tracer = tracer;
7255 tr->topts[tr->nr_topts].topts = topts;
7256 tr->nr_topts++;
577b785f 7257
41d9c0be 7258 for (cnt = 0; opts[cnt].name; cnt++) {
2b6080f2 7259 create_trace_option_file(tr, &topts[cnt], flags,
577b785f 7260 &opts[cnt]);
41d9c0be
SRRH
7261 WARN_ONCE(topts[cnt].entry == NULL,
7262 "Failed to create trace option: %s",
7263 opts[cnt].name);
7264 }
577b785f
SR
7265}
7266
a8259075 7267static struct dentry *
2b6080f2
SR
7268create_trace_option_core_file(struct trace_array *tr,
7269 const char *option, long index)
a8259075
SR
7270{
7271 struct dentry *t_options;
a8259075 7272
2b6080f2 7273 t_options = trace_options_init_dentry(tr);
a8259075
SR
7274 if (!t_options)
7275 return NULL;
7276
9a38a885
SRRH
7277 return trace_create_file(option, 0644, t_options,
7278 (void *)&tr->trace_flags_index[index],
7279 &trace_options_core_fops);
a8259075
SR
7280}
7281
16270145 7282static void create_trace_options_dir(struct trace_array *tr)
a8259075
SR
7283{
7284 struct dentry *t_options;
16270145 7285 bool top_level = tr == &global_trace;
a8259075
SR
7286 int i;
7287
2b6080f2 7288 t_options = trace_options_init_dentry(tr);
a8259075
SR
7289 if (!t_options)
7290 return;
7291
16270145
SRRH
7292 for (i = 0; trace_options[i]; i++) {
7293 if (top_level ||
7294 !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
7295 create_trace_option_core_file(tr, trace_options[i], i);
7296 }
a8259075
SR
7297}
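/*
 * The core option files created above are simple boolean knobs under
 * the "options" directory.  A usage sketch (paths are illustrative):
 *
 *   cat /sys/kernel/tracing/options/overwrite
 *   echo 0 > /sys/kernel/tracing/options/overwrite
 *
 * Anything other than '0' or '1' is rejected with -EINVAL by
 * trace_options_core_write().
 */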
7298
499e5470
SR
7299static ssize_t
7300rb_simple_read(struct file *filp, char __user *ubuf,
7301 size_t cnt, loff_t *ppos)
7302{
348f0fc2 7303 struct trace_array *tr = filp->private_data;
499e5470
SR
7304 char buf[64];
7305 int r;
7306
10246fa3 7307 r = tracer_tracing_is_on(tr);
499e5470
SR
7308 r = sprintf(buf, "%d\n", r);
7309
7310 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
7311}
7312
7313static ssize_t
7314rb_simple_write(struct file *filp, const char __user *ubuf,
7315 size_t cnt, loff_t *ppos)
7316{
348f0fc2 7317 struct trace_array *tr = filp->private_data;
12883efb 7318 struct ring_buffer *buffer = tr->trace_buffer.buffer;
499e5470
SR
7319 unsigned long val;
7320 int ret;
7321
7322 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7323 if (ret)
7324 return ret;
7325
7326 if (buffer) {
2df8f8a6
SR
7327 mutex_lock(&trace_types_lock);
7328 if (val) {
10246fa3 7329 tracer_tracing_on(tr);
2b6080f2
SR
7330 if (tr->current_trace->start)
7331 tr->current_trace->start(tr);
2df8f8a6 7332 } else {
10246fa3 7333 tracer_tracing_off(tr);
2b6080f2
SR
7334 if (tr->current_trace->stop)
7335 tr->current_trace->stop(tr);
2df8f8a6
SR
7336 }
7337 mutex_unlock(&trace_types_lock);
499e5470
SR
7338 }
7339
7340 (*ppos)++;
7341
7342 return cnt;
7343}
7344
7345static const struct file_operations rb_simple_fops = {
7b85af63 7346 .open = tracing_open_generic_tr,
499e5470
SR
7347 .read = rb_simple_read,
7348 .write = rb_simple_write,
7b85af63 7349 .release = tracing_release_generic_tr,
499e5470
SR
7350 .llseek = default_llseek,
7351};
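/*
 * rb_simple_fops backs the per-instance "tracing_on" file.  A minimal
 * usage sketch (paths are illustrative):
 *
 *   echo 0 > /sys/kernel/tracing/tracing_on   # stop recording
 *   echo 1 > /sys/kernel/tracing/tracing_on   # resume recording
 *   cat /sys/kernel/tracing/tracing_on        # read the current state
 */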
7352
277ba044
SR
7353struct dentry *trace_instance_dir;
7354
7355static void
8434dc93 7356init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
277ba044 7357
55034cd6
SRRH
7358static int
7359allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
277ba044
SR
7360{
7361 enum ring_buffer_flags rb_flags;
737223fb 7362
983f938a 7363 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
737223fb 7364
dced341b
SRRH
7365 buf->tr = tr;
7366
55034cd6
SRRH
7367 buf->buffer = ring_buffer_alloc(size, rb_flags);
7368 if (!buf->buffer)
7369 return -ENOMEM;
737223fb 7370
55034cd6
SRRH
7371 buf->data = alloc_percpu(struct trace_array_cpu);
7372 if (!buf->data) {
7373 ring_buffer_free(buf->buffer);
7374 return -ENOMEM;
7375 }
737223fb 7376
737223fb
SRRH
7377 /* Allocate the first page for all buffers */
7378 set_buffer_entries(&tr->trace_buffer,
7379 ring_buffer_size(tr->trace_buffer.buffer, 0));
7380
55034cd6
SRRH
7381 return 0;
7382}
737223fb 7383
55034cd6
SRRH
7384static int allocate_trace_buffers(struct trace_array *tr, int size)
7385{
7386 int ret;
737223fb 7387
55034cd6
SRRH
7388 ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
7389 if (ret)
7390 return ret;
737223fb 7391
55034cd6
SRRH
7392#ifdef CONFIG_TRACER_MAX_TRACE
7393 ret = allocate_trace_buffer(tr, &tr->max_buffer,
7394 allocate_snapshot ? size : 1);
7395 if (WARN_ON(ret)) {
737223fb 7396 ring_buffer_free(tr->trace_buffer.buffer);
55034cd6
SRRH
7397 free_percpu(tr->trace_buffer.data);
7398 return -ENOMEM;
7399 }
7400 tr->allocated_snapshot = allocate_snapshot;
737223fb 7401
55034cd6
SRRH
7402 /*
7403 * Only the top level trace array gets its snapshot allocated
7404 * from the kernel command line.
7405 */
7406 allocate_snapshot = false;
737223fb 7407#endif
55034cd6 7408 return 0;
737223fb
SRRH
7409}
7410
f0b70cc4
SRRH
7411static void free_trace_buffer(struct trace_buffer *buf)
7412{
7413 if (buf->buffer) {
7414 ring_buffer_free(buf->buffer);
7415 buf->buffer = NULL;
7416 free_percpu(buf->data);
7417 buf->data = NULL;
7418 }
7419}
7420
23aaa3c1
SRRH
7421static void free_trace_buffers(struct trace_array *tr)
7422{
7423 if (!tr)
7424 return;
7425
f0b70cc4 7426 free_trace_buffer(&tr->trace_buffer);
23aaa3c1
SRRH
7427
7428#ifdef CONFIG_TRACER_MAX_TRACE
f0b70cc4 7429 free_trace_buffer(&tr->max_buffer);
23aaa3c1
SRRH
7430#endif
7431}
7432
9a38a885
SRRH
7433static void init_trace_flags_index(struct trace_array *tr)
7434{
7435 int i;
7436
7437 /* Used by the trace options files */
7438 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
7439 tr->trace_flags_index[i] = i;
7440}
7441
37aea98b
SRRH
7442static void __update_tracer_options(struct trace_array *tr)
7443{
7444 struct tracer *t;
7445
7446 for (t = trace_types; t; t = t->next)
7447 add_tracer_options(tr, t);
7448}
7449
7450static void update_tracer_options(struct trace_array *tr)
7451{
7452 mutex_lock(&trace_types_lock);
7453 __update_tracer_options(tr);
7454 mutex_unlock(&trace_types_lock);
7455}
7456
eae47358 7457static int instance_mkdir(const char *name)
737223fb 7458{
277ba044
SR
7459 struct trace_array *tr;
7460 int ret;
277ba044
SR
7461
7462 mutex_lock(&trace_types_lock);
7463
7464 ret = -EEXIST;
7465 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
7466 if (tr->name && strcmp(tr->name, name) == 0)
7467 goto out_unlock;
7468 }
7469
7470 ret = -ENOMEM;
7471 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
7472 if (!tr)
7473 goto out_unlock;
7474
7475 tr->name = kstrdup(name, GFP_KERNEL);
7476 if (!tr->name)
7477 goto out_free_tr;
7478
ccfe9e42
AL
7479 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
7480 goto out_free_tr;
7481
20550622 7482 tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
983f938a 7483
ccfe9e42
AL
7484 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
7485
277ba044
SR
7486 raw_spin_lock_init(&tr->start_lock);
7487
0b9b12c1
SRRH
7488 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
7489
277ba044
SR
7490 tr->current_trace = &nop_trace;
7491
7492 INIT_LIST_HEAD(&tr->systems);
7493 INIT_LIST_HEAD(&tr->events);
7494
737223fb 7495 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
277ba044
SR
7496 goto out_free_tr;
7497
8434dc93 7498 tr->dir = tracefs_create_dir(name, trace_instance_dir);
277ba044
SR
7499 if (!tr->dir)
7500 goto out_free_tr;
7501
7502 ret = event_trace_add_tracer(tr->dir, tr);
609e85a7 7503 if (ret) {
8434dc93 7504 tracefs_remove_recursive(tr->dir);
277ba044 7505 goto out_free_tr;
609e85a7 7506 }
277ba044 7507
04ec7bb6
SRV
7508 ftrace_init_trace_array(tr);
7509
8434dc93 7510 init_tracer_tracefs(tr, tr->dir);
9a38a885 7511 init_trace_flags_index(tr);
37aea98b 7512 __update_tracer_options(tr);
277ba044
SR
7513
7514 list_add(&tr->list, &ftrace_trace_arrays);
7515
7516 mutex_unlock(&trace_types_lock);
7517
7518 return 0;
7519
7520 out_free_tr:
23aaa3c1 7521 free_trace_buffers(tr);
ccfe9e42 7522 free_cpumask_var(tr->tracing_cpumask);
277ba044
SR
7523 kfree(tr->name);
7524 kfree(tr);
7525
7526 out_unlock:
7527 mutex_unlock(&trace_types_lock);
7528
7529 return ret;
7530
7531}
7532
eae47358 7533static int instance_rmdir(const char *name)
0c8916c3
SR
7534{
7535 struct trace_array *tr;
7536 int found = 0;
7537 int ret;
37aea98b 7538 int i;
0c8916c3
SR
7539
7540 mutex_lock(&trace_types_lock);
7541
7542 ret = -ENODEV;
7543 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
7544 if (tr->name && strcmp(tr->name, name) == 0) {
7545 found = 1;
7546 break;
7547 }
7548 }
7549 if (!found)
7550 goto out_unlock;
7551
a695cb58 7552 ret = -EBUSY;
cf6ab6d9 7553 if (tr->ref || (tr->current_trace && tr->current_trace->ref))
a695cb58
SRRH
7554 goto out_unlock;
7555
0c8916c3
SR
7556 list_del(&tr->list);
7557
20550622
SRRH
7558 /* Disable all the flags that were enabled coming in */
7559 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
7560 if ((1 << i) & ZEROED_TRACE_FLAGS)
7561 set_tracer_flag(tr, 1 << i, 0);
7562 }
7563
6b450d25 7564 tracing_set_nop(tr);
a0e6369e 7565 clear_ftrace_function_probes(tr);
0c8916c3 7566 event_trace_del_tracer(tr);
d879d0b8 7567 ftrace_clear_pids(tr);
591dffda 7568 ftrace_destroy_function_files(tr);
681a4a2f 7569 tracefs_remove_recursive(tr->dir);
a9fcaaac 7570 free_trace_buffers(tr);
0c8916c3 7571
37aea98b
SRRH
7572 for (i = 0; i < tr->nr_topts; i++) {
7573 kfree(tr->topts[i].topts);
7574 }
7575 kfree(tr->topts);
7576
0c8916c3
SR
7577 kfree(tr->name);
7578 kfree(tr);
7579
7580 ret = 0;
7581
7582 out_unlock:
7583 mutex_unlock(&trace_types_lock);
7584
7585 return ret;
7586}
7587
277ba044
SR
7588static __init void create_trace_instances(struct dentry *d_tracer)
7589{
eae47358
SRRH
7590 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
7591 instance_mkdir,
7592 instance_rmdir);
277ba044
SR
7593 if (WARN_ON(!trace_instance_dir))
7594 return;
277ba044
SR
7595}
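/*
 * The "instances" directory is driven by mkdir/rmdir from user space,
 * e.g. (paths are illustrative):
 *
 *   mkdir /sys/kernel/tracing/instances/foo   # -> instance_mkdir("foo")
 *   rmdir /sys/kernel/tracing/instances/foo   # -> instance_rmdir("foo")
 *
 * Each instance gets its own ring buffer, events and option files via
 * init_tracer_tracefs() below.
 */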
7596
2b6080f2 7597static void
8434dc93 7598init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
2b6080f2 7599{
121aaee7 7600 int cpu;
2b6080f2 7601
607e2ea1
SRRH
7602 trace_create_file("available_tracers", 0444, d_tracer,
7603 tr, &show_traces_fops);
7604
7605 trace_create_file("current_tracer", 0644, d_tracer,
7606 tr, &set_tracer_fops);
7607
ccfe9e42
AL
7608 trace_create_file("tracing_cpumask", 0644, d_tracer,
7609 tr, &tracing_cpumask_fops);
7610
2b6080f2
SR
7611 trace_create_file("trace_options", 0644, d_tracer,
7612 tr, &tracing_iter_fops);
7613
7614 trace_create_file("trace", 0644, d_tracer,
6484c71c 7615 tr, &tracing_fops);
2b6080f2
SR
7616
7617 trace_create_file("trace_pipe", 0444, d_tracer,
15544209 7618 tr, &tracing_pipe_fops);
2b6080f2
SR
7619
7620 trace_create_file("buffer_size_kb", 0644, d_tracer,
0bc392ee 7621 tr, &tracing_entries_fops);
2b6080f2
SR
7622
7623 trace_create_file("buffer_total_size_kb", 0444, d_tracer,
7624 tr, &tracing_total_entries_fops);
7625
238ae93d 7626 trace_create_file("free_buffer", 0200, d_tracer,
2b6080f2
SR
7627 tr, &tracing_free_buffer_fops);
7628
7629 trace_create_file("trace_marker", 0220, d_tracer,
7630 tr, &tracing_mark_fops);
7631
fa32e855
SR
7632 trace_create_file("trace_marker_raw", 0220, d_tracer,
7633 tr, &tracing_mark_raw_fops);
7634
2b6080f2
SR
7635 trace_create_file("trace_clock", 0644, d_tracer, tr,
7636 &trace_clock_fops);
7637
7638 trace_create_file("tracing_on", 0644, d_tracer,
6484c71c 7639 tr, &rb_simple_fops);
ce9bae55 7640
16270145
SRRH
7641 create_trace_options_dir(tr);
7642
f971cc9a 7643#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
6d9b3fa5
SRRH
7644 trace_create_file("tracing_max_latency", 0644, d_tracer,
7645 &tr->max_latency, &tracing_max_lat_fops);
7646#endif
7647
591dffda
SRRH
7648 if (ftrace_create_function_files(tr, d_tracer))
7649 WARN(1, "Could not allocate function filter files");
7650
ce9bae55
SRRH
7651#ifdef CONFIG_TRACER_SNAPSHOT
7652 trace_create_file("snapshot", 0644, d_tracer,
6484c71c 7653 tr, &snapshot_fops);
ce9bae55 7654#endif
121aaee7
SRRH
7655
7656 for_each_tracing_cpu(cpu)
8434dc93 7657 tracing_init_tracefs_percpu(tr, cpu);
121aaee7 7658
345ddcc8 7659 ftrace_init_tracefs(tr, d_tracer);
2b6080f2
SR
7660}
7661
93faccbb 7662 static struct vfsmount *trace_automount(struct dentry *mntpt, void *ignore)
f76180bc
SRRH
7663{
7664 struct vfsmount *mnt;
7665 struct file_system_type *type;
7666
7667 /*
7668 * To maintain backward compatibility for tools that mount
7669 * debugfs to get to the tracing facility, tracefs is automatically
7670 * mounted to the debugfs/tracing directory.
7671 */
7672 type = get_fs_type("tracefs");
7673 if (!type)
7674 return NULL;
93faccbb 7675 mnt = vfs_submount(mntpt, type, "tracefs", NULL);
f76180bc
SRRH
7676 put_filesystem(type);
7677 if (IS_ERR(mnt))
7678 return NULL;
7679 mntget(mnt);
7680
7681 return mnt;
7682}
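/*
 * In practice the automount set up below keeps older workflows working,
 * e.g. (illustrative):
 *
 *   mount -t debugfs nodev /sys/kernel/debug
 *   ls /sys/kernel/debug/tracing      # triggers the tracefs automount
 */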
7683
7eeafbca
SRRH
7684/**
7685 * tracing_init_dentry - initialize top level trace array
7686 *
7687 * This is called when creating files or directories in the tracing
7688 * directory. It is called via fs_initcall() by any of the boot up code
7689 * and expects to return the dentry of the top level tracing directory.
7690 */
7691struct dentry *tracing_init_dentry(void)
7692{
7693 struct trace_array *tr = &global_trace;
7694
f76180bc 7695 /* The top level trace array uses NULL as parent */
7eeafbca 7696 if (tr->dir)
f76180bc 7697 return NULL;
7eeafbca 7698
8b129199
JW
7699 if (WARN_ON(!tracefs_initialized()) ||
7700 (IS_ENABLED(CONFIG_DEBUG_FS) &&
7701 WARN_ON(!debugfs_initialized())))
7eeafbca
SRRH
7702 return ERR_PTR(-ENODEV);
7703
f76180bc
SRRH
7704 /*
7705 * As there may still be users that expect the tracing
7706 * files to exist in debugfs/tracing, we must automount
7707 * the tracefs file system there, so older tools still
7708 * work with the newer kernel.
7709 */
7710 tr->dir = debugfs_create_automount("tracing", NULL,
7711 trace_automount, NULL);
7eeafbca
SRRH
7712 if (!tr->dir) {
7713 pr_warn_once("Could not create debugfs directory 'tracing'\n");
7714 return ERR_PTR(-ENOMEM);
7715 }
7716
8434dc93 7717 return NULL;
7eeafbca
SRRH
7718}
7719
00f4b652
JL
7720extern struct trace_eval_map *__start_ftrace_eval_maps[];
7721extern struct trace_eval_map *__stop_ftrace_eval_maps[];
0c564a53 7722
5f60b351 7723static void __init trace_eval_init(void)
0c564a53 7724{
3673b8e4
SRRH
7725 int len;
7726
02fd7f68 7727 len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
f57a4143 7728 trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
3673b8e4
SRRH
7729}
7730
7731#ifdef CONFIG_MODULES
f57a4143 7732static void trace_module_add_evals(struct module *mod)
3673b8e4 7733{
99be647c 7734 if (!mod->num_trace_evals)
3673b8e4
SRRH
7735 return;
7736
7737 /*
7738 * Modules with bad taint do not have events created, do
7739 * not bother with enums either.
7740 */
7741 if (trace_module_has_bad_taint(mod))
7742 return;
7743
f57a4143 7744 trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
3673b8e4
SRRH
7745}
7746
681bec03 7747#ifdef CONFIG_TRACE_EVAL_MAP_FILE
f57a4143 7748static void trace_module_remove_evals(struct module *mod)
9828413d 7749{
23bf8cb8
JL
7750 union trace_eval_map_item *map;
7751 union trace_eval_map_item **last = &trace_eval_maps;
9828413d 7752
99be647c 7753 if (!mod->num_trace_evals)
9828413d
SRRH
7754 return;
7755
1793ed93 7756 mutex_lock(&trace_eval_mutex);
9828413d 7757
23bf8cb8 7758 map = trace_eval_maps;
9828413d
SRRH
7759
7760 while (map) {
7761 if (map->head.mod == mod)
7762 break;
5f60b351 7763 map = trace_eval_jmp_to_tail(map);
9828413d
SRRH
7764 last = &map->tail.next;
7765 map = map->tail.next;
7766 }
7767 if (!map)
7768 goto out;
7769
5f60b351 7770 *last = trace_eval_jmp_to_tail(map)->tail.next;
9828413d
SRRH
7771 kfree(map);
7772 out:
1793ed93 7773 mutex_unlock(&trace_eval_mutex);
9828413d
SRRH
7774}
7775#else
f57a4143 7776static inline void trace_module_remove_evals(struct module *mod) { }
681bec03 7777#endif /* CONFIG_TRACE_EVAL_MAP_FILE */
9828413d 7778
3673b8e4
SRRH
7779static int trace_module_notify(struct notifier_block *self,
7780 unsigned long val, void *data)
7781{
7782 struct module *mod = data;
7783
7784 switch (val) {
7785 case MODULE_STATE_COMING:
f57a4143 7786 trace_module_add_evals(mod);
3673b8e4 7787 break;
9828413d 7788 case MODULE_STATE_GOING:
f57a4143 7789 trace_module_remove_evals(mod);
9828413d 7790 break;
3673b8e4
SRRH
7791 }
7792
7793 return 0;
0c564a53
SRRH
7794}
7795
3673b8e4
SRRH
7796static struct notifier_block trace_module_nb = {
7797 .notifier_call = trace_module_notify,
7798 .priority = 0,
7799};
9828413d 7800#endif /* CONFIG_MODULES */
3673b8e4 7801
8434dc93 7802static __init int tracer_init_tracefs(void)
bc0c38d1
SR
7803{
7804 struct dentry *d_tracer;
bc0c38d1 7805
7e53bd42
LJ
7806 trace_access_lock_init();
7807
bc0c38d1 7808 d_tracer = tracing_init_dentry();
14a5ae40 7809 if (IS_ERR(d_tracer))
ed6f1c99 7810 return 0;
bc0c38d1 7811
8434dc93 7812 init_tracer_tracefs(&global_trace, d_tracer);
501c2375 7813 ftrace_init_tracefs_toplevel(&global_trace, d_tracer);
bc0c38d1 7814
5452af66 7815 trace_create_file("tracing_thresh", 0644, d_tracer,
6508fa76 7816 &global_trace, &tracing_thresh_fops);
a8259075 7817
339ae5d3 7818 trace_create_file("README", 0444, d_tracer,
5452af66
FW
7819 NULL, &tracing_readme_fops);
7820
69abe6a5
AP
7821 trace_create_file("saved_cmdlines", 0444, d_tracer,
7822 NULL, &tracing_saved_cmdlines_fops);
5bf9a1ee 7823
939c7a4f
YY
7824 trace_create_file("saved_cmdlines_size", 0644, d_tracer,
7825 NULL, &tracing_saved_cmdlines_size_fops);
7826
5f60b351 7827 trace_eval_init();
0c564a53 7828
f57a4143 7829 trace_create_eval_file(d_tracer);
9828413d 7830
3673b8e4
SRRH
7831#ifdef CONFIG_MODULES
7832 register_module_notifier(&trace_module_nb);
7833#endif
7834
bc0c38d1 7835#ifdef CONFIG_DYNAMIC_FTRACE
5452af66
FW
7836 trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
7837 &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
bc0c38d1 7838#endif
b04cc6b1 7839
277ba044 7840 create_trace_instances(d_tracer);
5452af66 7841
37aea98b 7842 update_tracer_options(&global_trace);
09d23a1d 7843
b5ad384e 7844 return 0;
bc0c38d1
SR
7845}
7846
3f5a54e3
SR
7847static int trace_panic_handler(struct notifier_block *this,
7848 unsigned long event, void *unused)
7849{
944ac425 7850 if (ftrace_dump_on_oops)
cecbca96 7851 ftrace_dump(ftrace_dump_on_oops);
3f5a54e3
SR
7852 return NOTIFY_OK;
7853}
7854
7855static struct notifier_block trace_panic_notifier = {
7856 .notifier_call = trace_panic_handler,
7857 .next = NULL,
7858 .priority = 150 /* priority: INT_MAX >= x >= 0 */
7859};
7860
7861static int trace_die_handler(struct notifier_block *self,
7862 unsigned long val,
7863 void *data)
7864{
7865 switch (val) {
7866 case DIE_OOPS:
944ac425 7867 if (ftrace_dump_on_oops)
cecbca96 7868 ftrace_dump(ftrace_dump_on_oops);
3f5a54e3
SR
7869 break;
7870 default:
7871 break;
7872 }
7873 return NOTIFY_OK;
7874}
7875
7876static struct notifier_block trace_die_notifier = {
7877 .notifier_call = trace_die_handler,
7878 .priority = 200
7879};
7880
7881/*
7882 * printk is set to a max of 1024; we really don't need it that big.
7883 * Nothing should be printing 1000 characters anyway.
7884 */
7885#define TRACE_MAX_PRINT 1000
7886
7887/*
7888 * Define KERN_TRACE here so that we have one place to modify
7889 * it if we decide to change what log level the ftrace dump
7890 * should be at.
7891 */
428aee14 7892#define KERN_TRACE KERN_EMERG
3f5a54e3 7893
955b61e5 7894void
3f5a54e3
SR
7895trace_printk_seq(struct trace_seq *s)
7896{
7897 /* Probably should print a warning here. */
3a161d99
SRRH
7898 if (s->seq.len >= TRACE_MAX_PRINT)
7899 s->seq.len = TRACE_MAX_PRINT;
3f5a54e3 7900
820b75f6
SRRH
7901 /*
7902 * More paranoid code. Although the buffer size is set to
7903 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
7904 * an extra layer of protection.
7905 */
7906 if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
7907 s->seq.len = s->seq.size - 1;
3f5a54e3
SR
7908
7909 /* should be zero terminated, but we are paranoid. */
3a161d99 7910 s->buffer[s->seq.len] = 0;
3f5a54e3
SR
7911
7912 printk(KERN_TRACE "%s", s->buffer);
7913
f9520750 7914 trace_seq_init(s);
3f5a54e3
SR
7915}
7916
955b61e5
JW
7917void trace_init_global_iter(struct trace_iterator *iter)
7918{
7919 iter->tr = &global_trace;
2b6080f2 7920 iter->trace = iter->tr->current_trace;
ae3b5093 7921 iter->cpu_file = RING_BUFFER_ALL_CPUS;
12883efb 7922 iter->trace_buffer = &global_trace.trace_buffer;
b2f974d6
CS
7923
7924 if (iter->trace && iter->trace->open)
7925 iter->trace->open(iter);
7926
7927 /* Annotate start of buffers if we had overruns */
7928 if (ring_buffer_overruns(iter->trace_buffer->buffer))
7929 iter->iter_flags |= TRACE_FILE_ANNOTATE;
7930
7931 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
7932 if (trace_clocks[iter->tr->clock_id].in_ns)
7933 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
955b61e5
JW
7934}
7935
7fe70b57 7936void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
3f5a54e3 7937{
3f5a54e3
SR
7938 /* use static because iter can be a bit big for the stack */
7939 static struct trace_iterator iter;
7fe70b57 7940 static atomic_t dump_running;
983f938a 7941 struct trace_array *tr = &global_trace;
cf586b61 7942 unsigned int old_userobj;
d769041f
SR
7943 unsigned long flags;
7944 int cnt = 0, cpu;
3f5a54e3 7945
7fe70b57
SRRH
7946 /* Only allow one dump user at a time. */
7947 if (atomic_inc_return(&dump_running) != 1) {
7948 atomic_dec(&dump_running);
7949 return;
7950 }
3f5a54e3 7951
7fe70b57
SRRH
7952 /*
7953 * Always turn off tracing when we dump.
7954 * We don't need to show trace output of what happens
7955 * between multiple crashes.
7956 *
7957 * If the user does a sysrq-z, then they can re-enable
7958 * tracing with echo 1 > tracing_on.
7959 */
0ee6b6cf 7960 tracing_off();
cf586b61 7961
7fe70b57 7962 local_irq_save(flags);
3f5a54e3 7963
38dbe0b1 7964 /* Simulate the iterator */
955b61e5
JW
7965 trace_init_global_iter(&iter);
7966
d769041f 7967 for_each_tracing_cpu(cpu) {
5e2d5ef8 7968 atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
d769041f
SR
7969 }
7970
983f938a 7971 old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
cf586b61 7972
b54d3de9 7973 /* don't look at user memory in panic mode */
983f938a 7974 tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
b54d3de9 7975
cecbca96
FW
7976 switch (oops_dump_mode) {
7977 case DUMP_ALL:
ae3b5093 7978 iter.cpu_file = RING_BUFFER_ALL_CPUS;
cecbca96
FW
7979 break;
7980 case DUMP_ORIG:
7981 iter.cpu_file = raw_smp_processor_id();
7982 break;
7983 case DUMP_NONE:
7984 goto out_enable;
7985 default:
7986 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
ae3b5093 7987 iter.cpu_file = RING_BUFFER_ALL_CPUS;
cecbca96
FW
7988 }
7989
7990 printk(KERN_TRACE "Dumping ftrace buffer:\n");
3f5a54e3 7991
7fe70b57
SRRH
7992 /* Did function tracer already get disabled? */
7993 if (ftrace_is_dead()) {
7994 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
7995 printk("# MAY BE MISSING FUNCTION EVENTS\n");
7996 }
7997
3f5a54e3
SR
7998 /*
7999 * We need to stop all tracing on all CPUs to read
8000 * the next buffer. This is a bit expensive, but is
8001 * not done often. We read out all that we can,
8002 * and then release the locks again.
8003 */
8004
3f5a54e3
SR
8005 while (!trace_empty(&iter)) {
8006
8007 if (!cnt)
8008 printk(KERN_TRACE "---------------------------------\n");
8009
8010 cnt++;
8011
8012 /* reset all but tr, trace, and overruns */
8013 memset(&iter.seq, 0,
8014 sizeof(struct trace_iterator) -
8015 offsetof(struct trace_iterator, seq));
8016 iter.iter_flags |= TRACE_FILE_LAT_FMT;
8017 iter.pos = -1;
8018
955b61e5 8019 if (trace_find_next_entry_inc(&iter) != NULL) {
74e7ff8c
LJ
8020 int ret;
8021
8022 ret = print_trace_line(&iter);
8023 if (ret != TRACE_TYPE_NO_CONSUME)
8024 trace_consume(&iter);
3f5a54e3 8025 }
b892e5c8 8026 touch_nmi_watchdog();
3f5a54e3
SR
8027
8028 trace_printk_seq(&iter.seq);
8029 }
8030
8031 if (!cnt)
8032 printk(KERN_TRACE " (ftrace buffer empty)\n");
8033 else
8034 printk(KERN_TRACE "---------------------------------\n");
8035
cecbca96 8036 out_enable:
983f938a 8037 tr->trace_flags |= old_userobj;
cf586b61 8038
7fe70b57
SRRH
8039 for_each_tracing_cpu(cpu) {
8040 atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
cf586b61 8041 }
7fe70b57 8042 atomic_dec(&dump_running);
cd891ae0 8043 local_irq_restore(flags);
3f5a54e3 8044}
a8eecf22 8045EXPORT_SYMBOL_GPL(ftrace_dump);
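/*
 * ftrace_dump() is normally reached from the panic/die notifiers above
 * when ftrace_dump_on_oops is set, or on demand via sysrq.  A usage
 * sketch (illustrative):
 *
 *   echo 1 > /proc/sys/kernel/ftrace_dump_on_oops
 *   echo z > /proc/sysrq-trigger      # dump the ftrace buffer now
 */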
cf586b61 8046
3928a8a2 8047__init static int tracer_alloc_buffers(void)
bc0c38d1 8048{
73c5162a 8049 int ring_buf_size;
9e01c1b7 8050 int ret = -ENOMEM;
4c11d7ae 8051
b5e87c05
SRRH
8052 /*
8053 * Make sure we don't accidentally add more trace options
8054 * than we have bits for.
8055 */
9a38a885 8056 BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);
b5e87c05 8057
9e01c1b7
RR
8058 if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
8059 goto out;
8060
ccfe9e42 8061 if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
9e01c1b7 8062 goto out_free_buffer_mask;
4c11d7ae 8063
07d777fe
SR
8064 /* Only allocate trace_printk buffers if a trace_printk exists */
8065 if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
81698831 8066 /* Must be called before global_trace.buffer is allocated */
07d777fe
SR
8067 trace_printk_init_buffers();
8068
73c5162a
SR
8069 /* To save memory, keep the ring buffer size to its minimum */
8070 if (ring_buffer_expanded)
8071 ring_buf_size = trace_buf_size;
8072 else
8073 ring_buf_size = 1;
8074
9e01c1b7 8075 cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
ccfe9e42 8076 cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
9e01c1b7 8077
2b6080f2
SR
8078 raw_spin_lock_init(&global_trace.start_lock);
8079
b32614c0
SAS
8080 /*
8081 * The prepare callback allocates some memory for the ring buffer. We
8082 * don't free the buffer if the CPU goes down. If we were to free
8083 * the buffer, then the user would lose any trace that was in the
8084 * buffer. The memory will be removed once the "instance" is removed.
8085 */
8086 ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
8087 "trace/RB:preapre", trace_rb_cpu_prepare,
8088 NULL);
8089 if (ret < 0)
8090 goto out_free_cpumask;
2c4a33ab
SRRH
8091 /* Used for event triggers */
8092 temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
8093 if (!temp_buffer)
b32614c0 8094 goto out_rm_hp_state;
2c4a33ab 8095
939c7a4f
YY
8096 if (trace_create_savedcmd() < 0)
8097 goto out_free_temp_buffer;
8098
9e01c1b7 8099 /* TODO: make the number of buffers hot pluggable with CPUs */
737223fb 8100 if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
3928a8a2
SR
8101 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
8102 WARN_ON(1);
939c7a4f 8103 goto out_free_savedcmd;
4c11d7ae 8104 }
a7603ff4 8105
499e5470
SR
8106 if (global_trace.buffer_disabled)
8107 tracing_off();
4c11d7ae 8108
e1e232ca
SR
8109 if (trace_boot_clock) {
8110 ret = tracing_set_clock(&global_trace, trace_boot_clock);
8111 if (ret < 0)
a395d6a7
JP
8112 pr_warn("Trace clock %s not defined, going back to default\n",
8113 trace_boot_clock);
e1e232ca
SR
8114 }
8115
ca164318
SRRH
8116 /*
8117 * register_tracer() might reference current_trace, so it
8118 * needs to be set before we register anything. This is
8119 * just a bootstrap of current_trace anyway.
8120 */
2b6080f2
SR
8121 global_trace.current_trace = &nop_trace;
8122
0b9b12c1
SRRH
8123 global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
8124
4104d326
SRRH
8125 ftrace_init_global_array_ops(&global_trace);
8126
9a38a885
SRRH
8127 init_trace_flags_index(&global_trace);
8128
ca164318
SRRH
8129 register_tracer(&nop_trace);
8130
dbeafd0d
SRV
8131 /* Function tracing may start here (via kernel command line) */
8132 init_function_trace();
8133
60a11774
SR
8134 /* All seems OK, enable tracing */
8135 tracing_disabled = 0;
3928a8a2 8136
3f5a54e3
SR
8137 atomic_notifier_chain_register(&panic_notifier_list,
8138 &trace_panic_notifier);
8139
8140 register_die_notifier(&trace_die_notifier);
2fc1dfbe 8141
ae63b31e
SR
8142 global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
8143
8144 INIT_LIST_HEAD(&global_trace.systems);
8145 INIT_LIST_HEAD(&global_trace.events);
8146 list_add(&global_trace.list, &ftrace_trace_arrays);
8147
a4d1e688 8148 apply_trace_boot_options();
7bcfaf54 8149
77fd5c15
SRRH
8150 register_snapshot_cmd();
8151
2fc1dfbe 8152 return 0;
3f5a54e3 8153
939c7a4f
YY
8154out_free_savedcmd:
8155 free_saved_cmdlines_buffer(savedcmd);
2c4a33ab
SRRH
8156out_free_temp_buffer:
8157 ring_buffer_free(temp_buffer);
b32614c0
SAS
8158out_rm_hp_state:
8159 cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
9e01c1b7 8160out_free_cpumask:
ccfe9e42 8161 free_cpumask_var(global_trace.tracing_cpumask);
9e01c1b7
RR
8162out_free_buffer_mask:
8163 free_cpumask_var(tracing_buffer_mask);
8164out:
8165 return ret;
bc0c38d1 8166}
b2821ae6 8167
e725c731 8168void __init early_trace_init(void)
5f893b26 8169{
0daa2302
SRRH
8170 if (tracepoint_printk) {
8171 tracepoint_print_iter =
8172 kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
8173 if (WARN_ON(!tracepoint_print_iter))
8174 tracepoint_printk = 0;
42391745
SRRH
8175 else
8176 static_key_enable(&tracepoint_printk_key.key);
0daa2302 8177 }
5f893b26 8178 tracer_alloc_buffers();
e725c731
SRV
8179}
8180
8181void __init trace_init(void)
8182{
0c564a53 8183 trace_event_init();
5f893b26
SRRH
8184}
8185
b2821ae6
SR
8186__init static int clear_boot_tracer(void)
8187{
8188 /*
8189 * The default bootup tracer string lives in an init section and
8190 * will be freed after boot. This function is called at late_initcall
8191 * time; if the boot tracer was never registered, clear the pointer
8192 * out to prevent later registration from accessing the buffer that
8193 * is about to be freed.
8194 */
8195 if (!default_bootup_tracer)
8196 return 0;
8197
8198 printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
8199 default_bootup_tracer);
8200 default_bootup_tracer = NULL;
8201
8202 return 0;
8203}
8204
8434dc93 8205fs_initcall(tracer_init_tracefs);
b2821ae6 8206late_initcall(clear_boot_tracer);