ipc, kernel, mm: annotate ->poll() instances
[linux-block.git] / kernel / trace / trace.c
CommitLineData
bc0c38d1
SR
1/*
2 * ring buffer based function tracer
3 *
2b6080f2 4 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
bc0c38d1
SR
5 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
6 *
7 * Originally taken from the RT patch by:
8 * Arnaldo Carvalho de Melo <acme@redhat.com>
9 *
10 * Based on code from the latency_tracer, that is:
11 * Copyright (C) 2004-2006 Ingo Molnar
6d49e352 12 * Copyright (C) 2004 Nadia Yvette Chambers
bc0c38d1 13 */
2cadf913 14#include <linux/ring_buffer.h>
273b281f 15#include <generated/utsrelease.h>
2cadf913
SR
16#include <linux/stacktrace.h>
17#include <linux/writeback.h>
bc0c38d1
SR
18#include <linux/kallsyms.h>
19#include <linux/seq_file.h>
3f5a54e3 20#include <linux/notifier.h>
2cadf913 21#include <linux/irqflags.h>
bc0c38d1 22#include <linux/debugfs.h>
8434dc93 23#include <linux/tracefs.h>
4c11d7ae 24#include <linux/pagemap.h>
bc0c38d1
SR
25#include <linux/hardirq.h>
26#include <linux/linkage.h>
27#include <linux/uaccess.h>
76c813e2 28#include <linux/vmalloc.h>
bc0c38d1
SR
29#include <linux/ftrace.h>
30#include <linux/module.h>
31#include <linux/percpu.h>
2cadf913 32#include <linux/splice.h>
3f5a54e3 33#include <linux/kdebug.h>
5f0c6c03 34#include <linux/string.h>
f76180bc 35#include <linux/mount.h>
7e53bd42 36#include <linux/rwsem.h>
5a0e3ad6 37#include <linux/slab.h>
bc0c38d1
SR
38#include <linux/ctype.h>
39#include <linux/init.h>
2a2cc8f7 40#include <linux/poll.h>
b892e5c8 41#include <linux/nmi.h>
bc0c38d1 42#include <linux/fs.h>
478409dd 43#include <linux/trace.h>
8bd75c77 44#include <linux/sched/rt.h>
86387f7e 45
bc0c38d1 46#include "trace.h"
f0868d1e 47#include "trace_output.h"
bc0c38d1 48
73c5162a
SR
49/*
50 * On boot up, the ring buffer is set to the minimum size, so that
51 * we do not waste memory on systems that are not using tracing.
52 */
55034cd6 53bool ring_buffer_expanded;
73c5162a 54
8e1b82e0
FW
55/*
56 * We need to change this state when a selftest is running.
ff32504f
FW
57 * A selftest will lurk into the ring-buffer to count the
58 * entries inserted during the selftest although some concurrent
5e1607a0 59 * insertions into the ring-buffer such as trace_printk could occurred
ff32504f
FW
60 * at the same time, giving false positive or negative results.
61 */
8e1b82e0 62static bool __read_mostly tracing_selftest_running;
ff32504f 63
b2821ae6
SR
64/*
65 * If a tracer is running, we do not want to run SELFTEST.
66 */
020e5f85 67bool __read_mostly tracing_selftest_disabled;
b2821ae6 68
0daa2302
SRRH
69/* Pipe tracepoints to printk */
70struct trace_iterator *tracepoint_print_iter;
71int tracepoint_printk;
42391745 72static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);
0daa2302 73
adf9f195
FW
74/* For tracers that don't implement custom flags */
75static struct tracer_opt dummy_tracer_opt[] = {
76 { }
77};
78
8c1a49ae
SRRH
79static int
80dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
adf9f195
FW
81{
82 return 0;
83}
0f048701 84
7ffbd48d
SR
85/*
86 * To prevent the comm cache from being overwritten when no
87 * tracing is active, only save the comm when a trace event
88 * occurred.
89 */
d914ba37 90static DEFINE_PER_CPU(bool, trace_taskinfo_save);
7ffbd48d 91
0f048701
SR
92/*
93 * Kill all tracing for good (never come back).
94 * It is initialized to 1 but will turn to zero if the initialization
95 * of the tracer is successful. But that is the only place that sets
96 * this back to zero.
97 */
4fd27358 98static int tracing_disabled = 1;
0f048701 99
955b61e5 100cpumask_var_t __read_mostly tracing_buffer_mask;
ab46428c 101
944ac425
SR
102/*
103 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
104 *
105 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
106 * is set, then ftrace_dump is called. This will output the contents
107 * of the ftrace buffers to the console. This is very useful for
108 * capturing traces that lead to crashes and outputing it to a
109 * serial console.
110 *
111 * It is default off, but you can enable it with either specifying
112 * "ftrace_dump_on_oops" in the kernel command line, or setting
cecbca96
FW
113 * /proc/sys/kernel/ftrace_dump_on_oops
114 * Set 1 if you want to dump buffers of all CPUs
115 * Set 2 if you want to dump the buffer of the CPU that triggered oops
944ac425 116 */
cecbca96
FW
117
118enum ftrace_dump_mode ftrace_dump_on_oops;
944ac425 119
de7edd31
SRRH
120/* When set, tracing will stop when a WARN*() is hit */
121int __disable_trace_on_warning;
122
681bec03
JL
123#ifdef CONFIG_TRACE_EVAL_MAP_FILE
124/* Map of enums to their values, for "eval_map" file */
23bf8cb8 125struct trace_eval_map_head {
9828413d
SRRH
126 struct module *mod;
127 unsigned long length;
128};
129
23bf8cb8 130union trace_eval_map_item;
9828413d 131
23bf8cb8 132struct trace_eval_map_tail {
9828413d
SRRH
133 /*
134 * "end" is first and points to NULL as it must be different
00f4b652 135 * than "mod" or "eval_string"
9828413d 136 */
23bf8cb8 137 union trace_eval_map_item *next;
9828413d
SRRH
138 const char *end; /* points to NULL */
139};
140
1793ed93 141static DEFINE_MUTEX(trace_eval_mutex);
9828413d
SRRH
142
143/*
23bf8cb8 144 * The trace_eval_maps are saved in an array with two extra elements,
9828413d
SRRH
145 * one at the beginning, and one at the end. The beginning item contains
146 * the count of the saved maps (head.length), and the module they
147 * belong to if not built in (head.mod). The ending item contains a
681bec03 148 * pointer to the next array of saved eval_map items.
9828413d 149 */
23bf8cb8 150union trace_eval_map_item {
00f4b652 151 struct trace_eval_map map;
23bf8cb8
JL
152 struct trace_eval_map_head head;
153 struct trace_eval_map_tail tail;
9828413d
SRRH
154};
155
23bf8cb8 156static union trace_eval_map_item *trace_eval_maps;
681bec03 157#endif /* CONFIG_TRACE_EVAL_MAP_FILE */
9828413d 158
607e2ea1 159static int tracing_set_tracer(struct trace_array *tr, const char *buf);
b2821ae6 160
ee6c2c1b
LZ
161#define MAX_TRACER_SIZE 100
162static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
b2821ae6 163static char *default_bootup_tracer;
d9e54076 164
55034cd6
SRRH
165static bool allocate_snapshot;
166
1beee96b 167static int __init set_cmdline_ftrace(char *str)
d9e54076 168{
67012ab1 169 strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
b2821ae6 170 default_bootup_tracer = bootup_tracer_buf;
73c5162a 171 /* We are using ftrace early, expand it */
55034cd6 172 ring_buffer_expanded = true;
d9e54076
PZ
173 return 1;
174}
1beee96b 175__setup("ftrace=", set_cmdline_ftrace);
d9e54076 176
944ac425
SR
177static int __init set_ftrace_dump_on_oops(char *str)
178{
cecbca96
FW
179 if (*str++ != '=' || !*str) {
180 ftrace_dump_on_oops = DUMP_ALL;
181 return 1;
182 }
183
184 if (!strcmp("orig_cpu", str)) {
185 ftrace_dump_on_oops = DUMP_ORIG;
186 return 1;
187 }
188
189 return 0;
944ac425
SR
190}
191__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
60a11774 192
de7edd31
SRRH
193static int __init stop_trace_on_warning(char *str)
194{
933ff9f2
LCG
195 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
196 __disable_trace_on_warning = 1;
de7edd31
SRRH
197 return 1;
198}
933ff9f2 199__setup("traceoff_on_warning", stop_trace_on_warning);
de7edd31 200
3209cff4 201static int __init boot_alloc_snapshot(char *str)
55034cd6
SRRH
202{
203 allocate_snapshot = true;
204 /* We also need the main ring buffer expanded */
205 ring_buffer_expanded = true;
206 return 1;
207}
3209cff4 208__setup("alloc_snapshot", boot_alloc_snapshot);
55034cd6 209
7bcfaf54
SR
210
211static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
7bcfaf54
SR
212
213static int __init set_trace_boot_options(char *str)
214{
67012ab1 215 strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
7bcfaf54
SR
216 return 0;
217}
218__setup("trace_options=", set_trace_boot_options);
219
e1e232ca
SR
220static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
221static char *trace_boot_clock __initdata;
222
223static int __init set_trace_boot_clock(char *str)
224{
225 strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
226 trace_boot_clock = trace_boot_clock_buf;
227 return 0;
228}
229__setup("trace_clock=", set_trace_boot_clock);
230
0daa2302
SRRH
231static int __init set_tracepoint_printk(char *str)
232{
233 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
234 tracepoint_printk = 1;
235 return 1;
236}
237__setup("tp_printk", set_tracepoint_printk);
de7edd31 238
a5a1d1c2 239unsigned long long ns2usecs(u64 nsec)
bc0c38d1
SR
240{
241 nsec += 500;
242 do_div(nsec, 1000);
243 return nsec;
244}
245
983f938a
SRRH
246/* trace_flags holds trace_options default values */
247#define TRACE_DEFAULT_FLAGS \
248 (FUNCTION_DEFAULT_FLAGS | \
249 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | \
250 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | \
251 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE | \
252 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)
253
16270145
SRRH
254/* trace_options that are only supported by global_trace */
255#define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK | \
256 TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)
257
20550622
SRRH
258/* trace_flags that are default zero for instances */
259#define ZEROED_TRACE_FLAGS \
1e10486f 260 (TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)
16270145 261
4fcdae83 262/*
67d04bb2
JF
263 * The global_trace is the descriptor that holds the top-level tracing
264 * buffers for the live tracing.
4fcdae83 265 */
983f938a
SRRH
266static struct trace_array global_trace = {
267 .trace_flags = TRACE_DEFAULT_FLAGS,
268};
bc0c38d1 269
ae63b31e 270LIST_HEAD(ftrace_trace_arrays);
bc0c38d1 271
ff451961
SRRH
272int trace_array_get(struct trace_array *this_tr)
273{
274 struct trace_array *tr;
275 int ret = -ENODEV;
276
277 mutex_lock(&trace_types_lock);
278 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
279 if (tr == this_tr) {
280 tr->ref++;
281 ret = 0;
282 break;
283 }
284 }
285 mutex_unlock(&trace_types_lock);
286
287 return ret;
288}
289
290static void __trace_array_put(struct trace_array *this_tr)
291{
292 WARN_ON(!this_tr->ref);
293 this_tr->ref--;
294}
295
296void trace_array_put(struct trace_array *this_tr)
297{
298 mutex_lock(&trace_types_lock);
299 __trace_array_put(this_tr);
300 mutex_unlock(&trace_types_lock);
301}
302
2425bcb9 303int call_filter_check_discard(struct trace_event_call *call, void *rec,
f306cc82
TZ
304 struct ring_buffer *buffer,
305 struct ring_buffer_event *event)
306{
307 if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
308 !filter_match_preds(call->filter, rec)) {
0fc1b09f 309 __trace_event_discard_commit(buffer, event);
f306cc82
TZ
310 return 1;
311 }
312
313 return 0;
eb02ce01
TZ
314}
315
76c813e2
SRRH
316void trace_free_pid_list(struct trace_pid_list *pid_list)
317{
318 vfree(pid_list->pids);
319 kfree(pid_list);
320}
321
d8275c45
SR
322/**
323 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
324 * @filtered_pids: The list of pids to check
325 * @search_pid: The PID to find in @filtered_pids
326 *
327 * Returns true if @search_pid is fonud in @filtered_pids, and false otherwis.
328 */
329bool
330trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
331{
332 /*
333 * If pid_max changed after filtered_pids was created, we
334 * by default ignore all pids greater than the previous pid_max.
335 */
336 if (search_pid >= filtered_pids->pid_max)
337 return false;
338
339 return test_bit(search_pid, filtered_pids->pids);
340}
341
342/**
343 * trace_ignore_this_task - should a task be ignored for tracing
344 * @filtered_pids: The list of pids to check
345 * @task: The task that should be ignored if not filtered
346 *
347 * Checks if @task should be traced or not from @filtered_pids.
348 * Returns true if @task should *NOT* be traced.
349 * Returns false if @task should be traced.
350 */
351bool
352trace_ignore_this_task(struct trace_pid_list *filtered_pids, struct task_struct *task)
353{
354 /*
355 * Return false, because if filtered_pids does not exist,
356 * all pids are good to trace.
357 */
358 if (!filtered_pids)
359 return false;
360
361 return !trace_find_filtered_pid(filtered_pids, task->pid);
362}
363
364/**
365 * trace_pid_filter_add_remove - Add or remove a task from a pid_list
366 * @pid_list: The list to modify
367 * @self: The current task for fork or NULL for exit
368 * @task: The task to add or remove
369 *
370 * If adding a task, if @self is defined, the task is only added if @self
371 * is also included in @pid_list. This happens on fork and tasks should
372 * only be added when the parent is listed. If @self is NULL, then the
373 * @task pid will be removed from the list, which would happen on exit
374 * of a task.
375 */
376void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
377 struct task_struct *self,
378 struct task_struct *task)
379{
380 if (!pid_list)
381 return;
382
383 /* For forks, we only add if the forking task is listed */
384 if (self) {
385 if (!trace_find_filtered_pid(pid_list, self->pid))
386 return;
387 }
388
389 /* Sorry, but we don't support pid_max changing after setting */
390 if (task->pid >= pid_list->pid_max)
391 return;
392
393 /* "self" is set for forks, and NULL for exits */
394 if (self)
395 set_bit(task->pid, pid_list->pids);
396 else
397 clear_bit(task->pid, pid_list->pids);
398}
399
5cc8976b
SRRH
400/**
401 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
402 * @pid_list: The pid list to show
403 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
404 * @pos: The position of the file
405 *
406 * This is used by the seq_file "next" operation to iterate the pids
407 * listed in a trace_pid_list structure.
408 *
409 * Returns the pid+1 as we want to display pid of zero, but NULL would
410 * stop the iteration.
411 */
412void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
413{
414 unsigned long pid = (unsigned long)v;
415
416 (*pos)++;
417
418 /* pid already is +1 of the actual prevous bit */
419 pid = find_next_bit(pid_list->pids, pid_list->pid_max, pid);
420
421 /* Return pid + 1 to allow zero to be represented */
422 if (pid < pid_list->pid_max)
423 return (void *)(pid + 1);
424
425 return NULL;
426}
427
428/**
429 * trace_pid_start - Used for seq_file to start reading pid lists
430 * @pid_list: The pid list to show
431 * @pos: The position of the file
432 *
433 * This is used by seq_file "start" operation to start the iteration
434 * of listing pids.
435 *
436 * Returns the pid+1 as we want to display pid of zero, but NULL would
437 * stop the iteration.
438 */
439void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
440{
441 unsigned long pid;
442 loff_t l = 0;
443
444 pid = find_first_bit(pid_list->pids, pid_list->pid_max);
445 if (pid >= pid_list->pid_max)
446 return NULL;
447
448 /* Return pid + 1 so that zero can be the exit value */
449 for (pid++; pid && l < *pos;
450 pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
451 ;
452 return (void *)pid;
453}
454
455/**
456 * trace_pid_show - show the current pid in seq_file processing
457 * @m: The seq_file structure to write into
458 * @v: A void pointer of the pid (+1) value to display
459 *
460 * Can be directly used by seq_file operations to display the current
461 * pid value.
462 */
463int trace_pid_show(struct seq_file *m, void *v)
464{
465 unsigned long pid = (unsigned long)v - 1;
466
467 seq_printf(m, "%lu\n", pid);
468 return 0;
469}
470
76c813e2
SRRH
471/* 128 should be much more than enough */
472#define PID_BUF_SIZE 127
473
474int trace_pid_write(struct trace_pid_list *filtered_pids,
475 struct trace_pid_list **new_pid_list,
476 const char __user *ubuf, size_t cnt)
477{
478 struct trace_pid_list *pid_list;
479 struct trace_parser parser;
480 unsigned long val;
481 int nr_pids = 0;
482 ssize_t read = 0;
483 ssize_t ret = 0;
484 loff_t pos;
485 pid_t pid;
486
487 if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
488 return -ENOMEM;
489
490 /*
491 * Always recreate a new array. The write is an all or nothing
492 * operation. Always create a new array when adding new pids by
493 * the user. If the operation fails, then the current list is
494 * not modified.
495 */
496 pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
497 if (!pid_list)
498 return -ENOMEM;
499
500 pid_list->pid_max = READ_ONCE(pid_max);
501
502 /* Only truncating will shrink pid_max */
503 if (filtered_pids && filtered_pids->pid_max > pid_list->pid_max)
504 pid_list->pid_max = filtered_pids->pid_max;
505
506 pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
507 if (!pid_list->pids) {
508 kfree(pid_list);
509 return -ENOMEM;
510 }
511
512 if (filtered_pids) {
513 /* copy the current bits to the new max */
67f20b08
WY
514 for_each_set_bit(pid, filtered_pids->pids,
515 filtered_pids->pid_max) {
76c813e2 516 set_bit(pid, pid_list->pids);
76c813e2
SRRH
517 nr_pids++;
518 }
519 }
520
521 while (cnt > 0) {
522
523 pos = 0;
524
525 ret = trace_get_user(&parser, ubuf, cnt, &pos);
526 if (ret < 0 || !trace_parser_loaded(&parser))
527 break;
528
529 read += ret;
530 ubuf += ret;
531 cnt -= ret;
532
533 parser.buffer[parser.idx] = 0;
534
535 ret = -EINVAL;
536 if (kstrtoul(parser.buffer, 0, &val))
537 break;
538 if (val >= pid_list->pid_max)
539 break;
540
541 pid = (pid_t)val;
542
543 set_bit(pid, pid_list->pids);
544 nr_pids++;
545
546 trace_parser_clear(&parser);
547 ret = 0;
548 }
549 trace_parser_put(&parser);
550
551 if (ret < 0) {
552 trace_free_pid_list(pid_list);
553 return ret;
554 }
555
556 if (!nr_pids) {
557 /* Cleared the list of pids */
558 trace_free_pid_list(pid_list);
559 read = ret;
560 pid_list = NULL;
561 }
562
563 *new_pid_list = pid_list;
564
565 return read;
566}
567
a5a1d1c2 568static u64 buffer_ftrace_now(struct trace_buffer *buf, int cpu)
37886f6a
SR
569{
570 u64 ts;
571
572 /* Early boot up does not have a buffer yet */
9457158b 573 if (!buf->buffer)
37886f6a
SR
574 return trace_clock_local();
575
9457158b
AL
576 ts = ring_buffer_time_stamp(buf->buffer, cpu);
577 ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
37886f6a
SR
578
579 return ts;
580}
bc0c38d1 581
a5a1d1c2 582u64 ftrace_now(int cpu)
9457158b
AL
583{
584 return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
585}
586
10246fa3
SRRH
587/**
588 * tracing_is_enabled - Show if global_trace has been disabled
589 *
590 * Shows if the global trace has been enabled or not. It uses the
591 * mirror flag "buffer_disabled" to be used in fast paths such as for
592 * the irqsoff tracer. But it may be inaccurate due to races. If you
593 * need to know the accurate state, use tracing_is_on() which is a little
594 * slower, but accurate.
595 */
9036990d
SR
596int tracing_is_enabled(void)
597{
10246fa3
SRRH
598 /*
599 * For quick access (irqsoff uses this in fast path), just
600 * return the mirror variable of the state of the ring buffer.
601 * It's a little racy, but we don't really care.
602 */
603 smp_rmb();
604 return !global_trace.buffer_disabled;
9036990d
SR
605}
606
4fcdae83 607/*
3928a8a2
SR
608 * trace_buf_size is the size in bytes that is allocated
609 * for a buffer. Note, the number of bytes is always rounded
610 * to page size.
3f5a54e3
SR
611 *
612 * This number is purposely set to a low number of 16384.
613 * If the dump on oops happens, it will be much appreciated
614 * to not have to wait for all that output. Anyway this can be
615 * boot time and run time configurable.
4fcdae83 616 */
3928a8a2 617#define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */
3f5a54e3 618
3928a8a2 619static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
bc0c38d1 620
4fcdae83 621/* trace_types holds a link list of available tracers. */
bc0c38d1 622static struct tracer *trace_types __read_mostly;
4fcdae83 623
4fcdae83
SR
624/*
625 * trace_types_lock is used to protect the trace_types list.
4fcdae83 626 */
a8227415 627DEFINE_MUTEX(trace_types_lock);
4fcdae83 628
7e53bd42
LJ
629/*
630 * serialize the access of the ring buffer
631 *
632 * ring buffer serializes readers, but it is low level protection.
633 * The validity of the events (which returns by ring_buffer_peek() ..etc)
634 * are not protected by ring buffer.
635 *
636 * The content of events may become garbage if we allow other process consumes
637 * these events concurrently:
638 * A) the page of the consumed events may become a normal page
639 * (not reader page) in ring buffer, and this page will be rewrited
640 * by events producer.
641 * B) The page of the consumed events may become a page for splice_read,
642 * and this page will be returned to system.
643 *
644 * These primitives allow multi process access to different cpu ring buffer
645 * concurrently.
646 *
647 * These primitives don't distinguish read-only and read-consume access.
648 * Multi read-only access are also serialized.
649 */
650
651#ifdef CONFIG_SMP
652static DECLARE_RWSEM(all_cpu_access_lock);
653static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
654
655static inline void trace_access_lock(int cpu)
656{
ae3b5093 657 if (cpu == RING_BUFFER_ALL_CPUS) {
7e53bd42
LJ
658 /* gain it for accessing the whole ring buffer. */
659 down_write(&all_cpu_access_lock);
660 } else {
661 /* gain it for accessing a cpu ring buffer. */
662
ae3b5093 663 /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
7e53bd42
LJ
664 down_read(&all_cpu_access_lock);
665
666 /* Secondly block other access to this @cpu ring buffer. */
667 mutex_lock(&per_cpu(cpu_access_lock, cpu));
668 }
669}
670
671static inline void trace_access_unlock(int cpu)
672{
ae3b5093 673 if (cpu == RING_BUFFER_ALL_CPUS) {
7e53bd42
LJ
674 up_write(&all_cpu_access_lock);
675 } else {
676 mutex_unlock(&per_cpu(cpu_access_lock, cpu));
677 up_read(&all_cpu_access_lock);
678 }
679}
680
681static inline void trace_access_lock_init(void)
682{
683 int cpu;
684
685 for_each_possible_cpu(cpu)
686 mutex_init(&per_cpu(cpu_access_lock, cpu));
687}
688
689#else
690
691static DEFINE_MUTEX(access_lock);
692
693static inline void trace_access_lock(int cpu)
694{
695 (void)cpu;
696 mutex_lock(&access_lock);
697}
698
699static inline void trace_access_unlock(int cpu)
700{
701 (void)cpu;
702 mutex_unlock(&access_lock);
703}
704
705static inline void trace_access_lock_init(void)
706{
707}
708
709#endif
710
d78a4614
SRRH
711#ifdef CONFIG_STACKTRACE
712static void __ftrace_trace_stack(struct ring_buffer *buffer,
713 unsigned long flags,
714 int skip, int pc, struct pt_regs *regs);
2d34f489
SRRH
715static inline void ftrace_trace_stack(struct trace_array *tr,
716 struct ring_buffer *buffer,
73dddbb5
SRRH
717 unsigned long flags,
718 int skip, int pc, struct pt_regs *regs);
ca475e83 719
d78a4614
SRRH
720#else
721static inline void __ftrace_trace_stack(struct ring_buffer *buffer,
722 unsigned long flags,
723 int skip, int pc, struct pt_regs *regs)
724{
725}
2d34f489
SRRH
726static inline void ftrace_trace_stack(struct trace_array *tr,
727 struct ring_buffer *buffer,
73dddbb5
SRRH
728 unsigned long flags,
729 int skip, int pc, struct pt_regs *regs)
ca475e83
SRRH
730{
731}
732
d78a4614
SRRH
733#endif
734
3e9a8aad
SRRH
735static __always_inline void
736trace_event_setup(struct ring_buffer_event *event,
737 int type, unsigned long flags, int pc)
738{
739 struct trace_entry *ent = ring_buffer_event_data(event);
740
741 tracing_generic_entry_update(ent, flags, pc);
742 ent->type = type;
743}
744
745static __always_inline struct ring_buffer_event *
746__trace_buffer_lock_reserve(struct ring_buffer *buffer,
747 int type,
748 unsigned long len,
749 unsigned long flags, int pc)
750{
751 struct ring_buffer_event *event;
752
753 event = ring_buffer_lock_reserve(buffer, len);
754 if (event != NULL)
755 trace_event_setup(event, type, flags, pc);
756
757 return event;
758}
759
2290f2c5 760void tracer_tracing_on(struct trace_array *tr)
10246fa3
SRRH
761{
762 if (tr->trace_buffer.buffer)
763 ring_buffer_record_on(tr->trace_buffer.buffer);
764 /*
765 * This flag is looked at when buffers haven't been allocated
766 * yet, or by some tracers (like irqsoff), that just want to
767 * know if the ring buffer has been disabled, but it can handle
768 * races of where it gets disabled but we still do a record.
769 * As the check is in the fast path of the tracers, it is more
770 * important to be fast than accurate.
771 */
772 tr->buffer_disabled = 0;
773 /* Make the flag seen by readers */
774 smp_wmb();
775}
776
499e5470
SR
777/**
778 * tracing_on - enable tracing buffers
779 *
780 * This function enables tracing buffers that may have been
781 * disabled with tracing_off.
782 */
783void tracing_on(void)
784{
10246fa3 785 tracer_tracing_on(&global_trace);
499e5470
SR
786}
787EXPORT_SYMBOL_GPL(tracing_on);
788
52ffabe3
SRRH
789
790static __always_inline void
791__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
792{
d914ba37 793 __this_cpu_write(trace_taskinfo_save, true);
52ffabe3
SRRH
794
795 /* If this is the temp buffer, we need to commit fully */
796 if (this_cpu_read(trace_buffered_event) == event) {
797 /* Length is in event->array[0] */
798 ring_buffer_write(buffer, event->array[0], &event->array[1]);
799 /* Release the temp buffer */
800 this_cpu_dec(trace_buffered_event_cnt);
801 } else
802 ring_buffer_unlock_commit(buffer, event);
803}
804
09ae7234
SRRH
805/**
806 * __trace_puts - write a constant string into the trace buffer.
807 * @ip: The address of the caller
808 * @str: The constant string to write
809 * @size: The size of the string.
810 */
811int __trace_puts(unsigned long ip, const char *str, int size)
812{
813 struct ring_buffer_event *event;
814 struct ring_buffer *buffer;
815 struct print_entry *entry;
816 unsigned long irq_flags;
817 int alloc;
8abfb872
J
818 int pc;
819
983f938a 820 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
f0160a5a
J
821 return 0;
822
8abfb872 823 pc = preempt_count();
09ae7234 824
3132e107
SRRH
825 if (unlikely(tracing_selftest_running || tracing_disabled))
826 return 0;
827
09ae7234
SRRH
828 alloc = sizeof(*entry) + size + 2; /* possible \n added */
829
830 local_save_flags(irq_flags);
831 buffer = global_trace.trace_buffer.buffer;
3e9a8aad
SRRH
832 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
833 irq_flags, pc);
09ae7234
SRRH
834 if (!event)
835 return 0;
836
837 entry = ring_buffer_event_data(event);
838 entry->ip = ip;
839
840 memcpy(&entry->buf, str, size);
841
842 /* Add a newline if necessary */
843 if (entry->buf[size - 1] != '\n') {
844 entry->buf[size] = '\n';
845 entry->buf[size + 1] = '\0';
846 } else
847 entry->buf[size] = '\0';
848
849 __buffer_unlock_commit(buffer, event);
2d34f489 850 ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
09ae7234
SRRH
851
852 return size;
853}
854EXPORT_SYMBOL_GPL(__trace_puts);
855
856/**
857 * __trace_bputs - write the pointer to a constant string into trace buffer
858 * @ip: The address of the caller
859 * @str: The constant string to write to the buffer to
860 */
861int __trace_bputs(unsigned long ip, const char *str)
862{
863 struct ring_buffer_event *event;
864 struct ring_buffer *buffer;
865 struct bputs_entry *entry;
866 unsigned long irq_flags;
867 int size = sizeof(struct bputs_entry);
8abfb872
J
868 int pc;
869
983f938a 870 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
f0160a5a
J
871 return 0;
872
8abfb872 873 pc = preempt_count();
09ae7234 874
3132e107
SRRH
875 if (unlikely(tracing_selftest_running || tracing_disabled))
876 return 0;
877
09ae7234
SRRH
878 local_save_flags(irq_flags);
879 buffer = global_trace.trace_buffer.buffer;
3e9a8aad
SRRH
880 event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
881 irq_flags, pc);
09ae7234
SRRH
882 if (!event)
883 return 0;
884
885 entry = ring_buffer_event_data(event);
886 entry->ip = ip;
887 entry->str = str;
888
889 __buffer_unlock_commit(buffer, event);
2d34f489 890 ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
09ae7234
SRRH
891
892 return 1;
893}
894EXPORT_SYMBOL_GPL(__trace_bputs);
895
ad909e21 896#ifdef CONFIG_TRACER_SNAPSHOT
cab50379 897static void tracing_snapshot_instance(struct trace_array *tr)
ad909e21 898{
ad909e21
SRRH
899 struct tracer *tracer = tr->current_trace;
900 unsigned long flags;
901
1b22e382
SRRH
902 if (in_nmi()) {
903 internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
904 internal_trace_puts("*** snapshot is being ignored ***\n");
905 return;
906 }
907
ad909e21 908 if (!tr->allocated_snapshot) {
ca268da6
SRRH
909 internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
910 internal_trace_puts("*** stopping trace here! ***\n");
ad909e21
SRRH
911 tracing_off();
912 return;
913 }
914
915 /* Note, snapshot can not be used when the tracer uses it */
916 if (tracer->use_max_tr) {
ca268da6
SRRH
917 internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
918 internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
ad909e21
SRRH
919 return;
920 }
921
922 local_irq_save(flags);
923 update_max_tr(tr, current, smp_processor_id());
924 local_irq_restore(flags);
925}
cab50379
SRV
926
927/**
928 * trace_snapshot - take a snapshot of the current buffer.
929 *
930 * This causes a swap between the snapshot buffer and the current live
931 * tracing buffer. You can use this to take snapshots of the live
932 * trace when some condition is triggered, but continue to trace.
933 *
934 * Note, make sure to allocate the snapshot with either
935 * a tracing_snapshot_alloc(), or by doing it manually
936 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
937 *
938 * If the snapshot buffer is not allocated, it will stop tracing.
939 * Basically making a permanent snapshot.
940 */
941void tracing_snapshot(void)
942{
943 struct trace_array *tr = &global_trace;
944
945 tracing_snapshot_instance(tr);
946}
1b22e382 947EXPORT_SYMBOL_GPL(tracing_snapshot);
ad909e21
SRRH
948
949static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
950 struct trace_buffer *size_buf, int cpu_id);
3209cff4
SRRH
951static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);
952
953static int alloc_snapshot(struct trace_array *tr)
954{
955 int ret;
956
957 if (!tr->allocated_snapshot) {
958
959 /* allocate spare buffer */
960 ret = resize_buffer_duplicate_size(&tr->max_buffer,
961 &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
962 if (ret < 0)
963 return ret;
964
965 tr->allocated_snapshot = true;
966 }
967
968 return 0;
969}
970
ad1438a0 971static void free_snapshot(struct trace_array *tr)
3209cff4
SRRH
972{
973 /*
974 * We don't free the ring buffer. instead, resize it because
975 * The max_tr ring buffer has some state (e.g. ring->clock) and
976 * we want preserve it.
977 */
978 ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
979 set_buffer_entries(&tr->max_buffer, 1);
980 tracing_reset_online_cpus(&tr->max_buffer);
981 tr->allocated_snapshot = false;
982}
ad909e21 983
93e31ffb
TZ
984/**
985 * tracing_alloc_snapshot - allocate snapshot buffer.
986 *
987 * This only allocates the snapshot buffer if it isn't already
988 * allocated - it doesn't also take a snapshot.
989 *
990 * This is meant to be used in cases where the snapshot buffer needs
991 * to be set up for events that can't sleep but need to be able to
992 * trigger a snapshot.
993 */
994int tracing_alloc_snapshot(void)
995{
996 struct trace_array *tr = &global_trace;
997 int ret;
998
999 ret = alloc_snapshot(tr);
1000 WARN_ON(ret < 0);
1001
1002 return ret;
1003}
1004EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1005
ad909e21
SRRH
1006/**
1007 * trace_snapshot_alloc - allocate and take a snapshot of the current buffer.
1008 *
1009 * This is similar to trace_snapshot(), but it will allocate the
1010 * snapshot buffer if it isn't already allocated. Use this only
1011 * where it is safe to sleep, as the allocation may sleep.
1012 *
1013 * This causes a swap between the snapshot buffer and the current live
1014 * tracing buffer. You can use this to take snapshots of the live
1015 * trace when some condition is triggered, but continue to trace.
1016 */
1017void tracing_snapshot_alloc(void)
1018{
ad909e21
SRRH
1019 int ret;
1020
93e31ffb
TZ
1021 ret = tracing_alloc_snapshot();
1022 if (ret < 0)
3209cff4 1023 return;
ad909e21
SRRH
1024
1025 tracing_snapshot();
1026}
1b22e382 1027EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
ad909e21
SRRH
1028#else
1029void tracing_snapshot(void)
1030{
1031 WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
1032}
1b22e382 1033EXPORT_SYMBOL_GPL(tracing_snapshot);
93e31ffb
TZ
1034int tracing_alloc_snapshot(void)
1035{
1036 WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
1037 return -ENODEV;
1038}
1039EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
ad909e21
SRRH
1040void tracing_snapshot_alloc(void)
1041{
1042 /* Give warning */
1043 tracing_snapshot();
1044}
1b22e382 1045EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
ad909e21
SRRH
1046#endif /* CONFIG_TRACER_SNAPSHOT */
1047
2290f2c5 1048void tracer_tracing_off(struct trace_array *tr)
10246fa3
SRRH
1049{
1050 if (tr->trace_buffer.buffer)
1051 ring_buffer_record_off(tr->trace_buffer.buffer);
1052 /*
1053 * This flag is looked at when buffers haven't been allocated
1054 * yet, or by some tracers (like irqsoff), that just want to
1055 * know if the ring buffer has been disabled, but it can handle
1056 * races of where it gets disabled but we still do a record.
1057 * As the check is in the fast path of the tracers, it is more
1058 * important to be fast than accurate.
1059 */
1060 tr->buffer_disabled = 1;
1061 /* Make the flag seen by readers */
1062 smp_wmb();
1063}
1064
499e5470
SR
1065/**
1066 * tracing_off - turn off tracing buffers
1067 *
1068 * This function stops the tracing buffers from recording data.
1069 * It does not disable any overhead the tracers themselves may
1070 * be causing. This function simply causes all recording to
1071 * the ring buffers to fail.
1072 */
1073void tracing_off(void)
1074{
10246fa3 1075 tracer_tracing_off(&global_trace);
499e5470
SR
1076}
1077EXPORT_SYMBOL_GPL(tracing_off);
1078
de7edd31
SRRH
1079void disable_trace_on_warning(void)
1080{
1081 if (__disable_trace_on_warning)
1082 tracing_off();
1083}
1084
10246fa3
SRRH
1085/**
1086 * tracer_tracing_is_on - show real state of ring buffer enabled
1087 * @tr : the trace array to know if ring buffer is enabled
1088 *
1089 * Shows real state of the ring buffer if it is enabled or not.
1090 */
e7c15cd8 1091int tracer_tracing_is_on(struct trace_array *tr)
10246fa3
SRRH
1092{
1093 if (tr->trace_buffer.buffer)
1094 return ring_buffer_record_is_on(tr->trace_buffer.buffer);
1095 return !tr->buffer_disabled;
1096}
1097
499e5470
SR
1098/**
1099 * tracing_is_on - show state of ring buffers enabled
1100 */
1101int tracing_is_on(void)
1102{
10246fa3 1103 return tracer_tracing_is_on(&global_trace);
499e5470
SR
1104}
1105EXPORT_SYMBOL_GPL(tracing_is_on);
1106
3928a8a2 1107static int __init set_buf_size(char *str)
bc0c38d1 1108{
3928a8a2 1109 unsigned long buf_size;
c6caeeb1 1110
bc0c38d1
SR
1111 if (!str)
1112 return 0;
9d612bef 1113 buf_size = memparse(str, &str);
c6caeeb1 1114 /* nr_entries can not be zero */
9d612bef 1115 if (buf_size == 0)
c6caeeb1 1116 return 0;
3928a8a2 1117 trace_buf_size = buf_size;
bc0c38d1
SR
1118 return 1;
1119}
3928a8a2 1120__setup("trace_buf_size=", set_buf_size);
bc0c38d1 1121
0e950173
TB
1122static int __init set_tracing_thresh(char *str)
1123{
87abb3b1 1124 unsigned long threshold;
0e950173
TB
1125 int ret;
1126
1127 if (!str)
1128 return 0;
bcd83ea6 1129 ret = kstrtoul(str, 0, &threshold);
0e950173
TB
1130 if (ret < 0)
1131 return 0;
87abb3b1 1132 tracing_thresh = threshold * 1000;
0e950173
TB
1133 return 1;
1134}
1135__setup("tracing_thresh=", set_tracing_thresh);
1136
57f50be1
SR
1137unsigned long nsecs_to_usecs(unsigned long nsecs)
1138{
1139 return nsecs / 1000;
1140}
1141
a3418a36
SRRH
1142/*
1143 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
f57a4143 1144 * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
a3418a36 1145 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
f57a4143 1146 * of strings in the order that the evals (enum) were defined.
a3418a36
SRRH
1147 */
1148#undef C
1149#define C(a, b) b
1150
4fcdae83 1151/* These must match the bit postions in trace_iterator_flags */
bc0c38d1 1152static const char *trace_options[] = {
a3418a36 1153 TRACE_FLAGS
bc0c38d1
SR
1154 NULL
1155};
1156
5079f326
Z
1157static struct {
1158 u64 (*func)(void);
1159 const char *name;
8be0709f 1160 int in_ns; /* is this clock in nanoseconds? */
5079f326 1161} trace_clocks[] = {
1b3e5c09
TG
1162 { trace_clock_local, "local", 1 },
1163 { trace_clock_global, "global", 1 },
1164 { trace_clock_counter, "counter", 0 },
e7fda6c4 1165 { trace_clock_jiffies, "uptime", 0 },
1b3e5c09
TG
1166 { trace_clock, "perf", 1 },
1167 { ktime_get_mono_fast_ns, "mono", 1 },
aabfa5f2 1168 { ktime_get_raw_fast_ns, "mono_raw", 1 },
80ec3552 1169 { ktime_get_boot_fast_ns, "boot", 1 },
8cbd9cc6 1170 ARCH_TRACE_CLOCKS
5079f326
Z
1171};
1172
b63f39ea 1173/*
1174 * trace_parser_get_init - gets the buffer for trace parser
1175 */
1176int trace_parser_get_init(struct trace_parser *parser, int size)
1177{
1178 memset(parser, 0, sizeof(*parser));
1179
1180 parser->buffer = kmalloc(size, GFP_KERNEL);
1181 if (!parser->buffer)
1182 return 1;
1183
1184 parser->size = size;
1185 return 0;
1186}
1187
1188/*
1189 * trace_parser_put - frees the buffer for trace parser
1190 */
1191void trace_parser_put(struct trace_parser *parser)
1192{
1193 kfree(parser->buffer);
0e684b65 1194 parser->buffer = NULL;
b63f39ea 1195}
1196
1197/*
1198 * trace_get_user - reads the user input string separated by space
1199 * (matched by isspace(ch))
1200 *
1201 * For each string found the 'struct trace_parser' is updated,
1202 * and the function returns.
1203 *
1204 * Returns number of bytes read.
1205 *
1206 * See kernel/trace/trace.h for 'struct trace_parser' details.
1207 */
1208int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
1209 size_t cnt, loff_t *ppos)
1210{
1211 char ch;
1212 size_t read = 0;
1213 ssize_t ret;
1214
1215 if (!*ppos)
1216 trace_parser_clear(parser);
1217
1218 ret = get_user(ch, ubuf++);
1219 if (ret)
1220 goto out;
1221
1222 read++;
1223 cnt--;
1224
1225 /*
1226 * The parser is not finished with the last write,
1227 * continue reading the user input without skipping spaces.
1228 */
1229 if (!parser->cont) {
1230 /* skip white space */
1231 while (cnt && isspace(ch)) {
1232 ret = get_user(ch, ubuf++);
1233 if (ret)
1234 goto out;
1235 read++;
1236 cnt--;
1237 }
1238
1239 /* only spaces were written */
1240 if (isspace(ch)) {
1241 *ppos += read;
1242 ret = read;
1243 goto out;
1244 }
1245
1246 parser->idx = 0;
1247 }
1248
1249 /* read the non-space input */
1250 while (cnt && !isspace(ch)) {
3c235a33 1251 if (parser->idx < parser->size - 1)
b63f39ea 1252 parser->buffer[parser->idx++] = ch;
1253 else {
1254 ret = -EINVAL;
1255 goto out;
1256 }
1257 ret = get_user(ch, ubuf++);
1258 if (ret)
1259 goto out;
1260 read++;
1261 cnt--;
1262 }
1263
1264 /* We either got finished input or we have to wait for another call. */
1265 if (isspace(ch)) {
1266 parser->buffer[parser->idx] = 0;
1267 parser->cont = false;
057db848 1268 } else if (parser->idx < parser->size - 1) {
b63f39ea 1269 parser->cont = true;
1270 parser->buffer[parser->idx++] = ch;
057db848
SR
1271 } else {
1272 ret = -EINVAL;
1273 goto out;
b63f39ea 1274 }
1275
1276 *ppos += read;
1277 ret = read;
1278
1279out:
1280 return ret;
1281}
1282
3a161d99 1283/* TODO add a seq_buf_to_buffer() */
b8b94265 1284static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
3c56819b
EGM
1285{
1286 int len;
3c56819b 1287
5ac48378 1288 if (trace_seq_used(s) <= s->seq.readpos)
3c56819b
EGM
1289 return -EBUSY;
1290
5ac48378 1291 len = trace_seq_used(s) - s->seq.readpos;
3c56819b
EGM
1292 if (cnt > len)
1293 cnt = len;
3a161d99 1294 memcpy(buf, s->buffer + s->seq.readpos, cnt);
3c56819b 1295
3a161d99 1296 s->seq.readpos += cnt;
3c56819b
EGM
1297 return cnt;
1298}
1299
0e950173
TB
1300unsigned long __read_mostly tracing_thresh;
1301
5d4a9dba 1302#ifdef CONFIG_TRACER_MAX_TRACE
5d4a9dba
SR
1303/*
1304 * Copy the new maximum trace into the separate maximum-trace
1305 * structure. (this way the maximum trace is permanently saved,
1306 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
1307 */
1308static void
1309__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1310{
12883efb
SRRH
1311 struct trace_buffer *trace_buf = &tr->trace_buffer;
1312 struct trace_buffer *max_buf = &tr->max_buffer;
1313 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
1314 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
5d4a9dba 1315
12883efb
SRRH
1316 max_buf->cpu = cpu;
1317 max_buf->time_start = data->preempt_timestamp;
5d4a9dba 1318
6d9b3fa5 1319 max_data->saved_latency = tr->max_latency;
8248ac05
SR
1320 max_data->critical_start = data->critical_start;
1321 max_data->critical_end = data->critical_end;
5d4a9dba 1322
1acaa1b2 1323 memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
8248ac05 1324 max_data->pid = tsk->pid;
f17a5194
SRRH
1325 /*
1326 * If tsk == current, then use current_uid(), as that does not use
1327 * RCU. The irq tracer can be called out of RCU scope.
1328 */
1329 if (tsk == current)
1330 max_data->uid = current_uid();
1331 else
1332 max_data->uid = task_uid(tsk);
1333
8248ac05
SR
1334 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1335 max_data->policy = tsk->policy;
1336 max_data->rt_priority = tsk->rt_priority;
5d4a9dba
SR
1337
1338 /* record this tasks comm */
1339 tracing_record_cmdline(tsk);
1340}
1341
4fcdae83
SR
1342/**
1343 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1344 * @tr: tracer
1345 * @tsk: the task with the latency
1346 * @cpu: The cpu that initiated the trace.
1347 *
1348 * Flip the buffers between the @tr and the max_tr and record information
1349 * about which task was the cause of this latency.
1350 */
e309b41d 1351void
bc0c38d1
SR
1352update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1353{
2721e72d 1354 struct ring_buffer *buf;
bc0c38d1 1355
2b6080f2 1356 if (tr->stop_count)
b8de7bd1
SR
1357 return;
1358
4c11d7ae 1359 WARN_ON_ONCE(!irqs_disabled());
34600f0e 1360
45ad21ca 1361 if (!tr->allocated_snapshot) {
debdd57f 1362 /* Only the nop tracer should hit this when disabling */
2b6080f2 1363 WARN_ON_ONCE(tr->current_trace != &nop_trace);
34600f0e 1364 return;
debdd57f 1365 }
34600f0e 1366
0b9b12c1 1367 arch_spin_lock(&tr->max_lock);
3928a8a2 1368
12883efb
SRRH
1369 buf = tr->trace_buffer.buffer;
1370 tr->trace_buffer.buffer = tr->max_buffer.buffer;
1371 tr->max_buffer.buffer = buf;
3928a8a2 1372
bc0c38d1 1373 __update_max_tr(tr, tsk, cpu);
0b9b12c1 1374 arch_spin_unlock(&tr->max_lock);
bc0c38d1
SR
1375}
1376
1377/**
1378 * update_max_tr_single - only copy one trace over, and reset the rest
1379 * @tr - tracer
1380 * @tsk - task with the latency
1381 * @cpu - the cpu of the buffer to copy.
4fcdae83
SR
1382 *
1383 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
bc0c38d1 1384 */
e309b41d 1385void
bc0c38d1
SR
1386update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1387{
3928a8a2 1388 int ret;
bc0c38d1 1389
2b6080f2 1390 if (tr->stop_count)
b8de7bd1
SR
1391 return;
1392
4c11d7ae 1393 WARN_ON_ONCE(!irqs_disabled());
6c24499f 1394 if (!tr->allocated_snapshot) {
2930e04d 1395 /* Only the nop tracer should hit this when disabling */
9e8529af 1396 WARN_ON_ONCE(tr->current_trace != &nop_trace);
ef710e10 1397 return;
2930e04d 1398 }
ef710e10 1399
0b9b12c1 1400 arch_spin_lock(&tr->max_lock);
bc0c38d1 1401
12883efb 1402 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);
3928a8a2 1403
e8165dbb
SR
1404 if (ret == -EBUSY) {
1405 /*
1406 * We failed to swap the buffer due to a commit taking
1407 * place on this CPU. We fail to record, but we reset
1408 * the max trace buffer (no one writes directly to it)
1409 * and flag that it failed.
1410 */
12883efb 1411 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
e8165dbb
SR
1412 "Failed to swap buffers due to commit in progress\n");
1413 }
1414
e8165dbb 1415 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
bc0c38d1
SR
1416
1417 __update_max_tr(tr, tsk, cpu);
0b9b12c1 1418 arch_spin_unlock(&tr->max_lock);
bc0c38d1 1419}
5d4a9dba 1420#endif /* CONFIG_TRACER_MAX_TRACE */
bc0c38d1 1421
e30f53aa 1422static int wait_on_pipe(struct trace_iterator *iter, bool full)
0d5c6e1c 1423{
15693458
SRRH
1424 /* Iterators are static, they should be filled or empty */
1425 if (trace_buffer_iter(iter, iter->cpu_file))
8b8b3683 1426 return 0;
0d5c6e1c 1427
e30f53aa
RV
1428 return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
1429 full);
0d5c6e1c
SR
1430}
1431
f4e781c0 1432#ifdef CONFIG_FTRACE_STARTUP_TEST
9afecfbb
SRV
1433static bool selftests_can_run;
1434
1435struct trace_selftests {
1436 struct list_head list;
1437 struct tracer *type;
1438};
1439
1440static LIST_HEAD(postponed_selftests);
1441
1442static int save_selftest(struct tracer *type)
1443{
1444 struct trace_selftests *selftest;
1445
1446 selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
1447 if (!selftest)
1448 return -ENOMEM;
1449
1450 selftest->type = type;
1451 list_add(&selftest->list, &postponed_selftests);
1452 return 0;
1453}
1454
f4e781c0
SRRH
1455static int run_tracer_selftest(struct tracer *type)
1456{
1457 struct trace_array *tr = &global_trace;
1458 struct tracer *saved_tracer = tr->current_trace;
1459 int ret;
0d5c6e1c 1460
f4e781c0
SRRH
1461 if (!type->selftest || tracing_selftest_disabled)
1462 return 0;
0d5c6e1c 1463
9afecfbb
SRV
1464 /*
1465 * If a tracer registers early in boot up (before scheduling is
1466 * initialized and such), then do not run its selftests yet.
1467 * Instead, run it a little later in the boot process.
1468 */
1469 if (!selftests_can_run)
1470 return save_selftest(type);
1471
0d5c6e1c 1472 /*
f4e781c0
SRRH
1473 * Run a selftest on this tracer.
1474 * Here we reset the trace buffer, and set the current
1475 * tracer to be this tracer. The tracer can then run some
1476 * internal tracing to verify that everything is in order.
1477 * If we fail, we do not register this tracer.
0d5c6e1c 1478 */
f4e781c0 1479 tracing_reset_online_cpus(&tr->trace_buffer);
0d5c6e1c 1480
f4e781c0
SRRH
1481 tr->current_trace = type;
1482
1483#ifdef CONFIG_TRACER_MAX_TRACE
1484 if (type->use_max_tr) {
1485 /* If we expanded the buffers, make sure the max is expanded too */
1486 if (ring_buffer_expanded)
1487 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1488 RING_BUFFER_ALL_CPUS);
1489 tr->allocated_snapshot = true;
1490 }
1491#endif
1492
1493 /* the test is responsible for initializing and enabling */
1494 pr_info("Testing tracer %s: ", type->name);
1495 ret = type->selftest(type, tr);
1496 /* the test is responsible for resetting too */
1497 tr->current_trace = saved_tracer;
1498 if (ret) {
1499 printk(KERN_CONT "FAILED!\n");
1500 /* Add the warning after printing 'FAILED' */
1501 WARN_ON(1);
1502 return -1;
1503 }
1504 /* Only reset on passing, to avoid touching corrupted buffers */
1505 tracing_reset_online_cpus(&tr->trace_buffer);
1506
1507#ifdef CONFIG_TRACER_MAX_TRACE
1508 if (type->use_max_tr) {
1509 tr->allocated_snapshot = false;
0d5c6e1c 1510
f4e781c0
SRRH
1511 /* Shrink the max buffer again */
1512 if (ring_buffer_expanded)
1513 ring_buffer_resize(tr->max_buffer.buffer, 1,
1514 RING_BUFFER_ALL_CPUS);
1515 }
1516#endif
1517
1518 printk(KERN_CONT "PASSED\n");
1519 return 0;
1520}
9afecfbb
SRV
1521
1522static __init int init_trace_selftests(void)
1523{
1524 struct trace_selftests *p, *n;
1525 struct tracer *t, **last;
1526 int ret;
1527
1528 selftests_can_run = true;
1529
1530 mutex_lock(&trace_types_lock);
1531
1532 if (list_empty(&postponed_selftests))
1533 goto out;
1534
1535 pr_info("Running postponed tracer tests:\n");
1536
1537 list_for_each_entry_safe(p, n, &postponed_selftests, list) {
1538 ret = run_tracer_selftest(p->type);
1539 /* If the test fails, then warn and remove from available_tracers */
1540 if (ret < 0) {
1541 WARN(1, "tracer: %s failed selftest, disabling\n",
1542 p->type->name);
1543 last = &trace_types;
1544 for (t = trace_types; t; t = t->next) {
1545 if (t == p->type) {
1546 *last = t->next;
1547 break;
1548 }
1549 last = &t->next;
1550 }
1551 }
1552 list_del(&p->list);
1553 kfree(p);
1554 }
1555
1556 out:
1557 mutex_unlock(&trace_types_lock);
1558
1559 return 0;
1560}
b9ef0326 1561core_initcall(init_trace_selftests);
f4e781c0
SRRH
1562#else
1563static inline int run_tracer_selftest(struct tracer *type)
1564{
1565 return 0;
0d5c6e1c 1566}
f4e781c0 1567#endif /* CONFIG_FTRACE_STARTUP_TEST */
0d5c6e1c 1568
41d9c0be
SRRH
1569static void add_tracer_options(struct trace_array *tr, struct tracer *t);
1570
a4d1e688
JW
1571static void __init apply_trace_boot_options(void);
1572
4fcdae83
SR
1573/**
1574 * register_tracer - register a tracer with the ftrace system.
1575 * @type - the plugin for the tracer
1576 *
1577 * Register a new plugin tracer.
1578 */
a4d1e688 1579int __init register_tracer(struct tracer *type)
bc0c38d1
SR
1580{
1581 struct tracer *t;
bc0c38d1
SR
1582 int ret = 0;
1583
1584 if (!type->name) {
1585 pr_info("Tracer must have a name\n");
1586 return -1;
1587 }
1588
24a461d5 1589 if (strlen(type->name) >= MAX_TRACER_SIZE) {
ee6c2c1b
LZ
1590 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
1591 return -1;
1592 }
1593
bc0c38d1 1594 mutex_lock(&trace_types_lock);
86fa2f60 1595
8e1b82e0
FW
1596 tracing_selftest_running = true;
1597
bc0c38d1
SR
1598 for (t = trace_types; t; t = t->next) {
1599 if (strcmp(type->name, t->name) == 0) {
1600 /* already found */
ee6c2c1b 1601 pr_info("Tracer %s already registered\n",
bc0c38d1
SR
1602 type->name);
1603 ret = -1;
1604 goto out;
1605 }
1606 }
1607
adf9f195
FW
1608 if (!type->set_flag)
1609 type->set_flag = &dummy_set_flag;
d39cdd20
CH
1610 if (!type->flags) {
1611 /*allocate a dummy tracer_flags*/
1612 type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
c8ca003b
CH
1613 if (!type->flags) {
1614 ret = -ENOMEM;
1615 goto out;
1616 }
d39cdd20
CH
1617 type->flags->val = 0;
1618 type->flags->opts = dummy_tracer_opt;
1619 } else
adf9f195
FW
1620 if (!type->flags->opts)
1621 type->flags->opts = dummy_tracer_opt;
6eaaa5d5 1622
d39cdd20
CH
1623 /* store the tracer for __set_tracer_option */
1624 type->flags->trace = type;
1625
f4e781c0
SRRH
1626 ret = run_tracer_selftest(type);
1627 if (ret < 0)
1628 goto out;
60a11774 1629
bc0c38d1
SR
1630 type->next = trace_types;
1631 trace_types = type;
41d9c0be 1632 add_tracer_options(&global_trace, type);
60a11774 1633
bc0c38d1 1634 out:
8e1b82e0 1635 tracing_selftest_running = false;
bc0c38d1
SR
1636 mutex_unlock(&trace_types_lock);
1637
dac74940
SR
1638 if (ret || !default_bootup_tracer)
1639 goto out_unlock;
1640
ee6c2c1b 1641 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
dac74940
SR
1642 goto out_unlock;
1643
1644 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
1645 /* Do we want this tracer to start on bootup? */
607e2ea1 1646 tracing_set_tracer(&global_trace, type->name);
dac74940 1647 default_bootup_tracer = NULL;
a4d1e688
JW
1648
1649 apply_trace_boot_options();
1650
dac74940 1651 /* disable other selftests, since this will break it. */
55034cd6 1652 tracing_selftest_disabled = true;
b2821ae6 1653#ifdef CONFIG_FTRACE_STARTUP_TEST
dac74940
SR
1654 printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
1655 type->name);
b2821ae6 1656#endif
b2821ae6 1657
dac74940 1658 out_unlock:
bc0c38d1
SR
1659 return ret;
1660}
1661
12883efb 1662void tracing_reset(struct trace_buffer *buf, int cpu)
f633903a 1663{
12883efb 1664 struct ring_buffer *buffer = buf->buffer;
f633903a 1665
a5416411
HT
1666 if (!buffer)
1667 return;
1668
f633903a
SR
1669 ring_buffer_record_disable(buffer);
1670
1671 /* Make sure all commits have finished */
1672 synchronize_sched();
68179686 1673 ring_buffer_reset_cpu(buffer, cpu);
f633903a
SR
1674
1675 ring_buffer_record_enable(buffer);
1676}
1677
12883efb 1678void tracing_reset_online_cpus(struct trace_buffer *buf)
213cc060 1679{
12883efb 1680 struct ring_buffer *buffer = buf->buffer;
213cc060
PE
1681 int cpu;
1682
a5416411
HT
1683 if (!buffer)
1684 return;
1685
621968cd
SR
1686 ring_buffer_record_disable(buffer);
1687
1688 /* Make sure all commits have finished */
1689 synchronize_sched();
1690
9457158b 1691 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
213cc060
PE
1692
1693 for_each_online_cpu(cpu)
68179686 1694 ring_buffer_reset_cpu(buffer, cpu);
621968cd
SR
1695
1696 ring_buffer_record_enable(buffer);
213cc060
PE
1697}
1698
09d8091c 1699/* Must have trace_types_lock held */
873c642f 1700void tracing_reset_all_online_cpus(void)
9456f0fa 1701{
873c642f
SRRH
1702 struct trace_array *tr;
1703
873c642f 1704 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
065e63f9
SRV
1705 if (!tr->clear_trace)
1706 continue;
1707 tr->clear_trace = false;
12883efb
SRRH
1708 tracing_reset_online_cpus(&tr->trace_buffer);
1709#ifdef CONFIG_TRACER_MAX_TRACE
1710 tracing_reset_online_cpus(&tr->max_buffer);
1711#endif
873c642f 1712 }
9456f0fa
SR
1713}
1714
d914ba37
JF
1715static int *tgid_map;
1716
939c7a4f 1717#define SAVED_CMDLINES_DEFAULT 128
2c7eea4c 1718#define NO_CMDLINE_MAP UINT_MAX
edc35bd7 1719static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
939c7a4f
YY
1720struct saved_cmdlines_buffer {
1721 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
1722 unsigned *map_cmdline_to_pid;
1723 unsigned cmdline_num;
1724 int cmdline_idx;
1725 char *saved_cmdlines;
1726};
1727static struct saved_cmdlines_buffer *savedcmd;
25b0b44a 1728
25b0b44a 1729/* temporary disable recording */
d914ba37 1730static atomic_t trace_record_taskinfo_disabled __read_mostly;
bc0c38d1 1731
939c7a4f
YY
1732static inline char *get_saved_cmdlines(int idx)
1733{
1734 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
1735}
1736
1737static inline void set_cmdline(int idx, const char *cmdline)
bc0c38d1 1738{
939c7a4f
YY
1739 memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
1740}
1741
1742static int allocate_cmdlines_buffer(unsigned int val,
1743 struct saved_cmdlines_buffer *s)
1744{
1745 s->map_cmdline_to_pid = kmalloc(val * sizeof(*s->map_cmdline_to_pid),
1746 GFP_KERNEL);
1747 if (!s->map_cmdline_to_pid)
1748 return -ENOMEM;
1749
1750 s->saved_cmdlines = kmalloc(val * TASK_COMM_LEN, GFP_KERNEL);
1751 if (!s->saved_cmdlines) {
1752 kfree(s->map_cmdline_to_pid);
1753 return -ENOMEM;
1754 }
1755
1756 s->cmdline_idx = 0;
1757 s->cmdline_num = val;
1758 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
1759 sizeof(s->map_pid_to_cmdline));
1760 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
1761 val * sizeof(*s->map_cmdline_to_pid));
1762
1763 return 0;
1764}
1765
1766static int trace_create_savedcmd(void)
1767{
1768 int ret;
1769
a6af8fbf 1770 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
939c7a4f
YY
1771 if (!savedcmd)
1772 return -ENOMEM;
1773
1774 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
1775 if (ret < 0) {
1776 kfree(savedcmd);
1777 savedcmd = NULL;
1778 return -ENOMEM;
1779 }
1780
1781 return 0;
bc0c38d1
SR
1782}
1783
b5130b1e
CE
1784int is_tracing_stopped(void)
1785{
2b6080f2 1786 return global_trace.stop_count;
b5130b1e
CE
1787}
1788
0f048701
SR
1789/**
1790 * tracing_start - quick start of the tracer
1791 *
1792 * If tracing is enabled but was stopped by tracing_stop,
1793 * this will start the tracer back up.
1794 */
1795void tracing_start(void)
1796{
1797 struct ring_buffer *buffer;
1798 unsigned long flags;
1799
1800 if (tracing_disabled)
1801 return;
1802
2b6080f2
SR
1803 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1804 if (--global_trace.stop_count) {
1805 if (global_trace.stop_count < 0) {
b06a8301
SR
1806 /* Someone screwed up their debugging */
1807 WARN_ON_ONCE(1);
2b6080f2 1808 global_trace.stop_count = 0;
b06a8301 1809 }
0f048701
SR
1810 goto out;
1811 }
1812
a2f80714 1813 /* Prevent the buffers from switching */
0b9b12c1 1814 arch_spin_lock(&global_trace.max_lock);
0f048701 1815
12883efb 1816 buffer = global_trace.trace_buffer.buffer;
0f048701
SR
1817 if (buffer)
1818 ring_buffer_record_enable(buffer);
1819
12883efb
SRRH
1820#ifdef CONFIG_TRACER_MAX_TRACE
1821 buffer = global_trace.max_buffer.buffer;
0f048701
SR
1822 if (buffer)
1823 ring_buffer_record_enable(buffer);
12883efb 1824#endif
0f048701 1825
0b9b12c1 1826 arch_spin_unlock(&global_trace.max_lock);
a2f80714 1827
0f048701 1828 out:
2b6080f2
SR
1829 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1830}
1831
1832static void tracing_start_tr(struct trace_array *tr)
1833{
1834 struct ring_buffer *buffer;
1835 unsigned long flags;
1836
1837 if (tracing_disabled)
1838 return;
1839
1840 /* If global, we need to also start the max tracer */
1841 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1842 return tracing_start();
1843
1844 raw_spin_lock_irqsave(&tr->start_lock, flags);
1845
1846 if (--tr->stop_count) {
1847 if (tr->stop_count < 0) {
1848 /* Someone screwed up their debugging */
1849 WARN_ON_ONCE(1);
1850 tr->stop_count = 0;
1851 }
1852 goto out;
1853 }
1854
12883efb 1855 buffer = tr->trace_buffer.buffer;
2b6080f2
SR
1856 if (buffer)
1857 ring_buffer_record_enable(buffer);
1858
1859 out:
1860 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
0f048701
SR
1861}
1862
1863/**
1864 * tracing_stop - quick stop of the tracer
1865 *
1866 * Light weight way to stop tracing. Use in conjunction with
1867 * tracing_start.
1868 */
1869void tracing_stop(void)
1870{
1871 struct ring_buffer *buffer;
1872 unsigned long flags;
1873
2b6080f2
SR
1874 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1875 if (global_trace.stop_count++)
0f048701
SR
1876 goto out;
1877
a2f80714 1878 /* Prevent the buffers from switching */
0b9b12c1 1879 arch_spin_lock(&global_trace.max_lock);
a2f80714 1880
12883efb 1881 buffer = global_trace.trace_buffer.buffer;
0f048701
SR
1882 if (buffer)
1883 ring_buffer_record_disable(buffer);
1884
12883efb
SRRH
1885#ifdef CONFIG_TRACER_MAX_TRACE
1886 buffer = global_trace.max_buffer.buffer;
0f048701
SR
1887 if (buffer)
1888 ring_buffer_record_disable(buffer);
12883efb 1889#endif
0f048701 1890
0b9b12c1 1891 arch_spin_unlock(&global_trace.max_lock);
a2f80714 1892
0f048701 1893 out:
2b6080f2
SR
1894 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1895}
1896
1897static void tracing_stop_tr(struct trace_array *tr)
1898{
1899 struct ring_buffer *buffer;
1900 unsigned long flags;
1901
1902 /* If global, we need to also stop the max tracer */
1903 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1904 return tracing_stop();
1905
1906 raw_spin_lock_irqsave(&tr->start_lock, flags);
1907 if (tr->stop_count++)
1908 goto out;
1909
12883efb 1910 buffer = tr->trace_buffer.buffer;
2b6080f2
SR
1911 if (buffer)
1912 ring_buffer_record_disable(buffer);
1913
1914 out:
1915 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
0f048701
SR
1916}
1917
379cfdac 1918static int trace_save_cmdline(struct task_struct *tsk)
bc0c38d1 1919{
a635cf04 1920 unsigned pid, idx;
bc0c38d1 1921
eaf260ac
JF
1922 /* treat recording of idle task as a success */
1923 if (!tsk->pid)
1924 return 1;
1925
1926 if (unlikely(tsk->pid > PID_MAX_DEFAULT))
379cfdac 1927 return 0;
bc0c38d1
SR
1928
1929 /*
1930 * It's not the end of the world if we don't get
1931 * the lock, but we also don't want to spin
1932 * nor do we want to disable interrupts,
1933 * so if we miss here, then better luck next time.
1934 */
0199c4e6 1935 if (!arch_spin_trylock(&trace_cmdline_lock))
379cfdac 1936 return 0;
bc0c38d1 1937
939c7a4f 1938 idx = savedcmd->map_pid_to_cmdline[tsk->pid];
2c7eea4c 1939 if (idx == NO_CMDLINE_MAP) {
939c7a4f 1940 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
bc0c38d1 1941
a635cf04
CE
1942 /*
1943 * Check whether the cmdline buffer at idx has a pid
1944 * mapped. We are going to overwrite that entry so we
1945 * need to clear the map_pid_to_cmdline. Otherwise we
1946 * would read the new comm for the old pid.
1947 */
939c7a4f 1948 pid = savedcmd->map_cmdline_to_pid[idx];
a635cf04 1949 if (pid != NO_CMDLINE_MAP)
939c7a4f 1950 savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
bc0c38d1 1951
939c7a4f
YY
1952 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
1953 savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
bc0c38d1 1954
939c7a4f 1955 savedcmd->cmdline_idx = idx;
bc0c38d1
SR
1956 }
1957
939c7a4f 1958 set_cmdline(idx, tsk->comm);
bc0c38d1 1959
0199c4e6 1960 arch_spin_unlock(&trace_cmdline_lock);
379cfdac
SRRH
1961
1962 return 1;
bc0c38d1
SR
1963}
1964
4c27e756 1965static void __trace_find_cmdline(int pid, char comm[])
bc0c38d1 1966{
bc0c38d1
SR
1967 unsigned map;
1968
4ca53085
SR
1969 if (!pid) {
1970 strcpy(comm, "<idle>");
1971 return;
1972 }
bc0c38d1 1973
74bf4076
SR
1974 if (WARN_ON_ONCE(pid < 0)) {
1975 strcpy(comm, "<XXX>");
1976 return;
1977 }
1978
4ca53085
SR
1979 if (pid > PID_MAX_DEFAULT) {
1980 strcpy(comm, "<...>");
1981 return;
1982 }
bc0c38d1 1983
939c7a4f 1984 map = savedcmd->map_pid_to_cmdline[pid];
50d88758 1985 if (map != NO_CMDLINE_MAP)
e09e2867 1986 strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
50d88758
TG
1987 else
1988 strcpy(comm, "<...>");
4c27e756
SRRH
1989}
1990
1991void trace_find_cmdline(int pid, char comm[])
1992{
1993 preempt_disable();
1994 arch_spin_lock(&trace_cmdline_lock);
1995
1996 __trace_find_cmdline(pid, comm);
bc0c38d1 1997
0199c4e6 1998 arch_spin_unlock(&trace_cmdline_lock);
5b6045a9 1999 preempt_enable();
bc0c38d1
SR
2000}
2001
d914ba37
JF
2002int trace_find_tgid(int pid)
2003{
2004 if (unlikely(!tgid_map || !pid || pid > PID_MAX_DEFAULT))
2005 return 0;
2006
2007 return tgid_map[pid];
2008}
2009
2010static int trace_save_tgid(struct task_struct *tsk)
2011{
bd45d34d
JF
2012 /* treat recording of idle task as a success */
2013 if (!tsk->pid)
2014 return 1;
2015
2016 if (unlikely(!tgid_map || tsk->pid > PID_MAX_DEFAULT))
d914ba37
JF
2017 return 0;
2018
2019 tgid_map[tsk->pid] = tsk->tgid;
2020 return 1;
2021}
2022
2023static bool tracing_record_taskinfo_skip(int flags)
2024{
2025 if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
2026 return true;
2027 if (atomic_read(&trace_record_taskinfo_disabled) || !tracing_is_on())
2028 return true;
2029 if (!__this_cpu_read(trace_taskinfo_save))
2030 return true;
2031 return false;
2032}
2033
2034/**
2035 * tracing_record_taskinfo - record the task info of a task
2036 *
2037 * @task - task to record
2038 * @flags - TRACE_RECORD_CMDLINE for recording comm
2039 * - TRACE_RECORD_TGID for recording tgid
2040 */
2041void tracing_record_taskinfo(struct task_struct *task, int flags)
2042{
29b1a8ad
JF
2043 bool done;
2044
d914ba37
JF
2045 if (tracing_record_taskinfo_skip(flags))
2046 return;
29b1a8ad
JF
2047
2048 /*
2049 * Record as much task information as possible. If some fail, continue
2050 * to try to record the others.
2051 */
2052 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task);
2053 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task);
2054
2055 /* If recording any information failed, retry again soon. */
2056 if (!done)
d914ba37
JF
2057 return;
2058
2059 __this_cpu_write(trace_taskinfo_save, false);
2060}
2061
2062/**
2063 * tracing_record_taskinfo_sched_switch - record task info for sched_switch
2064 *
2065 * @prev - previous task during sched_switch
2066 * @next - next task during sched_switch
2067 * @flags - TRACE_RECORD_CMDLINE for recording comm
2068 * TRACE_RECORD_TGID for recording tgid
2069 */
2070void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
2071 struct task_struct *next, int flags)
bc0c38d1 2072{
29b1a8ad
JF
2073 bool done;
2074
d914ba37
JF
2075 if (tracing_record_taskinfo_skip(flags))
2076 return;
2077
29b1a8ad
JF
2078 /*
2079 * Record as much task information as possible. If some fail, continue
2080 * to try to record the others.
2081 */
2082 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(prev);
2083 done &= !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(next);
2084 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(prev);
2085 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(next);
bc0c38d1 2086
29b1a8ad
JF
2087 /* If recording any information failed, retry again soon. */
2088 if (!done)
7ffbd48d
SR
2089 return;
2090
d914ba37
JF
2091 __this_cpu_write(trace_taskinfo_save, false);
2092}
2093
2094/* Helpers to record a specific task information */
2095void tracing_record_cmdline(struct task_struct *task)
2096{
2097 tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE);
2098}
2099
2100void tracing_record_tgid(struct task_struct *task)
2101{
2102 tracing_record_taskinfo(task, TRACE_RECORD_TGID);
bc0c38d1
SR
2103}
2104
af0009fc
SRV
2105/*
2106 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
2107 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
2108 * simplifies those functions and keeps them in sync.
2109 */
2110enum print_line_t trace_handle_return(struct trace_seq *s)
2111{
2112 return trace_seq_has_overflowed(s) ?
2113 TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
2114}
2115EXPORT_SYMBOL_GPL(trace_handle_return);
2116
45dcd8b8 2117void
38697053
SR
2118tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
2119 int pc)
bc0c38d1
SR
2120{
2121 struct task_struct *tsk = current;
bc0c38d1 2122
777e208d
SR
2123 entry->preempt_count = pc & 0xff;
2124 entry->pid = (tsk) ? tsk->pid : 0;
2125 entry->flags =
9244489a 2126#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
2e2ca155 2127 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
9244489a
SR
2128#else
2129 TRACE_FLAG_IRQS_NOSUPPORT |
2130#endif
7e6867bf 2131 ((pc & NMI_MASK ) ? TRACE_FLAG_NMI : 0) |
bc0c38d1 2132 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
c59f29cb 2133 ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
e5137b50
PZ
2134 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
2135 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
bc0c38d1 2136}
f413cdb8 2137EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
bc0c38d1 2138
e77405ad
SR
2139struct ring_buffer_event *
2140trace_buffer_lock_reserve(struct ring_buffer *buffer,
2141 int type,
2142 unsigned long len,
2143 unsigned long flags, int pc)
51a763dd 2144{
3e9a8aad 2145 return __trace_buffer_lock_reserve(buffer, type, len, flags, pc);
0fc1b09f
SRRH
2146}
2147
2148DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
2149DEFINE_PER_CPU(int, trace_buffered_event_cnt);
2150static int trace_buffered_event_ref;
2151
2152/**
2153 * trace_buffered_event_enable - enable buffering events
2154 *
2155 * When events are being filtered, it is quicker to use a temporary
2156 * buffer to write the event data into if there's a likely chance
2157 * that it will not be committed. The discard of the ring buffer
2158 * is not as fast as committing, and is much slower than copying
2159 * a commit.
2160 *
2161 * When an event is to be filtered, allocate per cpu buffers to
2162 * write the event data into, and if the event is filtered and discarded
2163 * it is simply dropped, otherwise, the entire data is to be committed
2164 * in one shot.
2165 */
2166void trace_buffered_event_enable(void)
2167{
2168 struct ring_buffer_event *event;
2169 struct page *page;
2170 int cpu;
51a763dd 2171
0fc1b09f
SRRH
2172 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2173
2174 if (trace_buffered_event_ref++)
2175 return;
2176
2177 for_each_tracing_cpu(cpu) {
2178 page = alloc_pages_node(cpu_to_node(cpu),
2179 GFP_KERNEL | __GFP_NORETRY, 0);
2180 if (!page)
2181 goto failed;
2182
2183 event = page_address(page);
2184 memset(event, 0, sizeof(*event));
2185
2186 per_cpu(trace_buffered_event, cpu) = event;
2187
2188 preempt_disable();
2189 if (cpu == smp_processor_id() &&
2190 this_cpu_read(trace_buffered_event) !=
2191 per_cpu(trace_buffered_event, cpu))
2192 WARN_ON_ONCE(1);
2193 preempt_enable();
51a763dd
ACM
2194 }
2195
0fc1b09f
SRRH
2196 return;
2197 failed:
2198 trace_buffered_event_disable();
2199}
2200
2201static void enable_trace_buffered_event(void *data)
2202{
2203 /* Probably not needed, but do it anyway */
2204 smp_rmb();
2205 this_cpu_dec(trace_buffered_event_cnt);
2206}
2207
2208static void disable_trace_buffered_event(void *data)
2209{
2210 this_cpu_inc(trace_buffered_event_cnt);
2211}
2212
2213/**
2214 * trace_buffered_event_disable - disable buffering events
2215 *
2216 * When a filter is removed, it is faster to not use the buffered
2217 * events, and to commit directly into the ring buffer. Free up
2218 * the temp buffers when there are no more users. This requires
2219 * special synchronization with current events.
2220 */
2221void trace_buffered_event_disable(void)
2222{
2223 int cpu;
2224
2225 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2226
2227 if (WARN_ON_ONCE(!trace_buffered_event_ref))
2228 return;
2229
2230 if (--trace_buffered_event_ref)
2231 return;
2232
2233 preempt_disable();
2234 /* For each CPU, set the buffer as used. */
2235 smp_call_function_many(tracing_buffer_mask,
2236 disable_trace_buffered_event, NULL, 1);
2237 preempt_enable();
2238
2239 /* Wait for all current users to finish */
2240 synchronize_sched();
2241
2242 for_each_tracing_cpu(cpu) {
2243 free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
2244 per_cpu(trace_buffered_event, cpu) = NULL;
2245 }
2246 /*
2247 * Make sure trace_buffered_event is NULL before clearing
2248 * trace_buffered_event_cnt.
2249 */
2250 smp_wmb();
2251
2252 preempt_disable();
2253 /* Do the work on each cpu */
2254 smp_call_function_many(tracing_buffer_mask,
2255 enable_trace_buffered_event, NULL, 1);
2256 preempt_enable();
51a763dd 2257}
51a763dd 2258
2c4a33ab
SRRH
2259static struct ring_buffer *temp_buffer;
2260
ccb469a1
SR
2261struct ring_buffer_event *
2262trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
7f1d2f82 2263 struct trace_event_file *trace_file,
ccb469a1
SR
2264 int type, unsigned long len,
2265 unsigned long flags, int pc)
2266{
2c4a33ab 2267 struct ring_buffer_event *entry;
0fc1b09f 2268 int val;
2c4a33ab 2269
7f1d2f82 2270 *current_rb = trace_file->tr->trace_buffer.buffer;
0fc1b09f
SRRH
2271
2272 if ((trace_file->flags &
2273 (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
2274 (entry = this_cpu_read(trace_buffered_event))) {
2275 /* Try to use the per cpu buffer first */
2276 val = this_cpu_inc_return(trace_buffered_event_cnt);
2277 if (val == 1) {
2278 trace_event_setup(entry, type, flags, pc);
2279 entry->array[0] = len;
2280 return entry;
2281 }
2282 this_cpu_dec(trace_buffered_event_cnt);
2283 }
2284
3e9a8aad
SRRH
2285 entry = __trace_buffer_lock_reserve(*current_rb,
2286 type, len, flags, pc);
2c4a33ab
SRRH
2287 /*
2288 * If tracing is off, but we have triggers enabled
2289 * we still need to look at the event data. Use the temp_buffer
2290 * to store the trace event for the tigger to use. It's recusive
2291 * safe and will not be recorded anywhere.
2292 */
5d6ad960 2293 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
2c4a33ab 2294 *current_rb = temp_buffer;
3e9a8aad
SRRH
2295 entry = __trace_buffer_lock_reserve(*current_rb,
2296 type, len, flags, pc);
2c4a33ab
SRRH
2297 }
2298 return entry;
ccb469a1
SR
2299}
2300EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
2301
42391745
SRRH
2302static DEFINE_SPINLOCK(tracepoint_iter_lock);
2303static DEFINE_MUTEX(tracepoint_printk_mutex);
2304
2305static void output_printk(struct trace_event_buffer *fbuffer)
2306{
2307 struct trace_event_call *event_call;
2308 struct trace_event *event;
2309 unsigned long flags;
2310 struct trace_iterator *iter = tracepoint_print_iter;
2311
2312 /* We should never get here if iter is NULL */
2313 if (WARN_ON_ONCE(!iter))
2314 return;
2315
2316 event_call = fbuffer->trace_file->event_call;
2317 if (!event_call || !event_call->event.funcs ||
2318 !event_call->event.funcs->trace)
2319 return;
2320
2321 event = &fbuffer->trace_file->event_call->event;
2322
2323 spin_lock_irqsave(&tracepoint_iter_lock, flags);
2324 trace_seq_init(&iter->seq);
2325 iter->ent = fbuffer->entry;
2326 event_call->event.funcs->trace(iter, 0, event);
2327 trace_seq_putc(&iter->seq, 0);
2328 printk("%s", iter->seq.buffer);
2329
2330 spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
2331}
2332
2333int tracepoint_printk_sysctl(struct ctl_table *table, int write,
2334 void __user *buffer, size_t *lenp,
2335 loff_t *ppos)
2336{
2337 int save_tracepoint_printk;
2338 int ret;
2339
2340 mutex_lock(&tracepoint_printk_mutex);
2341 save_tracepoint_printk = tracepoint_printk;
2342
2343 ret = proc_dointvec(table, write, buffer, lenp, ppos);
2344
2345 /*
2346 * This will force exiting early, as tracepoint_printk
2347 * is always zero when tracepoint_printk_iter is not allocated
2348 */
2349 if (!tracepoint_print_iter)
2350 tracepoint_printk = 0;
2351
2352 if (save_tracepoint_printk == tracepoint_printk)
2353 goto out;
2354
2355 if (tracepoint_printk)
2356 static_key_enable(&tracepoint_printk_key.key);
2357 else
2358 static_key_disable(&tracepoint_printk_key.key);
2359
2360 out:
2361 mutex_unlock(&tracepoint_printk_mutex);
2362
2363 return ret;
2364}
2365
2366void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
2367{
2368 if (static_key_false(&tracepoint_printk_key.key))
2369 output_printk(fbuffer);
2370
2371 event_trigger_unlock_commit(fbuffer->trace_file, fbuffer->buffer,
2372 fbuffer->event, fbuffer->entry,
2373 fbuffer->flags, fbuffer->pc);
2374}
2375EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
2376
b7f0c959
SRRH
2377void trace_buffer_unlock_commit_regs(struct trace_array *tr,
2378 struct ring_buffer *buffer,
0d5c6e1c
SR
2379 struct ring_buffer_event *event,
2380 unsigned long flags, int pc,
2381 struct pt_regs *regs)
1fd8df2c 2382{
7ffbd48d 2383 __buffer_unlock_commit(buffer, event);
1fd8df2c 2384
be54f69c
SRRH
2385 /*
2386 * If regs is not set, then skip the following callers:
2387 * trace_buffer_unlock_commit_regs
2388 * event_trigger_unlock_commit
2389 * trace_event_buffer_commit
2390 * trace_event_raw_event_sched_switch
2391 * Note, we can still get here via blktrace, wakeup tracer
2392 * and mmiotrace, but that's ok if they lose a function or
2393 * two. They are that meaningful.
2394 */
2395 ftrace_trace_stack(tr, buffer, flags, regs ? 0 : 4, pc, regs);
1fd8df2c
MH
2396 ftrace_trace_userstack(buffer, flags, pc);
2397}
1fd8df2c 2398
52ffabe3
SRRH
2399/*
2400 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
2401 */
2402void
2403trace_buffer_unlock_commit_nostack(struct ring_buffer *buffer,
2404 struct ring_buffer_event *event)
2405{
2406 __buffer_unlock_commit(buffer, event);
2407}
2408
478409dd
CZ
2409static void
2410trace_process_export(struct trace_export *export,
2411 struct ring_buffer_event *event)
2412{
2413 struct trace_entry *entry;
2414 unsigned int size = 0;
2415
2416 entry = ring_buffer_event_data(event);
2417 size = ring_buffer_event_length(event);
2418 export->write(entry, size);
2419}
2420
2421static DEFINE_MUTEX(ftrace_export_lock);
2422
2423static struct trace_export __rcu *ftrace_exports_list __read_mostly;
2424
2425static DEFINE_STATIC_KEY_FALSE(ftrace_exports_enabled);
2426
2427static inline void ftrace_exports_enable(void)
2428{
2429 static_branch_enable(&ftrace_exports_enabled);
2430}
2431
2432static inline void ftrace_exports_disable(void)
2433{
2434 static_branch_disable(&ftrace_exports_enabled);
2435}
2436
2437void ftrace_exports(struct ring_buffer_event *event)
2438{
2439 struct trace_export *export;
2440
2441 preempt_disable_notrace();
2442
2443 export = rcu_dereference_raw_notrace(ftrace_exports_list);
2444 while (export) {
2445 trace_process_export(export, event);
2446 export = rcu_dereference_raw_notrace(export->next);
2447 }
2448
2449 preempt_enable_notrace();
2450}
2451
2452static inline void
2453add_trace_export(struct trace_export **list, struct trace_export *export)
2454{
2455 rcu_assign_pointer(export->next, *list);
2456 /*
2457 * We are entering export into the list but another
2458 * CPU might be walking that list. We need to make sure
2459 * the export->next pointer is valid before another CPU sees
2460 * the export pointer included into the list.
2461 */
2462 rcu_assign_pointer(*list, export);
2463}
2464
2465static inline int
2466rm_trace_export(struct trace_export **list, struct trace_export *export)
2467{
2468 struct trace_export **p;
2469
2470 for (p = list; *p != NULL; p = &(*p)->next)
2471 if (*p == export)
2472 break;
2473
2474 if (*p != export)
2475 return -1;
2476
2477 rcu_assign_pointer(*p, (*p)->next);
2478
2479 return 0;
2480}
2481
2482static inline void
2483add_ftrace_export(struct trace_export **list, struct trace_export *export)
2484{
2485 if (*list == NULL)
2486 ftrace_exports_enable();
2487
2488 add_trace_export(list, export);
2489}
2490
2491static inline int
2492rm_ftrace_export(struct trace_export **list, struct trace_export *export)
2493{
2494 int ret;
2495
2496 ret = rm_trace_export(list, export);
2497 if (*list == NULL)
2498 ftrace_exports_disable();
2499
2500 return ret;
2501}
2502
2503int register_ftrace_export(struct trace_export *export)
2504{
2505 if (WARN_ON_ONCE(!export->write))
2506 return -1;
2507
2508 mutex_lock(&ftrace_export_lock);
2509
2510 add_ftrace_export(&ftrace_exports_list, export);
2511
2512 mutex_unlock(&ftrace_export_lock);
2513
2514 return 0;
2515}
2516EXPORT_SYMBOL_GPL(register_ftrace_export);
2517
2518int unregister_ftrace_export(struct trace_export *export)
2519{
2520 int ret;
2521
2522 mutex_lock(&ftrace_export_lock);
2523
2524 ret = rm_ftrace_export(&ftrace_exports_list, export);
2525
2526 mutex_unlock(&ftrace_export_lock);
2527
2528 return ret;
2529}
2530EXPORT_SYMBOL_GPL(unregister_ftrace_export);
2531
e309b41d 2532void
7be42151 2533trace_function(struct trace_array *tr,
38697053
SR
2534 unsigned long ip, unsigned long parent_ip, unsigned long flags,
2535 int pc)
bc0c38d1 2536{
2425bcb9 2537 struct trace_event_call *call = &event_function;
12883efb 2538 struct ring_buffer *buffer = tr->trace_buffer.buffer;
3928a8a2 2539 struct ring_buffer_event *event;
777e208d 2540 struct ftrace_entry *entry;
bc0c38d1 2541
3e9a8aad
SRRH
2542 event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
2543 flags, pc);
3928a8a2
SR
2544 if (!event)
2545 return;
2546 entry = ring_buffer_event_data(event);
777e208d
SR
2547 entry->ip = ip;
2548 entry->parent_ip = parent_ip;
e1112b4d 2549
478409dd
CZ
2550 if (!call_filter_check_discard(call, entry, buffer, event)) {
2551 if (static_branch_unlikely(&ftrace_exports_enabled))
2552 ftrace_exports(event);
7ffbd48d 2553 __buffer_unlock_commit(buffer, event);
478409dd 2554 }
bc0c38d1
SR
2555}
2556
c0a0d0d3 2557#ifdef CONFIG_STACKTRACE
4a9bd3f1
SR
2558
2559#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
2560struct ftrace_stack {
2561 unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
2562};
2563
2564static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
2565static DEFINE_PER_CPU(int, ftrace_stack_reserve);
2566
e77405ad 2567static void __ftrace_trace_stack(struct ring_buffer *buffer,
53614991 2568 unsigned long flags,
1fd8df2c 2569 int skip, int pc, struct pt_regs *regs)
86387f7e 2570{
2425bcb9 2571 struct trace_event_call *call = &event_kernel_stack;
3928a8a2 2572 struct ring_buffer_event *event;
777e208d 2573 struct stack_entry *entry;
86387f7e 2574 struct stack_trace trace;
4a9bd3f1
SR
2575 int use_stack;
2576 int size = FTRACE_STACK_ENTRIES;
2577
2578 trace.nr_entries = 0;
2579 trace.skip = skip;
2580
be54f69c
SRRH
2581 /*
2582 * Add two, for this function and the call to save_stack_trace()
2583 * If regs is set, then these functions will not be in the way.
2584 */
2585 if (!regs)
2586 trace.skip += 2;
2587
4a9bd3f1
SR
2588 /*
2589 * Since events can happen in NMIs there's no safe way to
2590 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
2591 * or NMI comes in, it will just have to use the default
2592 * FTRACE_STACK_SIZE.
2593 */
2594 preempt_disable_notrace();
2595
82146529 2596 use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
4a9bd3f1
SR
2597 /*
2598 * We don't need any atomic variables, just a barrier.
2599 * If an interrupt comes in, we don't care, because it would
2600 * have exited and put the counter back to what we want.
2601 * We just need a barrier to keep gcc from moving things
2602 * around.
2603 */
2604 barrier();
2605 if (use_stack == 1) {
bdffd893 2606 trace.entries = this_cpu_ptr(ftrace_stack.calls);
4a9bd3f1
SR
2607 trace.max_entries = FTRACE_STACK_MAX_ENTRIES;
2608
2609 if (regs)
2610 save_stack_trace_regs(regs, &trace);
2611 else
2612 save_stack_trace(&trace);
2613
2614 if (trace.nr_entries > size)
2615 size = trace.nr_entries;
2616 } else
2617 /* From now on, use_stack is a boolean */
2618 use_stack = 0;
2619
2620 size *= sizeof(unsigned long);
86387f7e 2621
3e9a8aad
SRRH
2622 event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
2623 sizeof(*entry) + size, flags, pc);
3928a8a2 2624 if (!event)
4a9bd3f1
SR
2625 goto out;
2626 entry = ring_buffer_event_data(event);
86387f7e 2627
4a9bd3f1
SR
2628 memset(&entry->caller, 0, size);
2629
2630 if (use_stack)
2631 memcpy(&entry->caller, trace.entries,
2632 trace.nr_entries * sizeof(unsigned long));
2633 else {
2634 trace.max_entries = FTRACE_STACK_ENTRIES;
2635 trace.entries = entry->caller;
2636 if (regs)
2637 save_stack_trace_regs(regs, &trace);
2638 else
2639 save_stack_trace(&trace);
2640 }
2641
2642 entry->size = trace.nr_entries;
86387f7e 2643
f306cc82 2644 if (!call_filter_check_discard(call, entry, buffer, event))
7ffbd48d 2645 __buffer_unlock_commit(buffer, event);
4a9bd3f1
SR
2646
2647 out:
2648 /* Again, don't let gcc optimize things here */
2649 barrier();
82146529 2650 __this_cpu_dec(ftrace_stack_reserve);
4a9bd3f1
SR
2651 preempt_enable_notrace();
2652
f0a920d5
IM
2653}
2654
2d34f489
SRRH
2655static inline void ftrace_trace_stack(struct trace_array *tr,
2656 struct ring_buffer *buffer,
73dddbb5
SRRH
2657 unsigned long flags,
2658 int skip, int pc, struct pt_regs *regs)
53614991 2659{
2d34f489 2660 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
53614991
SR
2661 return;
2662
73dddbb5 2663 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
53614991
SR
2664}
2665
c0a0d0d3
FW
2666void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
2667 int pc)
38697053 2668{
a33d7d94
SRV
2669 struct ring_buffer *buffer = tr->trace_buffer.buffer;
2670
2671 if (rcu_is_watching()) {
2672 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
2673 return;
2674 }
2675
2676 /*
2677 * When an NMI triggers, RCU is enabled via rcu_nmi_enter(),
2678 * but if the above rcu_is_watching() failed, then the NMI
2679 * triggered someplace critical, and rcu_irq_enter() should
2680 * not be called from NMI.
2681 */
2682 if (unlikely(in_nmi()))
2683 return;
2684
2685 /*
2686 * It is possible that a function is being traced in a
2687 * location that RCU is not watching. A call to
2688 * rcu_irq_enter() will make sure that it is, but there's
2689 * a few internal rcu functions that could be traced
2690 * where that wont work either. In those cases, we just
2691 * do nothing.
2692 */
2693 if (unlikely(rcu_irq_enter_disabled()))
2694 return;
2695
2696 rcu_irq_enter_irqson();
2697 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
2698 rcu_irq_exit_irqson();
38697053
SR
2699}
2700
03889384
SR
2701/**
2702 * trace_dump_stack - record a stack back trace in the trace buffer
c142be8e 2703 * @skip: Number of functions to skip (helper handlers)
03889384 2704 */
c142be8e 2705void trace_dump_stack(int skip)
03889384
SR
2706{
2707 unsigned long flags;
2708
2709 if (tracing_disabled || tracing_selftest_running)
e36c5458 2710 return;
03889384
SR
2711
2712 local_save_flags(flags);
2713
c142be8e
SRRH
2714 /*
2715 * Skip 3 more, seems to get us at the caller of
2716 * this function.
2717 */
2718 skip += 3;
2719 __ftrace_trace_stack(global_trace.trace_buffer.buffer,
2720 flags, skip, preempt_count(), NULL);
03889384
SR
2721}
2722
91e86e56
SR
2723static DEFINE_PER_CPU(int, user_stack_count);
2724
e77405ad
SR
2725void
2726ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
02b67518 2727{
2425bcb9 2728 struct trace_event_call *call = &event_user_stack;
8d7c6a96 2729 struct ring_buffer_event *event;
02b67518
TE
2730 struct userstack_entry *entry;
2731 struct stack_trace trace;
02b67518 2732
983f938a 2733 if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
02b67518
TE
2734 return;
2735
b6345879
SR
2736 /*
2737 * NMIs can not handle page faults, even with fix ups.
2738 * The save user stack can (and often does) fault.
2739 */
2740 if (unlikely(in_nmi()))
2741 return;
02b67518 2742
91e86e56
SR
2743 /*
2744 * prevent recursion, since the user stack tracing may
2745 * trigger other kernel events.
2746 */
2747 preempt_disable();
2748 if (__this_cpu_read(user_stack_count))
2749 goto out;
2750
2751 __this_cpu_inc(user_stack_count);
2752
3e9a8aad
SRRH
2753 event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
2754 sizeof(*entry), flags, pc);
02b67518 2755 if (!event)
1dbd1951 2756 goto out_drop_count;
02b67518 2757 entry = ring_buffer_event_data(event);
02b67518 2758
48659d31 2759 entry->tgid = current->tgid;
02b67518
TE
2760 memset(&entry->caller, 0, sizeof(entry->caller));
2761
2762 trace.nr_entries = 0;
2763 trace.max_entries = FTRACE_STACK_ENTRIES;
2764 trace.skip = 0;
2765 trace.entries = entry->caller;
2766
2767 save_stack_trace_user(&trace);
f306cc82 2768 if (!call_filter_check_discard(call, entry, buffer, event))
7ffbd48d 2769 __buffer_unlock_commit(buffer, event);
91e86e56 2770
1dbd1951 2771 out_drop_count:
91e86e56 2772 __this_cpu_dec(user_stack_count);
91e86e56
SR
2773 out:
2774 preempt_enable();
02b67518
TE
2775}
2776
4fd27358
HE
2777#ifdef UNUSED
2778static void __trace_userstack(struct trace_array *tr, unsigned long flags)
02b67518 2779{
7be42151 2780 ftrace_trace_userstack(tr, flags, preempt_count());
02b67518 2781}
4fd27358 2782#endif /* UNUSED */
02b67518 2783
c0a0d0d3
FW
2784#endif /* CONFIG_STACKTRACE */
2785
07d777fe
SR
2786/* created for use with alloc_percpu */
2787struct trace_buffer_struct {
e2ace001
AL
2788 int nesting;
2789 char buffer[4][TRACE_BUF_SIZE];
07d777fe
SR
2790};
2791
2792static struct trace_buffer_struct *trace_percpu_buffer;
07d777fe
SR
2793
2794/*
e2ace001
AL
2795 * Thise allows for lockless recording. If we're nested too deeply, then
2796 * this returns NULL.
07d777fe
SR
2797 */
2798static char *get_trace_buf(void)
2799{
e2ace001 2800 struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
07d777fe 2801
e2ace001 2802 if (!buffer || buffer->nesting >= 4)
07d777fe
SR
2803 return NULL;
2804
3d9622c1
SRV
2805 buffer->nesting++;
2806
2807 /* Interrupts must see nesting incremented before we use the buffer */
2808 barrier();
2809 return &buffer->buffer[buffer->nesting][0];
e2ace001
AL
2810}
2811
2812static void put_trace_buf(void)
2813{
3d9622c1
SRV
2814 /* Don't let the decrement of nesting leak before this */
2815 barrier();
e2ace001 2816 this_cpu_dec(trace_percpu_buffer->nesting);
07d777fe
SR
2817}
2818
2819static int alloc_percpu_trace_buffer(void)
2820{
2821 struct trace_buffer_struct *buffers;
07d777fe
SR
2822
2823 buffers = alloc_percpu(struct trace_buffer_struct);
e2ace001
AL
2824 if (WARN(!buffers, "Could not allocate percpu trace_printk buffer"))
2825 return -ENOMEM;
07d777fe
SR
2826
2827 trace_percpu_buffer = buffers;
07d777fe 2828 return 0;
07d777fe
SR
2829}
2830
81698831
SR
2831static int buffers_allocated;
2832
07d777fe
SR
2833void trace_printk_init_buffers(void)
2834{
07d777fe
SR
2835 if (buffers_allocated)
2836 return;
2837
2838 if (alloc_percpu_trace_buffer())
2839 return;
2840
2184db46
SR
2841 /* trace_printk() is for debug use only. Don't use it in production. */
2842
a395d6a7
JP
2843 pr_warn("\n");
2844 pr_warn("**********************************************************\n");
2845 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2846 pr_warn("** **\n");
2847 pr_warn("** trace_printk() being used. Allocating extra memory. **\n");
2848 pr_warn("** **\n");
2849 pr_warn("** This means that this is a DEBUG kernel and it is **\n");
2850 pr_warn("** unsafe for production use. **\n");
2851 pr_warn("** **\n");
2852 pr_warn("** If you see this message and you are not debugging **\n");
2853 pr_warn("** the kernel, report this immediately to your vendor! **\n");
2854 pr_warn("** **\n");
2855 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2856 pr_warn("**********************************************************\n");
07d777fe 2857
b382ede6
SR
2858 /* Expand the buffers to set size */
2859 tracing_update_buffers();
2860
07d777fe 2861 buffers_allocated = 1;
81698831
SR
2862
2863 /*
2864 * trace_printk_init_buffers() can be called by modules.
2865 * If that happens, then we need to start cmdline recording
2866 * directly here. If the global_trace.buffer is already
2867 * allocated here, then this was called by module code.
2868 */
12883efb 2869 if (global_trace.trace_buffer.buffer)
81698831
SR
2870 tracing_start_cmdline_record();
2871}
2872
2873void trace_printk_start_comm(void)
2874{
2875 /* Start tracing comms if trace printk is set */
2876 if (!buffers_allocated)
2877 return;
2878 tracing_start_cmdline_record();
2879}
2880
2881static void trace_printk_start_stop_comm(int enabled)
2882{
2883 if (!buffers_allocated)
2884 return;
2885
2886 if (enabled)
2887 tracing_start_cmdline_record();
2888 else
2889 tracing_stop_cmdline_record();
07d777fe
SR
2890}
2891
769b0441 2892/**
48ead020 2893 * trace_vbprintk - write binary msg to tracing buffer
769b0441
FW
2894 *
2895 */
40ce74f1 2896int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
769b0441 2897{
2425bcb9 2898 struct trace_event_call *call = &event_bprint;
769b0441 2899 struct ring_buffer_event *event;
e77405ad 2900 struct ring_buffer *buffer;
769b0441 2901 struct trace_array *tr = &global_trace;
48ead020 2902 struct bprint_entry *entry;
769b0441 2903 unsigned long flags;
07d777fe
SR
2904 char *tbuffer;
2905 int len = 0, size, pc;
769b0441
FW
2906
2907 if (unlikely(tracing_selftest_running || tracing_disabled))
2908 return 0;
2909
2910 /* Don't pollute graph traces with trace_vprintk internals */
2911 pause_graph_tracing();
2912
2913 pc = preempt_count();
5168ae50 2914 preempt_disable_notrace();
769b0441 2915
07d777fe
SR
2916 tbuffer = get_trace_buf();
2917 if (!tbuffer) {
2918 len = 0;
e2ace001 2919 goto out_nobuffer;
07d777fe 2920 }
769b0441 2921
07d777fe 2922 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
769b0441 2923
07d777fe
SR
2924 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
2925 goto out;
769b0441 2926
07d777fe 2927 local_save_flags(flags);
769b0441 2928 size = sizeof(*entry) + sizeof(u32) * len;
12883efb 2929 buffer = tr->trace_buffer.buffer;
3e9a8aad
SRRH
2930 event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
2931 flags, pc);
769b0441 2932 if (!event)
07d777fe 2933 goto out;
769b0441
FW
2934 entry = ring_buffer_event_data(event);
2935 entry->ip = ip;
769b0441
FW
2936 entry->fmt = fmt;
2937
07d777fe 2938 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
f306cc82 2939 if (!call_filter_check_discard(call, entry, buffer, event)) {
7ffbd48d 2940 __buffer_unlock_commit(buffer, event);
2d34f489 2941 ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
d931369b 2942 }
769b0441 2943
769b0441 2944out:
e2ace001
AL
2945 put_trace_buf();
2946
2947out_nobuffer:
5168ae50 2948 preempt_enable_notrace();
769b0441
FW
2949 unpause_graph_tracing();
2950
2951 return len;
2952}
48ead020
FW
2953EXPORT_SYMBOL_GPL(trace_vbprintk);
2954
12883efb
SRRH
2955static int
2956__trace_array_vprintk(struct ring_buffer *buffer,
2957 unsigned long ip, const char *fmt, va_list args)
48ead020 2958{
2425bcb9 2959 struct trace_event_call *call = &event_print;
48ead020 2960 struct ring_buffer_event *event;
07d777fe 2961 int len = 0, size, pc;
48ead020 2962 struct print_entry *entry;
07d777fe
SR
2963 unsigned long flags;
2964 char *tbuffer;
48ead020
FW
2965
2966 if (tracing_disabled || tracing_selftest_running)
2967 return 0;
2968
07d777fe
SR
2969 /* Don't pollute graph traces with trace_vprintk internals */
2970 pause_graph_tracing();
2971
48ead020
FW
2972 pc = preempt_count();
2973 preempt_disable_notrace();
48ead020 2974
07d777fe
SR
2975
2976 tbuffer = get_trace_buf();
2977 if (!tbuffer) {
2978 len = 0;
e2ace001 2979 goto out_nobuffer;
07d777fe 2980 }
48ead020 2981
3558a5ac 2982 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
48ead020 2983
07d777fe 2984 local_save_flags(flags);
48ead020 2985 size = sizeof(*entry) + len + 1;
3e9a8aad
SRRH
2986 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
2987 flags, pc);
48ead020 2988 if (!event)
07d777fe 2989 goto out;
48ead020 2990 entry = ring_buffer_event_data(event);
c13d2f7c 2991 entry->ip = ip;
48ead020 2992
3558a5ac 2993 memcpy(&entry->buf, tbuffer, len + 1);
f306cc82 2994 if (!call_filter_check_discard(call, entry, buffer, event)) {
7ffbd48d 2995 __buffer_unlock_commit(buffer, event);
2d34f489 2996 ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
d931369b 2997 }
e2ace001
AL
2998
2999out:
3000 put_trace_buf();
3001
3002out_nobuffer:
48ead020 3003 preempt_enable_notrace();
07d777fe 3004 unpause_graph_tracing();
48ead020
FW
3005
3006 return len;
3007}
659372d3 3008
12883efb
SRRH
3009int trace_array_vprintk(struct trace_array *tr,
3010 unsigned long ip, const char *fmt, va_list args)
3011{
3012 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
3013}
3014
3015int trace_array_printk(struct trace_array *tr,
3016 unsigned long ip, const char *fmt, ...)
3017{
3018 int ret;
3019 va_list ap;
3020
983f938a 3021 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
12883efb
SRRH
3022 return 0;
3023
3024 va_start(ap, fmt);
3025 ret = trace_array_vprintk(tr, ip, fmt, ap);
3026 va_end(ap);
3027 return ret;
3028}
3029
3030int trace_array_printk_buf(struct ring_buffer *buffer,
3031 unsigned long ip, const char *fmt, ...)
3032{
3033 int ret;
3034 va_list ap;
3035
983f938a 3036 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
12883efb
SRRH
3037 return 0;
3038
3039 va_start(ap, fmt);
3040 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
3041 va_end(ap);
3042 return ret;
3043}
3044
659372d3
SR
3045int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
3046{
a813a159 3047 return trace_array_vprintk(&global_trace, ip, fmt, args);
659372d3 3048}
769b0441
FW
3049EXPORT_SYMBOL_GPL(trace_vprintk);
3050
e2ac8ef5 3051static void trace_iterator_increment(struct trace_iterator *iter)
5a90f577 3052{
6d158a81
SR
3053 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
3054
5a90f577 3055 iter->idx++;
6d158a81
SR
3056 if (buf_iter)
3057 ring_buffer_read(buf_iter, NULL);
5a90f577
SR
3058}
3059
e309b41d 3060static struct trace_entry *
bc21b478
SR
3061peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
3062 unsigned long *lost_events)
dd0e545f 3063{
3928a8a2 3064 struct ring_buffer_event *event;
6d158a81 3065 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
dd0e545f 3066
d769041f
SR
3067 if (buf_iter)
3068 event = ring_buffer_iter_peek(buf_iter, ts);
3069 else
12883efb 3070 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
bc21b478 3071 lost_events);
d769041f 3072
4a9bd3f1
SR
3073 if (event) {
3074 iter->ent_size = ring_buffer_event_length(event);
3075 return ring_buffer_event_data(event);
3076 }
3077 iter->ent_size = 0;
3078 return NULL;
dd0e545f 3079}
d769041f 3080
dd0e545f 3081static struct trace_entry *
bc21b478
SR
3082__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
3083 unsigned long *missing_events, u64 *ent_ts)
bc0c38d1 3084{
12883efb 3085 struct ring_buffer *buffer = iter->trace_buffer->buffer;
bc0c38d1 3086 struct trace_entry *ent, *next = NULL;
aa27497c 3087 unsigned long lost_events = 0, next_lost = 0;
b04cc6b1 3088 int cpu_file = iter->cpu_file;
3928a8a2 3089 u64 next_ts = 0, ts;
bc0c38d1 3090 int next_cpu = -1;
12b5da34 3091 int next_size = 0;
bc0c38d1
SR
3092 int cpu;
3093
b04cc6b1
FW
3094 /*
3095 * If we are in a per_cpu trace file, don't bother by iterating over
3096 * all cpu and peek directly.
3097 */
ae3b5093 3098 if (cpu_file > RING_BUFFER_ALL_CPUS) {
b04cc6b1
FW
3099 if (ring_buffer_empty_cpu(buffer, cpu_file))
3100 return NULL;
bc21b478 3101 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
b04cc6b1
FW
3102 if (ent_cpu)
3103 *ent_cpu = cpu_file;
3104
3105 return ent;
3106 }
3107
ab46428c 3108 for_each_tracing_cpu(cpu) {
dd0e545f 3109
3928a8a2
SR
3110 if (ring_buffer_empty_cpu(buffer, cpu))
3111 continue;
dd0e545f 3112
bc21b478 3113 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
dd0e545f 3114
cdd31cd2
IM
3115 /*
3116 * Pick the entry with the smallest timestamp:
3117 */
3928a8a2 3118 if (ent && (!next || ts < next_ts)) {
bc0c38d1
SR
3119 next = ent;
3120 next_cpu = cpu;
3928a8a2 3121 next_ts = ts;
bc21b478 3122 next_lost = lost_events;
12b5da34 3123 next_size = iter->ent_size;
bc0c38d1
SR
3124 }
3125 }
3126
12b5da34
SR
3127 iter->ent_size = next_size;
3128
bc0c38d1
SR
3129 if (ent_cpu)
3130 *ent_cpu = next_cpu;
3131
3928a8a2
SR
3132 if (ent_ts)
3133 *ent_ts = next_ts;
3134
bc21b478
SR
3135 if (missing_events)
3136 *missing_events = next_lost;
3137
bc0c38d1
SR
3138 return next;
3139}
3140
dd0e545f 3141/* Find the next real entry, without updating the iterator itself */
c4a8e8be
FW
3142struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
3143 int *ent_cpu, u64 *ent_ts)
bc0c38d1 3144{
bc21b478 3145 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
dd0e545f
SR
3146}
3147
3148/* Find the next real entry, and increment the iterator to the next entry */
955b61e5 3149void *trace_find_next_entry_inc(struct trace_iterator *iter)
dd0e545f 3150{
bc21b478
SR
3151 iter->ent = __find_next_entry(iter, &iter->cpu,
3152 &iter->lost_events, &iter->ts);
dd0e545f 3153
3928a8a2 3154 if (iter->ent)
e2ac8ef5 3155 trace_iterator_increment(iter);
dd0e545f 3156
3928a8a2 3157 return iter->ent ? iter : NULL;
b3806b43 3158}
bc0c38d1 3159
e309b41d 3160static void trace_consume(struct trace_iterator *iter)
b3806b43 3161{
12883efb 3162 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
bc21b478 3163 &iter->lost_events);
bc0c38d1
SR
3164}
3165
e309b41d 3166static void *s_next(struct seq_file *m, void *v, loff_t *pos)
bc0c38d1
SR
3167{
3168 struct trace_iterator *iter = m->private;
bc0c38d1 3169 int i = (int)*pos;
4e3c3333 3170 void *ent;
bc0c38d1 3171
a63ce5b3
SR
3172 WARN_ON_ONCE(iter->leftover);
3173
bc0c38d1
SR
3174 (*pos)++;
3175
3176 /* can't go backwards */
3177 if (iter->idx > i)
3178 return NULL;
3179
3180 if (iter->idx < 0)
955b61e5 3181 ent = trace_find_next_entry_inc(iter);
bc0c38d1
SR
3182 else
3183 ent = iter;
3184
3185 while (ent && iter->idx < i)
955b61e5 3186 ent = trace_find_next_entry_inc(iter);
bc0c38d1
SR
3187
3188 iter->pos = *pos;
3189
bc0c38d1
SR
3190 return ent;
3191}
3192
955b61e5 3193void tracing_iter_reset(struct trace_iterator *iter, int cpu)
2f26ebd5 3194{
2f26ebd5
SR
3195 struct ring_buffer_event *event;
3196 struct ring_buffer_iter *buf_iter;
3197 unsigned long entries = 0;
3198 u64 ts;
3199
12883efb 3200 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
2f26ebd5 3201
6d158a81
SR
3202 buf_iter = trace_buffer_iter(iter, cpu);
3203 if (!buf_iter)
2f26ebd5
SR
3204 return;
3205
2f26ebd5
SR
3206 ring_buffer_iter_reset(buf_iter);
3207
3208 /*
3209 * We could have the case with the max latency tracers
3210 * that a reset never took place on a cpu. This is evident
3211 * by the timestamp being before the start of the buffer.
3212 */
3213 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
12883efb 3214 if (ts >= iter->trace_buffer->time_start)
2f26ebd5
SR
3215 break;
3216 entries++;
3217 ring_buffer_read(buf_iter, NULL);
3218 }
3219
12883efb 3220 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
2f26ebd5
SR
3221}
3222
d7350c3f 3223/*
d7350c3f
FW
3224 * The current tracer is copied to avoid a global locking
3225 * all around.
3226 */
bc0c38d1
SR
3227static void *s_start(struct seq_file *m, loff_t *pos)
3228{
3229 struct trace_iterator *iter = m->private;
2b6080f2 3230 struct trace_array *tr = iter->tr;
b04cc6b1 3231 int cpu_file = iter->cpu_file;
bc0c38d1
SR
3232 void *p = NULL;
3233 loff_t l = 0;
3928a8a2 3234 int cpu;
bc0c38d1 3235
2fd196ec
HT
3236 /*
3237 * copy the tracer to avoid using a global lock all around.
3238 * iter->trace is a copy of current_trace, the pointer to the
3239 * name may be used instead of a strcmp(), as iter->trace->name
3240 * will point to the same string as current_trace->name.
3241 */
bc0c38d1 3242 mutex_lock(&trace_types_lock);
2b6080f2
SR
3243 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
3244 *iter->trace = *tr->current_trace;
d7350c3f 3245 mutex_unlock(&trace_types_lock);
bc0c38d1 3246
12883efb 3247#ifdef CONFIG_TRACER_MAX_TRACE
debdd57f
HT
3248 if (iter->snapshot && iter->trace->use_max_tr)
3249 return ERR_PTR(-EBUSY);
12883efb 3250#endif
debdd57f
HT
3251
3252 if (!iter->snapshot)
d914ba37 3253 atomic_inc(&trace_record_taskinfo_disabled);
bc0c38d1 3254
bc0c38d1
SR
3255 if (*pos != iter->pos) {
3256 iter->ent = NULL;
3257 iter->cpu = 0;
3258 iter->idx = -1;
3259
ae3b5093 3260 if (cpu_file == RING_BUFFER_ALL_CPUS) {
b04cc6b1 3261 for_each_tracing_cpu(cpu)
2f26ebd5 3262 tracing_iter_reset(iter, cpu);
b04cc6b1 3263 } else
2f26ebd5 3264 tracing_iter_reset(iter, cpu_file);
bc0c38d1 3265
ac91d854 3266 iter->leftover = 0;
bc0c38d1
SR
3267 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
3268 ;
3269
3270 } else {
a63ce5b3
SR
3271 /*
3272 * If we overflowed the seq_file before, then we want
3273 * to just reuse the trace_seq buffer again.
3274 */
3275 if (iter->leftover)
3276 p = iter;
3277 else {
3278 l = *pos - 1;
3279 p = s_next(m, p, &l);
3280 }
bc0c38d1
SR
3281 }
3282
4f535968 3283 trace_event_read_lock();
7e53bd42 3284 trace_access_lock(cpu_file);
bc0c38d1
SR
3285 return p;
3286}
3287
3288static void s_stop(struct seq_file *m, void *p)
3289{
7e53bd42
LJ
3290 struct trace_iterator *iter = m->private;
3291
12883efb 3292#ifdef CONFIG_TRACER_MAX_TRACE
debdd57f
HT
3293 if (iter->snapshot && iter->trace->use_max_tr)
3294 return;
12883efb 3295#endif
debdd57f
HT
3296
3297 if (!iter->snapshot)
d914ba37 3298 atomic_dec(&trace_record_taskinfo_disabled);
12883efb 3299
7e53bd42 3300 trace_access_unlock(iter->cpu_file);
4f535968 3301 trace_event_read_unlock();
bc0c38d1
SR
3302}
3303
39eaf7ef 3304static void
12883efb
SRRH
3305get_total_entries(struct trace_buffer *buf,
3306 unsigned long *total, unsigned long *entries)
39eaf7ef
SR
3307{
3308 unsigned long count;
3309 int cpu;
3310
3311 *total = 0;
3312 *entries = 0;
3313
3314 for_each_tracing_cpu(cpu) {
12883efb 3315 count = ring_buffer_entries_cpu(buf->buffer, cpu);
39eaf7ef
SR
3316 /*
3317 * If this buffer has skipped entries, then we hold all
3318 * entries for the trace and we need to ignore the
3319 * ones before the time stamp.
3320 */
12883efb
SRRH
3321 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
3322 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
39eaf7ef
SR
3323 /* total is the same as the entries */
3324 *total += count;
3325 } else
3326 *total += count +
12883efb 3327 ring_buffer_overrun_cpu(buf->buffer, cpu);
39eaf7ef
SR
3328 *entries += count;
3329 }
3330}
3331
e309b41d 3332static void print_lat_help_header(struct seq_file *m)
bc0c38d1 3333{
d79ac28f
RV
3334 seq_puts(m, "# _------=> CPU# \n"
3335 "# / _-----=> irqs-off \n"
3336 "# | / _----=> need-resched \n"
3337 "# || / _---=> hardirq/softirq \n"
3338 "# ||| / _--=> preempt-depth \n"
3339 "# |||| / delay \n"
3340 "# cmd pid ||||| time | caller \n"
3341 "# \\ / ||||| \\ | / \n");
bc0c38d1
SR
3342}
3343
12883efb 3344static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
bc0c38d1 3345{
39eaf7ef
SR
3346 unsigned long total;
3347 unsigned long entries;
3348
12883efb 3349 get_total_entries(buf, &total, &entries);
39eaf7ef
SR
3350 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
3351 entries, total, num_online_cpus());
3352 seq_puts(m, "#\n");
3353}
3354
441dae8f
JF
3355static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m,
3356 unsigned int flags)
39eaf7ef 3357{
441dae8f
JF
3358 bool tgid = flags & TRACE_ITER_RECORD_TGID;
3359
12883efb 3360 print_event_info(buf, m);
441dae8f
JF
3361
3362 seq_printf(m, "# TASK-PID CPU# %s TIMESTAMP FUNCTION\n", tgid ? "TGID " : "");
3363 seq_printf(m, "# | | | %s | |\n", tgid ? " | " : "");
bc0c38d1
SR
3364}
3365
441dae8f
JF
3366static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m,
3367 unsigned int flags)
77271ce4 3368{
441dae8f 3369 bool tgid = flags & TRACE_ITER_RECORD_TGID;
b11fb737
SRV
3370 const char tgid_space[] = " ";
3371 const char space[] = " ";
3372
3373 seq_printf(m, "# %s _-----=> irqs-off\n",
3374 tgid ? tgid_space : space);
3375 seq_printf(m, "# %s / _----=> need-resched\n",
3376 tgid ? tgid_space : space);
3377 seq_printf(m, "# %s| / _---=> hardirq/softirq\n",
3378 tgid ? tgid_space : space);
3379 seq_printf(m, "# %s|| / _--=> preempt-depth\n",
3380 tgid ? tgid_space : space);
3381 seq_printf(m, "# %s||| / delay\n",
3382 tgid ? tgid_space : space);
3383 seq_printf(m, "# TASK-PID CPU#%s|||| TIMESTAMP FUNCTION\n",
3384 tgid ? " TGID " : space);
3385 seq_printf(m, "# | | | %s|||| | |\n",
3386 tgid ? " | " : space);
77271ce4 3387}
bc0c38d1 3388
62b915f1 3389void
bc0c38d1
SR
3390print_trace_header(struct seq_file *m, struct trace_iterator *iter)
3391{
983f938a 3392 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
12883efb
SRRH
3393 struct trace_buffer *buf = iter->trace_buffer;
3394 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
2b6080f2 3395 struct tracer *type = iter->trace;
39eaf7ef
SR
3396 unsigned long entries;
3397 unsigned long total;
bc0c38d1
SR
3398 const char *name = "preemption";
3399
d840f718 3400 name = type->name;
bc0c38d1 3401
12883efb 3402 get_total_entries(buf, &total, &entries);
bc0c38d1 3403
888b55dc 3404 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
bc0c38d1 3405 name, UTS_RELEASE);
888b55dc 3406 seq_puts(m, "# -----------------------------------"
bc0c38d1 3407 "---------------------------------\n");
888b55dc 3408 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
bc0c38d1 3409 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
57f50be1 3410 nsecs_to_usecs(data->saved_latency),
bc0c38d1 3411 entries,
4c11d7ae 3412 total,
12883efb 3413 buf->cpu,
bc0c38d1
SR
3414#if defined(CONFIG_PREEMPT_NONE)
3415 "server",
3416#elif defined(CONFIG_PREEMPT_VOLUNTARY)
3417 "desktop",
b5c21b45 3418#elif defined(CONFIG_PREEMPT)
bc0c38d1
SR
3419 "preempt",
3420#else
3421 "unknown",
3422#endif
3423 /* These are reserved for later use */
3424 0, 0, 0, 0);
3425#ifdef CONFIG_SMP
3426 seq_printf(m, " #P:%d)\n", num_online_cpus());
3427#else
3428 seq_puts(m, ")\n");
3429#endif
888b55dc
KM
3430 seq_puts(m, "# -----------------\n");
3431 seq_printf(m, "# | task: %.16s-%d "
bc0c38d1 3432 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
d20b92ab
EB
3433 data->comm, data->pid,
3434 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
bc0c38d1 3435 data->policy, data->rt_priority);
888b55dc 3436 seq_puts(m, "# -----------------\n");
bc0c38d1
SR
3437
3438 if (data->critical_start) {
888b55dc 3439 seq_puts(m, "# => started at: ");
214023c3
SR
3440 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
3441 trace_print_seq(m, &iter->seq);
888b55dc 3442 seq_puts(m, "\n# => ended at: ");
214023c3
SR
3443 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
3444 trace_print_seq(m, &iter->seq);
8248ac05 3445 seq_puts(m, "\n#\n");
bc0c38d1
SR
3446 }
3447
888b55dc 3448 seq_puts(m, "#\n");
bc0c38d1
SR
3449}
3450
a309720c
SR
3451static void test_cpu_buff_start(struct trace_iterator *iter)
3452{
3453 struct trace_seq *s = &iter->seq;
983f938a 3454 struct trace_array *tr = iter->tr;
a309720c 3455
983f938a 3456 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
12ef7d44
SR
3457 return;
3458
3459 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
3460 return;
3461
4dbbe2d8
MK
3462 if (cpumask_available(iter->started) &&
3463 cpumask_test_cpu(iter->cpu, iter->started))
a309720c
SR
3464 return;
3465
12883efb 3466 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
2f26ebd5
SR
3467 return;
3468
4dbbe2d8 3469 if (cpumask_available(iter->started))
919cd979 3470 cpumask_set_cpu(iter->cpu, iter->started);
b0dfa978
FW
3471
3472 /* Don't print started cpu buffer for the first entry of the trace */
3473 if (iter->idx > 1)
3474 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
3475 iter->cpu);
a309720c
SR
3476}
3477
2c4f035f 3478static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
bc0c38d1 3479{
983f938a 3480 struct trace_array *tr = iter->tr;
214023c3 3481 struct trace_seq *s = &iter->seq;
983f938a 3482 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
4e3c3333 3483 struct trace_entry *entry;
f633cef0 3484 struct trace_event *event;
bc0c38d1 3485
4e3c3333 3486 entry = iter->ent;
dd0e545f 3487
a309720c
SR
3488 test_cpu_buff_start(iter);
3489
c4a8e8be 3490 event = ftrace_find_event(entry->type);
bc0c38d1 3491
983f938a 3492 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
19a7fe20
SRRH
3493 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
3494 trace_print_lat_context(iter);
3495 else
3496 trace_print_context(iter);
c4a8e8be 3497 }
bc0c38d1 3498
19a7fe20
SRRH
3499 if (trace_seq_has_overflowed(s))
3500 return TRACE_TYPE_PARTIAL_LINE;
3501
268ccda0 3502 if (event)
a9a57763 3503 return event->funcs->trace(iter, sym_flags, event);
d9793bd8 3504
19a7fe20 3505 trace_seq_printf(s, "Unknown type %d\n", entry->type);
02b67518 3506
19a7fe20 3507 return trace_handle_return(s);
bc0c38d1
SR
3508}
3509
2c4f035f 3510static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
f9896bf3 3511{
983f938a 3512 struct trace_array *tr = iter->tr;
f9896bf3
IM
3513 struct trace_seq *s = &iter->seq;
3514 struct trace_entry *entry;
f633cef0 3515 struct trace_event *event;
f9896bf3
IM
3516
3517 entry = iter->ent;
dd0e545f 3518
983f938a 3519 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
19a7fe20
SRRH
3520 trace_seq_printf(s, "%d %d %llu ",
3521 entry->pid, iter->cpu, iter->ts);
3522
3523 if (trace_seq_has_overflowed(s))
3524 return TRACE_TYPE_PARTIAL_LINE;
f9896bf3 3525
f633cef0 3526 event = ftrace_find_event(entry->type);
268ccda0 3527 if (event)
a9a57763 3528 return event->funcs->raw(iter, 0, event);
d9793bd8 3529
19a7fe20 3530 trace_seq_printf(s, "%d ?\n", entry->type);
777e208d 3531
19a7fe20 3532 return trace_handle_return(s);
f9896bf3
IM
3533}
3534
2c4f035f 3535static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
5e3ca0ec 3536{
983f938a 3537 struct trace_array *tr = iter->tr;
5e3ca0ec
IM
3538 struct trace_seq *s = &iter->seq;
3539 unsigned char newline = '\n';
3540 struct trace_entry *entry;
f633cef0 3541 struct trace_event *event;
5e3ca0ec
IM
3542
3543 entry = iter->ent;
dd0e545f 3544
983f938a 3545 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
19a7fe20
SRRH
3546 SEQ_PUT_HEX_FIELD(s, entry->pid);
3547 SEQ_PUT_HEX_FIELD(s, iter->cpu);
3548 SEQ_PUT_HEX_FIELD(s, iter->ts);
3549 if (trace_seq_has_overflowed(s))
3550 return TRACE_TYPE_PARTIAL_LINE;
c4a8e8be 3551 }
5e3ca0ec 3552
f633cef0 3553 event = ftrace_find_event(entry->type);
268ccda0 3554 if (event) {
a9a57763 3555 enum print_line_t ret = event->funcs->hex(iter, 0, event);
d9793bd8
ACM
3556 if (ret != TRACE_TYPE_HANDLED)
3557 return ret;
3558 }
7104f300 3559
19a7fe20 3560 SEQ_PUT_FIELD(s, newline);
5e3ca0ec 3561
19a7fe20 3562 return trace_handle_return(s);
5e3ca0ec
IM
3563}
3564
2c4f035f 3565static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
cb0f12aa 3566{
983f938a 3567 struct trace_array *tr = iter->tr;
cb0f12aa
IM
3568 struct trace_seq *s = &iter->seq;
3569 struct trace_entry *entry;
f633cef0 3570 struct trace_event *event;
cb0f12aa
IM
3571
3572 entry = iter->ent;
dd0e545f 3573
983f938a 3574 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
19a7fe20
SRRH
3575 SEQ_PUT_FIELD(s, entry->pid);
3576 SEQ_PUT_FIELD(s, iter->cpu);
3577 SEQ_PUT_FIELD(s, iter->ts);
3578 if (trace_seq_has_overflowed(s))
3579 return TRACE_TYPE_PARTIAL_LINE;
c4a8e8be 3580 }
cb0f12aa 3581
f633cef0 3582 event = ftrace_find_event(entry->type);
a9a57763
SR
3583 return event ? event->funcs->binary(iter, 0, event) :
3584 TRACE_TYPE_HANDLED;
cb0f12aa
IM
3585}
3586
62b915f1 3587int trace_empty(struct trace_iterator *iter)
bc0c38d1 3588{
6d158a81 3589 struct ring_buffer_iter *buf_iter;
bc0c38d1
SR
3590 int cpu;
3591
9aba60fe 3592 /* If we are looking at one CPU buffer, only check that one */
ae3b5093 3593 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
9aba60fe 3594 cpu = iter->cpu_file;
6d158a81
SR
3595 buf_iter = trace_buffer_iter(iter, cpu);
3596 if (buf_iter) {
3597 if (!ring_buffer_iter_empty(buf_iter))
9aba60fe
SR
3598 return 0;
3599 } else {
12883efb 3600 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
9aba60fe
SR
3601 return 0;
3602 }
3603 return 1;
3604 }
3605
ab46428c 3606 for_each_tracing_cpu(cpu) {
6d158a81
SR
3607 buf_iter = trace_buffer_iter(iter, cpu);
3608 if (buf_iter) {
3609 if (!ring_buffer_iter_empty(buf_iter))
d769041f
SR
3610 return 0;
3611 } else {
12883efb 3612 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
d769041f
SR
3613 return 0;
3614 }
bc0c38d1 3615 }
d769041f 3616
797d3712 3617 return 1;
bc0c38d1
SR
3618}
3619
4f535968 3620/* Called with trace_event_read_lock() held. */
955b61e5 3621enum print_line_t print_trace_line(struct trace_iterator *iter)
f9896bf3 3622{
983f938a
SRRH
3623 struct trace_array *tr = iter->tr;
3624 unsigned long trace_flags = tr->trace_flags;
2c4f035f
FW
3625 enum print_line_t ret;
3626
19a7fe20
SRRH
3627 if (iter->lost_events) {
3628 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
3629 iter->cpu, iter->lost_events);
3630 if (trace_seq_has_overflowed(&iter->seq))
3631 return TRACE_TYPE_PARTIAL_LINE;
3632 }
bc21b478 3633
2c4f035f
FW
3634 if (iter->trace && iter->trace->print_line) {
3635 ret = iter->trace->print_line(iter);
3636 if (ret != TRACE_TYPE_UNHANDLED)
3637 return ret;
3638 }
72829bc3 3639
09ae7234
SRRH
3640 if (iter->ent->type == TRACE_BPUTS &&
3641 trace_flags & TRACE_ITER_PRINTK &&
3642 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
3643 return trace_print_bputs_msg_only(iter);
3644
48ead020
FW
3645 if (iter->ent->type == TRACE_BPRINT &&
3646 trace_flags & TRACE_ITER_PRINTK &&
3647 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
5ef841f6 3648 return trace_print_bprintk_msg_only(iter);
48ead020 3649
66896a85
FW
3650 if (iter->ent->type == TRACE_PRINT &&
3651 trace_flags & TRACE_ITER_PRINTK &&
3652 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
5ef841f6 3653 return trace_print_printk_msg_only(iter);
66896a85 3654
cb0f12aa
IM
3655 if (trace_flags & TRACE_ITER_BIN)
3656 return print_bin_fmt(iter);
3657
5e3ca0ec
IM
3658 if (trace_flags & TRACE_ITER_HEX)
3659 return print_hex_fmt(iter);
3660
f9896bf3
IM
3661 if (trace_flags & TRACE_ITER_RAW)
3662 return print_raw_fmt(iter);
3663
f9896bf3
IM
3664 return print_trace_fmt(iter);
3665}
3666
7e9a49ef
JO
3667void trace_latency_header(struct seq_file *m)
3668{
3669 struct trace_iterator *iter = m->private;
983f938a 3670 struct trace_array *tr = iter->tr;
7e9a49ef
JO
3671
3672 /* print nothing if the buffers are empty */
3673 if (trace_empty(iter))
3674 return;
3675
3676 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
3677 print_trace_header(m, iter);
3678
983f938a 3679 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
7e9a49ef
JO
3680 print_lat_help_header(m);
3681}
3682
62b915f1
JO
3683void trace_default_header(struct seq_file *m)
3684{
3685 struct trace_iterator *iter = m->private;
983f938a
SRRH
3686 struct trace_array *tr = iter->tr;
3687 unsigned long trace_flags = tr->trace_flags;
62b915f1 3688
f56e7f8e
JO
3689 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
3690 return;
3691
62b915f1
JO
3692 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
3693 /* print nothing if the buffers are empty */
3694 if (trace_empty(iter))
3695 return;
3696 print_trace_header(m, iter);
3697 if (!(trace_flags & TRACE_ITER_VERBOSE))
3698 print_lat_help_header(m);
3699 } else {
77271ce4
SR
3700 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
3701 if (trace_flags & TRACE_ITER_IRQ_INFO)
441dae8f
JF
3702 print_func_help_header_irq(iter->trace_buffer,
3703 m, trace_flags);
77271ce4 3704 else
441dae8f
JF
3705 print_func_help_header(iter->trace_buffer, m,
3706 trace_flags);
77271ce4 3707 }
62b915f1
JO
3708 }
3709}
3710
e0a413f6
SR
3711static void test_ftrace_alive(struct seq_file *m)
3712{
3713 if (!ftrace_is_dead())
3714 return;
d79ac28f
RV
3715 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
3716 "# MAY BE MISSING FUNCTION EVENTS\n");
e0a413f6
SR
3717}
3718
d8741e2e 3719#ifdef CONFIG_TRACER_MAX_TRACE
f1affcaa 3720static void show_snapshot_main_help(struct seq_file *m)
d8741e2e 3721{
d79ac28f
RV
3722 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
3723 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
3724 "# Takes a snapshot of the main buffer.\n"
3725 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
3726 "# (Doesn't have to be '2' works with any number that\n"
3727 "# is not a '0' or '1')\n");
d8741e2e 3728}
f1affcaa
SRRH
3729
3730static void show_snapshot_percpu_help(struct seq_file *m)
3731{
fa6f0cc7 3732 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
f1affcaa 3733#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
d79ac28f
RV
3734 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
3735 "# Takes a snapshot of the main buffer for this cpu.\n");
f1affcaa 3736#else
d79ac28f
RV
3737 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
3738 "# Must use main snapshot file to allocate.\n");
f1affcaa 3739#endif
d79ac28f
RV
3740 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
3741 "# (Doesn't have to be '2' works with any number that\n"
3742 "# is not a '0' or '1')\n");
f1affcaa
SRRH
3743}
3744
d8741e2e
SRRH
3745static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
3746{
45ad21ca 3747 if (iter->tr->allocated_snapshot)
fa6f0cc7 3748 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
d8741e2e 3749 else
fa6f0cc7 3750 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
d8741e2e 3751
fa6f0cc7 3752 seq_puts(m, "# Snapshot commands:\n");
f1affcaa
SRRH
3753 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
3754 show_snapshot_main_help(m);
3755 else
3756 show_snapshot_percpu_help(m);
d8741e2e
SRRH
3757}
3758#else
3759/* Should never be called */
3760static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
3761#endif
3762
bc0c38d1
SR
3763static int s_show(struct seq_file *m, void *v)
3764{
3765 struct trace_iterator *iter = v;
a63ce5b3 3766 int ret;
bc0c38d1
SR
3767
3768 if (iter->ent == NULL) {
3769 if (iter->tr) {
3770 seq_printf(m, "# tracer: %s\n", iter->trace->name);
3771 seq_puts(m, "#\n");
e0a413f6 3772 test_ftrace_alive(m);
bc0c38d1 3773 }
d8741e2e
SRRH
3774 if (iter->snapshot && trace_empty(iter))
3775 print_snapshot_help(m, iter);
3776 else if (iter->trace && iter->trace->print_header)
8bba1bf5 3777 iter->trace->print_header(m);
62b915f1
JO
3778 else
3779 trace_default_header(m);
3780
a63ce5b3
SR
3781 } else if (iter->leftover) {
3782 /*
3783 * If we filled the seq_file buffer earlier, we
3784 * want to just show it now.
3785 */
3786 ret = trace_print_seq(m, &iter->seq);
3787
3788 /* ret should this time be zero, but you never know */
3789 iter->leftover = ret;
3790
bc0c38d1 3791 } else {
f9896bf3 3792 print_trace_line(iter);
a63ce5b3
SR
3793 ret = trace_print_seq(m, &iter->seq);
3794 /*
3795 * If we overflow the seq_file buffer, then it will
3796 * ask us for this data again at start up.
3797 * Use that instead.
3798 * ret is 0 if seq_file write succeeded.
3799 * -1 otherwise.
3800 */
3801 iter->leftover = ret;
bc0c38d1
SR
3802 }
3803
3804 return 0;
3805}
3806
649e9c70
ON
3807/*
3808 * Should be used after trace_array_get(), trace_types_lock
3809 * ensures that i_cdev was already initialized.
3810 */
3811static inline int tracing_get_cpu(struct inode *inode)
3812{
3813 if (inode->i_cdev) /* See trace_create_cpu_file() */
3814 return (long)inode->i_cdev - 1;
3815 return RING_BUFFER_ALL_CPUS;
3816}
3817
88e9d34c 3818static const struct seq_operations tracer_seq_ops = {
4bf39a94
IM
3819 .start = s_start,
3820 .next = s_next,
3821 .stop = s_stop,
3822 .show = s_show,
bc0c38d1
SR
3823};
3824
e309b41d 3825static struct trace_iterator *
6484c71c 3826__tracing_open(struct inode *inode, struct file *file, bool snapshot)
bc0c38d1 3827{
6484c71c 3828 struct trace_array *tr = inode->i_private;
bc0c38d1 3829 struct trace_iterator *iter;
50e18b94 3830 int cpu;
bc0c38d1 3831
85a2f9b4
SR
3832 if (tracing_disabled)
3833 return ERR_PTR(-ENODEV);
60a11774 3834
50e18b94 3835 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
85a2f9b4
SR
3836 if (!iter)
3837 return ERR_PTR(-ENOMEM);
bc0c38d1 3838
72917235 3839 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
6d158a81 3840 GFP_KERNEL);
93574fcc
DC
3841 if (!iter->buffer_iter)
3842 goto release;
3843
d7350c3f
FW
3844 /*
3845 * We make a copy of the current tracer to avoid concurrent
3846 * changes on it while we are reading.
3847 */
bc0c38d1 3848 mutex_lock(&trace_types_lock);
d7350c3f 3849 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
85a2f9b4 3850 if (!iter->trace)
d7350c3f 3851 goto fail;
85a2f9b4 3852
2b6080f2 3853 *iter->trace = *tr->current_trace;
d7350c3f 3854
79f55997 3855 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
b0dfa978
FW
3856 goto fail;
3857
12883efb
SRRH
3858 iter->tr = tr;
3859
3860#ifdef CONFIG_TRACER_MAX_TRACE
2b6080f2
SR
3861 /* Currently only the top directory has a snapshot */
3862 if (tr->current_trace->print_max || snapshot)
12883efb 3863 iter->trace_buffer = &tr->max_buffer;
bc0c38d1 3864 else
12883efb
SRRH
3865#endif
3866 iter->trace_buffer = &tr->trace_buffer;
debdd57f 3867 iter->snapshot = snapshot;
bc0c38d1 3868 iter->pos = -1;
6484c71c 3869 iter->cpu_file = tracing_get_cpu(inode);
d7350c3f 3870 mutex_init(&iter->mutex);
bc0c38d1 3871
8bba1bf5
MM
3872 /* Notify the tracer early; before we stop tracing. */
3873 if (iter->trace && iter->trace->open)
a93751ca 3874 iter->trace->open(iter);
8bba1bf5 3875
12ef7d44 3876 /* Annotate start of buffers if we had overruns */
12883efb 3877 if (ring_buffer_overruns(iter->trace_buffer->buffer))
12ef7d44
SR
3878 iter->iter_flags |= TRACE_FILE_ANNOTATE;
3879
8be0709f 3880 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
58e8eedf 3881 if (trace_clocks[tr->clock_id].in_ns)
8be0709f
DS
3882 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
3883
debdd57f
HT
3884 /* stop the trace while dumping if we are not opening "snapshot" */
3885 if (!iter->snapshot)
2b6080f2 3886 tracing_stop_tr(tr);
2f26ebd5 3887
ae3b5093 3888 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
b04cc6b1 3889 for_each_tracing_cpu(cpu) {
b04cc6b1 3890 iter->buffer_iter[cpu] =
12883efb 3891 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
72c9ddfd
DM
3892 }
3893 ring_buffer_read_prepare_sync();
3894 for_each_tracing_cpu(cpu) {
3895 ring_buffer_read_start(iter->buffer_iter[cpu]);
2f26ebd5 3896 tracing_iter_reset(iter, cpu);
b04cc6b1
FW
3897 }
3898 } else {
3899 cpu = iter->cpu_file;
3928a8a2 3900 iter->buffer_iter[cpu] =
12883efb 3901 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
72c9ddfd
DM
3902 ring_buffer_read_prepare_sync();
3903 ring_buffer_read_start(iter->buffer_iter[cpu]);
2f26ebd5 3904 tracing_iter_reset(iter, cpu);
3928a8a2
SR
3905 }
3906
bc0c38d1
SR
3907 mutex_unlock(&trace_types_lock);
3908
bc0c38d1 3909 return iter;
3928a8a2 3910
d7350c3f 3911 fail:
3928a8a2 3912 mutex_unlock(&trace_types_lock);
d7350c3f 3913 kfree(iter->trace);
6d158a81 3914 kfree(iter->buffer_iter);
93574fcc 3915release:
50e18b94
JO
3916 seq_release_private(inode, file);
3917 return ERR_PTR(-ENOMEM);
bc0c38d1
SR
3918}
3919
3920int tracing_open_generic(struct inode *inode, struct file *filp)
3921{
60a11774
SR
3922 if (tracing_disabled)
3923 return -ENODEV;
3924
bc0c38d1
SR
3925 filp->private_data = inode->i_private;
3926 return 0;
3927}
3928
2e86421d
GB
3929bool tracing_is_disabled(void)
3930{
3931 return (tracing_disabled) ? true: false;
3932}
3933
7b85af63
SRRH
3934/*
3935 * Open and update trace_array ref count.
3936 * Must have the current trace_array passed to it.
3937 */
dcc30223 3938static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
7b85af63
SRRH
3939{
3940 struct trace_array *tr = inode->i_private;
3941
3942 if (tracing_disabled)
3943 return -ENODEV;
3944
3945 if (trace_array_get(tr) < 0)
3946 return -ENODEV;
3947
3948 filp->private_data = inode->i_private;
3949
3950 return 0;
7b85af63
SRRH
3951}
3952
4fd27358 3953static int tracing_release(struct inode *inode, struct file *file)
bc0c38d1 3954{
6484c71c 3955 struct trace_array *tr = inode->i_private;
907f2784 3956 struct seq_file *m = file->private_data;
4acd4d00 3957 struct trace_iterator *iter;
3928a8a2 3958 int cpu;
bc0c38d1 3959
ff451961 3960 if (!(file->f_mode & FMODE_READ)) {
6484c71c 3961 trace_array_put(tr);
4acd4d00 3962 return 0;
ff451961 3963 }
4acd4d00 3964
6484c71c 3965 /* Writes do not use seq_file */
4acd4d00 3966 iter = m->private;
bc0c38d1 3967 mutex_lock(&trace_types_lock);
a695cb58 3968
3928a8a2
SR
3969 for_each_tracing_cpu(cpu) {
3970 if (iter->buffer_iter[cpu])
3971 ring_buffer_read_finish(iter->buffer_iter[cpu]);
3972 }
3973
bc0c38d1
SR
3974 if (iter->trace && iter->trace->close)
3975 iter->trace->close(iter);
3976
debdd57f
HT
3977 if (!iter->snapshot)
3978 /* reenable tracing if it was previously enabled */
2b6080f2 3979 tracing_start_tr(tr);
f77d09a3
AL
3980
3981 __trace_array_put(tr);
3982
bc0c38d1
SR
3983 mutex_unlock(&trace_types_lock);
3984
d7350c3f 3985 mutex_destroy(&iter->mutex);
b0dfa978 3986 free_cpumask_var(iter->started);
d7350c3f 3987 kfree(iter->trace);
6d158a81 3988 kfree(iter->buffer_iter);
50e18b94 3989 seq_release_private(inode, file);
ff451961 3990
bc0c38d1
SR
3991 return 0;
3992}
3993
7b85af63
SRRH
3994static int tracing_release_generic_tr(struct inode *inode, struct file *file)
3995{
3996 struct trace_array *tr = inode->i_private;
3997
3998 trace_array_put(tr);
bc0c38d1
SR
3999 return 0;
4000}
4001
7b85af63
SRRH
4002static int tracing_single_release_tr(struct inode *inode, struct file *file)
4003{
4004 struct trace_array *tr = inode->i_private;
4005
4006 trace_array_put(tr);
4007
4008 return single_release(inode, file);
4009}
4010
bc0c38d1
SR
4011static int tracing_open(struct inode *inode, struct file *file)
4012{
6484c71c 4013 struct trace_array *tr = inode->i_private;
85a2f9b4
SR
4014 struct trace_iterator *iter;
4015 int ret = 0;
bc0c38d1 4016
ff451961
SRRH
4017 if (trace_array_get(tr) < 0)
4018 return -ENODEV;
4019
4acd4d00 4020 /* If this file was open for write, then erase contents */
6484c71c
ON
4021 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
4022 int cpu = tracing_get_cpu(inode);
8dd33bcb
BY
4023 struct trace_buffer *trace_buf = &tr->trace_buffer;
4024
4025#ifdef CONFIG_TRACER_MAX_TRACE
4026 if (tr->current_trace->print_max)
4027 trace_buf = &tr->max_buffer;
4028#endif
6484c71c
ON
4029
4030 if (cpu == RING_BUFFER_ALL_CPUS)
8dd33bcb 4031 tracing_reset_online_cpus(trace_buf);
4acd4d00 4032 else
8dd33bcb 4033 tracing_reset(trace_buf, cpu);
4acd4d00 4034 }
bc0c38d1 4035
4acd4d00 4036 if (file->f_mode & FMODE_READ) {
6484c71c 4037 iter = __tracing_open(inode, file, false);
4acd4d00
SR
4038 if (IS_ERR(iter))
4039 ret = PTR_ERR(iter);
983f938a 4040 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
4acd4d00
SR
4041 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4042 }
ff451961
SRRH
4043
4044 if (ret < 0)
4045 trace_array_put(tr);
4046
bc0c38d1
SR
4047 return ret;
4048}
4049
607e2ea1
SRRH
4050/*
4051 * Some tracers are not suitable for instance buffers.
4052 * A tracer is always available for the global array (toplevel)
4053 * or if it explicitly states that it is.
4054 */
4055static bool
4056trace_ok_for_array(struct tracer *t, struct trace_array *tr)
4057{
4058 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
4059}
4060
4061/* Find the next tracer that this trace array may use */
4062static struct tracer *
4063get_tracer_for_array(struct trace_array *tr, struct tracer *t)
4064{
4065 while (t && !trace_ok_for_array(t, tr))
4066 t = t->next;
4067
4068 return t;
4069}
4070
e309b41d 4071static void *
bc0c38d1
SR
4072t_next(struct seq_file *m, void *v, loff_t *pos)
4073{
607e2ea1 4074 struct trace_array *tr = m->private;
f129e965 4075 struct tracer *t = v;
bc0c38d1
SR
4076
4077 (*pos)++;
4078
4079 if (t)
607e2ea1 4080 t = get_tracer_for_array(tr, t->next);
bc0c38d1 4081
bc0c38d1
SR
4082 return t;
4083}
4084
4085static void *t_start(struct seq_file *m, loff_t *pos)
4086{
607e2ea1 4087 struct trace_array *tr = m->private;
f129e965 4088 struct tracer *t;
bc0c38d1
SR
4089 loff_t l = 0;
4090
4091 mutex_lock(&trace_types_lock);
607e2ea1
SRRH
4092
4093 t = get_tracer_for_array(tr, trace_types);
4094 for (; t && l < *pos; t = t_next(m, t, &l))
4095 ;
bc0c38d1
SR
4096
4097 return t;
4098}
4099
4100static void t_stop(struct seq_file *m, void *p)
4101{
4102 mutex_unlock(&trace_types_lock);
4103}
4104
4105static int t_show(struct seq_file *m, void *v)
4106{
4107 struct tracer *t = v;
4108
4109 if (!t)
4110 return 0;
4111
fa6f0cc7 4112 seq_puts(m, t->name);
bc0c38d1
SR
4113 if (t->next)
4114 seq_putc(m, ' ');
4115 else
4116 seq_putc(m, '\n');
4117
4118 return 0;
4119}
4120
88e9d34c 4121static const struct seq_operations show_traces_seq_ops = {
4bf39a94
IM
4122 .start = t_start,
4123 .next = t_next,
4124 .stop = t_stop,
4125 .show = t_show,
bc0c38d1
SR
4126};
4127
4128static int show_traces_open(struct inode *inode, struct file *file)
4129{
607e2ea1
SRRH
4130 struct trace_array *tr = inode->i_private;
4131 struct seq_file *m;
4132 int ret;
4133
60a11774
SR
4134 if (tracing_disabled)
4135 return -ENODEV;
4136
607e2ea1
SRRH
4137 ret = seq_open(file, &show_traces_seq_ops);
4138 if (ret)
4139 return ret;
4140
4141 m = file->private_data;
4142 m->private = tr;
4143
4144 return 0;
bc0c38d1
SR
4145}
4146
4acd4d00
SR
4147static ssize_t
4148tracing_write_stub(struct file *filp, const char __user *ubuf,
4149 size_t count, loff_t *ppos)
4150{
4151 return count;
4152}
4153
098c879e 4154loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
364829b1 4155{
098c879e
SRRH
4156 int ret;
4157
364829b1 4158 if (file->f_mode & FMODE_READ)
098c879e 4159 ret = seq_lseek(file, offset, whence);
364829b1 4160 else
098c879e
SRRH
4161 file->f_pos = ret = 0;
4162
4163 return ret;
364829b1
SP
4164}
4165
5e2336a0 4166static const struct file_operations tracing_fops = {
4bf39a94
IM
4167 .open = tracing_open,
4168 .read = seq_read,
4acd4d00 4169 .write = tracing_write_stub,
098c879e 4170 .llseek = tracing_lseek,
4bf39a94 4171 .release = tracing_release,
bc0c38d1
SR
4172};
4173
5e2336a0 4174static const struct file_operations show_traces_fops = {
c7078de1
IM
4175 .open = show_traces_open,
4176 .read = seq_read,
4177 .release = seq_release,
b444786f 4178 .llseek = seq_lseek,
c7078de1
IM
4179};
4180
36dfe925
IM
4181/*
4182 * The tracer itself will not take this lock, but still we want
4183 * to provide a consistent cpumask to user-space:
4184 */
4185static DEFINE_MUTEX(tracing_cpumask_update_lock);
4186
4187/*
4188 * Temporary storage for the character representation of the
4189 * CPU bitmask (and one more byte for the newline):
4190 */
4191static char mask_str[NR_CPUS + 1];
4192
c7078de1
IM
4193static ssize_t
4194tracing_cpumask_read(struct file *filp, char __user *ubuf,
4195 size_t count, loff_t *ppos)
4196{
ccfe9e42 4197 struct trace_array *tr = file_inode(filp)->i_private;
36dfe925 4198 int len;
c7078de1
IM
4199
4200 mutex_lock(&tracing_cpumask_update_lock);
36dfe925 4201
1a40243b
TH
4202 len = snprintf(mask_str, count, "%*pb\n",
4203 cpumask_pr_args(tr->tracing_cpumask));
4204 if (len >= count) {
36dfe925
IM
4205 count = -EINVAL;
4206 goto out_err;
4207 }
36dfe925
IM
4208 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
4209
4210out_err:
c7078de1
IM
4211 mutex_unlock(&tracing_cpumask_update_lock);
4212
4213 return count;
4214}
4215
4216static ssize_t
4217tracing_cpumask_write(struct file *filp, const char __user *ubuf,
4218 size_t count, loff_t *ppos)
4219{
ccfe9e42 4220 struct trace_array *tr = file_inode(filp)->i_private;
9e01c1b7 4221 cpumask_var_t tracing_cpumask_new;
2b6080f2 4222 int err, cpu;
9e01c1b7
RR
4223
4224 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
4225 return -ENOMEM;
c7078de1 4226
9e01c1b7 4227 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
c7078de1 4228 if (err)
36dfe925
IM
4229 goto err_unlock;
4230
215368e8
LZ
4231 mutex_lock(&tracing_cpumask_update_lock);
4232
a5e25883 4233 local_irq_disable();
0b9b12c1 4234 arch_spin_lock(&tr->max_lock);
ab46428c 4235 for_each_tracing_cpu(cpu) {
36dfe925
IM
4236 /*
4237 * Increase/decrease the disabled counter if we are
4238 * about to flip a bit in the cpumask:
4239 */
ccfe9e42 4240 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
9e01c1b7 4241 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
12883efb
SRRH
4242 atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
4243 ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
36dfe925 4244 }
ccfe9e42 4245 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
9e01c1b7 4246 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
12883efb
SRRH
4247 atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
4248 ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
36dfe925
IM
4249 }
4250 }
0b9b12c1 4251 arch_spin_unlock(&tr->max_lock);
a5e25883 4252 local_irq_enable();
36dfe925 4253
ccfe9e42 4254 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
36dfe925
IM
4255
4256 mutex_unlock(&tracing_cpumask_update_lock);
9e01c1b7 4257 free_cpumask_var(tracing_cpumask_new);
c7078de1
IM
4258
4259 return count;
36dfe925
IM
4260
4261err_unlock:
215368e8 4262 free_cpumask_var(tracing_cpumask_new);
36dfe925
IM
4263
4264 return err;
c7078de1
IM
4265}
4266
5e2336a0 4267static const struct file_operations tracing_cpumask_fops = {
ccfe9e42 4268 .open = tracing_open_generic_tr,
c7078de1
IM
4269 .read = tracing_cpumask_read,
4270 .write = tracing_cpumask_write,
ccfe9e42 4271 .release = tracing_release_generic_tr,
b444786f 4272 .llseek = generic_file_llseek,
bc0c38d1
SR
4273};
4274
fdb372ed 4275static int tracing_trace_options_show(struct seq_file *m, void *v)
bc0c38d1 4276{
d8e83d26 4277 struct tracer_opt *trace_opts;
2b6080f2 4278 struct trace_array *tr = m->private;
d8e83d26 4279 u32 tracer_flags;
d8e83d26 4280 int i;
adf9f195 4281
d8e83d26 4282 mutex_lock(&trace_types_lock);
2b6080f2
SR
4283 tracer_flags = tr->current_trace->flags->val;
4284 trace_opts = tr->current_trace->flags->opts;
d8e83d26 4285
bc0c38d1 4286 for (i = 0; trace_options[i]; i++) {
983f938a 4287 if (tr->trace_flags & (1 << i))
fdb372ed 4288 seq_printf(m, "%s\n", trace_options[i]);
bc0c38d1 4289 else
fdb372ed 4290 seq_printf(m, "no%s\n", trace_options[i]);
bc0c38d1
SR
4291 }
4292
adf9f195
FW
4293 for (i = 0; trace_opts[i].name; i++) {
4294 if (tracer_flags & trace_opts[i].bit)
fdb372ed 4295 seq_printf(m, "%s\n", trace_opts[i].name);
adf9f195 4296 else
fdb372ed 4297 seq_printf(m, "no%s\n", trace_opts[i].name);
adf9f195 4298 }
d8e83d26 4299 mutex_unlock(&trace_types_lock);
adf9f195 4300
fdb372ed 4301 return 0;
bc0c38d1 4302}
bc0c38d1 4303
8c1a49ae 4304static int __set_tracer_option(struct trace_array *tr,
8d18eaaf
LZ
4305 struct tracer_flags *tracer_flags,
4306 struct tracer_opt *opts, int neg)
4307{
d39cdd20 4308 struct tracer *trace = tracer_flags->trace;
8d18eaaf 4309 int ret;
bc0c38d1 4310
8c1a49ae 4311 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
8d18eaaf
LZ
4312 if (ret)
4313 return ret;
4314
4315 if (neg)
4316 tracer_flags->val &= ~opts->bit;
4317 else
4318 tracer_flags->val |= opts->bit;
4319 return 0;
bc0c38d1
SR
4320}
4321
adf9f195 4322/* Try to assign a tracer specific option */
8c1a49ae 4323static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
adf9f195 4324{
8c1a49ae 4325 struct tracer *trace = tr->current_trace;
7770841e 4326 struct tracer_flags *tracer_flags = trace->flags;
adf9f195 4327 struct tracer_opt *opts = NULL;
8d18eaaf 4328 int i;
adf9f195 4329
7770841e
Z
4330 for (i = 0; tracer_flags->opts[i].name; i++) {
4331 opts = &tracer_flags->opts[i];
adf9f195 4332
8d18eaaf 4333 if (strcmp(cmp, opts->name) == 0)
8c1a49ae 4334 return __set_tracer_option(tr, trace->flags, opts, neg);
adf9f195 4335 }
adf9f195 4336
8d18eaaf 4337 return -EINVAL;
adf9f195
FW
4338}
4339
613f04a0
SRRH
4340/* Some tracers require overwrite to stay enabled */
4341int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
4342{
4343 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
4344 return -1;
4345
4346 return 0;
4347}
4348
2b6080f2 4349int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
af4617bd
SR
4350{
4351 /* do nothing if flag is already set */
983f938a 4352 if (!!(tr->trace_flags & mask) == !!enabled)
613f04a0
SRRH
4353 return 0;
4354
4355 /* Give the tracer a chance to approve the change */
2b6080f2 4356 if (tr->current_trace->flag_changed)
bf6065b5 4357 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
613f04a0 4358 return -EINVAL;
af4617bd
SR
4359
4360 if (enabled)
983f938a 4361 tr->trace_flags |= mask;
af4617bd 4362 else
983f938a 4363 tr->trace_flags &= ~mask;
e870e9a1
LZ
4364
4365 if (mask == TRACE_ITER_RECORD_CMD)
4366 trace_event_enable_cmd_record(enabled);
750912fa 4367
d914ba37
JF
4368 if (mask == TRACE_ITER_RECORD_TGID) {
4369 if (!tgid_map)
4370 tgid_map = kzalloc((PID_MAX_DEFAULT + 1) * sizeof(*tgid_map),
4371 GFP_KERNEL);
4372 if (!tgid_map) {
4373 tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
4374 return -ENOMEM;
4375 }
4376
4377 trace_event_enable_tgid_record(enabled);
4378 }
4379
c37775d5
SR
4380 if (mask == TRACE_ITER_EVENT_FORK)
4381 trace_event_follow_fork(tr, enabled);
4382
1e10486f
NK
4383 if (mask == TRACE_ITER_FUNC_FORK)
4384 ftrace_pid_follow_fork(tr, enabled);
4385
80902822 4386 if (mask == TRACE_ITER_OVERWRITE) {
12883efb 4387 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
80902822 4388#ifdef CONFIG_TRACER_MAX_TRACE
12883efb 4389 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
80902822
SRRH
4390#endif
4391 }
81698831 4392
b9f9108c 4393 if (mask == TRACE_ITER_PRINTK) {
81698831 4394 trace_printk_start_stop_comm(enabled);
b9f9108c
SRRH
4395 trace_printk_control(enabled);
4396 }
613f04a0
SRRH
4397
4398 return 0;
af4617bd
SR
4399}
4400
2b6080f2 4401static int trace_set_options(struct trace_array *tr, char *option)
bc0c38d1 4402{
8d18eaaf 4403 char *cmp;
bc0c38d1 4404 int neg = 0;
613f04a0 4405 int ret = -ENODEV;
bc0c38d1 4406 int i;
a4d1e688 4407 size_t orig_len = strlen(option);
bc0c38d1 4408
7bcfaf54 4409 cmp = strstrip(option);
bc0c38d1 4410
8d18eaaf 4411 if (strncmp(cmp, "no", 2) == 0) {
bc0c38d1
SR
4412 neg = 1;
4413 cmp += 2;
4414 }
4415
69d34da2
SRRH
4416 mutex_lock(&trace_types_lock);
4417
bc0c38d1 4418 for (i = 0; trace_options[i]; i++) {
8d18eaaf 4419 if (strcmp(cmp, trace_options[i]) == 0) {
2b6080f2 4420 ret = set_tracer_flag(tr, 1 << i, !neg);
bc0c38d1
SR
4421 break;
4422 }
4423 }
adf9f195
FW
4424
4425 /* If no option could be set, test the specific tracer options */
69d34da2 4426 if (!trace_options[i])
8c1a49ae 4427 ret = set_tracer_option(tr, cmp, neg);
69d34da2
SRRH
4428
4429 mutex_unlock(&trace_types_lock);
bc0c38d1 4430
a4d1e688
JW
4431 /*
4432 * If the first trailing whitespace is replaced with '\0' by strstrip,
4433 * turn it back into a space.
4434 */
4435 if (orig_len > strlen(option))
4436 option[strlen(option)] = ' ';
4437
7bcfaf54
SR
4438 return ret;
4439}
4440
a4d1e688
JW
4441static void __init apply_trace_boot_options(void)
4442{
4443 char *buf = trace_boot_options_buf;
4444 char *option;
4445
4446 while (true) {
4447 option = strsep(&buf, ",");
4448
4449 if (!option)
4450 break;
a4d1e688 4451
43ed3843
SRRH
4452 if (*option)
4453 trace_set_options(&global_trace, option);
a4d1e688
JW
4454
4455 /* Put back the comma to allow this to be called again */
4456 if (buf)
4457 *(buf - 1) = ',';
4458 }
4459}
4460
7bcfaf54
SR
4461static ssize_t
4462tracing_trace_options_write(struct file *filp, const char __user *ubuf,
4463 size_t cnt, loff_t *ppos)
4464{
2b6080f2
SR
4465 struct seq_file *m = filp->private_data;
4466 struct trace_array *tr = m->private;
7bcfaf54 4467 char buf[64];
613f04a0 4468 int ret;
7bcfaf54
SR
4469
4470 if (cnt >= sizeof(buf))
4471 return -EINVAL;
4472
4afe6495 4473 if (copy_from_user(buf, ubuf, cnt))
7bcfaf54
SR
4474 return -EFAULT;
4475
a8dd2176
SR
4476 buf[cnt] = 0;
4477
2b6080f2 4478 ret = trace_set_options(tr, buf);
613f04a0
SRRH
4479 if (ret < 0)
4480 return ret;
7bcfaf54 4481
cf8517cf 4482 *ppos += cnt;
bc0c38d1
SR
4483
4484 return cnt;
4485}
4486
fdb372ed
LZ
4487static int tracing_trace_options_open(struct inode *inode, struct file *file)
4488{
7b85af63 4489 struct trace_array *tr = inode->i_private;
f77d09a3 4490 int ret;
7b85af63 4491
fdb372ed
LZ
4492 if (tracing_disabled)
4493 return -ENODEV;
2b6080f2 4494
7b85af63
SRRH
4495 if (trace_array_get(tr) < 0)
4496 return -ENODEV;
4497
f77d09a3
AL
4498 ret = single_open(file, tracing_trace_options_show, inode->i_private);
4499 if (ret < 0)
4500 trace_array_put(tr);
4501
4502 return ret;
fdb372ed
LZ
4503}
4504
5e2336a0 4505static const struct file_operations tracing_iter_fops = {
fdb372ed
LZ
4506 .open = tracing_trace_options_open,
4507 .read = seq_read,
4508 .llseek = seq_lseek,
7b85af63 4509 .release = tracing_single_release_tr,
ee6bce52 4510 .write = tracing_trace_options_write,
bc0c38d1
SR
4511};
4512
7bd2f24c
IM
4513static const char readme_msg[] =
4514 "tracing mini-HOWTO:\n\n"
22f45649
SRRH
4515 "# echo 0 > tracing_on : quick way to disable tracing\n"
4516 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
4517 " Important files:\n"
4518 " trace\t\t\t- The static contents of the buffer\n"
4519 "\t\t\t To clear the buffer write into this file: echo > trace\n"
4520 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
4521 " current_tracer\t- function and latency tracers\n"
4522 " available_tracers\t- list of configured tracers for current_tracer\n"
4523 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
4524 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
4525 " trace_clock\t\t-change the clock used to order events\n"
4526 " local: Per cpu clock but may not be synced across CPUs\n"
4527 " global: Synced across CPUs but slows tracing down.\n"
4528 " counter: Not a clock, but just an increment\n"
4529 " uptime: Jiffy counter from time of boot\n"
4530 " perf: Same clock that perf events use\n"
4531#ifdef CONFIG_X86_64
4532 " x86-tsc: TSC cycle counter\n"
4533#endif
4534 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
fa32e855 4535 "\n trace_marker_raw\t\t- Writes into this file writes binary data into the kernel buffer\n"
22f45649
SRRH
4536 " tracing_cpumask\t- Limit which CPUs to trace\n"
4537 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
4538 "\t\t\t Remove sub-buffer with rmdir\n"
4539 " trace_options\t\t- Set format or modify how tracing happens\n"
71485c45
SRRH
4540 "\t\t\t Disable an option by adding a suffix 'no' to the\n"
4541 "\t\t\t option name\n"
939c7a4f 4542 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
22f45649
SRRH
4543#ifdef CONFIG_DYNAMIC_FTRACE
4544 "\n available_filter_functions - list of functions that can be filtered on\n"
71485c45
SRRH
4545 " set_ftrace_filter\t- echo function name in here to only trace these\n"
4546 "\t\t\t functions\n"
60f1d5e3 4547 "\t accepts: func_full_name or glob-matching-pattern\n"
71485c45
SRRH
4548 "\t modules: Can select a group via module\n"
4549 "\t Format: :mod:<module-name>\n"
4550 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
4551 "\t triggers: a command to perform when function is hit\n"
4552 "\t Format: <function>:<trigger>[:count]\n"
4553 "\t trigger: traceon, traceoff\n"
4554 "\t\t enable_event:<system>:<event>\n"
4555 "\t\t disable_event:<system>:<event>\n"
22f45649 4556#ifdef CONFIG_STACKTRACE
71485c45 4557 "\t\t stacktrace\n"
22f45649
SRRH
4558#endif
4559#ifdef CONFIG_TRACER_SNAPSHOT
71485c45 4560 "\t\t snapshot\n"
22f45649 4561#endif
17a280ea
SRRH
4562 "\t\t dump\n"
4563 "\t\t cpudump\n"
71485c45
SRRH
4564 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
4565 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
4566 "\t The first one will disable tracing every time do_fault is hit\n"
4567 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
4568 "\t The first time do trap is hit and it disables tracing, the\n"
4569 "\t counter will decrement to 2. If tracing is already disabled,\n"
4570 "\t the counter will not decrement. It only decrements when the\n"
4571 "\t trigger did work\n"
4572 "\t To remove trigger without count:\n"
4573 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
4574 "\t To remove trigger with a count:\n"
4575 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
22f45649 4576 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
71485c45
SRRH
4577 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
4578 "\t modules: Can select a group via module command :mod:\n"
4579 "\t Does not accept triggers\n"
22f45649
SRRH
4580#endif /* CONFIG_DYNAMIC_FTRACE */
4581#ifdef CONFIG_FUNCTION_TRACER
71485c45
SRRH
4582 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
4583 "\t\t (function)\n"
22f45649
SRRH
4584#endif
4585#ifdef CONFIG_FUNCTION_GRAPH_TRACER
4586 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
d048a8c7 4587 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
22f45649
SRRH
4588 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
4589#endif
4590#ifdef CONFIG_TRACER_SNAPSHOT
71485c45
SRRH
4591 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
4592 "\t\t\t snapshot buffer. Read the contents for more\n"
4593 "\t\t\t information\n"
22f45649 4594#endif
991821c8 4595#ifdef CONFIG_STACK_TRACER
22f45649
SRRH
4596 " stack_trace\t\t- Shows the max stack trace when active\n"
4597 " stack_max_size\t- Shows current max stack size that was traced\n"
71485c45
SRRH
4598 "\t\t\t Write into this file to reset the max size (trigger a\n"
4599 "\t\t\t new trace)\n"
22f45649 4600#ifdef CONFIG_DYNAMIC_FTRACE
71485c45
SRRH
4601 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
4602 "\t\t\t traces\n"
22f45649 4603#endif
991821c8 4604#endif /* CONFIG_STACK_TRACER */
6b0b7551 4605#ifdef CONFIG_KPROBE_EVENTS
86425625
MH
4606 " kprobe_events\t\t- Add/remove/show the kernel dynamic events\n"
4607 "\t\t\t Write into this file to define/undefine new trace events.\n"
4608#endif
6b0b7551 4609#ifdef CONFIG_UPROBE_EVENTS
86425625
MH
4610 " uprobe_events\t\t- Add/remove/show the userspace dynamic events\n"
4611 "\t\t\t Write into this file to define/undefine new trace events.\n"
4612#endif
6b0b7551 4613#if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
86425625 4614 "\t accepts: event-definitions (one definition per line)\n"
c3ca46ef
MH
4615 "\t Format: p[:[<group>/]<event>] <place> [<args>]\n"
4616 "\t r[maxactive][:[<group>/]<event>] <place> [<args>]\n"
86425625 4617 "\t -:[<group>/]<event>\n"
6b0b7551 4618#ifdef CONFIG_KPROBE_EVENTS
86425625 4619 "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
35b6f55a 4620 "place (kretprobe): [<module>:]<symbol>[+<offset>]|<memaddr>\n"
86425625 4621#endif
6b0b7551 4622#ifdef CONFIG_UPROBE_EVENTS
86425625
MH
4623 "\t place: <path>:<offset>\n"
4624#endif
4625 "\t args: <name>=fetcharg[:type]\n"
4626 "\t fetcharg: %<register>, @<address>, @<symbol>[+|-<offset>],\n"
4627 "\t $stack<index>, $stack, $retval, $comm\n"
4628 "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string,\n"
4629 "\t b<bit-width>@<bit-offset>/<container-size>\n"
4630#endif
26f25564
TZ
4631 " events/\t\t- Directory containing all trace event subsystems:\n"
4632 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
4633 " events/<system>/\t- Directory containing all trace events for <system>:\n"
71485c45
SRRH
4634 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
4635 "\t\t\t events\n"
26f25564 4636 " filter\t\t- If set, only events passing filter are traced\n"
71485c45
SRRH
4637 " events/<system>/<event>/\t- Directory containing control files for\n"
4638 "\t\t\t <event>:\n"
26f25564
TZ
4639 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
4640 " filter\t\t- If set, only events passing filter are traced\n"
4641 " trigger\t\t- If set, a command to perform when event is hit\n"
71485c45
SRRH
4642 "\t Format: <trigger>[:count][if <filter>]\n"
4643 "\t trigger: traceon, traceoff\n"
4644 "\t enable_event:<system>:<event>\n"
4645 "\t disable_event:<system>:<event>\n"
d0bad49b
TZ
4646#ifdef CONFIG_HIST_TRIGGERS
4647 "\t enable_hist:<system>:<event>\n"
4648 "\t disable_hist:<system>:<event>\n"
4649#endif
26f25564 4650#ifdef CONFIG_STACKTRACE
71485c45 4651 "\t\t stacktrace\n"
26f25564
TZ
4652#endif
4653#ifdef CONFIG_TRACER_SNAPSHOT
71485c45 4654 "\t\t snapshot\n"
7ef224d1
TZ
4655#endif
4656#ifdef CONFIG_HIST_TRIGGERS
4657 "\t\t hist (see below)\n"
26f25564 4658#endif
71485c45
SRRH
4659 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
4660 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
4661 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
4662 "\t events/block/block_unplug/trigger\n"
4663 "\t The first disables tracing every time block_unplug is hit.\n"
4664 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
4665 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
4666 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
4667 "\t Like function triggers, the counter is only decremented if it\n"
4668 "\t enabled or disabled tracing.\n"
4669 "\t To remove a trigger without a count:\n"
4670 "\t echo '!<trigger> > <system>/<event>/trigger\n"
4671 "\t To remove a trigger with a count:\n"
4672 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
4673 "\t Filters can be ignored when removing a trigger.\n"
7ef224d1
TZ
4674#ifdef CONFIG_HIST_TRIGGERS
4675 " hist trigger\t- If set, event hits are aggregated into a hash table\n"
76a3b0c8 4676 "\t Format: hist:keys=<field1[,field2,...]>\n"
f2606835 4677 "\t [:values=<field1[,field2,...]>]\n"
e62347d2 4678 "\t [:sort=<field1[,field2,...]>]\n"
7ef224d1 4679 "\t [:size=#entries]\n"
e86ae9ba 4680 "\t [:pause][:continue][:clear]\n"
5463bfda 4681 "\t [:name=histname1]\n"
7ef224d1
TZ
4682 "\t [if <filter>]\n\n"
4683 "\t When a matching event is hit, an entry is added to a hash\n"
f2606835
TZ
4684 "\t table using the key(s) and value(s) named, and the value of a\n"
4685 "\t sum called 'hitcount' is incremented. Keys and values\n"
4686 "\t correspond to fields in the event's format description. Keys\n"
69a0200c
TZ
4687 "\t can be any field, or the special string 'stacktrace'.\n"
4688 "\t Compound keys consisting of up to two fields can be specified\n"
4689 "\t by the 'keys' keyword. Values must correspond to numeric\n"
4690 "\t fields. Sort keys consisting of up to two fields can be\n"
4691 "\t specified using the 'sort' keyword. The sort direction can\n"
4692 "\t be modified by appending '.descending' or '.ascending' to a\n"
4693 "\t sort field. The 'size' parameter can be used to specify more\n"
5463bfda
TZ
4694 "\t or fewer than the default 2048 entries for the hashtable size.\n"
4695 "\t If a hist trigger is given a name using the 'name' parameter,\n"
4696 "\t its histogram data will be shared with other triggers of the\n"
4697 "\t same name, and trigger hits will update this common data.\n\n"
7ef224d1 4698 "\t Reading the 'hist' file for the event will dump the hash\n"
52a7f16d
TZ
4699 "\t table in its entirety to stdout. If there are multiple hist\n"
4700 "\t triggers attached to an event, there will be a table for each\n"
5463bfda
TZ
4701 "\t trigger in the output. The table displayed for a named\n"
4702 "\t trigger will be the same as any other instance having the\n"
4703 "\t same name. The default format used to display a given field\n"
4704 "\t can be modified by appending any of the following modifiers\n"
4705 "\t to the field name, as applicable:\n\n"
c6afad49
TZ
4706 "\t .hex display a number as a hex value\n"
4707 "\t .sym display an address as a symbol\n"
6b4827ad 4708 "\t .sym-offset display an address as a symbol and offset\n"
31696198
TZ
4709 "\t .execname display a common_pid as a program name\n"
4710 "\t .syscall display a syscall id as a syscall name\n\n"
4b94f5b7 4711 "\t .log2 display log2 value rather than raw number\n\n"
83e99914
TZ
4712 "\t The 'pause' parameter can be used to pause an existing hist\n"
4713 "\t trigger or to start a hist trigger but not log any events\n"
4714 "\t until told to do so. 'continue' can be used to start or\n"
4715 "\t restart a paused hist trigger.\n\n"
e86ae9ba
TZ
4716 "\t The 'clear' parameter will clear the contents of a running\n"
4717 "\t hist trigger and leave its current paused/active state\n"
4718 "\t unchanged.\n\n"
d0bad49b
TZ
4719 "\t The enable_hist and disable_hist triggers can be used to\n"
4720 "\t have one event conditionally start and stop another event's\n"
4721 "\t already-attached hist trigger. The syntax is analagous to\n"
4722 "\t the enable_event and disable_event triggers.\n"
7ef224d1 4723#endif
7bd2f24c
IM
4724;
4725
4726static ssize_t
4727tracing_readme_read(struct file *filp, char __user *ubuf,
4728 size_t cnt, loff_t *ppos)
4729{
4730 return simple_read_from_buffer(ubuf, cnt, ppos,
4731 readme_msg, strlen(readme_msg));
4732}
4733
5e2336a0 4734static const struct file_operations tracing_readme_fops = {
c7078de1
IM
4735 .open = tracing_open_generic,
4736 .read = tracing_readme_read,
b444786f 4737 .llseek = generic_file_llseek,
7bd2f24c
IM
4738};
4739
99c621d7
MS
4740static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos)
4741{
4742 int *ptr = v;
4743
4744 if (*pos || m->count)
4745 ptr++;
4746
4747 (*pos)++;
4748
4749 for (; ptr <= &tgid_map[PID_MAX_DEFAULT]; ptr++) {
4750 if (trace_find_tgid(*ptr))
4751 return ptr;
4752 }
4753
4754 return NULL;
4755}
4756
4757static void *saved_tgids_start(struct seq_file *m, loff_t *pos)
4758{
4759 void *v;
4760 loff_t l = 0;
4761
4762 if (!tgid_map)
4763 return NULL;
4764
4765 v = &tgid_map[0];
4766 while (l <= *pos) {
4767 v = saved_tgids_next(m, v, &l);
4768 if (!v)
4769 return NULL;
4770 }
4771
4772 return v;
4773}
4774
4775static void saved_tgids_stop(struct seq_file *m, void *v)
4776{
4777}
4778
4779static int saved_tgids_show(struct seq_file *m, void *v)
4780{
4781 int pid = (int *)v - tgid_map;
4782
4783 seq_printf(m, "%d %d\n", pid, trace_find_tgid(pid));
4784 return 0;
4785}
4786
4787static const struct seq_operations tracing_saved_tgids_seq_ops = {
4788 .start = saved_tgids_start,
4789 .stop = saved_tgids_stop,
4790 .next = saved_tgids_next,
4791 .show = saved_tgids_show,
4792};
4793
4794static int tracing_saved_tgids_open(struct inode *inode, struct file *filp)
4795{
4796 if (tracing_disabled)
4797 return -ENODEV;
4798
4799 return seq_open(filp, &tracing_saved_tgids_seq_ops);
4800}
4801
4802
4803static const struct file_operations tracing_saved_tgids_fops = {
4804 .open = tracing_saved_tgids_open,
4805 .read = seq_read,
4806 .llseek = seq_lseek,
4807 .release = seq_release,
4808};
4809
42584c81
YY
4810static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
4811{
4812 unsigned int *ptr = v;
69abe6a5 4813
42584c81
YY
4814 if (*pos || m->count)
4815 ptr++;
69abe6a5 4816
42584c81 4817 (*pos)++;
69abe6a5 4818
939c7a4f
YY
4819 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
4820 ptr++) {
42584c81
YY
4821 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
4822 continue;
69abe6a5 4823
42584c81
YY
4824 return ptr;
4825 }
69abe6a5 4826
42584c81
YY
4827 return NULL;
4828}
4829
4830static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
4831{
4832 void *v;
4833 loff_t l = 0;
69abe6a5 4834
4c27e756
SRRH
4835 preempt_disable();
4836 arch_spin_lock(&trace_cmdline_lock);
4837
939c7a4f 4838 v = &savedcmd->map_cmdline_to_pid[0];
42584c81
YY
4839 while (l <= *pos) {
4840 v = saved_cmdlines_next(m, v, &l);
4841 if (!v)
4842 return NULL;
69abe6a5
AP
4843 }
4844
42584c81
YY
4845 return v;
4846}
4847
4848static void saved_cmdlines_stop(struct seq_file *m, void *v)
4849{
4c27e756
SRRH
4850 arch_spin_unlock(&trace_cmdline_lock);
4851 preempt_enable();
42584c81 4852}
69abe6a5 4853
42584c81
YY
4854static int saved_cmdlines_show(struct seq_file *m, void *v)
4855{
4856 char buf[TASK_COMM_LEN];
4857 unsigned int *pid = v;
69abe6a5 4858
4c27e756 4859 __trace_find_cmdline(*pid, buf);
42584c81
YY
4860 seq_printf(m, "%d %s\n", *pid, buf);
4861 return 0;
4862}
4863
4864static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
4865 .start = saved_cmdlines_start,
4866 .next = saved_cmdlines_next,
4867 .stop = saved_cmdlines_stop,
4868 .show = saved_cmdlines_show,
4869};
4870
4871static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
4872{
4873 if (tracing_disabled)
4874 return -ENODEV;
4875
4876 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
69abe6a5
AP
4877}
4878
4879static const struct file_operations tracing_saved_cmdlines_fops = {
42584c81
YY
4880 .open = tracing_saved_cmdlines_open,
4881 .read = seq_read,
4882 .llseek = seq_lseek,
4883 .release = seq_release,
69abe6a5
AP
4884};
4885
939c7a4f
YY
4886static ssize_t
4887tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
4888 size_t cnt, loff_t *ppos)
4889{
4890 char buf[64];
4891 int r;
4892
4893 arch_spin_lock(&trace_cmdline_lock);
a6af8fbf 4894 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
939c7a4f
YY
4895 arch_spin_unlock(&trace_cmdline_lock);
4896
4897 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4898}
4899
4900static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
4901{
4902 kfree(s->saved_cmdlines);
4903 kfree(s->map_cmdline_to_pid);
4904 kfree(s);
4905}
4906
4907static int tracing_resize_saved_cmdlines(unsigned int val)
4908{
4909 struct saved_cmdlines_buffer *s, *savedcmd_temp;
4910
a6af8fbf 4911 s = kmalloc(sizeof(*s), GFP_KERNEL);
939c7a4f
YY
4912 if (!s)
4913 return -ENOMEM;
4914
4915 if (allocate_cmdlines_buffer(val, s) < 0) {
4916 kfree(s);
4917 return -ENOMEM;
4918 }
4919
4920 arch_spin_lock(&trace_cmdline_lock);
4921 savedcmd_temp = savedcmd;
4922 savedcmd = s;
4923 arch_spin_unlock(&trace_cmdline_lock);
4924 free_saved_cmdlines_buffer(savedcmd_temp);
4925
4926 return 0;
4927}
4928
4929static ssize_t
4930tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
4931 size_t cnt, loff_t *ppos)
4932{
4933 unsigned long val;
4934 int ret;
4935
4936 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4937 if (ret)
4938 return ret;
4939
4940 /* must have at least 1 entry or less than PID_MAX_DEFAULT */
4941 if (!val || val > PID_MAX_DEFAULT)
4942 return -EINVAL;
4943
4944 ret = tracing_resize_saved_cmdlines((unsigned int)val);
4945 if (ret < 0)
4946 return ret;
4947
4948 *ppos += cnt;
4949
4950 return cnt;
4951}
4952
4953static const struct file_operations tracing_saved_cmdlines_size_fops = {
4954 .open = tracing_open_generic,
4955 .read = tracing_saved_cmdlines_size_read,
4956 .write = tracing_saved_cmdlines_size_write,
4957};
4958
681bec03 4959#ifdef CONFIG_TRACE_EVAL_MAP_FILE
23bf8cb8 4960static union trace_eval_map_item *
f57a4143 4961update_eval_map(union trace_eval_map_item *ptr)
9828413d 4962{
00f4b652 4963 if (!ptr->map.eval_string) {
9828413d
SRRH
4964 if (ptr->tail.next) {
4965 ptr = ptr->tail.next;
4966 /* Set ptr to the next real item (skip head) */
4967 ptr++;
4968 } else
4969 return NULL;
4970 }
4971 return ptr;
4972}
4973
f57a4143 4974static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
9828413d 4975{
23bf8cb8 4976 union trace_eval_map_item *ptr = v;
9828413d
SRRH
4977
4978 /*
4979 * Paranoid! If ptr points to end, we don't want to increment past it.
4980 * This really should never happen.
4981 */
f57a4143 4982 ptr = update_eval_map(ptr);
9828413d
SRRH
4983 if (WARN_ON_ONCE(!ptr))
4984 return NULL;
4985
4986 ptr++;
4987
4988 (*pos)++;
4989
f57a4143 4990 ptr = update_eval_map(ptr);
9828413d
SRRH
4991
4992 return ptr;
4993}
4994
f57a4143 4995static void *eval_map_start(struct seq_file *m, loff_t *pos)
9828413d 4996{
23bf8cb8 4997 union trace_eval_map_item *v;
9828413d
SRRH
4998 loff_t l = 0;
4999
1793ed93 5000 mutex_lock(&trace_eval_mutex);
9828413d 5001
23bf8cb8 5002 v = trace_eval_maps;
9828413d
SRRH
5003 if (v)
5004 v++;
5005
5006 while (v && l < *pos) {
f57a4143 5007 v = eval_map_next(m, v, &l);
9828413d
SRRH
5008 }
5009
5010 return v;
5011}
5012
f57a4143 5013static void eval_map_stop(struct seq_file *m, void *v)
9828413d 5014{
1793ed93 5015 mutex_unlock(&trace_eval_mutex);
9828413d
SRRH
5016}
5017
f57a4143 5018static int eval_map_show(struct seq_file *m, void *v)
9828413d 5019{
23bf8cb8 5020 union trace_eval_map_item *ptr = v;
9828413d
SRRH
5021
5022 seq_printf(m, "%s %ld (%s)\n",
00f4b652 5023 ptr->map.eval_string, ptr->map.eval_value,
9828413d
SRRH
5024 ptr->map.system);
5025
5026 return 0;
5027}
5028
f57a4143
JL
5029static const struct seq_operations tracing_eval_map_seq_ops = {
5030 .start = eval_map_start,
5031 .next = eval_map_next,
5032 .stop = eval_map_stop,
5033 .show = eval_map_show,
9828413d
SRRH
5034};
5035
f57a4143 5036static int tracing_eval_map_open(struct inode *inode, struct file *filp)
9828413d
SRRH
5037{
5038 if (tracing_disabled)
5039 return -ENODEV;
5040
f57a4143 5041 return seq_open(filp, &tracing_eval_map_seq_ops);
9828413d
SRRH
5042}
5043
f57a4143
JL
5044static const struct file_operations tracing_eval_map_fops = {
5045 .open = tracing_eval_map_open,
9828413d
SRRH
5046 .read = seq_read,
5047 .llseek = seq_lseek,
5048 .release = seq_release,
5049};
5050
23bf8cb8 5051static inline union trace_eval_map_item *
5f60b351 5052trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
9828413d
SRRH
5053{
5054 /* Return tail of array given the head */
5055 return ptr + ptr->head.length + 1;
5056}
5057
5058static void
f57a4143 5059trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
9828413d
SRRH
5060 int len)
5061{
00f4b652
JL
5062 struct trace_eval_map **stop;
5063 struct trace_eval_map **map;
23bf8cb8
JL
5064 union trace_eval_map_item *map_array;
5065 union trace_eval_map_item *ptr;
9828413d
SRRH
5066
5067 stop = start + len;
5068
5069 /*
23bf8cb8 5070 * The trace_eval_maps contains the map plus a head and tail item,
9828413d
SRRH
5071 * where the head holds the module and length of array, and the
5072 * tail holds a pointer to the next list.
5073 */
5074 map_array = kmalloc(sizeof(*map_array) * (len + 2), GFP_KERNEL);
5075 if (!map_array) {
f57a4143 5076 pr_warn("Unable to allocate trace eval mapping\n");
9828413d
SRRH
5077 return;
5078 }
5079
1793ed93 5080 mutex_lock(&trace_eval_mutex);
9828413d 5081
23bf8cb8
JL
5082 if (!trace_eval_maps)
5083 trace_eval_maps = map_array;
9828413d 5084 else {
23bf8cb8 5085 ptr = trace_eval_maps;
9828413d 5086 for (;;) {
5f60b351 5087 ptr = trace_eval_jmp_to_tail(ptr);
9828413d
SRRH
5088 if (!ptr->tail.next)
5089 break;
5090 ptr = ptr->tail.next;
5091
5092 }
5093 ptr->tail.next = map_array;
5094 }
5095 map_array->head.mod = mod;
5096 map_array->head.length = len;
5097 map_array++;
5098
5099 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
5100 map_array->map = **map;
5101 map_array++;
5102 }
5103 memset(map_array, 0, sizeof(*map_array));
5104
1793ed93 5105 mutex_unlock(&trace_eval_mutex);
9828413d
SRRH
5106}
5107
f57a4143 5108static void trace_create_eval_file(struct dentry *d_tracer)
9828413d 5109{
681bec03 5110 trace_create_file("eval_map", 0444, d_tracer,
f57a4143 5111 NULL, &tracing_eval_map_fops);
9828413d
SRRH
5112}
5113
681bec03 5114#else /* CONFIG_TRACE_EVAL_MAP_FILE */
f57a4143
JL
5115static inline void trace_create_eval_file(struct dentry *d_tracer) { }
5116static inline void trace_insert_eval_map_file(struct module *mod,
00f4b652 5117 struct trace_eval_map **start, int len) { }
681bec03 5118#endif /* !CONFIG_TRACE_EVAL_MAP_FILE */
9828413d 5119
f57a4143 5120static void trace_insert_eval_map(struct module *mod,
00f4b652 5121 struct trace_eval_map **start, int len)
0c564a53 5122{
00f4b652 5123 struct trace_eval_map **map;
0c564a53
SRRH
5124
5125 if (len <= 0)
5126 return;
5127
5128 map = start;
5129
f57a4143 5130 trace_event_eval_update(map, len);
9828413d 5131
f57a4143 5132 trace_insert_eval_map_file(mod, start, len);
0c564a53
SRRH
5133}
5134
bc0c38d1
SR
5135static ssize_t
5136tracing_set_trace_read(struct file *filp, char __user *ubuf,
5137 size_t cnt, loff_t *ppos)
5138{
2b6080f2 5139 struct trace_array *tr = filp->private_data;
ee6c2c1b 5140 char buf[MAX_TRACER_SIZE+2];
bc0c38d1
SR
5141 int r;
5142
5143 mutex_lock(&trace_types_lock);
2b6080f2 5144 r = sprintf(buf, "%s\n", tr->current_trace->name);
bc0c38d1
SR
5145 mutex_unlock(&trace_types_lock);
5146
4bf39a94 5147 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
bc0c38d1
SR
5148}
5149
b6f11df2
ACM
5150int tracer_init(struct tracer *t, struct trace_array *tr)
5151{
12883efb 5152 tracing_reset_online_cpus(&tr->trace_buffer);
b6f11df2
ACM
5153 return t->init(tr);
5154}
5155
12883efb 5156static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
438ced17
VN
5157{
5158 int cpu;
737223fb 5159
438ced17 5160 for_each_tracing_cpu(cpu)
12883efb 5161 per_cpu_ptr(buf->data, cpu)->entries = val;
438ced17
VN
5162}
5163
12883efb 5164#ifdef CONFIG_TRACER_MAX_TRACE
d60da506 5165/* resize @tr's buffer to the size of @size_tr's entries */
12883efb
SRRH
5166static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
5167 struct trace_buffer *size_buf, int cpu_id)
d60da506
HT
5168{
5169 int cpu, ret = 0;
5170
5171 if (cpu_id == RING_BUFFER_ALL_CPUS) {
5172 for_each_tracing_cpu(cpu) {
12883efb
SRRH
5173 ret = ring_buffer_resize(trace_buf->buffer,
5174 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
d60da506
HT
5175 if (ret < 0)
5176 break;
12883efb
SRRH
5177 per_cpu_ptr(trace_buf->data, cpu)->entries =
5178 per_cpu_ptr(size_buf->data, cpu)->entries;
d60da506
HT
5179 }
5180 } else {
12883efb
SRRH
5181 ret = ring_buffer_resize(trace_buf->buffer,
5182 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
d60da506 5183 if (ret == 0)
12883efb
SRRH
5184 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
5185 per_cpu_ptr(size_buf->data, cpu_id)->entries;
d60da506
HT
5186 }
5187
5188 return ret;
5189}
12883efb 5190#endif /* CONFIG_TRACER_MAX_TRACE */
d60da506 5191
2b6080f2
SR
5192static int __tracing_resize_ring_buffer(struct trace_array *tr,
5193 unsigned long size, int cpu)
73c5162a
SR
5194{
5195 int ret;
5196
5197 /*
5198 * If kernel or user changes the size of the ring buffer
a123c52b
SR
5199 * we use the size that was given, and we can forget about
5200 * expanding it later.
73c5162a 5201 */
55034cd6 5202 ring_buffer_expanded = true;
73c5162a 5203
b382ede6 5204 /* May be called before buffers are initialized */
12883efb 5205 if (!tr->trace_buffer.buffer)
b382ede6
SR
5206 return 0;
5207
12883efb 5208 ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
73c5162a
SR
5209 if (ret < 0)
5210 return ret;
5211
12883efb 5212#ifdef CONFIG_TRACER_MAX_TRACE
2b6080f2
SR
5213 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
5214 !tr->current_trace->use_max_tr)
ef710e10
KM
5215 goto out;
5216
12883efb 5217 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
73c5162a 5218 if (ret < 0) {
12883efb
SRRH
5219 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
5220 &tr->trace_buffer, cpu);
73c5162a 5221 if (r < 0) {
a123c52b
SR
5222 /*
5223 * AARGH! We are left with different
5224 * size max buffer!!!!
5225 * The max buffer is our "snapshot" buffer.
5226 * When a tracer needs a snapshot (one of the
5227 * latency tracers), it swaps the max buffer
5228 * with the saved snap shot. We succeeded to
5229 * update the size of the main buffer, but failed to
5230 * update the size of the max buffer. But when we tried
5231 * to reset the main buffer to the original size, we
5232 * failed there too. This is very unlikely to
5233 * happen, but if it does, warn and kill all
5234 * tracing.
5235 */
73c5162a
SR
5236 WARN_ON(1);
5237 tracing_disabled = 1;
5238 }
5239 return ret;
5240 }
5241
438ced17 5242 if (cpu == RING_BUFFER_ALL_CPUS)
12883efb 5243 set_buffer_entries(&tr->max_buffer, size);
438ced17 5244 else
12883efb 5245 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
438ced17 5246
ef710e10 5247 out:
12883efb
SRRH
5248#endif /* CONFIG_TRACER_MAX_TRACE */
5249
438ced17 5250 if (cpu == RING_BUFFER_ALL_CPUS)
12883efb 5251 set_buffer_entries(&tr->trace_buffer, size);
438ced17 5252 else
12883efb 5253 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
73c5162a
SR
5254
5255 return ret;
5256}
5257
2b6080f2
SR
5258static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
5259 unsigned long size, int cpu_id)
4f271a2a 5260{
83f40318 5261 int ret = size;
4f271a2a
VN
5262
5263 mutex_lock(&trace_types_lock);
5264
438ced17
VN
5265 if (cpu_id != RING_BUFFER_ALL_CPUS) {
5266 /* make sure, this cpu is enabled in the mask */
5267 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
5268 ret = -EINVAL;
5269 goto out;
5270 }
5271 }
4f271a2a 5272
2b6080f2 5273 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
4f271a2a
VN
5274 if (ret < 0)
5275 ret = -ENOMEM;
5276
438ced17 5277out:
4f271a2a
VN
5278 mutex_unlock(&trace_types_lock);
5279
5280 return ret;
5281}
5282
ef710e10 5283
1852fcce
SR
5284/**
5285 * tracing_update_buffers - used by tracing facility to expand ring buffers
5286 *
5287 * To save on memory when the tracing is never used on a system with it
5288 * configured in. The ring buffers are set to a minimum size. But once
5289 * a user starts to use the tracing facility, then they need to grow
5290 * to their default size.
5291 *
5292 * This function is to be called when a tracer is about to be used.
5293 */
5294int tracing_update_buffers(void)
5295{
5296 int ret = 0;
5297
1027fcb2 5298 mutex_lock(&trace_types_lock);
1852fcce 5299 if (!ring_buffer_expanded)
2b6080f2 5300 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
438ced17 5301 RING_BUFFER_ALL_CPUS);
1027fcb2 5302 mutex_unlock(&trace_types_lock);
1852fcce
SR
5303
5304 return ret;
5305}
5306
577b785f
SR
5307struct trace_option_dentry;
5308
37aea98b 5309static void
2b6080f2 5310create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
577b785f 5311
6b450d25
SRRH
5312/*
5313 * Used to clear out the tracer before deletion of an instance.
5314 * Must have trace_types_lock held.
5315 */
5316static void tracing_set_nop(struct trace_array *tr)
5317{
5318 if (tr->current_trace == &nop_trace)
5319 return;
5320
50512ab5 5321 tr->current_trace->enabled--;
6b450d25
SRRH
5322
5323 if (tr->current_trace->reset)
5324 tr->current_trace->reset(tr);
5325
5326 tr->current_trace = &nop_trace;
5327}
5328
41d9c0be 5329static void add_tracer_options(struct trace_array *tr, struct tracer *t)
bc0c38d1 5330{
09d23a1d
SRRH
5331 /* Only enable if the directory has been created already. */
5332 if (!tr->dir)
5333 return;
5334
37aea98b 5335 create_trace_option_files(tr, t);
09d23a1d
SRRH
5336}
5337
5338static int tracing_set_tracer(struct trace_array *tr, const char *buf)
5339{
bc0c38d1 5340 struct tracer *t;
12883efb 5341#ifdef CONFIG_TRACER_MAX_TRACE
34600f0e 5342 bool had_max_tr;
12883efb 5343#endif
d9e54076 5344 int ret = 0;
bc0c38d1 5345
1027fcb2
SR
5346 mutex_lock(&trace_types_lock);
5347
73c5162a 5348 if (!ring_buffer_expanded) {
2b6080f2 5349 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
438ced17 5350 RING_BUFFER_ALL_CPUS);
73c5162a 5351 if (ret < 0)
59f586db 5352 goto out;
73c5162a
SR
5353 ret = 0;
5354 }
5355
bc0c38d1
SR
5356 for (t = trace_types; t; t = t->next) {
5357 if (strcmp(t->name, buf) == 0)
5358 break;
5359 }
c2931e05
FW
5360 if (!t) {
5361 ret = -EINVAL;
5362 goto out;
5363 }
2b6080f2 5364 if (t == tr->current_trace)
bc0c38d1
SR
5365 goto out;
5366
c7b3ae0b
ZSZ
5367 /* Some tracers won't work on kernel command line */
5368 if (system_state < SYSTEM_RUNNING && t->noboot) {
5369 pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
5370 t->name);
5371 goto out;
5372 }
5373
607e2ea1
SRRH
5374 /* Some tracers are only allowed for the top level buffer */
5375 if (!trace_ok_for_array(t, tr)) {
5376 ret = -EINVAL;
5377 goto out;
5378 }
5379
cf6ab6d9
SRRH
5380 /* If trace pipe files are being read, we can't change the tracer */
5381 if (tr->current_trace->ref) {
5382 ret = -EBUSY;
5383 goto out;
5384 }
5385
9f029e83 5386 trace_branch_disable();
613f04a0 5387
50512ab5 5388 tr->current_trace->enabled--;
613f04a0 5389
2b6080f2
SR
5390 if (tr->current_trace->reset)
5391 tr->current_trace->reset(tr);
34600f0e 5392
12883efb 5393 /* Current trace needs to be nop_trace before synchronize_sched */
2b6080f2 5394 tr->current_trace = &nop_trace;
34600f0e 5395
45ad21ca
SRRH
5396#ifdef CONFIG_TRACER_MAX_TRACE
5397 had_max_tr = tr->allocated_snapshot;
34600f0e
SR
5398
5399 if (had_max_tr && !t->use_max_tr) {
5400 /*
5401 * We need to make sure that the update_max_tr sees that
5402 * current_trace changed to nop_trace to keep it from
5403 * swapping the buffers after we resize it.
5404 * The update_max_tr is called from interrupts disabled
5405 * so a synchronized_sched() is sufficient.
5406 */
5407 synchronize_sched();
3209cff4 5408 free_snapshot(tr);
ef710e10 5409 }
12883efb 5410#endif
12883efb
SRRH
5411
5412#ifdef CONFIG_TRACER_MAX_TRACE
34600f0e 5413 if (t->use_max_tr && !had_max_tr) {
3209cff4 5414 ret = alloc_snapshot(tr);
d60da506
HT
5415 if (ret < 0)
5416 goto out;
ef710e10 5417 }
12883efb 5418#endif
577b785f 5419
1c80025a 5420 if (t->init) {
b6f11df2 5421 ret = tracer_init(t, tr);
1c80025a
FW
5422 if (ret)
5423 goto out;
5424 }
bc0c38d1 5425
2b6080f2 5426 tr->current_trace = t;
50512ab5 5427 tr->current_trace->enabled++;
9f029e83 5428 trace_branch_enable(tr);
bc0c38d1
SR
5429 out:
5430 mutex_unlock(&trace_types_lock);
5431
d9e54076
PZ
5432 return ret;
5433}
5434
5435static ssize_t
5436tracing_set_trace_write(struct file *filp, const char __user *ubuf,
5437 size_t cnt, loff_t *ppos)
5438{
607e2ea1 5439 struct trace_array *tr = filp->private_data;
ee6c2c1b 5440 char buf[MAX_TRACER_SIZE+1];
d9e54076
PZ
5441 int i;
5442 size_t ret;
e6e7a65a
FW
5443 int err;
5444
5445 ret = cnt;
d9e54076 5446
ee6c2c1b
LZ
5447 if (cnt > MAX_TRACER_SIZE)
5448 cnt = MAX_TRACER_SIZE;
d9e54076 5449
4afe6495 5450 if (copy_from_user(buf, ubuf, cnt))
d9e54076
PZ
5451 return -EFAULT;
5452
5453 buf[cnt] = 0;
5454
5455 /* strip ending whitespace. */
5456 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
5457 buf[i] = 0;
5458
607e2ea1 5459 err = tracing_set_tracer(tr, buf);
e6e7a65a
FW
5460 if (err)
5461 return err;
d9e54076 5462
cf8517cf 5463 *ppos += ret;
bc0c38d1 5464
c2931e05 5465 return ret;
bc0c38d1
SR
5466}
5467
5468static ssize_t
6508fa76
SF
5469tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
5470 size_t cnt, loff_t *ppos)
bc0c38d1 5471{
bc0c38d1
SR
5472 char buf[64];
5473 int r;
5474
cffae437 5475 r = snprintf(buf, sizeof(buf), "%ld\n",
bc0c38d1 5476 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
cffae437
SR
5477 if (r > sizeof(buf))
5478 r = sizeof(buf);
4bf39a94 5479 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
bc0c38d1
SR
5480}
5481
5482static ssize_t
6508fa76
SF
5483tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
5484 size_t cnt, loff_t *ppos)
bc0c38d1 5485{
5e39841c 5486 unsigned long val;
c6caeeb1 5487 int ret;
bc0c38d1 5488
22fe9b54
PH
5489 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5490 if (ret)
c6caeeb1 5491 return ret;
bc0c38d1
SR
5492
5493 *ptr = val * 1000;
5494
5495 return cnt;
5496}
5497
6508fa76
SF
5498static ssize_t
5499tracing_thresh_read(struct file *filp, char __user *ubuf,
5500 size_t cnt, loff_t *ppos)
5501{
5502 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
5503}
5504
5505static ssize_t
5506tracing_thresh_write(struct file *filp, const char __user *ubuf,
5507 size_t cnt, loff_t *ppos)
5508{
5509 struct trace_array *tr = filp->private_data;
5510 int ret;
5511
5512 mutex_lock(&trace_types_lock);
5513 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
5514 if (ret < 0)
5515 goto out;
5516
5517 if (tr->current_trace->update_thresh) {
5518 ret = tr->current_trace->update_thresh(tr);
5519 if (ret < 0)
5520 goto out;
5521 }
5522
5523 ret = cnt;
5524out:
5525 mutex_unlock(&trace_types_lock);
5526
5527 return ret;
5528}
5529
f971cc9a 5530#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
e428abbb 5531
6508fa76
SF
5532static ssize_t
5533tracing_max_lat_read(struct file *filp, char __user *ubuf,
5534 size_t cnt, loff_t *ppos)
5535{
5536 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
5537}
5538
5539static ssize_t
5540tracing_max_lat_write(struct file *filp, const char __user *ubuf,
5541 size_t cnt, loff_t *ppos)
5542{
5543 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
5544}
5545
e428abbb
CG
5546#endif
5547
b3806b43
SR
5548static int tracing_open_pipe(struct inode *inode, struct file *filp)
5549{
15544209 5550 struct trace_array *tr = inode->i_private;
b3806b43 5551 struct trace_iterator *iter;
b04cc6b1 5552 int ret = 0;
b3806b43
SR
5553
5554 if (tracing_disabled)
5555 return -ENODEV;
5556
7b85af63
SRRH
5557 if (trace_array_get(tr) < 0)
5558 return -ENODEV;
5559
b04cc6b1
FW
5560 mutex_lock(&trace_types_lock);
5561
b3806b43
SR
5562 /* create a buffer to store the information to pass to userspace */
5563 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
b04cc6b1
FW
5564 if (!iter) {
5565 ret = -ENOMEM;
f77d09a3 5566 __trace_array_put(tr);
b04cc6b1
FW
5567 goto out;
5568 }
b3806b43 5569
3a161d99 5570 trace_seq_init(&iter->seq);
d716ff71 5571 iter->trace = tr->current_trace;
d7350c3f 5572
4462344e 5573 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
b04cc6b1 5574 ret = -ENOMEM;
d7350c3f 5575 goto fail;
4462344e
RR
5576 }
5577
a309720c 5578 /* trace pipe does not show start of buffer */
4462344e 5579 cpumask_setall(iter->started);
a309720c 5580
983f938a 5581 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
112f38a7
SR
5582 iter->iter_flags |= TRACE_FILE_LAT_FMT;
5583
8be0709f 5584 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
58e8eedf 5585 if (trace_clocks[tr->clock_id].in_ns)
8be0709f
DS
5586 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
5587
15544209
ON
5588 iter->tr = tr;
5589 iter->trace_buffer = &tr->trace_buffer;
5590 iter->cpu_file = tracing_get_cpu(inode);
d7350c3f 5591 mutex_init(&iter->mutex);
b3806b43
SR
5592 filp->private_data = iter;
5593
107bad8b
SR
5594 if (iter->trace->pipe_open)
5595 iter->trace->pipe_open(iter);
107bad8b 5596
b444786f 5597 nonseekable_open(inode, filp);
cf6ab6d9
SRRH
5598
5599 tr->current_trace->ref++;
b04cc6b1
FW
5600out:
5601 mutex_unlock(&trace_types_lock);
5602 return ret;
d7350c3f
FW
5603
5604fail:
5605 kfree(iter->trace);
5606 kfree(iter);
7b85af63 5607 __trace_array_put(tr);
d7350c3f
FW
5608 mutex_unlock(&trace_types_lock);
5609 return ret;
b3806b43
SR
5610}
5611
5612static int tracing_release_pipe(struct inode *inode, struct file *file)
5613{
5614 struct trace_iterator *iter = file->private_data;
15544209 5615 struct trace_array *tr = inode->i_private;
b3806b43 5616
b04cc6b1
FW
5617 mutex_lock(&trace_types_lock);
5618
cf6ab6d9
SRRH
5619 tr->current_trace->ref--;
5620
29bf4a5e 5621 if (iter->trace->pipe_close)
c521efd1
SR
5622 iter->trace->pipe_close(iter);
5623
b04cc6b1
FW
5624 mutex_unlock(&trace_types_lock);
5625
4462344e 5626 free_cpumask_var(iter->started);
d7350c3f 5627 mutex_destroy(&iter->mutex);
b3806b43 5628 kfree(iter);
b3806b43 5629
7b85af63
SRRH
5630 trace_array_put(tr);
5631
b3806b43
SR
5632 return 0;
5633}
5634
9dd95748 5635static __poll_t
cc60cdc9 5636trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
2a2cc8f7 5637{
983f938a
SRRH
5638 struct trace_array *tr = iter->tr;
5639
15693458
SRRH
5640 /* Iterators are static, they should be filled or empty */
5641 if (trace_buffer_iter(iter, iter->cpu_file))
5642 return POLLIN | POLLRDNORM;
2a2cc8f7 5643
983f938a 5644 if (tr->trace_flags & TRACE_ITER_BLOCK)
2a2cc8f7
SSP
5645 /*
5646 * Always select as readable when in blocking mode
5647 */
5648 return POLLIN | POLLRDNORM;
15693458 5649 else
12883efb 5650 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
15693458 5651 filp, poll_table);
2a2cc8f7 5652}
2a2cc8f7 5653
9dd95748 5654static __poll_t
cc60cdc9
SR
5655tracing_poll_pipe(struct file *filp, poll_table *poll_table)
5656{
5657 struct trace_iterator *iter = filp->private_data;
5658
5659 return trace_poll(iter, filp, poll_table);
2a2cc8f7
SSP
5660}
5661
d716ff71 5662/* Must be called with iter->mutex held. */
ff98781b 5663static int tracing_wait_pipe(struct file *filp)
b3806b43
SR
5664{
5665 struct trace_iterator *iter = filp->private_data;
8b8b3683 5666 int ret;
b3806b43 5667
b3806b43 5668 while (trace_empty(iter)) {
2dc8f095 5669
107bad8b 5670 if ((filp->f_flags & O_NONBLOCK)) {
ff98781b 5671 return -EAGAIN;
107bad8b 5672 }
2dc8f095 5673
b3806b43 5674 /*
250bfd3d 5675 * We block until we read something and tracing is disabled.
b3806b43
SR
5676 * We still block if tracing is disabled, but we have never
5677 * read anything. This allows a user to cat this file, and
5678 * then enable tracing. But after we have read something,
5679 * we give an EOF when tracing is again disabled.
5680 *
5681 * iter->pos will be 0 if we haven't read anything.
5682 */
75df6e68 5683 if (!tracer_tracing_is_on(iter->tr) && iter->pos)
b3806b43 5684 break;
f4874261
SRRH
5685
5686 mutex_unlock(&iter->mutex);
5687
e30f53aa 5688 ret = wait_on_pipe(iter, false);
f4874261
SRRH
5689
5690 mutex_lock(&iter->mutex);
5691
8b8b3683
SRRH
5692 if (ret)
5693 return ret;
b3806b43
SR
5694 }
5695
ff98781b
EGM
5696 return 1;
5697}
5698
5699/*
5700 * Consumer reader.
5701 */
5702static ssize_t
5703tracing_read_pipe(struct file *filp, char __user *ubuf,
5704 size_t cnt, loff_t *ppos)
5705{
5706 struct trace_iterator *iter = filp->private_data;
5707 ssize_t sret;
5708
d7350c3f
FW
5709 /*
5710 * Avoid more than one consumer on a single file descriptor
5711 * This is just a matter of traces coherency, the ring buffer itself
5712 * is protected.
5713 */
5714 mutex_lock(&iter->mutex);
1245800c
SRRH
5715
5716 /* return any leftover data */
5717 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
5718 if (sret != -EBUSY)
5719 goto out;
5720
5721 trace_seq_init(&iter->seq);
5722
ff98781b
EGM
5723 if (iter->trace->read) {
5724 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
5725 if (sret)
5726 goto out;
5727 }
5728
5729waitagain:
5730 sret = tracing_wait_pipe(filp);
5731 if (sret <= 0)
5732 goto out;
5733
b3806b43 5734 /* stop when tracing is finished */
ff98781b
EGM
5735 if (trace_empty(iter)) {
5736 sret = 0;
107bad8b 5737 goto out;
ff98781b 5738 }
b3806b43
SR
5739
5740 if (cnt >= PAGE_SIZE)
5741 cnt = PAGE_SIZE - 1;
5742
53d0aa77 5743 /* reset all but tr, trace, and overruns */
53d0aa77
SR
5744 memset(&iter->seq, 0,
5745 sizeof(struct trace_iterator) -
5746 offsetof(struct trace_iterator, seq));
ed5467da 5747 cpumask_clear(iter->started);
4823ed7e 5748 iter->pos = -1;
b3806b43 5749
4f535968 5750 trace_event_read_lock();
7e53bd42 5751 trace_access_lock(iter->cpu_file);
955b61e5 5752 while (trace_find_next_entry_inc(iter) != NULL) {
2c4f035f 5753 enum print_line_t ret;
5ac48378 5754 int save_len = iter->seq.seq.len;
088b1e42 5755
f9896bf3 5756 ret = print_trace_line(iter);
2c4f035f 5757 if (ret == TRACE_TYPE_PARTIAL_LINE) {
088b1e42 5758 /* don't print partial lines */
5ac48378 5759 iter->seq.seq.len = save_len;
b3806b43 5760 break;
088b1e42 5761 }
b91facc3
FW
5762 if (ret != TRACE_TYPE_NO_CONSUME)
5763 trace_consume(iter);
b3806b43 5764
5ac48378 5765 if (trace_seq_used(&iter->seq) >= cnt)
b3806b43 5766 break;
ee5e51f5
JO
5767
5768 /*
5769 * Setting the full flag means we reached the trace_seq buffer
5770 * size and we should leave by partial output condition above.
5771 * One of the trace_seq_* functions is not used properly.
5772 */
5773 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
5774 iter->ent->type);
b3806b43 5775 }
7e53bd42 5776 trace_access_unlock(iter->cpu_file);
4f535968 5777 trace_event_read_unlock();
b3806b43 5778
b3806b43 5779 /* Now copy what we have to the user */
6c6c2796 5780 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
5ac48378 5781 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
f9520750 5782 trace_seq_init(&iter->seq);
9ff4b974
PP
5783
5784 /*
25985edc 5785 * If there was nothing to send to user, in spite of consuming trace
9ff4b974
PP
5786 * entries, go back to wait for more entries.
5787 */
6c6c2796 5788 if (sret == -EBUSY)
9ff4b974 5789 goto waitagain;
b3806b43 5790
107bad8b 5791out:
d7350c3f 5792 mutex_unlock(&iter->mutex);
107bad8b 5793
6c6c2796 5794 return sret;
b3806b43
SR
5795}
5796
3c56819b
EGM
5797static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
5798 unsigned int idx)
5799{
5800 __free_page(spd->pages[idx]);
5801}
5802
28dfef8f 5803static const struct pipe_buf_operations tracing_pipe_buf_ops = {
34cd4998 5804 .can_merge = 0,
34cd4998 5805 .confirm = generic_pipe_buf_confirm,
92fdd98c 5806 .release = generic_pipe_buf_release,
34cd4998
SR
5807 .steal = generic_pipe_buf_steal,
5808 .get = generic_pipe_buf_get,
3c56819b
EGM
5809};
5810
34cd4998 5811static size_t
fa7c7f6e 5812tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
34cd4998
SR
5813{
5814 size_t count;
74f06bb7 5815 int save_len;
34cd4998
SR
5816 int ret;
5817
5818 /* Seq buffer is page-sized, exactly what we need. */
5819 for (;;) {
74f06bb7 5820 save_len = iter->seq.seq.len;
34cd4998 5821 ret = print_trace_line(iter);
74f06bb7
SRRH
5822
5823 if (trace_seq_has_overflowed(&iter->seq)) {
5824 iter->seq.seq.len = save_len;
34cd4998
SR
5825 break;
5826 }
74f06bb7
SRRH
5827
5828 /*
5829 * This should not be hit, because it should only
5830 * be set if the iter->seq overflowed. But check it
5831 * anyway to be safe.
5832 */
34cd4998 5833 if (ret == TRACE_TYPE_PARTIAL_LINE) {
74f06bb7
SRRH
5834 iter->seq.seq.len = save_len;
5835 break;
5836 }
5837
5ac48378 5838 count = trace_seq_used(&iter->seq) - save_len;
74f06bb7
SRRH
5839 if (rem < count) {
5840 rem = 0;
5841 iter->seq.seq.len = save_len;
34cd4998
SR
5842 break;
5843 }
5844
74e7ff8c
LJ
5845 if (ret != TRACE_TYPE_NO_CONSUME)
5846 trace_consume(iter);
34cd4998 5847 rem -= count;
955b61e5 5848 if (!trace_find_next_entry_inc(iter)) {
34cd4998
SR
5849 rem = 0;
5850 iter->ent = NULL;
5851 break;
5852 }
5853 }
5854
5855 return rem;
5856}
5857
3c56819b
EGM
5858static ssize_t tracing_splice_read_pipe(struct file *filp,
5859 loff_t *ppos,
5860 struct pipe_inode_info *pipe,
5861 size_t len,
5862 unsigned int flags)
5863{
35f3d14d
JA
5864 struct page *pages_def[PIPE_DEF_BUFFERS];
5865 struct partial_page partial_def[PIPE_DEF_BUFFERS];
3c56819b
EGM
5866 struct trace_iterator *iter = filp->private_data;
5867 struct splice_pipe_desc spd = {
35f3d14d
JA
5868 .pages = pages_def,
5869 .partial = partial_def,
34cd4998 5870 .nr_pages = 0, /* This gets updated below. */
047fe360 5871 .nr_pages_max = PIPE_DEF_BUFFERS,
34cd4998
SR
5872 .ops = &tracing_pipe_buf_ops,
5873 .spd_release = tracing_spd_release_pipe,
3c56819b
EGM
5874 };
5875 ssize_t ret;
34cd4998 5876 size_t rem;
3c56819b
EGM
5877 unsigned int i;
5878
35f3d14d
JA
5879 if (splice_grow_spd(pipe, &spd))
5880 return -ENOMEM;
5881
d7350c3f 5882 mutex_lock(&iter->mutex);
3c56819b
EGM
5883
5884 if (iter->trace->splice_read) {
5885 ret = iter->trace->splice_read(iter, filp,
5886 ppos, pipe, len, flags);
5887 if (ret)
34cd4998 5888 goto out_err;
3c56819b
EGM
5889 }
5890
5891 ret = tracing_wait_pipe(filp);
5892 if (ret <= 0)
34cd4998 5893 goto out_err;
3c56819b 5894
955b61e5 5895 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
3c56819b 5896 ret = -EFAULT;
34cd4998 5897 goto out_err;
3c56819b
EGM
5898 }
5899
4f535968 5900 trace_event_read_lock();
7e53bd42 5901 trace_access_lock(iter->cpu_file);
4f535968 5902
3c56819b 5903 /* Fill as many pages as possible. */
a786c06d 5904 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
35f3d14d
JA
5905 spd.pages[i] = alloc_page(GFP_KERNEL);
5906 if (!spd.pages[i])
34cd4998 5907 break;
3c56819b 5908
fa7c7f6e 5909 rem = tracing_fill_pipe_page(rem, iter);
3c56819b
EGM
5910
5911 /* Copy the data into the page, so we can start over. */
5912 ret = trace_seq_to_buffer(&iter->seq,
35f3d14d 5913 page_address(spd.pages[i]),
5ac48378 5914 trace_seq_used(&iter->seq));
3c56819b 5915 if (ret < 0) {
35f3d14d 5916 __free_page(spd.pages[i]);
3c56819b
EGM
5917 break;
5918 }
35f3d14d 5919 spd.partial[i].offset = 0;
5ac48378 5920 spd.partial[i].len = trace_seq_used(&iter->seq);
3c56819b 5921
f9520750 5922 trace_seq_init(&iter->seq);
3c56819b
EGM
5923 }
5924
7e53bd42 5925 trace_access_unlock(iter->cpu_file);
4f535968 5926 trace_event_read_unlock();
d7350c3f 5927 mutex_unlock(&iter->mutex);
3c56819b
EGM
5928
5929 spd.nr_pages = i;
5930
a29054d9
SRRH
5931 if (i)
5932 ret = splice_to_pipe(pipe, &spd);
5933 else
5934 ret = 0;
35f3d14d 5935out:
047fe360 5936 splice_shrink_spd(&spd);
35f3d14d 5937 return ret;
3c56819b 5938
34cd4998 5939out_err:
d7350c3f 5940 mutex_unlock(&iter->mutex);
35f3d14d 5941 goto out;
3c56819b
EGM
5942}
5943
a98a3c3f
SR
5944static ssize_t
5945tracing_entries_read(struct file *filp, char __user *ubuf,
5946 size_t cnt, loff_t *ppos)
5947{
0bc392ee
ON
5948 struct inode *inode = file_inode(filp);
5949 struct trace_array *tr = inode->i_private;
5950 int cpu = tracing_get_cpu(inode);
438ced17
VN
5951 char buf[64];
5952 int r = 0;
5953 ssize_t ret;
a98a3c3f 5954
db526ca3 5955 mutex_lock(&trace_types_lock);
438ced17 5956
0bc392ee 5957 if (cpu == RING_BUFFER_ALL_CPUS) {
438ced17
VN
5958 int cpu, buf_size_same;
5959 unsigned long size;
5960
5961 size = 0;
5962 buf_size_same = 1;
5963 /* check if all cpu sizes are same */
5964 for_each_tracing_cpu(cpu) {
5965 /* fill in the size from first enabled cpu */
5966 if (size == 0)
12883efb
SRRH
5967 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
5968 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
438ced17
VN
5969 buf_size_same = 0;
5970 break;
5971 }
5972 }
5973
5974 if (buf_size_same) {
5975 if (!ring_buffer_expanded)
5976 r = sprintf(buf, "%lu (expanded: %lu)\n",
5977 size >> 10,
5978 trace_buf_size >> 10);
5979 else
5980 r = sprintf(buf, "%lu\n", size >> 10);
5981 } else
5982 r = sprintf(buf, "X\n");
5983 } else
0bc392ee 5984 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
438ced17 5985
db526ca3
SR
5986 mutex_unlock(&trace_types_lock);
5987
438ced17
VN
5988 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5989 return ret;
a98a3c3f
SR
5990}
5991
5992static ssize_t
5993tracing_entries_write(struct file *filp, const char __user *ubuf,
5994 size_t cnt, loff_t *ppos)
5995{
0bc392ee
ON
5996 struct inode *inode = file_inode(filp);
5997 struct trace_array *tr = inode->i_private;
a98a3c3f 5998 unsigned long val;
4f271a2a 5999 int ret;
a98a3c3f 6000
22fe9b54
PH
6001 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6002 if (ret)
c6caeeb1 6003 return ret;
a98a3c3f
SR
6004
6005 /* must have at least 1 entry */
6006 if (!val)
6007 return -EINVAL;
6008
1696b2b0
SR
6009 /* value is in KB */
6010 val <<= 10;
0bc392ee 6011 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
4f271a2a
VN
6012 if (ret < 0)
6013 return ret;
a98a3c3f 6014
cf8517cf 6015 *ppos += cnt;
a98a3c3f 6016
4f271a2a
VN
6017 return cnt;
6018}
bf5e6519 6019
f81ab074
VN
6020static ssize_t
6021tracing_total_entries_read(struct file *filp, char __user *ubuf,
6022 size_t cnt, loff_t *ppos)
6023{
6024 struct trace_array *tr = filp->private_data;
6025 char buf[64];
6026 int r, cpu;
6027 unsigned long size = 0, expanded_size = 0;
6028
6029 mutex_lock(&trace_types_lock);
6030 for_each_tracing_cpu(cpu) {
12883efb 6031 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
f81ab074
VN
6032 if (!ring_buffer_expanded)
6033 expanded_size += trace_buf_size >> 10;
6034 }
6035 if (ring_buffer_expanded)
6036 r = sprintf(buf, "%lu\n", size);
6037 else
6038 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
6039 mutex_unlock(&trace_types_lock);
6040
6041 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6042}
6043
4f271a2a
VN
6044static ssize_t
6045tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
6046 size_t cnt, loff_t *ppos)
6047{
6048 /*
6049 * There is no need to read what the user has written, this function
6050 * is just to make sure that there is no error when "echo" is used
6051 */
6052
6053 *ppos += cnt;
a98a3c3f
SR
6054
6055 return cnt;
6056}
6057
4f271a2a
VN
6058static int
6059tracing_free_buffer_release(struct inode *inode, struct file *filp)
6060{
2b6080f2
SR
6061 struct trace_array *tr = inode->i_private;
6062
cf30cf67 6063 /* disable tracing ? */
983f938a 6064 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
711e1243 6065 tracer_tracing_off(tr);
4f271a2a 6066 /* resize the ring buffer to 0 */
2b6080f2 6067 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
4f271a2a 6068
7b85af63
SRRH
6069 trace_array_put(tr);
6070
4f271a2a
VN
6071 return 0;
6072}
6073
5bf9a1ee
PP
6074static ssize_t
6075tracing_mark_write(struct file *filp, const char __user *ubuf,
6076 size_t cnt, loff_t *fpos)
6077{
2d71619c 6078 struct trace_array *tr = filp->private_data;
d696b58c
SR
6079 struct ring_buffer_event *event;
6080 struct ring_buffer *buffer;
6081 struct print_entry *entry;
6082 unsigned long irq_flags;
656c7f0d 6083 const char faulted[] = "<faulted>";
d696b58c 6084 ssize_t written;
d696b58c
SR
6085 int size;
6086 int len;
fa32e855 6087
656c7f0d
SRRH
6088/* Used in tracing_mark_raw_write() as well */
6089#define FAULTED_SIZE (sizeof(faulted) - 1) /* '\0' is already accounted for */
5bf9a1ee 6090
c76f0694 6091 if (tracing_disabled)
5bf9a1ee
PP
6092 return -EINVAL;
6093
983f938a 6094 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
5224c3a3
MSB
6095 return -EINVAL;
6096
5bf9a1ee
PP
6097 if (cnt > TRACE_BUF_SIZE)
6098 cnt = TRACE_BUF_SIZE;
6099
d696b58c 6100 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
5bf9a1ee 6101
d696b58c 6102 local_save_flags(irq_flags);
656c7f0d 6103 size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */
d696b58c 6104
656c7f0d
SRRH
6105 /* If less than "<faulted>", then make sure we can still add that */
6106 if (cnt < FAULTED_SIZE)
6107 size += FAULTED_SIZE - cnt;
d696b58c 6108
2d71619c 6109 buffer = tr->trace_buffer.buffer;
3e9a8aad
SRRH
6110 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
6111 irq_flags, preempt_count());
656c7f0d 6112 if (unlikely(!event))
d696b58c 6113 /* Ring buffer disabled, return as if not open for write */
656c7f0d 6114 return -EBADF;
d696b58c
SR
6115
6116 entry = ring_buffer_event_data(event);
6117 entry->ip = _THIS_IP_;
6118
656c7f0d
SRRH
6119 len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
6120 if (len) {
6121 memcpy(&entry->buf, faulted, FAULTED_SIZE);
6122 cnt = FAULTED_SIZE;
6123 written = -EFAULT;
c13d2f7c 6124 } else
656c7f0d
SRRH
6125 written = cnt;
6126 len = cnt;
5bf9a1ee 6127
d696b58c
SR
6128 if (entry->buf[cnt - 1] != '\n') {
6129 entry->buf[cnt] = '\n';
6130 entry->buf[cnt + 1] = '\0';
6131 } else
6132 entry->buf[cnt] = '\0';
6133
7ffbd48d 6134 __buffer_unlock_commit(buffer, event);
5bf9a1ee 6135
656c7f0d
SRRH
6136 if (written > 0)
6137 *fpos += written;
5bf9a1ee 6138
fa32e855
SR
6139 return written;
6140}
6141
6142/* Limit it for now to 3K (including tag) */
6143#define RAW_DATA_MAX_SIZE (1024*3)
6144
6145static ssize_t
6146tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
6147 size_t cnt, loff_t *fpos)
6148{
6149 struct trace_array *tr = filp->private_data;
6150 struct ring_buffer_event *event;
6151 struct ring_buffer *buffer;
6152 struct raw_data_entry *entry;
656c7f0d 6153 const char faulted[] = "<faulted>";
fa32e855 6154 unsigned long irq_flags;
fa32e855 6155 ssize_t written;
fa32e855
SR
6156 int size;
6157 int len;
6158
656c7f0d
SRRH
6159#define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
6160
fa32e855
SR
6161 if (tracing_disabled)
6162 return -EINVAL;
6163
6164 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
6165 return -EINVAL;
6166
6167 /* The marker must at least have a tag id */
6168 if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
6169 return -EINVAL;
6170
6171 if (cnt > TRACE_BUF_SIZE)
6172 cnt = TRACE_BUF_SIZE;
6173
6174 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
6175
fa32e855
SR
6176 local_save_flags(irq_flags);
6177 size = sizeof(*entry) + cnt;
656c7f0d
SRRH
6178 if (cnt < FAULT_SIZE_ID)
6179 size += FAULT_SIZE_ID - cnt;
6180
fa32e855 6181 buffer = tr->trace_buffer.buffer;
3e9a8aad
SRRH
6182 event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
6183 irq_flags, preempt_count());
656c7f0d 6184 if (!event)
fa32e855 6185 /* Ring buffer disabled, return as if not open for write */
656c7f0d 6186 return -EBADF;
fa32e855
SR
6187
6188 entry = ring_buffer_event_data(event);
6189
656c7f0d
SRRH
6190 len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
6191 if (len) {
6192 entry->id = -1;
6193 memcpy(&entry->buf, faulted, FAULTED_SIZE);
6194 written = -EFAULT;
fa32e855 6195 } else
656c7f0d 6196 written = cnt;
fa32e855
SR
6197
6198 __buffer_unlock_commit(buffer, event);
6199
656c7f0d
SRRH
6200 if (written > 0)
6201 *fpos += written;
1aa54bca
MS
6202
6203 return written;
5bf9a1ee
PP
6204}
6205
13f16d20 6206static int tracing_clock_show(struct seq_file *m, void *v)
5079f326 6207{
2b6080f2 6208 struct trace_array *tr = m->private;
5079f326
Z
6209 int i;
6210
6211 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
13f16d20 6212 seq_printf(m,
5079f326 6213 "%s%s%s%s", i ? " " : "",
2b6080f2
SR
6214 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
6215 i == tr->clock_id ? "]" : "");
13f16d20 6216 seq_putc(m, '\n');
5079f326 6217
13f16d20 6218 return 0;
5079f326
Z
6219}
6220
e1e232ca 6221static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
5079f326 6222{
5079f326
Z
6223 int i;
6224
5079f326
Z
6225 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
6226 if (strcmp(trace_clocks[i].name, clockstr) == 0)
6227 break;
6228 }
6229 if (i == ARRAY_SIZE(trace_clocks))
6230 return -EINVAL;
6231
5079f326
Z
6232 mutex_lock(&trace_types_lock);
6233
2b6080f2
SR
6234 tr->clock_id = i;
6235
12883efb 6236 ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
5079f326 6237
60303ed3
DS
6238 /*
6239 * New clock may not be consistent with the previous clock.
6240 * Reset the buffer so that it doesn't have incomparable timestamps.
6241 */
9457158b 6242 tracing_reset_online_cpus(&tr->trace_buffer);
12883efb
SRRH
6243
6244#ifdef CONFIG_TRACER_MAX_TRACE
170b3b10 6245 if (tr->max_buffer.buffer)
12883efb 6246 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
9457158b 6247 tracing_reset_online_cpus(&tr->max_buffer);
12883efb 6248#endif
60303ed3 6249
5079f326
Z
6250 mutex_unlock(&trace_types_lock);
6251
e1e232ca
SR
6252 return 0;
6253}
6254
6255static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
6256 size_t cnt, loff_t *fpos)
6257{
6258 struct seq_file *m = filp->private_data;
6259 struct trace_array *tr = m->private;
6260 char buf[64];
6261 const char *clockstr;
6262 int ret;
6263
6264 if (cnt >= sizeof(buf))
6265 return -EINVAL;
6266
4afe6495 6267 if (copy_from_user(buf, ubuf, cnt))
e1e232ca
SR
6268 return -EFAULT;
6269
6270 buf[cnt] = 0;
6271
6272 clockstr = strstrip(buf);
6273
6274 ret = tracing_set_clock(tr, clockstr);
6275 if (ret)
6276 return ret;
6277
5079f326
Z
6278 *fpos += cnt;
6279
6280 return cnt;
6281}
6282
13f16d20
LZ
6283static int tracing_clock_open(struct inode *inode, struct file *file)
6284{
7b85af63
SRRH
6285 struct trace_array *tr = inode->i_private;
6286 int ret;
6287
13f16d20
LZ
6288 if (tracing_disabled)
6289 return -ENODEV;
2b6080f2 6290
7b85af63
SRRH
6291 if (trace_array_get(tr))
6292 return -ENODEV;
6293
6294 ret = single_open(file, tracing_clock_show, inode->i_private);
6295 if (ret < 0)
6296 trace_array_put(tr);
6297
6298 return ret;
13f16d20
LZ
6299}
6300
6de58e62
SRRH
6301struct ftrace_buffer_info {
6302 struct trace_iterator iter;
6303 void *spare;
73a757e6 6304 unsigned int spare_cpu;
6de58e62
SRRH
6305 unsigned int read;
6306};
6307
debdd57f
HT
6308#ifdef CONFIG_TRACER_SNAPSHOT
6309static int tracing_snapshot_open(struct inode *inode, struct file *file)
6310{
6484c71c 6311 struct trace_array *tr = inode->i_private;
debdd57f 6312 struct trace_iterator *iter;
2b6080f2 6313 struct seq_file *m;
debdd57f
HT
6314 int ret = 0;
6315
ff451961
SRRH
6316 if (trace_array_get(tr) < 0)
6317 return -ENODEV;
6318
debdd57f 6319 if (file->f_mode & FMODE_READ) {
6484c71c 6320 iter = __tracing_open(inode, file, true);
debdd57f
HT
6321 if (IS_ERR(iter))
6322 ret = PTR_ERR(iter);
2b6080f2
SR
6323 } else {
6324 /* Writes still need the seq_file to hold the private data */
f77d09a3 6325 ret = -ENOMEM;
2b6080f2
SR
6326 m = kzalloc(sizeof(*m), GFP_KERNEL);
6327 if (!m)
f77d09a3 6328 goto out;
2b6080f2
SR
6329 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
6330 if (!iter) {
6331 kfree(m);
f77d09a3 6332 goto out;
2b6080f2 6333 }
f77d09a3
AL
6334 ret = 0;
6335
ff451961 6336 iter->tr = tr;
6484c71c
ON
6337 iter->trace_buffer = &tr->max_buffer;
6338 iter->cpu_file = tracing_get_cpu(inode);
2b6080f2
SR
6339 m->private = iter;
6340 file->private_data = m;
debdd57f 6341 }
f77d09a3 6342out:
ff451961
SRRH
6343 if (ret < 0)
6344 trace_array_put(tr);
6345
debdd57f
HT
6346 return ret;
6347}
6348
6349static ssize_t
6350tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
6351 loff_t *ppos)
6352{
2b6080f2
SR
6353 struct seq_file *m = filp->private_data;
6354 struct trace_iterator *iter = m->private;
6355 struct trace_array *tr = iter->tr;
debdd57f
HT
6356 unsigned long val;
6357 int ret;
6358
6359 ret = tracing_update_buffers();
6360 if (ret < 0)
6361 return ret;
6362
6363 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6364 if (ret)
6365 return ret;
6366
6367 mutex_lock(&trace_types_lock);
6368
2b6080f2 6369 if (tr->current_trace->use_max_tr) {
debdd57f
HT
6370 ret = -EBUSY;
6371 goto out;
6372 }
6373
6374 switch (val) {
6375 case 0:
f1affcaa
SRRH
6376 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
6377 ret = -EINVAL;
6378 break;
debdd57f 6379 }
3209cff4
SRRH
6380 if (tr->allocated_snapshot)
6381 free_snapshot(tr);
debdd57f
HT
6382 break;
6383 case 1:
f1affcaa
SRRH
6384/* Only allow per-cpu swap if the ring buffer supports it */
6385#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
6386 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
6387 ret = -EINVAL;
6388 break;
6389 }
6390#endif
45ad21ca 6391 if (!tr->allocated_snapshot) {
3209cff4 6392 ret = alloc_snapshot(tr);
debdd57f
HT
6393 if (ret < 0)
6394 break;
debdd57f 6395 }
debdd57f
HT
6396 local_irq_disable();
6397 /* Now, we're going to swap */
f1affcaa 6398 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
ce9bae55 6399 update_max_tr(tr, current, smp_processor_id());
f1affcaa 6400 else
ce9bae55 6401 update_max_tr_single(tr, current, iter->cpu_file);
debdd57f
HT
6402 local_irq_enable();
6403 break;
6404 default:
45ad21ca 6405 if (tr->allocated_snapshot) {
f1affcaa
SRRH
6406 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
6407 tracing_reset_online_cpus(&tr->max_buffer);
6408 else
6409 tracing_reset(&tr->max_buffer, iter->cpu_file);
6410 }
debdd57f
HT
6411 break;
6412 }
6413
6414 if (ret >= 0) {
6415 *ppos += cnt;
6416 ret = cnt;
6417 }
6418out:
6419 mutex_unlock(&trace_types_lock);
6420 return ret;
6421}
2b6080f2
SR
6422
6423static int tracing_snapshot_release(struct inode *inode, struct file *file)
6424{
6425 struct seq_file *m = file->private_data;
ff451961
SRRH
6426 int ret;
6427
6428 ret = tracing_release(inode, file);
2b6080f2
SR
6429
6430 if (file->f_mode & FMODE_READ)
ff451961 6431 return ret;
2b6080f2
SR
6432
6433 /* If write only, the seq_file is just a stub */
6434 if (m)
6435 kfree(m->private);
6436 kfree(m);
6437
6438 return 0;
6439}
6440
6de58e62
SRRH
6441static int tracing_buffers_open(struct inode *inode, struct file *filp);
6442static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
6443 size_t count, loff_t *ppos);
6444static int tracing_buffers_release(struct inode *inode, struct file *file);
6445static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
6446 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
6447
6448static int snapshot_raw_open(struct inode *inode, struct file *filp)
6449{
6450 struct ftrace_buffer_info *info;
6451 int ret;
6452
6453 ret = tracing_buffers_open(inode, filp);
6454 if (ret < 0)
6455 return ret;
6456
6457 info = filp->private_data;
6458
6459 if (info->iter.trace->use_max_tr) {
6460 tracing_buffers_release(inode, filp);
6461 return -EBUSY;
6462 }
6463
6464 info->iter.snapshot = true;
6465 info->iter.trace_buffer = &info->iter.tr->max_buffer;
6466
6467 return ret;
6468}
6469
debdd57f
HT
6470#endif /* CONFIG_TRACER_SNAPSHOT */
6471
6472
6508fa76
SF
6473static const struct file_operations tracing_thresh_fops = {
6474 .open = tracing_open_generic,
6475 .read = tracing_thresh_read,
6476 .write = tracing_thresh_write,
6477 .llseek = generic_file_llseek,
6478};
6479
f971cc9a 6480#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
5e2336a0 6481static const struct file_operations tracing_max_lat_fops = {
4bf39a94
IM
6482 .open = tracing_open_generic,
6483 .read = tracing_max_lat_read,
6484 .write = tracing_max_lat_write,
b444786f 6485 .llseek = generic_file_llseek,
bc0c38d1 6486};
e428abbb 6487#endif
bc0c38d1 6488
5e2336a0 6489static const struct file_operations set_tracer_fops = {
4bf39a94
IM
6490 .open = tracing_open_generic,
6491 .read = tracing_set_trace_read,
6492 .write = tracing_set_trace_write,
b444786f 6493 .llseek = generic_file_llseek,
bc0c38d1
SR
6494};
6495
5e2336a0 6496static const struct file_operations tracing_pipe_fops = {
4bf39a94 6497 .open = tracing_open_pipe,
2a2cc8f7 6498 .poll = tracing_poll_pipe,
4bf39a94 6499 .read = tracing_read_pipe,
3c56819b 6500 .splice_read = tracing_splice_read_pipe,
4bf39a94 6501 .release = tracing_release_pipe,
b444786f 6502 .llseek = no_llseek,
b3806b43
SR
6503};
6504
5e2336a0 6505static const struct file_operations tracing_entries_fops = {
0bc392ee 6506 .open = tracing_open_generic_tr,
a98a3c3f
SR
6507 .read = tracing_entries_read,
6508 .write = tracing_entries_write,
b444786f 6509 .llseek = generic_file_llseek,
0bc392ee 6510 .release = tracing_release_generic_tr,
a98a3c3f
SR
6511};
6512
f81ab074 6513static const struct file_operations tracing_total_entries_fops = {
7b85af63 6514 .open = tracing_open_generic_tr,
f81ab074
VN
6515 .read = tracing_total_entries_read,
6516 .llseek = generic_file_llseek,
7b85af63 6517 .release = tracing_release_generic_tr,
f81ab074
VN
6518};
6519
4f271a2a 6520static const struct file_operations tracing_free_buffer_fops = {
7b85af63 6521 .open = tracing_open_generic_tr,
4f271a2a
VN
6522 .write = tracing_free_buffer_write,
6523 .release = tracing_free_buffer_release,
6524};
6525
5e2336a0 6526static const struct file_operations tracing_mark_fops = {
7b85af63 6527 .open = tracing_open_generic_tr,
5bf9a1ee 6528 .write = tracing_mark_write,
b444786f 6529 .llseek = generic_file_llseek,
7b85af63 6530 .release = tracing_release_generic_tr,
5bf9a1ee
PP
6531};
6532
fa32e855
SR
6533static const struct file_operations tracing_mark_raw_fops = {
6534 .open = tracing_open_generic_tr,
6535 .write = tracing_mark_raw_write,
6536 .llseek = generic_file_llseek,
6537 .release = tracing_release_generic_tr,
6538};
6539
5079f326 6540static const struct file_operations trace_clock_fops = {
13f16d20
LZ
6541 .open = tracing_clock_open,
6542 .read = seq_read,
6543 .llseek = seq_lseek,
7b85af63 6544 .release = tracing_single_release_tr,
5079f326
Z
6545 .write = tracing_clock_write,
6546};
6547
debdd57f
HT
6548#ifdef CONFIG_TRACER_SNAPSHOT
6549static const struct file_operations snapshot_fops = {
6550 .open = tracing_snapshot_open,
6551 .read = seq_read,
6552 .write = tracing_snapshot_write,
098c879e 6553 .llseek = tracing_lseek,
2b6080f2 6554 .release = tracing_snapshot_release,
debdd57f 6555};
debdd57f 6556
6de58e62
SRRH
6557static const struct file_operations snapshot_raw_fops = {
6558 .open = snapshot_raw_open,
6559 .read = tracing_buffers_read,
6560 .release = tracing_buffers_release,
6561 .splice_read = tracing_buffers_splice_read,
6562 .llseek = no_llseek,
2cadf913
SR
6563};
6564
6de58e62
SRRH
6565#endif /* CONFIG_TRACER_SNAPSHOT */
6566
2cadf913
SR
6567static int tracing_buffers_open(struct inode *inode, struct file *filp)
6568{
46ef2be0 6569 struct trace_array *tr = inode->i_private;
2cadf913 6570 struct ftrace_buffer_info *info;
7b85af63 6571 int ret;
2cadf913
SR
6572
6573 if (tracing_disabled)
6574 return -ENODEV;
6575
7b85af63
SRRH
6576 if (trace_array_get(tr) < 0)
6577 return -ENODEV;
6578
2cadf913 6579 info = kzalloc(sizeof(*info), GFP_KERNEL);
7b85af63
SRRH
6580 if (!info) {
6581 trace_array_put(tr);
2cadf913 6582 return -ENOMEM;
7b85af63 6583 }
2cadf913 6584
a695cb58
SRRH
6585 mutex_lock(&trace_types_lock);
6586
cc60cdc9 6587 info->iter.tr = tr;
46ef2be0 6588 info->iter.cpu_file = tracing_get_cpu(inode);
b627344f 6589 info->iter.trace = tr->current_trace;
12883efb 6590 info->iter.trace_buffer = &tr->trace_buffer;
cc60cdc9 6591 info->spare = NULL;
2cadf913 6592 /* Force reading ring buffer for first read */
cc60cdc9 6593 info->read = (unsigned int)-1;
2cadf913
SR
6594
6595 filp->private_data = info;
6596
cf6ab6d9
SRRH
6597 tr->current_trace->ref++;
6598
a695cb58
SRRH
6599 mutex_unlock(&trace_types_lock);
6600
7b85af63
SRRH
6601 ret = nonseekable_open(inode, filp);
6602 if (ret < 0)
6603 trace_array_put(tr);
6604
6605 return ret;
2cadf913
SR
6606}
6607
9dd95748 6608static __poll_t
cc60cdc9
SR
6609tracing_buffers_poll(struct file *filp, poll_table *poll_table)
6610{
6611 struct ftrace_buffer_info *info = filp->private_data;
6612 struct trace_iterator *iter = &info->iter;
6613
6614 return trace_poll(iter, filp, poll_table);
6615}
6616
2cadf913
SR
6617static ssize_t
6618tracing_buffers_read(struct file *filp, char __user *ubuf,
6619 size_t count, loff_t *ppos)
6620{
6621 struct ftrace_buffer_info *info = filp->private_data;
cc60cdc9 6622 struct trace_iterator *iter = &info->iter;
a7e52ad7 6623 ssize_t ret = 0;
6de58e62 6624 ssize_t size;
2cadf913 6625
2dc5d12b
SR
6626 if (!count)
6627 return 0;
6628
6de58e62 6629#ifdef CONFIG_TRACER_MAX_TRACE
d716ff71
SRRH
6630 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
6631 return -EBUSY;
6de58e62
SRRH
6632#endif
6633
73a757e6 6634 if (!info->spare) {
12883efb
SRRH
6635 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
6636 iter->cpu_file);
a7e52ad7
SRV
6637 if (IS_ERR(info->spare)) {
6638 ret = PTR_ERR(info->spare);
6639 info->spare = NULL;
6640 } else {
6641 info->spare_cpu = iter->cpu_file;
6642 }
73a757e6 6643 }
ddd538f3 6644 if (!info->spare)
a7e52ad7 6645 return ret;
ddd538f3 6646
2cadf913
SR
6647 /* Do we have previous read data to read? */
6648 if (info->read < PAGE_SIZE)
6649 goto read;
6650
b627344f 6651 again:
cc60cdc9 6652 trace_access_lock(iter->cpu_file);
12883efb 6653 ret = ring_buffer_read_page(iter->trace_buffer->buffer,
2cadf913
SR
6654 &info->spare,
6655 count,
cc60cdc9
SR
6656 iter->cpu_file, 0);
6657 trace_access_unlock(iter->cpu_file);
2cadf913 6658
b627344f
SR
6659 if (ret < 0) {
6660 if (trace_empty(iter)) {
d716ff71
SRRH
6661 if ((filp->f_flags & O_NONBLOCK))
6662 return -EAGAIN;
6663
e30f53aa 6664 ret = wait_on_pipe(iter, false);
d716ff71
SRRH
6665 if (ret)
6666 return ret;
6667
b627344f
SR
6668 goto again;
6669 }
d716ff71 6670 return 0;
b627344f 6671 }
436fc280 6672
436fc280 6673 info->read = 0;
b627344f 6674 read:
2cadf913
SR
6675 size = PAGE_SIZE - info->read;
6676 if (size > count)
6677 size = count;
6678
6679 ret = copy_to_user(ubuf, info->spare + info->read, size);
d716ff71
SRRH
6680 if (ret == size)
6681 return -EFAULT;
6682
2dc5d12b
SR
6683 size -= ret;
6684
2cadf913
SR
6685 *ppos += size;
6686 info->read += size;
6687
6688 return size;
6689}
6690
6691static int tracing_buffers_release(struct inode *inode, struct file *file)
6692{
6693 struct ftrace_buffer_info *info = file->private_data;
cc60cdc9 6694 struct trace_iterator *iter = &info->iter;
2cadf913 6695
a695cb58
SRRH
6696 mutex_lock(&trace_types_lock);
6697
cf6ab6d9
SRRH
6698 iter->tr->current_trace->ref--;
6699
ff451961 6700 __trace_array_put(iter->tr);
2cadf913 6701
ddd538f3 6702 if (info->spare)
73a757e6
SRV
6703 ring_buffer_free_read_page(iter->trace_buffer->buffer,
6704 info->spare_cpu, info->spare);
2cadf913
SR
6705 kfree(info);
6706
a695cb58
SRRH
6707 mutex_unlock(&trace_types_lock);
6708
2cadf913
SR
6709 return 0;
6710}
6711
6712struct buffer_ref {
6713 struct ring_buffer *buffer;
6714 void *page;
73a757e6 6715 int cpu;
2cadf913
SR
6716 int ref;
6717};
6718
6719static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
6720 struct pipe_buffer *buf)
6721{
6722 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
6723
6724 if (--ref->ref)
6725 return;
6726
73a757e6 6727 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
2cadf913
SR
6728 kfree(ref);
6729 buf->private = 0;
6730}
6731
2cadf913
SR
6732static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
6733 struct pipe_buffer *buf)
6734{
6735 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
6736
6737 ref->ref++;
6738}
6739
6740/* Pipe buffer operations for a buffer. */
28dfef8f 6741static const struct pipe_buf_operations buffer_pipe_buf_ops = {
2cadf913 6742 .can_merge = 0,
2cadf913
SR
6743 .confirm = generic_pipe_buf_confirm,
6744 .release = buffer_pipe_buf_release,
d55cb6cf 6745 .steal = generic_pipe_buf_steal,
2cadf913
SR
6746 .get = buffer_pipe_buf_get,
6747};
6748
6749/*
6750 * Callback from splice_to_pipe(), if we need to release some pages
6751 * at the end of the spd in case we error'ed out in filling the pipe.
6752 */
6753static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
6754{
6755 struct buffer_ref *ref =
6756 (struct buffer_ref *)spd->partial[i].private;
6757
6758 if (--ref->ref)
6759 return;
6760
73a757e6 6761 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
2cadf913
SR
6762 kfree(ref);
6763 spd->partial[i].private = 0;
6764}
6765
6766static ssize_t
6767tracing_buffers_splice_read(struct file *file, loff_t *ppos,
6768 struct pipe_inode_info *pipe, size_t len,
6769 unsigned int flags)
6770{
6771 struct ftrace_buffer_info *info = file->private_data;
cc60cdc9 6772 struct trace_iterator *iter = &info->iter;
35f3d14d
JA
6773 struct partial_page partial_def[PIPE_DEF_BUFFERS];
6774 struct page *pages_def[PIPE_DEF_BUFFERS];
2cadf913 6775 struct splice_pipe_desc spd = {
35f3d14d
JA
6776 .pages = pages_def,
6777 .partial = partial_def,
047fe360 6778 .nr_pages_max = PIPE_DEF_BUFFERS,
2cadf913
SR
6779 .ops = &buffer_pipe_buf_ops,
6780 .spd_release = buffer_spd_release,
6781 };
6782 struct buffer_ref *ref;
93459c6c 6783 int entries, size, i;
07906da7 6784 ssize_t ret = 0;
2cadf913 6785
6de58e62 6786#ifdef CONFIG_TRACER_MAX_TRACE
d716ff71
SRRH
6787 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
6788 return -EBUSY;
6de58e62
SRRH
6789#endif
6790
d716ff71
SRRH
6791 if (*ppos & (PAGE_SIZE - 1))
6792 return -EINVAL;
93cfb3c9
LJ
6793
6794 if (len & (PAGE_SIZE - 1)) {
d716ff71
SRRH
6795 if (len < PAGE_SIZE)
6796 return -EINVAL;
93cfb3c9
LJ
6797 len &= PAGE_MASK;
6798 }
6799
1ae2293d
AV
6800 if (splice_grow_spd(pipe, &spd))
6801 return -ENOMEM;
6802
cc60cdc9
SR
6803 again:
6804 trace_access_lock(iter->cpu_file);
12883efb 6805 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
93459c6c 6806
a786c06d 6807 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
2cadf913
SR
6808 struct page *page;
6809 int r;
6810
6811 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
07906da7
RV
6812 if (!ref) {
6813 ret = -ENOMEM;
2cadf913 6814 break;
07906da7 6815 }
2cadf913 6816
7267fa68 6817 ref->ref = 1;
12883efb 6818 ref->buffer = iter->trace_buffer->buffer;
cc60cdc9 6819 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
a7e52ad7
SRV
6820 if (IS_ERR(ref->page)) {
6821 ret = PTR_ERR(ref->page);
6822 ref->page = NULL;
2cadf913
SR
6823 kfree(ref);
6824 break;
6825 }
73a757e6 6826 ref->cpu = iter->cpu_file;
2cadf913
SR
6827
6828 r = ring_buffer_read_page(ref->buffer, &ref->page,
cc60cdc9 6829 len, iter->cpu_file, 1);
2cadf913 6830 if (r < 0) {
73a757e6
SRV
6831 ring_buffer_free_read_page(ref->buffer, ref->cpu,
6832 ref->page);
2cadf913
SR
6833 kfree(ref);
6834 break;
6835 }
6836
6837 /*
6838 * zero out any left over data, this is going to
6839 * user land.
6840 */
6841 size = ring_buffer_page_len(ref->page);
6842 if (size < PAGE_SIZE)
6843 memset(ref->page + size, 0, PAGE_SIZE - size);
6844
6845 page = virt_to_page(ref->page);
6846
6847 spd.pages[i] = page;
6848 spd.partial[i].len = PAGE_SIZE;
6849 spd.partial[i].offset = 0;
6850 spd.partial[i].private = (unsigned long)ref;
6851 spd.nr_pages++;
93cfb3c9 6852 *ppos += PAGE_SIZE;
93459c6c 6853
12883efb 6854 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
2cadf913
SR
6855 }
6856
cc60cdc9 6857 trace_access_unlock(iter->cpu_file);
2cadf913
SR
6858 spd.nr_pages = i;
6859
6860 /* did we read anything? */
6861 if (!spd.nr_pages) {
07906da7 6862 if (ret)
1ae2293d 6863 goto out;
d716ff71 6864
1ae2293d 6865 ret = -EAGAIN;
d716ff71 6866 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
1ae2293d 6867 goto out;
07906da7 6868
e30f53aa 6869 ret = wait_on_pipe(iter, true);
8b8b3683 6870 if (ret)
1ae2293d 6871 goto out;
e30f53aa 6872
cc60cdc9 6873 goto again;
2cadf913
SR
6874 }
6875
6876 ret = splice_to_pipe(pipe, &spd);
1ae2293d 6877out:
047fe360 6878 splice_shrink_spd(&spd);
6de58e62 6879
2cadf913
SR
6880 return ret;
6881}
6882
6883static const struct file_operations tracing_buffers_fops = {
6884 .open = tracing_buffers_open,
6885 .read = tracing_buffers_read,
cc60cdc9 6886 .poll = tracing_buffers_poll,
2cadf913
SR
6887 .release = tracing_buffers_release,
6888 .splice_read = tracing_buffers_splice_read,
6889 .llseek = no_llseek,
6890};
6891
c8d77183
SR
6892static ssize_t
6893tracing_stats_read(struct file *filp, char __user *ubuf,
6894 size_t count, loff_t *ppos)
6895{
4d3435b8
ON
6896 struct inode *inode = file_inode(filp);
6897 struct trace_array *tr = inode->i_private;
12883efb 6898 struct trace_buffer *trace_buf = &tr->trace_buffer;
4d3435b8 6899 int cpu = tracing_get_cpu(inode);
c8d77183
SR
6900 struct trace_seq *s;
6901 unsigned long cnt;
c64e148a
VN
6902 unsigned long long t;
6903 unsigned long usec_rem;
c8d77183 6904
e4f2d10f 6905 s = kmalloc(sizeof(*s), GFP_KERNEL);
c8d77183 6906 if (!s)
a646365c 6907 return -ENOMEM;
c8d77183
SR
6908
6909 trace_seq_init(s);
6910
12883efb 6911 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
c8d77183
SR
6912 trace_seq_printf(s, "entries: %ld\n", cnt);
6913
12883efb 6914 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
c8d77183
SR
6915 trace_seq_printf(s, "overrun: %ld\n", cnt);
6916
12883efb 6917 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
c8d77183
SR
6918 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
6919
12883efb 6920 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
c64e148a
VN
6921 trace_seq_printf(s, "bytes: %ld\n", cnt);
6922
58e8eedf 6923 if (trace_clocks[tr->clock_id].in_ns) {
11043d8b 6924 /* local or global for trace_clock */
12883efb 6925 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
11043d8b
YY
6926 usec_rem = do_div(t, USEC_PER_SEC);
6927 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
6928 t, usec_rem);
6929
12883efb 6930 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
11043d8b
YY
6931 usec_rem = do_div(t, USEC_PER_SEC);
6932 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
6933 } else {
6934 /* counter or tsc mode for trace_clock */
6935 trace_seq_printf(s, "oldest event ts: %llu\n",
12883efb 6936 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
c64e148a 6937
11043d8b 6938 trace_seq_printf(s, "now ts: %llu\n",
12883efb 6939 ring_buffer_time_stamp(trace_buf->buffer, cpu));
11043d8b 6940 }
c64e148a 6941
12883efb 6942 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
884bfe89
SP
6943 trace_seq_printf(s, "dropped events: %ld\n", cnt);
6944
12883efb 6945 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
ad964704
SRRH
6946 trace_seq_printf(s, "read events: %ld\n", cnt);
6947
5ac48378
SRRH
6948 count = simple_read_from_buffer(ubuf, count, ppos,
6949 s->buffer, trace_seq_used(s));
c8d77183
SR
6950
6951 kfree(s);
6952
6953 return count;
6954}
6955
6956static const struct file_operations tracing_stats_fops = {
4d3435b8 6957 .open = tracing_open_generic_tr,
c8d77183 6958 .read = tracing_stats_read,
b444786f 6959 .llseek = generic_file_llseek,
4d3435b8 6960 .release = tracing_release_generic_tr,
c8d77183
SR
6961};
6962
bc0c38d1
SR
6963#ifdef CONFIG_DYNAMIC_FTRACE
6964
6965static ssize_t
b807c3d0 6966tracing_read_dyn_info(struct file *filp, char __user *ubuf,
bc0c38d1
SR
6967 size_t cnt, loff_t *ppos)
6968{
6969 unsigned long *p = filp->private_data;
6a9c981b 6970 char buf[64]; /* Not too big for a shallow stack */
bc0c38d1
SR
6971 int r;
6972
6a9c981b 6973 r = scnprintf(buf, 63, "%ld", *p);
b807c3d0
SR
6974 buf[r++] = '\n';
6975
6a9c981b 6976 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
bc0c38d1
SR
6977}
6978
5e2336a0 6979static const struct file_operations tracing_dyn_info_fops = {
4bf39a94 6980 .open = tracing_open_generic,
b807c3d0 6981 .read = tracing_read_dyn_info,
b444786f 6982 .llseek = generic_file_llseek,
bc0c38d1 6983};
77fd5c15 6984#endif /* CONFIG_DYNAMIC_FTRACE */
bc0c38d1 6985
77fd5c15
SRRH
6986#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
6987static void
bca6c8d0 6988ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
b5f081b5 6989 struct trace_array *tr, struct ftrace_probe_ops *ops,
6e444319 6990 void *data)
77fd5c15 6991{
cab50379 6992 tracing_snapshot_instance(tr);
77fd5c15 6993}
bc0c38d1 6994
77fd5c15 6995static void
bca6c8d0 6996ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
b5f081b5 6997 struct trace_array *tr, struct ftrace_probe_ops *ops,
6e444319 6998 void *data)
bc0c38d1 6999{
6e444319 7000 struct ftrace_func_mapper *mapper = data;
1a93f8bd 7001 long *count = NULL;
77fd5c15 7002
1a93f8bd
SRV
7003 if (mapper)
7004 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
7005
7006 if (count) {
7007
7008 if (*count <= 0)
7009 return;
bc0c38d1 7010
77fd5c15 7011 (*count)--;
1a93f8bd 7012 }
77fd5c15 7013
cab50379 7014 tracing_snapshot_instance(tr);
77fd5c15
SRRH
7015}
7016
7017static int
7018ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
7019 struct ftrace_probe_ops *ops, void *data)
7020{
6e444319 7021 struct ftrace_func_mapper *mapper = data;
1a93f8bd 7022 long *count = NULL;
77fd5c15
SRRH
7023
7024 seq_printf(m, "%ps:", (void *)ip);
7025
fa6f0cc7 7026 seq_puts(m, "snapshot");
77fd5c15 7027
1a93f8bd
SRV
7028 if (mapper)
7029 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
7030
7031 if (count)
7032 seq_printf(m, ":count=%ld\n", *count);
77fd5c15 7033 else
1a93f8bd 7034 seq_puts(m, ":unlimited\n");
77fd5c15
SRRH
7035
7036 return 0;
7037}
7038
1a93f8bd 7039static int
b5f081b5 7040ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
6e444319 7041 unsigned long ip, void *init_data, void **data)
1a93f8bd 7042{
6e444319
SRV
7043 struct ftrace_func_mapper *mapper = *data;
7044
7045 if (!mapper) {
7046 mapper = allocate_ftrace_func_mapper();
7047 if (!mapper)
7048 return -ENOMEM;
7049 *data = mapper;
7050 }
1a93f8bd 7051
6e444319 7052 return ftrace_func_mapper_add_ip(mapper, ip, init_data);
1a93f8bd
SRV
7053}
7054
7055static void
b5f081b5 7056ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
6e444319 7057 unsigned long ip, void *data)
1a93f8bd 7058{
6e444319
SRV
7059 struct ftrace_func_mapper *mapper = data;
7060
7061 if (!ip) {
7062 if (!mapper)
7063 return;
7064 free_ftrace_func_mapper(mapper, NULL);
7065 return;
7066 }
1a93f8bd
SRV
7067
7068 ftrace_func_mapper_remove_ip(mapper, ip);
7069}
7070
77fd5c15
SRRH
7071static struct ftrace_probe_ops snapshot_probe_ops = {
7072 .func = ftrace_snapshot,
7073 .print = ftrace_snapshot_print,
7074};
7075
7076static struct ftrace_probe_ops snapshot_count_probe_ops = {
7077 .func = ftrace_count_snapshot,
7078 .print = ftrace_snapshot_print,
1a93f8bd
SRV
7079 .init = ftrace_snapshot_init,
7080 .free = ftrace_snapshot_free,
77fd5c15
SRRH
7081};
7082
7083static int
04ec7bb6 7084ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
77fd5c15
SRRH
7085 char *glob, char *cmd, char *param, int enable)
7086{
7087 struct ftrace_probe_ops *ops;
7088 void *count = (void *)-1;
7089 char *number;
7090 int ret;
7091
0f179765
SRV
7092 if (!tr)
7093 return -ENODEV;
7094
77fd5c15
SRRH
7095 /* hash funcs only work with set_ftrace_filter */
7096 if (!enable)
7097 return -EINVAL;
7098
7099 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
7100
d3d532d7 7101 if (glob[0] == '!')
7b60f3d8 7102 return unregister_ftrace_function_probe_func(glob+1, tr, ops);
77fd5c15
SRRH
7103
7104 if (!param)
7105 goto out_reg;
7106
7107 number = strsep(&param, ":");
7108
7109 if (!strlen(number))
7110 goto out_reg;
7111
7112 /*
7113 * We use the callback data field (which is a pointer)
7114 * as our counter.
7115 */
7116 ret = kstrtoul(number, 0, (unsigned long *)&count);
7117 if (ret)
7118 return ret;
7119
7120 out_reg:
4c174688 7121 ret = alloc_snapshot(tr);
df62db5b
SRV
7122 if (ret < 0)
7123 goto out;
77fd5c15 7124
4c174688 7125 ret = register_ftrace_function_probe(glob, tr, ops, count);
77fd5c15 7126
df62db5b 7127 out:
77fd5c15
SRRH
7128 return ret < 0 ? ret : 0;
7129}
7130
7131static struct ftrace_func_command ftrace_snapshot_cmd = {
7132 .name = "snapshot",
7133 .func = ftrace_trace_snapshot_callback,
7134};
7135
38de93ab 7136static __init int register_snapshot_cmd(void)
77fd5c15
SRRH
7137{
7138 return register_ftrace_command(&ftrace_snapshot_cmd);
7139}
7140#else
38de93ab 7141static inline __init int register_snapshot_cmd(void) { return 0; }
77fd5c15 7142#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
bc0c38d1 7143
7eeafbca 7144static struct dentry *tracing_get_dentry(struct trace_array *tr)
bc0c38d1 7145{
8434dc93
SRRH
7146 if (WARN_ON(!tr->dir))
7147 return ERR_PTR(-ENODEV);
7148
7149 /* Top directory uses NULL as the parent */
7150 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
7151 return NULL;
7152
7153 /* All sub buffers have a descriptor */
2b6080f2 7154 return tr->dir;
bc0c38d1
SR
7155}
7156
2b6080f2 7157static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
b04cc6b1 7158{
b04cc6b1
FW
7159 struct dentry *d_tracer;
7160
2b6080f2
SR
7161 if (tr->percpu_dir)
7162 return tr->percpu_dir;
b04cc6b1 7163
7eeafbca 7164 d_tracer = tracing_get_dentry(tr);
14a5ae40 7165 if (IS_ERR(d_tracer))
b04cc6b1
FW
7166 return NULL;
7167
8434dc93 7168 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
b04cc6b1 7169
2b6080f2 7170 WARN_ONCE(!tr->percpu_dir,
8434dc93 7171 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
b04cc6b1 7172
2b6080f2 7173 return tr->percpu_dir;
b04cc6b1
FW
7174}
7175
649e9c70
ON
7176static struct dentry *
7177trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
7178 void *data, long cpu, const struct file_operations *fops)
7179{
7180 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
7181
7182 if (ret) /* See tracing_get_cpu() */
7682c918 7183 d_inode(ret)->i_cdev = (void *)(cpu + 1);
649e9c70
ON
7184 return ret;
7185}
7186
2b6080f2 7187static void
8434dc93 7188tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
b04cc6b1 7189{
2b6080f2 7190 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
5452af66 7191 struct dentry *d_cpu;
dd49a38c 7192 char cpu_dir[30]; /* 30 characters should be more than enough */
b04cc6b1 7193
0a3d7ce7
NK
7194 if (!d_percpu)
7195 return;
7196
dd49a38c 7197 snprintf(cpu_dir, 30, "cpu%ld", cpu);
8434dc93 7198 d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
8656e7a2 7199 if (!d_cpu) {
a395d6a7 7200 pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
8656e7a2
FW
7201 return;
7202 }
b04cc6b1 7203
8656e7a2 7204 /* per cpu trace_pipe */
649e9c70 7205 trace_create_cpu_file("trace_pipe", 0444, d_cpu,
15544209 7206 tr, cpu, &tracing_pipe_fops);
b04cc6b1
FW
7207
7208 /* per cpu trace */
649e9c70 7209 trace_create_cpu_file("trace", 0644, d_cpu,
6484c71c 7210 tr, cpu, &tracing_fops);
7f96f93f 7211
649e9c70 7212 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
46ef2be0 7213 tr, cpu, &tracing_buffers_fops);
7f96f93f 7214
649e9c70 7215 trace_create_cpu_file("stats", 0444, d_cpu,
4d3435b8 7216 tr, cpu, &tracing_stats_fops);
438ced17 7217
649e9c70 7218 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
0bc392ee 7219 tr, cpu, &tracing_entries_fops);
f1affcaa
SRRH
7220
7221#ifdef CONFIG_TRACER_SNAPSHOT
649e9c70 7222 trace_create_cpu_file("snapshot", 0644, d_cpu,
6484c71c 7223 tr, cpu, &snapshot_fops);
6de58e62 7224
649e9c70 7225 trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
46ef2be0 7226 tr, cpu, &snapshot_raw_fops);
f1affcaa 7227#endif
b04cc6b1
FW
7228}
7229
60a11774
SR
7230#ifdef CONFIG_FTRACE_SELFTEST
7231/* Let selftest have access to static functions in this file */
7232#include "trace_selftest.c"
7233#endif
7234
577b785f
SR
7235static ssize_t
7236trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
7237 loff_t *ppos)
7238{
7239 struct trace_option_dentry *topt = filp->private_data;
7240 char *buf;
7241
7242 if (topt->flags->val & topt->opt->bit)
7243 buf = "1\n";
7244 else
7245 buf = "0\n";
7246
7247 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
7248}
7249
7250static ssize_t
7251trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
7252 loff_t *ppos)
7253{
7254 struct trace_option_dentry *topt = filp->private_data;
7255 unsigned long val;
577b785f
SR
7256 int ret;
7257
22fe9b54
PH
7258 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7259 if (ret)
577b785f
SR
7260 return ret;
7261
8d18eaaf
LZ
7262 if (val != 0 && val != 1)
7263 return -EINVAL;
577b785f 7264
8d18eaaf 7265 if (!!(topt->flags->val & topt->opt->bit) != val) {
577b785f 7266 mutex_lock(&trace_types_lock);
8c1a49ae 7267 ret = __set_tracer_option(topt->tr, topt->flags,
c757bea9 7268 topt->opt, !val);
577b785f
SR
7269 mutex_unlock(&trace_types_lock);
7270 if (ret)
7271 return ret;
577b785f
SR
7272 }
7273
7274 *ppos += cnt;
7275
7276 return cnt;
7277}
7278
7279
7280static const struct file_operations trace_options_fops = {
7281 .open = tracing_open_generic,
7282 .read = trace_options_read,
7283 .write = trace_options_write,
b444786f 7284 .llseek = generic_file_llseek,
577b785f
SR
7285};
7286
9a38a885
SRRH
7287/*
7288 * In order to pass in both the trace_array descriptor as well as the index
7289 * to the flag that the trace option file represents, the trace_array
7290 * has a character array of trace_flags_index[], which holds the index
7291 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
7292 * The address of this character array is passed to the flag option file
7293 * read/write callbacks.
7294 *
7295 * In order to extract both the index and the trace_array descriptor,
7296 * get_tr_index() uses the following algorithm.
7297 *
7298 * idx = *ptr;
7299 *
7300 * As the pointer itself contains the address of the index (remember
7301 * index[1] == 1).
7302 *
7303 * Then to get the trace_array descriptor, by subtracting that index
7304 * from the ptr, we get to the start of the index itself.
7305 *
7306 * ptr - idx == &index[0]
7307 *
7308 * Then a simple container_of() from that pointer gets us to the
7309 * trace_array descriptor.
7310 */
7311static void get_tr_index(void *data, struct trace_array **ptr,
7312 unsigned int *pindex)
7313{
7314 *pindex = *(unsigned char *)data;
7315
7316 *ptr = container_of(data - *pindex, struct trace_array,
7317 trace_flags_index);
7318}
7319
a8259075
SR
7320static ssize_t
7321trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
7322 loff_t *ppos)
7323{
9a38a885
SRRH
7324 void *tr_index = filp->private_data;
7325 struct trace_array *tr;
7326 unsigned int index;
a8259075
SR
7327 char *buf;
7328
9a38a885
SRRH
7329 get_tr_index(tr_index, &tr, &index);
7330
7331 if (tr->trace_flags & (1 << index))
a8259075
SR
7332 buf = "1\n";
7333 else
7334 buf = "0\n";
7335
7336 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
7337}
7338
7339static ssize_t
7340trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
7341 loff_t *ppos)
7342{
9a38a885
SRRH
7343 void *tr_index = filp->private_data;
7344 struct trace_array *tr;
7345 unsigned int index;
a8259075
SR
7346 unsigned long val;
7347 int ret;
7348
9a38a885
SRRH
7349 get_tr_index(tr_index, &tr, &index);
7350
22fe9b54
PH
7351 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7352 if (ret)
a8259075
SR
7353 return ret;
7354
f2d84b65 7355 if (val != 0 && val != 1)
a8259075 7356 return -EINVAL;
69d34da2
SRRH
7357
7358 mutex_lock(&trace_types_lock);
2b6080f2 7359 ret = set_tracer_flag(tr, 1 << index, val);
69d34da2 7360 mutex_unlock(&trace_types_lock);
a8259075 7361
613f04a0
SRRH
7362 if (ret < 0)
7363 return ret;
7364
a8259075
SR
7365 *ppos += cnt;
7366
7367 return cnt;
7368}
7369
a8259075
SR
7370static const struct file_operations trace_options_core_fops = {
7371 .open = tracing_open_generic,
7372 .read = trace_options_core_read,
7373 .write = trace_options_core_write,
b444786f 7374 .llseek = generic_file_llseek,
a8259075
SR
7375};
7376
5452af66 7377struct dentry *trace_create_file(const char *name,
f4ae40a6 7378 umode_t mode,
5452af66
FW
7379 struct dentry *parent,
7380 void *data,
7381 const struct file_operations *fops)
7382{
7383 struct dentry *ret;
7384
8434dc93 7385 ret = tracefs_create_file(name, mode, parent, data, fops);
5452af66 7386 if (!ret)
a395d6a7 7387 pr_warn("Could not create tracefs '%s' entry\n", name);
5452af66
FW
7388
7389 return ret;
7390}
7391
7392
2b6080f2 7393static struct dentry *trace_options_init_dentry(struct trace_array *tr)
a8259075
SR
7394{
7395 struct dentry *d_tracer;
a8259075 7396
2b6080f2
SR
7397 if (tr->options)
7398 return tr->options;
a8259075 7399
7eeafbca 7400 d_tracer = tracing_get_dentry(tr);
14a5ae40 7401 if (IS_ERR(d_tracer))
a8259075
SR
7402 return NULL;
7403
8434dc93 7404 tr->options = tracefs_create_dir("options", d_tracer);
2b6080f2 7405 if (!tr->options) {
a395d6a7 7406 pr_warn("Could not create tracefs directory 'options'\n");
a8259075
SR
7407 return NULL;
7408 }
7409
2b6080f2 7410 return tr->options;
a8259075
SR
7411}
7412
577b785f 7413static void
2b6080f2
SR
7414create_trace_option_file(struct trace_array *tr,
7415 struct trace_option_dentry *topt,
577b785f
SR
7416 struct tracer_flags *flags,
7417 struct tracer_opt *opt)
7418{
7419 struct dentry *t_options;
577b785f 7420
2b6080f2 7421 t_options = trace_options_init_dentry(tr);
577b785f
SR
7422 if (!t_options)
7423 return;
7424
7425 topt->flags = flags;
7426 topt->opt = opt;
2b6080f2 7427 topt->tr = tr;
577b785f 7428
5452af66 7429 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
577b785f
SR
7430 &trace_options_fops);
7431
577b785f
SR
7432}
7433
37aea98b 7434static void
2b6080f2 7435create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
577b785f
SR
7436{
7437 struct trace_option_dentry *topts;
37aea98b 7438 struct trace_options *tr_topts;
577b785f
SR
7439 struct tracer_flags *flags;
7440 struct tracer_opt *opts;
7441 int cnt;
37aea98b 7442 int i;
577b785f
SR
7443
7444 if (!tracer)
37aea98b 7445 return;
577b785f
SR
7446
7447 flags = tracer->flags;
7448
7449 if (!flags || !flags->opts)
37aea98b
SRRH
7450 return;
7451
7452 /*
7453 * If this is an instance, only create flags for tracers
7454 * the instance may have.
7455 */
7456 if (!trace_ok_for_array(tracer, tr))
7457 return;
7458
7459 for (i = 0; i < tr->nr_topts; i++) {
d39cdd20
CH
7460 /* Make sure there's no duplicate flags. */
7461 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
37aea98b
SRRH
7462 return;
7463 }
577b785f
SR
7464
7465 opts = flags->opts;
7466
7467 for (cnt = 0; opts[cnt].name; cnt++)
7468 ;
7469
0cfe8245 7470 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
577b785f 7471 if (!topts)
37aea98b
SRRH
7472 return;
7473
7474 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
7475 GFP_KERNEL);
7476 if (!tr_topts) {
7477 kfree(topts);
7478 return;
7479 }
7480
7481 tr->topts = tr_topts;
7482 tr->topts[tr->nr_topts].tracer = tracer;
7483 tr->topts[tr->nr_topts].topts = topts;
7484 tr->nr_topts++;
577b785f 7485
41d9c0be 7486 for (cnt = 0; opts[cnt].name; cnt++) {
2b6080f2 7487 create_trace_option_file(tr, &topts[cnt], flags,
577b785f 7488 &opts[cnt]);
41d9c0be
SRRH
7489 WARN_ONCE(topts[cnt].entry == NULL,
7490 "Failed to create trace option: %s",
7491 opts[cnt].name);
7492 }
577b785f
SR
7493}
7494
a8259075 7495static struct dentry *
2b6080f2
SR
7496create_trace_option_core_file(struct trace_array *tr,
7497 const char *option, long index)
a8259075
SR
7498{
7499 struct dentry *t_options;
a8259075 7500
2b6080f2 7501 t_options = trace_options_init_dentry(tr);
a8259075
SR
7502 if (!t_options)
7503 return NULL;
7504
9a38a885
SRRH
7505 return trace_create_file(option, 0644, t_options,
7506 (void *)&tr->trace_flags_index[index],
7507 &trace_options_core_fops);
a8259075
SR
7508}
7509
16270145 7510static void create_trace_options_dir(struct trace_array *tr)
a8259075
SR
7511{
7512 struct dentry *t_options;
16270145 7513 bool top_level = tr == &global_trace;
a8259075
SR
7514 int i;
7515
2b6080f2 7516 t_options = trace_options_init_dentry(tr);
a8259075
SR
7517 if (!t_options)
7518 return;
7519
16270145
SRRH
7520 for (i = 0; trace_options[i]; i++) {
7521 if (top_level ||
7522 !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
7523 create_trace_option_core_file(tr, trace_options[i], i);
7524 }
a8259075
SR
7525}
7526
499e5470
SR
7527static ssize_t
7528rb_simple_read(struct file *filp, char __user *ubuf,
7529 size_t cnt, loff_t *ppos)
7530{
348f0fc2 7531 struct trace_array *tr = filp->private_data;
499e5470
SR
7532 char buf[64];
7533 int r;
7534
10246fa3 7535 r = tracer_tracing_is_on(tr);
499e5470
SR
7536 r = sprintf(buf, "%d\n", r);
7537
7538 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
7539}
7540
7541static ssize_t
7542rb_simple_write(struct file *filp, const char __user *ubuf,
7543 size_t cnt, loff_t *ppos)
7544{
348f0fc2 7545 struct trace_array *tr = filp->private_data;
12883efb 7546 struct ring_buffer *buffer = tr->trace_buffer.buffer;
499e5470
SR
7547 unsigned long val;
7548 int ret;
7549
7550 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7551 if (ret)
7552 return ret;
7553
7554 if (buffer) {
2df8f8a6
SR
7555 mutex_lock(&trace_types_lock);
7556 if (val) {
10246fa3 7557 tracer_tracing_on(tr);
2b6080f2
SR
7558 if (tr->current_trace->start)
7559 tr->current_trace->start(tr);
2df8f8a6 7560 } else {
10246fa3 7561 tracer_tracing_off(tr);
2b6080f2
SR
7562 if (tr->current_trace->stop)
7563 tr->current_trace->stop(tr);
2df8f8a6
SR
7564 }
7565 mutex_unlock(&trace_types_lock);
499e5470
SR
7566 }
7567
7568 (*ppos)++;
7569
7570 return cnt;
7571}
7572
7573static const struct file_operations rb_simple_fops = {
7b85af63 7574 .open = tracing_open_generic_tr,
499e5470
SR
7575 .read = rb_simple_read,
7576 .write = rb_simple_write,
7b85af63 7577 .release = tracing_release_generic_tr,
499e5470
SR
7578 .llseek = default_llseek,
7579};
7580
277ba044
SR
7581struct dentry *trace_instance_dir;
7582
7583static void
8434dc93 7584init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
277ba044 7585
55034cd6
SRRH
7586static int
7587allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
277ba044
SR
7588{
7589 enum ring_buffer_flags rb_flags;
737223fb 7590
983f938a 7591 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
737223fb 7592
dced341b
SRRH
7593 buf->tr = tr;
7594
55034cd6
SRRH
7595 buf->buffer = ring_buffer_alloc(size, rb_flags);
7596 if (!buf->buffer)
7597 return -ENOMEM;
737223fb 7598
55034cd6
SRRH
7599 buf->data = alloc_percpu(struct trace_array_cpu);
7600 if (!buf->data) {
7601 ring_buffer_free(buf->buffer);
7602 return -ENOMEM;
7603 }
737223fb 7604
737223fb
SRRH
7605 /* Allocate the first page for all buffers */
7606 set_buffer_entries(&tr->trace_buffer,
7607 ring_buffer_size(tr->trace_buffer.buffer, 0));
7608
55034cd6
SRRH
7609 return 0;
7610}
737223fb 7611
55034cd6
SRRH
7612static int allocate_trace_buffers(struct trace_array *tr, int size)
7613{
7614 int ret;
737223fb 7615
55034cd6
SRRH
7616 ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
7617 if (ret)
7618 return ret;
737223fb 7619
55034cd6
SRRH
7620#ifdef CONFIG_TRACER_MAX_TRACE
7621 ret = allocate_trace_buffer(tr, &tr->max_buffer,
7622 allocate_snapshot ? size : 1);
7623 if (WARN_ON(ret)) {
737223fb 7624 ring_buffer_free(tr->trace_buffer.buffer);
55034cd6
SRRH
7625 free_percpu(tr->trace_buffer.data);
7626 return -ENOMEM;
7627 }
7628 tr->allocated_snapshot = allocate_snapshot;
737223fb 7629
55034cd6
SRRH
7630 /*
7631 * Only the top level trace array gets its snapshot allocated
7632 * from the kernel command line.
7633 */
7634 allocate_snapshot = false;
737223fb 7635#endif
55034cd6 7636 return 0;
737223fb
SRRH
7637}
7638
f0b70cc4
SRRH
7639static void free_trace_buffer(struct trace_buffer *buf)
7640{
7641 if (buf->buffer) {
7642 ring_buffer_free(buf->buffer);
7643 buf->buffer = NULL;
7644 free_percpu(buf->data);
7645 buf->data = NULL;
7646 }
7647}
7648
23aaa3c1
SRRH
7649static void free_trace_buffers(struct trace_array *tr)
7650{
7651 if (!tr)
7652 return;
7653
f0b70cc4 7654 free_trace_buffer(&tr->trace_buffer);
23aaa3c1
SRRH
7655
7656#ifdef CONFIG_TRACER_MAX_TRACE
f0b70cc4 7657 free_trace_buffer(&tr->max_buffer);
23aaa3c1
SRRH
7658#endif
7659}
7660
9a38a885
SRRH
7661static void init_trace_flags_index(struct trace_array *tr)
7662{
7663 int i;
7664
7665 /* Used by the trace options files */
7666 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
7667 tr->trace_flags_index[i] = i;
7668}
7669
37aea98b
SRRH
7670static void __update_tracer_options(struct trace_array *tr)
7671{
7672 struct tracer *t;
7673
7674 for (t = trace_types; t; t = t->next)
7675 add_tracer_options(tr, t);
7676}
7677
7678static void update_tracer_options(struct trace_array *tr)
7679{
7680 mutex_lock(&trace_types_lock);
7681 __update_tracer_options(tr);
7682 mutex_unlock(&trace_types_lock);
7683}
7684
eae47358 7685static int instance_mkdir(const char *name)
737223fb 7686{
277ba044
SR
7687 struct trace_array *tr;
7688 int ret;
277ba044 7689
12ecef0c 7690 mutex_lock(&event_mutex);
277ba044
SR
7691 mutex_lock(&trace_types_lock);
7692
7693 ret = -EEXIST;
7694 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
7695 if (tr->name && strcmp(tr->name, name) == 0)
7696 goto out_unlock;
7697 }
7698
7699 ret = -ENOMEM;
7700 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
7701 if (!tr)
7702 goto out_unlock;
7703
7704 tr->name = kstrdup(name, GFP_KERNEL);
7705 if (!tr->name)
7706 goto out_free_tr;
7707
ccfe9e42
AL
7708 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
7709 goto out_free_tr;
7710
20550622 7711 tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
983f938a 7712
ccfe9e42
AL
7713 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
7714
277ba044
SR
7715 raw_spin_lock_init(&tr->start_lock);
7716
0b9b12c1
SRRH
7717 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
7718
277ba044
SR
7719 tr->current_trace = &nop_trace;
7720
7721 INIT_LIST_HEAD(&tr->systems);
7722 INIT_LIST_HEAD(&tr->events);
7723
737223fb 7724 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
277ba044
SR
7725 goto out_free_tr;
7726
8434dc93 7727 tr->dir = tracefs_create_dir(name, trace_instance_dir);
277ba044
SR
7728 if (!tr->dir)
7729 goto out_free_tr;
7730
7731 ret = event_trace_add_tracer(tr->dir, tr);
609e85a7 7732 if (ret) {
8434dc93 7733 tracefs_remove_recursive(tr->dir);
277ba044 7734 goto out_free_tr;
609e85a7 7735 }
277ba044 7736
04ec7bb6
SRV
7737 ftrace_init_trace_array(tr);
7738
8434dc93 7739 init_tracer_tracefs(tr, tr->dir);
9a38a885 7740 init_trace_flags_index(tr);
37aea98b 7741 __update_tracer_options(tr);
277ba044
SR
7742
7743 list_add(&tr->list, &ftrace_trace_arrays);
7744
7745 mutex_unlock(&trace_types_lock);
12ecef0c 7746 mutex_unlock(&event_mutex);
277ba044
SR
7747
7748 return 0;
7749
7750 out_free_tr:
23aaa3c1 7751 free_trace_buffers(tr);
ccfe9e42 7752 free_cpumask_var(tr->tracing_cpumask);
277ba044
SR
7753 kfree(tr->name);
7754 kfree(tr);
7755
7756 out_unlock:
7757 mutex_unlock(&trace_types_lock);
12ecef0c 7758 mutex_unlock(&event_mutex);
277ba044
SR
7759
7760 return ret;
7761
7762}
7763
eae47358 7764static int instance_rmdir(const char *name)
0c8916c3
SR
7765{
7766 struct trace_array *tr;
7767 int found = 0;
7768 int ret;
37aea98b 7769 int i;
0c8916c3 7770
12ecef0c 7771 mutex_lock(&event_mutex);
0c8916c3
SR
7772 mutex_lock(&trace_types_lock);
7773
7774 ret = -ENODEV;
7775 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
7776 if (tr->name && strcmp(tr->name, name) == 0) {
7777 found = 1;
7778 break;
7779 }
7780 }
7781 if (!found)
7782 goto out_unlock;
7783
a695cb58 7784 ret = -EBUSY;
cf6ab6d9 7785 if (tr->ref || (tr->current_trace && tr->current_trace->ref))
a695cb58
SRRH
7786 goto out_unlock;
7787
0c8916c3
SR
7788 list_del(&tr->list);
7789
20550622
SRRH
7790 /* Disable all the flags that were enabled coming in */
7791 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
7792 if ((1 << i) & ZEROED_TRACE_FLAGS)
7793 set_tracer_flag(tr, 1 << i, 0);
7794 }
7795
6b450d25 7796 tracing_set_nop(tr);
a0e6369e 7797 clear_ftrace_function_probes(tr);
0c8916c3 7798 event_trace_del_tracer(tr);
d879d0b8 7799 ftrace_clear_pids(tr);
591dffda 7800 ftrace_destroy_function_files(tr);
681a4a2f 7801 tracefs_remove_recursive(tr->dir);
a9fcaaac 7802 free_trace_buffers(tr);
0c8916c3 7803
37aea98b
SRRH
7804 for (i = 0; i < tr->nr_topts; i++) {
7805 kfree(tr->topts[i].topts);
7806 }
7807 kfree(tr->topts);
7808
db9108e0 7809 free_cpumask_var(tr->tracing_cpumask);
0c8916c3
SR
7810 kfree(tr->name);
7811 kfree(tr);
7812
7813 ret = 0;
7814
7815 out_unlock:
7816 mutex_unlock(&trace_types_lock);
12ecef0c 7817 mutex_unlock(&event_mutex);
0c8916c3
SR
7818
7819 return ret;
7820}
7821
277ba044
SR
7822static __init void create_trace_instances(struct dentry *d_tracer)
7823{
eae47358
SRRH
7824 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
7825 instance_mkdir,
7826 instance_rmdir);
277ba044
SR
7827 if (WARN_ON(!trace_instance_dir))
7828 return;
277ba044
SR
7829}
7830
2b6080f2 7831static void
8434dc93 7832init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
2b6080f2 7833{
121aaee7 7834 int cpu;
2b6080f2 7835
607e2ea1
SRRH
7836 trace_create_file("available_tracers", 0444, d_tracer,
7837 tr, &show_traces_fops);
7838
7839 trace_create_file("current_tracer", 0644, d_tracer,
7840 tr, &set_tracer_fops);
7841
ccfe9e42
AL
7842 trace_create_file("tracing_cpumask", 0644, d_tracer,
7843 tr, &tracing_cpumask_fops);
7844
2b6080f2
SR
7845 trace_create_file("trace_options", 0644, d_tracer,
7846 tr, &tracing_iter_fops);
7847
7848 trace_create_file("trace", 0644, d_tracer,
6484c71c 7849 tr, &tracing_fops);
2b6080f2
SR
7850
7851 trace_create_file("trace_pipe", 0444, d_tracer,
15544209 7852 tr, &tracing_pipe_fops);
2b6080f2
SR
7853
7854 trace_create_file("buffer_size_kb", 0644, d_tracer,
0bc392ee 7855 tr, &tracing_entries_fops);
2b6080f2
SR
7856
7857 trace_create_file("buffer_total_size_kb", 0444, d_tracer,
7858 tr, &tracing_total_entries_fops);
7859
238ae93d 7860 trace_create_file("free_buffer", 0200, d_tracer,
2b6080f2
SR
7861 tr, &tracing_free_buffer_fops);
7862
7863 trace_create_file("trace_marker", 0220, d_tracer,
7864 tr, &tracing_mark_fops);
7865
fa32e855
SR
7866 trace_create_file("trace_marker_raw", 0220, d_tracer,
7867 tr, &tracing_mark_raw_fops);
7868
2b6080f2
SR
7869 trace_create_file("trace_clock", 0644, d_tracer, tr,
7870 &trace_clock_fops);
7871
7872 trace_create_file("tracing_on", 0644, d_tracer,
6484c71c 7873 tr, &rb_simple_fops);
ce9bae55 7874
16270145
SRRH
7875 create_trace_options_dir(tr);
7876
f971cc9a 7877#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
6d9b3fa5
SRRH
7878 trace_create_file("tracing_max_latency", 0644, d_tracer,
7879 &tr->max_latency, &tracing_max_lat_fops);
7880#endif
7881
591dffda
SRRH
7882 if (ftrace_create_function_files(tr, d_tracer))
7883 WARN(1, "Could not allocate function filter files");
7884
ce9bae55
SRRH
7885#ifdef CONFIG_TRACER_SNAPSHOT
7886 trace_create_file("snapshot", 0644, d_tracer,
6484c71c 7887 tr, &snapshot_fops);
ce9bae55 7888#endif
121aaee7
SRRH
7889
7890 for_each_tracing_cpu(cpu)
8434dc93 7891 tracing_init_tracefs_percpu(tr, cpu);
121aaee7 7892
345ddcc8 7893 ftrace_init_tracefs(tr, d_tracer);
2b6080f2
SR
7894}
7895
93faccbb 7896static struct vfsmount *trace_automount(struct dentry *mntpt, void *ingore)
f76180bc
SRRH
7897{
7898 struct vfsmount *mnt;
7899 struct file_system_type *type;
7900
7901 /*
7902 * To maintain backward compatibility for tools that mount
7903 * debugfs to get to the tracing facility, tracefs is automatically
7904 * mounted to the debugfs/tracing directory.
7905 */
7906 type = get_fs_type("tracefs");
7907 if (!type)
7908 return NULL;
93faccbb 7909 mnt = vfs_submount(mntpt, type, "tracefs", NULL);
f76180bc
SRRH
7910 put_filesystem(type);
7911 if (IS_ERR(mnt))
7912 return NULL;
7913 mntget(mnt);
7914
7915 return mnt;
7916}
7917
7eeafbca
SRRH
7918/**
7919 * tracing_init_dentry - initialize top level trace array
7920 *
7921 * This is called when creating files or directories in the tracing
7922 * directory. It is called via fs_initcall() by any of the boot up code
7923 * and expects to return the dentry of the top level tracing directory.
7924 */
7925struct dentry *tracing_init_dentry(void)
7926{
7927 struct trace_array *tr = &global_trace;
7928
f76180bc 7929 /* The top level trace array uses NULL as parent */
7eeafbca 7930 if (tr->dir)
f76180bc 7931 return NULL;
7eeafbca 7932
8b129199
JW
7933 if (WARN_ON(!tracefs_initialized()) ||
7934 (IS_ENABLED(CONFIG_DEBUG_FS) &&
7935 WARN_ON(!debugfs_initialized())))
7eeafbca
SRRH
7936 return ERR_PTR(-ENODEV);
7937
f76180bc
SRRH
7938 /*
7939 * As there may still be users that expect the tracing
7940 * files to exist in debugfs/tracing, we must automount
7941 * the tracefs file system there, so older tools still
7942 * work with the newer kerenl.
7943 */
7944 tr->dir = debugfs_create_automount("tracing", NULL,
7945 trace_automount, NULL);
7eeafbca
SRRH
7946 if (!tr->dir) {
7947 pr_warn_once("Could not create debugfs directory 'tracing'\n");
7948 return ERR_PTR(-ENOMEM);
7949 }
7950
8434dc93 7951 return NULL;
7eeafbca
SRRH
7952}
7953
00f4b652
JL
7954extern struct trace_eval_map *__start_ftrace_eval_maps[];
7955extern struct trace_eval_map *__stop_ftrace_eval_maps[];
0c564a53 7956
5f60b351 7957static void __init trace_eval_init(void)
0c564a53 7958{
3673b8e4
SRRH
7959 int len;
7960
02fd7f68 7961 len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
f57a4143 7962 trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
3673b8e4
SRRH
7963}
7964
7965#ifdef CONFIG_MODULES
f57a4143 7966static void trace_module_add_evals(struct module *mod)
3673b8e4 7967{
99be647c 7968 if (!mod->num_trace_evals)
3673b8e4
SRRH
7969 return;
7970
7971 /*
7972 * Modules with bad taint do not have events created, do
7973 * not bother with enums either.
7974 */
7975 if (trace_module_has_bad_taint(mod))
7976 return;
7977
f57a4143 7978 trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
3673b8e4
SRRH
7979}
7980
681bec03 7981#ifdef CONFIG_TRACE_EVAL_MAP_FILE
f57a4143 7982static void trace_module_remove_evals(struct module *mod)
9828413d 7983{
23bf8cb8
JL
7984 union trace_eval_map_item *map;
7985 union trace_eval_map_item **last = &trace_eval_maps;
9828413d 7986
99be647c 7987 if (!mod->num_trace_evals)
9828413d
SRRH
7988 return;
7989
1793ed93 7990 mutex_lock(&trace_eval_mutex);
9828413d 7991
23bf8cb8 7992 map = trace_eval_maps;
9828413d
SRRH
7993
7994 while (map) {
7995 if (map->head.mod == mod)
7996 break;
5f60b351 7997 map = trace_eval_jmp_to_tail(map);
9828413d
SRRH
7998 last = &map->tail.next;
7999 map = map->tail.next;
8000 }
8001 if (!map)
8002 goto out;
8003
5f60b351 8004 *last = trace_eval_jmp_to_tail(map)->tail.next;
9828413d
SRRH
8005 kfree(map);
8006 out:
1793ed93 8007 mutex_unlock(&trace_eval_mutex);
9828413d
SRRH
8008}
8009#else
f57a4143 8010static inline void trace_module_remove_evals(struct module *mod) { }
681bec03 8011#endif /* CONFIG_TRACE_EVAL_MAP_FILE */
9828413d 8012
3673b8e4
SRRH
8013static int trace_module_notify(struct notifier_block *self,
8014 unsigned long val, void *data)
8015{
8016 struct module *mod = data;
8017
8018 switch (val) {
8019 case MODULE_STATE_COMING:
f57a4143 8020 trace_module_add_evals(mod);
3673b8e4 8021 break;
9828413d 8022 case MODULE_STATE_GOING:
f57a4143 8023 trace_module_remove_evals(mod);
9828413d 8024 break;
3673b8e4
SRRH
8025 }
8026
8027 return 0;
0c564a53
SRRH
8028}
8029
3673b8e4
SRRH
8030static struct notifier_block trace_module_nb = {
8031 .notifier_call = trace_module_notify,
8032 .priority = 0,
8033};
9828413d 8034#endif /* CONFIG_MODULES */
3673b8e4 8035
8434dc93 8036static __init int tracer_init_tracefs(void)
bc0c38d1
SR
8037{
8038 struct dentry *d_tracer;
bc0c38d1 8039
7e53bd42
LJ
8040 trace_access_lock_init();
8041
bc0c38d1 8042 d_tracer = tracing_init_dentry();
14a5ae40 8043 if (IS_ERR(d_tracer))
ed6f1c99 8044 return 0;
bc0c38d1 8045
8434dc93 8046 init_tracer_tracefs(&global_trace, d_tracer);
501c2375 8047 ftrace_init_tracefs_toplevel(&global_trace, d_tracer);
bc0c38d1 8048
5452af66 8049 trace_create_file("tracing_thresh", 0644, d_tracer,
6508fa76 8050 &global_trace, &tracing_thresh_fops);
a8259075 8051
339ae5d3 8052 trace_create_file("README", 0444, d_tracer,
5452af66
FW
8053 NULL, &tracing_readme_fops);
8054
69abe6a5
AP
8055 trace_create_file("saved_cmdlines", 0444, d_tracer,
8056 NULL, &tracing_saved_cmdlines_fops);
5bf9a1ee 8057
939c7a4f
YY
8058 trace_create_file("saved_cmdlines_size", 0644, d_tracer,
8059 NULL, &tracing_saved_cmdlines_size_fops);
8060
99c621d7
MS
8061 trace_create_file("saved_tgids", 0444, d_tracer,
8062 NULL, &tracing_saved_tgids_fops);
8063
5f60b351 8064 trace_eval_init();
0c564a53 8065
f57a4143 8066 trace_create_eval_file(d_tracer);
9828413d 8067
3673b8e4
SRRH
8068#ifdef CONFIG_MODULES
8069 register_module_notifier(&trace_module_nb);
8070#endif
8071
bc0c38d1 8072#ifdef CONFIG_DYNAMIC_FTRACE
5452af66
FW
8073 trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
8074 &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
bc0c38d1 8075#endif
b04cc6b1 8076
277ba044 8077 create_trace_instances(d_tracer);
5452af66 8078
37aea98b 8079 update_tracer_options(&global_trace);
09d23a1d 8080
b5ad384e 8081 return 0;
bc0c38d1
SR
8082}
8083
3f5a54e3
SR
8084static int trace_panic_handler(struct notifier_block *this,
8085 unsigned long event, void *unused)
8086{
944ac425 8087 if (ftrace_dump_on_oops)
cecbca96 8088 ftrace_dump(ftrace_dump_on_oops);
3f5a54e3
SR
8089 return NOTIFY_OK;
8090}
8091
8092static struct notifier_block trace_panic_notifier = {
8093 .notifier_call = trace_panic_handler,
8094 .next = NULL,
8095 .priority = 150 /* priority: INT_MAX >= x >= 0 */
8096};
8097
8098static int trace_die_handler(struct notifier_block *self,
8099 unsigned long val,
8100 void *data)
8101{
8102 switch (val) {
8103 case DIE_OOPS:
944ac425 8104 if (ftrace_dump_on_oops)
cecbca96 8105 ftrace_dump(ftrace_dump_on_oops);
3f5a54e3
SR
8106 break;
8107 default:
8108 break;
8109 }
8110 return NOTIFY_OK;
8111}
8112
8113static struct notifier_block trace_die_notifier = {
8114 .notifier_call = trace_die_handler,
8115 .priority = 200
8116};
8117
8118/*
8119 * printk is set to max of 1024, we really don't need it that big.
8120 * Nothing should be printing 1000 characters anyway.
8121 */
8122#define TRACE_MAX_PRINT 1000
8123
8124/*
8125 * Define here KERN_TRACE so that we have one place to modify
8126 * it if we decide to change what log level the ftrace dump
8127 * should be at.
8128 */
428aee14 8129#define KERN_TRACE KERN_EMERG
3f5a54e3 8130
955b61e5 8131void
3f5a54e3
SR
8132trace_printk_seq(struct trace_seq *s)
8133{
8134 /* Probably should print a warning here. */
3a161d99
SRRH
8135 if (s->seq.len >= TRACE_MAX_PRINT)
8136 s->seq.len = TRACE_MAX_PRINT;
3f5a54e3 8137
820b75f6
SRRH
8138 /*
8139 * More paranoid code. Although the buffer size is set to
8140 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
8141 * an extra layer of protection.
8142 */
8143 if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
8144 s->seq.len = s->seq.size - 1;
3f5a54e3
SR
8145
8146 /* should be zero ended, but we are paranoid. */
3a161d99 8147 s->buffer[s->seq.len] = 0;
3f5a54e3
SR
8148
8149 printk(KERN_TRACE "%s", s->buffer);
8150
f9520750 8151 trace_seq_init(s);
3f5a54e3
SR
8152}
8153
955b61e5
JW
8154void trace_init_global_iter(struct trace_iterator *iter)
8155{
8156 iter->tr = &global_trace;
2b6080f2 8157 iter->trace = iter->tr->current_trace;
ae3b5093 8158 iter->cpu_file = RING_BUFFER_ALL_CPUS;
12883efb 8159 iter->trace_buffer = &global_trace.trace_buffer;
b2f974d6
CS
8160
8161 if (iter->trace && iter->trace->open)
8162 iter->trace->open(iter);
8163
8164 /* Annotate start of buffers if we had overruns */
8165 if (ring_buffer_overruns(iter->trace_buffer->buffer))
8166 iter->iter_flags |= TRACE_FILE_ANNOTATE;
8167
8168 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
8169 if (trace_clocks[iter->tr->clock_id].in_ns)
8170 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
955b61e5
JW
8171}
8172
7fe70b57 8173void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
3f5a54e3 8174{
3f5a54e3
SR
8175 /* use static because iter can be a bit big for the stack */
8176 static struct trace_iterator iter;
7fe70b57 8177 static atomic_t dump_running;
983f938a 8178 struct trace_array *tr = &global_trace;
cf586b61 8179 unsigned int old_userobj;
d769041f
SR
8180 unsigned long flags;
8181 int cnt = 0, cpu;
3f5a54e3 8182
7fe70b57
SRRH
8183 /* Only allow one dump user at a time. */
8184 if (atomic_inc_return(&dump_running) != 1) {
8185 atomic_dec(&dump_running);
8186 return;
8187 }
3f5a54e3 8188
7fe70b57
SRRH
8189 /*
8190 * Always turn off tracing when we dump.
8191 * We don't need to show trace output of what happens
8192 * between multiple crashes.
8193 *
8194 * If the user does a sysrq-z, then they can re-enable
8195 * tracing with echo 1 > tracing_on.
8196 */
0ee6b6cf 8197 tracing_off();
cf586b61 8198
7fe70b57 8199 local_irq_save(flags);
3f5a54e3 8200
38dbe0b1 8201 /* Simulate the iterator */
955b61e5
JW
8202 trace_init_global_iter(&iter);
8203
d769041f 8204 for_each_tracing_cpu(cpu) {
5e2d5ef8 8205 atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
d769041f
SR
8206 }
8207
983f938a 8208 old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
cf586b61 8209
b54d3de9 8210 /* don't look at user memory in panic mode */
983f938a 8211 tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
b54d3de9 8212
cecbca96
FW
8213 switch (oops_dump_mode) {
8214 case DUMP_ALL:
ae3b5093 8215 iter.cpu_file = RING_BUFFER_ALL_CPUS;
cecbca96
FW
8216 break;
8217 case DUMP_ORIG:
8218 iter.cpu_file = raw_smp_processor_id();
8219 break;
8220 case DUMP_NONE:
8221 goto out_enable;
8222 default:
8223 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
ae3b5093 8224 iter.cpu_file = RING_BUFFER_ALL_CPUS;
cecbca96
FW
8225 }
8226
8227 printk(KERN_TRACE "Dumping ftrace buffer:\n");
3f5a54e3 8228
7fe70b57
SRRH
8229 /* Did function tracer already get disabled? */
8230 if (ftrace_is_dead()) {
8231 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
8232 printk("# MAY BE MISSING FUNCTION EVENTS\n");
8233 }
8234
3f5a54e3
SR
8235 /*
8236 * We need to stop all tracing on all CPUS to read the
8237 * the next buffer. This is a bit expensive, but is
8238 * not done often. We fill all what we can read,
8239 * and then release the locks again.
8240 */
8241
3f5a54e3
SR
8242 while (!trace_empty(&iter)) {
8243
8244 if (!cnt)
8245 printk(KERN_TRACE "---------------------------------\n");
8246
8247 cnt++;
8248
8249 /* reset all but tr, trace, and overruns */
8250 memset(&iter.seq, 0,
8251 sizeof(struct trace_iterator) -
8252 offsetof(struct trace_iterator, seq));
8253 iter.iter_flags |= TRACE_FILE_LAT_FMT;
8254 iter.pos = -1;
8255
955b61e5 8256 if (trace_find_next_entry_inc(&iter) != NULL) {
74e7ff8c
LJ
8257 int ret;
8258
8259 ret = print_trace_line(&iter);
8260 if (ret != TRACE_TYPE_NO_CONSUME)
8261 trace_consume(&iter);
3f5a54e3 8262 }
b892e5c8 8263 touch_nmi_watchdog();
3f5a54e3
SR
8264
8265 trace_printk_seq(&iter.seq);
8266 }
8267
8268 if (!cnt)
8269 printk(KERN_TRACE " (ftrace buffer empty)\n");
8270 else
8271 printk(KERN_TRACE "---------------------------------\n");
8272
cecbca96 8273 out_enable:
983f938a 8274 tr->trace_flags |= old_userobj;
cf586b61 8275
7fe70b57
SRRH
8276 for_each_tracing_cpu(cpu) {
8277 atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
cf586b61 8278 }
7fe70b57 8279 atomic_dec(&dump_running);
cd891ae0 8280 local_irq_restore(flags);
3f5a54e3 8281}
a8eecf22 8282EXPORT_SYMBOL_GPL(ftrace_dump);
cf586b61 8283
7e465baa
TZ
8284int trace_run_command(const char *buf, int (*createfn)(int, char **))
8285{
8286 char **argv;
8287 int argc, ret;
8288
8289 argc = 0;
8290 ret = 0;
8291 argv = argv_split(GFP_KERNEL, buf, &argc);
8292 if (!argv)
8293 return -ENOMEM;
8294
8295 if (argc)
8296 ret = createfn(argc, argv);
8297
8298 argv_free(argv);
8299
8300 return ret;
8301}
8302
8303#define WRITE_BUFSIZE 4096
8304
8305ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
8306 size_t count, loff_t *ppos,
8307 int (*createfn)(int, char **))
8308{
8309 char *kbuf, *buf, *tmp;
8310 int ret = 0;
8311 size_t done = 0;
8312 size_t size;
8313
8314 kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
8315 if (!kbuf)
8316 return -ENOMEM;
8317
8318 while (done < count) {
8319 size = count - done;
8320
8321 if (size >= WRITE_BUFSIZE)
8322 size = WRITE_BUFSIZE - 1;
8323
8324 if (copy_from_user(kbuf, buffer + done, size)) {
8325 ret = -EFAULT;
8326 goto out;
8327 }
8328 kbuf[size] = '\0';
8329 buf = kbuf;
8330 do {
8331 tmp = strchr(buf, '\n');
8332 if (tmp) {
8333 *tmp = '\0';
8334 size = tmp - buf + 1;
8335 } else {
8336 size = strlen(buf);
8337 if (done + size < count) {
8338 if (buf != kbuf)
8339 break;
8340 /* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
8341 pr_warn("Line length is too long: Should be less than %d\n",
8342 WRITE_BUFSIZE - 2);
8343 ret = -EINVAL;
8344 goto out;
8345 }
8346 }
8347 done += size;
8348
8349 /* Remove comments */
8350 tmp = strchr(buf, '#');
8351
8352 if (tmp)
8353 *tmp = '\0';
8354
8355 ret = trace_run_command(buf, createfn);
8356 if (ret)
8357 goto out;
8358 buf += size;
8359
8360 } while (done < count);
8361 }
8362 ret = done;
8363
8364out:
8365 kfree(kbuf);
8366
8367 return ret;
8368}
8369
3928a8a2 8370__init static int tracer_alloc_buffers(void)
bc0c38d1 8371{
73c5162a 8372 int ring_buf_size;
9e01c1b7 8373 int ret = -ENOMEM;
4c11d7ae 8374
b5e87c05
SRRH
8375 /*
8376 * Make sure we don't accidently add more trace options
8377 * than we have bits for.
8378 */
9a38a885 8379 BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);
b5e87c05 8380
9e01c1b7
RR
8381 if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
8382 goto out;
8383
ccfe9e42 8384 if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
9e01c1b7 8385 goto out_free_buffer_mask;
4c11d7ae 8386
07d777fe
SR
8387 /* Only allocate trace_printk buffers if a trace_printk exists */
8388 if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
81698831 8389 /* Must be called before global_trace.buffer is allocated */
07d777fe
SR
8390 trace_printk_init_buffers();
8391
73c5162a
SR
8392 /* To save memory, keep the ring buffer size to its minimum */
8393 if (ring_buffer_expanded)
8394 ring_buf_size = trace_buf_size;
8395 else
8396 ring_buf_size = 1;
8397
9e01c1b7 8398 cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
ccfe9e42 8399 cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
9e01c1b7 8400
2b6080f2
SR
8401 raw_spin_lock_init(&global_trace.start_lock);
8402
b32614c0
SAS
8403 /*
8404 * The prepare callbacks allocates some memory for the ring buffer. We
8405 * don't free the buffer if the if the CPU goes down. If we were to free
8406 * the buffer, then the user would lose any trace that was in the
8407 * buffer. The memory will be removed once the "instance" is removed.
8408 */
8409 ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
8410 "trace/RB:preapre", trace_rb_cpu_prepare,
8411 NULL);
8412 if (ret < 0)
8413 goto out_free_cpumask;
2c4a33ab 8414 /* Used for event triggers */
147d88e0 8415 ret = -ENOMEM;
2c4a33ab
SRRH
8416 temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
8417 if (!temp_buffer)
b32614c0 8418 goto out_rm_hp_state;
2c4a33ab 8419
939c7a4f
YY
8420 if (trace_create_savedcmd() < 0)
8421 goto out_free_temp_buffer;
8422
9e01c1b7 8423 /* TODO: make the number of buffers hot pluggable with CPUS */
737223fb 8424 if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
3928a8a2
SR
8425 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
8426 WARN_ON(1);
939c7a4f 8427 goto out_free_savedcmd;
4c11d7ae 8428 }
a7603ff4 8429
499e5470
SR
8430 if (global_trace.buffer_disabled)
8431 tracing_off();
4c11d7ae 8432
e1e232ca
SR
8433 if (trace_boot_clock) {
8434 ret = tracing_set_clock(&global_trace, trace_boot_clock);
8435 if (ret < 0)
a395d6a7
JP
8436 pr_warn("Trace clock %s not defined, going back to default\n",
8437 trace_boot_clock);
e1e232ca
SR
8438 }
8439
ca164318
SRRH
8440 /*
8441 * register_tracer() might reference current_trace, so it
8442 * needs to be set before we register anything. This is
8443 * just a bootstrap of current_trace anyway.
8444 */
2b6080f2
SR
8445 global_trace.current_trace = &nop_trace;
8446
0b9b12c1
SRRH
8447 global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
8448
4104d326
SRRH
8449 ftrace_init_global_array_ops(&global_trace);
8450
9a38a885
SRRH
8451 init_trace_flags_index(&global_trace);
8452
ca164318
SRRH
8453 register_tracer(&nop_trace);
8454
dbeafd0d
SRV
8455 /* Function tracing may start here (via kernel command line) */
8456 init_function_trace();
8457
60a11774
SR
8458 /* All seems OK, enable tracing */
8459 tracing_disabled = 0;
3928a8a2 8460
3f5a54e3
SR
8461 atomic_notifier_chain_register(&panic_notifier_list,
8462 &trace_panic_notifier);
8463
8464 register_die_notifier(&trace_die_notifier);
2fc1dfbe 8465
ae63b31e
SR
8466 global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
8467
8468 INIT_LIST_HEAD(&global_trace.systems);
8469 INIT_LIST_HEAD(&global_trace.events);
8470 list_add(&global_trace.list, &ftrace_trace_arrays);
8471
a4d1e688 8472 apply_trace_boot_options();
7bcfaf54 8473
77fd5c15
SRRH
8474 register_snapshot_cmd();
8475
2fc1dfbe 8476 return 0;
3f5a54e3 8477
939c7a4f
YY
8478out_free_savedcmd:
8479 free_saved_cmdlines_buffer(savedcmd);
2c4a33ab
SRRH
8480out_free_temp_buffer:
8481 ring_buffer_free(temp_buffer);
b32614c0
SAS
8482out_rm_hp_state:
8483 cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
9e01c1b7 8484out_free_cpumask:
ccfe9e42 8485 free_cpumask_var(global_trace.tracing_cpumask);
9e01c1b7
RR
8486out_free_buffer_mask:
8487 free_cpumask_var(tracing_buffer_mask);
8488out:
8489 return ret;
bc0c38d1 8490}
b2821ae6 8491
e725c731 8492void __init early_trace_init(void)
5f893b26 8493{
0daa2302
SRRH
8494 if (tracepoint_printk) {
8495 tracepoint_print_iter =
8496 kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
8497 if (WARN_ON(!tracepoint_print_iter))
8498 tracepoint_printk = 0;
42391745
SRRH
8499 else
8500 static_key_enable(&tracepoint_printk_key.key);
0daa2302 8501 }
5f893b26 8502 tracer_alloc_buffers();
e725c731
SRV
8503}
8504
8505void __init trace_init(void)
8506{
0c564a53 8507 trace_event_init();
5f893b26
SRRH
8508}
8509
b2821ae6
SR
8510__init static int clear_boot_tracer(void)
8511{
8512 /*
8513 * The default tracer at boot buffer is an init section.
8514 * This function is called in lateinit. If we did not
8515 * find the boot tracer, then clear it out, to prevent
8516 * later registration from accessing the buffer that is
8517 * about to be freed.
8518 */
8519 if (!default_bootup_tracer)
8520 return 0;
8521
8522 printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
8523 default_bootup_tracer);
8524 default_bootup_tracer = NULL;
8525
8526 return 0;
8527}
8528
8434dc93 8529fs_initcall(tracer_init_tracefs);
4bb0f0e7 8530late_initcall_sync(clear_boot_tracer);