kernel/trace/trace_events.c
// SPDX-License-Identifier: GPL-2.0
/*
 * event tracer
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 *  - Added format output of fields of the trace point.
 *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
 *
 */

#define pr_fmt(fmt) fmt

#include <linux/workqueue.h>
#include <linux/security.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/tracefs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/sort.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include <trace/events/sched.h>
#include <trace/syscall.h>

#include <asm/setup.h>

#include "trace_output.h"

#undef TRACE_SYSTEM
#define TRACE_SYSTEM "TRACE_SYSTEM"

DEFINE_MUTEX(event_mutex);

LIST_HEAD(ftrace_events);
static LIST_HEAD(ftrace_generic_fields);
static LIST_HEAD(ftrace_common_fields);
static bool eventdir_initialized;

static LIST_HEAD(module_strings);

struct module_string {
	struct list_head	next;
	struct module		*module;
	char			*str;
};

#define GFP_TRACE (GFP_KERNEL | __GFP_ZERO)

static struct kmem_cache *field_cachep;
static struct kmem_cache *file_cachep;

static inline int system_refcount(struct event_subsystem *system)
{
	return system->ref_count;
}

static int system_refcount_inc(struct event_subsystem *system)
{
	return system->ref_count++;
}

static int system_refcount_dec(struct event_subsystem *system)
{
	return --system->ref_count;
}
/* Double loops, do not use break, only gotos work */
#define do_for_each_event_file(tr, file)			\
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {	\
		list_for_each_entry(file, &tr->events, list)

#define do_for_each_event_file_safe(tr, file)			\
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {	\
		struct trace_event_file *___n;			\
		list_for_each_entry_safe(file, ___n, &tr->events, list)

#define while_for_each_event_file()		\
	}

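/*
 * Illustrative use of the iterator pair above (a sketch, not code from
 * this file; want_file() is a made-up predicate). The macros expand to
 * nested loops, so a plain 'break' would only leave the inner loop:
 *
 *	do_for_each_event_file(tr, file) {
 *		if (want_file(file))
 *			goto found;	// 'break' would be wrong here
 *	} while_for_each_event_file();
 * found:
 *	...
 */
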
static struct ftrace_event_field *
__find_event_field(struct list_head *head, char *name)
{
	struct ftrace_event_field *field;

	list_for_each_entry(field, head, link) {
		if (!strcmp(field->name, name))
			return field;
	}

	return NULL;
}

struct ftrace_event_field *
trace_find_event_field(struct trace_event_call *call, char *name)
{
	struct ftrace_event_field *field;
	struct list_head *head;

	head = trace_get_fields(call);
	field = __find_event_field(head, name);
	if (field)
		return field;

	field = __find_event_field(&ftrace_generic_fields, name);
	if (field)
		return field;

	return __find_event_field(&ftrace_common_fields, name);
}

static int __trace_define_field(struct list_head *head, const char *type,
				const char *name, int offset, int size,
				int is_signed, int filter_type, int len)
{
	struct ftrace_event_field *field;

	field = kmem_cache_alloc(field_cachep, GFP_TRACE);
	if (!field)
		return -ENOMEM;

	field->name = name;
	field->type = type;

	if (filter_type == FILTER_OTHER)
		field->filter_type = filter_assign_type(type);
	else
		field->filter_type = filter_type;

	field->offset = offset;
	field->size = size;
	field->is_signed = is_signed;
	field->len = len;

	list_add(&field->link, head);

	return 0;
}

int trace_define_field(struct trace_event_call *call, const char *type,
		       const char *name, int offset, int size, int is_signed,
		       int filter_type)
{
	struct list_head *head;

	if (WARN_ON(!call->class))
		return 0;

	head = trace_get_fields(call);
	return __trace_define_field(head, type, name, offset, size,
				    is_signed, filter_type, 0);
}
EXPORT_SYMBOL_GPL(trace_define_field);

static int trace_define_field_ext(struct trace_event_call *call, const char *type,
		       const char *name, int offset, int size, int is_signed,
		       int filter_type, int len)
{
	struct list_head *head;

	if (WARN_ON(!call->class))
		return 0;

	head = trace_get_fields(call);
	return __trace_define_field(head, type, name, offset, size,
				    is_signed, filter_type, len);
}

#define __generic_field(type, item, filter_type)			\
	ret = __trace_define_field(&ftrace_generic_fields, #type,	\
				   #item, 0, 0, is_signed_type(type),	\
				   filter_type, 0);			\
	if (ret)							\
		return ret;

#define __common_field(type, item)					\
	ret = __trace_define_field(&ftrace_common_fields, #type,	\
				   "common_" #item,			\
				   offsetof(typeof(ent), item),		\
				   sizeof(ent.item),			\
				   is_signed_type(type), FILTER_OTHER, 0); \
	if (ret)							\
		return ret;

static int trace_define_generic_fields(void)
{
	int ret;

	__generic_field(int, CPU, FILTER_CPU);
	__generic_field(int, cpu, FILTER_CPU);
	__generic_field(int, common_cpu, FILTER_CPU);
	__generic_field(char *, COMM, FILTER_COMM);
	__generic_field(char *, comm, FILTER_COMM);

	return ret;
}

static int trace_define_common_fields(void)
{
	int ret;
	struct trace_entry ent;

	__common_field(unsigned short, type);
	__common_field(unsigned char, flags);
	/* Holds both preempt_count and migrate_disable */
	__common_field(unsigned char, preempt_count);
	__common_field(int, pid);

	return ret;
}
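
/*
 * The common fields defined above appear at the start of every event's
 * "format" file. The layout below is illustrative of what that file
 * prints (offsets follow struct trace_entry):
 *
 *	field:unsigned short common_type;	offset:0;	size:2;	signed:0;
 *	field:unsigned char common_flags;	offset:2;	size:1;	signed:0;
 *	field:unsigned char common_preempt_count;	offset:3;	size:1;	signed:0;
 *	field:int common_pid;			offset:4;	size:4;	signed:1;
 */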

static void trace_destroy_fields(struct trace_event_call *call)
{
	struct ftrace_event_field *field, *next;
	struct list_head *head;

	head = trace_get_fields(call);
	list_for_each_entry_safe(field, next, head, link) {
		list_del(&field->link);
		kmem_cache_free(field_cachep, field);
	}
}

/*
 * run-time version of trace_event_get_offsets_<call>() that returns the last
 * accessible offset of trace fields excluding __dynamic_array bytes
 */
int trace_event_get_offsets(struct trace_event_call *call)
{
	struct ftrace_event_field *tail;
	struct list_head *head;

	head = trace_get_fields(call);
	/*
	 * head->next points to the last field with the largest offset,
	 * since it was added last by trace_define_field()
	 */
	tail = list_first_entry(head, struct ftrace_event_field, link);
	return tail->offset + tail->size;
}
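
/*
 * Worked example (illustrative): if the last field added was a 4-byte
 * 'int pid' at offset 12, trace_event_get_offsets() returns 12 + 4 = 16,
 * the first byte past the statically sized fields.
 */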

/*
 * Check if the referenced field is an array and return true,
 * as arrays are OK to dereference.
 */
static bool test_field(const char *fmt, struct trace_event_call *call)
{
	struct trace_event_fields *field = call->class->fields_array;
	const char *array_descriptor;
	const char *p = fmt;
	int len;

	if (!(len = str_has_prefix(fmt, "REC->")))
		return false;
	fmt += len;
	for (p = fmt; *p; p++) {
		if (!isalnum(*p) && *p != '_')
			break;
	}
	len = p - fmt;

	for (; field->type; field++) {
		if (strncmp(field->name, fmt, len) ||
		    field->name[len])
			continue;
		array_descriptor = strchr(field->type, '[');
		/* This is an array and is OK to dereference. */
		return array_descriptor != NULL;
	}
	return false;
}

/*
 * Examine the print fmt of the event looking for unsafe dereference
 * pointers using %p* that could be recorded in the trace event and
 * much later referenced after the pointer was freed. Dereferencing
 * a pointer is OK if it points into the event itself.
 */
static void test_event_printk(struct trace_event_call *call)
{
	u64 dereference_flags = 0;
	bool first = true;
	const char *fmt, *c, *r, *a;
	int parens = 0;
	char in_quote = 0;
	int start_arg = 0;
	int arg = 0;
	int i;

	fmt = call->print_fmt;

	if (!fmt)
		return;

	for (i = 0; fmt[i]; i++) {
		switch (fmt[i]) {
		case '\\':
			i++;
			if (!fmt[i])
				return;
			continue;
		case '"':
		case '\'':
			/*
			 * The print fmt starts with a string that
			 * is processed first to find %p* usage,
			 * then after the first string, the print fmt
			 * contains arguments that are used to check
			 * if the dereferenced %p* usage is safe.
			 */
			if (first) {
				if (fmt[i] == '\'')
					continue;
				if (in_quote) {
					arg = 0;
					first = false;
					/*
					 * If there were no %p* uses,
					 * the fmt is OK.
					 */
					if (!dereference_flags)
						return;
				}
			}
			if (in_quote) {
				if (in_quote == fmt[i])
					in_quote = 0;
			} else {
				in_quote = fmt[i];
			}
			continue;
		case '%':
			if (!first || !in_quote)
				continue;
			i++;
			if (!fmt[i])
				return;
			switch (fmt[i]) {
			case '%':
				continue;
			case 'p':
				/* Find dereferencing fields */
				switch (fmt[i + 1]) {
				case 'B': case 'R': case 'r':
				case 'b': case 'M': case 'm':
				case 'I': case 'i': case 'E':
				case 'U': case 'V': case 'N':
				case 'a': case 'd': case 'D':
				case 'g': case 't': case 'C':
				case 'O': case 'f':
					if (WARN_ONCE(arg == 63,
						      "Too many args for event: %s",
						      trace_event_name(call)))
						return;
					dereference_flags |= 1ULL << arg;
				}
				break;
			default:
			{
				bool star = false;
				int j;

				/* Increment arg if %*s exists. */
				for (j = 0; fmt[i + j]; j++) {
					if (isdigit(fmt[i + j]) ||
					    fmt[i + j] == '.')
						continue;
					if (fmt[i + j] == '*') {
						star = true;
						continue;
					}
					if ((fmt[i + j] == 's') && star)
						arg++;
					break;
				}
				break;
			} /* default */

			} /* switch */
			arg++;
			continue;
		case '(':
			if (in_quote)
				continue;
			parens++;
			continue;
		case ')':
			if (in_quote)
				continue;
			parens--;
			if (WARN_ONCE(parens < 0,
				      "Paren mismatch for event: %s\narg='%s'\n%*s",
				      trace_event_name(call),
				      fmt + start_arg,
				      (i - start_arg) + 5, "^"))
				return;
			continue;
		case ',':
			if (in_quote || parens)
				continue;
			i++;
			while (isspace(fmt[i]))
				i++;
			start_arg = i;
			if (!(dereference_flags & (1ULL << arg)))
				goto next_arg;

			/* Find the REC-> in the argument */
			c = strchr(fmt + i, ',');
			r = strstr(fmt + i, "REC->");
			if (r && (!c || r < c)) {
				/*
				 * Addresses of fields within the event,
				 * or arrays stored in the event, are
				 * OK to dereference.
				 * There are ways to fool this, but
				 * it is meant to catch common mistakes,
				 * not malicious code.
				 */
				a = strchr(fmt + i, '&');
				if ((a && (a < r)) || test_field(r, call))
					dereference_flags &= ~(1ULL << arg);
			} else if ((r = strstr(fmt + i, "__get_dynamic_array(")) &&
				   (!c || r < c)) {
				dereference_flags &= ~(1ULL << arg);
			} else if ((r = strstr(fmt + i, "__get_sockaddr(")) &&
				   (!c || r < c)) {
				dereference_flags &= ~(1ULL << arg);
			}

		next_arg:
			i--;
			arg++;
		}
	}

	/*
	 * If you triggered the below warning, the trace event reported
	 * uses an unsafe dereference pointer %p*. As the data stored
	 * at the trace event time may no longer exist when the trace
	 * event is printed, dereferencing to the original source is
	 * unsafe. The source of the dereference must be copied into the
	 * event itself, and the dereference must access the copy instead.
	 */
	if (WARN_ON_ONCE(dereference_flags)) {
		arg = 1;
		while (!(dereference_flags & 1)) {
			dereference_flags >>= 1;
			arg++;
		}
		pr_warn("event %s has unsafe dereference of argument %d\n",
			trace_event_name(call), arg);
		pr_warn("print_fmt: %s\n", fmt);
	}
}

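/*
 * Illustrative print_fmt fragments for the check above (a sketch, not
 * strings taken from this file):
 *
 *   "ip=%pI4", &REC->saddr             - OK: the '&' means the data
 *                                        lives inside the recorded event
 *   "mac=%pM", REC->addr               - OK if 'addr' is an array field
 *                                        stored in the event
 *   "ip=%pI6c", __get_dynamic_array(addr) - OK: data copied into the event
 *   "mask=%pb", REC->maskp             - flagged: a raw pointer that may
 *                                        be stale by the time the event
 *                                        is printed
 */
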
int trace_event_raw_init(struct trace_event_call *call)
{
	int id;

	id = register_trace_event(&call->event);
	if (!id)
		return -ENODEV;

	test_event_printk(call);

	return 0;
}
EXPORT_SYMBOL_GPL(trace_event_raw_init);

bool trace_event_ignore_this_pid(struct trace_event_file *trace_file)
{
	struct trace_array *tr = trace_file->tr;
	struct trace_array_cpu *data;
	struct trace_pid_list *no_pid_list;
	struct trace_pid_list *pid_list;

	pid_list = rcu_dereference_raw(tr->filtered_pids);
	no_pid_list = rcu_dereference_raw(tr->filtered_no_pids);

	if (!pid_list && !no_pid_list)
		return false;

	data = this_cpu_ptr(tr->array_buffer.data);

	return data->ignore_pid;
}
EXPORT_SYMBOL_GPL(trace_event_ignore_this_pid);

void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer,
				 struct trace_event_file *trace_file,
				 unsigned long len)
{
	struct trace_event_call *event_call = trace_file->event_call;

	if ((trace_file->flags & EVENT_FILE_FL_PID_FILTER) &&
	    trace_event_ignore_this_pid(trace_file))
		return NULL;

	/*
	 * If CONFIG_PREEMPTION is enabled, then the tracepoint itself disables
	 * preemption (adding one to the preempt_count). Since we are
	 * interested in the preempt_count at the time the tracepoint was
	 * hit, we need to subtract one to offset the increment.
	 */
	fbuffer->trace_ctx = tracing_gen_ctx_dec();
	fbuffer->trace_file = trace_file;

	fbuffer->event =
		trace_event_buffer_lock_reserve(&fbuffer->buffer, trace_file,
						event_call->event.type, len,
						fbuffer->trace_ctx);
	if (!fbuffer->event)
		return NULL;

	fbuffer->regs = NULL;
	fbuffer->entry = ring_buffer_event_data(fbuffer->event);
	return fbuffer->entry;
}
EXPORT_SYMBOL_GPL(trace_event_buffer_reserve);
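
/*
 * Sketch of a caller (illustrative; this mirrors what the TRACE_EVENT()
 * generated probe functions do, with 'struct my_entry' and 'entry->foo'
 * standing in for real event types and fields):
 *
 *	struct trace_event_buffer fbuffer;
 *	struct my_entry *entry;
 *
 *	entry = trace_event_buffer_reserve(&fbuffer, trace_file,
 *					   sizeof(*entry));
 *	if (!entry)
 *		return;
 *	entry->foo = foo;		// fill in the event's fields
 *	trace_event_buffer_commit(&fbuffer);
 */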

int trace_event_reg(struct trace_event_call *call,
		    enum trace_reg type, void *data)
{
	struct trace_event_file *file = data;

	WARN_ON(!(call->flags & TRACE_EVENT_FL_TRACEPOINT));
	switch (type) {
	case TRACE_REG_REGISTER:
		return tracepoint_probe_register(call->tp,
						 call->class->probe,
						 file);
	case TRACE_REG_UNREGISTER:
		tracepoint_probe_unregister(call->tp,
					    call->class->probe,
					    file);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return tracepoint_probe_register(call->tp,
						 call->class->perf_probe,
						 call);
	case TRACE_REG_PERF_UNREGISTER:
		tracepoint_probe_unregister(call->tp,
					    call->class->perf_probe,
					    call);
		return 0;
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}
EXPORT_SYMBOL_GPL(trace_event_reg);

void trace_event_enable_cmd_record(bool enable)
{
	struct trace_event_file *file;
	struct trace_array *tr;

	lockdep_assert_held(&event_mutex);

	do_for_each_event_file(tr, file) {

		if (!(file->flags & EVENT_FILE_FL_ENABLED))
			continue;

		if (enable) {
			tracing_start_cmdline_record();
			set_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
		} else {
			tracing_stop_cmdline_record();
			clear_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
		}
	} while_for_each_event_file();
}

void trace_event_enable_tgid_record(bool enable)
{
	struct trace_event_file *file;
	struct trace_array *tr;

	lockdep_assert_held(&event_mutex);

	do_for_each_event_file(tr, file) {
		if (!(file->flags & EVENT_FILE_FL_ENABLED))
			continue;

		if (enable) {
			tracing_start_tgid_record();
			set_bit(EVENT_FILE_FL_RECORDED_TGID_BIT, &file->flags);
		} else {
			tracing_stop_tgid_record();
			clear_bit(EVENT_FILE_FL_RECORDED_TGID_BIT,
				  &file->flags);
		}
	} while_for_each_event_file();
}

static int __ftrace_event_enable_disable(struct trace_event_file *file,
					 int enable, int soft_disable)
{
	struct trace_event_call *call = file->event_call;
	struct trace_array *tr = file->tr;
	unsigned long file_flags = file->flags;
	int ret = 0;
	int disable;

	switch (enable) {
	case 0:
		/*
		 * When soft_disable is set and enable is cleared, the sm_ref
		 * reference counter is decremented. If it reaches 0, we want
		 * to clear the SOFT_DISABLED flag but leave the event in the
		 * state that it was. That is, if the event was enabled and
		 * SOFT_DISABLED isn't set, then do nothing. But if SOFT_DISABLED
		 * is set we do not want the event to be enabled before we
		 * clear the bit.
		 *
		 * When soft_disable is not set but the SOFT_MODE flag is,
		 * we do nothing. Do not disable the tracepoint, otherwise
		 * "soft enable"s (clearing the SOFT_DISABLED bit) won't work.
		 */
		if (soft_disable) {
			if (atomic_dec_return(&file->sm_ref) > 0)
				break;
			disable = file->flags & EVENT_FILE_FL_SOFT_DISABLED;
			clear_bit(EVENT_FILE_FL_SOFT_MODE_BIT, &file->flags);
		} else
			disable = !(file->flags & EVENT_FILE_FL_SOFT_MODE);

		if (disable && (file->flags & EVENT_FILE_FL_ENABLED)) {
			clear_bit(EVENT_FILE_FL_ENABLED_BIT, &file->flags);
			if (file->flags & EVENT_FILE_FL_RECORDED_CMD) {
				tracing_stop_cmdline_record();
				clear_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
			}

			if (file->flags & EVENT_FILE_FL_RECORDED_TGID) {
				tracing_stop_tgid_record();
				clear_bit(EVENT_FILE_FL_RECORDED_TGID_BIT, &file->flags);
			}

			call->class->reg(call, TRACE_REG_UNREGISTER, file);
		}
		/* If in SOFT_MODE, just set the SOFT_DISABLE_BIT, else clear it */
		if (file->flags & EVENT_FILE_FL_SOFT_MODE)
			set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
		else
			clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
		break;
	case 1:
		/*
		 * When soft_disable is set and enable is set, we want to
		 * register the tracepoint for the event, but leave the event
		 * as is. That means, if the event was already enabled, we do
		 * nothing (but set SOFT_MODE). If the event is disabled, we
		 * set SOFT_DISABLED before enabling the event tracepoint, so
		 * it still seems to be disabled.
		 */
		if (!soft_disable)
			clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
		else {
			if (atomic_inc_return(&file->sm_ref) > 1)
				break;
			set_bit(EVENT_FILE_FL_SOFT_MODE_BIT, &file->flags);
		}

		if (!(file->flags & EVENT_FILE_FL_ENABLED)) {
			bool cmd = false, tgid = false;

			/* Keep the event disabled, when going to SOFT_MODE. */
			if (soft_disable)
				set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);

			if (tr->trace_flags & TRACE_ITER_RECORD_CMD) {
				cmd = true;
				tracing_start_cmdline_record();
				set_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
			}

			if (tr->trace_flags & TRACE_ITER_RECORD_TGID) {
				tgid = true;
				tracing_start_tgid_record();
				set_bit(EVENT_FILE_FL_RECORDED_TGID_BIT, &file->flags);
			}

			ret = call->class->reg(call, TRACE_REG_REGISTER, file);
			if (ret) {
				if (cmd)
					tracing_stop_cmdline_record();
				if (tgid)
					tracing_stop_tgid_record();
				pr_info("event trace: Could not enable event %s\n",
					trace_event_name(call));
				break;
			}
			set_bit(EVENT_FILE_FL_ENABLED_BIT, &file->flags);

			/* WAS_ENABLED gets set but never cleared. */
			set_bit(EVENT_FILE_FL_WAS_ENABLED_BIT, &file->flags);
		}
		break;
	}

	/* Enable or disable use of trace_buffered_event */
	if ((file_flags & EVENT_FILE_FL_SOFT_DISABLED) !=
	    (file->flags & EVENT_FILE_FL_SOFT_DISABLED)) {
		if (file->flags & EVENT_FILE_FL_SOFT_DISABLED)
			trace_buffered_event_enable();
		else
			trace_buffered_event_disable();
	}

	return ret;
}
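
/*
 * In short (an illustrative summary, not a spec): "soft" references come
 * from users such as the "enable_event" trigger. While a trigger holds
 * sm_ref, the tracepoint stays registered and only the SOFT_DISABLED
 * bit flips, so triggers and direct writes to the "enable" file can
 * enable and disable the event independently of each other.
 */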

int trace_event_enable_disable(struct trace_event_file *file,
			       int enable, int soft_disable)
{
	return __ftrace_event_enable_disable(file, enable, soft_disable);
}

static int ftrace_event_enable_disable(struct trace_event_file *file,
				       int enable)
{
	return __ftrace_event_enable_disable(file, enable, 0);
}

static void ftrace_clear_events(struct trace_array *tr)
{
	struct trace_event_file *file;

	mutex_lock(&event_mutex);
	list_for_each_entry(file, &tr->events, list) {
		ftrace_event_enable_disable(file, 0);
	}
	mutex_unlock(&event_mutex);
}

static void
event_filter_pid_sched_process_exit(void *data, struct task_struct *task)
{
	struct trace_pid_list *pid_list;
	struct trace_array *tr = data;

	pid_list = rcu_dereference_raw(tr->filtered_pids);
	trace_filter_add_remove_task(pid_list, NULL, task);

	pid_list = rcu_dereference_raw(tr->filtered_no_pids);
	trace_filter_add_remove_task(pid_list, NULL, task);
}

static void
event_filter_pid_sched_process_fork(void *data,
				    struct task_struct *self,
				    struct task_struct *task)
{
	struct trace_pid_list *pid_list;
	struct trace_array *tr = data;

	pid_list = rcu_dereference_sched(tr->filtered_pids);
	trace_filter_add_remove_task(pid_list, self, task);

	pid_list = rcu_dereference_sched(tr->filtered_no_pids);
	trace_filter_add_remove_task(pid_list, self, task);
}

void trace_event_follow_fork(struct trace_array *tr, bool enable)
{
	if (enable) {
		register_trace_prio_sched_process_fork(event_filter_pid_sched_process_fork,
						       tr, INT_MIN);
		register_trace_prio_sched_process_free(event_filter_pid_sched_process_exit,
						       tr, INT_MAX);
	} else {
		unregister_trace_sched_process_fork(event_filter_pid_sched_process_fork,
						    tr);
		unregister_trace_sched_process_free(event_filter_pid_sched_process_exit,
						    tr);
	}
}

static void
event_filter_pid_sched_switch_probe_pre(void *data, bool preempt,
					struct task_struct *prev,
					struct task_struct *next,
					unsigned int prev_state)
{
	struct trace_array *tr = data;
	struct trace_pid_list *no_pid_list;
	struct trace_pid_list *pid_list;
	bool ret;

	pid_list = rcu_dereference_sched(tr->filtered_pids);
	no_pid_list = rcu_dereference_sched(tr->filtered_no_pids);

	/*
	 * Sched switch is funny, as we only want to ignore it
	 * in the notrace case if both prev and next should be ignored.
	 */
	ret = trace_ignore_this_task(NULL, no_pid_list, prev) &&
		trace_ignore_this_task(NULL, no_pid_list, next);

	this_cpu_write(tr->array_buffer.data->ignore_pid, ret ||
		       (trace_ignore_this_task(pid_list, NULL, prev) &&
			trace_ignore_this_task(pid_list, NULL, next)));
}

static void
event_filter_pid_sched_switch_probe_post(void *data, bool preempt,
					 struct task_struct *prev,
					 struct task_struct *next,
					 unsigned int prev_state)
{
	struct trace_array *tr = data;
	struct trace_pid_list *no_pid_list;
	struct trace_pid_list *pid_list;

	pid_list = rcu_dereference_sched(tr->filtered_pids);
	no_pid_list = rcu_dereference_sched(tr->filtered_no_pids);

	this_cpu_write(tr->array_buffer.data->ignore_pid,
		       trace_ignore_this_task(pid_list, no_pid_list, next));
}

static void
event_filter_pid_sched_wakeup_probe_pre(void *data, struct task_struct *task)
{
	struct trace_array *tr = data;
	struct trace_pid_list *no_pid_list;
	struct trace_pid_list *pid_list;

	/* Nothing to do if we are already tracing */
	if (!this_cpu_read(tr->array_buffer.data->ignore_pid))
		return;

	pid_list = rcu_dereference_sched(tr->filtered_pids);
	no_pid_list = rcu_dereference_sched(tr->filtered_no_pids);

	this_cpu_write(tr->array_buffer.data->ignore_pid,
		       trace_ignore_this_task(pid_list, no_pid_list, task));
}

static void
event_filter_pid_sched_wakeup_probe_post(void *data, struct task_struct *task)
{
	struct trace_array *tr = data;
	struct trace_pid_list *no_pid_list;
	struct trace_pid_list *pid_list;

	/* Nothing to do if we are not tracing */
	if (this_cpu_read(tr->array_buffer.data->ignore_pid))
		return;

	pid_list = rcu_dereference_sched(tr->filtered_pids);
	no_pid_list = rcu_dereference_sched(tr->filtered_no_pids);

	/* Set tracing if current is enabled */
	this_cpu_write(tr->array_buffer.data->ignore_pid,
		       trace_ignore_this_task(pid_list, no_pid_list, current));
}

static void unregister_pid_events(struct trace_array *tr)
{
	unregister_trace_sched_switch(event_filter_pid_sched_switch_probe_pre, tr);
	unregister_trace_sched_switch(event_filter_pid_sched_switch_probe_post, tr);

	unregister_trace_sched_wakeup(event_filter_pid_sched_wakeup_probe_pre, tr);
	unregister_trace_sched_wakeup(event_filter_pid_sched_wakeup_probe_post, tr);

	unregister_trace_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_pre, tr);
	unregister_trace_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_post, tr);

	unregister_trace_sched_waking(event_filter_pid_sched_wakeup_probe_pre, tr);
	unregister_trace_sched_waking(event_filter_pid_sched_wakeup_probe_post, tr);
}

static void __ftrace_clear_event_pids(struct trace_array *tr, int type)
{
	struct trace_pid_list *pid_list;
	struct trace_pid_list *no_pid_list;
	struct trace_event_file *file;
	int cpu;

	pid_list = rcu_dereference_protected(tr->filtered_pids,
					     lockdep_is_held(&event_mutex));
	no_pid_list = rcu_dereference_protected(tr->filtered_no_pids,
					     lockdep_is_held(&event_mutex));

	/* Make sure there's something to do */
	if (!pid_type_enabled(type, pid_list, no_pid_list))
		return;

	if (!still_need_pid_events(type, pid_list, no_pid_list)) {
		unregister_pid_events(tr);

		list_for_each_entry(file, &tr->events, list) {
			clear_bit(EVENT_FILE_FL_PID_FILTER_BIT, &file->flags);
		}

		for_each_possible_cpu(cpu)
			per_cpu_ptr(tr->array_buffer.data, cpu)->ignore_pid = false;
	}

	if (type & TRACE_PIDS)
		rcu_assign_pointer(tr->filtered_pids, NULL);

	if (type & TRACE_NO_PIDS)
		rcu_assign_pointer(tr->filtered_no_pids, NULL);

	/* Wait till all users are no longer using pid filtering */
	tracepoint_synchronize_unregister();

	if ((type & TRACE_PIDS) && pid_list)
		trace_pid_list_free(pid_list);

	if ((type & TRACE_NO_PIDS) && no_pid_list)
		trace_pid_list_free(no_pid_list);
}

static void ftrace_clear_event_pids(struct trace_array *tr, int type)
{
	mutex_lock(&event_mutex);
	__ftrace_clear_event_pids(tr, type);
	mutex_unlock(&event_mutex);
}

static void __put_system(struct event_subsystem *system)
{
	struct event_filter *filter = system->filter;

	WARN_ON_ONCE(system_refcount(system) == 0);
	if (system_refcount_dec(system))
		return;

	list_del(&system->list);

	if (filter) {
		kfree(filter->filter_string);
		kfree(filter);
	}
	kfree_const(system->name);
	kfree(system);
}

static void __get_system(struct event_subsystem *system)
{
	WARN_ON_ONCE(system_refcount(system) == 0);
	system_refcount_inc(system);
}

static void __get_system_dir(struct trace_subsystem_dir *dir)
{
	WARN_ON_ONCE(dir->ref_count == 0);
	dir->ref_count++;
	__get_system(dir->subsystem);
}

static void __put_system_dir(struct trace_subsystem_dir *dir)
{
	WARN_ON_ONCE(dir->ref_count == 0);
	/* If the subsystem is about to be freed, the dir must be too */
	WARN_ON_ONCE(system_refcount(dir->subsystem) == 1 && dir->ref_count != 1);

	__put_system(dir->subsystem);
	if (!--dir->ref_count)
		kfree(dir);
}

static void put_system(struct trace_subsystem_dir *dir)
{
	mutex_lock(&event_mutex);
	__put_system_dir(dir);
	mutex_unlock(&event_mutex);
}

static void remove_subsystem(struct trace_subsystem_dir *dir)
{
	if (!dir)
		return;

	if (!--dir->nr_events) {
		tracefs_remove(dir->entry);
		list_del(&dir->list);
		__put_system_dir(dir);
	}
}

static void remove_event_file_dir(struct trace_event_file *file)
{
	struct dentry *dir = file->dir;
	struct dentry *child;

	if (dir) {
		spin_lock(&dir->d_lock);	/* probably unneeded */
		list_for_each_entry(child, &dir->d_subdirs, d_child) {
			if (d_really_is_positive(child))	/* probably unneeded */
				d_inode(child)->i_private = NULL;
		}
		spin_unlock(&dir->d_lock);

		tracefs_remove(dir);
	}

	list_del(&file->list);
	remove_subsystem(file->system);
	free_event_filter(file->filter);
	kmem_cache_free(file_cachep, file);
}

/*
 * __ftrace_set_clr_event(tr, NULL, NULL, NULL, set) will set/unset all events.
 */
static int
__ftrace_set_clr_event_nolock(struct trace_array *tr, const char *match,
			      const char *sub, const char *event, int set)
{
	struct trace_event_file *file;
	struct trace_event_call *call;
	const char *name;
	int ret = -EINVAL;
	int eret = 0;

	list_for_each_entry(file, &tr->events, list) {

		call = file->event_call;
		name = trace_event_name(call);

		if (!name || !call->class || !call->class->reg)
			continue;

		if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
			continue;

		if (match &&
		    strcmp(match, name) != 0 &&
		    strcmp(match, call->class->system) != 0)
			continue;

		if (sub && strcmp(sub, call->class->system) != 0)
			continue;

		if (event && strcmp(event, name) != 0)
			continue;

		ret = ftrace_event_enable_disable(file, set);

		/*
		 * Save the first error and return that. Some events
		 * may still have been enabled, but let the user
		 * know that something went wrong.
		 */
		if (ret && !eret)
			eret = ret;

		ret = eret;
	}

	return ret;
}

static int __ftrace_set_clr_event(struct trace_array *tr, const char *match,
				  const char *sub, const char *event, int set)
{
	int ret;

	mutex_lock(&event_mutex);
	ret = __ftrace_set_clr_event_nolock(tr, match, sub, event, set);
	mutex_unlock(&event_mutex);

	return ret;
}

int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
{
	char *event = NULL, *sub = NULL, *match;
	int ret;

	if (!tr)
		return -ENOENT;
	/*
	 * The buf format can be <subsystem>:<event-name>
	 *  *:<event-name> means any event by that name.
	 *  :<event-name> is the same.
	 *
	 *  <subsystem>:* means all events in that subsystem
	 *  <subsystem>: means the same.
	 *
	 *  <name> (no ':') means all events in a subsystem with
	 *  the name <name> or any event that matches <name>
	 */
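	/*
	 * Examples (illustrative): "sched:sched_switch" names one event,
	 * "sched:" or "sched:*" covers the whole sched subsystem, and a
	 * bare "kmalloc" matches any event or subsystem of that name.
	 */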

	match = strsep(&buf, ":");
	if (buf) {
		sub = match;
		event = buf;
		match = NULL;

		if (!strlen(sub) || strcmp(sub, "*") == 0)
			sub = NULL;
		if (!strlen(event) || strcmp(event, "*") == 0)
			event = NULL;
	}

	ret = __ftrace_set_clr_event(tr, match, sub, event, set);

	/* Put back the colon to allow this to be called again */
	if (buf)
		*(buf - 1) = ':';

	return ret;
}

/**
 * trace_set_clr_event - enable or disable an event
 * @system: system name to match (NULL for any system)
 * @event: event name to match (NULL for all events, within system)
 * @set: 1 to enable, 0 to disable
 *
 * This is a way for other parts of the kernel to enable or disable
 * event recording.
 *
 * Returns 0 on success, -EINVAL if the parameters do not match any
 * registered events.
 */
int trace_set_clr_event(const char *system, const char *event, int set)
{
	struct trace_array *tr = top_trace_array();

	if (!tr)
		return -ENODEV;

	return __ftrace_set_clr_event(tr, NULL, system, event, set);
}
EXPORT_SYMBOL_GPL(trace_set_clr_event);
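
/*
 * Illustrative in-kernel usage (a sketch, not a call site in this file):
 *
 *	// turn sched_switch recording on, then off again
 *	trace_set_clr_event("sched", "sched_switch", 1);
 *	...
 *	trace_set_clr_event("sched", "sched_switch", 0);
 */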

/**
 * trace_array_set_clr_event - enable or disable an event for a trace array.
 * @tr: concerned trace array.
 * @system: system name to match (NULL for any system)
 * @event: event name to match (NULL for all events, within system)
 * @enable: true to enable, false to disable
 *
 * This is a way for other parts of the kernel to enable or disable
 * event recording.
 *
 * Returns 0 on success, -EINVAL if the parameters do not match any
 * registered events.
 */
int trace_array_set_clr_event(struct trace_array *tr, const char *system,
		const char *event, bool enable)
{
	int set;

	if (!tr)
		return -ENOENT;

	set = (enable == true) ? 1 : 0;
	return __ftrace_set_clr_event(tr, NULL, system, event, set);
}
EXPORT_SYMBOL_GPL(trace_array_set_clr_event);

/* 128 should be much more than enough */
#define EVENT_BUF_SIZE		127

static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	struct seq_file *m = file->private_data;
	struct trace_array *tr = m->private;
	ssize_t read, ret;

	if (!cnt)
		return 0;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
		return -ENOMEM;

	read = trace_get_user(&parser, ubuf, cnt, ppos);

	if (read >= 0 && trace_parser_loaded((&parser))) {
		int set = 1;

		if (*parser.buffer == '!')
			set = 0;

		ret = ftrace_set_clr_event(tr, parser.buffer + !set, set);
		if (ret)
			goto out_put;
	}

	ret = read;

 out_put:
	trace_parser_put(&parser);

	return ret;
}
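
/*
 * Illustrative shell usage of the write handler above: a leading '!'
 * clears events instead of setting them, e.g.
 *
 *   # echo 'sched:sched_switch' > /sys/kernel/tracing/set_event
 *   # echo '!sched:sched_switch' >> /sys/kernel/tracing/set_event
 */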

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_event_file *file = v;
	struct trace_event_call *call;
	struct trace_array *tr = m->private;

	(*pos)++;

	list_for_each_entry_continue(file, &tr->events, list) {
		call = file->event_call;
		/*
		 * The ftrace subsystem is for showing formats only.
		 * They cannot be enabled or disabled via the event files.
		 */
		if (call->class && call->class->reg &&
		    !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
			return file;
	}

	return NULL;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct trace_event_file *file;
	struct trace_array *tr = m->private;
	loff_t l;

	mutex_lock(&event_mutex);

	file = list_entry(&tr->events, struct trace_event_file, list);
	for (l = 0; l <= *pos; ) {
		file = t_next(m, file, &l);
		if (!file)
			break;
	}
	return file;
}

static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_event_file *file = v;
	struct trace_array *tr = m->private;

	(*pos)++;

	list_for_each_entry_continue(file, &tr->events, list) {
		if (file->flags & EVENT_FILE_FL_ENABLED)
			return file;
	}

	return NULL;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct trace_event_file *file;
	struct trace_array *tr = m->private;
	loff_t l;

	mutex_lock(&event_mutex);

	file = list_entry(&tr->events, struct trace_event_file, list);
	for (l = 0; l <= *pos; ) {
		file = s_next(m, file, &l);
		if (!file)
			break;
	}
	return file;
}

static int t_show(struct seq_file *m, void *v)
{
	struct trace_event_file *file = v;
	struct trace_event_call *call = file->event_call;

	if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
		seq_printf(m, "%s:", call->class->system);
	seq_printf(m, "%s\n", trace_event_name(call));

	return 0;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&event_mutex);
}

static void *
__next(struct seq_file *m, void *v, loff_t *pos, int type)
{
	struct trace_array *tr = m->private;
	struct trace_pid_list *pid_list;

	if (type == TRACE_PIDS)
		pid_list = rcu_dereference_sched(tr->filtered_pids);
	else
		pid_list = rcu_dereference_sched(tr->filtered_no_pids);

	return trace_pid_next(pid_list, v, pos);
}

static void *
p_next(struct seq_file *m, void *v, loff_t *pos)
{
	return __next(m, v, pos, TRACE_PIDS);
}

static void *
np_next(struct seq_file *m, void *v, loff_t *pos)
{
	return __next(m, v, pos, TRACE_NO_PIDS);
}

static void *__start(struct seq_file *m, loff_t *pos, int type)
	__acquires(RCU)
{
	struct trace_pid_list *pid_list;
	struct trace_array *tr = m->private;

	/*
	 * Grab the mutex so that calls to p_next() see the same
	 * tr->filtered_pids that p_start() saw.
	 * If we just passed the tr->filtered_pids around, then RCU would
	 * have been enough, but doing that makes things more complex.
	 */
	mutex_lock(&event_mutex);
	rcu_read_lock_sched();

	if (type == TRACE_PIDS)
		pid_list = rcu_dereference_sched(tr->filtered_pids);
	else
		pid_list = rcu_dereference_sched(tr->filtered_no_pids);

	if (!pid_list)
		return NULL;

	return trace_pid_start(pid_list, pos);
}

static void *p_start(struct seq_file *m, loff_t *pos)
	__acquires(RCU)
{
	return __start(m, pos, TRACE_PIDS);
}

static void *np_start(struct seq_file *m, loff_t *pos)
	__acquires(RCU)
{
	return __start(m, pos, TRACE_NO_PIDS);
}

static void p_stop(struct seq_file *m, void *p)
	__releases(RCU)
{
	rcu_read_unlock_sched();
	mutex_unlock(&event_mutex);
}

static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct trace_event_file *file;
	unsigned long flags;
	char buf[4] = "0";

	mutex_lock(&event_mutex);
	file = event_file_data(filp);
	if (likely(file))
		flags = file->flags;
	mutex_unlock(&event_mutex);

	if (!file)
		return -ENODEV;

	if (flags & EVENT_FILE_FL_ENABLED &&
	    !(flags & EVENT_FILE_FL_SOFT_DISABLED))
		strcpy(buf, "1");

	if (flags & EVENT_FILE_FL_SOFT_DISABLED ||
	    flags & EVENT_FILE_FL_SOFT_MODE)
		strcat(buf, "*");

	strcat(buf, "\n");

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, strlen(buf));
}
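
/*
 * The "enable" file therefore reads back "0", "1", "0*" or "1*", where
 * the '*' marks soft mode. For example (illustrative):
 *
 *   # cat events/sched/sched_switch/enable
 *   0*
 */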

static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct trace_event_file *file;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	switch (val) {
	case 0:
	case 1:
		ret = -ENODEV;
		mutex_lock(&event_mutex);
		file = event_file_data(filp);
		if (likely(file))
			ret = ftrace_event_enable_disable(file, val);
		mutex_unlock(&event_mutex);
		break;

	default:
		return -EINVAL;
	}

	*ppos += cnt;

	return ret ? ret : cnt;
}

static ssize_t
system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	const char set_to_char[4] = { '?', '0', '1', 'X' };
	struct trace_subsystem_dir *dir = filp->private_data;
	struct event_subsystem *system = dir->subsystem;
	struct trace_event_call *call;
	struct trace_event_file *file;
	struct trace_array *tr = dir->tr;
	char buf[2];
	int set = 0;
	int ret;

	mutex_lock(&event_mutex);
	list_for_each_entry(file, &tr->events, list) {
		call = file->event_call;
		if ((call->flags & TRACE_EVENT_FL_IGNORE_ENABLE) ||
		    !trace_event_name(call) || !call->class || !call->class->reg)
			continue;

		if (system && strcmp(call->class->system, system->name) != 0)
			continue;

		/*
		 * We need to find out if all the events are set
		 * or if all events are cleared, or if we have
		 * a mixture.
		 */
		set |= (1 << !!(file->flags & EVENT_FILE_FL_ENABLED));

		/*
		 * If we have a mixture, no need to look further.
		 */
		if (set == 3)
			break;
	}
	mutex_unlock(&event_mutex);

	buf[0] = set_to_char[set];
	buf[1] = '\n';

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);

	return ret;
}
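
/*
 * Worked example for the bitmask above (illustrative): enabled events
 * set bit 1, disabled events set bit 0. All enabled gives set == 2 and
 * the file reads "1"; all disabled gives set == 1 and reads "0"; a mix
 * gives set == 3 and reads "X"; no matching events leaves set == 0 and
 * reads "?".
 */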
1487
1488 static ssize_t
1489 system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
1490                     loff_t *ppos)
1491 {
1492         struct trace_subsystem_dir *dir = filp->private_data;
1493         struct event_subsystem *system = dir->subsystem;
1494         const char *name = NULL;
1495         unsigned long val;
1496         ssize_t ret;
1497
1498         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
1499         if (ret)
1500                 return ret;
1501
1502         ret = tracing_update_buffers();
1503         if (ret < 0)
1504                 return ret;
1505
1506         if (val != 0 && val != 1)
1507                 return -EINVAL;
1508
1509         /*
1510          * Opening of "enable" adds a ref count to system,
1511          * so the name is safe to use.
1512          */
1513         if (system)
1514                 name = system->name;
1515
1516         ret = __ftrace_set_clr_event(dir->tr, NULL, name, NULL, val);
1517         if (ret)
1518                 goto out;
1519
1520         ret = cnt;
1521
1522 out:
1523         *ppos += cnt;
1524
1525         return ret;
1526 }
1527
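/*
 * Editor's note, illustrative only: system_enable_write() backs the
 * per-system "enable" file; __ftrace_set_clr_event() is called with a
 * NULL event name so the value applies to every event of that system,
 * or to every system when dir->subsystem is NULL (the top-level
 * "enable" file served by ftrace_tr_enable_fops below).  Assuming the
 * default tracefs mount point:
 *
 *      # echo 1 > /sys/kernel/tracing/events/sched/enable    # one system
 *      # echo 0 > /sys/kernel/tracing/events/enable          # everything
 */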
1528 enum {
1529         FORMAT_HEADER           = 1,
1530         FORMAT_FIELD_SEPARATOR  = 2,
1531         FORMAT_PRINTFMT         = 3,
1532 };
1533
1534 static void *f_next(struct seq_file *m, void *v, loff_t *pos)
1535 {
1536         struct trace_event_call *call = event_file_data(m->private);
1537         struct list_head *common_head = &ftrace_common_fields;
1538         struct list_head *head = trace_get_fields(call);
1539         struct list_head *node = v;
1540
1541         (*pos)++;
1542
1543         switch ((unsigned long)v) {
1544         case FORMAT_HEADER:
1545                 node = common_head;
1546                 break;
1547
1548         case FORMAT_FIELD_SEPARATOR:
1549                 node = head;
1550                 break;
1551
1552         case FORMAT_PRINTFMT:
1553                 /* all done */
1554                 return NULL;
1555         }
1556
1557         node = node->prev;
1558         if (node == common_head)
1559                 return (void *)FORMAT_FIELD_SEPARATOR;
1560         else if (node == head)
1561                 return (void *)FORMAT_PRINTFMT;
1562         else
1563                 return node;
1564 }
1565
1566 static int f_show(struct seq_file *m, void *v)
1567 {
1568         struct trace_event_call *call = event_file_data(m->private);
1569         struct ftrace_event_field *field;
1570         const char *array_descriptor;
1571
1572         switch ((unsigned long)v) {
1573         case FORMAT_HEADER:
1574                 seq_printf(m, "name: %s\n", trace_event_name(call));
1575                 seq_printf(m, "ID: %d\n", call->event.type);
1576                 seq_puts(m, "format:\n");
1577                 return 0;
1578
1579         case FORMAT_FIELD_SEPARATOR:
1580                 seq_putc(m, '\n');
1581                 return 0;
1582
1583         case FORMAT_PRINTFMT:
1584                 seq_printf(m, "\nprint fmt: %s\n",
1585                            call->print_fmt);
1586                 return 0;
1587         }
1588
1589         field = list_entry(v, struct ftrace_event_field, link);
1590         /*
1591          * Smartly shows the array type (except dynamic array).
1592          * Normal:
1593          *      field:TYPE VAR
1594          * If TYPE := TYPE[LEN], it is shown:
1595          *      field:TYPE VAR[LEN]
1596          */
1597         array_descriptor = strchr(field->type, '[');
1598
1599         if (str_has_prefix(field->type, "__data_loc"))
1600                 array_descriptor = NULL;
1601
1602         if (!array_descriptor)
1603                 seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
1604                            field->type, field->name, field->offset,
1605                            field->size, !!field->is_signed);
1606         else if (field->len)
1607                 seq_printf(m, "\tfield:%.*s %s[%d];\toffset:%u;\tsize:%u;\tsigned:%d;\n",
1608                            (int)(array_descriptor - field->type),
1609                            field->type, field->name,
1610                            field->len, field->offset,
1611                            field->size, !!field->is_signed);
1612         else
1613                 seq_printf(m, "\tfield:%.*s %s[];\toffset:%u;\tsize:%u;\tsigned:%d;\n",
1614                                 (int)(array_descriptor - field->type),
1615                                 field->type, field->name,
1616                                 field->offset, field->size, !!field->is_signed);
1617
1618         return 0;
1619 }
1620
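/*
 * Editor's note, illustrative only: the seq operations above render an
 * event's "format" file.  Hypothetical output, with the common fields
 * first, then the separator blank line, the event's own fields, and
 * finally the print format:
 *
 *      name: sample_event
 *      ID: 309
 *      format:
 *              field:unsigned short common_type;       offset:0;       size:2; signed:0;
 *              (...remaining common fields...)
 *
 *              field:char comm[16];    offset:8;       size:16;        signed:0;
 *
 *      print fmt: "comm=%s", REC->comm
 */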
1621 static void *f_start(struct seq_file *m, loff_t *pos)
1622 {
1623         void *p = (void *)FORMAT_HEADER;
1624         loff_t l = 0;
1625
1626         /* ->stop() is called even if ->start() fails */
1627         mutex_lock(&event_mutex);
1628         if (!event_file_data(m->private))
1629                 return ERR_PTR(-ENODEV);
1630
1631         while (l < *pos && p)
1632                 p = f_next(m, p, &l);
1633
1634         return p;
1635 }
1636
1637 static void f_stop(struct seq_file *m, void *p)
1638 {
1639         mutex_unlock(&event_mutex);
1640 }
1641
1642 static const struct seq_operations trace_format_seq_ops = {
1643         .start          = f_start,
1644         .next           = f_next,
1645         .stop           = f_stop,
1646         .show           = f_show,
1647 };
1648
1649 static int trace_format_open(struct inode *inode, struct file *file)
1650 {
1651         struct seq_file *m;
1652         int ret;
1653
1654         /* Do we want to hide event format files on tracefs lockdown? */
1655
1656         ret = seq_open(file, &trace_format_seq_ops);
1657         if (ret < 0)
1658                 return ret;
1659
1660         m = file->private_data;
1661         m->private = file;
1662
1663         return 0;
1664 }
1665
1666 static ssize_t
1667 event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
1668 {
1669         int id = (long)event_file_data(filp);
1670         char buf[32];
1671         int len;
1672
1673         if (unlikely(!id))
1674                 return -ENODEV;
1675
1676         len = sprintf(buf, "%d\n", id);
1677
1678         return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
1679 }
1680
1681 static ssize_t
1682 event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
1683                   loff_t *ppos)
1684 {
1685         struct trace_event_file *file;
1686         struct trace_seq *s;
1687         int r = -ENODEV;
1688
1689         if (*ppos)
1690                 return 0;
1691
1692         s = kmalloc(sizeof(*s), GFP_KERNEL);
1693
1694         if (!s)
1695                 return -ENOMEM;
1696
1697         trace_seq_init(s);
1698
1699         mutex_lock(&event_mutex);
1700         file = event_file_data(filp);
1701         if (file)
1702                 print_event_filter(file, s);
1703         mutex_unlock(&event_mutex);
1704
1705         if (file)
1706                 r = simple_read_from_buffer(ubuf, cnt, ppos,
1707                                             s->buffer, trace_seq_used(s));
1708
1709         kfree(s);
1710
1711         return r;
1712 }
1713
1714 static ssize_t
1715 event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
1716                    loff_t *ppos)
1717 {
1718         struct trace_event_file *file;
1719         char *buf;
1720         int err = -ENODEV;
1721
1722         if (cnt >= PAGE_SIZE)
1723                 return -EINVAL;
1724
1725         buf = memdup_user_nul(ubuf, cnt);
1726         if (IS_ERR(buf))
1727                 return PTR_ERR(buf);
1728
1729         mutex_lock(&event_mutex);
1730         file = event_file_data(filp);
1731         if (file)
1732                 err = apply_event_filter(file, buf);
1733         mutex_unlock(&event_mutex);
1734
1735         kfree(buf);
1736         if (err < 0)
1737                 return err;
1738
1739         *ppos += cnt;
1740
1741         return cnt;
1742 }
1743
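/*
 * Editor's note, illustrative only: event_filter_write() feeds the
 * per-event "filter" file; apply_event_filter() parses the predicate
 * expression.  Assuming the default tracefs mount point:
 *
 *      # echo 'prev_pid != 0 && next_comm ~ "bash*"' > \
 *              /sys/kernel/tracing/events/sched/sched_switch/filter
 *      # echo 0 > /sys/kernel/tracing/events/sched/sched_switch/filter
 *
 * where writing "0" clears the filter again.
 */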
1744 static LIST_HEAD(event_subsystems);
1745
1746 static int subsystem_open(struct inode *inode, struct file *filp)
1747 {
1748         struct trace_subsystem_dir *dir = NULL, *iter_dir;
1749         struct trace_array *tr = NULL, *iter_tr;
1750         struct event_subsystem *system = NULL;
1751         int ret;
1752
1753         if (tracing_is_disabled())
1754                 return -ENODEV;
1755
1756         /* Make sure the system still exists */
1757         mutex_lock(&event_mutex);
1758         mutex_lock(&trace_types_lock);
1759         list_for_each_entry(iter_tr, &ftrace_trace_arrays, list) {
1760                 list_for_each_entry(iter_dir, &iter_tr->systems, list) {
1761                         if (iter_dir == inode->i_private) {
1762                                 /* Don't open systems with no events */
1763                                 tr = iter_tr;
1764                                 dir = iter_dir;
1765                                 if (dir->nr_events) {
1766                                         __get_system_dir(dir);
1767                                         system = dir->subsystem;
1768                                 }
1769                                 goto exit_loop;
1770                         }
1771                 }
1772         }
1773  exit_loop:
1774         mutex_unlock(&trace_types_lock);
1775         mutex_unlock(&event_mutex);
1776
1777         if (!system)
1778                 return -ENODEV;
1779
1780         /* Still need to increment the ref count of the system */
1781         if (trace_array_get(tr) < 0) {
1782                 put_system(dir);
1783                 return -ENODEV;
1784         }
1785
1786         ret = tracing_open_generic(inode, filp);
1787         if (ret < 0) {
1788                 trace_array_put(tr);
1789                 put_system(dir);
1790         }
1791
1792         return ret;
1793 }
1794
1795 static int system_tr_open(struct inode *inode, struct file *filp)
1796 {
1797         struct trace_subsystem_dir *dir;
1798         struct trace_array *tr = inode->i_private;
1799         int ret;
1800
1801         /* Make a temporary dir that has no system but points to tr */
1802         dir = kzalloc(sizeof(*dir), GFP_KERNEL);
1803         if (!dir)
1804                 return -ENOMEM;
1805
1806         ret = tracing_open_generic_tr(inode, filp);
1807         if (ret < 0) {
1808                 kfree(dir);
1809                 return ret;
1810         }
1811         dir->tr = tr;
1812         filp->private_data = dir;
1813
1814         return 0;
1815 }
1816
1817 static int subsystem_release(struct inode *inode, struct file *file)
1818 {
1819         struct trace_subsystem_dir *dir = file->private_data;
1820
1821         trace_array_put(dir->tr);
1822
1823         /*
1824          * If dir->subsystem is NULL, then this is a temporary
1825          * descriptor that was made for a trace_array to enable
1826          * all subsystems.
1827          */
1828         if (dir->subsystem)
1829                 put_system(dir);
1830         else
1831                 kfree(dir);
1832
1833         return 0;
1834 }
1835
1836 static ssize_t
1837 subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
1838                       loff_t *ppos)
1839 {
1840         struct trace_subsystem_dir *dir = filp->private_data;
1841         struct event_subsystem *system = dir->subsystem;
1842         struct trace_seq *s;
1843         int r;
1844
1845         if (*ppos)
1846                 return 0;
1847
1848         s = kmalloc(sizeof(*s), GFP_KERNEL);
1849         if (!s)
1850                 return -ENOMEM;
1851
1852         trace_seq_init(s);
1853
1854         print_subsystem_event_filter(system, s);
1855         r = simple_read_from_buffer(ubuf, cnt, ppos,
1856                                     s->buffer, trace_seq_used(s));
1857
1858         kfree(s);
1859
1860         return r;
1861 }
1862
1863 static ssize_t
1864 subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
1865                        loff_t *ppos)
1866 {
1867         struct trace_subsystem_dir *dir = filp->private_data;
1868         char *buf;
1869         int err;
1870
1871         if (cnt >= PAGE_SIZE)
1872                 return -EINVAL;
1873
1874         buf = memdup_user_nul(ubuf, cnt);
1875         if (IS_ERR(buf))
1876                 return PTR_ERR(buf);
1877
1878         err = apply_subsystem_event_filter(dir, buf);
1879         kfree(buf);
1880         if (err < 0)
1881                 return err;
1882
1883         *ppos += cnt;
1884
1885         return cnt;
1886 }
1887
1888 static ssize_t
1889 show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
1890 {
1891         int (*func)(struct trace_seq *s) = filp->private_data;
1892         struct trace_seq *s;
1893         int r;
1894
1895         if (*ppos)
1896                 return 0;
1897
1898         s = kmalloc(sizeof(*s), GFP_KERNEL);
1899         if (!s)
1900                 return -ENOMEM;
1901
1902         trace_seq_init(s);
1903
1904         func(s);
1905         r = simple_read_from_buffer(ubuf, cnt, ppos,
1906                                     s->buffer, trace_seq_used(s));
1907
1908         kfree(s);
1909
1910         return r;
1911 }
1912
1913 static void ignore_task_cpu(void *data)
1914 {
1915         struct trace_array *tr = data;
1916         struct trace_pid_list *pid_list;
1917         struct trace_pid_list *no_pid_list;
1918
1919         /*
1920          * This function is called by on_each_cpu() while the
1921          * event_mutex is held.
1922          */
1923         pid_list = rcu_dereference_protected(tr->filtered_pids,
1924                                              mutex_is_locked(&event_mutex));
1925         no_pid_list = rcu_dereference_protected(tr->filtered_no_pids,
1926                                              mutex_is_locked(&event_mutex));
1927
1928         this_cpu_write(tr->array_buffer.data->ignore_pid,
1929                        trace_ignore_this_task(pid_list, no_pid_list, current));
1930 }
1931
1932 static void register_pid_events(struct trace_array *tr)
1933 {
1934         /*
1935          * Register a probe that is called before all other probes
1936          * to set ignore_pid if next or prev do not match.
1937          * Register a probe that is called after all other probes
1938          * to only keep ignore_pid set if next pid matches.
1939          */
1940         register_trace_prio_sched_switch(event_filter_pid_sched_switch_probe_pre,
1941                                          tr, INT_MAX);
1942         register_trace_prio_sched_switch(event_filter_pid_sched_switch_probe_post,
1943                                          tr, 0);
1944
1945         register_trace_prio_sched_wakeup(event_filter_pid_sched_wakeup_probe_pre,
1946                                          tr, INT_MAX);
1947         register_trace_prio_sched_wakeup(event_filter_pid_sched_wakeup_probe_post,
1948                                          tr, 0);
1949
1950         register_trace_prio_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_pre,
1951                                              tr, INT_MAX);
1952         register_trace_prio_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_post,
1953                                              tr, 0);
1954
1955         register_trace_prio_sched_waking(event_filter_pid_sched_wakeup_probe_pre,
1956                                          tr, INT_MAX);
1957         register_trace_prio_sched_waking(event_filter_pid_sched_wakeup_probe_post,
1958                                          tr, 0);
1959 }
1960
1961 static ssize_t
1962 event_pid_write(struct file *filp, const char __user *ubuf,
1963                 size_t cnt, loff_t *ppos, int type)
1964 {
1965         struct seq_file *m = filp->private_data;
1966         struct trace_array *tr = m->private;
1967         struct trace_pid_list *filtered_pids = NULL;
1968         struct trace_pid_list *other_pids = NULL;
1969         struct trace_pid_list *pid_list;
1970         struct trace_event_file *file;
1971         ssize_t ret;
1972
1973         if (!cnt)
1974                 return 0;
1975
1976         ret = tracing_update_buffers();
1977         if (ret < 0)
1978                 return ret;
1979
1980         mutex_lock(&event_mutex);
1981
1982         if (type == TRACE_PIDS) {
1983                 filtered_pids = rcu_dereference_protected(tr->filtered_pids,
1984                                                           lockdep_is_held(&event_mutex));
1985                 other_pids = rcu_dereference_protected(tr->filtered_no_pids,
1986                                                           lockdep_is_held(&event_mutex));
1987         } else {
1988                 filtered_pids = rcu_dereference_protected(tr->filtered_no_pids,
1989                                                           lockdep_is_held(&event_mutex));
1990                 other_pids = rcu_dereference_protected(tr->filtered_pids,
1991                                                           lockdep_is_held(&event_mutex));
1992         }
1993
1994         ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt);
1995         if (ret < 0)
1996                 goto out;
1997
1998         if (type == TRACE_PIDS)
1999                 rcu_assign_pointer(tr->filtered_pids, pid_list);
2000         else
2001                 rcu_assign_pointer(tr->filtered_no_pids, pid_list);
2002
2003         list_for_each_entry(file, &tr->events, list) {
2004                 set_bit(EVENT_FILE_FL_PID_FILTER_BIT, &file->flags);
2005         }
2006
2007         if (filtered_pids) {
2008                 tracepoint_synchronize_unregister();
2009                 trace_pid_list_free(filtered_pids);
2010         } else if (pid_list && !other_pids) {
2011                 register_pid_events(tr);
2012         }
2013
2014         /*
2015          * Ignoring of pids is done at task switch. But we have to
2016          * check for those tasks that are currently running.
2017          * Always do this in case a pid was appended or removed.
2018          */
2019         on_each_cpu(ignore_task_cpu, tr, 1);
2020
2021  out:
2022         mutex_unlock(&event_mutex);
2023
2024         if (ret > 0)
2025                 *ppos += ret;
2026
2027         return ret;
2028 }
2029
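/*
 * Editor's note, illustrative only: event_pid_write() backs both
 * "set_event_pid" (TRACE_PIDS) and "set_event_notrace_pid"
 * (TRACE_NO_PIDS) in the tracefs instance directory.  A truncating
 * open first clears the old list (see ftrace_event_set_pid_open()
 * below), while appending extends it:
 *
 *      # echo 1234 > /sys/kernel/tracing/set_event_pid    # only pid 1234
 *      # echo 5678 >> /sys/kernel/tracing/set_event_pid   # 1234 and 5678
 */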
2030 static ssize_t
2031 ftrace_event_pid_write(struct file *filp, const char __user *ubuf,
2032                        size_t cnt, loff_t *ppos)
2033 {
2034         return event_pid_write(filp, ubuf, cnt, ppos, TRACE_PIDS);
2035 }
2036
2037 static ssize_t
2038 ftrace_event_npid_write(struct file *filp, const char __user *ubuf,
2039                         size_t cnt, loff_t *ppos)
2040 {
2041         return event_pid_write(filp, ubuf, cnt, ppos, TRACE_NO_PIDS);
2042 }
2043
2044 static int ftrace_event_avail_open(struct inode *inode, struct file *file);
2045 static int ftrace_event_set_open(struct inode *inode, struct file *file);
2046 static int ftrace_event_set_pid_open(struct inode *inode, struct file *file);
2047 static int ftrace_event_set_npid_open(struct inode *inode, struct file *file);
2048 static int ftrace_event_release(struct inode *inode, struct file *file);
2049
2050 static const struct seq_operations show_event_seq_ops = {
2051         .start = t_start,
2052         .next = t_next,
2053         .show = t_show,
2054         .stop = t_stop,
2055 };
2056
2057 static const struct seq_operations show_set_event_seq_ops = {
2058         .start = s_start,
2059         .next = s_next,
2060         .show = t_show,
2061         .stop = t_stop,
2062 };
2063
2064 static const struct seq_operations show_set_pid_seq_ops = {
2065         .start = p_start,
2066         .next = p_next,
2067         .show = trace_pid_show,
2068         .stop = p_stop,
2069 };
2070
2071 static const struct seq_operations show_set_no_pid_seq_ops = {
2072         .start = np_start,
2073         .next = np_next,
2074         .show = trace_pid_show,
2075         .stop = p_stop,
2076 };
2077
2078 static const struct file_operations ftrace_avail_fops = {
2079         .open = ftrace_event_avail_open,
2080         .read = seq_read,
2081         .llseek = seq_lseek,
2082         .release = seq_release,
2083 };
2084
2085 static const struct file_operations ftrace_set_event_fops = {
2086         .open = ftrace_event_set_open,
2087         .read = seq_read,
2088         .write = ftrace_event_write,
2089         .llseek = seq_lseek,
2090         .release = ftrace_event_release,
2091 };
2092
2093 static const struct file_operations ftrace_set_event_pid_fops = {
2094         .open = ftrace_event_set_pid_open,
2095         .read = seq_read,
2096         .write = ftrace_event_pid_write,
2097         .llseek = seq_lseek,
2098         .release = ftrace_event_release,
2099 };
2100
2101 static const struct file_operations ftrace_set_event_notrace_pid_fops = {
2102         .open = ftrace_event_set_npid_open,
2103         .read = seq_read,
2104         .write = ftrace_event_npid_write,
2105         .llseek = seq_lseek,
2106         .release = ftrace_event_release,
2107 };
2108
2109 static const struct file_operations ftrace_enable_fops = {
2110         .open = tracing_open_generic,
2111         .read = event_enable_read,
2112         .write = event_enable_write,
2113         .llseek = default_llseek,
2114 };
2115
2116 static const struct file_operations ftrace_event_format_fops = {
2117         .open = trace_format_open,
2118         .read = seq_read,
2119         .llseek = seq_lseek,
2120         .release = seq_release,
2121 };
2122
2123 static const struct file_operations ftrace_event_id_fops = {
2124         .read = event_id_read,
2125         .llseek = default_llseek,
2126 };
2127
2128 static const struct file_operations ftrace_event_filter_fops = {
2129         .open = tracing_open_generic,
2130         .read = event_filter_read,
2131         .write = event_filter_write,
2132         .llseek = default_llseek,
2133 };
2134
2135 static const struct file_operations ftrace_subsystem_filter_fops = {
2136         .open = subsystem_open,
2137         .read = subsystem_filter_read,
2138         .write = subsystem_filter_write,
2139         .llseek = default_llseek,
2140         .release = subsystem_release,
2141 };
2142
2143 static const struct file_operations ftrace_system_enable_fops = {
2144         .open = subsystem_open,
2145         .read = system_enable_read,
2146         .write = system_enable_write,
2147         .llseek = default_llseek,
2148         .release = subsystem_release,
2149 };
2150
2151 static const struct file_operations ftrace_tr_enable_fops = {
2152         .open = system_tr_open,
2153         .read = system_enable_read,
2154         .write = system_enable_write,
2155         .llseek = default_llseek,
2156         .release = subsystem_release,
2157 };
2158
2159 static const struct file_operations ftrace_show_header_fops = {
2160         .open = tracing_open_generic,
2161         .read = show_header,
2162         .llseek = default_llseek,
2163 };
2164
2165 static int
2166 ftrace_event_open(struct inode *inode, struct file *file,
2167                   const struct seq_operations *seq_ops)
2168 {
2169         struct seq_file *m;
2170         int ret;
2171
2172         ret = security_locked_down(LOCKDOWN_TRACEFS);
2173         if (ret)
2174                 return ret;
2175
2176         ret = seq_open(file, seq_ops);
2177         if (ret < 0)
2178                 return ret;
2179         m = file->private_data;
2180         /* copy tr over to seq ops */
2181         m->private = inode->i_private;
2182
2183         return ret;
2184 }
2185
2186 static int ftrace_event_release(struct inode *inode, struct file *file)
2187 {
2188         struct trace_array *tr = inode->i_private;
2189
2190         trace_array_put(tr);
2191
2192         return seq_release(inode, file);
2193 }
2194
2195 static int
2196 ftrace_event_avail_open(struct inode *inode, struct file *file)
2197 {
2198         const struct seq_operations *seq_ops = &show_event_seq_ops;
2199
2200         /* Checks for tracefs lockdown */
2201         return ftrace_event_open(inode, file, seq_ops);
2202 }
2203
2204 static int
2205 ftrace_event_set_open(struct inode *inode, struct file *file)
2206 {
2207         const struct seq_operations *seq_ops = &show_set_event_seq_ops;
2208         struct trace_array *tr = inode->i_private;
2209         int ret;
2210
2211         ret = tracing_check_open_get_tr(tr);
2212         if (ret)
2213                 return ret;
2214
2215         if ((file->f_mode & FMODE_WRITE) &&
2216             (file->f_flags & O_TRUNC))
2217                 ftrace_clear_events(tr);
2218
2219         ret = ftrace_event_open(inode, file, seq_ops);
2220         if (ret < 0)
2221                 trace_array_put(tr);
2222         return ret;
2223 }
2224
2225 static int
2226 ftrace_event_set_pid_open(struct inode *inode, struct file *file)
2227 {
2228         const struct seq_operations *seq_ops = &show_set_pid_seq_ops;
2229         struct trace_array *tr = inode->i_private;
2230         int ret;
2231
2232         ret = tracing_check_open_get_tr(tr);
2233         if (ret)
2234                 return ret;
2235
2236         if ((file->f_mode & FMODE_WRITE) &&
2237             (file->f_flags & O_TRUNC))
2238                 ftrace_clear_event_pids(tr, TRACE_PIDS);
2239
2240         ret = ftrace_event_open(inode, file, seq_ops);
2241         if (ret < 0)
2242                 trace_array_put(tr);
2243         return ret;
2244 }
2245
2246 static int
2247 ftrace_event_set_npid_open(struct inode *inode, struct file *file)
2248 {
2249         const struct seq_operations *seq_ops = &show_set_no_pid_seq_ops;
2250         struct trace_array *tr = inode->i_private;
2251         int ret;
2252
2253         ret = tracing_check_open_get_tr(tr);
2254         if (ret)
2255                 return ret;
2256
2257         if ((file->f_mode & FMODE_WRITE) &&
2258             (file->f_flags & O_TRUNC))
2259                 ftrace_clear_event_pids(tr, TRACE_NO_PIDS);
2260
2261         ret = ftrace_event_open(inode, file, seq_ops);
2262         if (ret < 0)
2263                 trace_array_put(tr);
2264         return ret;
2265 }
2266
2267 static struct event_subsystem *
2268 create_new_subsystem(const char *name)
2269 {
2270         struct event_subsystem *system;
2271
2272         /* need to create new entry */
2273         system = kmalloc(sizeof(*system), GFP_KERNEL);
2274         if (!system)
2275                 return NULL;
2276
2277         system->ref_count = 1;
2278
2279         /* Only allocate if dynamic (kprobes and modules) */
2280         system->name = kstrdup_const(name, GFP_KERNEL);
2281         if (!system->name)
2282                 goto out_free;
2283
2284         system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
2285         if (!system->filter)
2286                 goto out_free;
2287
2288         list_add(&system->list, &event_subsystems);
2289
2290         return system;
2291
2292  out_free:
2293         kfree_const(system->name);
2294         kfree(system);
2295         return NULL;
2296 }
2297
2298 static struct dentry *
2299 event_subsystem_dir(struct trace_array *tr, const char *name,
2300                     struct trace_event_file *file, struct dentry *parent)
2301 {
2302         struct event_subsystem *system, *iter;
2303         struct trace_subsystem_dir *dir;
2304         struct dentry *entry;
2305
2306         /* First see if we already created this dir */
2307         list_for_each_entry(dir, &tr->systems, list) {
2308                 system = dir->subsystem;
2309                 if (strcmp(system->name, name) == 0) {
2310                         dir->nr_events++;
2311                         file->system = dir;
2312                         return dir->entry;
2313                 }
2314         }
2315
2316         /* Now see if the system itself exists. */
2317         system = NULL;
2318         list_for_each_entry(iter, &event_subsystems, list) {
2319                 if (strcmp(iter->name, name) == 0) {
2320                         system = iter;
2321                         break;
2322                 }
2323         }
2324
2325         dir = kmalloc(sizeof(*dir), GFP_KERNEL);
2326         if (!dir)
2327                 goto out_fail;
2328
2329         if (!system) {
2330                 system = create_new_subsystem(name);
2331                 if (!system)
2332                         goto out_free;
2333         } else
2334                 __get_system(system);
2335
2336         dir->entry = tracefs_create_dir(name, parent);
2337         if (!dir->entry) {
2338                 pr_warn("Failed to create system directory %s\n", name);
2339                 __put_system(system);
2340                 goto out_free;
2341         }
2342
2343         dir->tr = tr;
2344         dir->ref_count = 1;
2345         dir->nr_events = 1;
2346         dir->subsystem = system;
2347         file->system = dir;
2348
2349         /* the ftrace system is special, do not create enable or filter files */
2350         if (strcmp(name, "ftrace") != 0) {
2351
2352                 entry = tracefs_create_file("filter", TRACE_MODE_WRITE,
2353                                             dir->entry, dir,
2354                                             &ftrace_subsystem_filter_fops);
2355                 if (!entry) {
2356                         kfree(system->filter);
2357                         system->filter = NULL;
2358                         pr_warn("Could not create tracefs '%s/filter' entry\n", name);
2359                 }
2360
2361                 trace_create_file("enable", TRACE_MODE_WRITE, dir->entry, dir,
2362                                   &ftrace_system_enable_fops);
2363         }
2364
2365         list_add(&dir->list, &tr->systems);
2366
2367         return dir->entry;
2368
2369  out_free:
2370         kfree(dir);
2371  out_fail:
2372         /* Only print this message if we failed on memory allocation */
2373         if (!dir || !system)
2374                 pr_warn("No memory to create event subsystem %s\n", name);
2375         return NULL;
2376 }
2377
2378 static int
2379 event_define_fields(struct trace_event_call *call)
2380 {
2381         struct list_head *head;
2382         int ret = 0;
2383
2384         /*
2385          * Other events may have the same class. Only update
2386          * the fields if they are not already defined.
2387          */
2388         head = trace_get_fields(call);
2389         if (list_empty(head)) {
2390                 struct trace_event_fields *field = call->class->fields_array;
2391                 unsigned int offset = sizeof(struct trace_entry);
2392
2393                 for (; field->type; field++) {
2394                         if (field->type == TRACE_FUNCTION_TYPE) {
2395                                 field->define_fields(call);
2396                                 break;
2397                         }
2398
2399                         offset = ALIGN(offset, field->align);
2400                         ret = trace_define_field_ext(call, field->type, field->name,
2401                                                  offset, field->size,
2402                                                  field->is_signed, field->filter_type,
2403                                                  field->len);
2404                         if (WARN_ON_ONCE(ret)) {
2405                                 pr_err("error code is %d\n", ret);
2406                                 break;
2407                         }
2408
2409                         offset += field->size;
2410                 }
2411         }
2412
2413         return ret;
2414 }
2415
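/*
 * Editor's note, illustrative only: a class's fields_array is a table
 * terminated by a NULL ->type, normally generated by the TRACE_EVENT()
 * macros rather than written by hand.  A hypothetical hand-rolled
 * equivalent for an event with one int and one fixed-size array field
 * might look roughly like this (names are made up):
 */
#if 0
static struct trace_event_fields sample_event_fields[] = {
        { .type = "int", .name = "my_pid",
          .size = sizeof(int), .align = __alignof__(int),
          .is_signed = 1, .filter_type = FILTER_OTHER },
        { .type = "char[16]", .name = "my_comm",
          .size = 16, .align = __alignof__(char),
          .is_signed = 0, .filter_type = FILTER_OTHER, .len = 16 },
        {}      /* field->type == NULL ends the loop above */
};
#endif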
2416 static int
2417 event_create_dir(struct dentry *parent, struct trace_event_file *file)
2418 {
2419         struct trace_event_call *call = file->event_call;
2420         struct trace_array *tr = file->tr;
2421         struct dentry *d_events;
2422         const char *name;
2423         int ret;
2424
2425         /*
2426          * If the trace point header did not define TRACE_SYSTEM
2427          * then the system would be called "TRACE_SYSTEM".
2428          */
2429         if (strcmp(call->class->system, TRACE_SYSTEM) != 0) {
2430                 d_events = event_subsystem_dir(tr, call->class->system, file, parent);
2431                 if (!d_events)
2432                         return -ENOMEM;
2433         } else
2434                 d_events = parent;
2435
2436         name = trace_event_name(call);
2437         file->dir = tracefs_create_dir(name, d_events);
2438         if (!file->dir) {
2439                 pr_warn("Could not create tracefs '%s' directory\n", name);
2440                 return -1;
2441         }
2442
2443         if (call->class->reg && !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
2444                 trace_create_file("enable", TRACE_MODE_WRITE, file->dir, file,
2445                                   &ftrace_enable_fops);
2446
2447 #ifdef CONFIG_PERF_EVENTS
2448         if (call->event.type && call->class->reg)
2449                 trace_create_file("id", TRACE_MODE_READ, file->dir,
2450                                   (void *)(long)call->event.type,
2451                                   &ftrace_event_id_fops);
2452 #endif
2453
2454         ret = event_define_fields(call);
2455         if (ret < 0) {
2456                 pr_warn("Could not initialize trace point events/%s\n", name);
2457                 return ret;
2458         }
2459
2460         /*
2461          * Only event directories that can be enabled should have
2462          * triggers or filters.
2463          */
2464         if (!(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)) {
2465                 trace_create_file("filter", TRACE_MODE_WRITE, file->dir,
2466                                   file, &ftrace_event_filter_fops);
2467
2468                 trace_create_file("trigger", TRACE_MODE_WRITE, file->dir,
2469                                   file, &event_trigger_fops);
2470         }
2471
2472 #ifdef CONFIG_HIST_TRIGGERS
2473         trace_create_file("hist", TRACE_MODE_READ, file->dir, file,
2474                           &event_hist_fops);
2475 #endif
2476 #ifdef CONFIG_HIST_TRIGGERS_DEBUG
2477         trace_create_file("hist_debug", TRACE_MODE_READ, file->dir, file,
2478                           &event_hist_debug_fops);
2479 #endif
2480         trace_create_file("format", TRACE_MODE_READ, file->dir, call,
2481                           &ftrace_event_format_fops);
2482
2483 #ifdef CONFIG_TRACE_EVENT_INJECT
2484         if (call->event.type && call->class->reg)
2485                 trace_create_file("inject", 0200, file->dir, file,
2486                                   &event_inject_fops);
2487 #endif
2488
2489         return 0;
2490 }
2491
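/*
 * Editor's note, illustrative only: after event_create_dir() a typical
 * (non-"ftrace") event directory looks like:
 *
 *      events/<system>/<event>/
 *              enable          unless TRACE_EVENT_FL_IGNORE_ENABLE
 *              id              CONFIG_PERF_EVENTS
 *              filter          unless TRACE_EVENT_FL_IGNORE_ENABLE
 *              trigger         unless TRACE_EVENT_FL_IGNORE_ENABLE
 *              hist            CONFIG_HIST_TRIGGERS
 *              hist_debug      CONFIG_HIST_TRIGGERS_DEBUG
 *              format          always
 *              inject          CONFIG_TRACE_EVENT_INJECT
 */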
2492 static void remove_event_from_tracers(struct trace_event_call *call)
2493 {
2494         struct trace_event_file *file;
2495         struct trace_array *tr;
2496
2497         do_for_each_event_file_safe(tr, file) {
2498                 if (file->event_call != call)
2499                         continue;
2500
2501                 remove_event_file_dir(file);
2502                 /*
2503                  * The do_for_each_event_file_safe() is
2504                  * a double loop. After finding the call for this
2505                  * trace_array, we use break to jump to the next
2506                  * trace_array.
2507                  */
2508                 break;
2509         } while_for_each_event_file();
2510 }
2511
2512 static void event_remove(struct trace_event_call *call)
2513 {
2514         struct trace_array *tr;
2515         struct trace_event_file *file;
2516
2517         do_for_each_event_file(tr, file) {
2518                 if (file->event_call != call)
2519                         continue;
2520
2521                 if (file->flags & EVENT_FILE_FL_WAS_ENABLED)
2522                         tr->clear_trace = true;
2523
2524                 ftrace_event_enable_disable(file, 0);
2525                 /*
2526                  * The do_for_each_event_file() is
2527                  * a double loop. After finding the call for this
2528                  * trace_array, we use break to jump to the next
2529                  * trace_array.
2530                  */
2531                 break;
2532         } while_for_each_event_file();
2533
2534         if (call->event.funcs)
2535                 __unregister_trace_event(&call->event);
2536         remove_event_from_tracers(call);
2537         list_del(&call->list);
2538 }
2539
2540 static int event_init(struct trace_event_call *call)
2541 {
2542         int ret = 0;
2543         const char *name;
2544
2545         name = trace_event_name(call);
2546         if (WARN_ON(!name))
2547                 return -EINVAL;
2548
2549         if (call->class->raw_init) {
2550                 ret = call->class->raw_init(call);
2551                 if (ret < 0 && ret != -ENOSYS)
2552                         pr_warn("Could not initialize trace events/%s\n", name);
2553         }
2554
2555         return ret;
2556 }
2557
2558 static int
2559 __register_event(struct trace_event_call *call, struct module *mod)
2560 {
2561         int ret;
2562
2563         ret = event_init(call);
2564         if (ret < 0)
2565                 return ret;
2566
2567         list_add(&call->list, &ftrace_events);
2568         if (call->flags & TRACE_EVENT_FL_DYNAMIC)
2569                 atomic_set(&call->refcnt, 0);
2570         else
2571                 call->module = mod;
2572
2573         return 0;
2574 }
2575
2576 static char *eval_replace(char *ptr, struct trace_eval_map *map, int len)
2577 {
2578         int rlen;
2579         int elen;
2580
2581         /* Find the length of the eval value as a string */
2582         elen = snprintf(ptr, 0, "%ld", map->eval_value);
2583         /* Make sure there's enough room to replace the string with the value */
2584         if (len < elen)
2585                 return NULL;
2586
2587         snprintf(ptr, elen + 1, "%ld", map->eval_value);
2588
2589         /* Get the rest of the string of ptr */
2590         rlen = strlen(ptr + len);
2591         memmove(ptr + elen, ptr + len, rlen);
2592         /* Make sure we end the new string */
2593         ptr[elen + rlen] = 0;
2594
2595         return ptr + elen;
2596 }
2597
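/*
 * Editor's note, a worked example of eval_replace() with hypothetical
 * values: with map->eval_string = "MY_EVAL" (len = 7) and
 * map->eval_value = 3, a buffer holding
 *
 *      "MY_EVAL, next"
 *
 * is rewritten in place to
 *
 *      "3, next"
 *
 * elen is 1, so the tail ", next" is memmove()d from ptr + 7 down to
 * ptr + 1, and the returned pointer addresses the ','.
 */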
2598 static void update_event_printk(struct trace_event_call *call,
2599                                 struct trace_eval_map *map)
2600 {
2601         char *ptr;
2602         int quote = 0;
2603         int len = strlen(map->eval_string);
2604
2605         for (ptr = call->print_fmt; *ptr; ptr++) {
2606                 if (*ptr == '\\') {
2607                         ptr++;
2608                         /* paranoid */
2609                         if (!*ptr)
2610                                 break;
2611                         continue;
2612                 }
2613                 if (*ptr == '"') {
2614                         quote ^= 1;
2615                         continue;
2616                 }
2617                 if (quote)
2618                         continue;
2619                 if (isdigit(*ptr)) {
2620                         /* skip numbers */
2621                         do {
2622                                 ptr++;
2623                                 /* Check for alpha chars like ULL */
2624                         } while (isalnum(*ptr));
2625                         if (!*ptr)
2626                                 break;
2627                         /*
2628                          * A number must have some kind of delimiter after
2629                          * it, and we can ignore that too.
2630                          */
2631                         continue;
2632                 }
2633                 if (isalpha(*ptr) || *ptr == '_') {
2634                         if (strncmp(map->eval_string, ptr, len) == 0 &&
2635                             !isalnum(ptr[len]) && ptr[len] != '_') {
2636                                 ptr = eval_replace(ptr, map, len);
2637                                 /* enum/sizeof string smaller than value */
2638                                 if (WARN_ON_ONCE(!ptr))
2639                                         return;
2640                                 /*
2641                                  * No need to decrement here, as eval_replace()
2642                                  * returns the pointer to the character past
2643                                  * the eval, and two evals cannot be placed
2644                                  * back to back without something in between.
2645                                  * We can skip that something in between.
2646                                  */
2647                                 continue;
2648                         }
2649                 skip_more:
2650                         do {
2651                                 ptr++;
2652                         } while (isalnum(*ptr) || *ptr == '_');
2653                         if (!*ptr)
2654                                 break;
2655                         /*
2656                          * If what comes after this variable is a '.' or
2657                          * '->' then we can continue to ignore that string.
2658                          */
2659                         if (*ptr == '.' || (ptr[0] == '-' && ptr[1] == '>')) {
2660                                 ptr += *ptr == '.' ? 1 : 2;
2661                                 if (!*ptr)
2662                                         break;
2663                                 goto skip_more;
2664                         }
2665                         /*
2666                          * Once again, we can skip the delimiter that came
2667                          * after the string.
2668                          */
2669                         continue;
2670                 }
2671         }
2672 }
2673
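/*
 * Editor's note, illustrative only: update_event_printk() applies
 * eval_replace() to every bare identifier in print_fmt that matches
 * the map, while skipping quoted strings, numbers and struct member
 * accesses.  A hypothetical transformation:
 *
 *      before: "state=%s", __print_symbolic(REC->st, { MY_EVAL, "go" })
 *      after:  "state=%s", __print_symbolic(REC->st, { 3, "go" })
 *
 * A "MY_EVAL" inside the quoted format string would be left alone.
 */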
2674 static void add_str_to_module(struct module *module, char *str)
2675 {
2676         struct module_string *modstr;
2677
2678         modstr = kmalloc(sizeof(*modstr), GFP_KERNEL);
2679
2680         /*
2681          * If we failed to allocate memory here, then we'll just
2682          * let the str memory leak when the module is removed.
2683          * If this fails to allocate, there are worse problems than
2684          * a leaked string on module removal.
2685          */
2686         if (WARN_ON_ONCE(!modstr))
2687                 return;
2688
2689         modstr->module = module;
2690         modstr->str = str;
2691
2692         list_add(&modstr->next, &module_strings);
2693 }
2694
2695 static void update_event_fields(struct trace_event_call *call,
2696                                 struct trace_eval_map *map)
2697 {
2698         struct ftrace_event_field *field;
2699         struct list_head *head;
2700         char *ptr;
2701         char *str;
2702         int len = strlen(map->eval_string);
2703
2704         /* Dynamic events should never have field maps */
2705         if (WARN_ON_ONCE(call->flags & TRACE_EVENT_FL_DYNAMIC))
2706                 return;
2707
2708         head = trace_get_fields(call);
2709         list_for_each_entry(field, head, link) {
2710                 ptr = strchr(field->type, '[');
2711                 if (!ptr)
2712                         continue;
2713                 ptr++;
2714
2715                 if (!isalpha(*ptr) && *ptr != '_')
2716                         continue;
2717
2718                 if (strncmp(map->eval_string, ptr, len) != 0)
2719                         continue;
2720
2721                 str = kstrdup(field->type, GFP_KERNEL);
2722                 if (WARN_ON_ONCE(!str))
2723                         return;
2724                 ptr = str + (ptr - field->type);
2725                 ptr = eval_replace(ptr, map, len);
2726                 /* enum/sizeof string smaller than value */
2727                 if (WARN_ON_ONCE(!ptr)) {
2728                         kfree(str);
2729                         continue;
2730                 }
2731
2732                 /*
2733                  * If the event is part of a module, then we need to free the string
2734                  * when the module is removed. Otherwise, it will stay allocated
2735                  * until a reboot.
2736                  */
2737                 if (call->module)
2738                         add_str_to_module(call->module, str);
2739
2740                 field->type = str;
2741         }
2742 }
2743
2744 void trace_event_eval_update(struct trace_eval_map **map, int len)
2745 {
2746         struct trace_event_call *call, *p;
2747         const char *last_system = NULL;
2748         bool first = false;
2749         int last_i;
2750         int i;
2751
2752         down_write(&trace_event_sem);
2753         list_for_each_entry_safe(call, p, &ftrace_events, list) {
2754                 /* events are usually grouped together with systems */
2755                 if (!last_system || call->class->system != last_system) {
2756                         first = true;
2757                         last_i = 0;
2758                         last_system = call->class->system;
2759                 }
2760
2761                 /*
2762                  * Since calls are grouped by systems, the likelihood that the
2763                  * next call in the iteration belongs to the same system as the
2764                  * previous call is high. As an optimization, we skip searching
2765                  * for a map[] that matches the call's system if the last call
2766                  * was from the same system. That's what last_i is for. If the
2767                  * call has the same system as the previous call, then last_i
2768                  * will be the index of the first map[] that has a matching
2769                  * system.
2770                  */
2771                 for (i = last_i; i < len; i++) {
2772                         if (call->class->system == map[i]->system) {
2773                                 /* Save the first system if need be */
2774                                 if (first) {
2775                                         last_i = i;
2776                                         first = false;
2777                                 }
2778                                 update_event_printk(call, map[i]);
2779                                 update_event_fields(call, map[i]);
2780                         }
2781                 }
2782         }
2783         up_write(&trace_event_sem);
2784 }
2785
2786 static struct trace_event_file *
2787 trace_create_new_event(struct trace_event_call *call,
2788                        struct trace_array *tr)
2789 {
2790         struct trace_pid_list *no_pid_list;
2791         struct trace_pid_list *pid_list;
2792         struct trace_event_file *file;
2793         unsigned int first;
2794
2795         file = kmem_cache_alloc(file_cachep, GFP_TRACE);
2796         if (!file)
2797                 return NULL;
2798
2799         pid_list = rcu_dereference_protected(tr->filtered_pids,
2800                                              lockdep_is_held(&event_mutex));
2801         no_pid_list = rcu_dereference_protected(tr->filtered_no_pids,
2802                                              lockdep_is_held(&event_mutex));
2803
2804         if (!trace_pid_list_first(pid_list, &first) ||
2805             !trace_pid_list_first(no_pid_list, &first))
2806                 file->flags |= EVENT_FILE_FL_PID_FILTER;
2807
2808         file->event_call = call;
2809         file->tr = tr;
2810         atomic_set(&file->sm_ref, 0);
2811         atomic_set(&file->tm_ref, 0);
2812         INIT_LIST_HEAD(&file->triggers);
2813         list_add(&file->list, &tr->events);
2814
2815         return file;
2816 }
2817
2818 #define MAX_BOOT_TRIGGERS 32
2819
2820 static struct boot_triggers {
2821         const char              *event;
2822         char                    *trigger;
2823 } bootup_triggers[MAX_BOOT_TRIGGERS];
2824
2825 static char bootup_trigger_buf[COMMAND_LINE_SIZE];
2826 static int nr_boot_triggers;
2827
2828 static __init int setup_trace_triggers(char *str)
2829 {
2830         char *trigger;
2831         char *buf;
2832         int i;
2833
2834         strlcpy(bootup_trigger_buf, str, COMMAND_LINE_SIZE);
2835         ring_buffer_expanded = true;
2836         disable_tracing_selftest("running event triggers");
2837
2838         buf = bootup_trigger_buf;
2839         for (i = 0; i < MAX_BOOT_TRIGGERS; i++) {
2840                 trigger = strsep(&buf, ",");
2841                 if (!trigger)
2842                         break;
2843                 bootup_triggers[i].event = strsep(&trigger, ".");
2844                 bootup_triggers[i].trigger = trigger;
2845                 if (!bootup_triggers[i].trigger)
2846                         break;
2847         }
2848
2849         nr_boot_triggers = i;
2850         return 1;
2851 }
2852 __setup("trace_trigger=", setup_trace_triggers);
2853
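/*
 * Editor's note, illustrative only: setup_trace_triggers() splits the
 * boot parameter on ',' and each entry on the first '.', giving an
 * <event>.<trigger> pair, e.g.:
 *
 *      trace_trigger="sched_switch.stacktrace:5,sched_wakeup.traceoff"
 *
 * Each trigger string is later handed to trigger_process_regex() by
 * trace_early_triggers() when the matching event file is created.
 */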
2854 /* Add an event to a trace directory */
2855 static int
2856 __trace_add_new_event(struct trace_event_call *call, struct trace_array *tr)
2857 {
2858         struct trace_event_file *file;
2859
2860         file = trace_create_new_event(call, tr);
2861         if (!file)
2862                 return -ENOMEM;
2863
2864         if (eventdir_initialized)
2865                 return event_create_dir(tr->event_dir, file);
2866         else
2867                 return event_define_fields(call);
2868 }
2869
2870 static void trace_early_triggers(struct trace_event_file *file, const char *name)
2871 {
2872         int ret;
2873         int i;
2874
2875         for (i = 0; i < nr_boot_triggers; i++) {
2876                 if (strcmp(name, bootup_triggers[i].event))
2877                         continue;
2878                 mutex_lock(&event_mutex);
2879                 ret = trigger_process_regex(file, bootup_triggers[i].trigger);
2880                 mutex_unlock(&event_mutex);
2881                 if (ret)
2882                         pr_err("Failed to register trigger '%s' on event %s\n",
2883                                bootup_triggers[i].trigger,
2884                                bootup_triggers[i].event);
2885         }
2886 }
2887
2888 /*
2889  * Just create a descriptor for early init. A descriptor is required
2890  * for enabling events at boot. We want to enable events before
2891  * the filesystem is initialized.
2892  */
2893 static int
2894 __trace_early_add_new_event(struct trace_event_call *call,
2895                             struct trace_array *tr)
2896 {
2897         struct trace_event_file *file;
2898         int ret;
2899
2900         file = trace_create_new_event(call, tr);
2901         if (!file)
2902                 return -ENOMEM;
2903
2904         ret = event_define_fields(call);
2905         if (ret)
2906                 return ret;
2907
2908         trace_early_triggers(file, trace_event_name(call));
2909
2910         return 0;
2911 }
2912
2913 struct ftrace_module_file_ops;
2914 static void __add_event_to_tracers(struct trace_event_call *call);
2915
2916 /* Add an additional event_call dynamically */
2917 int trace_add_event_call(struct trace_event_call *call)
2918 {
2919         int ret;
2920         lockdep_assert_held(&event_mutex);
2921
2922         mutex_lock(&trace_types_lock);
2923
2924         ret = __register_event(call, NULL);
2925         if (ret >= 0)
2926                 __add_event_to_tracers(call);
2927
2928         mutex_unlock(&trace_types_lock);
2929         return ret;
2930 }
2931 EXPORT_SYMBOL_GPL(trace_add_event_call);
2932
2933 /*
2934  * Must be called under locking of trace_types_lock, event_mutex and
2935  * trace_event_sem.
2936  */
2937 static void __trace_remove_event_call(struct trace_event_call *call)
2938 {
2939         event_remove(call);
2940         trace_destroy_fields(call);
2941         free_event_filter(call->filter);
2942         call->filter = NULL;
2943 }
2944
2945 static int probe_remove_event_call(struct trace_event_call *call)
2946 {
2947         struct trace_array *tr;
2948         struct trace_event_file *file;
2949
2950 #ifdef CONFIG_PERF_EVENTS
2951         if (call->perf_refcount)
2952                 return -EBUSY;
2953 #endif
2954         do_for_each_event_file(tr, file) {
2955                 if (file->event_call != call)
2956                         continue;
2957                 /*
2958                  * We can't rely on the ftrace_event_enable_disable(enable => 0)
2959                  * that we are about to do; EVENT_FILE_FL_SOFT_MODE can suppress
2960                  * TRACE_REG_UNREGISTER.
2961                  */
2962                 if (file->flags & EVENT_FILE_FL_ENABLED)
2963                         goto busy;
2964
2965                 if (file->flags & EVENT_FILE_FL_WAS_ENABLED)
2966                         tr->clear_trace = true;
2967                 /*
2968                  * The do_for_each_event_file() is
2969                  * a double loop. After finding the call for this
2970                  * trace_array, we use break to jump to the next
2971                  * trace_array.
2972                  */
2973                 break;
2974         } while_for_each_event_file();
2975
2976         __trace_remove_event_call(call);
2977
2978         return 0;
2979  busy:
2980         /* No need to clear the trace now */
2981         list_for_each_entry(tr, &ftrace_trace_arrays, list) {
2982                 tr->clear_trace = false;
2983         }
2984         return -EBUSY;
2985 }
2986
2987 /* Remove an event_call */
2988 int trace_remove_event_call(struct trace_event_call *call)
2989 {
2990         int ret;
2991
2992         lockdep_assert_held(&event_mutex);
2993
2994         mutex_lock(&trace_types_lock);
2995         down_write(&trace_event_sem);
2996         ret = probe_remove_event_call(call);
2997         up_write(&trace_event_sem);
2998         mutex_unlock(&trace_types_lock);
2999
3000         return ret;
3001 }
3002 EXPORT_SYMBOL_GPL(trace_remove_event_call);
3003
3004 #define for_each_event(event, start, end)                       \
3005         for (event = start;                                     \
3006              (unsigned long)event < (unsigned long)end;         \
3007              event++)
3008
3009 #ifdef CONFIG_MODULES
3010
3011 static void trace_module_add_events(struct module *mod)
3012 {
3013         struct trace_event_call **call, **start, **end;
3014
3015         if (!mod->num_trace_events)
3016                 return;
3017
3018         /* Don't add infrastructure for mods without tracepoints */
3019         if (trace_module_has_bad_taint(mod)) {
3020                 pr_err("%s: module has bad taint, not creating trace events\n",
3021                        mod->name);
3022                 return;
3023         }
3024
3025         start = mod->trace_events;
3026         end = mod->trace_events + mod->num_trace_events;
3027
3028         for_each_event(call, start, end) {
3029                 __register_event(*call, mod);
3030                 __add_event_to_tracers(*call);
3031         }
3032 }
3033
3034 static void trace_module_remove_events(struct module *mod)
3035 {
3036         struct trace_event_call *call, *p;
3037         struct module_string *modstr, *m;
3038
3039         down_write(&trace_event_sem);
3040         list_for_each_entry_safe(call, p, &ftrace_events, list) {
3041                 if ((call->flags & TRACE_EVENT_FL_DYNAMIC) || !call->module)
3042                         continue;
3043                 if (call->module == mod)
3044                         __trace_remove_event_call(call);
3045         }
3046         /* Check for any strings allocated for this module */
3047         list_for_each_entry_safe(modstr, m, &module_strings, next) {
3048                 if (modstr->module != mod)
3049                         continue;
3050                 list_del(&modstr->next);
3051                 kfree(modstr->str);
3052                 kfree(modstr);
3053         }
3054         up_write(&trace_event_sem);
3055
3056         /*
3057          * It is safest to reset the ring buffer if the module being unloaded
3058          * registered any events that were used. The only worry is if
3059          * a new module gets loaded, and takes on the same id as the events
3060          * of this module. When printing out the buffer, traced events left
3061          * over from this module may be passed to the new module events and
3062          * unexpected results may occur.
3063          */
3064         tracing_reset_all_online_cpus_unlocked();
3065 }
3066
3067 static int trace_module_notify(struct notifier_block *self,
3068                                unsigned long val, void *data)
3069 {
3070         struct module *mod = data;
3071
3072         mutex_lock(&event_mutex);
3073         mutex_lock(&trace_types_lock);
3074         switch (val) {
3075         case MODULE_STATE_COMING:
3076                 trace_module_add_events(mod);
3077                 break;
3078         case MODULE_STATE_GOING:
3079                 trace_module_remove_events(mod);
3080                 break;
3081         }
3082         mutex_unlock(&trace_types_lock);
3083         mutex_unlock(&event_mutex);
3084
3085         return NOTIFY_OK;
3086 }
3087
3088 static struct notifier_block trace_module_nb = {
3089         .notifier_call = trace_module_notify,
3090         .priority = 1, /* higher than trace.c module notify */
3091 };
3092 #endif /* CONFIG_MODULES */
3093
3094 /* Create a new event directory structure for a trace directory. */
3095 static void
3096 __trace_add_event_dirs(struct trace_array *tr)
3097 {
3098         struct trace_event_call *call;
3099         int ret;
3100
3101         list_for_each_entry(call, &ftrace_events, list) {
3102                 ret = __trace_add_new_event(call, tr);
3103                 if (ret < 0)
3104                         pr_warn("Could not create directory for event %s\n",
3105                                 trace_event_name(call));
3106         }
3107 }
3108
3109 /* Returns any file that matches the system and event */
3110 struct trace_event_file *
3111 __find_event_file(struct trace_array *tr, const char *system, const char *event)
3112 {
3113         struct trace_event_file *file;
3114         struct trace_event_call *call;
3115         const char *name;
3116
3117         list_for_each_entry(file, &tr->events, list) {
3118
3119                 call = file->event_call;
3120                 name = trace_event_name(call);
3121
3122                 if (!name || !call->class)
3123                         continue;
3124
3125                 if (strcmp(event, name) == 0 &&
3126                     strcmp(system, call->class->system) == 0)
3127                         return file;
3128         }
3129         return NULL;
3130 }
3131
3132 /* Returns a valid trace event file that matches the system and event */
3133 struct trace_event_file *
3134 find_event_file(struct trace_array *tr, const char *system, const char *event)
3135 {
3136         struct trace_event_file *file;
3137
3138         file = __find_event_file(tr, system, event);
3139         if (!file || !file->event_call->class->reg ||
3140             file->event_call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
3141                 return NULL;
3142
3143         return file;
3144 }
3145
3146 /**
3147  * trace_get_event_file - Find and return a trace event file
3148  * @instance: The name of the trace instance containing the event
3149  * @system: The name of the system containing the event
3150  * @event: The name of the event
3151  *
3152  * Return a trace event file given the trace instance name, trace
3153  * system, and trace event name.  If the instance name is NULL, it
3154  * refers to the top-level trace array.
3155  *
3156  * This function will look it up and return it if found, after calling
3157  * trace_array_get() to prevent the instance from going away, and
3158  * incrementing the event's module refcount to prevent it from being
3159  * removed.
3160  *
3161  * To release the file, call trace_put_event_file(), which will call
3162  * trace_array_put() and decrement the event's module refcount.
3163  *
3164  * Return: The trace event file on success, ERR_PTR otherwise.
3165  */
3166 struct trace_event_file *trace_get_event_file(const char *instance,
3167                                               const char *system,
3168                                               const char *event)
3169 {
3170         struct trace_array *tr = top_trace_array();
3171         struct trace_event_file *file = NULL;
3172         int ret = -EINVAL;
3173
3174         if (instance) {
3175                 tr = trace_array_find_get(instance);
3176                 if (!tr)
3177                         return ERR_PTR(-ENOENT);
3178         } else {
3179                 ret = trace_array_get(tr);
3180                 if (ret)
3181                         return ERR_PTR(ret);
3182         }
3183
3184         mutex_lock(&event_mutex);
3185
3186         file = find_event_file(tr, system, event);
3187         if (!file) {
3188                 trace_array_put(tr);
3189                 ret = -EINVAL;
3190                 goto out;
3191         }
3192
3193         /* Don't let event modules unload while in use */
3194         ret = trace_event_try_get_ref(file->event_call);
3195         if (!ret) {
3196                 trace_array_put(tr);
3197                 ret = -EBUSY;
3198                 goto out;
3199         }
3200
3201         ret = 0;
3202  out:
3203         mutex_unlock(&event_mutex);
3204
3205         if (ret)
3206                 file = ERR_PTR(ret);
3207
3208         return file;
3209 }
3210 EXPORT_SYMBOL_GPL(trace_get_event_file);
3211
3212 /**
3213  * trace_put_event_file - Release a file from trace_get_event_file()
3214  * @file: The trace event file
3215  *
3216  * If a file was retrieved using trace_get_event_file(), this should
3217  * be called when it's no longer needed.  It will cancel the previous
3218  * trace_array_get() called by that function, and decrement the
3219  * event's module refcount.
3220  */
3221 void trace_put_event_file(struct trace_event_file *file)
3222 {
3223         mutex_lock(&event_mutex);
3224         trace_event_put_ref(file->event_call);
3225         mutex_unlock(&event_mutex);
3226
3227         trace_array_put(file->tr);
3228 }
3229 EXPORT_SYMBOL_GPL(trace_put_event_file);
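
/*
 * A minimal usage sketch (illustrative only; "sched"/"sched_switch" is
 * just an example event):
 *
 *	struct trace_event_file *file;
 *
 *	file = trace_get_event_file(NULL, "sched", "sched_switch");
 *	if (IS_ERR(file))
 *		return PTR_ERR(file);
 *
 *	... use the file, e.g. check file->flags or attach a trigger ...
 *
 *	trace_put_event_file(file);
 */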
3230
3231 #ifdef CONFIG_DYNAMIC_FTRACE
3232
3233 /* Define these once to avoid typos in the repeated command strings */
3234 #define ENABLE_EVENT_STR        "enable_event"
3235 #define DISABLE_EVENT_STR       "disable_event"
3236
3237 struct event_probe_data {
3238         struct trace_event_file *file;
3239         unsigned long                   count;
3240         int                             ref;
3241         bool                            enable;
3242 };
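
/*
 * Note on count: -1 means "unlimited". A positive count is decremented
 * by event_enable_count_probe() each time the probe actually toggles
 * the event; once it reaches zero the probe becomes a no-op.
 */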
3243
3244 static void update_event_probe(struct event_probe_data *data)
3245 {
3246         if (data->enable)
3247                 clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &data->file->flags);
3248         else
3249                 set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &data->file->flags);
3250 }
3251
3252 static void
3253 event_enable_probe(unsigned long ip, unsigned long parent_ip,
3254                    struct trace_array *tr, struct ftrace_probe_ops *ops,
3255                    void *data)
3256 {
3257         struct ftrace_func_mapper *mapper = data;
3258         struct event_probe_data *edata;
3259         void **pdata;
3260
3261         pdata = ftrace_func_mapper_find_ip(mapper, ip);
3262         if (!pdata || !*pdata)
3263                 return;
3264
3265         edata = *pdata;
3266         update_event_probe(edata);
3267 }
3268
3269 static void
3270 event_enable_count_probe(unsigned long ip, unsigned long parent_ip,
3271                          struct trace_array *tr, struct ftrace_probe_ops *ops,
3272                          void *data)
3273 {
3274         struct ftrace_func_mapper *mapper = data;
3275         struct event_probe_data *edata;
3276         void **pdata;
3277
3278         pdata = ftrace_func_mapper_find_ip(mapper, ip);
3279         if (!pdata || !*pdata)
3280                 return;
3281
3282         edata = *pdata;
3283
3284         if (!edata->count)
3285                 return;
3286
3287         /* Skip if the event is already in the state we want to switch it to */
3288         if (edata->enable == !(edata->file->flags & EVENT_FILE_FL_SOFT_DISABLED))
3289                 return;
3290
3291         if (edata->count != -1)
3292                 (edata->count)--;
3293
3294         update_event_probe(edata);
3295 }
3296
3297 static int
3298 event_enable_print(struct seq_file *m, unsigned long ip,
3299                    struct ftrace_probe_ops *ops, void *data)
3300 {
3301         struct ftrace_func_mapper *mapper = data;
3302         struct event_probe_data *edata;
3303         void **pdata;
3304
3305         pdata = ftrace_func_mapper_find_ip(mapper, ip);
3306
3307         if (WARN_ON_ONCE(!pdata || !*pdata))
3308                 return 0;
3309
3310         edata = *pdata;
3311
3312         seq_printf(m, "%ps:", (void *)ip);
3313
3314         seq_printf(m, "%s:%s:%s",
3315                    edata->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR,
3316                    edata->file->event_call->class->system,
3317                    trace_event_name(edata->file->event_call));
3318
3319         if (edata->count == -1)
3320                 seq_puts(m, ":unlimited\n");
3321         else
3322                 seq_printf(m, ":count=%ld\n", edata->count);
3323
3324         return 0;
3325 }
3326
3327 static int
3328 event_enable_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
3329                   unsigned long ip, void *init_data, void **data)
3330 {
3331         struct ftrace_func_mapper *mapper = *data;
3332         struct event_probe_data *edata = init_data;
3333         int ret;
3334
3335         if (!mapper) {
3336                 mapper = allocate_ftrace_func_mapper();
3337                 if (!mapper)
3338                         return -ENODEV;
3339                 *data = mapper;
3340         }
3341
3342         ret = ftrace_func_mapper_add_ip(mapper, ip, edata);
3343         if (ret < 0)
3344                 return ret;
3345
3346         edata->ref++;
3347
3348         return 0;
3349 }
3350
3351 static int free_probe_data(void *data)
3352 {
3353         struct event_probe_data *edata = data;
3354
3355         edata->ref--;
3356         if (!edata->ref) {
3357                 /* Remove the SOFT_MODE flag */
3358                 __ftrace_event_enable_disable(edata->file, 0, 1);
3359                 trace_event_put_ref(edata->file->event_call);
3360                 kfree(edata);
3361         }
3362         return 0;
3363 }
3364
3365 static void
3366 event_enable_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
3367                   unsigned long ip, void *data)
3368 {
3369         struct ftrace_func_mapper *mapper = data;
3370         struct event_probe_data *edata;
3371
3372         if (!ip) {
3373                 if (!mapper)
3374                         return;
3375                 free_ftrace_func_mapper(mapper, free_probe_data);
3376                 return;
3377         }
3378
3379         edata = ftrace_func_mapper_remove_ip(mapper, ip);
3380
3381         if (WARN_ON_ONCE(!edata))
3382                 return;
3383
3384         if (WARN_ON_ONCE(edata->ref <= 0))
3385                 return;
3386
3387         free_probe_data(edata);
3388 }
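
/*
 * Lifetime note: every ip added to the probe takes a reference on the
 * shared event_probe_data (see event_enable_init()). The final
 * free_probe_data() call clears SOFT_MODE on the event and drops the
 * module reference taken in event_enable_func() below.
 */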
3389
3390 static struct ftrace_probe_ops event_enable_probe_ops = {
3391         .func                   = event_enable_probe,
3392         .print                  = event_enable_print,
3393         .init                   = event_enable_init,
3394         .free                   = event_enable_free,
3395 };
3396
3397 static struct ftrace_probe_ops event_enable_count_probe_ops = {
3398         .func                   = event_enable_count_probe,
3399         .print                  = event_enable_print,
3400         .init                   = event_enable_init,
3401         .free                   = event_enable_free,
3402 };
3403
3404 static struct ftrace_probe_ops event_disable_probe_ops = {
3405         .func                   = event_enable_probe,
3406         .print                  = event_enable_print,
3407         .init                   = event_enable_init,
3408         .free                   = event_enable_free,
3409 };
3410
3411 static struct ftrace_probe_ops event_disable_count_probe_ops = {
3412         .func                   = event_enable_count_probe,
3413         .print                  = event_enable_print,
3414         .init                   = event_enable_init,
3415         .free                   = event_enable_free,
3416 };
3417
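/*
 * event_enable_func() implements the enable_event/disable_event
 * commands of set_ftrace_filter. The glob is the function pattern and
 * param carries "<system>:<event>[:count]", so a typical invocation
 * from the shell (hypothetical function/event names) looks like:
 *
 *	echo 'schedule:enable_event:sched:sched_switch:1' > \
 *		/sys/kernel/tracing/set_ftrace_filter
 *
 * which soft-enables sched:sched_switch the first time schedule() is
 * hit. The enable and disable probe_ops above share the same callbacks;
 * the direction of the toggle comes only from the command name parsed
 * into data->enable.
 */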
3418 static int
3419 event_enable_func(struct trace_array *tr, struct ftrace_hash *hash,
3420                   char *glob, char *cmd, char *param, int enabled)
3421 {
3422         struct trace_event_file *file;
3423         struct ftrace_probe_ops *ops;
3424         struct event_probe_data *data;
3425         const char *system;
3426         const char *event;
3427         char *number;
3428         bool enable;
3429         int ret;
3430
3431         if (!tr)
3432                 return -ENODEV;
3433
3434         /* hash funcs only work with set_ftrace_filter */
3435         if (!enabled || !param)
3436                 return -EINVAL;
3437
3438         system = strsep(&param, ":");
3439         if (!param)
3440                 return -EINVAL;
3441
3442         event = strsep(&param, ":");
3443
3444         mutex_lock(&event_mutex);
3445
3446         ret = -EINVAL;
3447         file = find_event_file(tr, system, event);
3448         if (!file)
3449                 goto out;
3450
3451         enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
3452
3453         if (enable)
3454                 ops = param ? &event_enable_count_probe_ops : &event_enable_probe_ops;
3455         else
3456                 ops = param ? &event_disable_count_probe_ops : &event_disable_probe_ops;
3457
3458         if (glob[0] == '!') {
3459                 ret = unregister_ftrace_function_probe_func(glob+1, tr, ops);
3460                 goto out;
3461         }
3462
3463         ret = -ENOMEM;
3464
3465         data = kzalloc(sizeof(*data), GFP_KERNEL);
3466         if (!data)
3467                 goto out;
3468
3469         data->enable = enable;
3470         data->count = -1;
3471         data->file = file;
3472
3473         if (!param)
3474                 goto out_reg;
3475
3476         number = strsep(&param, ":");
3477
3478         ret = -EINVAL;
3479         if (!strlen(number))
3480                 goto out_free;
3481
3482         /*
3483          * The count parsed here is stored in data->count and is
3484          * decremented each time the probe toggles the event.
3485          */
3486         ret = kstrtoul(number, 0, &data->count);
3487         if (ret)
3488                 goto out_free;
3489
3490  out_reg:
3491         /* Don't let event modules unload while a probe is registered */
3492         ret = trace_event_try_get_ref(file->event_call);
3493         if (!ret) {
3494                 ret = -EBUSY;
3495                 goto out_free;
3496         }
3497
3498         ret = __ftrace_event_enable_disable(file, 1, 1);
3499         if (ret < 0)
3500                 goto out_put;
3501
3502         ret = register_ftrace_function_probe(glob, tr, ops, data);
3503         /*
3504          * On success, the above returns the number of functions enabled,
3505          * but it returns zero if it did not find any functions.
3506          * Consider no functions a failure too.
3507          */
3508         if (!ret) {
3509                 ret = -ENOENT;
3510                 goto out_disable;
3511         } else if (ret < 0)
3512                 goto out_disable;
3513         /* Just return zero, not the number of enabled functions */
3514         ret = 0;
3515  out:
3516         mutex_unlock(&event_mutex);
3517         return ret;
3518
3519  out_disable:
3520         __ftrace_event_enable_disable(file, 0, 1);
3521  out_put:
3522         trace_event_put_ref(file->event_call);
3523  out_free:
3524         kfree(data);
3525         goto out;
3526 }
3527
3528 static struct ftrace_func_command event_enable_cmd = {
3529         .name                   = ENABLE_EVENT_STR,
3530         .func                   = event_enable_func,
3531 };
3532
3533 static struct ftrace_func_command event_disable_cmd = {
3534         .name                   = DISABLE_EVENT_STR,
3535         .func                   = event_enable_func,
3536 };
3537
3538 static __init int register_event_cmds(void)
3539 {
3540         int ret;
3541
3542         ret = register_ftrace_command(&event_enable_cmd);
3543         if (WARN_ON(ret < 0))
3544                 return ret;
3545         ret = register_ftrace_command(&event_disable_cmd);
3546         if (WARN_ON(ret < 0))
3547                 unregister_ftrace_command(&event_enable_cmd);
3548         return ret;
3549 }
3550 #else
3551 static inline int register_event_cmds(void) { return 0; }
3552 #endif /* CONFIG_DYNAMIC_FTRACE */
3553
3554 /*
3555  * The top level array and trace arrays created by boot-time tracing
3556  * have already had their trace_event_file descriptors created in order
3557  * to allow for early events to be recorded.
3558  * This function is called after tracefs has been initialized, and we
3559  * now have to create the files associated with those events.
3560  */
3561 static void __trace_early_add_event_dirs(struct trace_array *tr)
3562 {
3563         struct trace_event_file *file;
3564         int ret;
3565
3567         list_for_each_entry(file, &tr->events, list) {
3568                 ret = event_create_dir(tr->event_dir, file);
3569                 if (ret < 0)
3570                         pr_warn("Could not create directory for event %s\n",
3571                                 trace_event_name(file->event_call));
3572         }
3573 }
3574
3575 /*
3576  * For early boot up, the top trace array and the trace arrays created
3577  * by boot-time tracing need to have a list of events that can be
3578  * enabled. This must be done before the filesystem is set up in order
3579  * to allow events to be traced early.
3580  */
3581 void __trace_early_add_events(struct trace_array *tr)
3582 {
3583         struct trace_event_call *call;
3584         int ret;
3585
3586         list_for_each_entry(call, &ftrace_events, list) {
3587                 /* Early boot up should not have any modules loaded */
3588                 if (!(call->flags & TRACE_EVENT_FL_DYNAMIC) &&
3589                     WARN_ON_ONCE(call->module))
3590                         continue;
3591
3592                 ret = __trace_early_add_new_event(call, tr);
3593                 if (ret < 0)
3594                         pr_warn("Could not create early event %s\n",
3595                                 trace_event_name(call));
3596         }
3597 }
3598
3599 /* Remove the event directory structure for a trace directory. */
3600 static void
3601 __trace_remove_event_dirs(struct trace_array *tr)
3602 {
3603         struct trace_event_file *file, *next;
3604
3605         list_for_each_entry_safe(file, next, &tr->events, list)
3606                 remove_event_file_dir(file);
3607 }
3608
3609 static void __add_event_to_tracers(struct trace_event_call *call)
3610 {
3611         struct trace_array *tr;
3612
3613         list_for_each_entry(tr, &ftrace_trace_arrays, list)
3614                 __trace_add_new_event(call, tr);
3615 }
3616
3617 extern struct trace_event_call *__start_ftrace_events[];
3618 extern struct trace_event_call *__stop_ftrace_events[];
3619
3620 static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;
3621
3622 static __init int setup_trace_event(char *str)
3623 {
3624         strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
3625         ring_buffer_expanded = true;
3626         disable_tracing_selftest("running event tracing");
3627
3628         return 1;
3629 }
3630 __setup("trace_event=", setup_trace_event);
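
/*
 * The trace_event= boot parameter takes a comma-separated list of
 * events, each given as <event> or <system>:<event>, for example:
 *
 *	trace_event=sched:sched_switch,irq_handler_entry
 *
 * The saved buffer is consumed by early_enable_events() below.
 */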
3631
3632 /* Expects to have event_mutex held when called */
3633 static int
3634 create_event_toplevel_files(struct dentry *parent, struct trace_array *tr)
3635 {
3636         struct dentry *d_events;
3637         struct dentry *entry;
3638
3639         entry = trace_create_file("set_event", TRACE_MODE_WRITE, parent,
3640                                   tr, &ftrace_set_event_fops);
3641         if (!entry)
3642                 return -ENOMEM;
3643
3644         d_events = tracefs_create_dir("events", parent);
3645         if (!d_events) {
3646                 pr_warn("Could not create tracefs 'events' directory\n");
3647                 return -ENOMEM;
3648         }
3649
3650         entry = trace_create_file("enable", TRACE_MODE_WRITE, d_events,
3651                                   tr, &ftrace_tr_enable_fops);
3652         if (!entry)
3653                 return -ENOMEM;
3654
3655         /* These are not as crucial; just warn if they are not created */
3656
3657         trace_create_file("set_event_pid", TRACE_MODE_WRITE, parent,
3658                           tr, &ftrace_set_event_pid_fops);
3659
3660         trace_create_file("set_event_notrace_pid",
3661                           TRACE_MODE_WRITE, parent, tr,
3662                           &ftrace_set_event_notrace_pid_fops);
3663
3664         /* ring buffer internal formats */
3665         trace_create_file("header_page", TRACE_MODE_READ, d_events,
3666                                   ring_buffer_print_page_header,
3667                                   &ftrace_show_header_fops);
3668
3669         trace_create_file("header_event", TRACE_MODE_READ, d_events,
3670                                   ring_buffer_print_entry_header,
3671                                   &ftrace_show_header_fops);
3672
3673         tr->event_dir = d_events;
3674
3675         return 0;
3676 }
3677
3678 /**
3679  * event_trace_add_tracer - add an instance of a trace_array to events
3680  * @parent: The parent dentry to place the files/directories for events in
3681  * @tr: The trace array associated with these events
3682  *
3683  * When a new instance is created, it needs to set up its events
3684  * directory, as well as other files associated with events. It also
3685  * creates the event hierarchy in the @parent/events directory.
3686  *
3687  * Returns 0 on success.
3688  *
3689  * Must be called with event_mutex held.
3690  */
3691 int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr)
3692 {
3693         int ret;
3694
3695         lockdep_assert_held(&event_mutex);
3696
3697         ret = create_event_toplevel_files(parent, tr);
3698         if (ret)
3699                 goto out;
3700
3701         down_write(&trace_event_sem);
3702         /* If tr's event list is already populated, it was initialized at early boot. */
3703         if (unlikely(!list_empty(&tr->events)))
3704                 __trace_early_add_event_dirs(tr);
3705         else
3706                 __trace_add_event_dirs(tr);
3707         up_write(&trace_event_sem);
3708
3709  out:
3710         return ret;
3711 }
3712
3713 /*
3714  * The top trace array already had its file descriptors created.
3715  * Now the files themselves need to be created.
3716  */
3717 static __init int
3718 early_event_add_tracer(struct dentry *parent, struct trace_array *tr)
3719 {
3720         int ret;
3721
3722         mutex_lock(&event_mutex);
3723
3724         ret = create_event_toplevel_files(parent, tr);
3725         if (ret)
3726                 goto out_unlock;
3727
3728         down_write(&trace_event_sem);
3729         __trace_early_add_event_dirs(tr);
3730         up_write(&trace_event_sem);
3731
3732  out_unlock:
3733         mutex_unlock(&event_mutex);
3734
3735         return ret;
3736 }
3737
3738 /* Must be called with event_mutex held */
3739 int event_trace_del_tracer(struct trace_array *tr)
3740 {
3741         lockdep_assert_held(&event_mutex);
3742
3743         /* Disable any event triggers and associated soft-disabled events */
3744         clear_event_triggers(tr);
3745
3746         /* Clear the pid list */
3747         __ftrace_clear_event_pids(tr, TRACE_PIDS | TRACE_NO_PIDS);
3748
3749         /* Disable any running events */
3750         __ftrace_set_clr_event_nolock(tr, NULL, NULL, NULL, 0);
3751
3752         /* Make sure no more events are being executed */
3753         tracepoint_synchronize_unregister();
3754
3755         down_write(&trace_event_sem);
3756         __trace_remove_event_dirs(tr);
3757         tracefs_remove(tr->event_dir);
3758         up_write(&trace_event_sem);
3759
3760         tr->event_dir = NULL;
3761
3762         return 0;
3763 }
3764
3765 static __init int event_trace_memsetup(void)
3766 {
3767         field_cachep = KMEM_CACHE(ftrace_event_field, SLAB_PANIC);
3768         file_cachep = KMEM_CACHE(trace_event_file, SLAB_PANIC);
3769         return 0;
3770 }
3771
3772 __init void
3773 early_enable_events(struct trace_array *tr, char *buf, bool disable_first)
3774 {
3775         char *token;
3776         int ret;
3777
3778         while (true) {
3779                 token = strsep(&buf, ",");
3780
3781                 if (!token)
3782                         break;
3783
3784                 if (*token) {
3785                         /* Restarting syscalls requires that we stop them first */
3786                         if (disable_first)
3787                                 ftrace_set_clr_event(tr, token, 0);
3788
3789                         ret = ftrace_set_clr_event(tr, token, 1);
3790                         if (ret)
3791                                 pr_warn("Failed to enable trace event: %s\n", token);
3792                 }
3793
3794                 /* Put back the comma to allow this to be called again */
3795                 if (buf)
3796                         *(buf - 1) = ',';
3797         }
3798 }
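
/*
 * strsep() replaces each ',' it consumes with '\0', which is why the
 * loop above puts the comma back: it keeps bootup_event_buf intact so
 * that event_trace_enable_again() can walk the same string later.
 */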
3799
3800 static __init int event_trace_enable(void)
3801 {
3802         struct trace_array *tr = top_trace_array();
3803         struct trace_event_call **iter, *call;
3804         int ret;
3805
3806         if (!tr)
3807                 return -ENODEV;
3808
3809         for_each_event(iter, __start_ftrace_events, __stop_ftrace_events) {
3810
3811                 call = *iter;
3812                 ret = event_init(call);
3813                 if (!ret)
3814                         list_add(&call->list, &ftrace_events);
3815         }
3816
3817         register_trigger_cmds();
3818
3819         /*
3820          * We need the top trace array to have a working set of trace
3821          * points at early init, before the debug files and directories
3822          * are created. Create the file entries now, and attach them
3823          * to the actual file dentries later.
3824          */
3825         __trace_early_add_events(tr);
3826
3827         early_enable_events(tr, bootup_event_buf, false);
3828
3829         trace_printk_start_comm();
3830
3831         register_event_cmds();
3832
3834         return 0;
3835 }
3836
3837 /*
3838  * event_trace_enable() is called from trace_event_init() first to
3839  * initialize events and perhaps start any events that are on the
3840  * command line. Unfortunately, there are some events that will not
3841  * start this early, like the system call tracepoints that need
3842  * to set the %SYSCALL_WORK_SYSCALL_TRACEPOINT flag of pid 1. But
3843  * event_trace_enable() is called before pid 1 starts, so the flag
3844  * is never set and the syscall tracepoints are never reached, even
3845  * though the events are enabled (and do nothing).
3846  */
3847 static __init int event_trace_enable_again(void)
3848 {
3849         struct trace_array *tr;
3850
3851         tr = top_trace_array();
3852         if (!tr)
3853                 return -ENODEV;
3854
3855         early_enable_events(tr, bootup_event_buf, true);
3856
3857         return 0;
3858 }
3859
3860 early_initcall(event_trace_enable_again);
3861
3862 /* Init fields that are not related to tracefs */
3863 static __init int event_trace_init_fields(void)
3864 {
3865         if (trace_define_generic_fields())
3866                 pr_warn("tracing: Failed to allocate generic fields\n");
3867
3868         if (trace_define_common_fields())
3869                 pr_warn("tracing: Failed to allocate common fields\n");
3870
3871         return 0;
3872 }
3873
3874 __init int event_trace_init(void)
3875 {
3876         struct trace_array *tr;
3877         int ret;
3878
3879         tr = top_trace_array();
3880         if (!tr)
3881                 return -ENODEV;
3882
3883         trace_create_file("available_events", TRACE_MODE_READ,
3884                           NULL, tr, &ftrace_avail_fops);
3885
3886         ret = early_event_add_tracer(NULL, tr);
3887         if (ret)
3888                 return ret;
3889
3890 #ifdef CONFIG_MODULES
3891         ret = register_module_notifier(&trace_module_nb);
3892         if (ret)
3893                 pr_warn("Failed to register trace events module notifier\n");
3894 #endif
3895
3896         eventdir_initialized = true;
3897
3898         return 0;
3899 }
3900
3901 void __init trace_event_init(void)
3902 {
3903         event_trace_memsetup();
3904         init_ftrace_syscalls();
3905         event_trace_enable();
3906         event_trace_init_fields();
3907 }
3908
3909 #ifdef CONFIG_EVENT_TRACE_STARTUP_TEST
3910
3911 static DEFINE_SPINLOCK(test_spinlock);
3912 static DEFINE_SPINLOCK(test_spinlock_irq);
3913 static DEFINE_MUTEX(test_mutex);
3914
3915 static __init void test_work(struct work_struct *dummy)
3916 {
3917         spin_lock(&test_spinlock);
3918         spin_lock_irq(&test_spinlock_irq);
3919         udelay(1);
3920         spin_unlock_irq(&test_spinlock_irq);
3921         spin_unlock(&test_spinlock);
3922
3923         mutex_lock(&test_mutex);
3924         msleep(1);
3925         mutex_unlock(&test_mutex);
3926 }
3927
3928 static __init int event_test_thread(void *unused)
3929 {
3930         void *test_malloc;
3931
3932         test_malloc = kmalloc(1234, GFP_KERNEL);
3933         if (!test_malloc)
3934                 pr_info("failed to kmalloc\n");
3935
3936         schedule_on_each_cpu(test_work);
3937
3938         kfree(test_malloc);
3939
3940         set_current_state(TASK_INTERRUPTIBLE);
3941         while (!kthread_should_stop()) {
3942                 schedule();
3943                 set_current_state(TASK_INTERRUPTIBLE);
3944         }
3945         __set_current_state(TASK_RUNNING);
3946
3947         return 0;
3948 }
3949
3950 /*
3951  * Do various things that may trigger events.
3952  */
3953 static __init void event_test_stuff(void)
3954 {
3955         struct task_struct *test_thread;
3956
3957         test_thread = kthread_run(event_test_thread, NULL, "test-events");
3958         msleep(1);
3959         kthread_stop(test_thread);
3960 }
3961
3962 /*
3963  * For every trace event defined, we will test each trace point separately,
3964  * and then by groups, and finally all trace points.
3965  */
3966 static __init void event_trace_self_tests(void)
3967 {
3968         struct trace_subsystem_dir *dir;
3969         struct trace_event_file *file;
3970         struct trace_event_call *call;
3971         struct event_subsystem *system;
3972         struct trace_array *tr;
3973         int ret;
3974
3975         tr = top_trace_array();
3976         if (!tr)
3977                 return;
3978
3979         pr_info("Running tests on trace events:\n");
3980
3981         list_for_each_entry(file, &tr->events, list) {
3982
3983                 call = file->event_call;
3984
3985                 /* Only test those that have a probe */
3986                 if (!call->class || !call->class->probe)
3987                         continue;
3988
3989 /*
3990  * Testing syscall events here is pretty useless, but
3991  * we still do it if configured, but it is time consuming.
3992  * What we really need is a user thread to perform the
3993  * syscalls as we test.
3994  */
3995 #ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
3996                 if (call->class->system &&
3997                     strcmp(call->class->system, "syscalls") == 0)
3998                         continue;
3999 #endif
4000
4001                 pr_info("Testing event %s: ", trace_event_name(call));
4002
4003                 /*
4004                  * If an event is already enabled, someone is using
4005                  * it and the self test should not be on.
4006                  */
4007                 if (file->flags & EVENT_FILE_FL_ENABLED) {
4008                         pr_warn("Enabled event during self test!\n");
4009                         WARN_ON_ONCE(1);
4010                         continue;
4011                 }
4012
4013                 ftrace_event_enable_disable(file, 1);
4014                 event_test_stuff();
4015                 ftrace_event_enable_disable(file, 0);
4016
4017                 pr_cont("OK\n");
4018         }
4019
4020         /* Now test at the sub system level */
4021
4022         pr_info("Running tests on trace event systems:\n");
4023
4024         list_for_each_entry(dir, &tr->systems, list) {
4025
4026                 system = dir->subsystem;
4027
4028                 /* the ftrace system is special, skip it */
4029                 if (strcmp(system->name, "ftrace") == 0)
4030                         continue;
4031
4032                 pr_info("Testing event system %s: ", system->name);
4033
4034                 ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 1);
4035                 if (WARN_ON_ONCE(ret)) {
4036                         pr_warn("error enabling system %s\n",
4037                                 system->name);
4038                         continue;
4039                 }
4040
4041                 event_test_stuff();
4042
4043                 ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 0);
4044                 if (WARN_ON_ONCE(ret)) {
4045                         pr_warn("error disabling system %s\n",
4046                                 system->name);
4047                         continue;
4048                 }
4049
4050                 pr_cont("OK\n");
4051         }
4052
4053         /* Test with all events enabled */
4054
4055         pr_info("Running tests on all trace events:\n");
4056         pr_info("Testing all events: ");
4057
4058         ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 1);
4059         if (WARN_ON_ONCE(ret)) {
4060                 pr_warn("error enabling all events\n");
4061                 return;
4062         }
4063
4064         event_test_stuff();
4065
4066         /* Disable all events again */
4067         ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 0);
4068         if (WARN_ON_ONCE(ret)) {
4069                 pr_warn("error disabling all events\n");
4070                 return;
4071         }
4072
4073         pr_cont("OK\n");
4074 }
4075
4076 #ifdef CONFIG_FUNCTION_TRACER
4077
4078 static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);
4079
4080 static struct trace_event_file event_trace_file __initdata;
4081
4082 static void __init
4083 function_test_events_call(unsigned long ip, unsigned long parent_ip,
4084                           struct ftrace_ops *op, struct ftrace_regs *regs)
4085 {
4086         struct trace_buffer *buffer;
4087         struct ring_buffer_event *event;
4088         struct ftrace_entry *entry;
4089         unsigned int trace_ctx;
4090         long disabled;
4091         int cpu;
4092
4093         trace_ctx = tracing_gen_ctx();
4094         preempt_disable_notrace();
4095         cpu = raw_smp_processor_id();
4096         disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));
4097
4098         if (disabled != 1)
4099                 goto out;
4100
4101         event = trace_event_buffer_lock_reserve(&buffer, &event_trace_file,
4102                                                 TRACE_FN, sizeof(*entry),
4103                                                 trace_ctx);
4104         if (!event)
4105                 goto out;
4106         entry   = ring_buffer_event_data(event);
4107         entry->ip                       = ip;
4108         entry->parent_ip                = parent_ip;
4109
4110         event_trigger_unlock_commit(&event_trace_file, buffer, event,
4111                                     entry, trace_ctx);
4112  out:
4113         atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
4114         preempt_enable_notrace();
4115 }
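
/*
 * The atomic_inc_return()/atomic_dec() pair above is a per-CPU
 * recursion guard: if this callback re-enters on the same CPU (e.g.
 * through a traced function that it itself calls), disabled will be
 * greater than 1 and the event is skipped.
 */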
4116
4117 static struct ftrace_ops trace_ops __initdata = {
4119         .func = function_test_events_call,
4120 };
4121
4122 static __init void event_trace_self_test_with_function(void)
4123 {
4124         int ret;
4125
4126         event_trace_file.tr = top_trace_array();
4127         if (WARN_ON(!event_trace_file.tr))
4128                 return;
4129
4130         ret = register_ftrace_function(&trace_ops);
4131         if (WARN_ON(ret < 0)) {
4132                 pr_info("Failed to enable function tracer for event tests\n");
4133                 return;
4134         }
4135         pr_info("Running tests again, along with the function tracer\n");
4136         event_trace_self_tests();
4137         unregister_ftrace_function(&trace_ops);
4138 }
4139 #else
4140 static __init void event_trace_self_test_with_function(void)
4141 {
4142 }
4143 #endif
4144
4145 static __init int event_trace_self_tests_init(void)
4146 {
4147         if (!tracing_selftest_disabled) {
4148                 event_trace_self_tests();
4149                 event_trace_self_test_with_function();
4150         }
4151
4152         return 0;
4153 }
4154
4155 late_initcall(event_trace_self_tests_init);
4156
4157 #endif