1 /*
2  * event tracer
3  *
4  * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
5  *
6  *  - Added format output of fields of the trace point.
7  *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
8  *
9  */
10
11 #define pr_fmt(fmt) fmt
12
13 #include <linux/workqueue.h>
14 #include <linux/spinlock.h>
15 #include <linux/kthread.h>
16 #include <linux/tracefs.h>
17 #include <linux/uaccess.h>
18 #include <linux/vmalloc.h>
19 #include <linux/module.h>
20 #include <linux/ctype.h>
21 #include <linux/sort.h>
22 #include <linux/slab.h>
23 #include <linux/delay.h>
24
25 #include <trace/events/sched.h>
26
27 #include <asm/setup.h>
28
29 #include "trace_output.h"
30
31 #undef TRACE_SYSTEM
32 #define TRACE_SYSTEM "TRACE_SYSTEM"
33
34 DEFINE_MUTEX(event_mutex);
35
36 LIST_HEAD(ftrace_events);
37 static LIST_HEAD(ftrace_generic_fields);
38 static LIST_HEAD(ftrace_common_fields);
39
40 #define GFP_TRACE (GFP_KERNEL | __GFP_ZERO)
41
42 static struct kmem_cache *field_cachep;
43 static struct kmem_cache *file_cachep;
44
45 static inline int system_refcount(struct event_subsystem *system)
46 {
47         return system->ref_count;
48 }
49
50 static int system_refcount_inc(struct event_subsystem *system)
51 {
52         return system->ref_count++;
53 }
54
55 static int system_refcount_dec(struct event_subsystem *system)
56 {
57         return --system->ref_count;
58 }
59
60 /* Double loops, do not use break, only goto's work */
61 #define do_for_each_event_file(tr, file)                        \
62         list_for_each_entry(tr, &ftrace_trace_arrays, list) {   \
63                 list_for_each_entry(file, &tr->events, list)
64
65 #define do_for_each_event_file_safe(tr, file)                   \
66         list_for_each_entry(tr, &ftrace_trace_arrays, list) {   \
67                 struct trace_event_file *___n;                          \
68                 list_for_each_entry_safe(file, ___n, &tr->events, list)
69
70 #define while_for_each_event_file()             \
71         }
72
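/*
 * Usage sketch, as in trace_event_enable_cmd_record() below;
 * while_for_each_event_file() supplies the brace that closes the
 * outer loop, and a break would only leave the inner loop, which
 * is why the comment above says only goto works for bailing out:
 *
 *	do_for_each_event_file(tr, file) {
 *		if (!(file->flags & EVENT_FILE_FL_ENABLED))
 *			continue;
 *		...
 *	} while_for_each_event_file();
 */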
73 static struct list_head *
74 trace_get_fields(struct trace_event_call *event_call)
75 {
76         if (!event_call->class->get_fields)
77                 return &event_call->class->fields;
78         return event_call->class->get_fields(event_call);
79 }
80
81 static struct ftrace_event_field *
82 __find_event_field(struct list_head *head, char *name)
83 {
84         struct ftrace_event_field *field;
85
86         list_for_each_entry(field, head, link) {
87                 if (!strcmp(field->name, name))
88                         return field;
89         }
90
91         return NULL;
92 }
93
94 struct ftrace_event_field *
95 trace_find_event_field(struct trace_event_call *call, char *name)
96 {
97         struct ftrace_event_field *field;
98         struct list_head *head;
99
100         head = trace_get_fields(call);
101         field = __find_event_field(head, name);
102         if (field)
103                 return field;
104
105         field = __find_event_field(&ftrace_generic_fields, name);
106         if (field)
107                 return field;
108
109         return __find_event_field(&ftrace_common_fields, name);
110 }
111
112 static int __trace_define_field(struct list_head *head, const char *type,
113                                 const char *name, int offset, int size,
114                                 int is_signed, int filter_type)
115 {
116         struct ftrace_event_field *field;
117
118         field = kmem_cache_alloc(field_cachep, GFP_TRACE);
119         if (!field)
120                 return -ENOMEM;
121
122         field->name = name;
123         field->type = type;
124
125         if (filter_type == FILTER_OTHER)
126                 field->filter_type = filter_assign_type(type);
127         else
128                 field->filter_type = filter_type;
129
130         field->offset = offset;
131         field->size = size;
132         field->is_signed = is_signed;
133
134         list_add(&field->link, head);
135
136         return 0;
137 }
138
139 int trace_define_field(struct trace_event_call *call, const char *type,
140                        const char *name, int offset, int size, int is_signed,
141                        int filter_type)
142 {
143         struct list_head *head;
144
145         if (WARN_ON(!call->class))
146                 return 0;
147
148         head = trace_get_fields(call);
149         return __trace_define_field(head, type, name, offset, size,
150                                     is_signed, filter_type);
151 }
152 EXPORT_SYMBOL_GPL(trace_define_field);
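/*
 * Sketch of a caller (hypothetical event struct, not from this file):
 * a define_fields callback describing an "int count" payload that
 * follows the common trace_entry header could do
 *
 *	struct my_entry {
 *		struct trace_entry	ent;
 *		int			count;
 *	};
 *
 *	ret = trace_define_field(call, "int", "count",
 *				 offsetof(struct my_entry, count),
 *				 sizeof(int), is_signed_type(int),
 *				 FILTER_OTHER);
 */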
153
154 #define __generic_field(type, item, filter_type)                        \
155         ret = __trace_define_field(&ftrace_generic_fields, #type,       \
156                                    #item, 0, 0, is_signed_type(type),   \
157                                    filter_type);                        \
158         if (ret)                                                        \
159                 return ret;
160
161 #define __common_field(type, item)                                      \
162         ret = __trace_define_field(&ftrace_common_fields, #type,        \
163                                    "common_" #item,                     \
164                                    offsetof(typeof(ent), item),         \
165                                    sizeof(ent.item),                    \
166                                    is_signed_type(type), FILTER_OTHER); \
167         if (ret)                                                        \
168                 return ret;
169
170 static int trace_define_generic_fields(void)
171 {
172         int ret;
173
174         __generic_field(int, CPU, FILTER_CPU);
175         __generic_field(int, cpu, FILTER_CPU);
176         __generic_field(char *, COMM, FILTER_COMM);
177         __generic_field(char *, comm, FILTER_COMM);
178
179         return ret;
180 }
181
182 static int trace_define_common_fields(void)
183 {
184         int ret;
185         struct trace_entry ent;
186
187         __common_field(unsigned short, type);
188         __common_field(unsigned char, flags);
189         __common_field(unsigned char, preempt_count);
190         __common_field(int, pid);
191
192         return ret;
193 }
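/*
 * These are the fields every event's "format" file begins with; with
 * the struct trace_entry layout above this prints as, e.g.:
 *
 *	field:unsigned short common_type;	offset:0;	size:2;	signed:0;
 *	field:unsigned char common_flags;	offset:2;	size:1;	signed:0;
 *	field:unsigned char common_preempt_count;	offset:3;	size:1;	signed:0;
 *	field:int common_pid;	offset:4;	size:4;	signed:1;
 */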
194
195 static void trace_destroy_fields(struct trace_event_call *call)
196 {
197         struct ftrace_event_field *field, *next;
198         struct list_head *head;
199
200         head = trace_get_fields(call);
201         list_for_each_entry_safe(field, next, head, link) {
202                 list_del(&field->link);
203                 kmem_cache_free(field_cachep, field);
204         }
205 }
206
207 /*
208  * run-time version of trace_event_get_offsets_<call>() that returns the last
209  * accessible offset of trace fields excluding __dynamic_array bytes
210  */
211 int trace_event_get_offsets(struct trace_event_call *call)
212 {
213         struct ftrace_event_field *tail;
214         struct list_head *head;
215
216         head = trace_get_fields(call);
217         /*
218          * head->next points to the last field with the largest offset,
219          * since it was added last by trace_define_field()
220          */
221         tail = list_first_entry(head, struct ftrace_event_field, link);
222         return tail->offset + tail->size;
223 }
224
225 int trace_event_raw_init(struct trace_event_call *call)
226 {
227         int id;
228
229         id = register_trace_event(&call->event);
230         if (!id)
231                 return -ENODEV;
232
233         return 0;
234 }
235 EXPORT_SYMBOL_GPL(trace_event_raw_init);
236
237 bool trace_event_ignore_this_pid(struct trace_event_file *trace_file)
238 {
239         struct trace_array *tr = trace_file->tr;
240         struct trace_array_cpu *data;
241         struct trace_pid_list *pid_list;
242
243         pid_list = rcu_dereference_sched(tr->filtered_pids);
244         if (!pid_list)
245                 return false;
246
247         data = this_cpu_ptr(tr->trace_buffer.data);
248
249         return data->ignore_pid;
250 }
251 EXPORT_SYMBOL_GPL(trace_event_ignore_this_pid);
252
253 void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer,
254                                  struct trace_event_file *trace_file,
255                                  unsigned long len)
256 {
257         struct trace_event_call *event_call = trace_file->event_call;
258
259         if ((trace_file->flags & EVENT_FILE_FL_PID_FILTER) &&
260             trace_event_ignore_this_pid(trace_file))
261                 return NULL;
262
263         local_save_flags(fbuffer->flags);
264         fbuffer->pc = preempt_count();
265         fbuffer->trace_file = trace_file;
266
267         fbuffer->event =
268                 trace_event_buffer_lock_reserve(&fbuffer->buffer, trace_file,
269                                                 event_call->event.type, len,
270                                                 fbuffer->flags, fbuffer->pc);
271         if (!fbuffer->event)
272                 return NULL;
273
274         fbuffer->entry = ring_buffer_event_data(fbuffer->event);
275         return fbuffer->entry;
276 }
277 EXPORT_SYMBOL_GPL(trace_event_buffer_reserve);
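/*
 * Typical probe usage (hypothetical entry type), paired with
 * trace_event_buffer_commit() below; this mirrors what the
 * TRACE_EVENT()-generated probes do:
 *
 *	struct trace_event_buffer fbuffer;
 *	struct my_entry *entry;
 *
 *	entry = trace_event_buffer_reserve(&fbuffer, trace_file,
 *					   sizeof(*entry));
 *	if (!entry)
 *		return;
 *	entry->count = count;
 *	trace_event_buffer_commit(&fbuffer);
 */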
278
279 static DEFINE_SPINLOCK(tracepoint_iter_lock);
280
281 static void output_printk(struct trace_event_buffer *fbuffer)
282 {
283         struct trace_event_call *event_call;
284         struct trace_event *event;
285         unsigned long flags;
286         struct trace_iterator *iter = tracepoint_print_iter;
287
288         if (!iter)
289                 return;
290
291         event_call = fbuffer->trace_file->event_call;
292         if (!event_call || !event_call->event.funcs ||
293             !event_call->event.funcs->trace)
294                 return;
295
296         event = &fbuffer->trace_file->event_call->event;
297
298         spin_lock_irqsave(&tracepoint_iter_lock, flags);
299         trace_seq_init(&iter->seq);
300         iter->ent = fbuffer->entry;
301         event_call->event.funcs->trace(iter, 0, event);
302         trace_seq_putc(&iter->seq, 0);
303         printk("%s", iter->seq.buffer);
304
305         spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
306 }
307
308 void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
309 {
310         if (tracepoint_printk)
311                 output_printk(fbuffer);
312
313         event_trigger_unlock_commit(fbuffer->trace_file, fbuffer->buffer,
314                                     fbuffer->event, fbuffer->entry,
315                                     fbuffer->flags, fbuffer->pc);
316 }
317 EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
318
319 int trace_event_reg(struct trace_event_call *call,
320                     enum trace_reg type, void *data)
321 {
322         struct trace_event_file *file = data;
323
324         WARN_ON(!(call->flags & TRACE_EVENT_FL_TRACEPOINT));
325         switch (type) {
326         case TRACE_REG_REGISTER:
327                 return tracepoint_probe_register(call->tp,
328                                                  call->class->probe,
329                                                  file);
330         case TRACE_REG_UNREGISTER:
331                 tracepoint_probe_unregister(call->tp,
332                                             call->class->probe,
333                                             file);
334                 return 0;
335
336 #ifdef CONFIG_PERF_EVENTS
337         case TRACE_REG_PERF_REGISTER:
338                 return tracepoint_probe_register(call->tp,
339                                                  call->class->perf_probe,
340                                                  call);
341         case TRACE_REG_PERF_UNREGISTER:
342                 tracepoint_probe_unregister(call->tp,
343                                             call->class->perf_probe,
344                                             call);
345                 return 0;
346         case TRACE_REG_PERF_OPEN:
347         case TRACE_REG_PERF_CLOSE:
348         case TRACE_REG_PERF_ADD:
349         case TRACE_REG_PERF_DEL:
350                 return 0;
351 #endif
352         }
353         return 0;
354 }
355 EXPORT_SYMBOL_GPL(trace_event_reg);
356
357 void trace_event_enable_cmd_record(bool enable)
358 {
359         struct trace_event_file *file;
360         struct trace_array *tr;
361
362         mutex_lock(&event_mutex);
363         do_for_each_event_file(tr, file) {
364
365                 if (!(file->flags & EVENT_FILE_FL_ENABLED))
366                         continue;
367
368                 if (enable) {
369                         tracing_start_cmdline_record();
370                         set_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
371                 } else {
372                         tracing_stop_cmdline_record();
373                         clear_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
374                 }
375         } while_for_each_event_file();
376         mutex_unlock(&event_mutex);
377 }
378
379 static int __ftrace_event_enable_disable(struct trace_event_file *file,
380                                          int enable, int soft_disable)
381 {
382         struct trace_event_call *call = file->event_call;
383         struct trace_array *tr = file->tr;
384         unsigned long file_flags = file->flags;
385         int ret = 0;
386         int disable;
387
388         switch (enable) {
389         case 0:
390                 /*
391                  * When soft_disable is set and enable is cleared, the sm_ref
392                  * reference counter is decremented. If it reaches 0, we want
393                  * to clear the SOFT_DISABLED flag but leave the event in the
394                  * state that it was. That is, if the event was enabled and
395                  * SOFT_DISABLED isn't set, then do nothing. But if SOFT_DISABLED
396                  * is set we do not want the event to be enabled before we
397                  * clear the bit.
398                  *
399                  * When soft_disable is not set but the SOFT_MODE flag is,
400                  * we do nothing. Do not disable the tracepoint, otherwise
401                  * "soft enable"s (clearing the SOFT_DISABLED bit) won't work.
402                  */
403                 if (soft_disable) {
404                         if (atomic_dec_return(&file->sm_ref) > 0)
405                                 break;
406                         disable = file->flags & EVENT_FILE_FL_SOFT_DISABLED;
407                         clear_bit(EVENT_FILE_FL_SOFT_MODE_BIT, &file->flags);
408                 } else
409                         disable = !(file->flags & EVENT_FILE_FL_SOFT_MODE);
410
411                 if (disable && (file->flags & EVENT_FILE_FL_ENABLED)) {
412                         clear_bit(EVENT_FILE_FL_ENABLED_BIT, &file->flags);
413                         if (file->flags & EVENT_FILE_FL_RECORDED_CMD) {
414                                 tracing_stop_cmdline_record();
415                                 clear_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
416                         }
417                         call->class->reg(call, TRACE_REG_UNREGISTER, file);
418                 }
419                 /* If in SOFT_MODE, just set the SOFT_DISABLED_BIT, else clear it */
420                 if (file->flags & EVENT_FILE_FL_SOFT_MODE)
421                         set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
422                 else
423                         clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
424                 break;
425         case 1:
426                 /*
427                  * When soft_disable is set and enable is set, we want to
428                  * register the tracepoint for the event, but leave the event
429                  * as is. That means, if the event was already enabled, we do
430                  * nothing (but set SOFT_MODE). If the event is disabled, we
431                  * set SOFT_DISABLED before enabling the event tracepoint, so
432                  * it still seems to be disabled.
433                  */
434                 if (!soft_disable)
435                         clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
436                 else {
437                         if (atomic_inc_return(&file->sm_ref) > 1)
438                                 break;
439                         set_bit(EVENT_FILE_FL_SOFT_MODE_BIT, &file->flags);
440                 }
441
442                 if (!(file->flags & EVENT_FILE_FL_ENABLED)) {
443
444                         /* Keep the event disabled when going to SOFT_MODE. */
445                         if (soft_disable)
446                                 set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
447
448                         if (tr->trace_flags & TRACE_ITER_RECORD_CMD) {
449                                 tracing_start_cmdline_record();
450                                 set_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
451                         }
452                         ret = call->class->reg(call, TRACE_REG_REGISTER, file);
453                         if (ret) {
454                                 tracing_stop_cmdline_record();
455                                 pr_info("event trace: Could not enable event "
456                                         "%s\n", trace_event_name(call));
457                                 break;
458                         }
459                         set_bit(EVENT_FILE_FL_ENABLED_BIT, &file->flags);
460
461                         /* WAS_ENABLED gets set but never cleared. */
462                         call->flags |= TRACE_EVENT_FL_WAS_ENABLED;
463                 }
464                 break;
465         }
466
467         /* Enable or disable use of trace_buffered_event */
468         if ((file_flags & EVENT_FILE_FL_SOFT_DISABLED) !=
469             (file->flags & EVENT_FILE_FL_SOFT_DISABLED)) {
470                 if (file->flags & EVENT_FILE_FL_SOFT_DISABLED)
471                         trace_buffered_event_enable();
472                 else
473                         trace_buffered_event_disable();
474         }
475
476         return ret;
477 }
478
479 int trace_event_enable_disable(struct trace_event_file *file,
480                                int enable, int soft_disable)
481 {
482         return __ftrace_event_enable_disable(file, enable, soft_disable);
483 }
484
485 static int ftrace_event_enable_disable(struct trace_event_file *file,
486                                        int enable)
487 {
488         return __ftrace_event_enable_disable(file, enable, 0);
489 }
490
491 static void ftrace_clear_events(struct trace_array *tr)
492 {
493         struct trace_event_file *file;
494
495         mutex_lock(&event_mutex);
496         list_for_each_entry(file, &tr->events, list) {
497                 ftrace_event_enable_disable(file, 0);
498         }
499         mutex_unlock(&event_mutex);
500 }
501
502 /* Shouldn't this be in a header? */
503 extern int pid_max;
504
505 /* Returns true if found in filter */
506 static bool
507 find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
508 {
509         /*
510          * If pid_max changed after filtered_pids was created, we
511          * by default ignore all pids greater than the previous pid_max.
512          */
513         if (search_pid >= filtered_pids->pid_max)
514                 return false;
515
516         return test_bit(search_pid, filtered_pids->pids);
517 }
518
519 static bool
520 ignore_this_task(struct trace_pid_list *filtered_pids, struct task_struct *task)
521 {
522         /*
523          * Return false, because if filtered_pids does not exist,
524          * all pids are good to trace.
525          */
526         if (!filtered_pids)
527                 return false;
528
529         return !find_filtered_pid(filtered_pids, task->pid);
530 }
531
532 static void filter_add_remove_task(struct trace_pid_list *pid_list,
533                                    struct task_struct *self,
534                                    struct task_struct *task)
535 {
536         if (!pid_list)
537                 return;
538
539         /* For forks, we only add if the forking task is listed */
540         if (self) {
541                 if (!find_filtered_pid(pid_list, self->pid))
542                         return;
543         }
544
545         /* Sorry, but we don't support pid_max changing after setting */
546         if (task->pid >= pid_list->pid_max)
547                 return;
548
549         /* "self" is set for forks, and NULL for exits */
550         if (self)
551                 set_bit(task->pid, pid_list->pids);
552         else
553                 clear_bit(task->pid, pid_list->pids);
554 }
555
556 static void
557 event_filter_pid_sched_process_exit(void *data, struct task_struct *task)
558 {
559         struct trace_pid_list *pid_list;
560         struct trace_array *tr = data;
561
562         pid_list = rcu_dereference_sched(tr->filtered_pids);
563         filter_add_remove_task(pid_list, NULL, task);
564 }
565
566 static void
567 event_filter_pid_sched_process_fork(void *data,
568                                     struct task_struct *self,
569                                     struct task_struct *task)
570 {
571         struct trace_pid_list *pid_list;
572         struct trace_array *tr = data;
573
574         pid_list = rcu_dereference_sched(tr->filtered_pids);
575         filter_add_remove_task(pid_list, self, task);
576 }
577
578 void trace_event_follow_fork(struct trace_array *tr, bool enable)
579 {
580         if (enable) {
581                 register_trace_prio_sched_process_fork(event_filter_pid_sched_process_fork,
582                                                        tr, INT_MIN);
583                 register_trace_prio_sched_process_exit(event_filter_pid_sched_process_exit,
584                                                        tr, INT_MAX);
585         } else {
586                 unregister_trace_sched_process_fork(event_filter_pid_sched_process_fork,
587                                                     tr);
588                 unregister_trace_sched_process_exit(event_filter_pid_sched_process_exit,
589                                                     tr);
590         }
591 }
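/*
 * This is driven by the "event-fork" trace option, e.g. (path may
 * vary depending on where tracefs is mounted):
 *
 *	# echo 1 > /sys/kernel/tracing/options/event-fork
 *
 * so children of filtered pids are added to set_event_pid on fork
 * and exiting tasks are removed from it.
 */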
592
593 static void
594 event_filter_pid_sched_switch_probe_pre(void *data, bool preempt,
595                     struct task_struct *prev, struct task_struct *next)
596 {
597         struct trace_array *tr = data;
598         struct trace_pid_list *pid_list;
599
600         pid_list = rcu_dereference_sched(tr->filtered_pids);
601
602         this_cpu_write(tr->trace_buffer.data->ignore_pid,
603                        ignore_this_task(pid_list, prev) &&
604                        ignore_this_task(pid_list, next));
605 }
606
607 static void
608 event_filter_pid_sched_switch_probe_post(void *data, bool preempt,
609                     struct task_struct *prev, struct task_struct *next)
610 {
611         struct trace_array *tr = data;
612         struct trace_pid_list *pid_list;
613
614         pid_list = rcu_dereference_sched(tr->filtered_pids);
615
616         this_cpu_write(tr->trace_buffer.data->ignore_pid,
617                        ignore_this_task(pid_list, next));
618 }
619
620 static void
621 event_filter_pid_sched_wakeup_probe_pre(void *data, struct task_struct *task)
622 {
623         struct trace_array *tr = data;
624         struct trace_pid_list *pid_list;
625
626         /* Nothing to do if we are already tracing */
627         if (!this_cpu_read(tr->trace_buffer.data->ignore_pid))
628                 return;
629
630         pid_list = rcu_dereference_sched(tr->filtered_pids);
631
632         this_cpu_write(tr->trace_buffer.data->ignore_pid,
633                        ignore_this_task(pid_list, task));
634 }
635
636 static void
637 event_filter_pid_sched_wakeup_probe_post(void *data, struct task_struct *task)
638 {
639         struct trace_array *tr = data;
640         struct trace_pid_list *pid_list;
641
642         /* Nothing to do if we are not tracing */
643         if (this_cpu_read(tr->trace_buffer.data->ignore_pid))
644                 return;
645
646         pid_list = rcu_dereference_sched(tr->filtered_pids);
647
648         /* Set tracing if current is enabled */
649         this_cpu_write(tr->trace_buffer.data->ignore_pid,
650                        ignore_this_task(pid_list, current));
651 }
652
653 static void __ftrace_clear_event_pids(struct trace_array *tr)
654 {
655         struct trace_pid_list *pid_list;
656         struct trace_event_file *file;
657         int cpu;
658
659         pid_list = rcu_dereference_protected(tr->filtered_pids,
660                                              lockdep_is_held(&event_mutex));
661         if (!pid_list)
662                 return;
663
664         unregister_trace_sched_switch(event_filter_pid_sched_switch_probe_pre, tr);
665         unregister_trace_sched_switch(event_filter_pid_sched_switch_probe_post, tr);
666
667         unregister_trace_sched_wakeup(event_filter_pid_sched_wakeup_probe_pre, tr);
668         unregister_trace_sched_wakeup(event_filter_pid_sched_wakeup_probe_post, tr);
669
670         unregister_trace_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_pre, tr);
671         unregister_trace_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_post, tr);
672
673         unregister_trace_sched_waking(event_filter_pid_sched_wakeup_probe_pre, tr);
674         unregister_trace_sched_waking(event_filter_pid_sched_wakeup_probe_post, tr);
675
676         list_for_each_entry(file, &tr->events, list) {
677                 clear_bit(EVENT_FILE_FL_PID_FILTER_BIT, &file->flags);
678         }
679
680         for_each_possible_cpu(cpu)
681                 per_cpu_ptr(tr->trace_buffer.data, cpu)->ignore_pid = false;
682
683         rcu_assign_pointer(tr->filtered_pids, NULL);
684
685         /* Wait until all current users of the pid list are done with it */
686         synchronize_sched();
687
688         vfree(pid_list->pids);
689         kfree(pid_list);
690 }
691
692 static void ftrace_clear_event_pids(struct trace_array *tr)
693 {
694         mutex_lock(&event_mutex);
695         __ftrace_clear_event_pids(tr);
696         mutex_unlock(&event_mutex);
697 }
698
699 static void __put_system(struct event_subsystem *system)
700 {
701         struct event_filter *filter = system->filter;
702
703         WARN_ON_ONCE(system_refcount(system) == 0);
704         if (system_refcount_dec(system))
705                 return;
706
707         list_del(&system->list);
708
709         if (filter) {
710                 kfree(filter->filter_string);
711                 kfree(filter);
712         }
713         kfree_const(system->name);
714         kfree(system);
715 }
716
717 static void __get_system(struct event_subsystem *system)
718 {
719         WARN_ON_ONCE(system_refcount(system) == 0);
720         system_refcount_inc(system);
721 }
722
723 static void __get_system_dir(struct trace_subsystem_dir *dir)
724 {
725         WARN_ON_ONCE(dir->ref_count == 0);
726         dir->ref_count++;
727         __get_system(dir->subsystem);
728 }
729
730 static void __put_system_dir(struct trace_subsystem_dir *dir)
731 {
732         WARN_ON_ONCE(dir->ref_count == 0);
733         /* If the subsystem is about to be freed, the dir must be too */
734         WARN_ON_ONCE(system_refcount(dir->subsystem) == 1 && dir->ref_count != 1);
735
736         __put_system(dir->subsystem);
737         if (!--dir->ref_count)
738                 kfree(dir);
739 }
740
741 static void put_system(struct trace_subsystem_dir *dir)
742 {
743         mutex_lock(&event_mutex);
744         __put_system_dir(dir);
745         mutex_unlock(&event_mutex);
746 }
747
748 static void remove_subsystem(struct trace_subsystem_dir *dir)
749 {
750         if (!dir)
751                 return;
752
753         if (!--dir->nr_events) {
754                 tracefs_remove_recursive(dir->entry);
755                 list_del(&dir->list);
756                 __put_system_dir(dir);
757         }
758 }
759
760 static void remove_event_file_dir(struct trace_event_file *file)
761 {
762         struct dentry *dir = file->dir;
763         struct dentry *child;
764
765         if (dir) {
766                 spin_lock(&dir->d_lock);        /* probably unneeded */
767                 list_for_each_entry(child, &dir->d_subdirs, d_child) {
768                         if (d_really_is_positive(child))        /* probably unneeded */
769                                 d_inode(child)->i_private = NULL;
770                 }
771                 spin_unlock(&dir->d_lock);
772
773                 tracefs_remove_recursive(dir);
774         }
775
776         list_del(&file->list);
777         remove_subsystem(file->system);
778         free_event_filter(file->filter);
779         kmem_cache_free(file_cachep, file);
780 }
781
782 /*
783  * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
784  */
785 static int
786 __ftrace_set_clr_event_nolock(struct trace_array *tr, const char *match,
787                               const char *sub, const char *event, int set)
788 {
789         struct trace_event_file *file;
790         struct trace_event_call *call;
791         const char *name;
792         int ret = -EINVAL;
793
794         list_for_each_entry(file, &tr->events, list) {
795
796                 call = file->event_call;
797                 name = trace_event_name(call);
798
799                 if (!name || !call->class || !call->class->reg)
800                         continue;
801
802                 if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
803                         continue;
804
805                 if (match &&
806                     strcmp(match, name) != 0 &&
807                     strcmp(match, call->class->system) != 0)
808                         continue;
809
810                 if (sub && strcmp(sub, call->class->system) != 0)
811                         continue;
812
813                 if (event && strcmp(event, name) != 0)
814                         continue;
815
816                 ftrace_event_enable_disable(file, set);
817
818                 ret = 0;
819         }
820
821         return ret;
822 }
823
824 static int __ftrace_set_clr_event(struct trace_array *tr, const char *match,
825                                   const char *sub, const char *event, int set)
826 {
827         int ret;
828
829         mutex_lock(&event_mutex);
830         ret = __ftrace_set_clr_event_nolock(tr, match, sub, event, set);
831         mutex_unlock(&event_mutex);
832
833         return ret;
834 }
835
836 static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
837 {
838         char *event = NULL, *sub = NULL, *match;
839         int ret;
840
841         /*
842          * The buf format can be <subsystem>:<event-name>
843          *  *:<event-name> means any event by that name.
844          *  :<event-name> is the same.
845          *
846          *  <subsystem>:* means all events in that subsystem
847          *  <subsystem>: means the same.
848          *
849          *  <name> (no ':') means all events in a subsystem with
850          *  the name <name> or any event that matches <name>
851          */
852
853         match = strsep(&buf, ":");
854         if (buf) {
855                 sub = match;
856                 event = buf;
857                 match = NULL;
858
859                 if (!strlen(sub) || strcmp(sub, "*") == 0)
860                         sub = NULL;
861                 if (!strlen(event) || strcmp(event, "*") == 0)
862                         event = NULL;
863         }
864
865         ret = __ftrace_set_clr_event(tr, match, sub, event, set);
866
867         /* Put back the colon to allow this to be called again */
868         if (buf)
869                 *(buf - 1) = ':';
870
871         return ret;
872 }
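/*
 * From user space this is reached through "set_event", e.g.:
 *
 *	# echo 'sched:sched_switch' > /sys/kernel/tracing/set_event
 *	# echo 'irq:*' >> /sys/kernel/tracing/set_event
 *	# echo '!sched:sched_switch' >> /sys/kernel/tracing/set_event
 *
 * where a leading '!' (handled in ftrace_event_write() below)
 * disables the matching events.
 */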
873
874 /**
875  * trace_set_clr_event - enable or disable an event
876  * @system: system name to match (NULL for any system)
877  * @event: event name to match (NULL for all events, within system)
878  * @set: 1 to enable, 0 to disable
879  *
880  * This is a way for other parts of the kernel to enable or disable
881  * event recording.
882  *
883  * Returns 0 on success, -EINVAL if the parameters do not match any
884  * registered events.
885  */
886 int trace_set_clr_event(const char *system, const char *event, int set)
887 {
888         struct trace_array *tr = top_trace_array();
889
890         if (!tr)
891                 return -ENODEV;
892
893         return __ftrace_set_clr_event(tr, NULL, system, event, set);
894 }
895 EXPORT_SYMBOL_GPL(trace_set_clr_event);
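/*
 * In-kernel usage sketch:
 *
 *	ret = trace_set_clr_event("sched", "sched_switch", 1);
 *	...
 *	ret = trace_set_clr_event("sched", "sched_switch", 0);
 */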
896
897 /* 128 bytes (EVENT_BUF_SIZE + 1 is allocated) should be much more than enough */
898 #define EVENT_BUF_SIZE          127
899
900 static ssize_t
901 ftrace_event_write(struct file *file, const char __user *ubuf,
902                    size_t cnt, loff_t *ppos)
903 {
904         struct trace_parser parser;
905         struct seq_file *m = file->private_data;
906         struct trace_array *tr = m->private;
907         ssize_t read, ret;
908
909         if (!cnt)
910                 return 0;
911
912         ret = tracing_update_buffers();
913         if (ret < 0)
914                 return ret;
915
916         if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
917                 return -ENOMEM;
918
919         read = trace_get_user(&parser, ubuf, cnt, ppos);
920
921         if (read >= 0 && trace_parser_loaded(&parser)) {
922                 int set = 1;
923
924                 if (*parser.buffer == '!')
925                         set = 0;
926
927                 parser.buffer[parser.idx] = 0;
928
929                 ret = ftrace_set_clr_event(tr, parser.buffer + !set, set);
930                 if (ret)
931                         goto out_put;
932         }
933
934         ret = read;
935
936  out_put:
937         trace_parser_put(&parser);
938
939         return ret;
940 }
941
942 static void *
943 t_next(struct seq_file *m, void *v, loff_t *pos)
944 {
945         struct trace_event_file *file = v;
946         struct trace_event_call *call;
947         struct trace_array *tr = m->private;
948
949         (*pos)++;
950
951         list_for_each_entry_continue(file, &tr->events, list) {
952                 call = file->event_call;
953                 /*
954                  * The ftrace subsystem is for showing formats only.
955                  * They can not be enabled or disabled via the event files.
956                  */
957                 if (call->class && call->class->reg &&
958                     !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
959                         return file;
960         }
961
962         return NULL;
963 }
964
965 static void *t_start(struct seq_file *m, loff_t *pos)
966 {
967         struct trace_event_file *file;
968         struct trace_array *tr = m->private;
969         loff_t l;
970
971         mutex_lock(&event_mutex);
972
973         file = list_entry(&tr->events, struct trace_event_file, list);
974         for (l = 0; l <= *pos; ) {
975                 file = t_next(m, file, &l);
976                 if (!file)
977                         break;
978         }
979         return file;
980 }
981
982 static void *
983 s_next(struct seq_file *m, void *v, loff_t *pos)
984 {
985         struct trace_event_file *file = v;
986         struct trace_array *tr = m->private;
987
988         (*pos)++;
989
990         list_for_each_entry_continue(file, &tr->events, list) {
991                 if (file->flags & EVENT_FILE_FL_ENABLED)
992                         return file;
993         }
994
995         return NULL;
996 }
997
998 static void *s_start(struct seq_file *m, loff_t *pos)
999 {
1000         struct trace_event_file *file;
1001         struct trace_array *tr = m->private;
1002         loff_t l;
1003
1004         mutex_lock(&event_mutex);
1005
1006         file = list_entry(&tr->events, struct trace_event_file, list);
1007         for (l = 0; l <= *pos; ) {
1008                 file = s_next(m, file, &l);
1009                 if (!file)
1010                         break;
1011         }
1012         return file;
1013 }
1014
1015 static int t_show(struct seq_file *m, void *v)
1016 {
1017         struct trace_event_file *file = v;
1018         struct trace_event_call *call = file->event_call;
1019
1020         if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
1021                 seq_printf(m, "%s:", call->class->system);
1022         seq_printf(m, "%s\n", trace_event_name(call));
1023
1024         return 0;
1025 }
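/*
 * This produces the "available_events"/"set_event" listing format,
 * one event per line, e.g.:
 *
 *	sched:sched_switch
 *	sched:sched_wakeup
 */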
1026
1027 static void t_stop(struct seq_file *m, void *p)
1028 {
1029         mutex_unlock(&event_mutex);
1030 }
1031
1032 static void *
1033 p_next(struct seq_file *m, void *v, loff_t *pos)
1034 {
1035         struct trace_array *tr = m->private;
1036         struct trace_pid_list *pid_list = rcu_dereference_sched(tr->filtered_pids);
1037         unsigned long pid = (unsigned long)v;
1038
1039         (*pos)++;
1040
1041         /* pid is already +1 of the actual previous bit */
1042         pid = find_next_bit(pid_list->pids, pid_list->pid_max, pid);
1043
1044         /* Return pid + 1 to allow zero to be represented */
1045         if (pid < pid_list->pid_max)
1046                 return (void *)(pid + 1);
1047
1048         return NULL;
1049 }
1050
1051 static void *p_start(struct seq_file *m, loff_t *pos)
1052         __acquires(RCU)
1053 {
1054         struct trace_pid_list *pid_list;
1055         struct trace_array *tr = m->private;
1056         unsigned long pid;
1057         loff_t l = 0;
1058
1059         /*
1060          * Grab the mutex so that calls to p_next() see the same
1061          * tr->filtered_pids that p_start() saw. If we just passed
1062          * tr->filtered_pids around, RCU alone would have been
1063          * enough, but doing that makes things more complex.
1064          */
1065         mutex_lock(&event_mutex);
1066         rcu_read_lock_sched();
1067
1068         pid_list = rcu_dereference_sched(tr->filtered_pids);
1069
1070         if (!pid_list)
1071                 return NULL;
1072
1073         pid = find_first_bit(pid_list->pids, pid_list->pid_max);
1074         if (pid >= pid_list->pid_max)
1075                 return NULL;
1076
1077         /* Return pid + 1 so that zero can be the exit value */
1078         for (pid++; pid && l < *pos;
1079              pid = (unsigned long)p_next(m, (void *)pid, &l))
1080                 ;
1081         return (void *)pid;
1082 }
1083
1084 static void p_stop(struct seq_file *m, void *p)
1085         __releases(RCU)
1086 {
1087         rcu_read_unlock_sched();
1088         mutex_unlock(&event_mutex);
1089 }
1090
1091 static int p_show(struct seq_file *m, void *v)
1092 {
1093         unsigned long pid = (unsigned long)v - 1;
1094
1095         seq_printf(m, "%lu\n", pid);
1096         return 0;
1097 }
1098
1099 static ssize_t
1100 event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
1101                   loff_t *ppos)
1102 {
1103         struct trace_event_file *file;
1104         unsigned long flags;
1105         char buf[4] = "0";
1106
1107         mutex_lock(&event_mutex);
1108         file = event_file_data(filp);
1109         if (likely(file))
1110                 flags = file->flags;
1111         mutex_unlock(&event_mutex);
1112
1113         if (!file)
1114                 return -ENODEV;
1115
1116         if (flags & EVENT_FILE_FL_ENABLED &&
1117             !(flags & EVENT_FILE_FL_SOFT_DISABLED))
1118                 strcpy(buf, "1");
1119
1120         if (flags & EVENT_FILE_FL_SOFT_DISABLED ||
1121             flags & EVENT_FILE_FL_SOFT_MODE)
1122                 strcat(buf, "*");
1123
1124         strcat(buf, "\n");
1125
1126         return simple_read_from_buffer(ubuf, cnt, ppos, buf, strlen(buf));
1127 }
1128
1129 static ssize_t
1130 event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
1131                    loff_t *ppos)
1132 {
1133         struct trace_event_file *file;
1134         unsigned long val;
1135         int ret;
1136
1137         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
1138         if (ret)
1139                 return ret;
1140
1141         ret = tracing_update_buffers();
1142         if (ret < 0)
1143                 return ret;
1144
1145         switch (val) {
1146         case 0:
1147         case 1:
1148                 ret = -ENODEV;
1149                 mutex_lock(&event_mutex);
1150                 file = event_file_data(filp);
1151                 if (likely(file))
1152                         ret = ftrace_event_enable_disable(file, val);
1153                 mutex_unlock(&event_mutex);
1154                 break;
1155
1156         default:
1157                 return -EINVAL;
1158         }
1159
1160         *ppos += cnt;
1161
1162         return ret ? ret : cnt;
1163 }
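/*
 * These handlers back the per-event "enable" files, e.g.:
 *
 *	# echo 1 > /sys/kernel/tracing/events/sched/sched_switch/enable
 *	# cat /sys/kernel/tracing/events/sched/sched_switch/enable
 *	1
 *
 * A trailing '*' in the read-back value means the event is in soft
 * mode or soft disabled (e.g. held by a trigger).
 */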
1164
1165 static ssize_t
1166 system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
1167                    loff_t *ppos)
1168 {
1169         const char set_to_char[4] = { '?', '0', '1', 'X' };
1170         struct trace_subsystem_dir *dir = filp->private_data;
1171         struct event_subsystem *system = dir->subsystem;
1172         struct trace_event_call *call;
1173         struct trace_event_file *file;
1174         struct trace_array *tr = dir->tr;
1175         char buf[2];
1176         int set = 0;
1177         int ret;
1178
1179         mutex_lock(&event_mutex);
1180         list_for_each_entry(file, &tr->events, list) {
1181                 call = file->event_call;
1182                 if (!trace_event_name(call) || !call->class || !call->class->reg)
1183                         continue;
1184
1185                 if (system && strcmp(call->class->system, system->name) != 0)
1186                         continue;
1187
1188         /*
1189          * We need to find out if all the events are set, or all
1190          * are cleared, or if we have a mixture: bit 0 means some
1191          * event is disabled, bit 1 means some event is enabled.
1192          */
1193                 set |= (1 << !!(file->flags & EVENT_FILE_FL_ENABLED));
1194
1195                 /*
1196                  * If we have a mixture, no need to look further.
1197                  */
1198                 if (set == 3)
1199                         break;
1200         }
1201         mutex_unlock(&event_mutex);
1202
1203         buf[0] = set_to_char[set];
1204         buf[1] = '\n';
1205
1206         ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
1207
1208         return ret;
1209 }
1210
1211 static ssize_t
1212 system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
1213                     loff_t *ppos)
1214 {
1215         struct trace_subsystem_dir *dir = filp->private_data;
1216         struct event_subsystem *system = dir->subsystem;
1217         const char *name = NULL;
1218         unsigned long val;
1219         ssize_t ret;
1220
1221         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
1222         if (ret)
1223                 return ret;
1224
1225         ret = tracing_update_buffers();
1226         if (ret < 0)
1227                 return ret;
1228
1229         if (val != 0 && val != 1)
1230                 return -EINVAL;
1231
1232         /*
1233          * Opening of "enable" adds a ref count to system,
1234          * so the name is safe to use.
1235          */
1236         if (system)
1237                 name = system->name;
1238
1239         ret = __ftrace_set_clr_event(dir->tr, NULL, name, NULL, val);
1240         if (ret)
1241                 goto out;
1242
1243         ret = cnt;
1244
1245 out:
1246         *ppos += cnt;
1247
1248         return ret;
1249 }
1250
1251 enum {
1252         FORMAT_HEADER           = 1,
1253         FORMAT_FIELD_SEPERATOR  = 2,
1254         FORMAT_PRINTFMT         = 3,
1255 };
1256
1257 static void *f_next(struct seq_file *m, void *v, loff_t *pos)
1258 {
1259         struct trace_event_call *call = event_file_data(m->private);
1260         struct list_head *common_head = &ftrace_common_fields;
1261         struct list_head *head = trace_get_fields(call);
1262         struct list_head *node = v;
1263
1264         (*pos)++;
1265
1266         switch ((unsigned long)v) {
1267         case FORMAT_HEADER:
1268                 node = common_head;
1269                 break;
1270
1271         case FORMAT_FIELD_SEPERATOR:
1272                 node = head;
1273                 break;
1274
1275         case FORMAT_PRINTFMT:
1276                 /* all done */
1277                 return NULL;
1278         }
1279
1280         node = node->prev;
1281         if (node == common_head)
1282                 return (void *)FORMAT_FIELD_SEPERATOR;
1283         else if (node == head)
1284                 return (void *)FORMAT_PRINTFMT;
1285         else
1286                 return node;
1287 }
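/*
 * Note the resulting order: FORMAT_HEADER, then the common fields,
 * FORMAT_FIELD_SEPERATOR, the event's own fields, and finally
 * FORMAT_PRINTFMT. Because the field lists are built by prepending
 * (list_add() in __trace_define_field()), walking ->prev from the
 * list head yields the fields in the order they were defined.
 */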
1288
1289 static int f_show(struct seq_file *m, void *v)
1290 {
1291         struct trace_event_call *call = event_file_data(m->private);
1292         struct ftrace_event_field *field;
1293         const char *array_descriptor;
1294
1295         switch ((unsigned long)v) {
1296         case FORMAT_HEADER:
1297                 seq_printf(m, "name: %s\n", trace_event_name(call));
1298                 seq_printf(m, "ID: %d\n", call->event.type);
1299                 seq_puts(m, "format:\n");
1300                 return 0;
1301
1302         case FORMAT_FIELD_SEPERATOR:
1303                 seq_putc(m, '\n');
1304                 return 0;
1305
1306         case FORMAT_PRINTFMT:
1307                 seq_printf(m, "\nprint fmt: %s\n",
1308                            call->print_fmt);
1309                 return 0;
1310         }
1311
1312         field = list_entry(v, struct ftrace_event_field, link);
1313         /*
1314          * Smartly shows the array type (except dynamic arrays).
1315          * Normal:
1316          *      field:TYPE VAR
1317          * If TYPE := TYPE[LEN], it is shown:
1318          *      field:TYPE VAR[LEN]
1319          */
1320         array_descriptor = strchr(field->type, '[');
1321
1322         if (!strncmp(field->type, "__data_loc", 10))
1323                 array_descriptor = NULL;
1324
1325         if (!array_descriptor)
1326                 seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
1327                            field->type, field->name, field->offset,
1328                            field->size, !!field->is_signed);
1329         else
1330                 seq_printf(m, "\tfield:%.*s %s%s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
1331                            (int)(array_descriptor - field->type),
1332                            field->type, field->name,
1333                            array_descriptor, field->offset,
1334                            field->size, !!field->is_signed);
1335
1336         return 0;
1337 }
1338
1339 static void *f_start(struct seq_file *m, loff_t *pos)
1340 {
1341         void *p = (void *)FORMAT_HEADER;
1342         loff_t l = 0;
1343
1344         /* ->stop() is called even if ->start() fails */
1345         mutex_lock(&event_mutex);
1346         if (!event_file_data(m->private))
1347                 return ERR_PTR(-ENODEV);
1348
1349         while (l < *pos && p)
1350                 p = f_next(m, p, &l);
1351
1352         return p;
1353 }
1354
1355 static void f_stop(struct seq_file *m, void *p)
1356 {
1357         mutex_unlock(&event_mutex);
1358 }
1359
1360 static const struct seq_operations trace_format_seq_ops = {
1361         .start          = f_start,
1362         .next           = f_next,
1363         .stop           = f_stop,
1364         .show           = f_show,
1365 };
1366
1367 static int trace_format_open(struct inode *inode, struct file *file)
1368 {
1369         struct seq_file *m;
1370         int ret;
1371
1372         ret = seq_open(file, &trace_format_seq_ops);
1373         if (ret < 0)
1374                 return ret;
1375
1376         m = file->private_data;
1377         m->private = file;
1378
1379         return 0;
1380 }
1381
1382 static ssize_t
1383 event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
1384 {
1385         int id = (long)event_file_data(filp);
1386         char buf[32];
1387         int len;
1388
1389         if (*ppos)
1390                 return 0;
1391
1392         if (unlikely(!id))
1393                 return -ENODEV;
1394
1395         len = sprintf(buf, "%d\n", id);
1396
1397         return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
1398 }
1399
1400 static ssize_t
1401 event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
1402                   loff_t *ppos)
1403 {
1404         struct trace_event_file *file;
1405         struct trace_seq *s;
1406         int r = -ENODEV;
1407
1408         if (*ppos)
1409                 return 0;
1410
1411         s = kmalloc(sizeof(*s), GFP_KERNEL);
1412
1413         if (!s)
1414                 return -ENOMEM;
1415
1416         trace_seq_init(s);
1417
1418         mutex_lock(&event_mutex);
1419         file = event_file_data(filp);
1420         if (file)
1421                 print_event_filter(file, s);
1422         mutex_unlock(&event_mutex);
1423
1424         if (file)
1425                 r = simple_read_from_buffer(ubuf, cnt, ppos,
1426                                             s->buffer, trace_seq_used(s));
1427
1428         kfree(s);
1429
1430         return r;
1431 }
1432
1433 static ssize_t
1434 event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
1435                    loff_t *ppos)
1436 {
1437         struct trace_event_file *file;
1438         char *buf;
1439         int err = -ENODEV;
1440
1441         if (cnt >= PAGE_SIZE)
1442                 return -EINVAL;
1443
1444         buf = memdup_user_nul(ubuf, cnt);
1445         if (IS_ERR(buf))
1446                 return PTR_ERR(buf);
1447
1448         mutex_lock(&event_mutex);
1449         file = event_file_data(filp);
1450         if (file)
1451                 err = apply_event_filter(file, buf);
1452         mutex_unlock(&event_mutex);
1453
1454         kfree(buf);
1455         if (err < 0)
1456                 return err;
1457
1458         *ppos += cnt;
1459
1460         return cnt;
1461 }
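/*
 * These back the per-event "filter" files, e.g.:
 *
 *	# echo 'common_pid == 1234' > events/sched/sched_switch/filter
 *	# echo 0 > events/sched/sched_switch/filter
 *
 * where writing "0" clears the filter.
 */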
1462
1463 static LIST_HEAD(event_subsystems);
1464
1465 static int subsystem_open(struct inode *inode, struct file *filp)
1466 {
1467         struct event_subsystem *system = NULL;
1468         struct trace_subsystem_dir *dir = NULL; /* Initialize for gcc */
1469         struct trace_array *tr;
1470         int ret;
1471
1472         if (tracing_is_disabled())
1473                 return -ENODEV;
1474
1475         /* Make sure the system still exists */
1476         mutex_lock(&trace_types_lock);
1477         mutex_lock(&event_mutex);
1478         list_for_each_entry(tr, &ftrace_trace_arrays, list) {
1479                 list_for_each_entry(dir, &tr->systems, list) {
1480                         if (dir == inode->i_private) {
1481                                 /* Don't open systems with no events */
1482                                 if (dir->nr_events) {
1483                                         __get_system_dir(dir);
1484                                         system = dir->subsystem;
1485                                 }
1486                                 goto exit_loop;
1487                         }
1488                 }
1489         }
1490  exit_loop:
1491         mutex_unlock(&event_mutex);
1492         mutex_unlock(&trace_types_lock);
1493
1494         if (!system)
1495                 return -ENODEV;
1496
1497         /* Some versions of gcc think dir can be uninitialized here */
1498         WARN_ON(!dir);
1499
1500         /* Still need to increment the ref count of the system */
1501         if (trace_array_get(tr) < 0) {
1502                 put_system(dir);
1503                 return -ENODEV;
1504         }
1505
1506         ret = tracing_open_generic(inode, filp);
1507         if (ret < 0) {
1508                 trace_array_put(tr);
1509                 put_system(dir);
1510         }
1511
1512         return ret;
1513 }
1514
1515 static int system_tr_open(struct inode *inode, struct file *filp)
1516 {
1517         struct trace_subsystem_dir *dir;
1518         struct trace_array *tr = inode->i_private;
1519         int ret;
1520
1521         if (tracing_is_disabled())
1522                 return -ENODEV;
1523
1524         if (trace_array_get(tr) < 0)
1525                 return -ENODEV;
1526
1527         /* Make a temporary dir that has no system but points to tr */
1528         dir = kzalloc(sizeof(*dir), GFP_KERNEL);
1529         if (!dir) {
1530                 trace_array_put(tr);
1531                 return -ENOMEM;
1532         }
1533
1534         dir->tr = tr;
1535
1536         ret = tracing_open_generic(inode, filp);
1537         if (ret < 0) {
1538                 trace_array_put(tr);
1539                 kfree(dir);
1540                 return ret;
1541         }
1542
1543         filp->private_data = dir;
1544
1545         return 0;
1546 }
1547
1548 static int subsystem_release(struct inode *inode, struct file *file)
1549 {
1550         struct trace_subsystem_dir *dir = file->private_data;
1551
1552         trace_array_put(dir->tr);
1553
1554         /*
1555          * If dir->subsystem is NULL, then this is a temporary
1556          * descriptor that was made for a trace_array to enable
1557          * all subsystems.
1558          */
1559         if (dir->subsystem)
1560                 put_system(dir);
1561         else
1562                 kfree(dir);
1563
1564         return 0;
1565 }
1566
1567 static ssize_t
1568 subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
1569                       loff_t *ppos)
1570 {
1571         struct trace_subsystem_dir *dir = filp->private_data;
1572         struct event_subsystem *system = dir->subsystem;
1573         struct trace_seq *s;
1574         int r;
1575
1576         if (*ppos)
1577                 return 0;
1578
1579         s = kmalloc(sizeof(*s), GFP_KERNEL);
1580         if (!s)
1581                 return -ENOMEM;
1582
1583         trace_seq_init(s);
1584
1585         print_subsystem_event_filter(system, s);
1586         r = simple_read_from_buffer(ubuf, cnt, ppos,
1587                                     s->buffer, trace_seq_used(s));
1588
1589         kfree(s);
1590
1591         return r;
1592 }
1593
1594 static ssize_t
1595 subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
1596                        loff_t *ppos)
1597 {
1598         struct trace_subsystem_dir *dir = filp->private_data;
1599         char *buf;
1600         int err;
1601
1602         if (cnt >= PAGE_SIZE)
1603                 return -EINVAL;
1604
1605         buf = memdup_user_nul(ubuf, cnt);
1606         if (IS_ERR(buf))
1607                 return PTR_ERR(buf);
1608
1609         err = apply_subsystem_event_filter(dir, buf);
1610         kfree(buf);
1611         if (err < 0)
1612                 return err;
1613
1614         *ppos += cnt;
1615
1616         return cnt;
1617 }
1618
1619 static ssize_t
1620 show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
1621 {
1622         int (*func)(struct trace_seq *s) = filp->private_data;
1623         struct trace_seq *s;
1624         int r;
1625
1626         if (*ppos)
1627                 return 0;
1628
1629         s = kmalloc(sizeof(*s), GFP_KERNEL);
1630         if (!s)
1631                 return -ENOMEM;
1632
1633         trace_seq_init(s);
1634
1635         func(s);
1636         r = simple_read_from_buffer(ubuf, cnt, ppos,
1637                                     s->buffer, trace_seq_used(s));
1638
1639         kfree(s);
1640
1641         return r;
1642 }
1643
1644 static void ignore_task_cpu(void *data)
1645 {
1646         struct trace_array *tr = data;
1647         struct trace_pid_list *pid_list;
1648
1649         /*
1650          * This function is called by on_each_cpu() while the
1651          * event_mutex is held.
1652          */
1653         pid_list = rcu_dereference_protected(tr->filtered_pids,
1654                                              mutex_is_locked(&event_mutex));
1655
1656         this_cpu_write(tr->trace_buffer.data->ignore_pid,
1657                        ignore_this_task(pid_list, current));
1658 }
1659
1660 static ssize_t
1661 ftrace_event_pid_write(struct file *filp, const char __user *ubuf,
1662                        size_t cnt, loff_t *ppos)
1663 {
1664         struct seq_file *m = filp->private_data;
1665         struct trace_array *tr = m->private;
1666         struct trace_pid_list *filtered_pids = NULL;
1667         struct trace_pid_list *pid_list;
1668         struct trace_event_file *file;
1669         struct trace_parser parser;
1670         unsigned long val;
1671         loff_t this_pos;
1672         ssize_t read = 0;
1673         ssize_t ret = 0;
1674         pid_t pid;
1675         int nr_pids = 0;
1676
1677         if (!cnt)
1678                 return 0;
1679
1680         ret = tracing_update_buffers();
1681         if (ret < 0)
1682                 return ret;
1683
1684         if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
1685                 return -ENOMEM;
1686
1687         mutex_lock(&event_mutex);
1688         filtered_pids = rcu_dereference_protected(tr->filtered_pids,
1689                                              lockdep_is_held(&event_mutex));
1690
1691         /*
1692          * The write is an all-or-nothing operation: always create a
1693          * new array when the user adds pids rather than modifying the
1694          * current one. If anything fails, the current list is left
1695          * unmodified.
1696          */
1697         pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
1698         if (!pid_list) {
1699                 read = -ENOMEM;
1700                 goto out;
1701         }
1702         pid_list->pid_max = READ_ONCE(pid_max);
1703         /* Only truncating will shrink pid_max */
1704         if (filtered_pids && filtered_pids->pid_max > pid_list->pid_max)
1705                 pid_list->pid_max = filtered_pids->pid_max;
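	/* One bit per pid; round the bitmap size up to whole bytes. */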
1706         pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
1707         if (!pid_list->pids) {
1708                 kfree(pid_list);
1709                 read = -ENOMEM;
1710                 goto out;
1711         }
1712         if (filtered_pids) {
1713                 /* copy the current bits to the new max */
1714                 pid = find_first_bit(filtered_pids->pids,
1715                                      filtered_pids->pid_max);
1716                 while (pid < filtered_pids->pid_max) {
1717                         set_bit(pid, pid_list->pids);
1718                         pid = find_next_bit(filtered_pids->pids,
1719                                             filtered_pids->pid_max,
1720                                             pid + 1);
1721                         nr_pids++;
1722                 }
1723         }
1724
1725         while (cnt > 0) {
1726
1727                 this_pos = 0;
1728
1729                 ret = trace_get_user(&parser, ubuf, cnt, &this_pos);
1730                 if (ret < 0 || !trace_parser_loaded(&parser))
1731                         break;
1732
1733                 read += ret;
1734                 ubuf += ret;
1735                 cnt -= ret;
1736
1737                 parser.buffer[parser.idx] = 0;
1738
1739                 ret = -EINVAL;
1740                 if (kstrtoul(parser.buffer, 0, &val))
1741                         break;
1742                 if (val >= pid_list->pid_max)
1743                         break;
1744
1745                 pid = (pid_t)val;
1746
1747                 set_bit(pid, pid_list->pids);
1748                 nr_pids++;
1749
1750                 trace_parser_clear(&parser);
1751                 ret = 0;
1752         }
1753         trace_parser_put(&parser);
1754
1755         if (ret < 0) {
1756                 vfree(pid_list->pids);
1757                 kfree(pid_list);
1758                 read = ret;
1759                 goto out;
1760         }
1761
1762         if (!nr_pids) {
1763                 /* Cleared the list of pids */
1764                 vfree(pid_list->pids);
1765                 kfree(pid_list);
1766                 read = ret;
1767                 if (!filtered_pids)
1768                         goto out;
1769                 pid_list = NULL;
1770         }
1771         rcu_assign_pointer(tr->filtered_pids, pid_list);
1772
1773         list_for_each_entry(file, &tr->events, list) {
1774                 set_bit(EVENT_FILE_FL_PID_FILTER_BIT, &file->flags);
1775         }
1776
1777         if (filtered_pids) {
1778                 synchronize_sched();
1779
1780                 vfree(filtered_pids->pids);
1781                 kfree(filtered_pids);
1782         } else {
1783                 /*
1784                  * Register a probe that is called before all other probes
1785                  * to set ignore_pid if next or prev do not match.
1786          * Register a probe that is called after all other probes
1787                  * to only keep ignore_pid set if next pid matches.
1788                  */
1789                 register_trace_prio_sched_switch(event_filter_pid_sched_switch_probe_pre,
1790                                                  tr, INT_MAX);
1791                 register_trace_prio_sched_switch(event_filter_pid_sched_switch_probe_post,
1792                                                  tr, 0);
1793
1794                 register_trace_prio_sched_wakeup(event_filter_pid_sched_wakeup_probe_pre,
1795                                                  tr, INT_MAX);
1796                 register_trace_prio_sched_wakeup(event_filter_pid_sched_wakeup_probe_post,
1797                                                  tr, 0);
1798
1799                 register_trace_prio_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_pre,
1800                                                      tr, INT_MAX);
1801                 register_trace_prio_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_post,
1802                                                      tr, 0);
1803
1804                 register_trace_prio_sched_waking(event_filter_pid_sched_wakeup_probe_pre,
1805                                                  tr, INT_MAX);
1806                 register_trace_prio_sched_waking(event_filter_pid_sched_wakeup_probe_post,
1807                                                  tr, 0);
1808         }
1809
1810         /*
1811          * Ignoring of pids is done at task switch. But we have to
1812          * check for those tasks that are currently running.
1813          * Always do this in case a pid was appended or removed.
1814          */
1815         on_each_cpu(ignore_task_cpu, tr, 1);
1816
1817  out:
1818         mutex_unlock(&event_mutex);
1819
1820         ret = read;
1821         if (read > 0)
1822                 *ppos += read;
1823
1824         return ret;
1825 }
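/*
 * Example usage (assuming tracefs at /sys/kernel/tracing):
 *
 *   # echo 123 456 > set_event_pid    # only trace these pids
 *   # echo > set_event_pid            # clear the pid filter
 *
 * As noted above, the write is all-or-nothing: on failure the
 * previous pid list is left in place.
 */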
1826
1827 static int ftrace_event_avail_open(struct inode *inode, struct file *file);
1828 static int ftrace_event_set_open(struct inode *inode, struct file *file);
1829 static int ftrace_event_set_pid_open(struct inode *inode, struct file *file);
1830 static int ftrace_event_release(struct inode *inode, struct file *file);
1831
1832 static const struct seq_operations show_event_seq_ops = {
1833         .start = t_start,
1834         .next = t_next,
1835         .show = t_show,
1836         .stop = t_stop,
1837 };
1838
1839 static const struct seq_operations show_set_event_seq_ops = {
1840         .start = s_start,
1841         .next = s_next,
1842         .show = t_show,
1843         .stop = t_stop,
1844 };
1845
1846 static const struct seq_operations show_set_pid_seq_ops = {
1847         .start = p_start,
1848         .next = p_next,
1849         .show = p_show,
1850         .stop = p_stop,
1851 };
1852
1853 static const struct file_operations ftrace_avail_fops = {
1854         .open = ftrace_event_avail_open,
1855         .read = seq_read,
1856         .llseek = seq_lseek,
1857         .release = seq_release,
1858 };
1859
1860 static const struct file_operations ftrace_set_event_fops = {
1861         .open = ftrace_event_set_open,
1862         .read = seq_read,
1863         .write = ftrace_event_write,
1864         .llseek = seq_lseek,
1865         .release = ftrace_event_release,
1866 };
1867
1868 static const struct file_operations ftrace_set_event_pid_fops = {
1869         .open = ftrace_event_set_pid_open,
1870         .read = seq_read,
1871         .write = ftrace_event_pid_write,
1872         .llseek = seq_lseek,
1873         .release = ftrace_event_release,
1874 };
1875
1876 static const struct file_operations ftrace_enable_fops = {
1877         .open = tracing_open_generic,
1878         .read = event_enable_read,
1879         .write = event_enable_write,
1880         .llseek = default_llseek,
1881 };
1882
1883 static const struct file_operations ftrace_event_format_fops = {
1884         .open = trace_format_open,
1885         .read = seq_read,
1886         .llseek = seq_lseek,
1887         .release = seq_release,
1888 };
1889
1890 static const struct file_operations ftrace_event_id_fops = {
1891         .read = event_id_read,
1892         .llseek = default_llseek,
1893 };
1894
1895 static const struct file_operations ftrace_event_filter_fops = {
1896         .open = tracing_open_generic,
1897         .read = event_filter_read,
1898         .write = event_filter_write,
1899         .llseek = default_llseek,
1900 };
1901
1902 static const struct file_operations ftrace_subsystem_filter_fops = {
1903         .open = subsystem_open,
1904         .read = subsystem_filter_read,
1905         .write = subsystem_filter_write,
1906         .llseek = default_llseek,
1907         .release = subsystem_release,
1908 };
1909
1910 static const struct file_operations ftrace_system_enable_fops = {
1911         .open = subsystem_open,
1912         .read = system_enable_read,
1913         .write = system_enable_write,
1914         .llseek = default_llseek,
1915         .release = subsystem_release,
1916 };
1917
1918 static const struct file_operations ftrace_tr_enable_fops = {
1919         .open = system_tr_open,
1920         .read = system_enable_read,
1921         .write = system_enable_write,
1922         .llseek = default_llseek,
1923         .release = subsystem_release,
1924 };
1925
1926 static const struct file_operations ftrace_show_header_fops = {
1927         .open = tracing_open_generic,
1928         .read = show_header,
1929         .llseek = default_llseek,
1930 };
1931
1932 static int
1933 ftrace_event_open(struct inode *inode, struct file *file,
1934                   const struct seq_operations *seq_ops)
1935 {
1936         struct seq_file *m;
1937         int ret;
1938
1939         ret = seq_open(file, seq_ops);
1940         if (ret < 0)
1941                 return ret;
1942         m = file->private_data;
1943         /* copy tr over to seq ops */
1944         m->private = inode->i_private;
1945
1946         return ret;
1947 }
1948
1949 static int ftrace_event_release(struct inode *inode, struct file *file)
1950 {
1951         struct trace_array *tr = inode->i_private;
1952
1953         trace_array_put(tr);
1954
1955         return seq_release(inode, file);
1956 }
1957
1958 static int
1959 ftrace_event_avail_open(struct inode *inode, struct file *file)
1960 {
1961         const struct seq_operations *seq_ops = &show_event_seq_ops;
1962
1963         return ftrace_event_open(inode, file, seq_ops);
1964 }
1965
1966 static int
1967 ftrace_event_set_open(struct inode *inode, struct file *file)
1968 {
1969         const struct seq_operations *seq_ops = &show_set_event_seq_ops;
1970         struct trace_array *tr = inode->i_private;
1971         int ret;
1972
1973         if (trace_array_get(tr) < 0)
1974                 return -ENODEV;
1975
1976         if ((file->f_mode & FMODE_WRITE) &&
1977             (file->f_flags & O_TRUNC))
1978                 ftrace_clear_events(tr);
1979
1980         ret = ftrace_event_open(inode, file, seq_ops);
1981         if (ret < 0)
1982                 trace_array_put(tr);
1983         return ret;
1984 }
1985
1986 static int
1987 ftrace_event_set_pid_open(struct inode *inode, struct file *file)
1988 {
1989         const struct seq_operations *seq_ops = &show_set_pid_seq_ops;
1990         struct trace_array *tr = inode->i_private;
1991         int ret;
1992
1993         if (trace_array_get(tr) < 0)
1994                 return -ENODEV;
1995
1996         if ((file->f_mode & FMODE_WRITE) &&
1997             (file->f_flags & O_TRUNC))
1998                 ftrace_clear_event_pids(tr);
1999
2000         ret = ftrace_event_open(inode, file, seq_ops);
2001         if (ret < 0)
2002                 trace_array_put(tr);
2003         return ret;
2004 }
2005
2006 static struct event_subsystem *
2007 create_new_subsystem(const char *name)
2008 {
2009         struct event_subsystem *system;
2010
2011         /* need to create new entry */
2012         system = kmalloc(sizeof(*system), GFP_KERNEL);
2013         if (!system)
2014                 return NULL;
2015
2016         system->ref_count = 1;
2017
2018         /* Only allocate if dynamic (kprobes and modules) */
2019         system->name = kstrdup_const(name, GFP_KERNEL);
2020         if (!system->name)
2021                 goto out_free;
2022
2023         system->filter = NULL;
2024
2025         system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
2026         if (!system->filter)
2027                 goto out_free;
2028
2029         list_add(&system->list, &event_subsystems);
2030
2031         return system;
2032
2033  out_free:
2034         kfree_const(system->name);
2035         kfree(system);
2036         return NULL;
2037 }
2038
2039 static struct dentry *
2040 event_subsystem_dir(struct trace_array *tr, const char *name,
2041                     struct trace_event_file *file, struct dentry *parent)
2042 {
2043         struct trace_subsystem_dir *dir;
2044         struct event_subsystem *system;
2045         struct dentry *entry;
2046
2047         /* First see if we already created this dir */
2048         list_for_each_entry(dir, &tr->systems, list) {
2049                 system = dir->subsystem;
2050                 if (strcmp(system->name, name) == 0) {
2051                         dir->nr_events++;
2052                         file->system = dir;
2053                         return dir->entry;
2054                 }
2055         }
2056
2057         /* Now see if the system itself exists. */
2058         list_for_each_entry(system, &event_subsystems, list) {
2059                 if (strcmp(system->name, name) == 0)
2060                         break;
2061         }
2062         /* Reset system variable when not found */
2063         if (&system->list == &event_subsystems)
2064                 system = NULL;
2065
2066         dir = kmalloc(sizeof(*dir), GFP_KERNEL);
2067         if (!dir)
2068                 goto out_fail;
2069
2070         if (!system) {
2071                 system = create_new_subsystem(name);
2072                 if (!system)
2073                         goto out_free;
2074         } else
2075                 __get_system(system);
2076
2077         dir->entry = tracefs_create_dir(name, parent);
2078         if (!dir->entry) {
2079                 pr_warn("Failed to create system directory %s\n", name);
2080                 __put_system(system);
2081                 goto out_free;
2082         }
2083
2084         dir->tr = tr;
2085         dir->ref_count = 1;
2086         dir->nr_events = 1;
2087         dir->subsystem = system;
2088         file->system = dir;
2089
2090         entry = tracefs_create_file("filter", 0644, dir->entry, dir,
2091                                     &ftrace_subsystem_filter_fops);
2092         if (!entry) {
2093                 kfree(system->filter);
2094                 system->filter = NULL;
2095                 pr_warn("Could not create tracefs '%s/filter' entry\n", name);
2096         }
2097
2098         trace_create_file("enable", 0644, dir->entry, dir,
2099                           &ftrace_system_enable_fops);
2100
2101         list_add(&dir->list, &tr->systems);
2102
2103         return dir->entry;
2104
2105  out_free:
2106         kfree(dir);
2107  out_fail:
2108         /* Only print this message if we failed on a memory allocation */
2109         if (!dir || !system)
2110                 pr_warn("No memory to create event subsystem %s\n", name);
2111         return NULL;
2112 }
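/*
 * The directory built above ends up looking roughly like:
 *
 *   events/<system>/filter
 *   events/<system>/enable
 *
 * with the per-event directories added underneath it by
 * event_create_dir() below.
 */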
2113
2114 static int
2115 event_create_dir(struct dentry *parent, struct trace_event_file *file)
2116 {
2117         struct trace_event_call *call = file->event_call;
2118         struct trace_array *tr = file->tr;
2119         struct list_head *head;
2120         struct dentry *d_events;
2121         const char *name;
2122         int ret;
2123
2124         /*
2125          * If the trace point header did not define TRACE_SYSTEM
2126          * then the system would be called "TRACE_SYSTEM".
2127          */
2128         if (strcmp(call->class->system, TRACE_SYSTEM) != 0) {
2129                 d_events = event_subsystem_dir(tr, call->class->system, file, parent);
2130                 if (!d_events)
2131                         return -ENOMEM;
2132         } else
2133                 d_events = parent;
2134
2135         name = trace_event_name(call);
2136         file->dir = tracefs_create_dir(name, d_events);
2137         if (!file->dir) {
2138                 pr_warn("Could not create tracefs '%s' directory\n", name);
2139                 return -1;
2140         }
2141
2142         if (call->class->reg && !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
2143                 trace_create_file("enable", 0644, file->dir, file,
2144                                   &ftrace_enable_fops);
2145
2146 #ifdef CONFIG_PERF_EVENTS
2147         if (call->event.type && call->class->reg)
2148                 trace_create_file("id", 0444, file->dir,
2149                                   (void *)(long)call->event.type,
2150                                   &ftrace_event_id_fops);
2151 #endif
2152
2153         /*
2154          * Other events may have the same class. Only update
2155          * the fields if they are not already defined.
2156          */
2157         head = trace_get_fields(call);
2158         if (list_empty(head)) {
2159                 ret = call->class->define_fields(call);
2160                 if (ret < 0) {
2161                         pr_warn("Could not initialize trace point events/%s\n",
2162                                 name);
2163                         return -1;
2164                 }
2165         }
2166         trace_create_file("filter", 0644, file->dir, file,
2167                           &ftrace_event_filter_fops);
2168
2169         /*
2170          * Only event directories that can be enabled should have
2171          * triggers.
2172          */
2173         if (!(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
2174                 trace_create_file("trigger", 0644, file->dir, file,
2175                                   &event_trigger_fops);
2176
2177 #ifdef CONFIG_HIST_TRIGGERS
2178         trace_create_file("hist", 0444, file->dir, file,
2179                           &event_hist_fops);
2180 #endif
2181         trace_create_file("format", 0444, file->dir, call,
2182                           &ftrace_event_format_fops);
2183
2184         return 0;
2185 }
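/*
 * For a typical event this yields, e.g.:
 *
 *   events/sched/sched_switch/{enable,id,filter,trigger,hist,format}
 *
 * where "id" depends on CONFIG_PERF_EVENTS and "hist" on
 * CONFIG_HIST_TRIGGERS.
 */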
2186
2187 static void remove_event_from_tracers(struct trace_event_call *call)
2188 {
2189         struct trace_event_file *file;
2190         struct trace_array *tr;
2191
2192         do_for_each_event_file_safe(tr, file) {
2193                 if (file->event_call != call)
2194                         continue;
2195
2196                 remove_event_file_dir(file);
2197                 /*
2198                  * The do_for_each_event_file_safe() is
2199                  * a double loop. After finding the call for this
2200                  * trace_array, we use break to jump to the next
2201                  * trace_array.
2202                  */
2203                 break;
2204         } while_for_each_event_file();
2205 }
2206
2207 static void event_remove(struct trace_event_call *call)
2208 {
2209         struct trace_array *tr;
2210         struct trace_event_file *file;
2211
2212         do_for_each_event_file(tr, file) {
2213                 if (file->event_call != call)
2214                         continue;
2215                 ftrace_event_enable_disable(file, 0);
2216                 /*
2217                  * The do_for_each_event_file() is
2218                  * a double loop. After finding the call for this
2219                  * trace_array, we use break to jump to the next
2220                  * trace_array.
2221                  */
2222                 break;
2223         } while_for_each_event_file();
2224
2225         if (call->event.funcs)
2226                 __unregister_trace_event(&call->event);
2227         remove_event_from_tracers(call);
2228         list_del(&call->list);
2229 }
2230
2231 static int event_init(struct trace_event_call *call)
2232 {
2233         int ret = 0;
2234         const char *name;
2235
2236         name = trace_event_name(call);
2237         if (WARN_ON(!name))
2238                 return -EINVAL;
2239
2240         if (call->class->raw_init) {
2241                 ret = call->class->raw_init(call);
2242                 if (ret < 0 && ret != -ENOSYS)
2243                         pr_warn("Could not initialize trace events/%s\n", name);
2244         }
2245
2246         return ret;
2247 }
2248
2249 static int
2250 __register_event(struct trace_event_call *call, struct module *mod)
2251 {
2252         int ret;
2253
2254         ret = event_init(call);
2255         if (ret < 0)
2256                 return ret;
2257
2258         list_add(&call->list, &ftrace_events);
2259         call->mod = mod;
2260
2261         return 0;
2262 }
2263
2264 static char *enum_replace(char *ptr, struct trace_enum_map *map, int len)
2265 {
2266         int rlen;
2267         int elen;
2268
2269         /* Find the length of the enum value as a string */
2270         elen = snprintf(ptr, 0, "%ld", map->enum_value);
2271         /* Make sure there's enough room to replace the string with the value */
2272         if (len < elen)
2273                 return NULL;
2274
2275         snprintf(ptr, elen + 1, "%ld", map->enum_value);
2276
2277         /* Get the rest of the string of ptr */
2278         rlen = strlen(ptr + len);
2279         memmove(ptr + elen, ptr + len, rlen);
2280         /* Make sure we end the new string */
2281         ptr[elen + rlen] = 0;
2282
2283         return ptr + elen;
2284 }
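/*
 * Example: given a print_fmt containing a hypothetical enum name
 * "MODE_MAX" and a map with enum_value == 3, the buffer is rewritten
 * in place from "..., MODE_MAX)" to "..., 3)", the tail of the string
 * being moved left since the value prints shorter than the name.
 */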
2285
2286 static void update_event_printk(struct trace_event_call *call,
2287                                 struct trace_enum_map *map)
2288 {
2289         char *ptr;
2290         int quote = 0;
2291         int len = strlen(map->enum_string);
2292
2293         for (ptr = call->print_fmt; *ptr; ptr++) {
2294                 if (*ptr == '\\') {
2295                         ptr++;
2296                         /* paranoid */
2297                         if (!*ptr)
2298                                 break;
2299                         continue;
2300                 }
2301                 if (*ptr == '"') {
2302                         quote ^= 1;
2303                         continue;
2304                 }
2305                 if (quote)
2306                         continue;
2307                 if (isdigit(*ptr)) {
2308                         /* skip numbers */
2309                         do {
2310                                 ptr++;
2311                                 /* Check for alpha chars like ULL */
2312                         } while (isalnum(*ptr));
2313                         if (!*ptr)
2314                                 break;
2315                         /*
2316                          * A number must have some kind of delimiter after
2317                          * it, and we can ignore that too.
2318                          */
2319                         continue;
2320                 }
2321                 if (isalpha(*ptr) || *ptr == '_') {
2322                         if (strncmp(map->enum_string, ptr, len) == 0 &&
2323                             !isalnum(ptr[len]) && ptr[len] != '_') {
2324                                 ptr = enum_replace(ptr, map, len);
2325                                 /* Hmm, enum string smaller than value */
2326                                 if (WARN_ON_ONCE(!ptr))
2327                                         return;
2328                                 /*
2329                                  * No need to decrement here, as enum_replace()
2330                                  * returns the pointer to the character past
2331                                  * the enum, and two enums cannot be placed
2332                                  * back to back without something in between.
2333                                  * We can skip that something in between.
2334                                  */
2335                                 continue;
2336                         }
2337                 skip_more:
2338                         do {
2339                                 ptr++;
2340                         } while (isalnum(*ptr) || *ptr == '_');
2341                         if (!*ptr)
2342                                 break;
2343                         /*
2344                          * If what comes after this variable is a '.' or
2345                          * '->' then we can continue to ignore that string.
2346                          */
2347                         if (*ptr == '.' || (ptr[0] == '-' && ptr[1] == '>')) {
2348                                 ptr += *ptr == '.' ? 1 : 2;
2349                                 if (!*ptr)
2350                                         break;
2351                                 goto skip_more;
2352                         }
2353                         /*
2354                          * Once again, we can skip the delimiter that came
2355                          * after the string.
2356                          */
2357                         continue;
2358                 }
2359         }
2360 }
2361
2362 void trace_event_enum_update(struct trace_enum_map **map, int len)
2363 {
2364         struct trace_event_call *call, *p;
2365         const char *last_system = NULL;
2366         int last_i;
2367         int i;
2368
2369         down_write(&trace_event_sem);
2370         list_for_each_entry_safe(call, p, &ftrace_events, list) {
2371                 /* events are usually grouped together with systems */
2372                 if (!last_system || call->class->system != last_system) {
2373                         last_i = 0;
2374                         last_system = call->class->system;
2375                 }
2376
2377                 for (i = last_i; i < len; i++) {
2378                         if (call->class->system == map[i]->system) {
2379                                 /* Save the first system if need be */
2380                                 if (!last_i)
2381                                         last_i = i;
2382                                 update_event_printk(call, map[i]);
2383                         }
2384                 }
2385         }
2386         up_write(&trace_event_sem);
2387 }
2388
2389 static struct trace_event_file *
2390 trace_create_new_event(struct trace_event_call *call,
2391                        struct trace_array *tr)
2392 {
2393         struct trace_event_file *file;
2394
2395         file = kmem_cache_alloc(file_cachep, GFP_TRACE);
2396         if (!file)
2397                 return NULL;
2398
2399         file->event_call = call;
2400         file->tr = tr;
2401         atomic_set(&file->sm_ref, 0);
2402         atomic_set(&file->tm_ref, 0);
2403         INIT_LIST_HEAD(&file->triggers);
2404         list_add(&file->list, &tr->events);
2405
2406         return file;
2407 }
2408
2409 /* Add an event to a trace directory */
2410 static int
2411 __trace_add_new_event(struct trace_event_call *call, struct trace_array *tr)
2412 {
2413         struct trace_event_file *file;
2414
2415         file = trace_create_new_event(call, tr);
2416         if (!file)
2417                 return -ENOMEM;
2418
2419         return event_create_dir(tr->event_dir, file);
2420 }
2421
2422 /*
2423  * Just create a descriptor for early init. A descriptor is required
2424  * for enabling events at boot. We want to enable events before
2425  * the filesystem is initialized.
2426  */
2427 static __init int
2428 __trace_early_add_new_event(struct trace_event_call *call,
2429                             struct trace_array *tr)
2430 {
2431         struct trace_event_file *file;
2432
2433         file = trace_create_new_event(call, tr);
2434         if (!file)
2435                 return -ENOMEM;
2436
2437         return 0;
2438 }
2439
2440 struct ftrace_module_file_ops;
2441 static void __add_event_to_tracers(struct trace_event_call *call);
2442
2443 /* Add an additional event_call dynamically */
2444 int trace_add_event_call(struct trace_event_call *call)
2445 {
2446         int ret;
2447         mutex_lock(&trace_types_lock);
2448         mutex_lock(&event_mutex);
2449
2450         ret = __register_event(call, NULL);
2451         if (ret >= 0)
2452                 __add_event_to_tracers(call);
2453
2454         mutex_unlock(&event_mutex);
2455         mutex_unlock(&trace_types_lock);
2456         return ret;
2457 }
2458
2459 /*
2460  * Must be called with trace_types_lock, event_mutex and
2461  * trace_event_sem all held.
2462  */
2463 static void __trace_remove_event_call(struct trace_event_call *call)
2464 {
2465         event_remove(call);
2466         trace_destroy_fields(call);
2467         free_event_filter(call->filter);
2468         call->filter = NULL;
2469 }
2470
2471 static int probe_remove_event_call(struct trace_event_call *call)
2472 {
2473         struct trace_array *tr;
2474         struct trace_event_file *file;
2475
2476 #ifdef CONFIG_PERF_EVENTS
2477         if (call->perf_refcount)
2478                 return -EBUSY;
2479 #endif
2480         do_for_each_event_file(tr, file) {
2481                 if (file->event_call != call)
2482                         continue;
2483                 /*
2484                  * We can't rely on the ftrace_event_enable_disable(enable => 0)
2485                  * that we are about to do; EVENT_FILE_FL_SOFT_MODE can suppress
2486                  * TRACE_REG_UNREGISTER.
2487                  */
2488                 if (file->flags & EVENT_FILE_FL_ENABLED)
2489                         return -EBUSY;
2490                 /*
2491                  * The do_for_each_event_file() is
2492                  * a double loop. After finding the call for this
2493                  * trace_array, we use break to jump to the next
2494                  * trace_array.
2495                  */
2496                 break;
2497         } while_for_each_event_file();
2498
2499         __trace_remove_event_call(call);
2500
2501         return 0;
2502 }
2503
2504 /* Remove an event_call */
2505 int trace_remove_event_call(struct trace_event_call *call)
2506 {
2507         int ret;
2508
2509         mutex_lock(&trace_types_lock);
2510         mutex_lock(&event_mutex);
2511         down_write(&trace_event_sem);
2512         ret = probe_remove_event_call(call);
2513         up_write(&trace_event_sem);
2514         mutex_unlock(&event_mutex);
2515         mutex_unlock(&trace_types_lock);
2516
2517         return ret;
2518 }
2519
2520 #define for_each_event(event, start, end)                       \
2521         for (event = start;                                     \
2522              (unsigned long)event < (unsigned long)end;         \
2523              event++)
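/*
 * Walks an array of trace_event_call pointers laid out between two
 * addresses, such as a module's trace_events array or the
 * __start_ftrace_events/__stop_ftrace_events section bounds declared
 * further below.
 */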
2524
2525 #ifdef CONFIG_MODULES
2526
2527 static void trace_module_add_events(struct module *mod)
2528 {
2529         struct trace_event_call **call, **start, **end;
2530
2531         if (!mod->num_trace_events)
2532                 return;
2533
2534         /* Don't add infrastructure for mods without tracepoints */
2535         if (trace_module_has_bad_taint(mod)) {
2536                 pr_err("%s: module has bad taint, not creating trace events\n",
2537                        mod->name);
2538                 return;
2539         }
2540
2541         start = mod->trace_events;
2542         end = mod->trace_events + mod->num_trace_events;
2543
2544         for_each_event(call, start, end) {
2545                 __register_event(*call, mod);
2546                 __add_event_to_tracers(*call);
2547         }
2548 }
2549
2550 static void trace_module_remove_events(struct module *mod)
2551 {
2552         struct trace_event_call *call, *p;
2553         bool clear_trace = false;
2554
2555         down_write(&trace_event_sem);
2556         list_for_each_entry_safe(call, p, &ftrace_events, list) {
2557                 if (call->mod == mod) {
2558                         if (call->flags & TRACE_EVENT_FL_WAS_ENABLED)
2559                                 clear_trace = true;
2560                         __trace_remove_event_call(call);
2561                 }
2562         }
2563         up_write(&trace_event_sem);
2564
2565         /*
2566          * It is safest to reset the ring buffer if the module being unloaded
2567          * registered any events that were used. The only worry is if
2568          * a new module gets loaded and takes on the same ids as the events
2569          * of this module. When printing out the buffer, traced events left
2570          * over from this module may be parsed with the new module's event
2571          * formats, and unexpected results may occur.
2572          */
2573         if (clear_trace)
2574                 tracing_reset_all_online_cpus();
2575 }
2576
2577 static int trace_module_notify(struct notifier_block *self,
2578                                unsigned long val, void *data)
2579 {
2580         struct module *mod = data;
2581
2582         mutex_lock(&trace_types_lock);
2583         mutex_lock(&event_mutex);
2584         switch (val) {
2585         case MODULE_STATE_COMING:
2586                 trace_module_add_events(mod);
2587                 break;
2588         case MODULE_STATE_GOING:
2589                 trace_module_remove_events(mod);
2590                 break;
2591         }
2592         mutex_unlock(&event_mutex);
2593         mutex_unlock(&trace_types_lock);
2594
2595         return 0;
2596 }
2597
2598 static struct notifier_block trace_module_nb = {
2599         .notifier_call = trace_module_notify,
2600         .priority = 1, /* higher than trace.c module notify */
2601 };
2602 #endif /* CONFIG_MODULES */
2603
2604 /* Create a new event directory structure for a trace directory. */
2605 static void
2606 __trace_add_event_dirs(struct trace_array *tr)
2607 {
2608         struct trace_event_call *call;
2609         int ret;
2610
2611         list_for_each_entry(call, &ftrace_events, list) {
2612                 ret = __trace_add_new_event(call, tr);
2613                 if (ret < 0)
2614                         pr_warn("Could not create directory for event %s\n",
2615                                 trace_event_name(call));
2616         }
2617 }
2618
2619 struct trace_event_file *
2620 find_event_file(struct trace_array *tr, const char *system, const char *event)
2621 {
2622         struct trace_event_file *file;
2623         struct trace_event_call *call;
2624         const char *name;
2625
2626         list_for_each_entry(file, &tr->events, list) {
2627
2628                 call = file->event_call;
2629                 name = trace_event_name(call);
2630
2631                 if (!name || !call->class || !call->class->reg)
2632                         continue;
2633
2634                 if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
2635                         continue;
2636
2637                 if (strcmp(event, name) == 0 &&
2638                     strcmp(system, call->class->system) == 0)
2639                         return file;
2640         }
2641         return NULL;
2642 }
2643
2644 #ifdef CONFIG_DYNAMIC_FTRACE
2645
2646 /* Avoid typos */
2647 #define ENABLE_EVENT_STR        "enable_event"
2648 #define DISABLE_EVENT_STR       "disable_event"
2649
2650 struct event_probe_data {
2651         struct trace_event_file *file;
2652         unsigned long                   count;
2653         int                             ref;
2654         bool                            enable;
2655 };
2656
2657 static void
2658 event_enable_probe(unsigned long ip, unsigned long parent_ip, void **_data)
2659 {
2660         struct event_probe_data **pdata = (struct event_probe_data **)_data;
2661         struct event_probe_data *data = *pdata;
2662
2663         if (!data)
2664                 return;
2665
2666         if (data->enable)
2667                 clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &data->file->flags);
2668         else
2669                 set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &data->file->flags);
2670 }
2671
2672 static void
2673 event_enable_count_probe(unsigned long ip, unsigned long parent_ip, void **_data)
2674 {
2675         struct event_probe_data **pdata = (struct event_probe_data **)_data;
2676         struct event_probe_data *data = *pdata;
2677
2678         if (!data)
2679                 return;
2680
2681         if (!data->count)
2682                 return;
2683
2684         /* Skip if the event is in a state we want to switch to */
2685         if (data->enable == !(data->file->flags & EVENT_FILE_FL_SOFT_DISABLED))
2686                 return;
2687
2688         if (data->count != -1)
2689                 (data->count)--;
2690
2691         event_enable_probe(ip, parent_ip, _data);
2692 }
2693
2694 static int
2695 event_enable_print(struct seq_file *m, unsigned long ip,
2696                       struct ftrace_probe_ops *ops, void *_data)
2697 {
2698         struct event_probe_data *data = _data;
2699
2700         seq_printf(m, "%ps:", (void *)ip);
2701
2702         seq_printf(m, "%s:%s:%s",
2703                    data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR,
2704                    data->file->event_call->class->system,
2705                    trace_event_name(data->file->event_call));
2706
2707         if (data->count == -1)
2708                 seq_puts(m, ":unlimited\n");
2709         else
2710                 seq_printf(m, ":count=%ld\n", data->count);
2711
2712         return 0;
2713 }
2714
2715 static int
2716 event_enable_init(struct ftrace_probe_ops *ops, unsigned long ip,
2717                   void **_data)
2718 {
2719         struct event_probe_data **pdata = (struct event_probe_data **)_data;
2720         struct event_probe_data *data = *pdata;
2721
2722         data->ref++;
2723         return 0;
2724 }
2725
2726 static void
2727 event_enable_free(struct ftrace_probe_ops *ops, unsigned long ip,
2728                   void **_data)
2729 {
2730         struct event_probe_data **pdata = (struct event_probe_data **)_data;
2731         struct event_probe_data *data = *pdata;
2732
2733         if (WARN_ON_ONCE(data->ref <= 0))
2734                 return;
2735
2736         data->ref--;
2737         if (!data->ref) {
2738                 /* Remove the SOFT_MODE flag */
2739                 __ftrace_event_enable_disable(data->file, 0, 1);
2740                 module_put(data->file->event_call->mod);
2741                 kfree(data);
2742         }
2743         *pdata = NULL;
2744 }
2745
2746 static struct ftrace_probe_ops event_enable_probe_ops = {
2747         .func                   = event_enable_probe,
2748         .print                  = event_enable_print,
2749         .init                   = event_enable_init,
2750         .free                   = event_enable_free,
2751 };
2752
2753 static struct ftrace_probe_ops event_enable_count_probe_ops = {
2754         .func                   = event_enable_count_probe,
2755         .print                  = event_enable_print,
2756         .init                   = event_enable_init,
2757         .free                   = event_enable_free,
2758 };
2759
2760 static struct ftrace_probe_ops event_disable_probe_ops = {
2761         .func                   = event_enable_probe,
2762         .print                  = event_enable_print,
2763         .init                   = event_enable_init,
2764         .free                   = event_enable_free,
2765 };
2766
2767 static struct ftrace_probe_ops event_disable_count_probe_ops = {
2768         .func                   = event_enable_count_probe,
2769         .print                  = event_enable_print,
2770         .init                   = event_enable_init,
2771         .free                   = event_enable_free,
2772 };
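/*
 * Note: the enable and disable ops above share the same callbacks;
 * whether a probe enables or disables its event is decided by
 * data->enable, which event_enable_func() below sets from the
 * command name.
 */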
2773
2774 static int
2775 event_enable_func(struct ftrace_hash *hash,
2776                   char *glob, char *cmd, char *param, int enabled)
2777 {
2778         struct trace_array *tr = top_trace_array();
2779         struct trace_event_file *file;
2780         struct ftrace_probe_ops *ops;
2781         struct event_probe_data *data;
2782         const char *system;
2783         const char *event;
2784         char *number;
2785         bool enable;
2786         int ret;
2787
2788         if (!tr)
2789                 return -ENODEV;
2790
2791         /* hash funcs only work with set_ftrace_filter */
2792         if (!enabled || !param)
2793                 return -EINVAL;
2794
2795         system = strsep(&param, ":");
2796         if (!param)
2797                 return -EINVAL;
2798
2799         event = strsep(&param, ":");
2800
2801         mutex_lock(&event_mutex);
2802
2803         ret = -EINVAL;
2804         file = find_event_file(tr, system, event);
2805         if (!file)
2806                 goto out;
2807
2808         enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
2809
2810         if (enable)
2811                 ops = param ? &event_enable_count_probe_ops : &event_enable_probe_ops;
2812         else
2813                 ops = param ? &event_disable_count_probe_ops : &event_disable_probe_ops;
2814
2815         if (glob[0] == '!') {
2816                 unregister_ftrace_function_probe_func(glob+1, ops);
2817                 ret = 0;
2818                 goto out;
2819         }
2820
2821         ret = -ENOMEM;
2822         data = kzalloc(sizeof(*data), GFP_KERNEL);
2823         if (!data)
2824                 goto out;
2825
2826         data->enable = enable;
2827         data->count = -1;
2828         data->file = file;
2829
2830         if (!param)
2831                 goto out_reg;
2832
2833         number = strsep(&param, ":");
2834
2835         ret = -EINVAL;
2836         if (!strlen(number))
2837                 goto out_free;
2838
2839         /*
2840          * We use the callback data field (which is a pointer)
2841          * as our counter.
2842          */
2843         ret = kstrtoul(number, 0, &data->count);
2844         if (ret)
2845                 goto out_free;
2846
2847  out_reg:
2848         /* Don't let event modules unload while a probe is registered */
2849         ret = try_module_get(file->event_call->mod);
2850         if (!ret) {
2851                 ret = -EBUSY;
2852                 goto out_free;
2853         }
2854
2855         ret = __ftrace_event_enable_disable(file, 1, 1);
2856         if (ret < 0)
2857                 goto out_put;
2858         ret = register_ftrace_function_probe(glob, ops, data);
2859         /*
2860          * On success, the above returns the number of functions enabled,
2861          * but if it didn't find any functions it returns zero.
2862          * Consider finding no functions a failure too.
2863          */
2864         if (!ret) {
2865                 ret = -ENOENT;
2866                 goto out_disable;
2867         } else if (ret < 0)
2868                 goto out_disable;
2869         /* Just return zero, not the number of enabled functions */
2870         ret = 0;
2871  out:
2872         mutex_unlock(&event_mutex);
2873         return ret;
2874
2875  out_disable:
2876         __ftrace_event_enable_disable(file, 0, 1);
2877  out_put:
2878         module_put(file->event_call->mod);
2879  out_free:
2880         kfree(data);
2881         goto out;
2882 }
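/*
 * Example, via the function tracer filter (syntax per
 * Documentation/trace/ftrace.txt):
 *
 *   # echo 'schedule:enable_event:sched:sched_switch:5' > \
 *			set_ftrace_filter
 *
 * enables the sched:sched_switch event when schedule() is hit, for
 * at most 5 state changes; prefixing the line with '!' removes the
 * probe again.
 */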
2883
2884 static struct ftrace_func_command event_enable_cmd = {
2885         .name                   = ENABLE_EVENT_STR,
2886         .func                   = event_enable_func,
2887 };
2888
2889 static struct ftrace_func_command event_disable_cmd = {
2890         .name                   = DISABLE_EVENT_STR,
2891         .func                   = event_enable_func,
2892 };
2893
2894 static __init int register_event_cmds(void)
2895 {
2896         int ret;
2897
2898         ret = register_ftrace_command(&event_enable_cmd);
2899         if (WARN_ON(ret < 0))
2900                 return ret;
2901         ret = register_ftrace_command(&event_disable_cmd);
2902         if (WARN_ON(ret < 0))
2903                 unregister_ftrace_command(&event_enable_cmd);
2904         return ret;
2905 }
2906 #else
2907 static inline int register_event_cmds(void) { return 0; }
2908 #endif /* CONFIG_DYNAMIC_FTRACE */
2909
2910 /*
2911  * The top level array has already had its trace_event_file
2912  * descriptors created in order to allow for early events to
2913  * be recorded. This function is called after tracefs has been
2914  * initialized, and we now have to create the files associated
2915  * with the events.
2916  */
2917 static __init void
2918 __trace_early_add_event_dirs(struct trace_array *tr)
2919 {
2920         struct trace_event_file *file;
2921         int ret;
2922
2924         list_for_each_entry(file, &tr->events, list) {
2925                 ret = event_create_dir(tr->event_dir, file);
2926                 if (ret < 0)
2927                         pr_warn("Could not create directory for event %s\n",
2928                                 trace_event_name(file->event_call));
2929         }
2930 }
2931
2932 /*
2933  * For early boot up, the top trace array needs to have
2934  * a list of events that can be enabled. This must be done before
2935  * the filesystem is set up in order to allow events to be traced
2936  * early.
2937  */
2938 static __init void
2939 __trace_early_add_events(struct trace_array *tr)
2940 {
2941         struct trace_event_call *call;
2942         int ret;
2943
2944         list_for_each_entry(call, &ftrace_events, list) {
2945                 /* Early boot up should not have any modules loaded */
2946                 if (WARN_ON_ONCE(call->mod))
2947                         continue;
2948
2949                 ret = __trace_early_add_new_event(call, tr);
2950                 if (ret < 0)
2951                         pr_warn("Could not create early event %s\n",
2952                                 trace_event_name(call));
2953         }
2954 }
2955
2956 /* Remove the event directory structure for a trace directory. */
2957 static void
2958 __trace_remove_event_dirs(struct trace_array *tr)
2959 {
2960         struct trace_event_file *file, *next;
2961
2962         list_for_each_entry_safe(file, next, &tr->events, list)
2963                 remove_event_file_dir(file);
2964 }
2965
2966 static void __add_event_to_tracers(struct trace_event_call *call)
2967 {
2968         struct trace_array *tr;
2969
2970         list_for_each_entry(tr, &ftrace_trace_arrays, list)
2971                 __trace_add_new_event(call, tr);
2972 }
2973
2974 extern struct trace_event_call *__start_ftrace_events[];
2975 extern struct trace_event_call *__stop_ftrace_events[];
2976
2977 static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;
2978
2979 static __init int setup_trace_event(char *str)
2980 {
2981         strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
2982         ring_buffer_expanded = true;
2983         tracing_selftest_disabled = true;
2984
2985         return 1;
2986 }
2987 __setup("trace_event=", setup_trace_event);
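/*
 * e.g. booting with "trace_event=sched:sched_switch,sched:sched_wakeup"
 * records those events from early boot, before tracefs exists.
 */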
2988
2989 /* Expects to have event_mutex held when called */
2990 static int
2991 create_event_toplevel_files(struct dentry *parent, struct trace_array *tr)
2992 {
2993         struct dentry *d_events;
2994         struct dentry *entry;
2995
2996         entry = tracefs_create_file("set_event", 0644, parent,
2997                                     tr, &ftrace_set_event_fops);
2998         if (!entry) {
2999                 pr_warn("Could not create tracefs 'set_event' entry\n");
3000                 return -ENOMEM;
3001         }
3002
3003         d_events = tracefs_create_dir("events", parent);
3004         if (!d_events) {
3005                 pr_warn("Could not create tracefs 'events' directory\n");
3006                 return -ENOMEM;
3007         }
3008
3009         entry = tracefs_create_file("set_event_pid", 0644, parent,
3010                                     tr, &ftrace_set_event_pid_fops);
3011
3012         /* ring buffer internal formats */
3013         trace_create_file("header_page", 0444, d_events,
3014                           ring_buffer_print_page_header,
3015                           &ftrace_show_header_fops);
3016
3017         trace_create_file("header_event", 0444, d_events,
3018                           ring_buffer_print_entry_header,
3019                           &ftrace_show_header_fops);
3020
3021         trace_create_file("enable", 0644, d_events,
3022                           tr, &ftrace_tr_enable_fops);
3023
3024         tr->event_dir = d_events;
3025
3026         return 0;
3027 }
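/*
 * Relative to the instance directory, the files created above are:
 *
 *   set_event, set_event_pid, events/, events/header_page,
 *   events/header_event and events/enable
 */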
3028
3029 /**
3030  * event_trace_add_tracer - add an instance of a trace_array to events
3031  * @parent: The parent dentry to place the files/directories for events in
3032  * @tr: The trace array associated with these events
3033  *
3034  * When a new instance is created, it needs to set up its events
3035  * directory, as well as other files associated with events. It also
3036  * creates the event hierarchy in the @parent/events directory.
3037  *
3038  * Returns 0 on success.
3039  */
3040 int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr)
3041 {
3042         int ret;
3043
3044         mutex_lock(&event_mutex);
3045
3046         ret = create_event_toplevel_files(parent, tr);
3047         if (ret)
3048                 goto out_unlock;
3049
3050         down_write(&trace_event_sem);
3051         __trace_add_event_dirs(tr);
3052         up_write(&trace_event_sem);
3053
3054  out_unlock:
3055         mutex_unlock(&event_mutex);
3056
3057         return ret;
3058 }
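/*
 * This path runs, for example, when a new instance is created from
 * user space:
 *
 *   # mkdir /sys/kernel/tracing/instances/foo
 *
 * which populates instances/foo/events/ with the same hierarchy as
 * the top-level events directory.
 */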
3059
3060 /*
3061  * The top trace array already had its file descriptors created.
3062  * Now the files themselves need to be created.
3063  */
3064 static __init int
3065 early_event_add_tracer(struct dentry *parent, struct trace_array *tr)
3066 {
3067         int ret;
3068
3069         mutex_lock(&event_mutex);
3070
3071         ret = create_event_toplevel_files(parent, tr);
3072         if (ret)
3073                 goto out_unlock;
3074
3075         down_write(&trace_event_sem);
3076         __trace_early_add_event_dirs(tr);
3077         up_write(&trace_event_sem);
3078
3079  out_unlock:
3080         mutex_unlock(&event_mutex);
3081
3082         return ret;
3083 }
3084
3085 int event_trace_del_tracer(struct trace_array *tr)
3086 {
3087         mutex_lock(&event_mutex);
3088
3089         /* Disable any event triggers and associated soft-disabled events */
3090         clear_event_triggers(tr);
3091
3092         /* Clear the pid list */
3093         __ftrace_clear_event_pids(tr);
3094
3095         /* Disable any running events */
3096         __ftrace_set_clr_event_nolock(tr, NULL, NULL, NULL, 0);
3097
3098         /* Access to events is within rcu_read_lock_sched() */
3099         synchronize_sched();
3100
3101         down_write(&trace_event_sem);
3102         __trace_remove_event_dirs(tr);
3103         tracefs_remove_recursive(tr->event_dir);
3104         up_write(&trace_event_sem);
3105
3106         tr->event_dir = NULL;
3107
3108         mutex_unlock(&event_mutex);
3109
3110         return 0;
3111 }
3112
3113 static __init int event_trace_memsetup(void)
3114 {
3115         field_cachep = KMEM_CACHE(ftrace_event_field, SLAB_PANIC);
3116         file_cachep = KMEM_CACHE(trace_event_file, SLAB_PANIC);
3117         return 0;
3118 }
3119
3120 static __init void
3121 early_enable_events(struct trace_array *tr, bool disable_first)
3122 {
3123         char *buf = bootup_event_buf;
3124         char *token;
3125         int ret;
3126
3127         while (true) {
3128                 token = strsep(&buf, ",");
3129
3130                 if (!token)
3131                         break;
3132
3133                 if (*token) {
3134                         /* Restarting syscalls requires that we stop them first */
3135                         if (disable_first)
3136                                 ftrace_set_clr_event(tr, token, 0);
3137
3138                         ret = ftrace_set_clr_event(tr, token, 1);
3139                         if (ret)
3140                                 pr_warn("Failed to enable trace event: %s\n", token);
3141                 }
3142
3143                 /* Put back the comma to allow this to be called again */
3144                 if (buf)
3145                         *(buf - 1) = ',';
3146         }
3147 }
3148
3149 static __init int event_trace_enable(void)
3150 {
3151         struct trace_array *tr = top_trace_array();
3152         struct trace_event_call **iter, *call;
3153         int ret;
3154
3155         if (!tr)
3156                 return -ENODEV;
3157
3158         for_each_event(iter, __start_ftrace_events, __stop_ftrace_events) {
3159
3160                 call = *iter;
3161                 ret = event_init(call);
3162                 if (!ret)
3163                         list_add(&call->list, &ftrace_events);
3164         }
3165
3166         /*
3167          * We need the top trace array to have a working set of trace
3168          * points at early init, before the debug files and directories
3169          * are created. Create the file entries now, and attach them
3170          * to the actual file dentries later.
3171          */
3172         __trace_early_add_events(tr);
3173
3174         early_enable_events(tr, false);
3175
3176         trace_printk_start_comm();
3177
3178         register_event_cmds();
3179
3180         register_trigger_cmds();
3181
3182         return 0;
3183 }
3184
3185 /*
3186  * event_trace_enable() is called from trace_event_init() first to
3187  * initialize events and perhaps start any events that are on the
3188  * command line. Unfortunately, there are some events that will not
3189  * start this early, like the system call tracepoints that need
3190  * to set the TIF_SYSCALL_TRACEPOINT flag of pid 1. But event_trace_enable()
3191  * is called before pid 1 starts, so this flag is never set, and
3192  * the syscall tracepoints are never reached even though the events
3193  * are enabled (and do nothing).
3194  */
3195 static __init int event_trace_enable_again(void)
3196 {
3197         struct trace_array *tr;
3198
3199         tr = top_trace_array();
3200         if (!tr)
3201                 return -ENODEV;
3202
3203         early_enable_events(tr, true);
3204
3205         return 0;
3206 }
3207
3208 early_initcall(event_trace_enable_again);
3209
3210 static __init int event_trace_init(void)
3211 {
3212         struct trace_array *tr;
3213         struct dentry *d_tracer;
3214         struct dentry *entry;
3215         int ret;
3216
3217         tr = top_trace_array();
3218         if (!tr)
3219                 return -ENODEV;
3220
3221         d_tracer = tracing_init_dentry();
3222         if (IS_ERR(d_tracer))
3223                 return 0;
3224
3225         entry = tracefs_create_file("available_events", 0444, d_tracer,
3226                                     tr, &ftrace_avail_fops);
3227         if (!entry)
3228                 pr_warn("Could not create tracefs 'available_events' entry\n");
3229
3230         if (trace_define_generic_fields())
3231                 pr_warn("tracing: Failed to allocated generic fields");
3232
3233         if (trace_define_common_fields())
3234                 pr_warn("tracing: Failed to allocate common fields");
3235
3236         ret = early_event_add_tracer(d_tracer, tr);
3237         if (ret)
3238                 return ret;
3239
3240 #ifdef CONFIG_MODULES
3241         ret = register_module_notifier(&trace_module_nb);
3242         if (ret)
3243                 pr_warn("Failed to register trace events module notifier\n");
3244 #endif
3245         return 0;
3246 }
3247
3248 void __init trace_event_init(void)
3249 {
3250         event_trace_memsetup();
3251         init_ftrace_syscalls();
3252         event_trace_enable();
3253 }
3254
3255 fs_initcall(event_trace_init);
3256
3257 #ifdef CONFIG_FTRACE_STARTUP_TEST
3258
3259 static DEFINE_SPINLOCK(test_spinlock);
3260 static DEFINE_SPINLOCK(test_spinlock_irq);
3261 static DEFINE_MUTEX(test_mutex);
3262
3263 static __init void test_work(struct work_struct *dummy)
3264 {
3265         spin_lock(&test_spinlock);
3266         spin_lock_irq(&test_spinlock_irq);
3267         udelay(1);
3268         spin_unlock_irq(&test_spinlock_irq);
3269         spin_unlock(&test_spinlock);
3270
3271         mutex_lock(&test_mutex);
3272         msleep(1);
3273         mutex_unlock(&test_mutex);
3274 }
3275
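/*
 * Kernel thread that allocates memory, runs test_work() on every
 * CPU, and then sleeps until it is asked to stop.
 */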
3276 static __init int event_test_thread(void *unused)
3277 {
3278         void *test_malloc;
3279
3280         test_malloc = kmalloc(1234, GFP_KERNEL);
3281         if (!test_malloc)
3282                 pr_info("failed to kmalloc\n");
3283
3284         schedule_on_each_cpu(test_work);
3285
3286         kfree(test_malloc);
3287
3288         set_current_state(TASK_INTERRUPTIBLE);
3289         while (!kthread_should_stop()) {
3290                 schedule();
3291                 set_current_state(TASK_INTERRUPTIBLE);
3292         }
3293         __set_current_state(TASK_RUNNING);
3294
3295         return 0;
3296 }
3297
3298 /*
3299  * Do various things that may trigger events.
3300  */
3301 static __init void event_test_stuff(void)
3302 {
3303         struct task_struct *test_thread;
3304
3305         test_thread = kthread_run(event_test_thread, NULL, "test-events");
3306         msleep(1);
3307         kthread_stop(test_thread);
3308 }
3309
3310 /*
3311  * For every trace event defined, we will test each trace point separately,
3312  * and then by groups, and finally all trace points.
3313  */
3314 static __init void event_trace_self_tests(void)
3315 {
3316         struct trace_subsystem_dir *dir;
3317         struct trace_event_file *file;
3318         struct trace_event_call *call;
3319         struct event_subsystem *system;
3320         struct trace_array *tr;
3321         int ret;
3322
3323         tr = top_trace_array();
3324         if (!tr)
3325                 return;
3326
3327         pr_info("Running tests on trace events:\n");
3328
3329         list_for_each_entry(file, &tr->events, list) {
3330
3331                 call = file->event_call;
3332
3333                 /* Only test those that have a probe */
3334                 if (!call->class || !call->class->probe)
3335                         continue;
3336
3337 /*
3338  * Testing syscall events here is pretty useless, but we
3339  * still do it if configured, even though it is time consuming.
3340  * What we really need is a user thread to perform the
3341  * syscalls as we test.
3342  */
3343 #ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
3344                 if (call->class->system &&
3345                     strcmp(call->class->system, "syscalls") == 0)
3346                         continue;
3347 #endif
3348
3349                 pr_info("Testing event %s: ", trace_event_name(call));
3350
3351                 /*
3352                  * If an event is already enabled, someone is using
3353                  * it, and the self test should not be run on it.
3354                  */
3355                 if (file->flags & EVENT_FILE_FL_ENABLED) {
3356                         pr_warn("Enabled event during self test!\n");
3357                         WARN_ON_ONCE(1);
3358                         continue;
3359                 }
3360
3361                 ftrace_event_enable_disable(file, 1);
3362                 event_test_stuff();
3363                 ftrace_event_enable_disable(file, 0);
3364
3365                 pr_cont("OK\n");
3366         }
3367
3368         /* Now test at the sub system level */
3369
3370         pr_info("Running tests on trace event systems:\n");
3371
3372         list_for_each_entry(dir, &tr->systems, list) {
3373
3374                 system = dir->subsystem;
3375
3376                 /* the ftrace system is special, skip it */
3377                 if (strcmp(system->name, "ftrace") == 0)
3378                         continue;
3379
3380                 pr_info("Testing event system %s: ", system->name);
3381
3382                 ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 1);
3383                 if (WARN_ON_ONCE(ret)) {
3384                         pr_warn("error enabling system %s\n",
3385                                 system->name);
3386                         continue;
3387                 }
3388
3389                 event_test_stuff();
3390
3391                 ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 0);
3392                 if (WARN_ON_ONCE(ret)) {
3393                         pr_warn("error disabling system %s\n",
3394                                 system->name);
3395                         continue;
3396                 }
3397
3398                 pr_cont("OK\n");
3399         }
3400
3401         /* Test with all events enabled */
3402
3403         pr_info("Running tests on all trace events:\n");
3404         pr_info("Testing all events: ");
3405
3406         ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 1);
3407         if (WARN_ON_ONCE(ret)) {
3408                 pr_warn("error enabling all events\n");
3409                 return;
3410         }
3411
3412         event_test_stuff();
3413
3414         /* Disable all events again */
3415         ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 0);
3416         if (WARN_ON_ONCE(ret)) {
3417                 pr_warn("error disabling all events\n");
3418                 return;
3419         }
3420
3421         pr_cont("OK\n");
3422 }
3423
3424 #ifdef CONFIG_FUNCTION_TRACER
3425
3426 static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);
3427
3428 static struct trace_event_file event_trace_file __initdata;
3429
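/*
 * Function tracer callback: record a TRACE_FN entry in the top level
 * trace buffer for each traced function. A per CPU counter guards
 * against recursing into ourselves.
 */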
3430 static void __init
3431 function_test_events_call(unsigned long ip, unsigned long parent_ip,
3432                           struct ftrace_ops *op, struct pt_regs *pt_regs)
3433 {
3434         struct ring_buffer_event *event;
3435         struct ring_buffer *buffer;
3436         struct ftrace_entry *entry;
3437         unsigned long flags;
3438         long disabled;
3439         int cpu;
3440         int pc;
3441
3442         pc = preempt_count();
3443         preempt_disable_notrace();
3444         cpu = raw_smp_processor_id();
3445         disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));
3446
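        /* Bail if this callback recursed on this CPU */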
3447         if (disabled != 1)
3448                 goto out;
3449
3450         local_save_flags(flags);
3451
3452         event = trace_event_buffer_lock_reserve(&buffer, &event_trace_file,
3453                                                 TRACE_FN, sizeof(*entry),
3454                                                 flags, pc);
3455         if (!event)
3456                 goto out;
3457         entry   = ring_buffer_event_data(event);
3458         entry->ip                       = ip;
3459         entry->parent_ip                = parent_ip;
3460
3461         event_trigger_unlock_commit(&event_trace_file, buffer, event,
3462                                     entry, flags, pc);
3463  out:
3464         atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
3465         preempt_enable_notrace();
3466 }
3467
3468 static struct ftrace_ops trace_ops __initdata = {
3470         .func = function_test_events_call,
3471         .flags = FTRACE_OPS_FL_RECURSION_SAFE,
3472 };
3473
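/*
 * Rerun the event self tests with the function tracer enabled, to
 * test that events also work from within a function trace callback.
 */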
3474 static __init void event_trace_self_test_with_function(void)
3475 {
3476         int ret;
3477
3478         event_trace_file.tr = top_trace_array();
3479         if (WARN_ON(!event_trace_file.tr))
3480                 return;
3481
3482         ret = register_ftrace_function(&trace_ops);
3483         if (WARN_ON(ret < 0)) {
3484                 pr_info("Failed to enable function tracer for event tests\n");
3485                 return;
3486         }
3487         pr_info("Running tests again, along with the function tracer\n");
3488         event_trace_self_tests();
3489         unregister_ftrace_function(&trace_ops);
3490 }
3491 #else
3492 static __init void event_trace_self_test_with_function(void)
3493 {
3494 }
3495 #endif
3496
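/*
 * Run the self tests at late_initcall time, unless they have been
 * disabled.
 */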
3497 static __init int event_trace_self_tests_init(void)
3498 {
3499         if (!tracing_selftest_disabled) {
3500                 event_trace_self_tests();
3501                 event_trace_self_test_with_function();
3502         }
3503
3504         return 0;
3505 }
3506
3507 late_initcall(event_trace_self_tests_init);
3508
3509 #endif