Commit | Line | Data |
---|---|---|
16444a8a ACM |
1 | /* |
2 | * Infrastructure for profiling code inserted by 'gcc -pg'. | |
3 | * | |
4 | * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com> | |
5 | * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com> | |
6 | * | |
7 | * Originally ported from the -rt patch by: | |
8 | * Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com> | |
9 | * | |
10 | * Based on code in the latency_tracer, that is: | |
11 | * | |
12 | * Copyright (C) 2004-2006 Ingo Molnar | |
13 | * Copyright (C) 2004 William Lee Irwin III | |
14 | */ | |
15 | ||
3d083395 SR |
16 | #include <linux/stop_machine.h> |
17 | #include <linux/clocksource.h> | |
18 | #include <linux/kallsyms.h> | |
5072c59f SR |
19 | #include <linux/seq_file.h> |
20 | #include <linux/debugfs.h> | |
3d083395 SR |
21 | #include <linux/kthread.h> |
22 | #include <linux/hardirq.h> | |
16444a8a | 23 | #include <linux/ftrace.h> |
5072c59f | 24 | #include <linux/uaccess.h> |
b0fc494f | 25 | #include <linux/sysctl.h> |
3d083395 | 26 | #include <linux/hash.h> |
5072c59f | 27 | #include <linux/ctype.h> |
3d083395 SR |
28 | #include <linux/list.h> |
29 | ||
30 | #include "trace.h" | |
16444a8a | 31 | |
4eebcc81 SR |
/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
/* Remembers the previous sysctl value so only real 0<->1 flips act. */
static int last_ftrace_enabled;

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

/* Protects modifications of the ftrace_list callback chain. */
static DEFINE_SPINLOCK(ftrace_lock);
/* Serializes the sysctl handler against the daemon and start/stop paths. */
static DEFINE_MUTEX(ftrace_sysctl_lock);

/* Sentinel terminating the ftrace_ops list; its func is the no-op stub. */
static struct ftrace_ops ftrace_list_end __read_mostly =
{
	.func = ftrace_stub,
};

/* Head of the registered-ops list; pointing at the sentinel means "empty". */
static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
/* The function mcount actually calls; ftrace_stub when tracing is off. */
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;

/* mcount is defined per arch in assembly */
EXPORT_SYMBOL(mcount);
55 | ||
56 | notrace void ftrace_list_func(unsigned long ip, unsigned long parent_ip) | |
57 | { | |
58 | struct ftrace_ops *op = ftrace_list; | |
59 | ||
60 | /* in case someone actually ports this to alpha! */ | |
61 | read_barrier_depends(); | |
62 | ||
63 | while (op != &ftrace_list_end) { | |
64 | /* silly alpha */ | |
65 | read_barrier_depends(); | |
66 | op->func(ip, parent_ip); | |
67 | op = op->next; | |
68 | }; | |
69 | } | |
70 | ||
/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be lag: CPUs already inside the old
 * trace function keep running it until they return.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
}
81 | ||
/*
 * Add @ops to the head of the callback list and retarget the
 * function mcount calls.  Must not be called from interrupt
 * context (plain spin_lock).  Always returns 0.
 */
static int notrace __register_ftrace_function(struct ftrace_ops *ops)
{
	/* Should never be called by interrupts */
	spin_lock(&ftrace_lock);

	ops->next = ftrace_list;
	/*
	 * We are entering ops into the ftrace_list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the ftrace_list.
	 */
	smp_wmb();
	ftrace_list = ops;

	if (ftrace_enabled) {
		/*
		 * For one func, simply call it directly.
		 * For more than one func, call the chain.
		 */
		if (ops->next == &ftrace_list_end)
			ftrace_trace_function = ops->func;
		else
			ftrace_trace_function = ftrace_list_func;
	}

	spin_unlock(&ftrace_lock);

	return 0;
}
112 | ||
/*
 * Remove @ops from the callback list.  Returns 0 on success, -1
 * if @ops was not registered.  Retargets ftrace_trace_function
 * when only one (or no) callback remains.
 */
static int notrace __unregister_ftrace_function(struct ftrace_ops *ops)
{
	struct ftrace_ops **p;
	int ret = 0;

	spin_lock(&ftrace_lock);

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
		ftrace_trace_function = ftrace_stub;
		ftrace_list = &ftrace_list_end;
		goto out;
	}

	/* Find the list slot that points at @ops. */
	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops) {
		/* @ops was never registered */
		ret = -1;
		goto out;
	}

	*p = (*p)->next;

	if (ftrace_enabled) {
		/* If we only have one func left, then call that directly */
		if (ftrace_list == &ftrace_list_end ||
		    ftrace_list->next == &ftrace_list_end)
			ftrace_trace_function = ftrace_list->func;
	}

 out:
	spin_unlock(&ftrace_lock);

	return ret;
}
153 | ||
#ifdef CONFIG_DYNAMIC_FTRACE

/* Kernel thread that periodically converts newly recorded call sites. */
static struct task_struct *ftraced_task;
/* Waiters wanting to know the daemon finished an iteration. */
static DECLARE_WAIT_QUEUE_HEAD(ftraced_waiters);
/* Bumped once per daemon pass so waiters can detect progress. */
static unsigned long ftraced_iteration_counter;

/* Command bits handed to __ftrace_modify_code() under stop_machine. */
enum {
	FTRACE_ENABLE_CALLS = (1 << 0),
	FTRACE_DISABLE_CALLS = (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC = (1 << 2),
	FTRACE_ENABLE_MCOUNT = (1 << 3),
	FTRACE_DISABLE_MCOUNT = (1 << 4),
};

/* Nonzero when set_ftrace_filter restricts which sites get enabled. */
static int ftrace_filtered;

/* Hash of recorded-but-not-yet-converted mcount call sites. */
static struct hlist_head ftrace_hash[FTRACE_HASHSIZE];

/* Per-CPU recursion guard used by ftrace_record_ip(). */
static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu);

/* Protects hash insertion in ftrace_record_ip(). */
static DEFINE_SPINLOCK(ftrace_shutdown_lock);
/* Protects the daemon state (suspend count, trigger, counters). */
static DEFINE_MUTEX(ftraced_lock);
/* Protects the filter bits and the filter debugfs file state. */
static DEFINE_MUTEX(ftrace_filter_lock);

/* One page worth of dyn_ftrace records, chained into a list. */
struct ftrace_page {
	struct ftrace_page *next;
	int index;			/* number of records used */
	struct dyn_ftrace records[];	/* flexible array fills the page */
} __attribute__((packed));

#define ENTRIES_PER_PAGE \
	((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))

/* estimate from running different kernels */
#define NR_TO_INIT 10000

static struct ftrace_page *ftrace_pages_start;
static struct ftrace_page *ftrace_pages;	/* page currently being filled */

/* Set when new sites were hashed and a patching pass is needed. */
static int ftraced_trigger;
/* Count of ftrace_startup() users; >0 keeps the call sites enabled. */
static int ftraced_suspend;

/* Nonzero while the daemon is patching; pauses recording. */
static int ftrace_record_suspend;

/* Free list of recycled records, linked through rec->ip. */
static struct dyn_ftrace *ftrace_free_records;
199 | ||
9ff9cdb2 IM |
200 | static inline int notrace |
201 | ftrace_ip_in_hash(unsigned long ip, unsigned long key) | |
3d083395 SR |
202 | { |
203 | struct dyn_ftrace *p; | |
204 | struct hlist_node *t; | |
205 | int found = 0; | |
206 | ||
207 | hlist_for_each_entry(p, t, &ftrace_hash[key], node) { | |
208 | if (p->ip == ip) { | |
209 | found = 1; | |
210 | break; | |
211 | } | |
212 | } | |
213 | ||
214 | return found; | |
215 | } | |
216 | ||
/*
 * Insert @node at the head of its hash chain.  In this file it is
 * only called under ftrace_shutdown_lock (see ftrace_record_ip()).
 */
static inline void notrace
ftrace_add_hash(struct dyn_ftrace *node, unsigned long key)
{
	hlist_add_head(&node->node, &ftrace_hash[key]);
}
222 | ||
/*
 * Return @rec to the free list.  The next-pointer is smuggled in
 * rec->ip and FTRACE_FL_FREE marks the record as recycled so
 * ftrace_alloc_dyn_node() can sanity-check the list.
 */
static notrace void ftrace_free_rec(struct dyn_ftrace *rec)
{
	/* no locking, only called from kstop_machine */

	rec->ip = (unsigned long)ftrace_free_records;
	ftrace_free_records = rec;
	rec->flags |= FTRACE_FL_FREE;
}
231 | ||
/*
 * Hand out a dyn_ftrace record: reuse a freed record if available,
 * otherwise take the next slot from the current page.  Returns
 * NULL when no space is left.  @ip itself is not stored here; the
 * caller fills in the returned record.
 */
static notrace struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
	struct dyn_ftrace *rec;

	/* First check for freed records */
	if (ftrace_free_records) {
		rec = ftrace_free_records;

		/*
		 * Anything on the free list must carry FTRACE_FL_FREE;
		 * if not, the list is corrupted: shut ftrace down.
		 */
		if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
			WARN_ON_ONCE(1);
			ftrace_free_records = NULL;
			ftrace_disabled = 1;
			ftrace_enabled = 0;
			return NULL;
		}

		/* The free list links records through their ip field. */
		ftrace_free_records = (void *)rec->ip;
		memset(rec, 0, sizeof(*rec));
		return rec;
	}

	/* Current page full: advance, if a spare page was replenished. */
	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
		if (!ftrace_pages->next)
			return NULL;
		ftrace_pages = ftrace_pages->next;
	}

	return &ftrace_pages->records[ftrace_pages->index++];
}
261 | ||
3d083395 | 262 | static void notrace |
d61f82d0 | 263 | ftrace_record_ip(unsigned long ip) |
3d083395 SR |
264 | { |
265 | struct dyn_ftrace *node; | |
266 | unsigned long flags; | |
267 | unsigned long key; | |
268 | int resched; | |
269 | int atomic; | |
270 | ||
4eebcc81 | 271 | if (!ftrace_enabled || ftrace_disabled) |
d61f82d0 SR |
272 | return; |
273 | ||
3d083395 SR |
274 | resched = need_resched(); |
275 | preempt_disable_notrace(); | |
276 | ||
277 | /* We simply need to protect against recursion */ | |
278 | __get_cpu_var(ftrace_shutdown_disable_cpu)++; | |
279 | if (__get_cpu_var(ftrace_shutdown_disable_cpu) != 1) | |
280 | goto out; | |
281 | ||
282 | if (unlikely(ftrace_record_suspend)) | |
283 | goto out; | |
284 | ||
285 | key = hash_long(ip, FTRACE_HASHBITS); | |
286 | ||
287 | WARN_ON_ONCE(key >= FTRACE_HASHSIZE); | |
288 | ||
289 | if (ftrace_ip_in_hash(ip, key)) | |
290 | goto out; | |
291 | ||
292 | atomic = irqs_disabled(); | |
293 | ||
294 | spin_lock_irqsave(&ftrace_shutdown_lock, flags); | |
295 | ||
296 | /* This ip may have hit the hash before the lock */ | |
297 | if (ftrace_ip_in_hash(ip, key)) | |
298 | goto out_unlock; | |
299 | ||
300 | /* | |
301 | * There's a slight race that the ftraced will update the | |
d61f82d0 | 302 | * hash and reset here. If it is already converted, skip it. |
3d083395 | 303 | */ |
d61f82d0 SR |
304 | if (ftrace_ip_converted(ip)) |
305 | goto out_unlock; | |
306 | ||
307 | node = ftrace_alloc_dyn_node(ip); | |
3d083395 SR |
308 | if (!node) |
309 | goto out_unlock; | |
310 | ||
311 | node->ip = ip; | |
312 | ||
313 | ftrace_add_hash(node, key); | |
314 | ||
315 | ftraced_trigger = 1; | |
316 | ||
317 | out_unlock: | |
318 | spin_unlock_irqrestore(&ftrace_shutdown_lock, flags); | |
319 | out: | |
320 | __get_cpu_var(ftrace_shutdown_disable_cpu)--; | |
321 | ||
322 | /* prevent recursion with scheduler */ | |
323 | if (resched) | |
324 | preempt_enable_no_resched_notrace(); | |
325 | else | |
326 | preempt_enable_notrace(); | |
327 | } | |
328 | ||
/* Patch-target addresses: the dynamic trampoline and raw mcount. */
#define FTRACE_ADDR ((long)(ftrace_caller))
#define MCOUNT_ADDR ((long)(mcount))

/*
 * Enable or disable the call at @rec->ip by patching its code.
 * The caller precomputed the nop side in @old (enable) or @new
 * (disable); the call side is generated here.  Honors the filter
 * bits when filtering is active.  On patch failure the record is
 * marked FAILED and recycled — unless the site simply has not been
 * converted yet (still in the hash).
 */
static void notrace
__ftrace_replace_code(struct dyn_ftrace *rec,
		      unsigned char *old, unsigned char *new, int enable)
{
	unsigned long ip;
	int failed;

	ip = rec->ip;

	if (ftrace_filtered && enable) {
		unsigned long fl;
		/*
		 * If filtering is on:
		 *
		 * If this record is set to be filtered and
		 * is enabled then do nothing.
		 *
		 * If this record is set to be filtered and
		 * it is not enabled, enable it.
		 *
		 * If this record is not set to be filtered
		 * and it is not enabled do nothing.
		 *
		 * If this record is not set to be filtered and
		 * it is enabled, disable it.
		 */
		fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);

		if ((fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) ||
		    (fl == 0))
			return;

		/*
		 * If it is enabled disable it,
		 * otherwise enable it!
		 */
		if (fl == FTRACE_FL_ENABLED) {
			/* swap new and old: write the nop, expect the call */
			new = old;
			old = ftrace_call_replace(ip, FTRACE_ADDR);
			rec->flags &= ~FTRACE_FL_ENABLED;
		} else {
			new = ftrace_call_replace(ip, FTRACE_ADDR);
			rec->flags |= FTRACE_FL_ENABLED;
		}
	} else {

		if (enable)
			new = ftrace_call_replace(ip, FTRACE_ADDR);
		else
			old = ftrace_call_replace(ip, FTRACE_ADDR);

		/* Skip sites already in the requested state. */
		if (enable) {
			if (rec->flags & FTRACE_FL_ENABLED)
				return;
			rec->flags |= FTRACE_FL_ENABLED;
		} else {
			if (!(rec->flags & FTRACE_FL_ENABLED))
				return;
			rec->flags &= ~FTRACE_FL_ENABLED;
		}
	}

	failed = ftrace_modify_code(ip, old, new);
	if (failed) {
		unsigned long key;
		/* It is possible that the function hasn't been converted yet */
		key = hash_long(ip, FTRACE_HASHBITS);
		if (!ftrace_ip_in_hash(ip, key)) {
			rec->flags |= FTRACE_FL_FAILED;
			ftrace_free_rec(rec);
		}

	}
}
407 | ||
/*
 * Walk every recorded call site and patch it according to @enable.
 * Runs from __ftrace_modify_code() under stop_machine, so no
 * locking is needed.
 */
static void notrace ftrace_replace_code(int enable)
{
	unsigned char *new = NULL, *old = NULL;
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	int i;

	/* The nop side of the patch is invariant; compute it once. */
	if (enable)
		old = ftrace_nop_replace();
	else
		new = ftrace_nop_replace();

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			/* don't modify code that has already faulted */
			if (rec->flags & FTRACE_FL_FAILED)
				continue;

			__ftrace_replace_code(rec, old, new, enable);
		}
	}
}
432 | ||
/*
 * Keep a spare record page ready for the next daemon pass.
 * Allocation failure is tolerated on purpose: ftrace_alloc_dyn_node()
 * will return NULL and we retry on a later pass.
 */
static notrace void ftrace_shutdown_replenish(void)
{
	if (ftrace_pages->next)
		return;

	/* allocate another page */
	ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL);
}
3d083395 | 441 | |
/*
 * Convert the mcount call at @rec->ip into a nop.  On patch
 * failure the record is flagged FAILED and recycled.
 */
static notrace void
ftrace_code_disable(struct dyn_ftrace *rec)
{
	unsigned long ip;
	unsigned char *nop, *call;
	int failed;

	ip = rec->ip;

	nop = ftrace_nop_replace();
	call = ftrace_call_replace(ip, MCOUNT_ADDR);

	failed = ftrace_modify_code(ip, call, nop);
	if (failed) {
		rec->flags |= FTRACE_FL_FAILED;
		ftrace_free_rec(rec);
	}
}
460 | ||
/*
 * stop_machine callback: apply the FTRACE_* command bits in *data
 * while every other CPU is halted.
 */
static int notrace __ftrace_modify_code(void *data)
{
	unsigned long addr;
	int *command = data;

	if (*command & FTRACE_ENABLE_CALLS)
		ftrace_replace_code(1);
	else if (*command & FTRACE_DISABLE_CALLS)
		ftrace_replace_code(0);

	if (*command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	/* Point mcount's hook at the recorder, or back at the stub. */
	if (*command & FTRACE_ENABLE_MCOUNT) {
		addr = (unsigned long)ftrace_record_ip;
		ftrace_mcount_set(&addr);
	} else if (*command & FTRACE_DISABLE_MCOUNT) {
		addr = (unsigned long)ftrace_stub;
		ftrace_mcount_set(&addr);
	}

	return 0;
}
484 | ||
/* Run __ftrace_modify_code(&command) with all CPUs stopped. */
static void notrace ftrace_run_update_code(int command)
{
	stop_machine_run(__ftrace_modify_code, &command, NR_CPUS);
}
489 | ||
/* Last trace function actually patched in; detects changes. */
static ftrace_func_t saved_ftrace_func;

/*
 * Enable tracing: bump the user count and, on the first user or a
 * changed trace function, patch the kernel under stop_machine.
 */
static void notrace ftrace_startup(void)
{
	int command = 0;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	ftraced_suspend++;
	if (ftraced_suspend == 1)
		command |= FTRACE_ENABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftraced_lock);
}
516 | ||
/*
 * Disable tracing: drop the user count and, when the last user is
 * gone or the trace function changed, patch the kernel back.
 */
static void notrace ftrace_shutdown(void)
{
	int command = 0;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	ftraced_suspend--;
	if (!ftraced_suspend)
		command |= FTRACE_DISABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftraced_lock);
}
541 | ||
/*
 * sysctl just turned ftrace_enabled on: rehook mcount and, if
 * there are active users, re-enable the call sites too.
 */
static void notrace ftrace_startup_sysctl(void)
{
	int command = FTRACE_ENABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftraced_suspend is true if we want ftrace running */
	if (ftraced_suspend)
		command |= FTRACE_ENABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftraced_lock);
}
559 | ||
/*
 * sysctl just turned ftrace_enabled off: unhook mcount and, if
 * users are active, disable the call sites as well.
 */
static void notrace ftrace_shutdown_sysctl(void)
{
	int command = FTRACE_DISABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	/* ftraced_suspend is true if ftrace is running */
	if (ftraced_suspend)
		command |= FTRACE_DISABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftraced_lock);
}
575 | ||
/* Duration of the last conversion pass, and per-pass/total counts. */
static cycle_t ftrace_update_time;
static unsigned long ftrace_update_cnt;
unsigned long ftrace_update_tot_cnt;

/*
 * stop_machine callback: drain the hash of newly recorded call
 * sites, nop each one out, and account how long the pass took.
 */
static int notrace __ftrace_update_code(void *ignore)
{
	struct dyn_ftrace *p;
	struct hlist_head head;
	struct hlist_node *t;
	int save_ftrace_enabled;
	cycle_t start, stop;
	int i;

	/* Don't be recording funcs now */
	save_ftrace_enabled = ftrace_enabled;
	ftrace_enabled = 0;

	start = ftrace_now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	/* No locks needed, the machine is stopped! */
	for (i = 0; i < FTRACE_HASHSIZE; i++) {
		if (hlist_empty(&ftrace_hash[i]))
			continue;

		/* Detach the chain so the bucket can refill later. */
		head = ftrace_hash[i];
		INIT_HLIST_HEAD(&ftrace_hash[i]);

		/* all CPUS are stopped, we are safe to modify code */
		hlist_for_each_entry(p, t, &head, node) {
			ftrace_code_disable(p);
			ftrace_update_cnt++;
		}

	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += ftrace_update_cnt;

	ftrace_enabled = save_ftrace_enabled;

	return 0;
}
620 | ||
/* Run __ftrace_update_code() with all CPUs stopped. */
static void notrace ftrace_update_code(void)
{
	if (unlikely(ftrace_disabled))
		return;

	stop_machine_run(__ftrace_update_code, NULL, NR_CPUS);
}
628 | ||
629 | static int notrace ftraced(void *ignore) | |
630 | { | |
631 | unsigned long usecs; | |
632 | ||
633 | set_current_state(TASK_INTERRUPTIBLE); | |
634 | ||
635 | while (!kthread_should_stop()) { | |
636 | ||
637 | /* check once a second */ | |
638 | schedule_timeout(HZ); | |
639 | ||
4eebcc81 SR |
640 | if (unlikely(ftrace_disabled)) |
641 | continue; | |
642 | ||
b0fc494f | 643 | mutex_lock(&ftrace_sysctl_lock); |
3d083395 | 644 | mutex_lock(&ftraced_lock); |
b0fc494f | 645 | if (ftrace_enabled && ftraced_trigger && !ftraced_suspend) { |
3d083395 SR |
646 | ftrace_record_suspend++; |
647 | ftrace_update_code(); | |
648 | usecs = nsecs_to_usecs(ftrace_update_time); | |
649 | if (ftrace_update_tot_cnt > 100000) { | |
650 | ftrace_update_tot_cnt = 0; | |
651 | pr_info("hm, dftrace overflow: %lu change%s" | |
652 | " (%lu total) in %lu usec%s\n", | |
653 | ftrace_update_cnt, | |
654 | ftrace_update_cnt != 1 ? "s" : "", | |
655 | ftrace_update_tot_cnt, | |
656 | usecs, usecs != 1 ? "s" : ""); | |
4eebcc81 | 657 | ftrace_disabled = 1; |
3d083395 SR |
658 | WARN_ON_ONCE(1); |
659 | } | |
660 | ftraced_trigger = 0; | |
661 | ftrace_record_suspend--; | |
662 | } | |
e1c08bdd | 663 | ftraced_iteration_counter++; |
3d083395 | 664 | mutex_unlock(&ftraced_lock); |
b0fc494f | 665 | mutex_unlock(&ftrace_sysctl_lock); |
3d083395 | 666 | |
e1c08bdd SR |
667 | wake_up_interruptible(&ftraced_waiters); |
668 | ||
3d083395 SR |
669 | ftrace_shutdown_replenish(); |
670 | ||
671 | set_current_state(TASK_INTERRUPTIBLE); | |
672 | } | |
673 | __set_current_state(TASK_RUNNING); | |
674 | return 0; | |
675 | } | |
676 | ||
/*
 * Boot-time allocation of the dyn_ftrace record pages.  Returns 0
 * on success, -1 only if the very first page cannot be allocated;
 * later failures are tolerated since ftrace_shutdown_replenish()
 * can add pages at runtime.
 */
static int __init ftrace_dyn_table_alloc(void)
{
	struct ftrace_page *pg;
	int cnt;
	int i;

	/* allocate a few pages */
	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
	if (!ftrace_pages_start)
		return -1;

	/*
	 * Allocate a few more pages.
	 *
	 * TODO: have some parser search vmlinux before
	 *   final linking to find all calls to ftrace.
	 *   Then we can:
	 *    a) know how many pages to allocate.
	 *     and/or
	 *    b) set up the table then.
	 *
	 *  The dynamic code is still necessary for
	 *  modules.
	 */

	pg = ftrace_pages = ftrace_pages_start;

	cnt = NR_TO_INIT / ENTRIES_PER_PAGE;

	for (i = 0; i < cnt; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);

		/* If we fail, we'll try later anyway */
		if (!pg->next)
			break;

		pg = pg->next;
	}

	return 0;
}
718 | ||
/* Flags for ftrace_iterator::flags. */
enum {
	FTRACE_ITER_FILTER = (1 << 0),	/* show only FL_FILTER records */
	FTRACE_ITER_CONT = (1 << 1),	/* a write left a partial token */
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

/* Cursor state for the avail/filter debugfs seq_files. */
struct ftrace_iterator {
	loff_t pos;			/* last position handed out */
	struct ftrace_page *pg;		/* current record page */
	unsigned idx;			/* index within pg */
	unsigned flags;			/* FTRACE_ITER_* */
	unsigned char buffer[FTRACE_BUFF_MAX+1];	/* partial token */
	unsigned buffer_idx;		/* bytes accumulated in buffer */
	unsigned filtered;		/* filters applied by writes */
};
735 | ||
/*
 * seq_file ->next: advance to the next visible record, skipping
 * FAILED records and, in filter mode, records without FL_FILTER.
 * Returns NULL at the end of the record pages.
 */
static void notrace *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = NULL;

	(*pos)++;

 retry:
	if (iter->idx >= iter->pg->index) {
		/* current page exhausted; move to the next one */
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		if ((rec->flags & FTRACE_FL_FAILED) ||
		    ((iter->flags & FTRACE_ITER_FILTER) &&
		     !(rec->flags & FTRACE_FL_FILTER))) {
			rec = NULL;
			goto retry;
		}
	}

	/* cache position so t_start() can resume cheaply */
	iter->pos = *pos;

	return rec;
}
765 | ||
/*
 * seq_file ->start: if resuming at the cached position just step
 * once; otherwise rewind and walk forward until reaching *pos.
 */
static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l = -1;

	if (*pos != iter->pos) {
		for (p = t_next(m, p, &l); p && l < *pos; p = t_next(m, p, &l))
			;
	} else {
		l = *pos;
		p = t_next(m, p, &l);
	}

	return p;
}
782 | ||
/* seq_file ->stop: nothing to release. */
static void t_stop(struct seq_file *m, void *p)
{
}
786 | ||
/* seq_file ->show: print the symbol name of one record. */
static int t_show(struct seq_file *m, void *v)
{
	struct dyn_ftrace *rec = v;
	char str[KSYM_SYMBOL_LEN];

	if (!rec)
		return 0;

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

	seq_printf(m, "%s\n", str);

	return 0;
}
801 | ||
/* seq_file ops shared by the avail and filter debugfs files. */
static struct seq_operations show_ftrace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};
808 | ||
/*
 * Open available_filter_functions: allocate an iterator and attach
 * it to the seq_file.  pos starts at -1 so the first t_next() call
 * lands on position 0.
 */
static int notrace
ftrace_avail_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	iter->pg = ftrace_pages_start;
	iter->pos = -1;

	ret = seq_open(file, &show_ftrace_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = iter;
	} else {
		/* seq_open failed: don't leak the iterator */
		kfree(iter);
	}

	return ret;
}
836 | ||
/* Release the avail file: free the iterator behind the seq_file. */
int ftrace_avail_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter = m->private;

	seq_release(inode, file);
	kfree(iter);

	return 0;
}
847 | ||
/*
 * Clear FTRACE_FL_FILTER from every record and turn filtering off.
 * Preemption is disabled so a stop_machine patching pass cannot
 * run while the flags are changing.
 */
static void notrace ftrace_filter_reset(void)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned i;

	/* keep kstop machine from running */
	preempt_disable();
	ftrace_filtered = 0;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			rec->flags &= ~FTRACE_FL_FILTER;
		}
		pg = pg->next;
	}
	preempt_enable();
}
869 | ||
/*
 * Open set_ftrace_filter.  A writer opened without O_APPEND resets
 * the current filter set.  For readers the iterator hangs off the
 * seq_file; write-only opens stash it in file->private_data.
 */
static int notrace
ftrace_filter_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	mutex_lock(&ftrace_filter_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_filter_reset();

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;
		iter->pos = -1;	/* first t_next() lands on 0 */
		iter->flags = FTRACE_ITER_FILTER;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
		} else
			kfree(iter);
	} else
		file->private_data = iter;
	mutex_unlock(&ftrace_filter_lock);

	return ret;
}
905 | ||
906 | static ssize_t notrace | |
907 | ftrace_filter_read(struct file *file, char __user *ubuf, | |
908 | size_t cnt, loff_t *ppos) | |
909 | { | |
910 | if (file->f_mode & FMODE_READ) | |
911 | return seq_read(file, ubuf, cnt, ppos); | |
912 | else | |
913 | return -EPERM; | |
914 | } | |
915 | ||
/*
 * Seek on set_ftrace_filter.  Read opens delegate to seq_lseek;
 * write-only opens just pin f_pos to 1 (seeking has no meaning for
 * the write stream).
 */
static loff_t notrace
ftrace_filter_lseek(struct file *file, loff_t offset, int origin)
{
	loff_t ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, origin);
	else
		file->f_pos = ret = 1;

	return ret;
}
928 | ||
/* Wildcard match types derived from '*' placement in the pattern. */
enum {
	MATCH_FULL,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};

/*
 * Parse a glob in @buff ("foo", "foo*", "*foo", "*foo*") and set
 * FTRACE_FL_FILTER on every record whose symbol matches.  Mutates
 * @buff (NUL-terminates at the '*').  Preemption is disabled to
 * keep a stop_machine patching pass from running concurrently.
 */
static void notrace
ftrace_match(unsigned char *buff, int len)
{
	char str[KSYM_SYMBOL_LEN];
	char *search = NULL;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type = MATCH_FULL;
	unsigned i, match = 0, search_len = 0;

	for (i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i) {
				/* leading '*': match on the suffix */
				search = buff + i + 1;
				type = MATCH_END_ONLY;
				search_len = len - (i + 1);
			} else {
				if (type == MATCH_END_ONLY) {
					/* "*foo*": substring match */
					type = MATCH_MIDDLE_ONLY;
				} else {
					/* "foo*": prefix match */
					match = i;
					type = MATCH_FRONT_ONLY;
				}
				buff[i] = 0;
				break;
			}
		}
	}

	/* keep kstop machine from running */
	preempt_disable();
	ftrace_filtered = 1;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			int matched = 0;
			char *ptr;

			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
			switch (type) {
			case MATCH_FULL:
				if (strcmp(str, buff) == 0)
					matched = 1;
				break;
			case MATCH_FRONT_ONLY:
				if (memcmp(str, buff, match) == 0)
					matched = 1;
				break;
			case MATCH_MIDDLE_ONLY:
				if (strstr(str, search))
					matched = 1;
				break;
			case MATCH_END_ONLY:
				ptr = strstr(str, search);
				if (ptr && (ptr[search_len] == 0))
					matched = 1;
				break;
			}
			if (matched)
				rec->flags |= FTRACE_FL_FILTER;
		}
		pg = pg->next;
	}
	preempt_enable();
}
1004 | ||
1005 | static ssize_t notrace | |
1006 | ftrace_filter_write(struct file *file, const char __user *ubuf, | |
1007 | size_t cnt, loff_t *ppos) | |
1008 | { | |
1009 | struct ftrace_iterator *iter; | |
1010 | char ch; | |
1011 | size_t read = 0; | |
1012 | ssize_t ret; | |
1013 | ||
1014 | if (!cnt || cnt < 0) | |
1015 | return 0; | |
1016 | ||
1017 | mutex_lock(&ftrace_filter_lock); | |
1018 | ||
1019 | if (file->f_mode & FMODE_READ) { | |
1020 | struct seq_file *m = file->private_data; | |
1021 | iter = m->private; | |
1022 | } else | |
1023 | iter = file->private_data; | |
1024 | ||
1025 | if (!*ppos) { | |
1026 | iter->flags &= ~FTRACE_ITER_CONT; | |
1027 | iter->buffer_idx = 0; | |
1028 | } | |
1029 | ||
1030 | ret = get_user(ch, ubuf++); | |
1031 | if (ret) | |
1032 | goto out; | |
1033 | read++; | |
1034 | cnt--; | |
1035 | ||
1036 | if (!(iter->flags & ~FTRACE_ITER_CONT)) { | |
1037 | /* skip white space */ | |
1038 | while (cnt && isspace(ch)) { | |
1039 | ret = get_user(ch, ubuf++); | |
1040 | if (ret) | |
1041 | goto out; | |
1042 | read++; | |
1043 | cnt--; | |
1044 | } | |
1045 | ||
1046 | ||
1047 | if (isspace(ch)) { | |
1048 | file->f_pos += read; | |
1049 | ret = read; | |
1050 | goto out; | |
1051 | } | |
1052 | ||
1053 | iter->buffer_idx = 0; | |
1054 | } | |
1055 | ||
1056 | while (cnt && !isspace(ch)) { | |
1057 | if (iter->buffer_idx < FTRACE_BUFF_MAX) | |
1058 | iter->buffer[iter->buffer_idx++] = ch; | |
1059 | else { | |
1060 | ret = -EINVAL; | |
1061 | goto out; | |
1062 | } | |
1063 | ret = get_user(ch, ubuf++); | |
1064 | if (ret) | |
1065 | goto out; | |
1066 | read++; | |
1067 | cnt--; | |
1068 | } | |
1069 | ||
1070 | if (isspace(ch)) { | |
1071 | iter->filtered++; | |
1072 | iter->buffer[iter->buffer_idx] = 0; | |
1073 | ftrace_match(iter->buffer, iter->buffer_idx); | |
1074 | iter->buffer_idx = 0; | |
1075 | } else | |
1076 | iter->flags |= FTRACE_ITER_CONT; | |
1077 | ||
1078 | ||
1079 | file->f_pos += read; | |
1080 | ||
1081 | ret = read; | |
1082 | out: | |
1083 | mutex_unlock(&ftrace_filter_lock); | |
1084 | ||
1085 | return ret; | |
1086 | } | |
1087 | ||
77a2b37d SR |
/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
notrace void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
	/* No filter changes once ftrace has been permanently disabled. */
	if (unlikely(ftrace_disabled))
		return;

	/* Serialize against debugfs writers of set_ftrace_filter. */
	mutex_lock(&ftrace_filter_lock);
	if (reset)
		ftrace_filter_reset();
	if (buf)
		ftrace_match(buf, len);
	mutex_unlock(&ftrace_filter_lock);
}
1109 | ||
5072c59f SR |
/*
 * Release handler for "set_ftrace_filter": flush any partially written
 * filter token, re-enable call sites if filters changed while tracing is
 * active, and free the iterator.
 */
static int notrace
ftrace_filter_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter;

	mutex_lock(&ftrace_filter_lock);
	if (file->f_mode & FMODE_READ) {
		/* read/write open: iterator lives in the seq_file */
		iter = m->private;

		seq_release(inode, file);
	} else
		iter = file->private_data;

	/* a token left unterminated by the last write still counts */
	if (iter->buffer_idx) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx);
	}

	/*
	 * Lock order here matches the rest of the file:
	 * ftrace_sysctl_lock, then ftraced_lock.
	 */
	mutex_lock(&ftrace_sysctl_lock);
	mutex_lock(&ftraced_lock);
	if (iter->filtered && ftraced_suspend && ftrace_enabled)
		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
	mutex_unlock(&ftraced_lock);
	mutex_unlock(&ftrace_sysctl_lock);

	kfree(iter);
	mutex_unlock(&ftrace_filter_lock);
	return 0;
}
1141 | ||
/* debugfs operations for the read-only "available_filter_functions" file */
static struct file_operations ftrace_avail_fops = {
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};
1148 | ||
/* debugfs operations for the read-write "set_ftrace_filter" file */
static struct file_operations ftrace_filter_fops = {
	.open = ftrace_filter_open,
	.read = ftrace_filter_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_filter_lseek,
	.release = ftrace_filter_release,
};
1156 | ||
e1c08bdd SR |
/**
 * ftrace_force_update - force an update to all recording ftrace functions
 *
 * The ftrace dynamic update daemon only wakes up once a second.
 * There may be cases where an update needs to be done immediately
 * for tests or internal kernel tracing to begin. This function
 * wakes the daemon to do an update and will not return until the
 * update is complete.
 *
 * Returns 0 on success, -ENODEV if ftrace is disabled or the daemon
 * is gone, or -EINTR if interrupted by a signal while waiting.
 */
int ftrace_force_update(void)
{
	unsigned long last_counter;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftraced_lock);
	/* snapshot the iteration count so we can tell when one completes */
	last_counter = ftraced_iteration_counter;

	/*
	 * Queue ourselves before waking the daemon so its wake_up can't
	 * slip past us between the wake and the schedule() below.
	 */
	set_current_state(TASK_INTERRUPTIBLE);
	add_wait_queue(&ftraced_waiters, &wait);

	if (unlikely(!ftraced_task)) {
		/* daemon was shut down (e.g. by ftrace_force_shutdown) */
		ret = -ENODEV;
		goto out;
	}

	do {
		/* drop the lock while sleeping so the daemon can take it */
		mutex_unlock(&ftraced_lock);
		wake_up_process(ftraced_task);
		schedule();
		mutex_lock(&ftraced_lock);
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}
		set_current_state(TASK_INTERRUPTIBLE);
	} while (last_counter == ftraced_iteration_counter);

 out:
	mutex_unlock(&ftraced_lock);
	remove_wait_queue(&ftraced_waiters, &wait);
	set_current_state(TASK_RUNNING);

	return ret;
}
1205 | ||
4eebcc81 SR |
/*
 * Permanently stop dynamic ftrace: disable every patched call site,
 * restore the trace function, and stop the ftraced daemon.  Called from
 * ftrace_kill() when something has gone wrong.
 */
static void ftrace_force_shutdown(void)
{
	struct task_struct *task;
	int command = FTRACE_DISABLE_CALLS | FTRACE_UPDATE_TRACE_FUNC;

	mutex_lock(&ftraced_lock);
	/* clear ftraced_task under the lock so waiters see -ENODEV */
	task = ftraced_task;
	ftraced_task = NULL;
	/* -1 keeps any later suspend/resume from re-enabling the code */
	ftraced_suspend = -1;
	ftrace_run_update_code(command);
	mutex_unlock(&ftraced_lock);

	/* stop the daemon outside the lock; kthread_stop may sleep */
	if (task)
		kthread_stop(task);
}
1221 | ||
5072c59f SR |
1222 | static __init int ftrace_init_debugfs(void) |
1223 | { | |
1224 | struct dentry *d_tracer; | |
1225 | struct dentry *entry; | |
1226 | ||
1227 | d_tracer = tracing_init_dentry(); | |
1228 | ||
1229 | entry = debugfs_create_file("available_filter_functions", 0444, | |
1230 | d_tracer, NULL, &ftrace_avail_fops); | |
1231 | if (!entry) | |
1232 | pr_warning("Could not create debugfs " | |
1233 | "'available_filter_functions' entry\n"); | |
1234 | ||
1235 | entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer, | |
1236 | NULL, &ftrace_filter_fops); | |
1237 | if (!entry) | |
1238 | pr_warning("Could not create debugfs " | |
1239 | "'set_ftrace_filter' entry\n"); | |
1240 | return 0; | |
1241 | } | |
1242 | ||
1243 | fs_initcall(ftrace_init_debugfs); | |
1244 | ||
/*
 * Boot-time initialization of dynamic ftrace: run the arch-specific
 * setup under stop_machine, allocate the record tables, and start the
 * ftraced update daemon.  On any failure, ftrace is disabled for good.
 */
static int __init notrace ftrace_dynamic_init(void)
{
	struct task_struct *p;
	unsigned long addr;
	int ret;

	addr = (unsigned long)ftrace_record_ip;

	/* code patching must happen with all other CPUs stopped */
	stop_machine_run(ftrace_dyn_arch_init, &addr, NR_CPUS);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr) {
		ret = (int)addr;
		goto failed;
	}

	ret = ftrace_dyn_table_alloc();
	if (ret)
		goto failed;

	p = kthread_run(ftraced, NULL, "ftraced");
	if (IS_ERR(p)) {
		ret = -1;
		goto failed;
	}

	last_ftrace_enabled = ftrace_enabled = 1;
	ftraced_task = p;

	return 0;

 failed:
	/* any init failure permanently disables ftrace */
	ftrace_disabled = 1;
	return ret;
}

core_initcall(ftrace_dynamic_init);
#else
/* !CONFIG_DYNAMIC_FTRACE: the patching machinery compiles away to no-ops */
# define ftrace_startup()		do { } while (0)
# define ftrace_shutdown()		do { } while (0)
# define ftrace_startup_sysctl()	do { } while (0)
# define ftrace_shutdown_sysctl()	do { } while (0)
# define ftrace_force_shutdown()	do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */
1289 | ||
4eebcc81 SR |
/**
 * ftrace_kill - totally shutdown ftrace
 *
 * This is a safety measure. If something was detected that seems
 * wrong, calling this function will keep ftrace from doing
 * any more modifications, and updates.
 * used when something went wrong.
 */
void ftrace_kill(void)
{
	mutex_lock(&ftrace_sysctl_lock);
	/* set disabled first: it blocks every entry point for good */
	ftrace_disabled = 1;
	ftrace_enabled = 0;

	/* point ftrace_trace_function back at the stub */
	clear_ftrace_function();
	mutex_unlock(&ftrace_sysctl_lock);

	/* Try to totally disable ftrace */
	ftrace_force_shutdown();
}
1310 | ||
/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	/* refuse new tracers once ftrace has been permanently disabled */
	if (unlikely(ftrace_disabled))
		return -1;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __register_ftrace_function(ops);
	/* patch the call sites in (no-op without CONFIG_DYNAMIC_FTRACE) */
	ftrace_startup();
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}
1336 | ||
/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __unregister_ftrace_function(ops);
	/* patch call sites back out if this was the last tracer */
	ftrace_shutdown();
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}
1354 | ||
/*
 * Sysctl handler for the "ftrace_enabled" knob.  Runs proc_dointvec to
 * update ftrace_enabled, then starts or stops tracing if the value
 * actually changed.
 */
notrace int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     struct file *file, void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftrace_sysctl_lock);

	ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

	/* only act on a successful write that changed the value */
	if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
		goto out;

	last_ftrace_enabled = ftrace_enabled;

	if (ftrace_enabled) {

		ftrace_startup_sysctl();

		/* we are starting ftrace again */
		if (ftrace_list != &ftrace_list_end) {
			/* single tracer: call it directly; else fan out */
			if (ftrace_list->next == &ftrace_list_end)
				ftrace_trace_function = ftrace_list->func;
			else
				ftrace_trace_function = ftrace_list_func;
		}

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}