/*
 * Copyright (C) 2008-2014 Mathieu Desnoyers
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/jhash.h>
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/tracepoint.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/static_key.h>

extern struct tracepoint * const __start___tracepoints_ptrs[];
extern struct tracepoint * const __stop___tracepoints_ptrs[];

/* Set to 1 to enable tracepoint debug output */
static const int tracepoint_debug;

#ifdef CONFIG_MODULES
/*
 * Tracepoint module list mutex protects the local module list.
 */
static DEFINE_MUTEX(tracepoint_module_list_mutex);

/* Local list of struct tp_module */
static LIST_HEAD(tracepoint_module_list);
#endif /* CONFIG_MODULES */

/*
 * tracepoints_mutex protects the builtin and module tracepoints.
 * tracepoints_mutex nests inside tracepoint_module_list_mutex.
 */
static DEFINE_MUTEX(tracepoints_mutex);

/*
 * Note about RCU: it is used to delay freeing of an old probes array until a
 * quiescent state is reached.
 */
struct tp_probes {
        struct rcu_head rcu;
        struct tracepoint_func probes[0];
};
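
/*
 * The probe array and its RCU head are allocated together: allocate_probes()
 * below returns a pointer to the probes[] flexible array, and release_probes()
 * recovers the enclosing struct tp_probes with container_of() so that the
 * whole allocation can be freed once an RCU-sched grace period has elapsed.
 */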
static inline void *allocate_probes(int count)
{
        struct tp_probes *p = kmalloc(count * sizeof(struct tracepoint_func)
                        + sizeof(struct tp_probes), GFP_KERNEL);
        return p == NULL ? NULL : p->probes;
}

static void rcu_free_old_probes(struct rcu_head *head)
{
        kfree(container_of(head, struct tp_probes, rcu));
}

static inline void release_probes(struct tracepoint_func *old)
{
        if (old) {
                struct tp_probes *tp_probes = container_of(old,
                        struct tp_probes, probes[0]);
                call_rcu_sched(&tp_probes->rcu, rcu_free_old_probes);
        }
}

static void debug_print_probes(struct tracepoint_func *funcs)
{
        int i;

        if (!tracepoint_debug || !funcs)
                return;

        for (i = 0; funcs[i].func; i++)
                printk(KERN_DEBUG "Probe %d : %p\n", i, funcs[i].func);
}

static struct tracepoint_func *func_add(struct tracepoint_func **funcs,
                struct tracepoint_func *tp_func)
{
        int nr_probes = 0;
        struct tracepoint_func *old, *new;

        if (WARN_ON(!tp_func->func))
                return ERR_PTR(-EINVAL);

        debug_print_probes(*funcs);
        old = *funcs;
        if (old) {
                /* (N -> N+1), (N != 0, 1) probes */
                for (nr_probes = 0; old[nr_probes].func; nr_probes++)
                        if (old[nr_probes].func == tp_func->func &&
                            old[nr_probes].data == tp_func->data)
                                return ERR_PTR(-EEXIST);
        }
        /* + 2 : one for new probe, one for NULL func */
        new = allocate_probes(nr_probes + 2);
        if (new == NULL)
                return ERR_PTR(-ENOMEM);
        if (old)
                memcpy(new, old, nr_probes * sizeof(struct tracepoint_func));
        new[nr_probes] = *tp_func;
        new[nr_probes + 1].func = NULL;
        *funcs = new;
        debug_print_probes(*funcs);
        return old;
}

static void *func_remove(struct tracepoint_func **funcs,
                struct tracepoint_func *tp_func)
{
        int nr_probes = 0, nr_del = 0, i;
        struct tracepoint_func *old, *new;

        old = *funcs;

        if (!old)
                return ERR_PTR(-ENOENT);

        debug_print_probes(*funcs);
        /* (N -> M), (N > 1, M >= 0) probes */
        if (tp_func->func) {
                for (nr_probes = 0; old[nr_probes].func; nr_probes++) {
                        if (old[nr_probes].func == tp_func->func &&
                            old[nr_probes].data == tp_func->data)
                                nr_del++;
                }
        }

        /*
         * If probe is NULL, then nr_probes = nr_del = 0, and then the
         * entire entry will be removed.
         */
        if (nr_probes - nr_del == 0) {
                /* N -> 0, (N > 1) */
                *funcs = NULL;
                debug_print_probes(*funcs);
                return old;
        } else {
                int j = 0;
                /* N -> M, (N > 1, M > 0) */
                /* + 1 for NULL */
                new = allocate_probes(nr_probes - nr_del + 1);
                if (new == NULL)
                        return ERR_PTR(-ENOMEM);
                for (i = 0; old[i].func; i++)
                        if (old[i].func != tp_func->func
                            || old[i].data != tp_func->data)
                                new[j++] = old[i];
                new[nr_probes - nr_del].func = NULL;
                *funcs = new;
        }
        debug_print_probes(*funcs);
        return old;
}

/*
 * Add the probe function to a tracepoint.
 */
static int tracepoint_add_func(struct tracepoint *tp,
                struct tracepoint_func *func)
{
        struct tracepoint_func *old, *tp_funcs;

        if (tp->regfunc && !static_key_enabled(&tp->key))
                tp->regfunc();

        tp_funcs = tp->funcs;
        old = func_add(&tp_funcs, func);
        if (IS_ERR(old)) {
                WARN_ON_ONCE(1);
                return PTR_ERR(old);
        }
        release_probes(old);

        /*
         * rcu_assign_pointer has a smp_wmb() which makes sure that the new
         * probe callbacks array is consistent before setting a pointer to it.
         * This array is referenced by __DO_TRACE from
         * include/linux/tracepoint.h. A matching smp_read_barrier_depends()
         * is used.
         */
        rcu_assign_pointer(tp->funcs, tp_funcs);
        if (!static_key_enabled(&tp->key))
                static_key_slow_inc(&tp->key);
        return 0;
}

/*
 * Remove a probe function from a tracepoint.
 * Note: only waiting an RCU grace period after replacing the probes array
 * ensures that the original callbacks are no longer used. This is guaranteed
 * by the preempt_disable() around the call site.
 */
static int tracepoint_remove_func(struct tracepoint *tp,
                struct tracepoint_func *func)
{
        struct tracepoint_func *old, *tp_funcs;

        tp_funcs = tp->funcs;
        old = func_remove(&tp_funcs, func);
        if (IS_ERR(old)) {
                WARN_ON_ONCE(1);
                return PTR_ERR(old);
        }
        release_probes(old);

        if (!tp_funcs) {
                /* Removed last function */
                if (tp->unregfunc && static_key_enabled(&tp->key))
                        tp->unregfunc();

                if (static_key_enabled(&tp->key))
                        static_key_slow_dec(&tp->key);
        }
        rcu_assign_pointer(tp->funcs, tp_funcs);
        return 0;
}

/**
 * tracepoint_probe_register - Connect a probe to a tracepoint
 * @tp: tracepoint
 * @probe: probe handler
 * @data: tracepoint data
 *
 * Returns 0 if ok, error value on error.
 * Note: if @tp is within a module, the caller is responsible for
 * unregistering the probe before the module is gone. This can be
 * performed either with a tracepoint module going notifier, or from
 * within module exit functions.
 */
int tracepoint_probe_register(struct tracepoint *tp, void *probe, void *data)
{
        struct tracepoint_func tp_func;
        int ret;

        mutex_lock(&tracepoints_mutex);
        tp_func.func = probe;
        tp_func.data = data;
        ret = tracepoint_add_func(tp, &tp_func);
        mutex_unlock(&tracepoints_mutex);
        return ret;
}
EXPORT_SYMBOL_GPL(tracepoint_probe_register);
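
/*
 * Example (illustrative sketch, not taken from this file): probes are
 * normally attached through the register_trace_<name>() wrapper that
 * DECLARE_TRACE()/TRACE_EVENT() generates for each tracepoint, which in
 * turn calls tracepoint_probe_register() with the matching struct
 * tracepoint. The probe prototype must match the tracepoint's declared
 * prototype, with @data prepended as the first argument. The probe and the
 * sched_switch prototype below are illustrative only:
 *
 *      static void my_probe(void *data, struct task_struct *prev,
 *                           struct task_struct *next)
 *      {
 *              ...
 *      }
 *
 *      ret = register_trace_sched_switch(my_probe, NULL);
 */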

/**
 * tracepoint_probe_unregister - Disconnect a probe from a tracepoint
 * @tp: tracepoint
 * @probe: probe function pointer
 * @data: tracepoint data
 *
 * Returns 0 if ok, error value on error.
 */
int tracepoint_probe_unregister(struct tracepoint *tp, void *probe, void *data)
{
        struct tracepoint_func tp_func;
        int ret;

        mutex_lock(&tracepoints_mutex);
        tp_func.func = probe;
        tp_func.data = data;
        ret = tracepoint_remove_func(tp, &tp_func);
        mutex_unlock(&tracepoints_mutex);
        return ret;
}
EXPORT_SYMBOL_GPL(tracepoint_probe_unregister);
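
/*
 * Note: after tracepoint_probe_unregister() returns, the old probe may still
 * be running on another CPU until an RCU-sched grace period has elapsed
 * (see release_probes() above). A caller that intends to free @data or to
 * unload the module containing the probe should first wait for that grace
 * period, typically with tracepoint_synchronize_unregister().
 */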

#ifdef CONFIG_MODULES
bool trace_module_has_bad_taint(struct module *mod)
{
        return mod->taints & ~((1 << TAINT_OOT_MODULE) | (1 << TAINT_CRAP));
}

static BLOCKING_NOTIFIER_HEAD(tracepoint_notify_list);

/**
 * register_tracepoint_module_notifier - register tracepoint coming/going notifier
 * @nb: notifier block
 *
 * Notifiers registered with this function are called on module
 * coming/going with the tracepoint_module_list_mutex held.
 * The notifier block callback should expect a "struct tp_module" data
 * pointer.
 */
int register_tracepoint_module_notifier(struct notifier_block *nb)
{
        struct tp_module *tp_mod;
        int ret;

        mutex_lock(&tracepoint_module_list_mutex);
        ret = blocking_notifier_chain_register(&tracepoint_notify_list, nb);
        if (ret)
                goto end;
        list_for_each_entry(tp_mod, &tracepoint_module_list, list)
                (void) nb->notifier_call(nb, MODULE_STATE_COMING, tp_mod);
end:
        mutex_unlock(&tracepoint_module_list_mutex);
        return ret;
}
EXPORT_SYMBOL_GPL(register_tracepoint_module_notifier);
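
/*
 * Example (illustrative sketch): a tracer interested in module tracepoints
 * can register a notifier and walk the module's tracepoint array from the
 * callback; the data pointer is the struct tp_module. The helper functions
 * named my_add_tps()/my_del_tps() below are hypothetical:
 *
 *      static int my_tp_module_notify(struct notifier_block *nb,
 *                                     unsigned long state, void *data)
 *      {
 *              struct tp_module *tp_mod = data;
 *
 *              switch (state) {
 *              case MODULE_STATE_COMING:
 *                      my_add_tps(tp_mod->mod->tracepoints_ptrs,
 *                                 tp_mod->mod->num_tracepoints);
 *                      break;
 *              case MODULE_STATE_GOING:
 *                      my_del_tps(tp_mod->mod->tracepoints_ptrs,
 *                                 tp_mod->mod->num_tracepoints);
 *                      break;
 *              }
 *              return 0;
 *      }
 *
 *      static struct notifier_block my_tp_module_nb = {
 *              .notifier_call = my_tp_module_notify,
 *      };
 *
 *      register_tracepoint_module_notifier(&my_tp_module_nb);
 */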

/**
 * unregister_tracepoint_module_notifier - unregister tracepoint coming/going notifier
 * @nb: notifier block
 *
 * The notifier block callback should expect a "struct tp_module" data
 * pointer.
 */
int unregister_tracepoint_module_notifier(struct notifier_block *nb)
{
        struct tp_module *tp_mod;
        int ret;

        mutex_lock(&tracepoint_module_list_mutex);
        ret = blocking_notifier_chain_unregister(&tracepoint_notify_list, nb);
        if (ret)
                goto end;
        list_for_each_entry(tp_mod, &tracepoint_module_list, list)
                (void) nb->notifier_call(nb, MODULE_STATE_GOING, tp_mod);
end:
        mutex_unlock(&tracepoint_module_list_mutex);
        return ret;
}
EXPORT_SYMBOL_GPL(unregister_tracepoint_module_notifier);

/*
 * Ensure the tracer unregistered the module's probes before the module
 * teardown is performed. Prevents leaks of probe and data pointers.
 */
static void tp_module_going_check_quiescent(struct tracepoint * const *begin,
                struct tracepoint * const *end)
{
        struct tracepoint * const *iter;

        if (!begin)
                return;
        for (iter = begin; iter < end; iter++)
                WARN_ON_ONCE((*iter)->funcs);
}

static int tracepoint_module_coming(struct module *mod)
{
        struct tp_module *tp_mod;
        int ret = 0;

        if (!mod->num_tracepoints)
                return 0;

        /*
         * We skip modules that taint the kernel, especially those with different
         * module headers (for forced load), to make sure we don't cause a crash.
         * Staging and out-of-tree GPL modules are fine.
         */
        if (trace_module_has_bad_taint(mod))
                return 0;
        mutex_lock(&tracepoint_module_list_mutex);
        tp_mod = kmalloc(sizeof(struct tp_module), GFP_KERNEL);
        if (!tp_mod) {
                ret = -ENOMEM;
                goto end;
        }
        tp_mod->mod = mod;
        list_add_tail(&tp_mod->list, &tracepoint_module_list);
        blocking_notifier_call_chain(&tracepoint_notify_list,
                        MODULE_STATE_COMING, tp_mod);
end:
        mutex_unlock(&tracepoint_module_list_mutex);
        return ret;
}

static void tracepoint_module_going(struct module *mod)
{
        struct tp_module *tp_mod;

        if (!mod->num_tracepoints)
                return;

        mutex_lock(&tracepoint_module_list_mutex);
        list_for_each_entry(tp_mod, &tracepoint_module_list, list) {
                if (tp_mod->mod == mod) {
                        blocking_notifier_call_chain(&tracepoint_notify_list,
                                        MODULE_STATE_GOING, tp_mod);
                        list_del(&tp_mod->list);
                        kfree(tp_mod);
                        /*
                         * The going notifier is called before checking for
                         * quiescence.
                         */
                        tp_module_going_check_quiescent(mod->tracepoints_ptrs,
                                mod->tracepoints_ptrs + mod->num_tracepoints);
                        break;
                }
        }
        /*
         * In the case of modules that were tainted at "coming", we'll simply
         * walk through the list without finding it. We cannot use the "tainted"
         * flag on "going", in case a module taints the kernel only after being
         * loaded.
         */
        mutex_unlock(&tracepoint_module_list_mutex);
}

static int tracepoint_module_notify(struct notifier_block *self,
                unsigned long val, void *data)
{
        struct module *mod = data;
        int ret = 0;

        switch (val) {
        case MODULE_STATE_COMING:
                ret = tracepoint_module_coming(mod);
                break;
        case MODULE_STATE_LIVE:
                break;
        case MODULE_STATE_GOING:
                tracepoint_module_going(mod);
                break;
        case MODULE_STATE_UNFORMED:
                break;
        }
        return ret;
}

static struct notifier_block tracepoint_module_nb = {
        .notifier_call = tracepoint_module_notify,
        .priority = 0,
};

static __init int init_tracepoints(void)
{
        int ret;

        ret = register_module_notifier(&tracepoint_module_nb);
        if (ret)
                pr_warning("Failed to register tracepoint module enter notifier\n");

        return ret;
}
__initcall(init_tracepoints);
#endif /* CONFIG_MODULES */

static void for_each_tracepoint_range(struct tracepoint * const *begin,
                struct tracepoint * const *end,
                void (*fct)(struct tracepoint *tp, void *priv),
                void *priv)
{
        struct tracepoint * const *iter;

        if (!begin)
                return;
        for (iter = begin; iter < end; iter++)
                fct(*iter, priv);
}

/**
 * for_each_kernel_tracepoint - iteration on all kernel tracepoints
 * @fct: callback
 * @priv: private data
 */
void for_each_kernel_tracepoint(void (*fct)(struct tracepoint *tp, void *priv),
                void *priv)
{
        for_each_tracepoint_range(__start___tracepoints_ptrs,
                __stop___tracepoints_ptrs, fct, priv);
}
EXPORT_SYMBOL_GPL(for_each_kernel_tracepoint);
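
/*
 * Example (illustrative sketch): the iterator can be used to find a core
 * kernel tracepoint by name so that a probe can then be attached with
 * tracepoint_probe_register(); the callback, the lookup helper and the
 * tracepoint name below are hypothetical:
 *
 *      static void match_tp(struct tracepoint *tp, void *priv)
 *      {
 *              struct tracepoint **found = priv;
 *
 *              if (!strcmp(tp->name, "sched_switch"))
 *                      *found = tp;
 *      }
 *
 *      static struct tracepoint *lookup_sched_switch(void)
 *      {
 *              struct tracepoint *tp = NULL;
 *
 *              for_each_kernel_tracepoint(match_tp, &tp);
 *              return tp;
 *      }
 */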

#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS

/* NB: reg/unreg are called while guarded with the tracepoints_mutex */
static int sys_tracepoint_refcount;
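
/*
 * syscall_regfunc()/syscall_unregfunc() are the reg/unreg callbacks of the
 * syscall entry/exit tracepoints: while at least one of those tracepoints
 * has a probe attached, every user task carries TIF_SYSCALL_TRACEPOINT,
 * which makes its system calls go through the syscall tracing slow path.
 */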
void syscall_regfunc(void)
{
        unsigned long flags;
        struct task_struct *g, *t;

        if (!sys_tracepoint_refcount) {
                read_lock_irqsave(&tasklist_lock, flags);
                do_each_thread(g, t) {
                        /* Skip kernel threads. */
                        if (t->mm)
                                set_tsk_thread_flag(t, TIF_SYSCALL_TRACEPOINT);
                } while_each_thread(g, t);
                read_unlock_irqrestore(&tasklist_lock, flags);
        }
        sys_tracepoint_refcount++;
}

void syscall_unregfunc(void)
{
        unsigned long flags;
        struct task_struct *g, *t;

        sys_tracepoint_refcount--;
        if (!sys_tracepoint_refcount) {
                read_lock_irqsave(&tasklist_lock, flags);
                do_each_thread(g, t) {
                        clear_tsk_thread_flag(t, TIF_SYSCALL_TRACEPOINT);
                } while_each_thread(g, t);
                read_unlock_irqrestore(&tasklist_lock, flags);
        }
}
#endif