/*
 * kernel/tracepoint.c (snapshot from linux-2.6-block.git, at commit
 * 'Revert "tracing: Move event storage for array from macro to
 * standalone function"'; git-blame annotations stripped).
 */
/*
 * Copyright (C) 2008 Mathieu Desnoyers
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
18#include <linux/module.h>
19#include <linux/mutex.h>
20#include <linux/types.h>
21#include <linux/jhash.h>
22#include <linux/list.h>
23#include <linux/rcupdate.h>
24#include <linux/tracepoint.h>
25#include <linux/err.h>
26#include <linux/slab.h>
a871bd33 27#include <linux/sched.h>
c5905afb 28#include <linux/static_key.h>
97e1c18e 29
65498646
MD
30extern struct tracepoint * const __start___tracepoints_ptrs[];
31extern struct tracepoint * const __stop___tracepoints_ptrs[];
97e1c18e
MD
32
33/* Set to 1 to enable tracepoint debug output */
34static const int tracepoint_debug;
35
36/*
b75ef8b4
MD
37 * Tracepoints mutex protects the builtin and module tracepoints and the hash
38 * table, as well as the local module list.
97e1c18e
MD
39 */
40static DEFINE_MUTEX(tracepoints_mutex);
41
b75ef8b4
MD
42#ifdef CONFIG_MODULES
43/* Local list of struct module */
44static LIST_HEAD(tracepoint_module_list);
45#endif /* CONFIG_MODULES */
46
97e1c18e
MD
47/*
48 * Tracepoint hash table, containing the active tracepoints.
49 * Protected by tracepoints_mutex.
50 */
51#define TRACEPOINT_HASH_BITS 6
52#define TRACEPOINT_TABLE_SIZE (1 << TRACEPOINT_HASH_BITS)
19dba33c 53static struct hlist_head tracepoint_table[TRACEPOINT_TABLE_SIZE];
97e1c18e
MD
54
55/*
56 * Note about RCU :
fd589a8f 57 * It is used to delay the free of multiple probes array until a quiescent
97e1c18e
MD
58 * state is reached.
59 * Tracepoint entries modifications are protected by the tracepoints_mutex.
60 */
61struct tracepoint_entry {
62 struct hlist_node hlist;
38516ab5 63 struct tracepoint_func *funcs;
97e1c18e 64 int refcount; /* Number of times armed. 0 if disarmed. */
b196e2b9 65 int enabled; /* Tracepoint enabled */
97e1c18e
MD
66 char name[0];
67};
68
19dba33c 69struct tp_probes {
127cafbb
LJ
70 union {
71 struct rcu_head rcu;
72 struct list_head list;
73 } u;
38516ab5 74 struct tracepoint_func probes[0];
19dba33c 75};
97e1c18e 76
19dba33c 77static inline void *allocate_probes(int count)
97e1c18e 78{
38516ab5 79 struct tp_probes *p = kmalloc(count * sizeof(struct tracepoint_func)
19dba33c
LJ
80 + sizeof(struct tp_probes), GFP_KERNEL);
81 return p == NULL ? NULL : p->probes;
97e1c18e
MD
82}
83
19dba33c 84static void rcu_free_old_probes(struct rcu_head *head)
97e1c18e 85{
127cafbb 86 kfree(container_of(head, struct tp_probes, u.rcu));
19dba33c
LJ
87}
88
38516ab5 89static inline void release_probes(struct tracepoint_func *old)
19dba33c
LJ
90{
91 if (old) {
92 struct tp_probes *tp_probes = container_of(old,
93 struct tp_probes, probes[0]);
127cafbb 94 call_rcu_sched(&tp_probes->u.rcu, rcu_free_old_probes);
19dba33c 95 }
97e1c18e
MD
96}
97
98static void debug_print_probes(struct tracepoint_entry *entry)
99{
100 int i;
101
19dba33c 102 if (!tracepoint_debug || !entry->funcs)
97e1c18e
MD
103 return;
104
38516ab5
SR
105 for (i = 0; entry->funcs[i].func; i++)
106 printk(KERN_DEBUG "Probe %d : %p\n", i, entry->funcs[i].func);
97e1c18e
MD
107}
108
38516ab5
SR
109static struct tracepoint_func *
110tracepoint_entry_add_probe(struct tracepoint_entry *entry,
111 void *probe, void *data)
97e1c18e
MD
112{
113 int nr_probes = 0;
38516ab5 114 struct tracepoint_func *old, *new;
97e1c18e 115
4c69e6ea
S
116 if (WARN_ON(!probe))
117 return ERR_PTR(-EINVAL);
97e1c18e
MD
118
119 debug_print_probes(entry);
120 old = entry->funcs;
121 if (old) {
122 /* (N -> N+1), (N != 0, 1) probes */
38516ab5
SR
123 for (nr_probes = 0; old[nr_probes].func; nr_probes++)
124 if (old[nr_probes].func == probe &&
125 old[nr_probes].data == data)
97e1c18e
MD
126 return ERR_PTR(-EEXIST);
127 }
128 /* + 2 : one for new probe, one for NULL func */
19dba33c 129 new = allocate_probes(nr_probes + 2);
97e1c18e
MD
130 if (new == NULL)
131 return ERR_PTR(-ENOMEM);
132 if (old)
38516ab5
SR
133 memcpy(new, old, nr_probes * sizeof(struct tracepoint_func));
134 new[nr_probes].func = probe;
135 new[nr_probes].data = data;
136 new[nr_probes + 1].func = NULL;
97e1c18e
MD
137 entry->refcount = nr_probes + 1;
138 entry->funcs = new;
139 debug_print_probes(entry);
140 return old;
141}
142
143static void *
38516ab5
SR
144tracepoint_entry_remove_probe(struct tracepoint_entry *entry,
145 void *probe, void *data)
97e1c18e
MD
146{
147 int nr_probes = 0, nr_del = 0, i;
38516ab5 148 struct tracepoint_func *old, *new;
97e1c18e
MD
149
150 old = entry->funcs;
151
f66af459 152 if (!old)
19dba33c 153 return ERR_PTR(-ENOENT);
f66af459 154
97e1c18e
MD
155 debug_print_probes(entry);
156 /* (N -> M), (N > 1, M >= 0) probes */
4c69e6ea
S
157 if (probe) {
158 for (nr_probes = 0; old[nr_probes].func; nr_probes++) {
159 if (old[nr_probes].func == probe &&
160 old[nr_probes].data == data)
161 nr_del++;
162 }
97e1c18e
MD
163 }
164
4c69e6ea
S
165 /*
166 * If probe is NULL, then nr_probes = nr_del = 0, and then the
167 * entire entry will be removed.
168 */
97e1c18e
MD
169 if (nr_probes - nr_del == 0) {
170 /* N -> 0, (N > 1) */
171 entry->funcs = NULL;
172 entry->refcount = 0;
173 debug_print_probes(entry);
174 return old;
175 } else {
176 int j = 0;
177 /* N -> M, (N > 1, M > 0) */
178 /* + 1 for NULL */
19dba33c 179 new = allocate_probes(nr_probes - nr_del + 1);
97e1c18e
MD
180 if (new == NULL)
181 return ERR_PTR(-ENOMEM);
38516ab5 182 for (i = 0; old[i].func; i++)
4c69e6ea 183 if (old[i].func != probe || old[i].data != data)
97e1c18e 184 new[j++] = old[i];
38516ab5 185 new[nr_probes - nr_del].func = NULL;
97e1c18e
MD
186 entry->refcount = nr_probes - nr_del;
187 entry->funcs = new;
188 }
189 debug_print_probes(entry);
190 return old;
191}
192
193/*
194 * Get tracepoint if the tracepoint is present in the tracepoint hash table.
195 * Must be called with tracepoints_mutex held.
196 * Returns NULL if not present.
197 */
198static struct tracepoint_entry *get_tracepoint(const char *name)
199{
200 struct hlist_head *head;
97e1c18e
MD
201 struct tracepoint_entry *e;
202 u32 hash = jhash(name, strlen(name), 0);
203
9795302a 204 head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)];
b67bfe0d 205 hlist_for_each_entry(e, head, hlist) {
97e1c18e
MD
206 if (!strcmp(name, e->name))
207 return e;
208 }
209 return NULL;
210}
211
212/*
213 * Add the tracepoint to the tracepoint hash table. Must be called with
214 * tracepoints_mutex held.
215 */
216static struct tracepoint_entry *add_tracepoint(const char *name)
217{
218 struct hlist_head *head;
97e1c18e
MD
219 struct tracepoint_entry *e;
220 size_t name_len = strlen(name) + 1;
221 u32 hash = jhash(name, name_len-1, 0);
222
9795302a 223 head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)];
b67bfe0d 224 hlist_for_each_entry(e, head, hlist) {
97e1c18e
MD
225 if (!strcmp(name, e->name)) {
226 printk(KERN_NOTICE
227 "tracepoint %s busy\n", name);
228 return ERR_PTR(-EEXIST); /* Already there */
229 }
230 }
231 /*
232 * Using kmalloc here to allocate a variable length element. Could
233 * cause some memory fragmentation if overused.
234 */
235 e = kmalloc(sizeof(struct tracepoint_entry) + name_len, GFP_KERNEL);
236 if (!e)
237 return ERR_PTR(-ENOMEM);
238 memcpy(&e->name[0], name, name_len);
239 e->funcs = NULL;
240 e->refcount = 0;
b196e2b9 241 e->enabled = 0;
97e1c18e
MD
242 hlist_add_head(&e->hlist, head);
243 return e;
244}
245
246/*
247 * Remove the tracepoint from the tracepoint hash table. Must be called with
248 * mutex_lock held.
249 */
19dba33c 250static inline void remove_tracepoint(struct tracepoint_entry *e)
97e1c18e 251{
97e1c18e 252 hlist_del(&e->hlist);
97e1c18e 253 kfree(e);
97e1c18e
MD
254}
255
256/*
257 * Sets the probe callback corresponding to one tracepoint.
258 */
259static void set_tracepoint(struct tracepoint_entry **entry,
260 struct tracepoint *elem, int active)
261{
262 WARN_ON(strcmp((*entry)->name, elem->name) != 0);
263
c5905afb 264 if (elem->regfunc && !static_key_enabled(&elem->key) && active)
97419875 265 elem->regfunc();
c5905afb 266 else if (elem->unregfunc && static_key_enabled(&elem->key) && !active)
97419875
JS
267 elem->unregfunc();
268
97e1c18e
MD
269 /*
270 * rcu_assign_pointer has a smp_wmb() which makes sure that the new
271 * probe callbacks array is consistent before setting a pointer to it.
272 * This array is referenced by __DO_TRACE from
273 * include/linux/tracepoints.h. A matching smp_read_barrier_depends()
274 * is used.
275 */
276 rcu_assign_pointer(elem->funcs, (*entry)->funcs);
c5905afb
IM
277 if (active && !static_key_enabled(&elem->key))
278 static_key_slow_inc(&elem->key);
279 else if (!active && static_key_enabled(&elem->key))
280 static_key_slow_dec(&elem->key);
97e1c18e
MD
281}
282
283/*
284 * Disable a tracepoint and its probe callback.
285 * Note: only waiting an RCU period after setting elem->call to the empty
286 * function insures that the original callback is not used anymore. This insured
287 * by preempt_disable around the call site.
288 */
289static void disable_tracepoint(struct tracepoint *elem)
290{
c5905afb 291 if (elem->unregfunc && static_key_enabled(&elem->key))
97419875
JS
292 elem->unregfunc();
293
c5905afb
IM
294 if (static_key_enabled(&elem->key))
295 static_key_slow_dec(&elem->key);
de0baf9a 296 rcu_assign_pointer(elem->funcs, NULL);
97e1c18e
MD
297}
298
299/**
300 * tracepoint_update_probe_range - Update a probe range
301 * @begin: beginning of the range
302 * @end: end of the range
303 *
304 * Updates the probe callback corresponding to a range of tracepoints.
b75ef8b4 305 * Called with tracepoints_mutex held.
97e1c18e 306 */
b75ef8b4
MD
307static void tracepoint_update_probe_range(struct tracepoint * const *begin,
308 struct tracepoint * const *end)
97e1c18e 309{
65498646 310 struct tracepoint * const *iter;
97e1c18e
MD
311 struct tracepoint_entry *mark_entry;
312
ec625cb2 313 if (!begin)
09933a10 314 return;
09933a10 315
97e1c18e 316 for (iter = begin; iter < end; iter++) {
65498646 317 mark_entry = get_tracepoint((*iter)->name);
97e1c18e 318 if (mark_entry) {
65498646 319 set_tracepoint(&mark_entry, *iter,
97e1c18e 320 !!mark_entry->refcount);
b196e2b9 321 mark_entry->enabled = !!mark_entry->refcount;
97e1c18e 322 } else {
65498646 323 disable_tracepoint(*iter);
97e1c18e
MD
324 }
325 }
97e1c18e
MD
326}
327
b75ef8b4
MD
328#ifdef CONFIG_MODULES
329void module_update_tracepoints(void)
330{
331 struct tp_module *tp_mod;
332
333 list_for_each_entry(tp_mod, &tracepoint_module_list, list)
334 tracepoint_update_probe_range(tp_mod->tracepoints_ptrs,
335 tp_mod->tracepoints_ptrs + tp_mod->num_tracepoints);
336}
337#else /* CONFIG_MODULES */
338void module_update_tracepoints(void)
339{
340}
341#endif /* CONFIG_MODULES */
342
343
97e1c18e
MD
344/*
345 * Update probes, removing the faulty probes.
b75ef8b4 346 * Called with tracepoints_mutex held.
97e1c18e
MD
347 */
348static void tracepoint_update_probes(void)
349{
350 /* Core kernel tracepoints */
65498646
MD
351 tracepoint_update_probe_range(__start___tracepoints_ptrs,
352 __stop___tracepoints_ptrs);
97e1c18e
MD
353 /* tracepoints in modules. */
354 module_update_tracepoints();
355}
356
38516ab5
SR
357static struct tracepoint_func *
358tracepoint_add_probe(const char *name, void *probe, void *data)
127cafbb
LJ
359{
360 struct tracepoint_entry *entry;
38516ab5 361 struct tracepoint_func *old;
127cafbb
LJ
362
363 entry = get_tracepoint(name);
364 if (!entry) {
365 entry = add_tracepoint(name);
366 if (IS_ERR(entry))
38516ab5 367 return (struct tracepoint_func *)entry;
127cafbb 368 }
38516ab5 369 old = tracepoint_entry_add_probe(entry, probe, data);
127cafbb
LJ
370 if (IS_ERR(old) && !entry->refcount)
371 remove_tracepoint(entry);
372 return old;
373}
374
97e1c18e
MD
375/**
376 * tracepoint_probe_register - Connect a probe to a tracepoint
377 * @name: tracepoint name
378 * @probe: probe handler
4c11628a 379 * @data: probe private data
97e1c18e 380 *
3bbc8db3
MD
381 * Returns:
382 * - 0 if the probe was successfully registered, and tracepoint
383 * callsites are currently loaded for that probe,
384 * - -ENODEV if the probe was successfully registered, but no tracepoint
385 * callsite is currently loaded for that probe,
386 * - other negative error value on error.
387 *
388 * When tracepoint_probe_register() returns either 0 or -ENODEV,
389 * parameters @name, @probe, and @data may be used by the tracepoint
390 * infrastructure until the probe is unregistered.
391 *
97e1c18e
MD
392 * The probe address must at least be aligned on the architecture pointer size.
393 */
38516ab5 394int tracepoint_probe_register(const char *name, void *probe, void *data)
97e1c18e 395{
38516ab5 396 struct tracepoint_func *old;
b196e2b9
SR
397 struct tracepoint_entry *entry;
398 int ret = 0;
97e1c18e
MD
399
400 mutex_lock(&tracepoints_mutex);
38516ab5 401 old = tracepoint_add_probe(name, probe, data);
b75ef8b4
MD
402 if (IS_ERR(old)) {
403 mutex_unlock(&tracepoints_mutex);
127cafbb 404 return PTR_ERR(old);
b75ef8b4 405 }
97e1c18e 406 tracepoint_update_probes(); /* may update entry */
b196e2b9
SR
407 entry = get_tracepoint(name);
408 /* Make sure the entry was enabled */
409 if (!entry || !entry->enabled)
410 ret = -ENODEV;
b75ef8b4 411 mutex_unlock(&tracepoints_mutex);
19dba33c 412 release_probes(old);
b196e2b9 413 return ret;
97e1c18e
MD
414}
415EXPORT_SYMBOL_GPL(tracepoint_probe_register);
416
38516ab5
SR
417static struct tracepoint_func *
418tracepoint_remove_probe(const char *name, void *probe, void *data)
127cafbb
LJ
419{
420 struct tracepoint_entry *entry;
38516ab5 421 struct tracepoint_func *old;
127cafbb
LJ
422
423 entry = get_tracepoint(name);
424 if (!entry)
425 return ERR_PTR(-ENOENT);
38516ab5 426 old = tracepoint_entry_remove_probe(entry, probe, data);
127cafbb
LJ
427 if (IS_ERR(old))
428 return old;
429 if (!entry->refcount)
430 remove_tracepoint(entry);
431 return old;
432}
433
97e1c18e
MD
434/**
435 * tracepoint_probe_unregister - Disconnect a probe from a tracepoint
436 * @name: tracepoint name
437 * @probe: probe function pointer
4c11628a 438 * @data: probe private data
97e1c18e
MD
439 *
440 * We do not need to call a synchronize_sched to make sure the probes have
441 * finished running before doing a module unload, because the module unload
442 * itself uses stop_machine(), which insures that every preempt disabled section
443 * have finished.
444 */
38516ab5 445int tracepoint_probe_unregister(const char *name, void *probe, void *data)
97e1c18e 446{
38516ab5 447 struct tracepoint_func *old;
97e1c18e
MD
448
449 mutex_lock(&tracepoints_mutex);
38516ab5 450 old = tracepoint_remove_probe(name, probe, data);
b75ef8b4
MD
451 if (IS_ERR(old)) {
452 mutex_unlock(&tracepoints_mutex);
127cafbb 453 return PTR_ERR(old);
b75ef8b4 454 }
97e1c18e 455 tracepoint_update_probes(); /* may update entry */
b75ef8b4 456 mutex_unlock(&tracepoints_mutex);
19dba33c
LJ
457 release_probes(old);
458 return 0;
97e1c18e
MD
459}
460EXPORT_SYMBOL_GPL(tracepoint_probe_unregister);
461
127cafbb
LJ
462static LIST_HEAD(old_probes);
463static int need_update;
464
465static void tracepoint_add_old_probes(void *old)
466{
467 need_update = 1;
468 if (old) {
469 struct tp_probes *tp_probes = container_of(old,
470 struct tp_probes, probes[0]);
471 list_add(&tp_probes->u.list, &old_probes);
472 }
473}
474
475/**
476 * tracepoint_probe_register_noupdate - register a probe but not connect
477 * @name: tracepoint name
478 * @probe: probe handler
4c11628a 479 * @data: probe private data
127cafbb
LJ
480 *
481 * caller must call tracepoint_probe_update_all()
482 */
38516ab5
SR
483int tracepoint_probe_register_noupdate(const char *name, void *probe,
484 void *data)
127cafbb 485{
38516ab5 486 struct tracepoint_func *old;
127cafbb
LJ
487
488 mutex_lock(&tracepoints_mutex);
38516ab5 489 old = tracepoint_add_probe(name, probe, data);
127cafbb
LJ
490 if (IS_ERR(old)) {
491 mutex_unlock(&tracepoints_mutex);
492 return PTR_ERR(old);
493 }
494 tracepoint_add_old_probes(old);
495 mutex_unlock(&tracepoints_mutex);
496 return 0;
497}
498EXPORT_SYMBOL_GPL(tracepoint_probe_register_noupdate);
499
500/**
501 * tracepoint_probe_unregister_noupdate - remove a probe but not disconnect
502 * @name: tracepoint name
503 * @probe: probe function pointer
4c11628a 504 * @data: probe private data
127cafbb
LJ
505 *
506 * caller must call tracepoint_probe_update_all()
507 */
38516ab5
SR
508int tracepoint_probe_unregister_noupdate(const char *name, void *probe,
509 void *data)
127cafbb 510{
38516ab5 511 struct tracepoint_func *old;
127cafbb
LJ
512
513 mutex_lock(&tracepoints_mutex);
38516ab5 514 old = tracepoint_remove_probe(name, probe, data);
127cafbb
LJ
515 if (IS_ERR(old)) {
516 mutex_unlock(&tracepoints_mutex);
517 return PTR_ERR(old);
518 }
519 tracepoint_add_old_probes(old);
520 mutex_unlock(&tracepoints_mutex);
521 return 0;
522}
523EXPORT_SYMBOL_GPL(tracepoint_probe_unregister_noupdate);
524
525/**
526 * tracepoint_probe_update_all - update tracepoints
527 */
528void tracepoint_probe_update_all(void)
529{
530 LIST_HEAD(release_probes);
531 struct tp_probes *pos, *next;
532
533 mutex_lock(&tracepoints_mutex);
534 if (!need_update) {
535 mutex_unlock(&tracepoints_mutex);
536 return;
537 }
538 if (!list_empty(&old_probes))
539 list_replace_init(&old_probes, &release_probes);
540 need_update = 0;
127cafbb 541 tracepoint_update_probes();
b75ef8b4 542 mutex_unlock(&tracepoints_mutex);
127cafbb
LJ
543 list_for_each_entry_safe(pos, next, &release_probes, u.list) {
544 list_del(&pos->u.list);
545 call_rcu_sched(&pos->u.rcu, rcu_free_old_probes);
546 }
547}
548EXPORT_SYMBOL_GPL(tracepoint_probe_update_all);
549
97e1c18e
MD
550/**
551 * tracepoint_get_iter_range - Get a next tracepoint iterator given a range.
552 * @tracepoint: current tracepoints (in), next tracepoint (out)
553 * @begin: beginning of the range
554 * @end: end of the range
555 *
556 * Returns whether a next tracepoint has been found (1) or not (0).
557 * Will return the first tracepoint in the range if the input tracepoint is
558 * NULL.
559 */
b75ef8b4 560static int tracepoint_get_iter_range(struct tracepoint * const **tracepoint,
65498646 561 struct tracepoint * const *begin, struct tracepoint * const *end)
97e1c18e
MD
562{
563 if (!*tracepoint && begin != end) {
564 *tracepoint = begin;
565 return 1;
566 }
567 if (*tracepoint >= begin && *tracepoint < end)
568 return 1;
569 return 0;
570}
97e1c18e 571
b75ef8b4 572#ifdef CONFIG_MODULES
97e1c18e
MD
573static void tracepoint_get_iter(struct tracepoint_iter *iter)
574{
575 int found = 0;
b75ef8b4 576 struct tp_module *iter_mod;
97e1c18e
MD
577
578 /* Core kernel tracepoints */
579 if (!iter->module) {
580 found = tracepoint_get_iter_range(&iter->tracepoint,
65498646
MD
581 __start___tracepoints_ptrs,
582 __stop___tracepoints_ptrs);
97e1c18e
MD
583 if (found)
584 goto end;
585 }
b75ef8b4
MD
586 /* Tracepoints in modules */
587 mutex_lock(&tracepoints_mutex);
588 list_for_each_entry(iter_mod, &tracepoint_module_list, list) {
589 /*
590 * Sorted module list
591 */
592 if (iter_mod < iter->module)
593 continue;
594 else if (iter_mod > iter->module)
595 iter->tracepoint = NULL;
596 found = tracepoint_get_iter_range(&iter->tracepoint,
597 iter_mod->tracepoints_ptrs,
598 iter_mod->tracepoints_ptrs
599 + iter_mod->num_tracepoints);
600 if (found) {
601 iter->module = iter_mod;
602 break;
603 }
604 }
605 mutex_unlock(&tracepoints_mutex);
97e1c18e
MD
606end:
607 if (!found)
608 tracepoint_iter_reset(iter);
609}
b75ef8b4
MD
610#else /* CONFIG_MODULES */
611static void tracepoint_get_iter(struct tracepoint_iter *iter)
612{
613 int found = 0;
614
615 /* Core kernel tracepoints */
616 found = tracepoint_get_iter_range(&iter->tracepoint,
617 __start___tracepoints_ptrs,
618 __stop___tracepoints_ptrs);
619 if (!found)
620 tracepoint_iter_reset(iter);
621}
622#endif /* CONFIG_MODULES */
97e1c18e
MD
623
624void tracepoint_iter_start(struct tracepoint_iter *iter)
625{
626 tracepoint_get_iter(iter);
627}
628EXPORT_SYMBOL_GPL(tracepoint_iter_start);
629
630void tracepoint_iter_next(struct tracepoint_iter *iter)
631{
632 iter->tracepoint++;
633 /*
634 * iter->tracepoint may be invalid because we blindly incremented it.
635 * Make sure it is valid by marshalling on the tracepoints, getting the
636 * tracepoints from following modules if necessary.
637 */
638 tracepoint_get_iter(iter);
639}
640EXPORT_SYMBOL_GPL(tracepoint_iter_next);
641
642void tracepoint_iter_stop(struct tracepoint_iter *iter)
643{
644}
645EXPORT_SYMBOL_GPL(tracepoint_iter_stop);
646
647void tracepoint_iter_reset(struct tracepoint_iter *iter)
648{
b75ef8b4 649#ifdef CONFIG_MODULES
97e1c18e 650 iter->module = NULL;
b75ef8b4 651#endif /* CONFIG_MODULES */
97e1c18e
MD
652 iter->tracepoint = NULL;
653}
654EXPORT_SYMBOL_GPL(tracepoint_iter_reset);
32f85742 655
227a8375 656#ifdef CONFIG_MODULES
b75ef8b4
MD
657static int tracepoint_module_coming(struct module *mod)
658{
659 struct tp_module *tp_mod, *iter;
660 int ret = 0;
661
7dec935a
SRRH
662 if (!mod->num_tracepoints)
663 return 0;
664
b75ef8b4 665 /*
c10076c4
SR
666 * We skip modules that taint the kernel, especially those with different
667 * module headers (for forced load), to make sure we don't cause a crash.
668 * Staging and out-of-tree GPL modules are fine.
b75ef8b4 669 */
c10076c4 670 if (mod->taints & ~((1 << TAINT_OOT_MODULE) | (1 << TAINT_CRAP)))
b75ef8b4
MD
671 return 0;
672 mutex_lock(&tracepoints_mutex);
673 tp_mod = kmalloc(sizeof(struct tp_module), GFP_KERNEL);
674 if (!tp_mod) {
675 ret = -ENOMEM;
676 goto end;
677 }
678 tp_mod->num_tracepoints = mod->num_tracepoints;
679 tp_mod->tracepoints_ptrs = mod->tracepoints_ptrs;
680
681 /*
682 * tracepoint_module_list is kept sorted by struct module pointer
683 * address for iteration on tracepoints from a seq_file that can release
684 * the mutex between calls.
685 */
686 list_for_each_entry_reverse(iter, &tracepoint_module_list, list) {
687 BUG_ON(iter == tp_mod); /* Should never be in the list twice */
688 if (iter < tp_mod) {
689 /* We belong to the location right after iter. */
690 list_add(&tp_mod->list, &iter->list);
691 goto module_added;
692 }
693 }
694 /* We belong to the beginning of the list */
695 list_add(&tp_mod->list, &tracepoint_module_list);
696module_added:
697 tracepoint_update_probe_range(mod->tracepoints_ptrs,
698 mod->tracepoints_ptrs + mod->num_tracepoints);
699end:
700 mutex_unlock(&tracepoints_mutex);
701 return ret;
702}
703
704static int tracepoint_module_going(struct module *mod)
705{
706 struct tp_module *pos;
707
7dec935a
SRRH
708 if (!mod->num_tracepoints)
709 return 0;
710
b75ef8b4
MD
711 mutex_lock(&tracepoints_mutex);
712 tracepoint_update_probe_range(mod->tracepoints_ptrs,
713 mod->tracepoints_ptrs + mod->num_tracepoints);
714 list_for_each_entry(pos, &tracepoint_module_list, list) {
715 if (pos->tracepoints_ptrs == mod->tracepoints_ptrs) {
716 list_del(&pos->list);
717 kfree(pos);
718 break;
719 }
720 }
721 /*
722 * In the case of modules that were tainted at "coming", we'll simply
723 * walk through the list without finding it. We cannot use the "tainted"
724 * flag on "going", in case a module taints the kernel only after being
725 * loaded.
726 */
727 mutex_unlock(&tracepoints_mutex);
728 return 0;
729}
227a8375 730
32f85742
MD
731int tracepoint_module_notify(struct notifier_block *self,
732 unsigned long val, void *data)
733{
734 struct module *mod = data;
b75ef8b4 735 int ret = 0;
32f85742
MD
736
737 switch (val) {
738 case MODULE_STATE_COMING:
b75ef8b4
MD
739 ret = tracepoint_module_coming(mod);
740 break;
741 case MODULE_STATE_LIVE:
742 break;
32f85742 743 case MODULE_STATE_GOING:
b75ef8b4 744 ret = tracepoint_module_going(mod);
32f85742
MD
745 break;
746 }
b75ef8b4 747 return ret;
32f85742
MD
748}
749
750struct notifier_block tracepoint_module_nb = {
751 .notifier_call = tracepoint_module_notify,
752 .priority = 0,
753};
754
755static int init_tracepoints(void)
756{
757 return register_module_notifier(&tracepoint_module_nb);
758}
759__initcall(init_tracepoints);
227a8375 760#endif /* CONFIG_MODULES */
a871bd33 761
3d27d8cb 762#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
60d970c2 763
97419875 764/* NB: reg/unreg are called while guarded with the tracepoints_mutex */
a871bd33
JB
765static int sys_tracepoint_refcount;
766
767void syscall_regfunc(void)
768{
769 unsigned long flags;
770 struct task_struct *g, *t;
771
a871bd33
JB
772 if (!sys_tracepoint_refcount) {
773 read_lock_irqsave(&tasklist_lock, flags);
774 do_each_thread(g, t) {
cc3b13c1
HB
775 /* Skip kernel threads. */
776 if (t->mm)
777 set_tsk_thread_flag(t, TIF_SYSCALL_TRACEPOINT);
a871bd33
JB
778 } while_each_thread(g, t);
779 read_unlock_irqrestore(&tasklist_lock, flags);
780 }
781 sys_tracepoint_refcount++;
a871bd33
JB
782}
783
784void syscall_unregfunc(void)
785{
786 unsigned long flags;
787 struct task_struct *g, *t;
788
a871bd33
JB
789 sys_tracepoint_refcount--;
790 if (!sys_tracepoint_refcount) {
791 read_lock_irqsave(&tasklist_lock, flags);
792 do_each_thread(g, t) {
66700001 793 clear_tsk_thread_flag(t, TIF_SYSCALL_TRACEPOINT);
a871bd33
JB
794 } while_each_thread(g, t);
795 read_unlock_irqrestore(&tasklist_lock, flags);
796 }
a871bd33 797}
60d970c2 798#endif