tracing: move utility functions from ftrace.h to kernel.h
kernel/trace/Kconfig
#
# Architectures that offer a FUNCTION_TRACER implementation should
# select HAVE_FUNCTION_TRACER:
#
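#
# For illustration only (the architecture and option list here are just
# an example): an architecture advertises its support from its own
# Kconfig file, e.g. arch/x86/Kconfig does roughly:
#
#       config X86
#               select HAVE_FUNCTION_TRACER
#               select HAVE_DYNAMIC_FTRACE
#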

config USER_STACKTRACE_SUPPORT
        bool

config NOP_TRACER
        bool

config HAVE_FTRACE_NMI_ENTER
        bool

config HAVE_FUNCTION_TRACER
        bool

config HAVE_FUNCTION_GRAPH_TRACER
        bool

config HAVE_FUNCTION_TRACE_MCOUNT_TEST
        bool
        help
          This gets selected when the arch tests the function_trace_stop
          variable at the mcount call site. Otherwise, this variable
          is tested by the called function.

config HAVE_DYNAMIC_FTRACE
        bool

config HAVE_FTRACE_MCOUNT_RECORD
        bool

config HAVE_HW_BRANCH_TRACER
        bool

config TRACER_MAX_TRACE
        bool

config RING_BUFFER
        bool

config FTRACE_NMI_ENTER
        bool
        depends on HAVE_FTRACE_NMI_ENTER
        default y

config TRACING
        bool
        select DEBUG_FS
        select RING_BUFFER
        select STACKTRACE if STACKTRACE_SUPPORT
        select TRACEPOINTS
        select NOP_TRACER

menu "Tracers"

config FUNCTION_TRACER
        bool "Kernel Function Tracer"
        depends on HAVE_FUNCTION_TRACER
        depends on DEBUG_KERNEL
        select FRAME_POINTER
        select KALLSYMS
        select TRACING
        select CONTEXT_SWITCH_TRACER
        help
          Enable the kernel to trace every kernel function. This is done
          by using a compiler feature to insert a small, 5-byte No-Operation
          instruction at the beginning of every kernel function; this NOP
          sequence is then dynamically patched into a tracer call when
          tracing is enabled by the administrator. If it is disabled at
          run time (the bootup default), the overhead of the instructions
          is very small and not measurable even in micro-benchmarks.
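
          For example, once booted the tracer can be selected at run time
          through debugfs (paths here assume debugfs is mounted at
          /debugfs, as in the other examples in this file):

              echo function > /debugfs/tracing/current_tracer
              cat /debugfs/tracing/trace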

config FUNCTION_GRAPH_TRACER
        bool "Kernel Function Graph Tracer"
        depends on HAVE_FUNCTION_GRAPH_TRACER
        depends on FUNCTION_TRACER
        default y
        help
          Enable the kernel to trace a function at both its entry
          and its return.
          Its main purpose is to trace the duration of functions and
          draw a call graph for each thread, with some information such
          as the return value.
          This is done by saving the current return address in a stack
          of calls kept in the current task structure.
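
          For example, assuming the same debugfs mount point as above,
          the graph tracer is selected with:

              echo function_graph > /debugfs/tracing/current_tracer
              cat /debugfs/tracing/trace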

config IRQSOFF_TRACER
        bool "Interrupts-off Latency Tracer"
        default n
        depends on TRACE_IRQFLAGS_SUPPORT
        depends on GENERIC_TIME
        depends on DEBUG_KERNEL
        select TRACE_IRQFLAGS
        select TRACING
        select TRACER_MAX_TRACE
        help
          This option measures the time spent in irqs-off critical
          sections, with microsecond accuracy.

          The default measurement method is a maximum search, which is
          disabled by default and can be (re-)started at run time
          via:

              echo 0 > /debugfs/tracing/tracing_max_latency

          (Note that kernel size and overhead increase with this option
          enabled. This option and the preempt-off timing option can be
          used together or separately.)
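
          (To run this tracer it is selected like any other, e.g.,
          assuming the same debugfs mount point as above:

              echo irqsoff > /debugfs/tracing/current_tracer
              cat /debugfs/tracing/tracing_max_latency

          which reports the longest irqs-off section observed so far.)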

config PREEMPT_TRACER
        bool "Preemption-off Latency Tracer"
        default n
        depends on GENERIC_TIME
        depends on PREEMPT
        depends on DEBUG_KERNEL
        select TRACING
        select TRACER_MAX_TRACE
        help
          This option measures the time spent in preemption-off critical
          sections, with microsecond accuracy.

          The default measurement method is a maximum search, which is
          disabled by default and can be (re-)started at run time
          via:

              echo 0 > /debugfs/tracing/tracing_max_latency

          (Note that kernel size and overhead increase with this option
          enabled. This option and the irqs-off timing option can be
          used together or separately.)
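
          (As above, the tracer itself would typically be selected with

              echo preemptoff > /debugfs/tracing/current_tracer

          assuming the same debugfs layout.)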

config SYSPROF_TRACER
        bool "Sysprof Tracer"
        depends on X86
        select TRACING
        select CONTEXT_SWITCH_TRACER
        help
          This tracer provides the trace needed by the 'Sysprof' userspace
          tool.

config SCHED_TRACER
        bool "Scheduling Latency Tracer"
        depends on DEBUG_KERNEL
        select TRACING
        select CONTEXT_SWITCH_TRACER
        select TRACER_MAX_TRACE
        help
          This tracer tracks the latency of the highest priority task
          to be scheduled in, starting from the point it has woken up.
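
          For example, the tracer provided here is registered as "wakeup";
          assuming the usual debugfs layout it can be exercised with:

              echo wakeup > /debugfs/tracing/current_tracer
              cat /debugfs/tracing/tracing_max_latency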

config CONTEXT_SWITCH_TRACER
        bool "Trace process context switches"
        depends on DEBUG_KERNEL
        select TRACING
        select MARKERS
        help
          This tracer gets called from the context switch and records
          all switching of tasks.
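
          For illustration, the tracer this option provides is selected
          by name (here assumed to be "sched_switch", which may differ by
          kernel version):

              echo sched_switch > /debugfs/tracing/current_tracer
              cat /debugfs/tracing/trace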

config EVENT_TRACER
        bool "Trace various events in the kernel"
        depends on DEBUG_KERNEL
        select TRACING
        help
          This tracer hooks into various trace points in the kernel,
          allowing the user to pick and choose which trace points they
          want to trace.
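
          For illustration only (the control files and event name here are
          an assumption and may differ by kernel version), events are
          typically listed and enabled via debugfs:

              cat /debugfs/tracing/available_events
              echo sched_wakeup > /debugfs/tracing/set_event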

config BOOT_TRACER
        bool "Trace boot initcalls"
        depends on DEBUG_KERNEL
        select TRACING
        select CONTEXT_SWITCH_TRACER
        help
          This tracer helps developers to optimize boot times: it records
          the timings of the initcalls and traces key events and the identity
          of tasks that can cause boot delays, such as context-switches.

          Its aim is to be parsed by the scripts/bootgraph.pl tool to
          produce pretty graphics about boot inefficiencies, giving a visual
          representation of the delays during initcalls - but the raw
          /debug/tracing/trace text output is readable too.

          You must pass ftrace=initcall on the kernel command line
          to enable this on bootup.
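
          For illustration, after booting with ftrace=initcall the
          recorded trace can be turned into a graph roughly like this
          (output file name is just an example):

              cat /debug/tracing/trace | perl scripts/bootgraph.pl > boot.svg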

config TRACE_BRANCH_PROFILING
        bool "Trace likely/unlikely profiler"
        depends on DEBUG_KERNEL
        select TRACING
        help
          This tracer profiles all the likely and unlikely macros
          in the kernel. It will display the results in:

              /debugfs/tracing/profile_annotated_branch

          Note: this will add a significant overhead; only turn this
          on if you need to profile the system's use of these macros.

          Say N if unsure.

config PROFILE_ALL_BRANCHES
        bool "Profile all if conditionals"
        depends on TRACE_BRANCH_PROFILING
        help
          This tracer profiles all branch conditions. Every if ()
          taken in the kernel is recorded whether it hit or missed.
          The results will be displayed in:

              /debugfs/tracing/profile_branch

          This configuration, when enabled, will impose a great overhead
          on the system. This should only be enabled when the system
          is to be analyzed.

          Say N if unsure.

config TRACING_BRANCHES
        bool
        help
          Selected by tracers that will trace the likely and unlikely
          conditions. This prevents the tracers themselves from being
          profiled. Profiling the tracing infrastructure can only happen
          when the likelys and unlikelys are not being traced.

config BRANCH_TRACER
        bool "Trace likely/unlikely instances"
        depends on TRACE_BRANCH_PROFILING
        select TRACING_BRANCHES
        help
          This traces the events of likely and unlikely condition
          calls in the kernel. The difference between this and the
          "Trace likely/unlikely profiler" is that this is not a
          histogram of the callers, but actually places the calling
          events into a running trace buffer to see when and where the
          events happened, as well as their results.

          Say N if unsure.

config POWER_TRACER
        bool "Trace power consumption behavior"
        depends on DEBUG_KERNEL
        depends on X86
        select TRACING
        help
          This tracer helps developers to analyze and optimize the kernel's
          power management decisions, specifically the C-state and P-state
          behavior.

config STACK_TRACER
        bool "Trace max stack"
        depends on HAVE_FUNCTION_TRACER
        depends on DEBUG_KERNEL
        select FUNCTION_TRACER
        select STACKTRACE
        select KALLSYMS
        help
          This special tracer records the maximum stack footprint of the
          kernel and displays it in debugfs/tracing/stack_trace.

          This tracer works by hooking into every function call that the
          kernel executes, and keeping a maximum stack depth value and
          stack-trace saved. If this is configured with DYNAMIC_FTRACE
          then it will not have any overhead while the stack tracer
          is disabled.

          To enable the stack tracer on bootup, pass in 'stacktrace'
          on the kernel command line.

          The stack tracer can also be enabled or disabled via the
          sysctl kernel.stack_tracer_enabled.
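
          For example (paths assume debugfs mounted at /debugfs and the
          usual procfs layout):

              echo 1 > /proc/sys/kernel/stack_tracer_enabled
              cat /debugfs/tracing/stack_trace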

          Say N if unsure.

config HW_BRANCH_TRACER
        depends on HAVE_HW_BRANCH_TRACER
        bool "Trace hw branches"
        select TRACING
        help
          This tracer records all branches on the system in a circular
          buffer giving access to the last N branches for each cpu.

config KMEMTRACE
        bool "Trace SLAB allocations"
        select TRACING
        help
          kmemtrace provides tracing for slab allocator functions, such as
          kmalloc, kfree, kmem_cache_alloc, kmem_cache_free, etc. Collected
          data is then fed to the userspace application in order to analyse
          allocation hotspots, internal fragmentation and so on, making it
          possible to see how well an allocator performs, as well as debug
          and profile kernel code.

          This requires a userspace application to use. See
          Documentation/vm/kmemtrace.txt for more information.

          Saying Y will make the kernel somewhat larger and slower. However,
          if you disable kmemtrace at run-time or boot-time, the performance
          impact is minimal (depending on the arch the kernel is built for).

          If unsure, say N.

config WORKQUEUE_TRACER
        bool "Trace workqueues"
        select TRACING
        help
          The workqueue tracer provides some statistical information
          about each cpu workqueue thread, such as the number of works
          inserted and executed since their creation. It can help to
          evaluate the amount of work each of them has to perform. For
          example, it can help a developer decide whether to use a per-cpu
          workqueue instead of a singlethreaded one.

config BLK_DEV_IO_TRACE
        bool "Support for tracing block io actions"
        depends on SYSFS
        depends on BLOCK
        select RELAY
        select DEBUG_FS
        select TRACEPOINTS
        select TRACING
        select STACKTRACE
        help
          Say Y here if you want to be able to trace the block layer actions
          on a given queue. Tracing allows you to see any traffic happening
          on a block device queue. For more information (and the userspace
          support tools needed), fetch the blktrace tools from:

              git://git.kernel.dk/blktrace.git

          Tracing is also possible using the ftrace interface, e.g.:

              echo 1 > /sys/block/sda/sda1/trace/enable
              echo blk > /sys/kernel/debug/tracing/current_tracer
              cat /sys/kernel/debug/tracing/trace_pipe

          If unsure, say N.

config DYNAMIC_FTRACE
        bool "enable/disable ftrace tracepoints dynamically"
        depends on FUNCTION_TRACER
        depends on HAVE_DYNAMIC_FTRACE
        depends on DEBUG_KERNEL
        default y
        help
          This option will modify all the calls to ftrace dynamically
          (it patches them out of the binary image and replaces them
          with a No-Op instruction) as they are called. A table is
          created to dynamically enable them again.

          This way a CONFIG_FUNCTION_TRACER kernel is slightly larger, but
          otherwise has native performance as long as no tracing is active.

          The changes to the code are done by a kernel thread that
          wakes up once a second and checks to see if any ftrace calls
          were made. If so, it runs stop_machine (stops all CPUs)
          and modifies the code to jump over the call to ftrace.
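
          With DYNAMIC_FTRACE the set of traced functions can also be
          restricted at run time, for example (path assumed as elsewhere
          in this file, filter pattern only an illustration):

              echo 'schedule*' > /debugfs/tracing/set_ftrace_filter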

config FTRACE_MCOUNT_RECORD
        def_bool y
        depends on DYNAMIC_FTRACE
        depends on HAVE_FTRACE_MCOUNT_RECORD

config FTRACE_SELFTEST
        bool

config FTRACE_STARTUP_TEST
        bool "Perform a startup test on ftrace"
        depends on TRACING && DEBUG_KERNEL
        select FTRACE_SELFTEST
        help
          This option performs a series of startup tests on ftrace. On bootup
          a series of tests is run to verify that the tracer is functioning
          properly. It will do tests on all the configured tracers of ftrace.

config MMIOTRACE
        bool "Memory mapped IO tracing"
        depends on HAVE_MMIOTRACE_SUPPORT && DEBUG_KERNEL && PCI
        select TRACING
        help
          Mmiotrace traces Memory Mapped I/O access and is meant for
          debugging and reverse engineering. It is called from the ioremap
          implementation and works via page faults. Tracing is disabled by
          default and can be enabled at run-time.
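
          For example (mount point assumed, see the documentation
          referenced below):

              echo mmiotrace > /debug/tracing/current_tracer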

          See Documentation/tracers/mmiotrace.txt.
          If you are not helping to develop drivers, say N.

config MMIOTRACE_TEST
        tristate "Test module for mmiotrace"
        depends on MMIOTRACE && m
        help
          This is a dumb module for testing mmiotrace. It is very dangerous
          as it will write garbage to IO memory starting at a given address.
          However, it should be safe to use on e.g. an unused portion of VRAM.

          Say N, unless you absolutely know what you are doing.

endmenu