Commit | Line | Data |
---|---|---|
d2912cb1 | 1 | /* SPDX-License-Identifier: GPL-2.0-only */ |
819e50e2 AT |
2 | /* |
3 | * arch/arm64/kernel/entry-ftrace.S | |
4 | * | |
5 | * Copyright (C) 2013 Linaro Limited | |
6 | * Author: AKASHI Takahiro <takahiro.akashi@linaro.org> | |
819e50e2 AT |
7 | */ |
8 | ||
9 | #include <linux/linkage.h> | |
3b23e499 | 10 | #include <asm/asm-offsets.h> |
f705d954 | 11 | #include <asm/assembler.h> |
819e50e2 AT |
12 | #include <asm/ftrace.h> |
13 | #include <asm/insn.h> | |
14 | ||
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
/*
 * Due to -fpatchable-function-entry=2, the compiler has placed two NOPs before
 * the regular function prologue. For an enabled callsite, ftrace_init_nop() and
 * ftrace_make_call() have patched those NOPs to:
 *
 * 	MOV	X9, LR
 * 	BL	<entry>
 *
 * ... where <entry> is either ftrace_caller or ftrace_regs_caller.
 *
 * Each instrumented function follows the AAPCS, so here x0-x8 and x19-x30 are
 * live, and x9-x18 are safe to clobber.
 *
 * We save the callsite's context into a pt_regs before invoking any ftrace
 * callbacks. So that we can get a sensible backtrace, we create a stack record
 * for the callsite and the ftrace entry assembly. This is not sufficient for
 * reliable stacktrace: until we create the callsite stack record, its caller
 * is missing from the LR and existing chain of frame records.
 *
 * On entry to this macro (per the patched sequence above):
 *   x9  = the callsite's original LR (i.e. the instrumented function's LR)
 *   x30 = address of the instruction after the callsite's BL
 */
	.macro	ftrace_regs_entry, allregs=0
	/* Make room for pt_regs, plus a callee frame */
	sub	sp, sp, #(S_FRAME_SIZE + 16)

	/* Save function arguments (and x9 for simplicity) */
	stp	x0, x1, [sp, #S_X0]
	stp	x2, x3, [sp, #S_X2]
	stp	x4, x5, [sp, #S_X4]
	stp	x6, x7, [sp, #S_X6]
	stp	x8, x9, [sp, #S_X8]

	/* Optionally save the callee-saved registers, always save the FP */
	.if \allregs == 1
	stp	x10, x11, [sp, #S_X10]
	stp	x12, x13, [sp, #S_X12]
	stp	x14, x15, [sp, #S_X14]
	stp	x16, x17, [sp, #S_X16]
	stp	x18, x19, [sp, #S_X18]
	stp	x20, x21, [sp, #S_X20]
	stp	x22, x23, [sp, #S_X22]
	stp	x24, x25, [sp, #S_X24]
	stp	x26, x27, [sp, #S_X26]
	stp	x28, x29, [sp, #S_X28]
	.else
	str	x29, [sp, #S_FP]
	.endif

	/* Save the callsite's SP and LR */
	add	x10, sp, #(S_FRAME_SIZE + 16)	// x10 = SP at the callsite
	stp	x9, x10, [sp, #S_LR]		// regs->regs[30] and regs->sp

	/* Save the PC after the ftrace callsite */
	str	x30, [sp, #S_PC]

	/* Create a frame record for the callsite above pt_regs */
	stp	x29, x9, [sp, #S_FRAME_SIZE]	// callsite's FP and LR
	add	x29, sp, #S_FRAME_SIZE

	/* Create our frame record within pt_regs. */
	stp	x29, x30, [sp, #S_STACKFRAME]
	add	x29, sp, #S_STACKFRAME
	.endm
77 | ||
/*
 * Patched-in entry used when a callsite's ftrace_ops needs the full
 * register set: saves all GPRs into pt_regs before the common handler.
 */
ENTRY(ftrace_regs_caller)
	ftrace_regs_entry	1		// allregs=1: save x0-x29
	b	ftrace_common
ENDPROC(ftrace_regs_caller)
82 | ||
/*
 * Patched-in entry for ordinary callsites: saves only the argument
 * registers (x0-x8/x9) and the FP before the common handler.
 */
ENTRY(ftrace_caller)
	ftrace_regs_entry	0		// allregs=0: args + FP only
	b	ftrace_common
ENDPROC(ftrace_caller)
87 | ||
/*
 * Common body shared by ftrace_caller and ftrace_regs_caller. On entry a
 * pt_regs has been created by ftrace_regs_entry, x9 holds the callsite's
 * LR and x30 the address after the callsite's BL. Invokes the traced-ops
 * callback via the ftrace_call patch site, then restores the callsite's
 * context and returns to the instrumented function's body.
 */
ENTRY(ftrace_common)
	sub	x0, x30, #AARCH64_INSN_SIZE	// ip (callsite's BL insn)
	mov	x1, x9				// parent_ip (callsite's LR)
	ldr_l	x2, function_trace_op		// op
	mov	x3, sp				// regs

GLOBAL(ftrace_call)
	bl	ftrace_stub		// patched to the active tracer callback

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
GLOBAL(ftrace_graph_call)		// ftrace_graph_caller();
	nop				// If enabled, this will be replaced
					// "b ftrace_graph_caller"
#endif

/*
 * At the callsite x0-x8 and x19-x30 were live. Any C code will have preserved
 * x19-x29 per the AAPCS, and we created frame records upon entry, so we need
 * to restore x0-x8, x29, and x30.
 */
ftrace_common_return:
	/* Restore function arguments (offsets match the save path above) */
	ldp	x0, x1, [sp, #S_X0]
	ldp	x2, x3, [sp, #S_X2]
	ldp	x4, x5, [sp, #S_X4]
	ldp	x6, x7, [sp, #S_X6]
	ldr	x8, [sp, #S_X8]

	/* Restore the callsite's FP, LR, PC */
	ldr	x29, [sp, #S_FP]
	ldr	x30, [sp, #S_LR]
	ldr	x9, [sp, #S_PC]		// may have been modified by a callback

	/* Restore the callsite's SP */
	add	sp, sp, #(S_FRAME_SIZE + 16)

	ret	x9			// resume the instrumented function
ENDPROC(ftrace_common)
126 | ||
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Branched to from the ftrace_graph_call patch site in ftrace_common.
 * Hands the callsite's pc, the address of the saved LR, and the callsite's
 * FP to prepare_ftrace_return() so the return path can be hooked, then
 * rejoins the common restore path.
 */
ENTRY(ftrace_graph_caller)
	ldr	x0, [sp, #S_PC]
	sub	x0, x0, #AARCH64_INSN_SIZE	// ip (callsite's BL insn)
	add	x1, sp, #S_LR			// parent_ip (callsite's LR)
	ldr	x2, [sp, #S_FRAME_SIZE]		// parent fp (callsite's FP)
	bl	prepare_ftrace_return
	b	ftrace_common_return
ENDPROC(ftrace_graph_caller)
#endif
138 | ||
139 | #else /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */ | |
140 | ||
819e50e2 AT |
141 | /* |
142 | * Gcc with -pg will put the following code in the beginning of each function: | |
143 | * mov x0, x30 | |
144 | * bl _mcount | |
145 | * [function's body ...] | |
 * "bl _mcount" may be replaced with "bl ftrace_caller" or a NOP if dynamic
 * ftrace is enabled.
148 | * | |
149 | * Please note that x0 as an argument will not be used here because we can | |
150 | * get lr(x30) of instrumented function at any time by winding up call stack | |
151 | * as long as the kernel is compiled without -fomit-frame-pointer. | |
152 | * (or CONFIG_FRAME_POINTER, this is forced on arm64) | |
153 | * | |
154 | * stack layout after mcount_enter in _mcount(): | |
155 | * | |
156 | * current sp/fp => 0:+-----+ | |
157 | * in _mcount() | x29 | -> instrumented function's fp | |
158 | * +-----+ | |
159 | * | x30 | -> _mcount()'s lr (= instrumented function's pc) | |
160 | * old sp => +16:+-----+ | |
161 | * when instrumented | | | |
162 | * function calls | ... | | |
163 | * _mcount() | | | |
164 | * | | | |
165 | * instrumented => +xx:+-----+ | |
166 | * function's fp | x29 | -> parent's fp | |
167 | * +-----+ | |
168 | * | x30 | -> instrumented function's lr (= parent's pc) | |
169 | * +-----+ | |
170 | * | ... | | |
171 | */ | |
172 | ||
	/*
	 * Push a standard frame record for _mcount() itself so that stack
	 * walking from here reaches the instrumented function's frame.
	 */
	.macro mcount_enter
	stp	x29, x30, [sp, #-16]!	// save FP/LR; keeps sp 16-byte aligned
	mov	x29, sp			// x29 -> _mcount()'s frame record
	.endm
177 | ||
	/*
	 * Tear down the frame record pushed by mcount_enter and return to
	 * the instrumented function.
	 */
	.macro mcount_exit
	ldp	x29, x30, [sp], #16	// restore caller's FP/LR
	ret
	.endm
182 | ||
	/*
	 * \rd = \rn - AARCH64_INSN_SIZE: turn a return address into the
	 * address of the "bl" callsite itself.
	 */
	.macro mcount_adjust_addr rd, rn
	sub	\rd, \rn, #AARCH64_INSN_SIZE
	.endm
186 | ||
/* for instrumented function's parent */
	.macro mcount_get_parent_fp reg
	ldr	\reg, [x29]		// instrumented function's fp
	ldr	\reg, [\reg]		// parent's fp (first slot of that record)
	.endm
192 | ||
/* for instrumented function */
	/* Callsite pc taken directly from x30 (only valid before any bl). */
	.macro mcount_get_pc0 reg
	mcount_adjust_addr	\reg, x30	// x30 = pc after "bl _mcount"
	.endm
197 | ||
	/* Callsite pc recovered from the LR saved in _mcount()'s frame. */
	.macro mcount_get_pc reg
	ldr	\reg, [x29, #8]		// _mcount()'s saved lr
	mcount_adjust_addr	\reg, \reg	// back up to the bl insn
	.endm
202 | ||
	/* Instrumented function's lr (= parent's pc), from its frame record. */
	.macro mcount_get_lr reg
	ldr	\reg, [x29]		// instrumented function's fp
	ldr	\reg, [\reg, #8]	// its saved lr
	.endm
207 | ||
	/* Address of the slot holding the instrumented function's saved lr. */
	.macro mcount_get_lr_addr reg
	ldr	\reg, [x29]		// instrumented function's fp
	add	\reg, \reg, #8		// lr slot within that frame record
	.endm
212 | ||
#ifndef CONFIG_DYNAMIC_FTRACE
/*
 * void _mcount(unsigned long return_address)
 * @return_address: return address to instrumented function
 *
 * This function makes calls, if enabled, to:
 *     - tracer function to probe instrumented function's entry,
 *     - ftrace_graph_caller to set up an exit hook
 */
ENTRY(_mcount)
	mcount_enter

	ldr_l	x2, ftrace_trace_function
	adr	x0, ftrace_stub
	cmp	x0, x2			// if (ftrace_trace_function
	b.eq	skip_ftrace_call	//     != ftrace_stub) {

	mcount_get_pc	x0		//       function's pc
	mcount_get_lr	x1		//       function's lr (= parent's pc)
	blr	x2			//   (*ftrace_trace_function)(pc, lr);

skip_ftrace_call:			// }
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	ldr_l	x2, ftrace_graph_return
	adr	x0, ftrace_stub		// reload: x0 was clobbered if the
					// tracer was called via blr above
	cmp	x0, x2			// if ((ftrace_graph_return
	b.ne	ftrace_graph_caller	//     != ftrace_stub)

	ldr_l	x2, ftrace_graph_entry	//   || (ftrace_graph_entry
	adr_l	x0, ftrace_graph_entry_stub //   != ftrace_graph_entry_stub))
	cmp	x0, x2
	b.ne	ftrace_graph_caller	//     ftrace_graph_caller();
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
	mcount_exit
ENDPROC(_mcount)
EXPORT_SYMBOL(_mcount)
NOKPROBE(_mcount)
819e50e2 | 249 | |
bd7d38db AT |
#else /* CONFIG_DYNAMIC_FTRACE */
/*
 * _mcount() is used to build the kernel with -pg option, but all the branch
 * instructions to _mcount() are replaced to NOP initially at kernel start up,
 * and later on, NOP to branch to ftrace_caller() when enabled or branch to
 * NOP when disabled per-function base. Hence this body should not normally
 * be reached once the callsites have been patched.
 */
ENTRY(_mcount)
	ret
ENDPROC(_mcount)
EXPORT_SYMBOL(_mcount)
NOKPROBE(_mcount)
bd7d38db AT |
262 | |
/*
 * void ftrace_caller(unsigned long return_address)
 * @return_address: return address to instrumented function
 *
 * This function is a counterpart of _mcount() in 'static' ftrace, and
 * makes calls to:
 *     - tracer function to probe instrumented function's entry,
 *     - ftrace_graph_caller to set up an exit hook
 *
 * The GLOBAL labels below are runtime patch sites: their layout (one
 * instruction each) must not change.
 */
ENTRY(ftrace_caller)
	mcount_enter

	mcount_get_pc0	x0		//     function's pc
	mcount_get_lr	x1		//     function's lr

GLOBAL(ftrace_call)			// tracer(pc, lr);
	nop				// This will be replaced with "bl xxx"
					// where xxx can be any kind of tracer.

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
GLOBAL(ftrace_graph_call)		// ftrace_graph_caller();
	nop				// If enabled, this will be replaced
					// "b ftrace_graph_caller"
#endif

	mcount_exit
ENDPROC(ftrace_caller)
819e50e2 AT |
290 | |
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * void ftrace_graph_caller(void)
 *
 * Called from _mcount() or ftrace_caller() when function_graph tracer is
 * selected.
 * This function w/ prepare_ftrace_return() fakes link register's value on
 * the call stack in order to intercept instrumented function's return path
 * and run return_to_handler() later on its exit.
 */
ENTRY(ftrace_graph_caller)
	mcount_get_pc		  x0	//     function's pc
	mcount_get_lr_addr	  x1	//     pointer to function's saved lr
	mcount_get_parent_fp	  x2	//     parent's fp
	bl	prepare_ftrace_return	// prepare_ftrace_return(pc, &lr, fp)

	mcount_exit			// returns to the instrumented function
ENDPROC(ftrace_graph_caller)
3b23e499 TD |
309 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ |
310 | #endif /* CONFIG_DYNAMIC_FTRACE */ | |
311 | #endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */ | |
312 | ||
/*
 * Default no-op tracer callback. Its address also serves as the
 * "disabled" sentinel compared against ftrace_trace_function and
 * ftrace_graph_return above.
 */
ENTRY(ftrace_stub)
	ret
ENDPROC(ftrace_stub)
819e50e2 | 316 | |
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * void return_to_handler(void)
 *
 * Run ftrace_return_to_handler() before going back to parent.
 * @fp is checked against the value passed by ftrace_graph_caller().
 *
 * Reached via the faked LR installed by prepare_ftrace_return(); x0-x7
 * may carry the traced function's return value, so they are preserved
 * around the C call.
 */
ENTRY(return_to_handler)
	/* save return value regs */
	sub	sp, sp, #64
	stp	x0, x1, [sp]
	stp	x2, x3, [sp, #16]
	stp	x4, x5, [sp, #32]
	stp	x6, x7, [sp, #48]

	mov	x0, x29			// parent's fp
	bl	ftrace_return_to_handler // addr = ftrace_return_to_handler(fp);
	mov	x30, x0			// restore the original return address

	/* restore return value regs */
	ldp	x0, x1, [sp]
	ldp	x2, x3, [sp, #16]
	ldp	x4, x5, [sp, #32]
	ldp	x6, x7, [sp, #48]
	add	sp, sp, #64

	ret				// to the traced function's real caller
END(return_to_handler)
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */