/*
 * mcount and friends -- ftrace stuff
 *
 * Copyright (C) 2009-2010 Analog Devices Inc.
 * Licensed under the GPL-2 or later.
 */

#include <linux/linkage.h>
#include <asm/ftrace.h>

.text

#ifdef CONFIG_DYNAMIC_FTRACE

/* Simple stub so we can boot the kernel until runtime patching has
 * disabled all calls to this. Then it'll be unused.
 */
ENTRY(__mcount)
# if ANOMALY_05000371
	nop; nop; nop; nop;
# endif
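	/* when the workaround is enabled, the nops above keep this rts a
	 * few instructions away from the call that set RETS -- see the
	 * anomaly 05000371 note at the end of this file
	 */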
	rts;
ENDPROC(__mcount)

/* GCC will have called us before setting up the function prologue, so we
 * can clobber the normal scratch registers, but we need to make sure to
 * save/restore the registers used for argument passing (R0-R2) in case
 * the profiled function is using them. With data registers, R3 is the
 * only one we can blow away. With pointer registers, we have P0-P2.
 *
 * Upon entry, the RETS will point to the top of the current profiled
 * function. And since GCC pushed the previous RETS for us, the previous
 * function will be waiting there. mmmm pie.
 */
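/* In other words, each instrumented function begins roughly like so
 * (a sketch of the -pg call sequence, not literal compiler output):
 *	[--sp] = rets;		// return address into our caller
 *	call __mcount;		// leaves RETS pointing inside this function
 * which is why [sp] holds the parent's address once we get control.
 */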
ENTRY(_ftrace_caller)
# ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
	/* optional micro optimization: return if stopped */
	p1.l = _function_trace_stop;
	p1.h = _function_trace_stop;
	r3 = [p1];
	cc = r3 == 0;
	if ! cc jump _ftrace_stub (bp);
# endif

	/* save first/second/third function arg and the return register */
	[--sp] = r2;
	[--sp] = r0;
	[--sp] = r1;
	[--sp] = rets;
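	/* the stack now looks like:
	 *	[sp]	rets	(ip inside the profiled function)
	 *	[sp+4]	r1
	 *	[sp+8]	r0
	 *	[sp+12]	r2
	 *	[sp+16]	the RETS that GCC pushed (ip inside the parent)
	 */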

	/* function_trace_call(unsigned long ip, unsigned long parent_ip):
	 *  ip: this point was called by ...
	 *  parent_ip: ... this function
	 * the ip itself will need adjusting for the mcount call
	 */
	r0 = rets;
	r1 = [sp + 16];	/* skip the 4 local regs on stack */
	r0 += -MCOUNT_INSN_SIZE;

.globl _ftrace_call
_ftrace_call:
	call _ftrace_stub

# ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl _ftrace_graph_call
_ftrace_graph_call:
	nop;	/* jump _ftrace_graph_caller; */
# endif
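	/* both of the sites above are live-patched: dynamic ftrace rewrites
	 * the "call _ftrace_stub" at _ftrace_call to call the active tracer,
	 * and flips the nop at _ftrace_graph_call into the jump shown in its
	 * comment while the graph tracer is in use
	 */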

	/* restore state and get out of dodge */
.Lfinish_trace:
	rets = [sp++];
	r1 = [sp++];
	r0 = [sp++];
	r2 = [sp++];

.globl _ftrace_stub
_ftrace_stub:
	rts;
ENDPROC(_ftrace_caller)

#else

/* See documentation for _ftrace_caller */
ENTRY(__mcount)
# ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
	/* optional micro optimization: return if stopped */
	p1.l = _function_trace_stop;
	p1.h = _function_trace_stop;
	r3 = [p1];
	cc = r3 == 0;
	if ! cc jump _ftrace_stub (bp);
# endif

	/* save third function arg early so we can do testing below */
	[--sp] = r2;

	/* load the function pointer to the tracer */
	p0.l = _ftrace_trace_function;
	p0.h = _ftrace_trace_function;
	r3 = [p0];

	/* optional micro optimization: don't call the stub tracer */
	r2.l = _ftrace_stub;
	r2.h = _ftrace_stub;
	cc = r2 == r3;
	if ! cc jump .Ldo_trace;
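	/* r2 still holds the address of _ftrace_stub at this point; the
	 * graph-tracer checks below reuse it, since an idle
	 * ftrace_graph_return also points at ftrace_stub
	 */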

# ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* if the ftrace_graph_return function pointer is not set to
	 * the ftrace_stub entry, call prepare_ftrace_return().
	 */
	p0.l = _ftrace_graph_return;
	p0.h = _ftrace_graph_return;
	r3 = [p0];
	cc = r2 == r3;
	if ! cc jump _ftrace_graph_caller;

	/* similarly, if the ftrace_graph_entry function pointer is not
	 * set to the ftrace_graph_entry_stub entry, ...
	 */
	p0.l = _ftrace_graph_entry;
	p0.h = _ftrace_graph_entry;
	r2.l = _ftrace_graph_entry_stub;
	r2.h = _ftrace_graph_entry_stub;
	r3 = [p0];
	cc = r2 == r3;
	if ! cc jump _ftrace_graph_caller;
# endif

	r2 = [sp++];
	rts;

.Ldo_trace:

	/* save first/second function arg and the return register */
	[--sp] = r0;
	[--sp] = r1;
	[--sp] = rets;

	/* setup the tracer function */
	p0 = r3;

	/* function_trace_call(unsigned long ip, unsigned long parent_ip):
	 *  ip: this point was called by ...
	 *  parent_ip: ... this function
	 * the ip itself will need adjusting for the mcount call
	 */
	r0 = rets;
	r1 = [sp + 16];	/* skip the 4 local regs on stack */
	r0 += -MCOUNT_INSN_SIZE;

	/* call the tracer */
	call (p0);

	/* restore state and get out of dodge */
.Lfinish_trace:
	rets = [sp++];
	r1 = [sp++];
	r0 = [sp++];
	r2 = [sp++];

.globl _ftrace_stub
_ftrace_stub:
	rts;
ENDPROC(__mcount)

#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* The prepare_ftrace_return() function is similar to the trace function
 * except it takes a pointer to the location of the frompc. This is so
 * prepare_ftrace_return() can hijack it temporarily for probing purposes.
 */
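/* A rough sketch of the C side (see this port's ftrace.c for the real
 * thing):
 *	void prepare_ftrace_return(unsigned long *parent,
 *				   unsigned long self_addr,
 *				   unsigned long frame_pointer);
 * It saves the original *parent on the return stack and rewrites that
 * slot to point at _return_to_handler below.
 */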
ENTRY(_ftrace_graph_caller)
# ifndef CONFIG_DYNAMIC_FTRACE
	/* save first/second function arg and the return register */
	[--sp] = r0;
	[--sp] = r1;
	[--sp] = rets;

	/* prepare_ftrace_return(parent, self_addr, frame_pointer) */
	r0 = sp;	/* unsigned long *parent */
	r1 = rets;	/* unsigned long self_addr */
# else
	r0 = sp;	/* unsigned long *parent */
	r1 = [sp];	/* unsigned long self_addr */
# endif
# ifdef CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST
	r2 = fp;	/* unsigned long frame_pointer */
# endif
	r0 += 16;	/* skip the 4 local regs on stack */
	r1 += -MCOUNT_INSN_SIZE;
	call _prepare_ftrace_return;

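	/* the return address has been rewritten; unwind through the common
	 * restore sequence in the mcount handler above
	 */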
	jump .Lfinish_trace;
ENDPROC(_ftrace_graph_caller)

/* Undo the rewrite caused by ftrace_graph_caller(). The common function
 * ftrace_return_to_handler() will return the original rets so we can
 * restore it and be on our way.
 */
ENTRY(_return_to_handler)
	/* make sure original return values are saved */
	[--sp] = p0;
	[--sp] = r0;
	[--sp] = r1;
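	/* r0/r1 hold the function's live return value (r1 is only used for
	 * 64-bit results), and p0 may also carry live ABI state (e.g. an
	 * aggregate-return pointer), so all three must survive the C call
	 */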

	/* get original return address */
# ifdef CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST
	r0 = fp;	/* Blackfin is sane, so omit this */
# endif
	call _ftrace_return_to_handler;
	rets = r0;

	/* anomaly 05000371 - make sure we have at least three instructions
	 * between rets setting and the return
	 */
	r1 = [sp++];
	r0 = [sp++];
	p0 = [sp++];
	rts;
ENDPROC(_return_to_handler)
#endif