arch/x86/kernel/ftrace.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Dynamic function tracing support.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * Thanks goes to Ingo Molnar, for suggesting the idea.
 * Mathieu Desnoyers, for suggesting postponing the modifications.
 * Arjan van de Ven, for keeping me straight, and explaining to me
 * the dangers of modifying code on the run.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/memory.h>
#include <linux/vmalloc.h>

#include <trace/syscall.h>

#include <asm/set_memory.h>
#include <asm/kprobes.h>
#include <asm/ftrace.h>
#include <asm/nops.h>
#include <asm/text-patching.h>

#ifdef CONFIG_DYNAMIC_FTRACE

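/*
 * Set while ftrace_arch_code_modify_prepare()/_post_process() bracket a
 * batch of updates; in that window pokes are queued with text_poke_queue()
 * instead of being written immediately.
 */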
static int ftrace_poke_late = 0;

void ftrace_arch_code_modify_prepare(void)
    __acquires(&text_mutex)
{
	/*
	 * Need to grab text_mutex to prevent a race: module loading and
	 * live kernel patching must not change the text permissions while
	 * ftrace has them set to "read/write".
	 */
	mutex_lock(&text_mutex);
	ftrace_poke_late = 1;
}

void ftrace_arch_code_modify_post_process(void)
    __releases(&text_mutex)
{
	/*
	 * ftrace_make_{call,nop}() may be called during
	 * module load, and we need to finish the text_poke_queue()
	 * that they do, here.
	 */
	text_poke_finish();
	ftrace_poke_late = 0;
	mutex_unlock(&text_mutex);
}

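/*
 * Each traced function begins with a MCOUNT_INSN_SIZE (5 byte) __fentry__
 * call site; tracing is toggled by swapping a 5-byte rel32 call with a
 * 5-byte NOP at that site.
 */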
static const char *ftrace_nop_replace(void)
{
	return x86_nops[5];
}

static const char *ftrace_call_replace(unsigned long ip, unsigned long addr)
{
	return text_gen_insn(CALL_INSN_OPCODE, (void *)ip, (void *)addr);
}

static int ftrace_verify_code(unsigned long ip, const char *old_code)
{
	char cur_code[MCOUNT_INSN_SIZE];

	/*
	 * Note:
	 * We are paranoid about modifying text, as if a bug were to happen,
	 * it could cause us to read or write to someplace that could cause
	 * harm. Carefully read the code with copy_from_kernel_nofault(), and
	 * make sure what we read is what we expected it to be before
	 * modifying it.
	 */
	/* read the text we want to modify */
	if (copy_from_kernel_nofault(cur_code, (void *)ip, MCOUNT_INSN_SIZE)) {
		WARN_ON(1);
		return -EFAULT;
	}

	/* Make sure it is what we expect it to be */
	if (memcmp(cur_code, old_code, MCOUNT_INSN_SIZE) != 0) {
		WARN_ON(1);
		return -EINVAL;
	}

	return 0;
}

/*
 * Marked __ref because it calls text_poke_early() which is .init.text. That is
 * ok because that call will happen early, during boot, when .init sections are
 * still present.
 */
static int __ref
ftrace_modify_code_direct(unsigned long ip, const char *old_code,
			  const char *new_code)
{
	int ret = ftrace_verify_code(ip, old_code);
	if (ret)
		return ret;

	/* replace the text with the new text */
	if (ftrace_poke_late)
		text_poke_queue((void *)ip, new_code, MCOUNT_INSN_SIZE, NULL);
	else
		text_poke_early((void *)ip, new_code, MCOUNT_INSN_SIZE);
	return 0;
}

int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	const char *new, *old;

	old = ftrace_call_replace(ip, addr);
	new = ftrace_nop_replace();

	/*
	 * On boot up, and when modules are loaded, the MCOUNT_ADDR
	 * is converted to a nop, and will never become MCOUNT_ADDR
	 * again. This code is either running before SMP (on boot up)
	 * or before the code will ever be executed (module load).
	 * We do not want to use the breakpoint version in this case,
	 * just modify the code directly.
	 */
	if (addr == MCOUNT_ADDR)
		return ftrace_modify_code_direct(ip, old, new);

	/*
	 * x86 overrides ftrace_replace_code -- this function will never be used
	 * in this case.
	 */
	WARN_ONCE(1, "invalid use of ftrace_make_nop");
	return -EINVAL;
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	const char *new, *old;

	old = ftrace_nop_replace();
	new = ftrace_call_replace(ip, addr);

	/* Should only be called when module is loaded */
	return ftrace_modify_code_direct(rec->ip, old, new);
}

/*
 * Should never be called:
 * It is only called by __ftrace_replace_code(), which is reached via
 * ftrace_replace_code() (which x86 overrides) and ftrace_update_code().
 * Those paths only turn mcount sites into nops or nops into function
 * calls; they never convert a function from not using regs to one that
 * uses regs, which is what ftrace_modify_call() is for.
 */
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		       unsigned long addr)
{
	WARN_ON(1);
	return -EINVAL;
}

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip;
	const char *new;

	ip = (unsigned long)(&ftrace_call);
	new = ftrace_call_replace(ip, (unsigned long)func);
	text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);

	ip = (unsigned long)(&ftrace_regs_call);
	new = ftrace_call_replace(ip, (unsigned long)func);
	text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);

	return 0;
}

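/*
 * Two passes: first verify that every site still holds the expected old
 * bytes, then queue all of the pokes and flush them as one batch.
 * Nothing is patched if any site fails verification.
 */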
void ftrace_replace_code(int enable)
{
	struct ftrace_rec_iter *iter;
	struct dyn_ftrace *rec;
	const char *new, *old;
	int ret;

	for_ftrace_rec_iter(iter) {
		rec = ftrace_rec_iter_record(iter);

		switch (ftrace_test_record(rec, enable)) {
		case FTRACE_UPDATE_IGNORE:
		default:
			continue;

		case FTRACE_UPDATE_MAKE_CALL:
			old = ftrace_nop_replace();
			break;

		case FTRACE_UPDATE_MODIFY_CALL:
		case FTRACE_UPDATE_MAKE_NOP:
			old = ftrace_call_replace(rec->ip, ftrace_get_addr_curr(rec));
			break;
		}

		ret = ftrace_verify_code(rec->ip, old);
		if (ret) {
			ftrace_bug(ret, rec);
			return;
		}
	}

	for_ftrace_rec_iter(iter) {
		rec = ftrace_rec_iter_record(iter);

		switch (ftrace_test_record(rec, enable)) {
		case FTRACE_UPDATE_IGNORE:
		default:
			continue;

		case FTRACE_UPDATE_MAKE_CALL:
		case FTRACE_UPDATE_MODIFY_CALL:
			new = ftrace_call_replace(rec->ip, ftrace_get_addr_new(rec));
			break;

		case FTRACE_UPDATE_MAKE_NOP:
			new = ftrace_nop_replace();
			break;
		}

		text_poke_queue((void *)rec->ip, new, MCOUNT_INSN_SIZE, NULL);
		ftrace_update_record(rec, enable);
	}
	text_poke_finish();
}

void arch_ftrace_update_code(int command)
{
	ftrace_modify_all_code(command);
}

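/*
 * Dynamic trampolines give an ftrace_ops its own copy of ftrace_caller
 * (or ftrace_regs_caller) with the ops pointer and callback patched in,
 * so a lone ops can be called without going through the shared list
 * function.
 */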
/* Currently only x86_64 supports dynamic trampolines */
#ifdef CONFIG_X86_64

#ifdef CONFIG_MODULES
#include <linux/moduleloader.h>
/* Module allocation simplifies allocating memory for code */
static inline void *alloc_tramp(unsigned long size)
{
	return module_alloc(size);
}
static inline void tramp_free(void *tramp)
{
	module_memfree(tramp);
}
#else
/* Trampolines can only be created if modules are supported */
static inline void *alloc_tramp(unsigned long size)
{
	return NULL;
}
static inline void tramp_free(void *tramp) { }
#endif

/* Defined as markers to the end of the ftrace default trampolines */
extern void ftrace_regs_caller_end(void);
extern void ftrace_regs_caller_ret(void);
extern void ftrace_caller_end(void);
extern void ftrace_caller_op_ptr(void);
extern void ftrace_regs_caller_op_ptr(void);
extern void ftrace_regs_caller_jmp(void);

/* movq function_trace_op(%rip), %rdx */
/* 0x48 0x8b 0x15 <offset-to-ftrace_trace_op (4 bytes)> */
#define OP_REF_SIZE	7

/*
 * The ftrace_ops is passed to the function callback. Since the
 * trampoline only services a single ftrace_ops, we can pass in
 * that ops directly.
 *
 * The ftrace_op_code_union is used to create a pointer to the
 * ftrace_ops that will be passed to the callback function.
 */
union ftrace_op_code_union {
	char code[OP_REF_SIZE];
	struct {
		char op[3];
		int offset;
	} __attribute__((packed));
};

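/*
 * Size of the trampoline's return: a 5-byte jmp to the return thunk when
 * retpolines are enabled, otherwise a 1-byte ret (plus an int3 against
 * straight-line speculation with CONFIG_SLS).
 */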
#define RET_SIZE	(IS_ENABLED(CONFIG_RETPOLINE) ? 5 : 1 + IS_ENABLED(CONFIG_SLS))

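/*
 * Layout of a created trampoline:
 *
 *	[ copy of ftrace_caller/ftrace_regs_caller up to *_caller_end ]
 *	[ return instruction (RET_SIZE bytes)                         ]
 *	[ pointer to the ftrace_ops this trampoline services          ]
 */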
static unsigned long
create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
{
	unsigned long start_offset;
	unsigned long end_offset;
	unsigned long op_offset;
	unsigned long call_offset;
	unsigned long jmp_offset;
	unsigned long offset;
	unsigned long npages;
	unsigned long size;
	unsigned long *ptr;
	void *trampoline;
	void *ip;
	/* 48 8b 15 <offset> is movq <offset>(%rip), %rdx */
	unsigned const char op_ref[] = { 0x48, 0x8b, 0x15 };
	unsigned const char retq[] = { RET_INSN_OPCODE, INT3_INSN_OPCODE };
	union ftrace_op_code_union op_ptr;
	int ret;

	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
		start_offset = (unsigned long)ftrace_regs_caller;
		end_offset = (unsigned long)ftrace_regs_caller_end;
		op_offset = (unsigned long)ftrace_regs_caller_op_ptr;
		call_offset = (unsigned long)ftrace_regs_call;
		jmp_offset = (unsigned long)ftrace_regs_caller_jmp;
	} else {
		start_offset = (unsigned long)ftrace_caller;
		end_offset = (unsigned long)ftrace_caller_end;
		op_offset = (unsigned long)ftrace_caller_op_ptr;
		call_offset = (unsigned long)ftrace_call;
		jmp_offset = 0;
	}

	size = end_offset - start_offset;

	/*
	 * Allocate enough size to store the ftrace_caller code,
	 * the return instruction, as well as the address of the
	 * ftrace_ops this trampoline is used for.
	 */
	trampoline = alloc_tramp(size + RET_SIZE + sizeof(void *));
	if (!trampoline)
		return 0;

	*tramp_size = size + RET_SIZE + sizeof(void *);
	npages = DIV_ROUND_UP(*tramp_size, PAGE_SIZE);

	/* Copy ftrace_caller onto the trampoline memory */
	ret = copy_from_kernel_nofault(trampoline, (void *)start_offset, size);
	if (WARN_ON(ret < 0))
		goto fail;

	ip = trampoline + size;
	if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
		__text_gen_insn(ip, JMP32_INSN_OPCODE, ip, &__x86_return_thunk, JMP32_INSN_SIZE);
	else
		memcpy(ip, retq, sizeof(retq));

	/* No need to test direct calls on created trampolines */
	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
		/* NOP the jnz 1f; but make sure it's a 2-byte jnz */
		ip = trampoline + (jmp_offset - start_offset);
		if (WARN_ON(*(char *)ip != 0x75))
			goto fail;
		ret = copy_from_kernel_nofault(ip, x86_nops[2], 2);
		if (ret < 0)
			goto fail;
	}

	/*
	 * The address of the ftrace_ops that is used for this trampoline
	 * is stored at the end of the trampoline. This will be used to
	 * load the third parameter for the callback. Basically, that
	 * location at the end of the trampoline takes the place of
	 * the global function_trace_op variable.
	 */

	ptr = (unsigned long *)(trampoline + size + RET_SIZE);
	*ptr = (unsigned long)ops;

	op_offset -= start_offset;
	memcpy(&op_ptr, trampoline + op_offset, OP_REF_SIZE);

	/* Are we pointing to the reference? */
	if (WARN_ON(memcmp(op_ptr.op, op_ref, 3) != 0))
		goto fail;

	/* Load the contents of ptr into the callback parameter */
	offset = (unsigned long)ptr;
	offset -= (unsigned long)trampoline + op_offset + OP_REF_SIZE;

	op_ptr.offset = offset;

	/* put in the new offset to the ftrace_ops */
	memcpy(trampoline + op_offset, &op_ptr, OP_REF_SIZE);

	/* put in the call to the function */
	mutex_lock(&text_mutex);
	call_offset -= start_offset;
	memcpy(trampoline + call_offset,
	       text_gen_insn(CALL_INSN_OPCODE,
			     trampoline + call_offset,
			     ftrace_ops_get_func(ops)), CALL_INSN_SIZE);
	mutex_unlock(&text_mutex);

	/* The ALLOC_TRAMP flag lets us know we created it */
	ops->flags |= FTRACE_OPS_FL_ALLOC_TRAMP;

	set_vm_flush_reset_perms(trampoline);

	if (likely(system_state != SYSTEM_BOOTING))
		set_memory_ro((unsigned long)trampoline, npages);
	set_memory_x((unsigned long)trampoline, npages);
	return (unsigned long)trampoline;
fail:
	tramp_free(trampoline);
	return 0;
}

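/*
 * Trampolines created while the system is still booting are left writable
 * (see the system_state check in create_trampoline()); this pass marks
 * them read-only once boot has progressed far enough.
 */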
void set_ftrace_ops_ro(void)
{
	struct ftrace_ops *ops;
	unsigned long start_offset;
	unsigned long end_offset;
	unsigned long npages;
	unsigned long size;

	do_for_each_ftrace_op(ops, ftrace_ops_list) {
		if (!(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
			continue;

		if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
			start_offset = (unsigned long)ftrace_regs_caller;
			end_offset = (unsigned long)ftrace_regs_caller_end;
		} else {
			start_offset = (unsigned long)ftrace_caller;
			end_offset = (unsigned long)ftrace_caller_end;
		}
		size = end_offset - start_offset;
		size = size + RET_SIZE + sizeof(void *);
		npages = DIV_ROUND_UP(size, PAGE_SIZE);
		set_memory_ro((unsigned long)ops->trampoline, npages);
	} while_for_each_ftrace_op(ops);
}

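/*
 * A created trampoline is a byte-for-byte copy of the default caller, so
 * the patched call site sits at the same offset from the start in both.
 */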
static unsigned long calc_trampoline_call_offset(bool save_regs)
{
	unsigned long start_offset;
	unsigned long call_offset;

	if (save_regs) {
		start_offset = (unsigned long)ftrace_regs_caller;
		call_offset = (unsigned long)ftrace_regs_call;
	} else {
		start_offset = (unsigned long)ftrace_caller;
		call_offset = (unsigned long)ftrace_call;
	}

	return call_offset - start_offset;
}

void arch_ftrace_update_trampoline(struct ftrace_ops *ops)
{
	ftrace_func_t func;
	unsigned long offset;
	unsigned long ip;
	unsigned int size;
	const char *new;

	if (!ops->trampoline) {
		ops->trampoline = create_trampoline(ops, &size);
		if (!ops->trampoline)
			return;
		ops->trampoline_size = size;
		return;
	}

	/*
	 * The ftrace_ops caller may set up its own trampoline.
	 * In such a case, this code must not modify it.
	 */
	if (!(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
		return;

	offset = calc_trampoline_call_offset(ops->flags & FTRACE_OPS_FL_SAVE_REGS);
	ip = ops->trampoline + offset;
	func = ftrace_ops_get_func(ops);

	mutex_lock(&text_mutex);
	/* Do a safe modify in case the trampoline is executing */
	new = ftrace_call_replace(ip, (unsigned long)func);
	text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);
	mutex_unlock(&text_mutex);
}

/* Return the address of the function the trampoline calls */
static void *addr_from_call(void *ptr)
{
	union text_poke_insn call;
	int ret;

	ret = copy_from_kernel_nofault(&call, ptr, CALL_INSN_SIZE);
	if (WARN_ON_ONCE(ret < 0))
		return NULL;

	/* Make sure this is a call */
	if (WARN_ON_ONCE(call.opcode != CALL_INSN_OPCODE)) {
		pr_warn("Expected E8, got %x\n", call.opcode);
		return NULL;
	}

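	/*
	 * A rel32 call's target is the address of the next instruction
	 * plus the signed 32-bit displacement.
	 */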
	return ptr + CALL_INSN_SIZE + call.disp;
}

void prepare_ftrace_return(unsigned long ip, unsigned long *parent,
			   unsigned long frame_pointer);

/*
 * If the ops->trampoline was not allocated, then it probably
 * has a static trampoline func, or is the ftrace caller itself.
 */
static void *static_tramp_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
{
	unsigned long offset;
	bool save_regs = rec->flags & FTRACE_FL_REGS_EN;
	void *ptr;

	if (ops && ops->trampoline) {
#if !defined(CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS) && \
	defined(CONFIG_FUNCTION_GRAPH_TRACER)
		/*
		 * The function graph tracer is the only one we know of
		 * that sets a static trampoline.
		 */
		if (ops->trampoline == FTRACE_GRAPH_ADDR)
			return (void *)prepare_ftrace_return;
#endif
		return NULL;
	}

	offset = calc_trampoline_call_offset(save_regs);

	if (save_regs)
		ptr = (void *)FTRACE_REGS_ADDR + offset;
	else
		ptr = (void *)FTRACE_ADDR + offset;

	return addr_from_call(ptr);
}

void *arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
{
	unsigned long offset;

	/* If we didn't allocate this trampoline, consider it static */
	if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
		return static_tramp_func(ops, rec);

	offset = calc_trampoline_call_offset(ops->flags & FTRACE_OPS_FL_SAVE_REGS);
	return addr_from_call((void *)ops->trampoline + offset);
}

void arch_ftrace_trampoline_free(struct ftrace_ops *ops)
{
	if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
		return;

	tramp_free((void *)ops->trampoline);
	ops->trampoline = 0;
}

#endif /* CONFIG_X86_64 */
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#if defined(CONFIG_DYNAMIC_FTRACE) && !defined(CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS)
extern void ftrace_graph_call(void);
static const char *ftrace_jmp_replace(unsigned long ip, unsigned long addr)
{
	return text_gen_insn(JMP32_INSN_OPCODE, (void *)ip, (void *)addr);
}

static int ftrace_mod_jmp(unsigned long ip, void *func)
{
	const char *new;

	new = ftrace_jmp_replace(ip, (unsigned long)func);
	text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);
	return 0;
}

int ftrace_enable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);

	return ftrace_mod_jmp(ip, &ftrace_graph_caller);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);

	return ftrace_mod_jmp(ip, &ftrace_stub);
}
#endif /* CONFIG_DYNAMIC_FTRACE && !CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */

/*
 * Hook the return address and push it onto the stack of return
 * addresses of the current task.
 */
void prepare_ftrace_return(unsigned long ip, unsigned long *parent,
			   unsigned long frame_pointer)
{
	unsigned long return_hooker = (unsigned long)&return_to_handler;
	int bit;

	/*
	 * When resuming from suspend-to-ram, this function can be indirectly
	 * called from early CPU startup code while the CPU is in real mode,
	 * which would fail miserably. Make sure the stack pointer is a
	 * virtual address.
	 *
	 * This check isn't as accurate as virt_addr_valid(), but it should be
	 * good enough for this purpose, and it's fast.
	 */
	if (unlikely((long)__builtin_frame_address(0) >= 0))
		return;

	if (unlikely(ftrace_graph_is_dead()))
		return;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	bit = ftrace_test_recursion_trylock(ip, *parent);
	if (bit < 0)
		return;

	if (!function_graph_enter(*parent, ip, frame_pointer, parent))
		*parent = return_hooker;

	ftrace_test_recursion_unlock(bit);
}

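/*
 * With DYNAMIC_FTRACE_WITH_ARGS the graph tracer hooks in through the
 * regular ftrace_ops callback; the traced function's return address sits
 * at the top of its stack, which is the slot prepare_ftrace_return()
 * may rewrite.
 */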
#ifdef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
		       struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct pt_regs *regs = &fregs->regs;
	unsigned long *stack = (unsigned long *)kernel_stack_pointer(regs);

	prepare_ftrace_return(ip, (unsigned long *)stack, 0);
}
#endif

#endif /* CONFIG_FUNCTION_GRAPH_TRACER */