mm: introduce execmem_alloc() and execmem_free()
arch/x86/kernel/ftrace.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Dynamic function tracing support.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * Thanks goes to Ingo Molnar, for suggesting the idea.
 * Mathieu Desnoyers, for suggesting postponing the modifications.
 * Arjan van de Ven, for keeping me straight, and explaining to me
 * the dangers of modifying code on the run.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/memory.h>
#include <linux/vmalloc.h>
#include <linux/set_memory.h>
#include <linux/execmem.h>

#include <trace/syscall.h>

#include <asm/kprobes.h>
#include <asm/ftrace.h>
#include <asm/nops.h>
#include <asm/text-patching.h>

#ifdef CONFIG_DYNAMIC_FTRACE

static int ftrace_poke_late = 0;

void ftrace_arch_code_modify_prepare(void)
	__acquires(&text_mutex)
{
	/*
	 * Need to grab text_mutex to prevent a race from module loading
	 * and live kernel patching from changing the text permissions while
	 * ftrace has it set to "read/write".
	 */
	mutex_lock(&text_mutex);
	ftrace_poke_late = 1;
}

void ftrace_arch_code_modify_post_process(void)
	__releases(&text_mutex)
{
	/*
	 * ftrace_make_{call,nop}() may be called during
	 * module load, and we need to finish the text_poke_queue()
	 * that they do, here.
	 */
	text_poke_finish();
	ftrace_poke_late = 0;
	mutex_unlock(&text_mutex);
}

static const char *ftrace_nop_replace(void)
{
	return x86_nops[5];
}

static const char *ftrace_call_replace(unsigned long ip, unsigned long addr)
{
	/*
	 * No need to translate into a callthunk. The trampoline does
	 * the depth accounting itself.
	 */
	return text_gen_insn(CALL_INSN_OPCODE, (void *)ip, (void *)addr);
}

static int ftrace_verify_code(unsigned long ip, const char *old_code)
{
	char cur_code[MCOUNT_INSN_SIZE];

	/*
	 * Note:
	 * We are paranoid about modifying text, as if a bug were to happen, it
	 * could cause us to read or write to someplace that could cause harm.
	 * Carefully read and modify the code with copy_from_kernel_nofault(),
	 * and make sure what we read is what we expected it to be before
	 * modifying it.
	 */
	/* read the text we want to modify */
	if (copy_from_kernel_nofault(cur_code, (void *)ip, MCOUNT_INSN_SIZE)) {
		WARN_ON(1);
		return -EFAULT;
	}

	/* Make sure it is what we expect it to be */
	if (memcmp(cur_code, old_code, MCOUNT_INSN_SIZE) != 0) {
		ftrace_expected = old_code;
		WARN_ON(1);
		return -EINVAL;
	}

	return 0;
}

/*
 * Marked __ref because it calls text_poke_early() which is .init.text. That is
 * ok because that call will happen early, during boot, when .init sections are
 * still present.
 */
static int __ref
ftrace_modify_code_direct(unsigned long ip, const char *old_code,
			  const char *new_code)
{
	int ret = ftrace_verify_code(ip, old_code);
	if (ret)
		return ret;

	/* replace the text with the new text */
	if (ftrace_poke_late)
		text_poke_queue((void *)ip, new_code, MCOUNT_INSN_SIZE, NULL);
	else
		text_poke_early((void *)ip, new_code, MCOUNT_INSN_SIZE);
	return 0;
}

int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	const char *new, *old;

	old = ftrace_call_replace(ip, addr);
	new = ftrace_nop_replace();

	/*
	 * On boot up, and when modules are loaded, the MCOUNT_ADDR
	 * is converted to a nop, and will never become MCOUNT_ADDR
	 * again. This code is either running before SMP (on boot up)
	 * or before the code will ever be executed (module load).
	 * We do not want to use the breakpoint version in this case,
	 * just modify the code directly.
	 */
	if (addr == MCOUNT_ADDR)
		return ftrace_modify_code_direct(ip, old, new);

	/*
	 * x86 overrides ftrace_replace_code -- this function will never be used
	 * in this case.
	 */
	WARN_ONCE(1, "invalid use of ftrace_make_nop");
	return -EINVAL;
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	const char *new, *old;

	old = ftrace_nop_replace();
	new = ftrace_call_replace(ip, addr);

	/* Should only be called when module is loaded */
	return ftrace_modify_code_direct(rec->ip, old, new);
}

/*
 * Should never be called:
 * As it is only called by __ftrace_replace_code() which is called by
 * ftrace_replace_code() that x86 overrides, and by ftrace_update_code()
 * which is called to turn mcount into nops or nops into function calls
 * but not to convert a function from not using regs to one that uses
 * regs, which ftrace_modify_call() is for.
 */
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		       unsigned long addr)
{
	WARN_ON(1);
	return -EINVAL;
}

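/*
 * Patch both the ftrace_call and ftrace_regs_call sites in the default
 * trampolines to call @func, using the breakpoint based text_poke_bp()
 * so the update is safe against concurrent execution.
 */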
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip;
	const char *new;

	ip = (unsigned long)(&ftrace_call);
	new = ftrace_call_replace(ip, (unsigned long)func);
	text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);

	ip = (unsigned long)(&ftrace_regs_call);
	new = ftrace_call_replace(ip, (unsigned long)func);
	text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);

	return 0;
}

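/*
 * x86 override of the core ftrace_replace_code(): the first pass verifies
 * that every record still holds the expected old instruction, the second
 * pass queues the new instructions with text_poke_queue() and flushes them
 * as one batch with text_poke_finish().
 */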
void ftrace_replace_code(int enable)
{
	struct ftrace_rec_iter *iter;
	struct dyn_ftrace *rec;
	const char *new, *old;
	int ret;

	for_ftrace_rec_iter(iter) {
		rec = ftrace_rec_iter_record(iter);

		switch (ftrace_test_record(rec, enable)) {
		case FTRACE_UPDATE_IGNORE:
		default:
			continue;

		case FTRACE_UPDATE_MAKE_CALL:
			old = ftrace_nop_replace();
			break;

		case FTRACE_UPDATE_MODIFY_CALL:
		case FTRACE_UPDATE_MAKE_NOP:
			old = ftrace_call_replace(rec->ip, ftrace_get_addr_curr(rec));
			break;
		}

		ret = ftrace_verify_code(rec->ip, old);
		if (ret) {
			ftrace_expected = old;
			ftrace_bug(ret, rec);
			ftrace_expected = NULL;
			return;
		}
	}

	for_ftrace_rec_iter(iter) {
		rec = ftrace_rec_iter_record(iter);

		switch (ftrace_test_record(rec, enable)) {
		case FTRACE_UPDATE_IGNORE:
		default:
			continue;

		case FTRACE_UPDATE_MAKE_CALL:
		case FTRACE_UPDATE_MODIFY_CALL:
			new = ftrace_call_replace(rec->ip, ftrace_get_addr_new(rec));
			break;

		case FTRACE_UPDATE_MAKE_NOP:
			new = ftrace_nop_replace();
			break;
		}

		text_poke_queue((void *)rec->ip, new, MCOUNT_INSN_SIZE, NULL);
		ftrace_update_record(rec, enable);
	}
	text_poke_finish();
}

void arch_ftrace_update_code(int command)
{
	ftrace_modify_all_code(command);
}

/* Currently only x86_64 supports dynamic trampolines */
#ifdef CONFIG_X86_64

#ifdef CONFIG_MODULES
/* Allocation via execmem simplifies allocating memory for code */
static inline void *alloc_tramp(unsigned long size)
{
	return execmem_alloc(EXECMEM_FTRACE, size);
}
static inline void tramp_free(void *tramp)
{
	execmem_free(tramp);
}
#else
/* Trampolines can only be created if modules are supported */
static inline void *alloc_tramp(unsigned long size)
{
	return NULL;
}
static inline void tramp_free(void *tramp) { }
#endif

/* Defined as markers to the end of the ftrace default trampolines */
extern void ftrace_regs_caller_end(void);
extern void ftrace_caller_end(void);
extern void ftrace_caller_op_ptr(void);
extern void ftrace_regs_caller_op_ptr(void);
extern void ftrace_regs_caller_jmp(void);

/* movq function_trace_op(%rip), %rdx */
/* 0x48 0x8b 0x15 <offset-to-ftrace_trace_op (4 bytes)> */
#define OP_REF_SIZE 7

/*
 * The ftrace_ops is passed to the function callback. Since the
 * trampoline only services a single ftrace_ops, we can pass in
 * that ops directly.
 *
 * The ftrace_op_code_union is used to create a pointer to the
 * ftrace_ops that will be passed to the callback function.
 */
union ftrace_op_code_union {
	char code[OP_REF_SIZE];
	struct {
		char op[3];
		int offset;
	} __attribute__((packed));
};

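/*
 * Room reserved after the copied trampoline text for its return sequence:
 * 5 bytes for a jmp to the return thunk when retpolines are enabled,
 * otherwise 1 byte for a ret, plus one more byte when the SLS mitigation
 * appends a trailing int3.
 */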
#define RET_SIZE \
	(IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) ? 5 : 1 + IS_ENABLED(CONFIG_MITIGATION_SLS))

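/*
 * Create a writable copy of the default trampoline for @ops and fix it up.
 * The resulting layout is:
 *
 *   [ copy of ftrace_caller/ftrace_regs_caller | return insn | ftrace_ops * ]
 *
 * The embedded ftrace_ops pointer takes the place of the global
 * function_trace_op reference, and the call site is patched to call
 * ftrace_ops_get_func(ops). The trampoline is made read-only and
 * executable before its address is returned.
 */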
static unsigned long
create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
{
	unsigned long start_offset;
	unsigned long end_offset;
	unsigned long op_offset;
	unsigned long call_offset;
	unsigned long jmp_offset;
	unsigned long offset;
	unsigned long npages;
	unsigned long size;
	unsigned long *ptr;
	void *trampoline;
	void *ip, *dest;
	/* 48 8b 15 <offset> is movq <offset>(%rip), %rdx */
	unsigned const char op_ref[] = { 0x48, 0x8b, 0x15 };
	unsigned const char retq[] = { RET_INSN_OPCODE, INT3_INSN_OPCODE };
	union ftrace_op_code_union op_ptr;
	int ret;

	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
		start_offset = (unsigned long)ftrace_regs_caller;
		end_offset = (unsigned long)ftrace_regs_caller_end;
		op_offset = (unsigned long)ftrace_regs_caller_op_ptr;
		call_offset = (unsigned long)ftrace_regs_call;
		jmp_offset = (unsigned long)ftrace_regs_caller_jmp;
	} else {
		start_offset = (unsigned long)ftrace_caller;
		end_offset = (unsigned long)ftrace_caller_end;
		op_offset = (unsigned long)ftrace_caller_op_ptr;
		call_offset = (unsigned long)ftrace_call;
		jmp_offset = 0;
	}

	size = end_offset - start_offset;

	/*
	 * Allocate enough size to store the ftrace_caller code,
	 * the return instruction, as well as the address of the
	 * ftrace_ops this trampoline is used for.
	 */
	trampoline = alloc_tramp(size + RET_SIZE + sizeof(void *));
	if (!trampoline)
		return 0;

	*tramp_size = size + RET_SIZE + sizeof(void *);
	npages = DIV_ROUND_UP(*tramp_size, PAGE_SIZE);

	/* Copy ftrace_caller onto the trampoline memory */
	ret = copy_from_kernel_nofault(trampoline, (void *)start_offset, size);
	if (WARN_ON(ret < 0))
		goto fail;

	ip = trampoline + size;
	if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
		__text_gen_insn(ip, JMP32_INSN_OPCODE, ip, x86_return_thunk, JMP32_INSN_SIZE);
	else
		memcpy(ip, retq, sizeof(retq));

	/* No need to test direct calls on created trampolines */
	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
		/* NOP the jnz 1f; but make sure it's a 2 byte jnz */
		ip = trampoline + (jmp_offset - start_offset);
		if (WARN_ON(*(char *)ip != 0x75))
			goto fail;
		ret = copy_from_kernel_nofault(ip, x86_nops[2], 2);
		if (ret < 0)
			goto fail;
	}

	/*
	 * The address of the ftrace_ops that is used for this trampoline
	 * is stored at the end of the trampoline. This will be used to
	 * load the third parameter for the callback. Basically, that
	 * location at the end of the trampoline takes the place of
	 * the global function_trace_op variable.
	 */

	ptr = (unsigned long *)(trampoline + size + RET_SIZE);
	*ptr = (unsigned long)ops;

	op_offset -= start_offset;
	memcpy(&op_ptr, trampoline + op_offset, OP_REF_SIZE);

	/* Are we pointing to the reference? */
	if (WARN_ON(memcmp(op_ptr.op, op_ref, 3) != 0))
		goto fail;

	/* Load the contents of ptr into the callback parameter */
	offset = (unsigned long)ptr;
	offset -= (unsigned long)trampoline + op_offset + OP_REF_SIZE;

	op_ptr.offset = offset;

	/* put in the new offset to the ftrace_ops */
	memcpy(trampoline + op_offset, &op_ptr, OP_REF_SIZE);

	/* put in the call to the function */
	mutex_lock(&text_mutex);
	call_offset -= start_offset;
	/*
	 * No need to translate into a callthunk. The trampoline does
	 * the depth accounting before the call already.
	 */
	dest = ftrace_ops_get_func(ops);
	memcpy(trampoline + call_offset,
	       text_gen_insn(CALL_INSN_OPCODE, trampoline + call_offset, dest),
	       CALL_INSN_SIZE);
	mutex_unlock(&text_mutex);

	/* The ALLOC_TRAMP flag lets us know we created it */
	ops->flags |= FTRACE_OPS_FL_ALLOC_TRAMP;

	set_memory_rox((unsigned long)trampoline, npages);
	return (unsigned long)trampoline;
fail:
	tramp_free(trampoline);
	return 0;
}

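/*
 * Walk every registered ftrace_ops and switch the trampolines allocated
 * here (FTRACE_OPS_FL_ALLOC_TRAMP) back to read-only.
 */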
void set_ftrace_ops_ro(void)
{
	struct ftrace_ops *ops;
	unsigned long start_offset;
	unsigned long end_offset;
	unsigned long npages;
	unsigned long size;

	do_for_each_ftrace_op(ops, ftrace_ops_list) {
		if (!(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
			continue;

		if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
			start_offset = (unsigned long)ftrace_regs_caller;
			end_offset = (unsigned long)ftrace_regs_caller_end;
		} else {
			start_offset = (unsigned long)ftrace_caller;
			end_offset = (unsigned long)ftrace_caller_end;
		}
		size = end_offset - start_offset;
		size = size + RET_SIZE + sizeof(void *);
		npages = DIV_ROUND_UP(size, PAGE_SIZE);
		set_memory_ro((unsigned long)ops->trampoline, npages);
	} while_for_each_ftrace_op(ops);
}

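/*
 * Offset of the patched call site (ftrace_call or ftrace_regs_call) from
 * the start of the corresponding default trampoline.
 */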
static unsigned long calc_trampoline_call_offset(bool save_regs)
{
	unsigned long start_offset;
	unsigned long call_offset;

	if (save_regs) {
		start_offset = (unsigned long)ftrace_regs_caller;
		call_offset = (unsigned long)ftrace_regs_call;
	} else {
		start_offset = (unsigned long)ftrace_caller;
		call_offset = (unsigned long)ftrace_call;
	}

	return call_offset - start_offset;
}

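/*
 * Create the per-ops trampoline on first use; afterwards only re-patch its
 * call site when the ops callback changes. Trampolines not allocated here
 * (FTRACE_OPS_FL_ALLOC_TRAMP clear) are left untouched.
 */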
void arch_ftrace_update_trampoline(struct ftrace_ops *ops)
{
	ftrace_func_t func;
	unsigned long offset;
	unsigned long ip;
	unsigned int size;
	const char *new;

	if (!ops->trampoline) {
		ops->trampoline = create_trampoline(ops, &size);
		if (!ops->trampoline)
			return;
		ops->trampoline_size = size;
		return;
	}

	/*
	 * The ftrace_ops caller may set up its own trampoline.
	 * In such a case, this code must not modify it.
	 */
	if (!(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
		return;

	offset = calc_trampoline_call_offset(ops->flags & FTRACE_OPS_FL_SAVE_REGS);
	ip = ops->trampoline + offset;
	func = ftrace_ops_get_func(ops);

	mutex_lock(&text_mutex);
	/* Do a safe modify in case the trampoline is executing */
	new = ftrace_call_replace(ip, (unsigned long)func);
	text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);
	mutex_unlock(&text_mutex);
}

/* Return the address of the function the trampoline calls */
static void *addr_from_call(void *ptr)
{
	union text_poke_insn call;
	int ret;

	ret = copy_from_kernel_nofault(&call, ptr, CALL_INSN_SIZE);
	if (WARN_ON_ONCE(ret < 0))
		return NULL;

	/* Make sure this is a call */
	if (WARN_ON_ONCE(call.opcode != CALL_INSN_OPCODE)) {
		pr_warn("Expected E8, got %x\n", call.opcode);
		return NULL;
	}

	return ptr + CALL_INSN_SIZE + call.disp;
}

/*
 * If the ops->trampoline was not allocated, then it probably
 * has a static trampoline func, or is the ftrace caller itself.
 */
static void *static_tramp_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
{
	unsigned long offset;
	bool save_regs = rec->flags & FTRACE_FL_REGS_EN;
	void *ptr;

	if (ops && ops->trampoline) {
#if !defined(CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS) && \
	defined(CONFIG_FUNCTION_GRAPH_TRACER)
		/*
		 * We only know about function graph tracer setting as static
		 * trampoline.
		 */
		if (ops->trampoline == FTRACE_GRAPH_ADDR)
			return (void *)prepare_ftrace_return;
#endif
		return NULL;
	}

	offset = calc_trampoline_call_offset(save_regs);

	if (save_regs)
		ptr = (void *)FTRACE_REGS_ADDR + offset;
	else
		ptr = (void *)FTRACE_ADDR + offset;

	return addr_from_call(ptr);
}

void *arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
{
	unsigned long offset;

	/* If we didn't allocate this trampoline, consider it static */
	if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
		return static_tramp_func(ops, rec);

	offset = calc_trampoline_call_offset(ops->flags & FTRACE_OPS_FL_SAVE_REGS);
	return addr_from_call((void *)ops->trampoline + offset);
}

void arch_ftrace_trampoline_free(struct ftrace_ops *ops)
{
	if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
		return;

	tramp_free((void *)ops->trampoline);
	ops->trampoline = 0;
}

#endif /* CONFIG_X86_64 */
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

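/*
 * Without HAVE_DYNAMIC_FTRACE_WITH_ARGS the function graph tracer is
 * switched on and off by patching the jump at ftrace_graph_call to go to
 * either ftrace_graph_caller or ftrace_stub.
 */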
#if defined(CONFIG_DYNAMIC_FTRACE) && !defined(CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS)
extern void ftrace_graph_call(void);
static const char *ftrace_jmp_replace(unsigned long ip, unsigned long addr)
{
	return text_gen_insn(JMP32_INSN_OPCODE, (void *)ip, (void *)addr);
}

static int ftrace_mod_jmp(unsigned long ip, void *func)
{
	const char *new;

	new = ftrace_jmp_replace(ip, (unsigned long)func);
	text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);
	return 0;
}

int ftrace_enable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);

	return ftrace_mod_jmp(ip, &ftrace_graph_caller);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);

	return ftrace_mod_jmp(ip, &ftrace_stub);
}
#endif /* CONFIG_DYNAMIC_FTRACE && !CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */

/*
 * Hook the return address and push it in the stack of return addrs
 * in current thread info.
 */
void prepare_ftrace_return(unsigned long ip, unsigned long *parent,
			   unsigned long frame_pointer)
{
	unsigned long return_hooker = (unsigned long)&return_to_handler;
	int bit;

	/*
	 * When resuming from suspend-to-ram, this function can be indirectly
	 * called from early CPU startup code while the CPU is in real mode,
	 * which would fail miserably. Make sure the stack pointer is a
	 * virtual address.
	 *
	 * This check isn't as accurate as virt_addr_valid(), but it should be
	 * good enough for this purpose, and it's fast.
	 */
	if (unlikely((long)__builtin_frame_address(0) >= 0))
		return;

	if (unlikely(ftrace_graph_is_dead()))
		return;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	bit = ftrace_test_recursion_trylock(ip, *parent);
	if (bit < 0)
		return;

	if (!function_graph_enter(*parent, ip, frame_pointer, parent))
		*parent = return_hooker;

	ftrace_test_recursion_unlock(bit);
}

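/*
 * With HAVE_DYNAMIC_FTRACE_WITH_ARGS the graph tracer hooks in through a
 * regular ftrace_ops callback: the parent return address is taken from the
 * stack pointer saved in fregs and handed to prepare_ftrace_return().
 */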
#ifdef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
		       struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct pt_regs *regs = &fregs->regs;
	unsigned long *stack = (unsigned long *)kernel_stack_pointer(regs);

	prepare_ftrace_return(ip, (unsigned long *)stack, 0);
}
#endif

#endif /* CONFIG_FUNCTION_GRAPH_TRACER */