objtool: Remove instruction::list
[linux-2.6-block.git] / tools / objtool / check.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Copyright (C) 2015-2017 Josh Poimboeuf <jpoimboe@redhat.com>
4  */
5
6 #include <string.h>
7 #include <stdlib.h>
8 #include <inttypes.h>
9 #include <sys/mman.h>
10
11 #include <arch/elf.h>
12 #include <objtool/builtin.h>
13 #include <objtool/cfi.h>
14 #include <objtool/arch.h>
15 #include <objtool/check.h>
16 #include <objtool/special.h>
17 #include <objtool/warn.h>
18 #include <objtool/endianness.h>
19
20 #include <linux/objtool.h>
21 #include <linux/hashtable.h>
22 #include <linux/kernel.h>
23 #include <linux/static_call_types.h>
24
25 struct alternative {
26         struct alternative *next;
27         struct instruction *insn;
28         bool skip_orig;
29 };
30
31 static unsigned long nr_cfi, nr_cfi_reused, nr_cfi_cache;
32
33 static struct cfi_init_state initial_func_cfi;
34 static struct cfi_state init_cfi;
35 static struct cfi_state func_cfi;
36
37 struct instruction *find_insn(struct objtool_file *file,
38                               struct section *sec, unsigned long offset)
39 {
40         struct instruction *insn;
41
42         hash_for_each_possible(file->insn_hash, insn, hash, sec_offset_hash(sec, offset)) {
43                 if (insn->sec == sec && insn->offset == offset)
44                         return insn;
45         }
46
47         return NULL;
48 }
49
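/*
 * Instructions are stored in per-section arrays of INSN_CHUNK_SIZE entries
 * (see decode_instructions()).  Within a chunk the next instruction is just
 * the next array element; at a chunk boundary (idx == INSN_CHUNK_MAX) fall
 * back to a hash lookup, and a zero-length entry marks an unused slot.
 */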
50 struct instruction *next_insn_same_sec(struct objtool_file *file,
51                                        struct instruction *insn)
52 {
53         if (insn->idx == INSN_CHUNK_MAX)
54                 return find_insn(file, insn->sec, insn->offset + insn->len);
55
56         insn++;
57         if (!insn->len)
58                 return NULL;
59
60         return insn;
61 }
62
63 static struct instruction *next_insn_same_func(struct objtool_file *file,
64                                                struct instruction *insn)
65 {
66         struct instruction *next = next_insn_same_sec(file, insn);
67         struct symbol *func = insn_func(insn);
68
69         if (!func)
70                 return NULL;
71
72         if (next && insn_func(next) == func)
73                 return next;
74
75         /* Check if we're already in the subfunction: */
76         if (func == func->cfunc)
77                 return NULL;
78
79         /* Move to the subfunction: */
80         return find_insn(file, func->cfunc->sec, func->cfunc->offset);
81 }
82
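/*
 * Step backwards using the prev_len recorded at decode time when we're at
 * the start of a chunk (idx == 0); otherwise the previous instruction is
 * the adjacent array element.
 */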
83 static struct instruction *prev_insn_same_sec(struct objtool_file *file,
84                                               struct instruction *insn)
85 {
86         if (insn->idx == 0) {
87                 if (insn->prev_len)
88                         return find_insn(file, insn->sec, insn->offset - insn->prev_len);
89                 return NULL;
90         }
91
92         return insn - 1;
93 }
94
95 static struct instruction *prev_insn_same_sym(struct objtool_file *file,
96                                               struct instruction *insn)
97 {
98         struct instruction *prev = prev_insn_same_sec(file, insn);
99
100         if (prev && insn_func(prev) == insn_func(insn))
101                 return prev;
102
103         return NULL;
104 }
105
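/*
 * The dummy __fake loop runs exactly once; it only exists so that __sec can
 * be declared inside the macro while keeping for_each_insn() usable as a
 * single statement.
 */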
106 #define for_each_insn(file, insn)                                       \
107         for (struct section *__sec, *__fake = (struct section *)1;      \
108              __fake; __fake = NULL)                                     \
109                 for_each_sec(file, __sec)                               \
110                         sec_for_each_insn(file, __sec, insn)
111
112 #define func_for_each_insn(file, func, insn)                            \
113         for (insn = find_insn(file, func->sec, func->offset);           \
114              insn;                                                      \
115              insn = next_insn_same_func(file, insn))
116
117 #define sym_for_each_insn(file, sym, insn)                              \
118         for (insn = find_insn(file, sym->sec, sym->offset);             \
119              insn && insn->offset < sym->offset + sym->len;             \
120              insn = next_insn_same_sec(file, insn))
121
122 #define sym_for_each_insn_continue_reverse(file, sym, insn)             \
123         for (insn = prev_insn_same_sec(file, insn);                     \
124              insn && insn->offset >= sym->offset;                       \
125              insn = prev_insn_same_sec(file, insn))
126
127 #define sec_for_each_insn_from(file, insn)                              \
128         for (; insn; insn = next_insn_same_sec(file, insn))
129
130 #define sec_for_each_insn_continue(file, insn)                          \
131         for (insn = next_insn_same_sec(file, insn); insn;               \
132              insn = next_insn_same_sec(file, insn))
133
134 static inline struct symbol *insn_call_dest(struct instruction *insn)
135 {
136         if (insn->type == INSN_JUMP_DYNAMIC ||
137             insn->type == INSN_CALL_DYNAMIC)
138                 return NULL;
139
140         return insn->_call_dest;
141 }
142
143 static inline struct reloc *insn_jump_table(struct instruction *insn)
144 {
145         if (insn->type == INSN_JUMP_DYNAMIC ||
146             insn->type == INSN_CALL_DYNAMIC)
147                 return insn->_jump_table;
148
149         return NULL;
150 }
151
152 static bool is_jump_table_jump(struct instruction *insn)
153 {
154         struct alt_group *alt_group = insn->alt_group;
155
156         if (insn_jump_table(insn))
157                 return true;
158
159         /* Retpoline alternative for a jump table? */
160         return alt_group && alt_group->orig_group &&
161                insn_jump_table(alt_group->orig_group->first_insn);
162 }
163
164 static bool is_sibling_call(struct instruction *insn)
165 {
166         /*
167          * Assume only STT_FUNC calls have jump-tables.
168          */
169         if (insn_func(insn)) {
170                 /* An indirect jump is either a sibling call or a jump to a table. */
171                 if (insn->type == INSN_JUMP_DYNAMIC)
172                         return !is_jump_table_jump(insn);
173         }
174
175         /* add_jump_destinations() sets insn_call_dest(insn) for sibling calls. */
176         return (is_static_jump(insn) && insn_call_dest(insn));
177 }
178
179 /*
180  * This checks to see if the given function is a "noreturn" function.
181  *
182  * For global functions which are outside the scope of this object file, we
183  * have to keep a manual list of them.
184  *
185  * For local functions, we have to detect them manually by simply looking for
186  * the lack of a return instruction.
187  */
188 static bool __dead_end_function(struct objtool_file *file, struct symbol *func,
189                                 int recursion)
190 {
191         int i;
192         struct instruction *insn;
193         bool empty = true;
194
195         /*
196          * Unfortunately these have to be hard-coded because the noreturn
197          * attribute isn't provided in ELF data. Keep 'em sorted.
198          */
199         static const char * const global_noreturns[] = {
200                 "__invalid_creds",
201                 "__module_put_and_kthread_exit",
202                 "__reiserfs_panic",
203                 "__stack_chk_fail",
204                 "__ubsan_handle_builtin_unreachable",
205                 "cpu_bringup_and_idle",
206                 "cpu_startup_entry",
207                 "do_exit",
208                 "do_group_exit",
209                 "do_task_dead",
210                 "ex_handler_msr_mce",
211                 "fortify_panic",
212                 "kthread_complete_and_exit",
213                 "kthread_exit",
214                 "kunit_try_catch_throw",
215                 "lbug_with_loc",
216                 "machine_real_restart",
217                 "make_task_dead",
218                 "panic",
219                 "rewind_stack_and_make_dead",
220                 "sev_es_terminate",
221                 "snp_abort",
222                 "stop_this_cpu",
223                 "usercopy_abort",
224                 "xen_cpu_bringup_again",
225                 "xen_start_kernel",
226         };
227
228         if (!func)
229                 return false;
230
231         if (func->bind == STB_WEAK)
232                 return false;
233
234         if (func->bind == STB_GLOBAL)
235                 for (i = 0; i < ARRAY_SIZE(global_noreturns); i++)
236                         if (!strcmp(func->name, global_noreturns[i]))
237                                 return true;
238
239         if (!func->len)
240                 return false;
241
242         insn = find_insn(file, func->sec, func->offset);
243         if (!insn || !insn_func(insn))
244                 return false;
245
246         func_for_each_insn(file, func, insn) {
247                 empty = false;
248
249                 if (insn->type == INSN_RETURN)
250                         return false;
251         }
252
253         if (empty)
254                 return false;
255
256         /*
257          * A function can have a sibling call instead of a return.  In that
258          * case, the function's dead-end status depends on whether the target
259          * of the sibling call returns.
260          */
261         func_for_each_insn(file, func, insn) {
262                 if (is_sibling_call(insn)) {
263                         struct instruction *dest = insn->jump_dest;
264
265                         if (!dest)
266                                 /* sibling call to another file */
267                                 return false;
268
269                         /* local sibling call */
270                         if (recursion == 5) {
271                                 /*
272                                  * Infinite recursion: two functions have
273                                  * sibling calls to each other.  This is a very
274                                  * rare case.  It means they aren't dead ends.
275                                  */
276                                 return false;
277                         }
278
279                         return __dead_end_function(file, insn_func(dest), recursion+1);
280                 }
281         }
282
283         return true;
284 }
285
286 static bool dead_end_function(struct objtool_file *file, struct symbol *func)
287 {
288         return __dead_end_function(file, func, 0);
289 }
290
291 static void init_cfi_state(struct cfi_state *cfi)
292 {
293         int i;
294
295         for (i = 0; i < CFI_NUM_REGS; i++) {
296                 cfi->regs[i].base = CFI_UNDEFINED;
297                 cfi->vals[i].base = CFI_UNDEFINED;
298         }
299         cfi->cfa.base = CFI_UNDEFINED;
300         cfi->drap_reg = CFI_UNDEFINED;
301         cfi->drap_offset = -1;
302 }
303
304 static void init_insn_state(struct objtool_file *file, struct insn_state *state,
305                             struct section *sec)
306 {
307         memset(state, 0, sizeof(*state));
308         init_cfi_state(&state->cfi);
309
310         /*
311          * We need the full vmlinux for noinstr validation, otherwise we
312          * cannot correctly determine insn_call_dest(insn)->sec (external symbols
313          * do not have a section).
314          */
315         if (opts.link && opts.noinstr && sec)
316                 state->noinstr = sec->noinstr;
317 }
318
319 static struct cfi_state *cfi_alloc(void)
320 {
321         struct cfi_state *cfi = calloc(sizeof(struct cfi_state), 1);
322         if (!cfi) {
323                 WARN("calloc failed");
324                 exit(1);
325         }
326         nr_cfi++;
327         return cfi;
328 }
329
330 static int cfi_bits;
331 static struct hlist_head *cfi_hash;
332
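/*
 * CFI states are deduplicated: cfi_key()/cficmp() hash and compare
 * everything after the embedded hlist_node, and cfi_hash_find_or_add()
 * returns an existing identical state when one is found so that each
 * unique state is only allocated once.
 */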
333 static inline bool cficmp(struct cfi_state *cfi1, struct cfi_state *cfi2)
334 {
335         return memcmp((void *)cfi1 + sizeof(cfi1->hash),
336                       (void *)cfi2 + sizeof(cfi2->hash),
337                       sizeof(struct cfi_state) - sizeof(struct hlist_node));
338 }
339
340 static inline u32 cfi_key(struct cfi_state *cfi)
341 {
342         return jhash((void *)cfi + sizeof(cfi->hash),
343                      sizeof(*cfi) - sizeof(cfi->hash), 0);
344 }
345
346 static struct cfi_state *cfi_hash_find_or_add(struct cfi_state *cfi)
347 {
348         struct hlist_head *head = &cfi_hash[hash_min(cfi_key(cfi), cfi_bits)];
349         struct cfi_state *obj;
350
351         hlist_for_each_entry(obj, head, hash) {
352                 if (!cficmp(cfi, obj)) {
353                         nr_cfi_cache++;
354                         return obj;
355                 }
356         }
357
358         obj = cfi_alloc();
359         *obj = *cfi;
360         hlist_add_head(&obj->hash, head);
361
362         return obj;
363 }
364
365 static void cfi_hash_add(struct cfi_state *cfi)
366 {
367         struct hlist_head *head = &cfi_hash[hash_min(cfi_key(cfi), cfi_bits)];
368
369         hlist_add_head(&cfi->hash, head);
370 }
371
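/*
 * Allocate the CFI hash table with at least 2^10 buckets, scaled up with
 * the caller-supplied size estimate.
 */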
372 static void *cfi_hash_alloc(unsigned long size)
373 {
374         cfi_bits = max(10, ilog2(size));
375         cfi_hash = mmap(NULL, sizeof(struct hlist_head) << cfi_bits,
376                         PROT_READ|PROT_WRITE,
377                         MAP_PRIVATE|MAP_ANON, -1, 0);
378         if (cfi_hash == (void *)-1L) {
379                 WARN("mmap fail cfi_hash");
380                 cfi_hash = NULL;
381         }  else if (opts.stats) {
382                 printf("cfi_bits: %d\n", cfi_bits);
383         }
384
385         return cfi_hash;
386 }
387
388 static unsigned long nr_insns;
389 static unsigned long nr_insns_visited;
390
391 /*
392  * Call the arch-specific instruction decoder for all the instructions and add
393  * them to the per-section instruction arrays and the instruction hash.
394  */
395 static int decode_instructions(struct objtool_file *file)
396 {
397         struct section *sec;
398         struct symbol *func;
399         unsigned long offset;
400         struct instruction *insn;
401         int ret;
402
403         for_each_sec(file, sec) {
404                 struct instruction *insns = NULL;
405                 u8 prev_len = 0;
406                 u8 idx = 0;
407
408                 if (!(sec->sh.sh_flags & SHF_EXECINSTR))
409                         continue;
410
411                 if (strcmp(sec->name, ".altinstr_replacement") &&
412                     strcmp(sec->name, ".altinstr_aux") &&
413                     strncmp(sec->name, ".discard.", 9))
414                         sec->text = true;
415
416                 if (!strcmp(sec->name, ".noinstr.text") ||
417                     !strcmp(sec->name, ".entry.text") ||
418                     !strcmp(sec->name, ".cpuidle.text") ||
419                     !strncmp(sec->name, ".text.__x86.", 12))
420                         sec->noinstr = true;
421
422                 /*
423                  * .init.text code is run before userspace and thus doesn't
424                  * strictly need retpolines, except for modules, which are
425                  * loaded late; they very much do need retpolines in their
426                  * .init.text.
427                  */
428                 if (!strcmp(sec->name, ".init.text") && !opts.module)
429                         sec->init = true;
430
431                 for (offset = 0; offset < sec->sh.sh_size; offset += insn->len) {
432                         if (!insns || idx == INSN_CHUNK_MAX) {
433                                 insns = calloc(sizeof(*insn), INSN_CHUNK_SIZE);
434                                 if (!insns) {
435                                         WARN("malloc failed");
436                                         return -1;
437                                 }
438                                 idx = 0;
439                         } else {
440                                 idx++;
441                         }
442                         insn = &insns[idx];
443                         insn->idx = idx;
444
445                         INIT_LIST_HEAD(&insn->call_node);
446                         insn->sec = sec;
447                         insn->offset = offset;
448                         insn->prev_len = prev_len;
449
450                         ret = arch_decode_instruction(file, sec, offset,
451                                                       sec->sh.sh_size - offset,
452                                                       insn);
453                         if (ret)
454                                 return ret;
455
456                         prev_len = insn->len;
457
458                         /*
459                          * By default, "ud2" is a dead end unless otherwise
460                          * annotated, because GCC 7 inserts it for certain
461                          * divide-by-zero cases.
462                          */
463                         if (insn->type == INSN_BUG)
464                                 insn->dead_end = true;
465
466                         hash_add(file->insn_hash, &insn->hash, sec_offset_hash(sec, insn->offset));
467                         nr_insns++;
468                 }
469
470 //              printf("%s: last chunk used: %d\n", sec->name, (int)idx);
471
472                 list_for_each_entry(func, &sec->symbol_list, list) {
473                         if (func->type != STT_NOTYPE && func->type != STT_FUNC)
474                                 continue;
475
476                         if (func->offset == sec->sh.sh_size) {
477                                 /* Heuristic: likely an "end" symbol */
478                                 if (func->type == STT_NOTYPE)
479                                         continue;
480                                 WARN("%s(): STT_FUNC at end of section",
481                                      func->name);
482                                 return -1;
483                         }
484
485                         if (func->return_thunk || func->alias != func)
486                                 continue;
487
488                         if (!find_insn(file, sec, func->offset)) {
489                                 WARN("%s(): can't find starting instruction",
490                                      func->name);
491                                 return -1;
492                         }
493
494                         sym_for_each_insn(file, func, insn) {
495                                 insn->sym = func;
496                                 if (func->type == STT_FUNC &&
497                                     insn->type == INSN_ENDBR &&
498                                     list_empty(&insn->call_node)) {
499                                         if (insn->offset == func->offset) {
500                                                 list_add_tail(&insn->call_node, &file->endbr_list);
501                                                 file->nr_endbr++;
502                                         } else {
503                                                 file->nr_endbr_int++;
504                                         }
505                                 }
506                         }
507                 }
508         }
509
510         if (opts.stats)
511                 printf("nr_insns: %lu\n", nr_insns);
512
513         return 0;
514 }
515
516 /*
517  * Read the pv_ops[] .data table to find the static initialized values.
518  * Read the pv_ops[] .data table to find the statically initialized values.
519 static int add_pv_ops(struct objtool_file *file, const char *symname)
520 {
521         struct symbol *sym, *func;
522         unsigned long off, end;
523         struct reloc *rel;
524         int idx;
525
526         sym = find_symbol_by_name(file->elf, symname);
527         if (!sym)
528                 return 0;
529
530         off = sym->offset;
531         end = off + sym->len;
532         for (;;) {
533                 rel = find_reloc_by_dest_range(file->elf, sym->sec, off, end - off);
534                 if (!rel)
535                         break;
536
537                 func = rel->sym;
538                 if (func->type == STT_SECTION)
539                         func = find_symbol_by_offset(rel->sym->sec, rel->addend);
540
541                 idx = (rel->offset - sym->offset) / sizeof(unsigned long);
542
543                 objtool_pv_add(file, idx, func);
544
545                 off = rel->offset + 1;
546                 if (off > end)
547                         break;
548         }
549
550         return 0;
551 }
552
553 /*
554  * Allocate and initialize file->pv_ops[].
555  */
556 static int init_pv_ops(struct objtool_file *file)
557 {
558         static const char *pv_ops_tables[] = {
559                 "pv_ops",
560                 "xen_cpu_ops",
561                 "xen_irq_ops",
562                 "xen_mmu_ops",
563                 NULL,
564         };
565         const char *pv_ops;
566         struct symbol *sym;
567         int idx, nr;
568
569         if (!opts.noinstr)
570                 return 0;
571
572         file->pv_ops = NULL;
573
574         sym = find_symbol_by_name(file->elf, "pv_ops");
575         if (!sym)
576                 return 0;
577
578         nr = sym->len / sizeof(unsigned long);
579         file->pv_ops = calloc(sizeof(struct pv_state), nr);
580         if (!file->pv_ops)
581                 return -1;
582
583         for (idx = 0; idx < nr; idx++)
584                 INIT_LIST_HEAD(&file->pv_ops[idx].targets);
585
586         for (idx = 0; (pv_ops = pv_ops_tables[idx]); idx++)
587                 add_pv_ops(file, pv_ops);
588
589         return 0;
590 }
591
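/*
 * Find the last decoded instruction in a section by probing the final few
 * byte offsets, from the end backwards, for an instruction start.
 */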
592 static struct instruction *find_last_insn(struct objtool_file *file,
593                                           struct section *sec)
594 {
595         struct instruction *insn = NULL;
596         unsigned int offset;
597         unsigned int end = (sec->sh.sh_size > 10) ? sec->sh.sh_size - 10 : 0;
598
599         for (offset = sec->sh.sh_size - 1; offset >= end && !insn; offset--)
600                 insn = find_insn(file, sec, offset);
601
602         return insn;
603 }
604
605 /*
606  * Mark "ud2" instructions and manually annotated dead ends.
607  */
608 static int add_dead_ends(struct objtool_file *file)
609 {
610         struct section *sec;
611         struct reloc *reloc;
612         struct instruction *insn;
613
614         /*
615          * Check for manually annotated dead ends.
616          */
617         sec = find_section_by_name(file->elf, ".rela.discard.unreachable");
618         if (!sec)
619                 goto reachable;
620
621         list_for_each_entry(reloc, &sec->reloc_list, list) {
622                 if (reloc->sym->type != STT_SECTION) {
623                         WARN("unexpected relocation symbol type in %s", sec->name);
624                         return -1;
625                 }
626                 insn = find_insn(file, reloc->sym->sec, reloc->addend);
627                 if (insn)
628                         insn = prev_insn_same_sec(file, insn);
629                 else if (reloc->addend == reloc->sym->sec->sh.sh_size) {
630                         insn = find_last_insn(file, reloc->sym->sec);
631                         if (!insn) {
632                                 WARN("can't find unreachable insn at %s+0x%" PRIx64,
633                                      reloc->sym->sec->name, reloc->addend);
634                                 return -1;
635                         }
636                 } else {
637                         WARN("can't find unreachable insn at %s+0x%" PRIx64,
638                              reloc->sym->sec->name, reloc->addend);
639                         return -1;
640                 }
641
642                 insn->dead_end = true;
643         }
644
645 reachable:
646         /*
647          * These manually annotated reachable checks are needed for GCC 4.4,
648          * where the Linux unreachable() macro isn't supported.  In that case
649          * GCC doesn't know the "ud2" is fatal, so it generates code as if it's
650          * not a dead end.
651          */
652         sec = find_section_by_name(file->elf, ".rela.discard.reachable");
653         if (!sec)
654                 return 0;
655
656         list_for_each_entry(reloc, &sec->reloc_list, list) {
657                 if (reloc->sym->type != STT_SECTION) {
658                         WARN("unexpected relocation symbol type in %s", sec->name);
659                         return -1;
660                 }
661                 insn = find_insn(file, reloc->sym->sec, reloc->addend);
662                 if (insn)
663                         insn = prev_insn_same_sec(file, insn);
664                 else if (reloc->addend == reloc->sym->sec->sh.sh_size) {
665                         insn = find_last_insn(file, reloc->sym->sec);
666                         if (!insn) {
667                                 WARN("can't find reachable insn at %s+0x%" PRIx64,
668                                      reloc->sym->sec->name, reloc->addend);
669                                 return -1;
670                         }
671                 } else {
672                         WARN("can't find reachable insn at %s+0x%" PRIx64,
673                              reloc->sym->sec->name, reloc->addend);
674                         return -1;
675                 }
676
677                 insn->dead_end = false;
678         }
679
680         return 0;
681 }
682
683 static int create_static_call_sections(struct objtool_file *file)
684 {
685         struct section *sec;
686         struct static_call_site *site;
687         struct instruction *insn;
688         struct symbol *key_sym;
689         char *key_name, *tmp;
690         int idx;
691
692         sec = find_section_by_name(file->elf, ".static_call_sites");
693         if (sec) {
694                 INIT_LIST_HEAD(&file->static_call_list);
695                 WARN("file already has .static_call_sites section, skipping");
696                 return 0;
697         }
698
699         if (list_empty(&file->static_call_list))
700                 return 0;
701
702         idx = 0;
703         list_for_each_entry(insn, &file->static_call_list, call_node)
704                 idx++;
705
706         sec = elf_create_section(file->elf, ".static_call_sites", SHF_WRITE,
707                                  sizeof(struct static_call_site), idx);
708         if (!sec)
709                 return -1;
710
711         idx = 0;
712         list_for_each_entry(insn, &file->static_call_list, call_node) {
713
714                 site = (struct static_call_site *)sec->data->d_buf + idx;
715                 memset(site, 0, sizeof(struct static_call_site));
716
717                 /* populate reloc for 'addr' */
718                 if (elf_add_reloc_to_insn(file->elf, sec,
719                                           idx * sizeof(struct static_call_site),
720                                           R_X86_64_PC32,
721                                           insn->sec, insn->offset))
722                         return -1;
723
724                 /* find key symbol */
725                 key_name = strdup(insn_call_dest(insn)->name);
726                 if (!key_name) {
727                         perror("strdup");
728                         return -1;
729                 }
730                 if (strncmp(key_name, STATIC_CALL_TRAMP_PREFIX_STR,
731                             STATIC_CALL_TRAMP_PREFIX_LEN)) {
732                         WARN("static_call: trampoline name malformed: %s", key_name);
733                         free(key_name);
734                         return -1;
735                 }
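                /*
                 * Derive the static_call_key symbol name from the trampoline
                 * name by overwriting the trampoline prefix with the key
                 * prefix in place.
                 */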
736                 tmp = key_name + STATIC_CALL_TRAMP_PREFIX_LEN - STATIC_CALL_KEY_PREFIX_LEN;
737                 memcpy(tmp, STATIC_CALL_KEY_PREFIX_STR, STATIC_CALL_KEY_PREFIX_LEN);
738
739                 key_sym = find_symbol_by_name(file->elf, tmp);
740                 if (!key_sym) {
741                         if (!opts.module) {
742                                 WARN("static_call: can't find static_call_key symbol: %s", tmp);
743                                 free(key_name);
744                                 return -1;
745                         }
746
747                         /*
748                          * For modules, the key might not be exported, which
749                          * means the module can make static calls but isn't
750                          * allowed to change them.
751                          *
752                          * In that case we temporarily set the key to be the
753                          * trampoline address.  This is fixed up in
754                          * static_call_add_module().
755                          */
756                         key_sym = insn_call_dest(insn);
757                 }
758                 free(key_name);
759
760                 /* populate reloc for 'key' */
761                 if (elf_add_reloc(file->elf, sec,
762                                   idx * sizeof(struct static_call_site) + 4,
763                                   R_X86_64_PC32, key_sym,
764                                   is_sibling_call(insn) * STATIC_CALL_SITE_TAIL))
765                         return -1;
766
767                 idx++;
768         }
769
770         return 0;
771 }
772
773 static int create_retpoline_sites_sections(struct objtool_file *file)
774 {
775         struct instruction *insn;
776         struct section *sec;
777         int idx;
778
779         sec = find_section_by_name(file->elf, ".retpoline_sites");
780         if (sec) {
781                 WARN("file already has .retpoline_sites, skipping");
782                 return 0;
783         }
784
785         idx = 0;
786         list_for_each_entry(insn, &file->retpoline_call_list, call_node)
787                 idx++;
788
789         if (!idx)
790                 return 0;
791
792         sec = elf_create_section(file->elf, ".retpoline_sites", 0,
793                                  sizeof(int), idx);
794         if (!sec) {
795                 WARN("elf_create_section: .retpoline_sites");
796                 return -1;
797         }
798
799         idx = 0;
800         list_for_each_entry(insn, &file->retpoline_call_list, call_node) {
801
802                 int *site = (int *)sec->data->d_buf + idx;
803                 *site = 0;
804
805                 if (elf_add_reloc_to_insn(file->elf, sec,
806                                           idx * sizeof(int),
807                                           R_X86_64_PC32,
808                                           insn->sec, insn->offset)) {
809                         WARN("elf_add_reloc_to_insn: .retpoline_sites");
810                         return -1;
811                 }
812
813                 idx++;
814         }
815
816         return 0;
817 }
818
819 static int create_return_sites_sections(struct objtool_file *file)
820 {
821         struct instruction *insn;
822         struct section *sec;
823         int idx;
824
825         sec = find_section_by_name(file->elf, ".return_sites");
826         if (sec) {
827                 WARN("file already has .return_sites, skipping");
828                 return 0;
829         }
830
831         idx = 0;
832         list_for_each_entry(insn, &file->return_thunk_list, call_node)
833                 idx++;
834
835         if (!idx)
836                 return 0;
837
838         sec = elf_create_section(file->elf, ".return_sites", 0,
839                                  sizeof(int), idx);
840         if (!sec) {
841                 WARN("elf_create_section: .return_sites");
842                 return -1;
843         }
844
845         idx = 0;
846         list_for_each_entry(insn, &file->return_thunk_list, call_node) {
847
848                 int *site = (int *)sec->data->d_buf + idx;
849                 *site = 0;
850
851                 if (elf_add_reloc_to_insn(file->elf, sec,
852                                           idx * sizeof(int),
853                                           R_X86_64_PC32,
854                                           insn->sec, insn->offset)) {
855                         WARN("elf_add_reloc_to_insn: .return_sites");
856                         return -1;
857                 }
858
859                 idx++;
860         }
861
862         return 0;
863 }
864
865 static int create_ibt_endbr_seal_sections(struct objtool_file *file)
866 {
867         struct instruction *insn;
868         struct section *sec;
869         int idx;
870
871         sec = find_section_by_name(file->elf, ".ibt_endbr_seal");
872         if (sec) {
873                 WARN("file already has .ibt_endbr_seal, skipping");
874                 return 0;
875         }
876
877         idx = 0;
878         list_for_each_entry(insn, &file->endbr_list, call_node)
879                 idx++;
880
881         if (opts.stats) {
882                 printf("ibt: ENDBR at function start: %d\n", file->nr_endbr);
883                 printf("ibt: ENDBR inside functions:  %d\n", file->nr_endbr_int);
884                 printf("ibt: superfluous ENDBR:       %d\n", idx);
885         }
886
887         if (!idx)
888                 return 0;
889
890         sec = elf_create_section(file->elf, ".ibt_endbr_seal", 0,
891                                  sizeof(int), idx);
892         if (!sec) {
893                 WARN("elf_create_section: .ibt_endbr_seal");
894                 return -1;
895         }
896
897         idx = 0;
898         list_for_each_entry(insn, &file->endbr_list, call_node) {
899
900                 int *site = (int *)sec->data->d_buf + idx;
901                 struct symbol *sym = insn->sym;
902                 *site = 0;
903
904                 if (opts.module && sym && sym->type == STT_FUNC &&
905                     insn->offset == sym->offset &&
906                     (!strcmp(sym->name, "init_module") ||
907                      !strcmp(sym->name, "cleanup_module")))
908                         WARN("%s(): not an indirect call target", sym->name);
909
910                 if (elf_add_reloc_to_insn(file->elf, sec,
911                                           idx * sizeof(int),
912                                           R_X86_64_PC32,
913                                           insn->sec, insn->offset)) {
914                         WARN("elf_add_reloc_to_insn: .ibt_endbr_seal");
915                         return -1;
916                 }
917
918                 idx++;
919         }
920
921         return 0;
922 }
923
924 static int create_cfi_sections(struct objtool_file *file)
925 {
926         struct section *sec, *s;
927         struct symbol *sym;
928         unsigned int *loc;
929         int idx;
930
931         sec = find_section_by_name(file->elf, ".cfi_sites");
932         if (sec) {
933                 INIT_LIST_HEAD(&file->call_list);
934                 WARN("file already has .cfi_sites section, skipping");
935                 return 0;
936         }
937
938         idx = 0;
939         for_each_sec(file, s) {
940                 if (!s->text)
941                         continue;
942
943                 list_for_each_entry(sym, &s->symbol_list, list) {
944                         if (sym->type != STT_FUNC)
945                                 continue;
946
947                         if (strncmp(sym->name, "__cfi_", 6))
948                                 continue;
949
950                         idx++;
951                 }
952         }
953
954         sec = elf_create_section(file->elf, ".cfi_sites", 0, sizeof(unsigned int), idx);
955         if (!sec)
956                 return -1;
957
958         idx = 0;
959         for_each_sec(file, s) {
960                 if (!s->text)
961                         continue;
962
963                 list_for_each_entry(sym, &s->symbol_list, list) {
964                         if (sym->type != STT_FUNC)
965                                 continue;
966
967                         if (strncmp(sym->name, "__cfi_", 6))
968                                 continue;
969
970                         loc = (unsigned int *)sec->data->d_buf + idx;
971                         memset(loc, 0, sizeof(unsigned int));
972
973                         if (elf_add_reloc_to_insn(file->elf, sec,
974                                                   idx * sizeof(unsigned int),
975                                                   R_X86_64_PC32,
976                                                   s, sym->offset))
977                                 return -1;
978
979                         idx++;
980                 }
981         }
982
983         return 0;
984 }
985
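/*
 * Emit the __mcount_loc table: one pointer-sized entry per recorded
 * mcount/fentry call site, each relocated to its call instruction.
 */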
986 static int create_mcount_loc_sections(struct objtool_file *file)
987 {
988         int addrsize = elf_class_addrsize(file->elf);
989         struct instruction *insn;
990         struct section *sec;
991         int idx;
992
993         sec = find_section_by_name(file->elf, "__mcount_loc");
994         if (sec) {
995                 INIT_LIST_HEAD(&file->mcount_loc_list);
996                 WARN("file already has __mcount_loc section, skipping");
997                 return 0;
998         }
999
1000         if (list_empty(&file->mcount_loc_list))
1001                 return 0;
1002
1003         idx = 0;
1004         list_for_each_entry(insn, &file->mcount_loc_list, call_node)
1005                 idx++;
1006
1007         sec = elf_create_section(file->elf, "__mcount_loc", 0, addrsize, idx);
1008         if (!sec)
1009                 return -1;
1010
1011         sec->sh.sh_addralign = addrsize;
1012
1013         idx = 0;
1014         list_for_each_entry(insn, &file->mcount_loc_list, call_node) {
1015                 void *loc;
1016
1017                 loc = sec->data->d_buf + idx;
1018                 memset(loc, 0, addrsize);
1019
1020                 if (elf_add_reloc_to_insn(file->elf, sec, idx,
1021                                           addrsize == sizeof(u64) ? R_ABS64 : R_ABS32,
1022                                           insn->sec, insn->offset))
1023                         return -1;
1024
1025                 idx += addrsize;
1026         }
1027
1028         return 0;
1029 }
1030
1031 static int create_direct_call_sections(struct objtool_file *file)
1032 {
1033         struct instruction *insn;
1034         struct section *sec;
1035         unsigned int *loc;
1036         int idx;
1037
1038         sec = find_section_by_name(file->elf, ".call_sites");
1039         if (sec) {
1040                 INIT_LIST_HEAD(&file->call_list);
1041                 WARN("file already has .call_sites section, skipping");
1042                 return 0;
1043         }
1044
1045         if (list_empty(&file->call_list))
1046                 return 0;
1047
1048         idx = 0;
1049         list_for_each_entry(insn, &file->call_list, call_node)
1050                 idx++;
1051
1052         sec = elf_create_section(file->elf, ".call_sites", 0, sizeof(unsigned int), idx);
1053         if (!sec)
1054                 return -1;
1055
1056         idx = 0;
1057         list_for_each_entry(insn, &file->call_list, call_node) {
1058
1059                 loc = (unsigned int *)sec->data->d_buf + idx;
1060                 memset(loc, 0, sizeof(unsigned int));
1061
1062                 if (elf_add_reloc_to_insn(file->elf, sec,
1063                                           idx * sizeof(unsigned int),
1064                                           R_X86_64_PC32,
1065                                           insn->sec, insn->offset))
1066                         return -1;
1067
1068                 idx++;
1069         }
1070
1071         return 0;
1072 }
1073
1074 /*
1075  * Warnings shouldn't be reported for ignored functions.
1076  */
1077 static void add_ignores(struct objtool_file *file)
1078 {
1079         struct instruction *insn;
1080         struct section *sec;
1081         struct symbol *func;
1082         struct reloc *reloc;
1083
1084         sec = find_section_by_name(file->elf, ".rela.discard.func_stack_frame_non_standard");
1085         if (!sec)
1086                 return;
1087
1088         list_for_each_entry(reloc, &sec->reloc_list, list) {
1089                 switch (reloc->sym->type) {
1090                 case STT_FUNC:
1091                         func = reloc->sym;
1092                         break;
1093
1094                 case STT_SECTION:
1095                         func = find_func_by_offset(reloc->sym->sec, reloc->addend);
1096                         if (!func)
1097                                 continue;
1098                         break;
1099
1100                 default:
1101                         WARN("unexpected relocation symbol type in %s: %d", sec->name, reloc->sym->type);
1102                         continue;
1103                 }
1104
1105                 func_for_each_insn(file, func, insn)
1106                         insn->ignore = true;
1107         }
1108 }
1109
1110 /*
1111  * This is a whitelist of functions that are allowed to be called with AC set.
1112  * The list is meant to be minimal and only contains the compiler instrumentation
1113  * ABI and a few functions used to implement the *_{to,from}_user() functions.
1114  *
1115  * These functions must not directly change AC, but may PUSHF/POPF.
1116  */
1117 static const char *uaccess_safe_builtin[] = {
1118         /* KASAN */
1119         "kasan_report",
1120         "kasan_check_range",
1121         /* KASAN out-of-line */
1122         "__asan_loadN_noabort",
1123         "__asan_load1_noabort",
1124         "__asan_load2_noabort",
1125         "__asan_load4_noabort",
1126         "__asan_load8_noabort",
1127         "__asan_load16_noabort",
1128         "__asan_storeN_noabort",
1129         "__asan_store1_noabort",
1130         "__asan_store2_noabort",
1131         "__asan_store4_noabort",
1132         "__asan_store8_noabort",
1133         "__asan_store16_noabort",
1134         "__kasan_check_read",
1135         "__kasan_check_write",
1136         /* KASAN in-line */
1137         "__asan_report_load_n_noabort",
1138         "__asan_report_load1_noabort",
1139         "__asan_report_load2_noabort",
1140         "__asan_report_load4_noabort",
1141         "__asan_report_load8_noabort",
1142         "__asan_report_load16_noabort",
1143         "__asan_report_store_n_noabort",
1144         "__asan_report_store1_noabort",
1145         "__asan_report_store2_noabort",
1146         "__asan_report_store4_noabort",
1147         "__asan_report_store8_noabort",
1148         "__asan_report_store16_noabort",
1149         /* KCSAN */
1150         "__kcsan_check_access",
1151         "__kcsan_mb",
1152         "__kcsan_wmb",
1153         "__kcsan_rmb",
1154         "__kcsan_release",
1155         "kcsan_found_watchpoint",
1156         "kcsan_setup_watchpoint",
1157         "kcsan_check_scoped_accesses",
1158         "kcsan_disable_current",
1159         "kcsan_enable_current_nowarn",
1160         /* KCSAN/TSAN */
1161         "__tsan_func_entry",
1162         "__tsan_func_exit",
1163         "__tsan_read_range",
1164         "__tsan_write_range",
1165         "__tsan_read1",
1166         "__tsan_read2",
1167         "__tsan_read4",
1168         "__tsan_read8",
1169         "__tsan_read16",
1170         "__tsan_write1",
1171         "__tsan_write2",
1172         "__tsan_write4",
1173         "__tsan_write8",
1174         "__tsan_write16",
1175         "__tsan_read_write1",
1176         "__tsan_read_write2",
1177         "__tsan_read_write4",
1178         "__tsan_read_write8",
1179         "__tsan_read_write16",
1180         "__tsan_volatile_read1",
1181         "__tsan_volatile_read2",
1182         "__tsan_volatile_read4",
1183         "__tsan_volatile_read8",
1184         "__tsan_volatile_read16",
1185         "__tsan_volatile_write1",
1186         "__tsan_volatile_write2",
1187         "__tsan_volatile_write4",
1188         "__tsan_volatile_write8",
1189         "__tsan_volatile_write16",
1190         "__tsan_atomic8_load",
1191         "__tsan_atomic16_load",
1192         "__tsan_atomic32_load",
1193         "__tsan_atomic64_load",
1194         "__tsan_atomic8_store",
1195         "__tsan_atomic16_store",
1196         "__tsan_atomic32_store",
1197         "__tsan_atomic64_store",
1198         "__tsan_atomic8_exchange",
1199         "__tsan_atomic16_exchange",
1200         "__tsan_atomic32_exchange",
1201         "__tsan_atomic64_exchange",
1202         "__tsan_atomic8_fetch_add",
1203         "__tsan_atomic16_fetch_add",
1204         "__tsan_atomic32_fetch_add",
1205         "__tsan_atomic64_fetch_add",
1206         "__tsan_atomic8_fetch_sub",
1207         "__tsan_atomic16_fetch_sub",
1208         "__tsan_atomic32_fetch_sub",
1209         "__tsan_atomic64_fetch_sub",
1210         "__tsan_atomic8_fetch_and",
1211         "__tsan_atomic16_fetch_and",
1212         "__tsan_atomic32_fetch_and",
1213         "__tsan_atomic64_fetch_and",
1214         "__tsan_atomic8_fetch_or",
1215         "__tsan_atomic16_fetch_or",
1216         "__tsan_atomic32_fetch_or",
1217         "__tsan_atomic64_fetch_or",
1218         "__tsan_atomic8_fetch_xor",
1219         "__tsan_atomic16_fetch_xor",
1220         "__tsan_atomic32_fetch_xor",
1221         "__tsan_atomic64_fetch_xor",
1222         "__tsan_atomic8_fetch_nand",
1223         "__tsan_atomic16_fetch_nand",
1224         "__tsan_atomic32_fetch_nand",
1225         "__tsan_atomic64_fetch_nand",
1226         "__tsan_atomic8_compare_exchange_strong",
1227         "__tsan_atomic16_compare_exchange_strong",
1228         "__tsan_atomic32_compare_exchange_strong",
1229         "__tsan_atomic64_compare_exchange_strong",
1230         "__tsan_atomic8_compare_exchange_weak",
1231         "__tsan_atomic16_compare_exchange_weak",
1232         "__tsan_atomic32_compare_exchange_weak",
1233         "__tsan_atomic64_compare_exchange_weak",
1234         "__tsan_atomic8_compare_exchange_val",
1235         "__tsan_atomic16_compare_exchange_val",
1236         "__tsan_atomic32_compare_exchange_val",
1237         "__tsan_atomic64_compare_exchange_val",
1238         "__tsan_atomic_thread_fence",
1239         "__tsan_atomic_signal_fence",
1240         /* KCOV */
1241         "write_comp_data",
1242         "check_kcov_mode",
1243         "__sanitizer_cov_trace_pc",
1244         "__sanitizer_cov_trace_const_cmp1",
1245         "__sanitizer_cov_trace_const_cmp2",
1246         "__sanitizer_cov_trace_const_cmp4",
1247         "__sanitizer_cov_trace_const_cmp8",
1248         "__sanitizer_cov_trace_cmp1",
1249         "__sanitizer_cov_trace_cmp2",
1250         "__sanitizer_cov_trace_cmp4",
1251         "__sanitizer_cov_trace_cmp8",
1252         "__sanitizer_cov_trace_switch",
1253         /* KMSAN */
1254         "kmsan_copy_to_user",
1255         "kmsan_report",
1256         "kmsan_unpoison_entry_regs",
1257         "kmsan_unpoison_memory",
1258         "__msan_chain_origin",
1259         "__msan_get_context_state",
1260         "__msan_instrument_asm_store",
1261         "__msan_metadata_ptr_for_load_1",
1262         "__msan_metadata_ptr_for_load_2",
1263         "__msan_metadata_ptr_for_load_4",
1264         "__msan_metadata_ptr_for_load_8",
1265         "__msan_metadata_ptr_for_load_n",
1266         "__msan_metadata_ptr_for_store_1",
1267         "__msan_metadata_ptr_for_store_2",
1268         "__msan_metadata_ptr_for_store_4",
1269         "__msan_metadata_ptr_for_store_8",
1270         "__msan_metadata_ptr_for_store_n",
1271         "__msan_poison_alloca",
1272         "__msan_warning",
1273         /* UBSAN */
1274         "ubsan_type_mismatch_common",
1275         "__ubsan_handle_type_mismatch",
1276         "__ubsan_handle_type_mismatch_v1",
1277         "__ubsan_handle_shift_out_of_bounds",
1278         "__ubsan_handle_load_invalid_value",
1279         /* misc */
1280         "csum_partial_copy_generic",
1281         "copy_mc_fragile",
1282         "copy_mc_fragile_handle_tail",
1283         "copy_mc_enhanced_fast_string",
1284         "ftrace_likely_update", /* CONFIG_TRACE_BRANCH_PROFILING */
1285         "clear_user_erms",
1286         "clear_user_rep_good",
1287         "clear_user_original",
1288         NULL
1289 };
1290
1291 static void add_uaccess_safe(struct objtool_file *file)
1292 {
1293         struct symbol *func;
1294         const char **name;
1295
1296         if (!opts.uaccess)
1297                 return;
1298
1299         for (name = uaccess_safe_builtin; *name; name++) {
1300                 func = find_symbol_by_name(file->elf, *name);
1301                 if (!func)
1302                         continue;
1303
1304                 func->uaccess_safe = true;
1305         }
1306 }
1307
1308 /*
1309  * FIXME: For now, just ignore any alternatives which add retpolines.  This is
1310  * a temporary hack, as it doesn't allow ORC to unwind from inside a retpoline.
1311  * But it at least allows objtool to understand the control flow *around* the
1312  * retpoline.
1313  */
1314 static int add_ignore_alternatives(struct objtool_file *file)
1315 {
1316         struct section *sec;
1317         struct reloc *reloc;
1318         struct instruction *insn;
1319
1320         sec = find_section_by_name(file->elf, ".rela.discard.ignore_alts");
1321         if (!sec)
1322                 return 0;
1323
1324         list_for_each_entry(reloc, &sec->reloc_list, list) {
1325                 if (reloc->sym->type != STT_SECTION) {
1326                         WARN("unexpected relocation symbol type in %s", sec->name);
1327                         return -1;
1328                 }
1329
1330                 insn = find_insn(file, reloc->sym->sec, reloc->addend);
1331                 if (!insn) {
1332                         WARN("bad .discard.ignore_alts entry");
1333                         return -1;
1334                 }
1335
1336                 insn->ignore_alts = true;
1337         }
1338
1339         return 0;
1340 }
1341
1342 __weak bool arch_is_retpoline(struct symbol *sym)
1343 {
1344         return false;
1345 }
1346
1347 __weak bool arch_is_rethunk(struct symbol *sym)
1348 {
1349         return false;
1350 }
1351
1352 static struct reloc *insn_reloc(struct objtool_file *file, struct instruction *insn)
1353 {
1354         struct reloc *reloc;
1355
1356         if (insn->no_reloc)
1357                 return NULL;
1358
1359         if (!file)
1360                 return NULL;
1361
1362         reloc = find_reloc_by_dest_range(file->elf, insn->sec,
1363                                          insn->offset, insn->len);
1364         if (!reloc) {
1365                 insn->no_reloc = 1;
1366                 return NULL;
1367         }
1368
1369         return reloc;
1370 }
1371
1372 static void remove_insn_ops(struct instruction *insn)
1373 {
1374         struct stack_op *op, *next;
1375
1376         for (op = insn->stack_ops; op; op = next) {
1377                 next = op->next;
1378                 free(op);
1379         }
1380         insn->stack_ops = NULL;
1381 }
1382
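/*
 * Classify a call site: queue it on the static call, retpoline, mcount or
 * direct-call list as appropriate, NOP out instrumentation calls in noinstr
 * text, and mark calls to dead-end functions.
 */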
1383 static void annotate_call_site(struct objtool_file *file,
1384                                struct instruction *insn, bool sibling)
1385 {
1386         struct reloc *reloc = insn_reloc(file, insn);
1387         struct symbol *sym = insn_call_dest(insn);
1388
1389         if (!sym)
1390                 sym = reloc->sym;
1391
1392         /*
1393          * Alternative replacement code is just template code which is
1394          * sometimes copied to the original instruction. For now, don't
1395          * annotate it. (In the future we might consider annotating the
1396          * original instruction if/when it ever makes sense to do so.)
1397          */
1398         if (!strcmp(insn->sec->name, ".altinstr_replacement"))
1399                 return;
1400
1401         if (sym->static_call_tramp) {
1402                 list_add_tail(&insn->call_node, &file->static_call_list);
1403                 return;
1404         }
1405
1406         if (sym->retpoline_thunk) {
1407                 list_add_tail(&insn->call_node, &file->retpoline_call_list);
1408                 return;
1409         }
1410
1411         /*
1412          * Many compilers cannot disable KCOV or sanitizer calls with a function
1413          * attribute, so they need a little help: NOP out any such calls from
1414          * noinstr text.
1415          */
1416         if (opts.hack_noinstr && insn->sec->noinstr && sym->profiling_func) {
1417                 if (reloc) {
1418                         reloc->type = R_NONE;
1419                         elf_write_reloc(file->elf, reloc);
1420                 }
1421
1422                 elf_write_insn(file->elf, insn->sec,
1423                                insn->offset, insn->len,
1424                                sibling ? arch_ret_insn(insn->len)
1425                                        : arch_nop_insn(insn->len));
1426
1427                 insn->type = sibling ? INSN_RETURN : INSN_NOP;
1428
1429                 if (sibling) {
1430                         /*
1431                          * We've replaced the tail-call JMP insn with two new
1432                          * insns: RET; INT3, except we only have a single struct
1433                          * insn here. Mark it retpoline_safe to avoid the SLS
1434                          * warning, instead of adding another insn.
1435                          */
1436                         insn->retpoline_safe = true;
1437                 }
1438
1439                 return;
1440         }
1441
1442         if (opts.mcount && sym->fentry) {
1443                 if (sibling)
1444                         WARN_FUNC("Tail call to __fentry__ !?!?", insn->sec, insn->offset);
1445                 if (opts.mnop) {
1446                         if (reloc) {
1447                                 reloc->type = R_NONE;
1448                                 elf_write_reloc(file->elf, reloc);
1449                         }
1450
1451                         elf_write_insn(file->elf, insn->sec,
1452                                        insn->offset, insn->len,
1453                                        arch_nop_insn(insn->len));
1454
1455                         insn->type = INSN_NOP;
1456                 }
1457
1458                 list_add_tail(&insn->call_node, &file->mcount_loc_list);
1459                 return;
1460         }
1461
1462         if (insn->type == INSN_CALL && !insn->sec->init)
1463                 list_add_tail(&insn->call_node, &file->call_list);
1464
1465         if (!sibling && dead_end_function(file, sym))
1466                 insn->dead_end = true;
1467 }
1468
1469 static void add_call_dest(struct objtool_file *file, struct instruction *insn,
1470                           struct symbol *dest, bool sibling)
1471 {
1472         insn->_call_dest = dest;
1473         if (!dest)
1474                 return;
1475
1476         /*
1477          * Whatever stack impact regular CALLs have should be undone
1478          * by the RETURN of the called function.
1479          *
1480          * Annotated intra-function calls retain the stack_ops but
1481          * are converted to JUMP, see read_intra_function_calls().
1482          */
1483         remove_insn_ops(insn);
1484
1485         annotate_call_site(file, insn, sibling);
1486 }
1487
1488 static void add_retpoline_call(struct objtool_file *file, struct instruction *insn)
1489 {
1490         /*
1491          * Retpoline calls/jumps are really dynamic calls/jumps in disguise,
1492          * so convert them accordingly.
1493          */
1494         switch (insn->type) {
1495         case INSN_CALL:
1496                 insn->type = INSN_CALL_DYNAMIC;
1497                 break;
1498         case INSN_JUMP_UNCONDITIONAL:
1499                 insn->type = INSN_JUMP_DYNAMIC;
1500                 break;
1501         case INSN_JUMP_CONDITIONAL:
1502                 insn->type = INSN_JUMP_DYNAMIC_CONDITIONAL;
1503                 break;
1504         default:
1505                 return;
1506         }
1507
1508         insn->retpoline_safe = true;
1509
1510         /*
1511          * Whatever stack impact regular CALLs have should be undone
1512          * by the RETURN of the called function.
1513          *
1514          * Annotated intra-function calls retain the stack_ops but
1515          * are converted to JUMP, see read_intra_function_calls().
1516          */
1517         remove_insn_ops(insn);
1518
1519         annotate_call_site(file, insn, false);
1520 }
1521
1522 static void add_return_call(struct objtool_file *file, struct instruction *insn, bool add)
1523 {
1524         /*
1525          * Return thunk tail calls are really just returns in disguise,
1526          * so convert them accordingly.
1527          */
1528         insn->type = INSN_RETURN;
1529         insn->retpoline_safe = true;
1530
1531         if (add)
1532                 list_add_tail(&insn->call_node, &file->return_thunk_list);
1533 }
1534
1535 static bool is_first_func_insn(struct objtool_file *file,
1536                                struct instruction *insn, struct symbol *sym)
1537 {
1538         if (insn->offset == sym->offset)
1539                 return true;
1540
1541         /* Allow direct CALL/JMP past ENDBR */
1542         if (opts.ibt) {
1543                 struct instruction *prev = prev_insn_same_sym(file, insn);
1544
1545                 if (prev && prev->type == INSN_ENDBR &&
1546                     insn->offset == sym->offset + prev->len)
1547                         return true;
1548         }
1549
1550         return false;
1551 }
1552
1553 /*
1554  * A sibling call is a tail-call to another symbol, as opposed to a recursive
1555  * tail-call, which targets the same symbol.
1556  */
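/*
 * Rough example (hypothetical code, for illustration only):
 *
 *   foo:
 *     ...
 *     jmp bar        <-- sibling call (tail-call to another symbol)
 *
 *   baz:
 *     ...
 *     jmp baz        <-- recursive tail-call to the same symbol, not a sibling call
 */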
1557 static bool jump_is_sibling_call(struct objtool_file *file,
1558                                  struct instruction *from, struct instruction *to)
1559 {
1560         struct symbol *fs = from->sym;
1561         struct symbol *ts = to->sym;
1562
1563         /* Not a sibling call if from/to a symbol hole */
1564         if (!fs || !ts)
1565                 return false;
1566
1567         /* Not a sibling call if not targeting the start of a symbol. */
1568         if (!is_first_func_insn(file, to, ts))
1569                 return false;
1570
1571         /* Disallow sibling calls into STT_NOTYPE */
1572         if (ts->type == STT_NOTYPE)
1573                 return false;
1574
1575         /* Must not be self to be a sibling */
1576         return fs->pfunc != ts->pfunc;
1577 }
1578
1579 /*
1580  * Find the destination instructions for all jumps.
1581  */
1582 static int add_jump_destinations(struct objtool_file *file)
1583 {
1584         struct instruction *insn, *jump_dest;
1585         struct reloc *reloc;
1586         struct section *dest_sec;
1587         unsigned long dest_off;
1588
1589         for_each_insn(file, insn) {
1590                 if (insn->jump_dest) {
1591                         /*
1592                          * handle_group_alt() may have previously set
1593                          * 'jump_dest' for some alternatives.
1594                          */
1595                         continue;
1596                 }
1597                 if (!is_static_jump(insn))
1598                         continue;
1599
1600                 reloc = insn_reloc(file, insn);
1601                 if (!reloc) {
1602                         dest_sec = insn->sec;
1603                         dest_off = arch_jump_destination(insn);
1604                 } else if (reloc->sym->type == STT_SECTION) {
1605                         dest_sec = reloc->sym->sec;
1606                         dest_off = arch_dest_reloc_offset(reloc->addend);
1607                 } else if (reloc->sym->retpoline_thunk) {
1608                         add_retpoline_call(file, insn);
1609                         continue;
1610                 } else if (reloc->sym->return_thunk) {
1611                         add_return_call(file, insn, true);
1612                         continue;
1613                 } else if (insn_func(insn)) {
1614                         /*
1615                          * External sibling call or internal sibling call with
1616                          * STT_FUNC reloc.
1617                          */
1618                         add_call_dest(file, insn, reloc->sym, true);
1619                         continue;
1620                 } else if (reloc->sym->sec->idx) {
1621                         dest_sec = reloc->sym->sec;
1622                         dest_off = reloc->sym->sym.st_value +
1623                                    arch_dest_reloc_offset(reloc->addend);
1624                 } else {
1625                         /* non-func asm code jumping to another file */
1626                         continue;
1627                 }
1628
1629                 jump_dest = find_insn(file, dest_sec, dest_off);
1630                 if (!jump_dest) {
1631                         struct symbol *sym = find_symbol_by_offset(dest_sec, dest_off);
1632
1633                         /*
1634                          * This is a special case for zen_untrain_ret().
1635                          * It jumps to __x86_return_thunk(), but objtool
1636                          * can't find the thunk's starting RET
1637                          * instruction, because the RET is also in the
1638                          * middle of another instruction.  Objtool only
1639                          * knows about the outer instruction.
1640                          */
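                        /*
                         * Roughly (illustrative of the byte layout only): the
                         * untraining sequence ends with a lone 0xf6 byte, so
                         * the thunk's RET (0xc3) and the following INT3 (0xcc)
                         * decode together with it as "test $0xcc, %bl", and no
                         * separate insn exists at the thunk's own offset.
                         */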
1641                         if (sym && sym->return_thunk) {
1642                                 add_return_call(file, insn, false);
1643                                 continue;
1644                         }
1645
1646                         WARN_FUNC("can't find jump dest instruction at %s+0x%lx",
1647                                   insn->sec, insn->offset, dest_sec->name,
1648                                   dest_off);
1649                         return -1;
1650                 }
1651
1652                 /*
1653                  * Cross-function jump.
1654                  */
1655                 if (insn_func(insn) && insn_func(jump_dest) &&
1656                     insn_func(insn) != insn_func(jump_dest)) {
1657
1658                         /*
1659                          * For GCC 8+, create parent/child links for any cold
1660                          * subfunctions.  This is _mostly_ redundant with a
1661                          * similar initialization in read_symbols().
1662                          *
1663                          * If a function has aliases, we want the *first* such
1664                          * function in the symbol table to be the subfunction's
1665                          * parent.  In that case we overwrite the
1666                          * initialization done in read_symbols().
1667                          *
1668                          * However this code can't completely replace the
1669                          * read_symbols() code because this doesn't detect the
1670                          * case where the parent function's only reference to a
1671                          * subfunction is through a jump table.
1672                          */
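                        /*
                         * Sketch of the resulting links (hypothetical names):
                         *
                         *   foo        --cfunc--> foo.cold
                         *   foo.cold   --pfunc--> foo
                         *
                         * i.e. the jumping function becomes the parent and the
                         * .cold jump target becomes its child subfunction.
                         */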
1673                         if (!strstr(insn_func(insn)->name, ".cold") &&
1674                             strstr(insn_func(jump_dest)->name, ".cold")) {
1675                                 insn_func(insn)->cfunc = insn_func(jump_dest);
1676                                 insn_func(jump_dest)->pfunc = insn_func(insn);
1677                         }
1678                 }
1679
1680                 if (jump_is_sibling_call(file, insn, jump_dest)) {
1681                         /*
1682                          * Internal sibling call without reloc or with
1683                          * STT_SECTION reloc.
1684                          */
1685                         add_call_dest(file, insn, insn_func(jump_dest), true);
1686                         continue;
1687                 }
1688
1689                 insn->jump_dest = jump_dest;
1690         }
1691
1692         return 0;
1693 }
1694
1695 static struct symbol *find_call_destination(struct section *sec, unsigned long offset)
1696 {
1697         struct symbol *call_dest;
1698
1699         call_dest = find_func_by_offset(sec, offset);
1700         if (!call_dest)
1701                 call_dest = find_symbol_by_offset(sec, offset);
1702
1703         return call_dest;
1704 }
1705
1706 /*
1707  * Find the destination instructions for all calls.
1708  */
1709 static int add_call_destinations(struct objtool_file *file)
1710 {
1711         struct instruction *insn;
1712         unsigned long dest_off;
1713         struct symbol *dest;
1714         struct reloc *reloc;
1715
1716         for_each_insn(file, insn) {
1717                 if (insn->type != INSN_CALL)
1718                         continue;
1719
1720                 reloc = insn_reloc(file, insn);
1721                 if (!reloc) {
1722                         dest_off = arch_jump_destination(insn);
1723                         dest = find_call_destination(insn->sec, dest_off);
1724
1725                         add_call_dest(file, insn, dest, false);
1726
1727                         if (insn->ignore)
1728                                 continue;
1729
1730                         if (!insn_call_dest(insn)) {
1731                                 WARN_FUNC("unannotated intra-function call", insn->sec, insn->offset);
1732                                 return -1;
1733                         }
1734
1735                         if (insn_func(insn) && insn_call_dest(insn)->type != STT_FUNC) {
1736                                 WARN_FUNC("unsupported call to non-function",
1737                                           insn->sec, insn->offset);
1738                                 return -1;
1739                         }
1740
1741                 } else if (reloc->sym->type == STT_SECTION) {
1742                         dest_off = arch_dest_reloc_offset(reloc->addend);
1743                         dest = find_call_destination(reloc->sym->sec, dest_off);
1744                         if (!dest) {
1745                                 WARN_FUNC("can't find call dest symbol at %s+0x%lx",
1746                                           insn->sec, insn->offset,
1747                                           reloc->sym->sec->name,
1748                                           dest_off);
1749                                 return -1;
1750                         }
1751
1752                         add_call_dest(file, insn, dest, false);
1753
1754                 } else if (reloc->sym->retpoline_thunk) {
1755                         add_retpoline_call(file, insn);
1756
1757                 } else
1758                         add_call_dest(file, insn, reloc->sym, false);
1759         }
1760
1761         return 0;
1762 }
1763
1764 /*
1765  * The .alternatives section requires some extra special care over and above
1766  * other special sections because alternatives are patched in place.
1767  */
1768 static int handle_group_alt(struct objtool_file *file,
1769                             struct special_alt *special_alt,
1770                             struct instruction *orig_insn,
1771                             struct instruction **new_insn)
1772 {
1773         struct instruction *last_new_insn = NULL, *insn, *nop = NULL;
1774         struct alt_group *orig_alt_group, *new_alt_group;
1775         unsigned long dest_off;
1776
1777         orig_alt_group = orig_insn->alt_group;
1778         if (!orig_alt_group) {
1779                 struct instruction *last_orig_insn = NULL;
1780
1781                 orig_alt_group = malloc(sizeof(*orig_alt_group));
1782                 if (!orig_alt_group) {
1783                         WARN("malloc failed");
1784                         return -1;
1785                 }
1786                 orig_alt_group->cfi = calloc(special_alt->orig_len,
1787                                              sizeof(struct cfi_state *));
1788                 if (!orig_alt_group->cfi) {
1789                         WARN("calloc failed");
1790                         return -1;
1791                 }
1792
1793                 insn = orig_insn;
1794                 sec_for_each_insn_from(file, insn) {
1795                         if (insn->offset >= special_alt->orig_off + special_alt->orig_len)
1796                                 break;
1797
1798                         insn->alt_group = orig_alt_group;
1799                         last_orig_insn = insn;
1800                 }
1801                 orig_alt_group->orig_group = NULL;
1802                 orig_alt_group->first_insn = orig_insn;
1803                 orig_alt_group->last_insn = last_orig_insn;
1804                 orig_alt_group->nop = NULL;
1805         } else {
1806                 if (orig_alt_group->last_insn->offset + orig_alt_group->last_insn->len -
1807                     orig_alt_group->first_insn->offset != special_alt->orig_len) {
1808                         WARN_FUNC("weirdly overlapping alternative! %ld != %d",
1809                                   orig_insn->sec, orig_insn->offset,
1810                                   orig_alt_group->last_insn->offset +
1811                                   orig_alt_group->last_insn->len -
1812                                   orig_alt_group->first_insn->offset,
1813                                   special_alt->orig_len);
1814                         return -1;
1815                 }
1816         }
1817
1818         new_alt_group = malloc(sizeof(*new_alt_group));
1819         if (!new_alt_group) {
1820                 WARN("malloc failed");
1821                 return -1;
1822         }
1823
1824         if (special_alt->new_len < special_alt->orig_len) {
1825                 /*
1826                  * Insert a fake nop at the end to make the replacement
1827                  * alt_group the same size as the original.  This is needed to
1828                  * allow propagate_alt_cfi() to do its magic.  When the last
1829                  * instruction affects the stack, the instruction after it (the
1830                  * nop) will propagate the new state to the shared CFI array.
1831                  */
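                /*
                 * E.g. (numbers purely illustrative): orig_len = 5 and
                 * new_len = 3 results in a 2-byte fake nop at new_off + 3,
                 * so both groups span 5 bytes.
                 */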
1832                 nop = malloc(sizeof(*nop));
1833                 if (!nop) {
1834                         WARN("malloc failed");
1835                         return -1;
1836                 }
1837                 memset(nop, 0, sizeof(*nop));
1838
1839                 nop->sec = special_alt->new_sec;
1840                 nop->offset = special_alt->new_off + special_alt->new_len;
1841                 nop->len = special_alt->orig_len - special_alt->new_len;
1842                 nop->type = INSN_NOP;
1843                 nop->sym = orig_insn->sym;
1844                 nop->alt_group = new_alt_group;
1845                 nop->ignore = orig_insn->ignore_alts;
1846         }
1847
1848         if (!special_alt->new_len) {
1849                 *new_insn = nop;
1850                 goto end;
1851         }
1852
1853         insn = *new_insn;
1854         sec_for_each_insn_from(file, insn) {
1855                 struct reloc *alt_reloc;
1856
1857                 if (insn->offset >= special_alt->new_off + special_alt->new_len)
1858                         break;
1859
1860                 last_new_insn = insn;
1861
1862                 insn->ignore = orig_insn->ignore_alts;
1863                 insn->sym = orig_insn->sym;
1864                 insn->alt_group = new_alt_group;
1865
1866                 /*
1867                  * Since alternative replacement code is copy/pasted by the
1868                  * kernel after applying relocations, generally such code can't
1869                  * have relative-address relocation references to outside the
1870                  * .altinstr_replacement section, unless the arch's
1871                  * alternatives code can adjust the relative offsets
1872                  * accordingly.
1873                  */
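                /*
                 * For instance (hypothetical): a replacement "call foo" with a
                 * rel32 displacement computed for its .altinstr_replacement
                 * address no longer points at foo once the bytes are copied
                 * over the original site, unless the arch's patching code
                 * re-adjusts the displacement.
                 */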
1874                 alt_reloc = insn_reloc(file, insn);
1875                 if (alt_reloc && arch_pc_relative_reloc(alt_reloc) &&
1876                     !arch_support_alt_relocation(special_alt, insn, alt_reloc)) {
1877
1878                         WARN_FUNC("unsupported relocation in alternatives section",
1879                                   insn->sec, insn->offset);
1880                         return -1;
1881                 }
1882
1883                 if (!is_static_jump(insn))
1884                         continue;
1885
1886                 if (!insn->immediate)
1887                         continue;
1888
1889                 dest_off = arch_jump_destination(insn);
1890                 if (dest_off == special_alt->new_off + special_alt->new_len) {
1891                         insn->jump_dest = next_insn_same_sec(file, orig_alt_group->last_insn);
1892                         if (!insn->jump_dest) {
1893                                 WARN_FUNC("can't find alternative jump destination",
1894                                           insn->sec, insn->offset);
1895                                 return -1;
1896                         }
1897                 }
1898         }
1899
1900         if (!last_new_insn) {
1901                 WARN_FUNC("can't find last new alternative instruction",
1902                           special_alt->new_sec, special_alt->new_off);
1903                 return -1;
1904         }
1905
1906 end:
1907         new_alt_group->orig_group = orig_alt_group;
1908         new_alt_group->first_insn = *new_insn;
1909         new_alt_group->last_insn = last_new_insn;
1910         new_alt_group->nop = nop;
1911         new_alt_group->cfi = orig_alt_group->cfi;
1912         return 0;
1913 }
1914
1915 /*
1916  * A jump label (static branch) entry can either convert a nop to a jump or a
1917  * jump to a nop.  If the original instruction is a jump, make the alt entry an
1918  * effective nop by just skipping the original instruction.
1919  */
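/*
 * Illustrative x86 sketch: a static branch site is compiled as one of
 *
 *   nopl 0x0(%rax,%rax,1)      <-- 5-byte NOP while the key is in its default state
 *   jmp  <target>              <-- patched in when the static key is flipped
 *
 * and the corresponding __jump_table entry records the site, the target and
 * the key.
 */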
1920 static int handle_jump_alt(struct objtool_file *file,
1921                            struct special_alt *special_alt,
1922                            struct instruction *orig_insn,
1923                            struct instruction **new_insn)
1924 {
1925         if (orig_insn->type != INSN_JUMP_UNCONDITIONAL &&
1926             orig_insn->type != INSN_NOP) {
1927
1928                 WARN_FUNC("unsupported instruction at jump label",
1929                           orig_insn->sec, orig_insn->offset);
1930                 return -1;
1931         }
1932
1933         if (opts.hack_jump_label && special_alt->key_addend & 2) {
1934                 struct reloc *reloc = insn_reloc(file, orig_insn);
1935
1936                 if (reloc) {
1937                         reloc->type = R_NONE;
1938                         elf_write_reloc(file->elf, reloc);
1939                 }
1940                 elf_write_insn(file->elf, orig_insn->sec,
1941                                orig_insn->offset, orig_insn->len,
1942                                arch_nop_insn(orig_insn->len));
1943                 orig_insn->type = INSN_NOP;
1944         }
1945
1946         if (orig_insn->type == INSN_NOP) {
1947                 if (orig_insn->len == 2)
1948                         file->jl_nop_short++;
1949                 else
1950                         file->jl_nop_long++;
1951
1952                 return 0;
1953         }
1954
1955         if (orig_insn->len == 2)
1956                 file->jl_short++;
1957         else
1958                 file->jl_long++;
1959
1960         *new_insn = next_insn_same_sec(file, orig_insn);
1961         return 0;
1962 }
1963
1964 /*
1965  * Read all the special sections which have alternate instructions that can be
1966  * patched in or redirected to at runtime.  Each instruction having alternate
1967  * instruction(s) has them added to its insn->alts list, which will be
1968  * traversed in validate_branch().
1969  */
1970 static int add_special_section_alts(struct objtool_file *file)
1971 {
1972         struct list_head special_alts;
1973         struct instruction *orig_insn, *new_insn;
1974         struct special_alt *special_alt, *tmp;
1975         struct alternative *alt;
1976         int ret;
1977
1978         ret = special_get_alts(file->elf, &special_alts);
1979         if (ret)
1980                 return ret;
1981
1982         list_for_each_entry_safe(special_alt, tmp, &special_alts, list) {
1983
1984                 orig_insn = find_insn(file, special_alt->orig_sec,
1985                                       special_alt->orig_off);
1986                 if (!orig_insn) {
1987                         WARN_FUNC("special: can't find orig instruction",
1988                                   special_alt->orig_sec, special_alt->orig_off);
1989                         ret = -1;
1990                         goto out;
1991                 }
1992
1993                 new_insn = NULL;
1994                 if (!special_alt->group || special_alt->new_len) {
1995                         new_insn = find_insn(file, special_alt->new_sec,
1996                                              special_alt->new_off);
1997                         if (!new_insn) {
1998                                 WARN_FUNC("special: can't find new instruction",
1999                                           special_alt->new_sec,
2000                                           special_alt->new_off);
2001                                 ret = -1;
2002                                 goto out;
2003                         }
2004                 }
2005
2006                 if (special_alt->group) {
2007                         if (!special_alt->orig_len) {
2008                                 WARN_FUNC("empty alternative entry",
2009                                           orig_insn->sec, orig_insn->offset);
2010                                 continue;
2011                         }
2012
2013                         ret = handle_group_alt(file, special_alt, orig_insn,
2014                                                &new_insn);
2015                         if (ret)
2016                                 goto out;
2017                 } else if (special_alt->jump_or_nop) {
2018                         ret = handle_jump_alt(file, special_alt, orig_insn,
2019                                               &new_insn);
2020                         if (ret)
2021                                 goto out;
2022                 }
2023
2024                 alt = malloc(sizeof(*alt));
2025                 if (!alt) {
2026                         WARN("malloc failed");
2027                         ret = -1;
2028                         goto out;
2029                 }
2030
2031                 alt->insn = new_insn;
2032                 alt->skip_orig = special_alt->skip_orig;
2033                 orig_insn->ignore_alts |= special_alt->skip_alt;
2034                 alt->next = orig_insn->alts;
2035                 orig_insn->alts = alt;
2036
2037                 list_del(&special_alt->list);
2038                 free(special_alt);
2039         }
2040
2041         if (opts.stats) {
2042                 printf("jl\\\tNOP\tJMP\n");
2043                 printf("short:\t%ld\t%ld\n", file->jl_nop_short, file->jl_short);
2044                 printf("long:\t%ld\t%ld\n", file->jl_nop_long, file->jl_long);
2045         }
2046
2047 out:
2048         return ret;
2049 }
2050
2051 static int add_jump_table(struct objtool_file *file, struct instruction *insn,
2052                             struct reloc *table)
2053 {
2054         struct reloc *reloc = table;
2055         struct instruction *dest_insn;
2056         struct alternative *alt;
2057         struct symbol *pfunc = insn_func(insn)->pfunc;
2058         unsigned int prev_offset = 0;
2059
2060         /*
2061          * Each @reloc is a switch table relocation which points to the target
2062          * instruction.
2063          */
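        /*
         * For example (hypothetical layout): a table for func() might have
         * relocations at .rodata offsets 0x40, 0x48, 0x50, ... each resolving
         * to func+<case offset>.  A gap in the 8-byte stride, or an entry
         * whose target leaves func's pfunc, marks the end of this table.
         */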
2064         list_for_each_entry_from(reloc, &table->sec->reloc_list, list) {
2065
2066                 /* Check for the end of the table: */
2067                 if (reloc != table && reloc->jump_table_start)
2068                         break;
2069
2070                 /* Make sure the table entries are consecutive: */
2071                 if (prev_offset && reloc->offset != prev_offset + 8)
2072                         break;
2073
2074                 /* Detect function pointers from contiguous objects: */
2075                 if (reloc->sym->sec == pfunc->sec &&
2076                     reloc->addend == pfunc->offset)
2077                         break;
2078
2079                 dest_insn = find_insn(file, reloc->sym->sec, reloc->addend);
2080                 if (!dest_insn)
2081                         break;
2082
2083                 /* Make sure the destination is in the same function: */
2084                 if (!insn_func(dest_insn) || insn_func(dest_insn)->pfunc != pfunc)
2085                         break;
2086
2087                 alt = malloc(sizeof(*alt));
2088                 if (!alt) {
2089                         WARN("malloc failed");
2090                         return -1;
2091                 }
2092
2093                 alt->insn = dest_insn;
2094                 alt->next = insn->alts;
2095                 insn->alts = alt;
2096                 prev_offset = reloc->offset;
2097         }
2098
2099         if (!prev_offset) {
2100                 WARN_FUNC("can't find switch jump table",
2101                           insn->sec, insn->offset);
2102                 return -1;
2103         }
2104
2105         return 0;
2106 }
2107
2108 /*
2109  * find_jump_table() - Given a dynamic jump, find the switch jump table
2110  * associated with it.
2111  */
2112 static struct reloc *find_jump_table(struct objtool_file *file,
2113                                       struct symbol *func,
2114                                       struct instruction *insn)
2115 {
2116         struct reloc *table_reloc;
2117         struct instruction *dest_insn, *orig_insn = insn;
2118
2119         /*
2120          * Backward search using the @first_jump_src links; these help avoid
2121          * much of the 'in between' code, which could otherwise confuse the
2122          * search.
2123          */
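        /*
         * Sketch (hypothetical): if the dynamic jump was reached via
         *
         *      jmp     2f
         *      ...                     <-- skipped thanks to first_jump_src
         * 2:   jmpq    *table(,%rax,8)
         *
         * the walk follows the stored back-pointer straight to the forward
         * jump instead of visiting every instruction in between.
         */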
2124         for (;
2125              insn && insn_func(insn) && insn_func(insn)->pfunc == func;
2126              insn = insn->first_jump_src ?: prev_insn_same_sym(file, insn)) {
2127
2128                 if (insn != orig_insn && insn->type == INSN_JUMP_DYNAMIC)
2129                         break;
2130
2131                 /* allow small jumps within the range */
2132                 if (insn->type == INSN_JUMP_UNCONDITIONAL &&
2133                     insn->jump_dest &&
2134                     (insn->jump_dest->offset <= insn->offset ||
2135                      insn->jump_dest->offset > orig_insn->offset))
2136                         break;
2137
2138                 table_reloc = arch_find_switch_table(file, insn);
2139                 if (!table_reloc)
2140                         continue;
2141                 dest_insn = find_insn(file, table_reloc->sym->sec, table_reloc->addend);
2142                 if (!dest_insn || !insn_func(dest_insn) || insn_func(dest_insn)->pfunc != func)
2143                         continue;
2144
2145                 return table_reloc;
2146         }
2147
2148         return NULL;
2149 }
2150
2151 /*
2152  * First pass: Mark the head of each jump table so that in the next pass,
2153  * we know when a given jump table ends and the next one starts.
2154  */
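/*
 * E.g. (hypothetical): when func() contains two switch statements whose tables
 * sit back-to-back in .rodata, marking each table's first relocation with
 * ->jump_table_start lets add_jump_table() stop at the boundary instead of
 * walking into the second table's entries.
 */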
2155 static void mark_func_jump_tables(struct objtool_file *file,
2156                                     struct symbol *func)
2157 {
2158         struct instruction *insn, *last = NULL;
2159         struct reloc *reloc;
2160
2161         func_for_each_insn(file, func, insn) {
2162                 if (!last)
2163                         last = insn;
2164
2165                 /*
2166                  * Store back-pointers for unconditional forward jumps such
2167                  * that find_jump_table() can back-track using those and
2168                  * avoid some potentially confusing code.
2169                  */
2170                 if (insn->type == INSN_JUMP_UNCONDITIONAL && insn->jump_dest &&
2171                     insn->offset > last->offset &&
2172                     insn->jump_dest->offset > insn->offset &&
2173                     !insn->jump_dest->first_jump_src) {
2174
2175                         insn->jump_dest->first_jump_src = insn;
2176                         last = insn->jump_dest;
2177                 }
2178
2179                 if (insn->type != INSN_JUMP_DYNAMIC)
2180                         continue;
2181
2182                 reloc = find_jump_table(file, func, insn);
2183                 if (reloc) {
2184                         reloc->jump_table_start = true;
2185                         insn->_jump_table = reloc;
2186                 }
2187         }
2188 }
2189
2190 static int add_func_jump_tables(struct objtool_file *file,
2191                                   struct symbol *func)
2192 {
2193         struct instruction *insn;
2194         int ret;
2195
2196         func_for_each_insn(file, func, insn) {
2197                 if (!insn_jump_table(insn))
2198                         continue;
2199
2200                 ret = add_jump_table(file, insn, insn_jump_table(insn));
2201                 if (ret)
2202                         return ret;
2203         }
2204
2205         return 0;
2206 }
2207
2208 /*
2209  * For some switch statements, gcc generates a jump table in the .rodata
2210  * section which contains a list of addresses within the function to jump to.
2211  * This finds these jump tables and adds them to the insn->alts lists.
2212  */
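/*
 * Typical shape of such a table (sketch only; exact output varies by compiler
 * and flags):
 *
 *   func:
 *     ...
 *     jmpq  *.Ltable(,%rax,8)
 *
 *   .section .rodata
 *   .Ltable:
 *     .quad  .Lcase0          # each entry relocated to func+<offset>
 *     .quad  .Lcase1
 *     ...
 */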
2213 static int add_jump_table_alts(struct objtool_file *file)
2214 {
2215         struct section *sec;
2216         struct symbol *func;
2217         int ret;
2218
2219         if (!file->rodata)
2220                 return 0;
2221
2222         for_each_sec(file, sec) {
2223                 list_for_each_entry(func, &sec->symbol_list, list) {
2224                         if (func->type != STT_FUNC)
2225                                 continue;
2226
2227                         mark_func_jump_tables(file, func);
2228                         ret = add_func_jump_tables(file, func);
2229                         if (ret)
2230                                 return ret;
2231                 }
2232         }
2233
2234         return 0;
2235 }
2236
2237 static void set_func_state(struct cfi_state *state)
2238 {
2239         state->cfa = initial_func_cfi.cfa;
2240         memcpy(&state->regs, &initial_func_cfi.regs,
2241                CFI_NUM_REGS * sizeof(struct cfi_reg));
2242         state->stack_size = initial_func_cfi.cfa.offset;
2243 }
2244
2245 static int read_unwind_hints(struct objtool_file *file)
2246 {
2247         struct cfi_state cfi = init_cfi;
2248         struct section *sec, *relocsec;
2249         struct unwind_hint *hint;
2250         struct instruction *insn;
2251         struct reloc *reloc;
2252         int i;
2253
2254         sec = find_section_by_name(file->elf, ".discard.unwind_hints");
2255         if (!sec)
2256                 return 0;
2257
2258         relocsec = sec->reloc;
2259         if (!relocsec) {
2260                 WARN("missing .rela.discard.unwind_hints section");
2261                 return -1;
2262         }
2263
2264         if (sec->sh.sh_size % sizeof(struct unwind_hint)) {
2265                 WARN("struct unwind_hint size mismatch");
2266                 return -1;
2267         }
2268
2269         file->hints = true;
2270
2271         for (i = 0; i < sec->sh.sh_size / sizeof(struct unwind_hint); i++) {
2272                 hint = (struct unwind_hint *)sec->data->d_buf + i;
2273
2274                 reloc = find_reloc_by_dest(file->elf, sec, i * sizeof(*hint));
2275                 if (!reloc) {
2276                         WARN("can't find reloc for unwind_hints[%d]", i);
2277                         return -1;
2278                 }
2279
2280                 insn = find_insn(file, reloc->sym->sec, reloc->addend);
2281                 if (!insn) {
2282                         WARN("can't find insn for unwind_hints[%d]", i);
2283                         return -1;
2284                 }
2285
2286                 insn->hint = true;
2287
2288                 if (hint->type == UNWIND_HINT_TYPE_SAVE) {
2289                         insn->hint = false;
2290                         insn->save = true;
2291                         continue;
2292                 }
2293
2294                 if (hint->type == UNWIND_HINT_TYPE_RESTORE) {
2295                         insn->restore = true;
2296                         continue;
2297                 }
2298
2299                 if (hint->type == UNWIND_HINT_TYPE_REGS_PARTIAL) {
2300                         struct symbol *sym = find_symbol_by_offset(insn->sec, insn->offset);
2301
2302                         if (sym && sym->bind == STB_GLOBAL) {
2303                                 if (opts.ibt && insn->type != INSN_ENDBR && !insn->noendbr) {
2304                                         WARN_FUNC("UNWIND_HINT_IRET_REGS without ENDBR",
2305                                                   insn->sec, insn->offset);
2306                                 }
2307
2308                                 insn->entry = 1;
2309                         }
2310                 }
2311
2312                 if (hint->type == UNWIND_HINT_TYPE_ENTRY) {
2313                         hint->type = UNWIND_HINT_TYPE_CALL;
2314                         insn->entry = 1;
2315                 }
2316
2317                 if (hint->type == UNWIND_HINT_TYPE_FUNC) {
2318                         insn->cfi = &func_cfi;
2319                         continue;
2320                 }
2321
2322                 if (insn->cfi)
2323                         cfi = *(insn->cfi);
2324
2325                 if (arch_decode_hint_reg(hint->sp_reg, &cfi.cfa.base)) {
2326                         WARN_FUNC("unsupported unwind_hint sp base reg %d",
2327                                   insn->sec, insn->offset, hint->sp_reg);
2328                         return -1;
2329                 }
2330
2331                 cfi.cfa.offset = bswap_if_needed(file->elf, hint->sp_offset);
2332                 cfi.type = hint->type;
2333                 cfi.end = hint->end;
2334
2335                 insn->cfi = cfi_hash_find_or_add(&cfi);
2336         }
2337
2338         return 0;
2339 }
2340
2341 static int read_noendbr_hints(struct objtool_file *file)
2342 {
2343         struct section *sec;
2344         struct instruction *insn;
2345         struct reloc *reloc;
2346
2347         sec = find_section_by_name(file->elf, ".rela.discard.noendbr");
2348         if (!sec)
2349                 return 0;
2350
2351         list_for_each_entry(reloc, &sec->reloc_list, list) {
2352                 insn = find_insn(file, reloc->sym->sec, reloc->sym->offset + reloc->addend);
2353                 if (!insn) {
2354                         WARN("bad .discard.noendbr entry");
2355                         return -1;
2356                 }
2357
2358                 insn->noendbr = 1;
2359         }
2360
2361         return 0;
2362 }
2363
2364 static int read_retpoline_hints(struct objtool_file *file)
2365 {
2366         struct section *sec;
2367         struct instruction *insn;
2368         struct reloc *reloc;
2369
2370         sec = find_section_by_name(file->elf, ".rela.discard.retpoline_safe");
2371         if (!sec)
2372                 return 0;
2373
2374         list_for_each_entry(reloc, &sec->reloc_list, list) {
2375                 if (reloc->sym->type != STT_SECTION) {
2376                         WARN("unexpected relocation symbol type in %s", sec->name);
2377                         return -1;
2378                 }
2379
2380                 insn = find_insn(file, reloc->sym->sec, reloc->addend);
2381                 if (!insn) {
2382                         WARN("bad .discard.retpoline_safe entry");
2383                         return -1;
2384                 }
2385
2386                 if (insn->type != INSN_JUMP_DYNAMIC &&
2387                     insn->type != INSN_CALL_DYNAMIC &&
2388                     insn->type != INSN_RETURN &&
2389                     insn->type != INSN_NOP) {
2390                         WARN_FUNC("retpoline_safe hint not an indirect jump/call/ret/nop",
2391                                   insn->sec, insn->offset);
2392                         return -1;
2393                 }
2394
2395                 insn->retpoline_safe = true;
2396         }
2397
2398         return 0;
2399 }
2400
2401 static int read_instr_hints(struct objtool_file *file)
2402 {
2403         struct section *sec;
2404         struct instruction *insn;
2405         struct reloc *reloc;
2406
2407         sec = find_section_by_name(file->elf, ".rela.discard.instr_end");
2408         if (!sec)
2409                 return 0;
2410
2411         list_for_each_entry(reloc, &sec->reloc_list, list) {
2412                 if (reloc->sym->type != STT_SECTION) {
2413                         WARN("unexpected relocation symbol type in %s", sec->name);
2414                         return -1;
2415                 }
2416
2417                 insn = find_insn(file, reloc->sym->sec, reloc->addend);
2418                 if (!insn) {
2419                         WARN("bad .discard.instr_end entry");
2420                         return -1;
2421                 }
2422
2423                 insn->instr--;
2424         }
2425
2426         sec = find_section_by_name(file->elf, ".rela.discard.instr_begin");
2427         if (!sec)
2428                 return 0;
2429
2430         list_for_each_entry(reloc, &sec->reloc_list, list) {
2431                 if (reloc->sym->type != STT_SECTION) {
2432                         WARN("unexpected relocation symbol type in %s", sec->name);
2433                         return -1;
2434                 }
2435
2436                 insn = find_insn(file, reloc->sym->sec, reloc->addend);
2437                 if (!insn) {
2438                         WARN("bad .discard.instr_begin entry");
2439                         return -1;
2440                 }
2441
2442                 insn->instr++;
2443         }
2444
2445         return 0;
2446 }
2447
2448 static int read_intra_function_calls(struct objtool_file *file)
2449 {
2450         struct instruction *insn;
2451         struct section *sec;
2452         struct reloc *reloc;
2453
2454         sec = find_section_by_name(file->elf, ".rela.discard.intra_function_calls");
2455         if (!sec)
2456                 return 0;
2457
2458         list_for_each_entry(reloc, &sec->reloc_list, list) {
2459                 unsigned long dest_off;
2460
2461                 if (reloc->sym->type != STT_SECTION) {
2462                         WARN("unexpected relocation symbol type in %s",
2463                              sec->name);
2464                         return -1;
2465                 }
2466
2467                 insn = find_insn(file, reloc->sym->sec, reloc->addend);
2468                 if (!insn) {
2469                         WARN("bad .discard.intra_function_calls entry");
2470                         return -1;
2471                 }
2472
2473                 if (insn->type != INSN_CALL) {
2474                         WARN_FUNC("intra_function_call not a direct call",
2475                                   insn->sec, insn->offset);
2476                         return -1;
2477                 }
2478
2479                 /*
2480                  * Treat intra-function CALLs as JMPs, but with a stack_op.
2481                  * See add_call_destinations(), which strips stack_ops from
2482                  * normal CALLs.
2483                  */
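                /*
                 * E.g. (sketch): asm entry code doing
                 *
                 *      ANNOTATE_INTRA_FUNCTION_CALL
                 *      call    1f
                 *      ...
                 * 1:
                 *
                 * keeps the return-address push as a stack_op while the
                 * instruction itself is validated as an unconditional jump.
                 */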
2484                 insn->type = INSN_JUMP_UNCONDITIONAL;
2485
2486                 dest_off = arch_jump_destination(insn);
2487                 insn->jump_dest = find_insn(file, insn->sec, dest_off);
2488                 if (!insn->jump_dest) {
2489                         WARN_FUNC("can't find call dest at %s+0x%lx",
2490                                   insn->sec, insn->offset,
2491                                   insn->sec->name, dest_off);
2492                         return -1;
2493                 }
2494         }
2495
2496         return 0;
2497 }
2498
2499 /*
2500  * Return true if name matches an instrumentation function, where calls to that
2501  * function from noinstr code can safely be removed, but compilers won't do so.
2502  */
2503 static bool is_profiling_func(const char *name)
2504 {
2505         /*
2506          * Many compilers cannot disable KCOV with a function attribute.
2507          */
2508         if (!strncmp(name, "__sanitizer_cov_", 16))
2509                 return true;
2510
2511         /*
2512          * Some compilers currently do not remove __tsan_func_entry/exit nor
2513          * __tsan_atomic_signal_fence (used for barrier instrumentation) even
2514          * with the __no_sanitize_thread attribute, so remove them here.  Once
2515          * the kernel's minimum Clang version is 14.0, this can be removed.
2516          */
2517         if (!strncmp(name, "__tsan_func_", 12) ||
2518             !strcmp(name, "__tsan_atomic_signal_fence"))
2519                 return true;
2520
2521         return false;
2522 }
2523
2524 static int classify_symbols(struct objtool_file *file)
2525 {
2526         struct section *sec;
2527         struct symbol *func;
2528
2529         for_each_sec(file, sec) {
2530                 list_for_each_entry(func, &sec->symbol_list, list) {
2531                         if (func->bind != STB_GLOBAL)
2532                                 continue;
2533
2534                         if (!strncmp(func->name, STATIC_CALL_TRAMP_PREFIX_STR,
2535                                      strlen(STATIC_CALL_TRAMP_PREFIX_STR)))
2536                                 func->static_call_tramp = true;
2537
2538                         if (arch_is_retpoline(func))
2539                                 func->retpoline_thunk = true;
2540
2541                         if (arch_is_rethunk(func))
2542                                 func->return_thunk = true;
2543
2544                         if (arch_ftrace_match(func->name))
2545                                 func->fentry = true;
2546
2547                         if (is_profiling_func(func->name))
2548                                 func->profiling_func = true;
2549                 }
2550         }
2551
2552         return 0;
2553 }
2554
2555 static void mark_rodata(struct objtool_file *file)
2556 {
2557         struct section *sec;
2558         bool found = false;
2559
2560         /*
2561          * Search for the following rodata sections, each of which can
2562          * potentially contain jump tables:
2563          *
2564          * - .rodata: can contain GCC switch tables
2565          * - .rodata.<func>: same, if -fdata-sections is being used
2566          * - .rodata..c_jump_table: contains C annotated jump tables
2567          *
2568          * .rodata.str1.* sections are ignored; they don't contain jump tables.
2569          */
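        /*
         * E.g. ".rodata", ".rodata.my_func" (a hypothetical -fdata-sections
         * name) and ".rodata..c_jump_table" are all marked, while
         * ".rodata.str1.1" (string constants) is skipped.
         */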
2570         for_each_sec(file, sec) {
2571                 if (!strncmp(sec->name, ".rodata", 7) &&
2572                     !strstr(sec->name, ".str1.")) {
2573                         sec->rodata = true;
2574                         found = true;
2575                 }
2576         }
2577
2578         file->rodata = found;
2579 }
2580
2581 static int decode_sections(struct objtool_file *file)
2582 {
2583         int ret;
2584
2585         mark_rodata(file);
2586
2587         ret = init_pv_ops(file);
2588         if (ret)
2589                 return ret;
2590
2591         /*
2592          * Must be before add_{jump,call}_destinations().
2593          */
2594         ret = classify_symbols(file);
2595         if (ret)
2596                 return ret;
2597
2598         ret = decode_instructions(file);
2599         if (ret)
2600                 return ret;
2601
2602         add_ignores(file);
2603         add_uaccess_safe(file);
2604
2605         ret = add_ignore_alternatives(file);
2606         if (ret)
2607                 return ret;
2608
2609         /*
2610          * Must be before read_unwind_hints() since that needs insn->noendbr.
2611          */
2612         ret = read_noendbr_hints(file);
2613         if (ret)
2614                 return ret;
2615
2616         /*
2617          * Must be before add_jump_destinations(), which depends on 'func'
2618          * being set for alternatives, to enable proper sibling call detection.
2619          */
2620         if (opts.stackval || opts.orc || opts.uaccess || opts.noinstr) {
2621                 ret = add_special_section_alts(file);
2622                 if (ret)
2623                         return ret;
2624         }
2625
2626         ret = add_jump_destinations(file);
2627         if (ret)
2628                 return ret;
2629
2630         /*
2631          * Must be before add_call_destinations(); it changes INSN_CALL to
2632          * INSN_JUMP_UNCONDITIONAL.
2633          */
2634         ret = read_intra_function_calls(file);
2635         if (ret)
2636                 return ret;
2637
2638         ret = add_call_destinations(file);
2639         if (ret)
2640                 return ret;
2641
2642         /*
2643          * Must be after add_call_destinations() such that it can override
2644          * dead_end_function() marks.
2645          */
2646         ret = add_dead_ends(file);
2647         if (ret)
2648                 return ret;
2649
2650         ret = add_jump_table_alts(file);
2651         if (ret)
2652                 return ret;
2653
2654         ret = read_unwind_hints(file);
2655         if (ret)
2656                 return ret;
2657
2658         ret = read_retpoline_hints(file);
2659         if (ret)
2660                 return ret;
2661
2662         ret = read_instr_hints(file);
2663         if (ret)
2664                 return ret;
2665
2666         return 0;
2667 }
2668
2669 static bool is_fentry_call(struct instruction *insn)
2670 {
2671         if (insn->type == INSN_CALL &&
2672             insn_call_dest(insn) &&
2673             insn_call_dest(insn)->fentry)
2674                 return true;
2675
2676         return false;
2677 }
2678
2679 static bool has_modified_stack_frame(struct instruction *insn, struct insn_state *state)
2680 {
2681         struct cfi_state *cfi = &state->cfi;
2682         int i;
2683
2684         if (cfi->cfa.base != initial_func_cfi.cfa.base || cfi->drap)
2685                 return true;
2686
2687         if (cfi->cfa.offset != initial_func_cfi.cfa.offset)
2688                 return true;
2689
2690         if (cfi->stack_size != initial_func_cfi.cfa.offset)
2691                 return true;
2692
2693         for (i = 0; i < CFI_NUM_REGS; i++) {
2694                 if (cfi->regs[i].base != initial_func_cfi.regs[i].base ||
2695                     cfi->regs[i].offset != initial_func_cfi.regs[i].offset)
2696                         return true;
2697         }
2698
2699         return false;
2700 }
2701
2702 static bool check_reg_frame_pos(const struct cfi_reg *reg,
2703                                 int expected_offset)
2704 {
2705         return reg->base == CFI_CFA &&
2706                reg->offset == expected_offset;
2707 }
2708
2709 static bool has_valid_stack_frame(struct insn_state *state)
2710 {
2711         struct cfi_state *cfi = &state->cfi;
2712
2713         if (cfi->cfa.base == CFI_BP &&
2714             check_reg_frame_pos(&cfi->regs[CFI_BP], -cfi->cfa.offset) &&
2715             check_reg_frame_pos(&cfi->regs[CFI_RA], -cfi->cfa.offset + 8))
2716                 return true;
2717
2718         if (cfi->drap && cfi->regs[CFI_BP].base == CFI_BP)
2719                 return true;
2720
2721         return false;
2722 }
2723
2724 static int update_cfi_state_regs(struct instruction *insn,
2725                                   struct cfi_state *cfi,
2726                                   struct stack_op *op)
2727 {
2728         struct cfi_reg *cfa = &cfi->cfa;
2729
2730         if (cfa->base != CFI_SP && cfa->base != CFI_SP_INDIRECT)
2731                 return 0;
2732
2733         /* push */
2734         if (op->dest.type == OP_DEST_PUSH || op->dest.type == OP_DEST_PUSHF)
2735                 cfa->offset += 8;
2736
2737         /* pop */
2738         if (op->src.type == OP_SRC_POP || op->src.type == OP_SRC_POPF)
2739                 cfa->offset -= 8;
2740
2741         /* add immediate to sp */
2742         if (op->dest.type == OP_DEST_REG && op->src.type == OP_SRC_ADD &&
2743             op->dest.reg == CFI_SP && op->src.reg == CFI_SP)
2744                 cfa->offset -= op->src.offset;
2745
2746         return 0;
2747 }
2748
2749 static void save_reg(struct cfi_state *cfi, unsigned char reg, int base, int offset)
2750 {
2751         if (arch_callee_saved_reg(reg) &&
2752             cfi->regs[reg].base == CFI_UNDEFINED) {
2753                 cfi->regs[reg].base = base;
2754                 cfi->regs[reg].offset = offset;
2755         }
2756 }
2757
2758 static void restore_reg(struct cfi_state *cfi, unsigned char reg)
2759 {
2760         cfi->regs[reg].base = initial_func_cfi.regs[reg].base;
2761         cfi->regs[reg].offset = initial_func_cfi.regs[reg].offset;
2762 }
2763
2764 /*
2765  * A note about DRAP stack alignment:
2766  *
2767  * GCC has the concept of a DRAP register, which is used to help keep track of
2768  * the stack pointer when aligning the stack.  r10 or r13 is used as the DRAP
2769  * register.  The typical DRAP pattern is:
2770  *
2771  *   4c 8d 54 24 08             lea    0x8(%rsp),%r10
2772  *   48 83 e4 c0                and    $0xffffffffffffffc0,%rsp
2773  *   41 ff 72 f8                pushq  -0x8(%r10)
2774  *   55                         push   %rbp
2775  *   48 89 e5                   mov    %rsp,%rbp
2776  *                              (more pushes)
2777  *   41 52                      push   %r10
2778  *                              ...
2779  *   41 5a                      pop    %r10
2780  *                              (more pops)
2781  *   5d                         pop    %rbp
2782  *   49 8d 62 f8                lea    -0x8(%r10),%rsp
2783  *   c3                         retq
2784  *
2785  * There are some variations in the epilogues, like:
2786  *
2787  *   5b                         pop    %rbx
2788  *   41 5a                      pop    %r10
2789  *   41 5c                      pop    %r12
2790  *   41 5d                      pop    %r13
2791  *   41 5e                      pop    %r14
2792  *   c9                         leaveq
2793  *   49 8d 62 f8                lea    -0x8(%r10),%rsp
2794  *   c3                         retq
2795  *
2796  * and:
2797  *
2798  *   4c 8b 55 e8                mov    -0x18(%rbp),%r10
2799  *   48 8b 5d e0                mov    -0x20(%rbp),%rbx
2800  *   4c 8b 65 f0                mov    -0x10(%rbp),%r12
2801  *   4c 8b 6d f8                mov    -0x8(%rbp),%r13
2802  *   c9                         leaveq
2803  *   49 8d 62 f8                lea    -0x8(%r10),%rsp
2804  *   c3                         retq
2805  *
2806  * Sometimes r13 is used as the DRAP register, in which case it's saved and
2807  * restored beforehand:
2808  *
2809  *   41 55                      push   %r13
2810  *   4c 8d 6c 24 10             lea    0x10(%rsp),%r13
2811  *   48 83 e4 f0                and    $0xfffffffffffffff0,%rsp
2812  *                              ...
2813  *   49 8d 65 f0                lea    -0x10(%r13),%rsp
2814  *   41 5d                      pop    %r13
2815  *   c3                         retq
2816  */
2817 static int update_cfi_state(struct instruction *insn,
2818                             struct instruction *next_insn,
2819                             struct cfi_state *cfi, struct stack_op *op)
2820 {
2821         struct cfi_reg *cfa = &cfi->cfa;
2822         struct cfi_reg *regs = cfi->regs;
2823
2824         /* stack operations don't make sense with an undefined CFA */
2825         if (cfa->base == CFI_UNDEFINED) {
2826                 if (insn_func(insn)) {
2827                         WARN_FUNC("undefined stack state", insn->sec, insn->offset);
2828                         return -1;
2829                 }
2830                 return 0;
2831         }
2832
2833         if (cfi->type == UNWIND_HINT_TYPE_REGS ||
2834             cfi->type == UNWIND_HINT_TYPE_REGS_PARTIAL)
2835                 return update_cfi_state_regs(insn, cfi, op);
2836
2837         switch (op->dest.type) {
2838
2839         case OP_DEST_REG:
2840                 switch (op->src.type) {
2841
2842                 case OP_SRC_REG:
2843                         if (op->src.reg == CFI_SP && op->dest.reg == CFI_BP &&
2844                             cfa->base == CFI_SP &&
2845                             check_reg_frame_pos(&regs[CFI_BP], -cfa->offset)) {
2846
2847                                 /* mov %rsp, %rbp */
2848                                 cfa->base = op->dest.reg;
2849                                 cfi->bp_scratch = false;
2850                         }
2851
2852                         else if (op->src.reg == CFI_SP &&
2853                                  op->dest.reg == CFI_BP && cfi->drap) {
2854
2855                                 /* drap: mov %rsp, %rbp */
2856                                 regs[CFI_BP].base = CFI_BP;
2857                                 regs[CFI_BP].offset = -cfi->stack_size;
2858                                 cfi->bp_scratch = false;
2859                         }
2860
2861                         else if (op->src.reg == CFI_SP && cfa->base == CFI_SP) {
2862
2863                                 /*
2864                                  * mov %rsp, %reg
2865                                  *
2866                                  * This is needed for the rare case where GCC
2867                                  * does:
2868                                  *
2869                                  *   mov    %rsp, %rax
2870                                  *   ...
2871                                  *   mov    %rax, %rsp
2872                                  */
2873                                 cfi->vals[op->dest.reg].base = CFI_CFA;
2874                                 cfi->vals[op->dest.reg].offset = -cfi->stack_size;
2875                         }
2876
2877                         else if (op->src.reg == CFI_BP && op->dest.reg == CFI_SP &&
2878                                  (cfa->base == CFI_BP || cfa->base == cfi->drap_reg)) {
2879
2880                                 /*
2881                                  * mov %rbp, %rsp
2882                                  *
2883                                  * Restore the original stack pointer (Clang).
2884                                  */
2885                                 cfi->stack_size = -cfi->regs[CFI_BP].offset;
2886                         }
2887
2888                         else if (op->dest.reg == cfa->base) {
2889
2890                                 /* mov %reg, %rsp */
2891                                 if (cfa->base == CFI_SP &&
2892                                     cfi->vals[op->src.reg].base == CFI_CFA) {
2893
2894                                         /*
2895                                          * This is needed for the rare case
2896                                          * where GCC does something dumb like:
2897                                          *
2898                                          *   lea    0x8(%rsp), %rcx
2899                                          *   ...
2900                                          *   mov    %rcx, %rsp
2901                                          */
2902                                         cfa->offset = -cfi->vals[op->src.reg].offset;
2903                                         cfi->stack_size = cfa->offset;
2904
2905                                 } else if (cfa->base == CFI_SP &&
2906                                            cfi->vals[op->src.reg].base == CFI_SP_INDIRECT &&
2907                                            cfi->vals[op->src.reg].offset == cfa->offset) {
2908
2909                                         /*
2910                                          * Stack swizzle:
2911                                          *
2912                                          * 1: mov %rsp, (%[tos])
2913                                          * 2: mov %[tos], %rsp
2914                                          *    ...
2915                                          * 3: pop %rsp
2916                                          *
2917                                          * Where:
2918                                          *
2919                                          * 1 - places a pointer to the previous
2920                                          *     stack at the Top-of-Stack of the
2921                                          *     new stack.
2922                                          *
2923                                          * 2 - switches to the new stack.
2924                                          *
2925                                          * 3 - pops the Top-of-Stack to restore
2926                                          *     the original stack.
2927                                          *
2928                                          * Note: we set base to SP_INDIRECT
2929                                          * here and preserve offset. Therefore
2930                                          * when the unwinder reaches ToS it
2931                                          * will dereference SP and then add the
2932                                          * offset to find the next frame, IOW:
2933                                          * (%rsp) + offset.
2934                                          */
2935                                         cfa->base = CFI_SP_INDIRECT;
2936
2937                                 } else {
2938                                         cfa->base = CFI_UNDEFINED;
2939                                         cfa->offset = 0;
2940                                 }
2941                         }
2942
2943                         else if (op->dest.reg == CFI_SP &&
2944                                  cfi->vals[op->src.reg].base == CFI_SP_INDIRECT &&
2945                                  cfi->vals[op->src.reg].offset == cfa->offset) {
2946
2947                                 /*
2948                                  * The same stack swizzle case 2) as above. But
2949                                  * because we can't change cfa->base, case 3)
2950                                  * will become a regular POP. Pretend we're a
2951                                  * PUSH so things don't go unbalanced.
2952                                  */
2953                                 cfi->stack_size += 8;
2954                         }
2955
2956
2957                         break;
2958
2959                 case OP_SRC_ADD:
2960                         if (op->dest.reg == CFI_SP && op->src.reg == CFI_SP) {
2961
2962                                 /* add imm, %rsp */
2963                                 cfi->stack_size -= op->src.offset;
2964                                 if (cfa->base == CFI_SP)
2965                                         cfa->offset -= op->src.offset;
2966                                 break;
2967                         }
2968
2969                         if (op->dest.reg == CFI_SP && op->src.reg == CFI_BP) {
2970
2971                                 /* lea disp(%rbp), %rsp */
2972                                 cfi->stack_size = -(op->src.offset + regs[CFI_BP].offset);
2973                                 break;
2974                         }
2975
2976                         if (!cfi->drap && op->src.reg == CFI_SP &&
2977                             op->dest.reg == CFI_BP && cfa->base == CFI_SP &&
2978                             check_reg_frame_pos(&regs[CFI_BP], -cfa->offset + op->src.offset)) {
2979
2980                                 /* lea disp(%rsp), %rbp */
2981                                 cfa->base = CFI_BP;
2982                                 cfa->offset -= op->src.offset;
2983                                 cfi->bp_scratch = false;
2984                                 break;
2985                         }
2986
2987                         if (op->src.reg == CFI_SP && cfa->base == CFI_SP) {
2988
2989                                 /* drap: lea disp(%rsp), %drap */
2990                                 cfi->drap_reg = op->dest.reg;
2991
2992                                 /*
2993                                  * lea disp(%rsp), %reg
2994                                  *
2995                                  * This is needed for the rare case where GCC
2996                                  * does something dumb like:
2997                                  *
2998                                  *   lea    0x8(%rsp), %rcx
2999                                  *   ...
3000                                  *   mov    %rcx, %rsp
3001                                  */
3002                                 cfi->vals[op->dest.reg].base = CFI_CFA;
3003                                 cfi->vals[op->dest.reg].offset =
3004                                         -cfi->stack_size + op->src.offset;
3005
3006                                 break;
3007                         }
3008
3009                         if (cfi->drap && op->dest.reg == CFI_SP &&
3010                             op->src.reg == cfi->drap_reg) {
3011
3012                                  /* drap: lea disp(%drap), %rsp */
3013                                 cfa->base = CFI_SP;
3014                                 cfa->offset = cfi->stack_size = -op->src.offset;
3015                                 cfi->drap_reg = CFI_UNDEFINED;
3016                                 cfi->drap = false;
3017                                 break;
3018                         }
3019
3020                         if (op->dest.reg == cfi->cfa.base && !(next_insn && next_insn->hint)) {
3021                                 WARN_FUNC("unsupported stack register modification",
3022                                           insn->sec, insn->offset);
3023                                 return -1;
3024                         }
3025
3026                         break;
3027
3028                 case OP_SRC_AND:
3029                         if (op->dest.reg != CFI_SP ||
3030                             (cfi->drap_reg != CFI_UNDEFINED && cfa->base != CFI_SP) ||
3031                             (cfi->drap_reg == CFI_UNDEFINED && cfa->base != CFI_BP)) {
3032                                 WARN_FUNC("unsupported stack pointer realignment",
3033                                           insn->sec, insn->offset);
3034                                 return -1;
3035                         }
3036
3037                         if (cfi->drap_reg != CFI_UNDEFINED) {
3038                                 /* drap: and imm, %rsp */
3039                                 cfa->base = cfi->drap_reg;
3040                                 cfa->offset = cfi->stack_size = 0;
3041                                 cfi->drap = true;
3042                         }
3043
3044                         /*
3045                          * Older versions of GCC (4.8ish) realign the stack
3046                          * without DRAP, with a frame pointer.
3047                          */
3048
3049                         break;
3050
3051                 case OP_SRC_POP:
3052                 case OP_SRC_POPF:
3053                         if (op->dest.reg == CFI_SP && cfa->base == CFI_SP_INDIRECT) {
3054
3055                                 /* pop %rsp; # restore from a stack swizzle */
3056                                 cfa->base = CFI_SP;
3057                                 break;
3058                         }
3059
3060                         if (!cfi->drap && op->dest.reg == cfa->base) {
3061
3062                                 /* pop %rbp */
3063                                 cfa->base = CFI_SP;
3064                         }
3065
3066                         if (cfi->drap && cfa->base == CFI_BP_INDIRECT &&
3067                             op->dest.reg == cfi->drap_reg &&
3068                             cfi->drap_offset == -cfi->stack_size) {
3069
3070                                 /* drap: pop %drap */
3071                                 cfa->base = cfi->drap_reg;
3072                                 cfa->offset = 0;
3073                                 cfi->drap_offset = -1;
3074
3075                         } else if (cfi->stack_size == -regs[op->dest.reg].offset) {
3076
3077                                 /* pop %reg */
3078                                 restore_reg(cfi, op->dest.reg);
3079                         }
3080
3081                         cfi->stack_size -= 8;
3082                         if (cfa->base == CFI_SP)
3083                                 cfa->offset -= 8;
3084
3085                         break;
3086
3087                 case OP_SRC_REG_INDIRECT:
3088                         if (!cfi->drap && op->dest.reg == cfa->base &&
3089                             op->dest.reg == CFI_BP) {
3090
3091                                 /* mov disp(%rsp), %rbp */
3092                                 cfa->base = CFI_SP;
3093                                 cfa->offset = cfi->stack_size;
3094                         }
3095
3096                         if (cfi->drap && op->src.reg == CFI_BP &&
3097                             op->src.offset == cfi->drap_offset) {
3098
3099                                 /* drap: mov disp(%rbp), %drap */
3100                                 cfa->base = cfi->drap_reg;
3101                                 cfa->offset = 0;
3102                                 cfi->drap_offset = -1;
3103                         }
3104
3105                         if (cfi->drap && op->src.reg == CFI_BP &&
3106                             op->src.offset == regs[op->dest.reg].offset) {
3107
3108                                 /* drap: mov disp(%rbp), %reg */
3109                                 restore_reg(cfi, op->dest.reg);
3110
3111                         } else if (op->src.reg == cfa->base &&
3112                             op->src.offset == regs[op->dest.reg].offset + cfa->offset) {
3113
3114                                 /* mov disp(%rbp), %reg */
3115                                 /* mov disp(%rsp), %reg */
3116                                 restore_reg(cfi, op->dest.reg);
3117
3118                         } else if (op->src.reg == CFI_SP &&
3119                                    op->src.offset == regs[op->dest.reg].offset + cfi->stack_size) {
3120
3121                                 /* mov disp(%rsp), %reg */
3122                                 restore_reg(cfi, op->dest.reg);
3123                         }
3124
3125                         break;
3126
3127                 default:
3128                         WARN_FUNC("unknown stack-related instruction",
3129                                   insn->sec, insn->offset);
3130                         return -1;
3131                 }
3132
3133                 break;
3134
3135         case OP_DEST_PUSH:
3136         case OP_DEST_PUSHF:
3137                 cfi->stack_size += 8;
3138                 if (cfa->base == CFI_SP)
3139                         cfa->offset += 8;
3140
3141                 if (op->src.type != OP_SRC_REG)
3142                         break;
3143
3144                 if (cfi->drap) {
3145                         if (op->src.reg == cfa->base && op->src.reg == cfi->drap_reg) {
3146
3147                                 /* drap: push %drap */
3148                                 cfa->base = CFI_BP_INDIRECT;
3149                                 cfa->offset = -cfi->stack_size;
3150
3151                                 /* save drap so we know when to restore it */
3152                                 cfi->drap_offset = -cfi->stack_size;
3153
3154                         } else if (op->src.reg == CFI_BP && cfa->base == cfi->drap_reg) {
3155
3156                                 /* drap: push %rbp */
3157                                 cfi->stack_size = 0;
3158
3159                         } else {
3160
3161                                 /* drap: push %reg */
3162                                 save_reg(cfi, op->src.reg, CFI_BP, -cfi->stack_size);
3163                         }
3164
3165                 } else {
3166
3167                         /* push %reg */
3168                         save_reg(cfi, op->src.reg, CFI_CFA, -cfi->stack_size);
3169                 }
3170
3171                 /* detect when asm code uses rbp as a scratch register */
3172                 if (opts.stackval && insn_func(insn) && op->src.reg == CFI_BP &&
3173                     cfa->base != CFI_BP)
3174                         cfi->bp_scratch = true;
3175                 break;
3176
3177         case OP_DEST_REG_INDIRECT:
3178
3179                 if (cfi->drap) {
3180                         if (op->src.reg == cfa->base && op->src.reg == cfi->drap_reg) {
3181
3182                                 /* drap: mov %drap, disp(%rbp) */
3183                                 cfa->base = CFI_BP_INDIRECT;
3184                                 cfa->offset = op->dest.offset;
3185
3186                                 /* save drap offset so we know when to restore it */
3187                                 cfi->drap_offset = op->dest.offset;
3188                         } else {
3189
3190                                 /* drap: mov reg, disp(%rbp) */
3191                                 save_reg(cfi, op->src.reg, CFI_BP, op->dest.offset);
3192                         }
3193
3194                 } else if (op->dest.reg == cfa->base) {
3195
3196                         /* mov reg, disp(%rbp) */
3197                         /* mov reg, disp(%rsp) */
3198                         save_reg(cfi, op->src.reg, CFI_CFA,
3199                                  op->dest.offset - cfi->cfa.offset);
3200
3201                 } else if (op->dest.reg == CFI_SP) {
3202
3203                         /* mov reg, disp(%rsp) */
3204                         save_reg(cfi, op->src.reg, CFI_CFA,
3205                                  op->dest.offset - cfi->stack_size);
3206
3207                 } else if (op->src.reg == CFI_SP && op->dest.offset == 0) {
3208
3209                         /* mov %rsp, (%reg); # setup a stack swizzle. */
3210                         cfi->vals[op->dest.reg].base = CFI_SP_INDIRECT;
3211                         cfi->vals[op->dest.reg].offset = cfa->offset;
3212                 }
3213
3214                 break;
3215
3216         case OP_DEST_MEM:
3217                 if (op->src.type != OP_SRC_POP && op->src.type != OP_SRC_POPF) {
3218                         WARN_FUNC("unknown stack-related memory operation",
3219                                   insn->sec, insn->offset);
3220                         return -1;
3221                 }
3222
3223                 /* pop mem */
3224                 cfi->stack_size -= 8;
3225                 if (cfa->base == CFI_SP)
3226                         cfa->offset -= 8;
3227
3228                 break;
3229
3230         default:
3231                 WARN_FUNC("unknown stack-related instruction",
3232                           insn->sec, insn->offset);
3233                 return -1;
3234         }
3235
3236         return 0;
3237 }
3238
3239 /*
3240  * The stack layouts of alternatives instructions can sometimes diverge when
3241  * they have stack modifications.  That's fine as long as the potential stack
3242  * layouts don't conflict at any given potential instruction boundary.
3243  *
3244  * Flatten the CFIs of the different alternative code streams (both original
3245  * and replacement) into a single shared CFI array which can be used to detect
3246  * conflicts and nicely feed a linear array of ORC entries to the unwinder.
3247  */
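/*
 * Illustrative example (not from the original source): an instruction
 * eight bytes into an alt_group (original or replacement) lands in slot
 * insn->offset - first_insn->offset == 8 of the shared ->cfi[] array;
 * every code stream has to record the same CFI in that slot, otherwise
 * the conflict is reported below.
 */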
3248 static int propagate_alt_cfi(struct objtool_file *file, struct instruction *insn)
3249 {
3250         struct cfi_state **alt_cfi;
3251         int group_off;
3252
3253         if (!insn->alt_group)
3254                 return 0;
3255
3256         if (!insn->cfi) {
3257                 WARN("CFI missing");
3258                 return -1;
3259         }
3260
3261         alt_cfi = insn->alt_group->cfi;
3262         group_off = insn->offset - insn->alt_group->first_insn->offset;
3263
3264         if (!alt_cfi[group_off]) {
3265                 alt_cfi[group_off] = insn->cfi;
3266         } else {
3267                 if (cficmp(alt_cfi[group_off], insn->cfi)) {
3268                         struct alt_group *orig_group = insn->alt_group->orig_group ?: insn->alt_group;
3269                         struct instruction *orig = orig_group->first_insn;
3270                         char *where = offstr(insn->sec, insn->offset);
3271                         WARN_FUNC("stack layout conflict in alternatives: %s",
3272                                   orig->sec, orig->offset, where);
3273                         free(where);
3274                         return -1;
3275                 }
3276         }
3277
3278         return 0;
3279 }
3280
3281 static int handle_insn_ops(struct instruction *insn,
3282                            struct instruction *next_insn,
3283                            struct insn_state *state)
3284 {
3285         struct stack_op *op;
3286
3287         for (op = insn->stack_ops; op; op = op->next) {
3288
3289                 if (update_cfi_state(insn, next_insn, &state->cfi, op))
3290                         return 1;
3291
3292                 if (!insn->alt_group)
3293                         continue;
3294
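                /*
                 * Within alternatives, the UACCESS (AC) state is tracked
                 * across PUSHF/POPF pairs with a small bit stack: each PUSHF
                 * shifts ->uaccess_stack left and records the current
                 * ->uaccess bit, each POPF pops it back.  The low sentinel
                 * bit set on the first PUSHF marks the bottom of the stack.
                 */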
3295                 if (op->dest.type == OP_DEST_PUSHF) {
3296                         if (!state->uaccess_stack) {
3297                                 state->uaccess_stack = 1;
3298                         } else if (state->uaccess_stack >> 31) {
3299                                 WARN_FUNC("PUSHF stack exhausted",
3300                                           insn->sec, insn->offset);
3301                                 return 1;
3302                         }
3303                         state->uaccess_stack <<= 1;
3304                         state->uaccess_stack  |= state->uaccess;
3305                 }
3306
3307                 if (op->src.type == OP_SRC_POPF) {
3308                         if (state->uaccess_stack) {
3309                                 state->uaccess = state->uaccess_stack & 1;
3310                                 state->uaccess_stack >>= 1;
3311                                 if (state->uaccess_stack == 1)
3312                                         state->uaccess_stack = 0;
3313                         }
3314                 }
3315         }
3316
3317         return 0;
3318 }
3319
3320 static bool insn_cfi_match(struct instruction *insn, struct cfi_state *cfi2)
3321 {
3322         struct cfi_state *cfi1 = insn->cfi;
3323         int i;
3324
3325         if (!cfi1) {
3326                 WARN("CFI missing");
3327                 return false;
3328         }
3329
3330         if (memcmp(&cfi1->cfa, &cfi2->cfa, sizeof(cfi1->cfa))) {
3331
3332                 WARN_FUNC("stack state mismatch: cfa1=%d%+d cfa2=%d%+d",
3333                           insn->sec, insn->offset,
3334                           cfi1->cfa.base, cfi1->cfa.offset,
3335                           cfi2->cfa.base, cfi2->cfa.offset);
3336
3337         } else if (memcmp(&cfi1->regs, &cfi2->regs, sizeof(cfi1->regs))) {
3338                 for (i = 0; i < CFI_NUM_REGS; i++) {
3339                         if (!memcmp(&cfi1->regs[i], &cfi2->regs[i],
3340                                     sizeof(struct cfi_reg)))
3341                                 continue;
3342
3343                         WARN_FUNC("stack state mismatch: reg1[%d]=%d%+d reg2[%d]=%d%+d",
3344                                   insn->sec, insn->offset,
3345                                   i, cfi1->regs[i].base, cfi1->regs[i].offset,
3346                                   i, cfi2->regs[i].base, cfi2->regs[i].offset);
3347                         break;
3348                 }
3349
3350         } else if (cfi1->type != cfi2->type) {
3351
3352                 WARN_FUNC("stack state mismatch: type1=%d type2=%d",
3353                           insn->sec, insn->offset, cfi1->type, cfi2->type);
3354
3355         } else if (cfi1->drap != cfi2->drap ||
3356                    (cfi1->drap && cfi1->drap_reg != cfi2->drap_reg) ||
3357                    (cfi1->drap && cfi1->drap_offset != cfi2->drap_offset)) {
3358
3359                 WARN_FUNC("stack state mismatch: drap1=%d(%d,%d) drap2=%d(%d,%d)",
3360                           insn->sec, insn->offset,
3361                           cfi1->drap, cfi1->drap_reg, cfi1->drap_offset,
3362                           cfi2->drap, cfi2->drap_reg, cfi2->drap_offset);
3363
3364         } else
3365                 return true;
3366
3367         return false;
3368 }
3369
3370 static inline bool func_uaccess_safe(struct symbol *func)
3371 {
3372         if (func)
3373                 return func->uaccess_safe;
3374
3375         return false;
3376 }
3377
3378 static inline const char *call_dest_name(struct instruction *insn)
3379 {
3380         static char pvname[19];
3381         struct reloc *rel;
3382         int idx;
3383
3384         if (insn_call_dest(insn))
3385                 return insn_call_dest(insn)->name;
3386
3387         rel = insn_reloc(NULL, insn);
3388         if (rel && !strcmp(rel->sym->name, "pv_ops")) {
3389                 idx = (rel->addend / sizeof(void *));
3390                 snprintf(pvname, sizeof(pvname), "pv_ops[%d]", idx);
3391                 return pvname;
3392         }
3393
3394         return "{dynamic}";
3395 }
3396
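/*
 * An indirect call through pv_ops[idx] is considered noinstr-safe only if
 * every target registered for that slot lives in noinstr text; the result
 * is cached in file->pv_ops[idx].clean.
 */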
3397 static bool pv_call_dest(struct objtool_file *file, struct instruction *insn)
3398 {
3399         struct symbol *target;
3400         struct reloc *rel;
3401         int idx;
3402
3403         rel = insn_reloc(file, insn);
3404         if (!rel || strcmp(rel->sym->name, "pv_ops"))
3405                 return false;
3406
3407         idx = (arch_dest_reloc_offset(rel->addend) / sizeof(void *));
3408
3409         if (file->pv_ops[idx].clean)
3410                 return true;
3411
3412         file->pv_ops[idx].clean = true;
3413
3414         list_for_each_entry(target, &file->pv_ops[idx].targets, pv_target) {
3415                 if (!target->sec->noinstr) {
3416                         WARN("pv_ops[%d]: %s", idx, target->name);
3417                         file->pv_ops[idx].clean = false;
3418                 }
3419         }
3420
3421         return file->pv_ops[idx].clean;
3422 }
3423
3424 static inline bool noinstr_call_dest(struct objtool_file *file,
3425                                      struct instruction *insn,
3426                                      struct symbol *func)
3427 {
3428         /*
3429          * We can't deal with indirect function calls at present;
3430          * assume they're instrumented.
3431          */
3432         if (!func) {
3433                 if (file->pv_ops)
3434                         return pv_call_dest(file, insn);
3435
3436                 return false;
3437         }
3438
3439         /*
3440          * If the symbol is from a noinstr section, we're good.
3441          */
3442         if (func->sec->noinstr)
3443                 return true;
3444
3445         /*
3446          * If the symbol is a static_call trampoline, we can't tell.
3447          */
3448         if (func->static_call_tramp)
3449                 return true;
3450
3451         /*
3452          * The __ubsan_handle_*() calls are like WARN(), they only happen when
3453          * something 'BAD' happened. At the risk of taking the machine down,
3454          * let them proceed to get the message out.
3455          */
3456         if (!strncmp(func->name, "__ubsan_handle_", 15))
3457                 return true;
3458
3459         return false;
3460 }
3461
3462 static int validate_call(struct objtool_file *file,
3463                          struct instruction *insn,
3464                          struct insn_state *state)
3465 {
3466         if (state->noinstr && state->instr <= 0 &&
3467             !noinstr_call_dest(file, insn, insn_call_dest(insn))) {
3468                 WARN_FUNC("call to %s() leaves .noinstr.text section",
3469                                 insn->sec, insn->offset, call_dest_name(insn));
3470                 return 1;
3471         }
3472
3473         if (state->uaccess && !func_uaccess_safe(insn_call_dest(insn))) {
3474                 WARN_FUNC("call to %s() with UACCESS enabled",
3475                                 insn->sec, insn->offset, call_dest_name(insn));
3476                 return 1;
3477         }
3478
3479         if (state->df) {
3480                 WARN_FUNC("call to %s() with DF set",
3481                                 insn->sec, insn->offset, call_dest_name(insn));
3482                 return 1;
3483         }
3484
3485         return 0;
3486 }
3487
3488 static int validate_sibling_call(struct objtool_file *file,
3489                                  struct instruction *insn,
3490                                  struct insn_state *state)
3491 {
3492         if (insn_func(insn) && has_modified_stack_frame(insn, state)) {
3493                 WARN_FUNC("sibling call from callable instruction with modified stack frame",
3494                                 insn->sec, insn->offset);
3495                 return 1;
3496         }
3497
3498         return validate_call(file, insn, state);
3499 }
3500
3501 static int validate_return(struct symbol *func, struct instruction *insn, struct insn_state *state)
3502 {
3503         if (state->noinstr && state->instr > 0) {
3504                 WARN_FUNC("return with instrumentation enabled",
3505                           insn->sec, insn->offset);
3506                 return 1;
3507         }
3508
3509         if (state->uaccess && !func_uaccess_safe(func)) {
3510                 WARN_FUNC("return with UACCESS enabled",
3511                           insn->sec, insn->offset);
3512                 return 1;
3513         }
3514
3515         if (!state->uaccess && func_uaccess_safe(func)) {
3516                 WARN_FUNC("return with UACCESS disabled from a UACCESS-safe function",
3517                           insn->sec, insn->offset);
3518                 return 1;
3519         }
3520
3521         if (state->df) {
3522                 WARN_FUNC("return with DF set",
3523                           insn->sec, insn->offset);
3524                 return 1;
3525         }
3526
3527         if (func && has_modified_stack_frame(insn, state)) {
3528                 WARN_FUNC("return with modified stack frame",
3529                           insn->sec, insn->offset);
3530                 return 1;
3531         }
3532
3533         if (state->cfi.bp_scratch) {
3534                 WARN_FUNC("BP used as a scratch register",
3535                           insn->sec, insn->offset);
3536                 return 1;
3537         }
3538
3539         return 0;
3540 }
3541
3542 static struct instruction *next_insn_to_validate(struct objtool_file *file,
3543                                                  struct instruction *insn)
3544 {
3545         struct alt_group *alt_group = insn->alt_group;
3546
3547         /*
3548          * Simulate the fact that alternatives are patched in-place.  When the
3549          * end of a replacement alt_group is reached, redirect objtool flow to
3550          * the end of the original alt_group.
3551          *
3552          * insn->alts->insn -> alt_group->first_insn
3553          *                     ...
3554          *                     alt_group->last_insn
3555          *                     [alt_group->nop]      -> next(orig_group->last_insn)
3556          */
3557         if (alt_group) {
3558                 if (alt_group->nop) {
3559                         /* ->nop implies ->orig_group */
3560                         if (insn == alt_group->last_insn)
3561                                 return alt_group->nop;
3562                         if (insn == alt_group->nop)
3563                                 goto next_orig;
3564                 }
3565                 if (insn == alt_group->last_insn && alt_group->orig_group)
3566                         goto next_orig;
3567         }
3568
3569         return next_insn_same_sec(file, insn);
3570
3571 next_orig:
3572         return next_insn_same_sec(file, alt_group->orig_group->last_insn);
3573 }
3574
3575 /*
3576  * Follow the branch starting at the given instruction, and recursively follow
3577  * any other branches (jumps).  Meanwhile, track the frame pointer state at
3578  * each instruction and validate all the rules described in
3579  * tools/objtool/Documentation/objtool.txt.
3580  */
3581 static int validate_branch(struct objtool_file *file, struct symbol *func,
3582                            struct instruction *insn, struct insn_state state)
3583 {
3584         struct alternative *alt;
3585         struct instruction *next_insn, *prev_insn = NULL;
3586         struct section *sec;
3587         u8 visited;
3588         int ret;
3589
3590         sec = insn->sec;
3591
3592         while (1) {
3593                 next_insn = next_insn_to_validate(file, insn);
3594
3595                 if (func && insn_func(insn) && func != insn_func(insn)->pfunc) {
3596                         /* Ignore KCFI type preambles, which always fall through */
3597                         if (!strncmp(func->name, "__cfi_", 6) ||
3598                             !strncmp(func->name, "__pfx_", 6))
3599                                 return 0;
3600
3601                         WARN("%s() falls through to next function %s()",
3602                              func->name, insn_func(insn)->name);
3603                         return 1;
3604                 }
3605
3606                 if (func && insn->ignore) {
3607                         WARN_FUNC("BUG: why am I validating an ignored function?",
3608                                   sec, insn->offset);
3609                         return 1;
3610                 }
3611
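
                /*
                 * Visits are tracked separately for each UACCESS state so the
                 * same instructions get validated both with and without AC
                 * set.
                 */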
3612                 visited = VISITED_BRANCH << state.uaccess;
3613                 if (insn->visited & VISITED_BRANCH_MASK) {
3614                         if (!insn->hint && !insn_cfi_match(insn, &state.cfi))
3615                                 return 1;
3616
3617                         if (insn->visited & visited)
3618                                 return 0;
3619                 } else {
3620                         nr_insns_visited++;
3621                 }
3622
3623                 if (state.noinstr)
3624                         state.instr += insn->instr;
3625
3626                 if (insn->hint) {
3627                         if (insn->restore) {
3628                                 struct instruction *save_insn, *i;
3629
3630                                 i = insn;
3631                                 save_insn = NULL;
3632
3633                                 sym_for_each_insn_continue_reverse(file, func, i) {
3634                                         if (i->save) {
3635                                                 save_insn = i;
3636                                                 break;
3637                                         }
3638                                 }
3639
3640                                 if (!save_insn) {
3641                                         WARN_FUNC("no corresponding CFI save for CFI restore",
3642                                                   sec, insn->offset);
3643                                         return 1;
3644                                 }
3645
3646                                 if (!save_insn->visited) {
3647                                         WARN_FUNC("objtool isn't smart enough to handle this CFI save/restore combo",
3648                                                   sec, insn->offset);
3649                                         return 1;
3650                                 }
3651
3652                                 insn->cfi = save_insn->cfi;
3653                                 nr_cfi_reused++;
3654                         }
3655
3656                         state.cfi = *insn->cfi;
3657                 } else {
3658                         /* XXX track if we actually changed state.cfi */
3659
3660                         if (prev_insn && !cficmp(prev_insn->cfi, &state.cfi)) {
3661                                 insn->cfi = prev_insn->cfi;
3662                                 nr_cfi_reused++;
3663                         } else {
3664                                 insn->cfi = cfi_hash_find_or_add(&state.cfi);
3665                         }
3666                 }
3667
3668                 insn->visited |= visited;
3669
3670                 if (propagate_alt_cfi(file, insn))
3671                         return 1;
3672
3673                 if (!insn->ignore_alts && insn->alts) {
3674                         bool skip_orig = false;
3675
3676                         for (alt = insn->alts; alt; alt = alt->next) {
3677                                 if (alt->skip_orig)
3678                                         skip_orig = true;
3679
3680                                 ret = validate_branch(file, func, alt->insn, state);
3681                                 if (ret) {
3682                                         if (opts.backtrace)
3683                                                 BT_FUNC("(alt)", insn);
3684                                         return ret;
3685                                 }
3686                         }
3687
3688                         if (skip_orig)
3689                                 return 0;
3690                 }
3691
3692                 if (handle_insn_ops(insn, next_insn, &state))
3693                         return 1;
3694
3695                 switch (insn->type) {
3696
3697                 case INSN_RETURN:
3698                         return validate_return(func, insn, &state);
3699
3700                 case INSN_CALL:
3701                 case INSN_CALL_DYNAMIC:
3702                         ret = validate_call(file, insn, &state);
3703                         if (ret)
3704                                 return ret;
3705
3706                         if (opts.stackval && func && !is_fentry_call(insn) &&
3707                             !has_valid_stack_frame(&state)) {
3708                                 WARN_FUNC("call without frame pointer save/setup",
3709                                           sec, insn->offset);
3710                                 return 1;
3711                         }
3712
3713                         if (insn->dead_end)
3714                                 return 0;
3715
3716                         break;
3717
3718                 case INSN_JUMP_CONDITIONAL:
3719                 case INSN_JUMP_UNCONDITIONAL:
3720                         if (is_sibling_call(insn)) {
3721                                 ret = validate_sibling_call(file, insn, &state);
3722                                 if (ret)
3723                                         return ret;
3724
3725                         } else if (insn->jump_dest) {
3726                                 ret = validate_branch(file, func,
3727                                                       insn->jump_dest, state);
3728                                 if (ret) {
3729                                         if (opts.backtrace)
3730                                                 BT_FUNC("(branch)", insn);
3731                                         return ret;
3732                                 }
3733                         }
3734
3735                         if (insn->type == INSN_JUMP_UNCONDITIONAL)
3736                                 return 0;
3737
3738                         break;
3739
3740                 case INSN_JUMP_DYNAMIC:
3741                 case INSN_JUMP_DYNAMIC_CONDITIONAL:
3742                         if (is_sibling_call(insn)) {
3743                                 ret = validate_sibling_call(file, insn, &state);
3744                                 if (ret)
3745                                         return ret;
3746                         }
3747
3748                         if (insn->type == INSN_JUMP_DYNAMIC)
3749                                 return 0;
3750
3751                         break;
3752
3753                 case INSN_CONTEXT_SWITCH:
3754                         if (func && (!next_insn || !next_insn->hint)) {
3755                                 WARN_FUNC("unsupported instruction in callable function",
3756                                           sec, insn->offset);
3757                                 return 1;
3758                         }
3759                         return 0;
3760
3761                 case INSN_STAC:
3762                         if (state.uaccess) {
3763                                 WARN_FUNC("recursive UACCESS enable", sec, insn->offset);
3764                                 return 1;
3765                         }
3766
3767                         state.uaccess = true;
3768                         break;
3769
3770                 case INSN_CLAC:
3771                         if (!state.uaccess && func) {
3772                                 WARN_FUNC("redundant UACCESS disable", sec, insn->offset);
3773                                 return 1;
3774                         }
3775
3776                         if (func_uaccess_safe(func) && !state.uaccess_stack) {
3777                                 WARN_FUNC("UACCESS-safe disables UACCESS", sec, insn->offset);
3778                                 return 1;
3779                         }
3780
3781                         state.uaccess = false;
3782                         break;
3783
3784                 case INSN_STD:
3785                         if (state.df) {
3786                                 WARN_FUNC("recursive STD", sec, insn->offset);
3787                                 return 1;
3788                         }
3789
3790                         state.df = true;
3791                         break;
3792
3793                 case INSN_CLD:
3794                         if (!state.df && func) {
3795                                 WARN_FUNC("redundant CLD", sec, insn->offset);
3796                                 return 1;
3797                         }
3798
3799                         state.df = false;
3800                         break;
3801
3802                 default:
3803                         break;
3804                 }
3805
3806                 if (insn->dead_end)
3807                         return 0;
3808
3809                 if (!next_insn) {
3810                         if (state.cfi.cfa.base == CFI_UNDEFINED)
3811                                 return 0;
3812                         WARN("%s: unexpected end of section", sec->name);
3813                         return 1;
3814                 }
3815
3816                 prev_insn = insn;
3817                 insn = next_insn;
3818         }
3819
3820         return 0;
3821 }
3822
3823 static int validate_unwind_hint(struct objtool_file *file,
3824                                   struct instruction *insn,
3825                                   struct insn_state *state)
3826 {
3827         if (insn->hint && !insn->visited && !insn->ignore) {
3828                 int ret = validate_branch(file, insn_func(insn), insn, *state);
3829                 if (ret && opts.backtrace)
3830                         BT_FUNC("<=== (hint)", insn);
3831                 return ret;
3832         }
3833
3834         return 0;
3835 }
3836
3837 static int validate_unwind_hints(struct objtool_file *file, struct section *sec)
3838 {
3839         struct instruction *insn;
3840         struct insn_state state;
3841         int warnings = 0;
3842
3843         if (!file->hints)
3844                 return 0;
3845
3846         init_insn_state(file, &state, sec);
3847
3848         if (sec) {
3849                 sec_for_each_insn(file, sec, insn)
3850                         warnings += validate_unwind_hint(file, insn, &state);
3851         } else {
3852                 for_each_insn(file, insn)
3853                         warnings += validate_unwind_hint(file, insn, &state);
3854         }
3855
3856         return warnings;
3857 }
3858
3859 /*
3860  * Validate rethunk entry constraint: must untrain RET before the first RET.
3861  *
3862  * Follow every branch (intra-function) and ensure ANNOTATE_UNRET_END comes
3863  * before an actual RET instruction.
3864  */
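/*
 * Illustration (not from the original source): a valid entry path roughly
 * looks like
 *
 *	entry:
 *		UNTRAIN_RET		// carries ANNOTATE_UNRET_END
 *		...
 *		ret			// fine, the untrain came first
 *
 * whereas any RET reachable before the UNRET_END annotation is reported
 * as "RET before UNTRAIN" below.
 */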
3865 static int validate_entry(struct objtool_file *file, struct instruction *insn)
3866 {
3867         struct instruction *next, *dest;
3868         int ret, warnings = 0;
3869
3870         for (;;) {
3871                 next = next_insn_to_validate(file, insn);
3872
3873                 if (insn->visited & VISITED_ENTRY)
3874                         return 0;
3875
3876                 insn->visited |= VISITED_ENTRY;
3877
3878                 if (!insn->ignore_alts && insn->alts) {
3879                         struct alternative *alt;
3880                         bool skip_orig = false;
3881
3882                         for (alt = insn->alts; alt; alt = alt->next) {
3883                                 if (alt->skip_orig)
3884                                         skip_orig = true;
3885
3886                                 ret = validate_entry(file, alt->insn);
3887                                 if (ret) {
3888                                         if (opts.backtrace)
3889                                                 BT_FUNC("(alt)", insn);
3890                                         return ret;
3891                                 }
3892                         }
3893
3894                         if (skip_orig)
3895                                 return 0;
3896                 }
3897
3898                 switch (insn->type) {
3899
3900                 case INSN_CALL_DYNAMIC:
3901                 case INSN_JUMP_DYNAMIC:
3902                 case INSN_JUMP_DYNAMIC_CONDITIONAL:
3903                         WARN_FUNC("early indirect call", insn->sec, insn->offset);
3904                         return 1;
3905
3906                 case INSN_JUMP_UNCONDITIONAL:
3907                 case INSN_JUMP_CONDITIONAL:
3908                         if (!is_sibling_call(insn)) {
3909                                 if (!insn->jump_dest) {
3910                                         WARN_FUNC("unresolved jump target after linking?!?",
3911                                                   insn->sec, insn->offset);
3912                                         return -1;
3913                                 }
3914                                 ret = validate_entry(file, insn->jump_dest);
3915                                 if (ret) {
3916                                         if (opts.backtrace) {
3917                                                 BT_FUNC("(branch%s)", insn,
3918                                                         insn->type == INSN_JUMP_CONDITIONAL ? "-cond" : "");
3919                                         }
3920                                         return ret;
3921                                 }
3922
3923                                 if (insn->type == INSN_JUMP_UNCONDITIONAL)
3924                                         return 0;
3925
3926                                 break;
3927                         }
3928
3929                         /* fallthrough */
3930                 case INSN_CALL:
3931                         dest = find_insn(file, insn_call_dest(insn)->sec,
3932                                          insn_call_dest(insn)->offset);
3933                         if (!dest) {
3934                                 WARN("Unresolved function after linking!?: %s",
3935                                      insn_call_dest(insn)->name);
3936                                 return -1;
3937                         }
3938
3939                         ret = validate_entry(file, dest);
3940                         if (ret) {
3941                                 if (opts.backtrace)
3942                                         BT_FUNC("(call)", insn);
3943                                 return ret;
3944                         }
3945                         /*
3946                          * If a call returns without error, it must have seen UNTRAIN_RET.
3947                          * Therefore any non-error return is a success.
3948                          */
3949                         return 0;
3950
3951                 case INSN_RETURN:
3952                         WARN_FUNC("RET before UNTRAIN", insn->sec, insn->offset);
3953                         return 1;
3954
3955                 case INSN_NOP:
3956                         if (insn->retpoline_safe)
3957                                 return 0;
3958                         break;
3959
3960                 default:
3961                         break;
3962                 }
3963
3964                 if (!next) {
3965                         WARN_FUNC("the end!", insn->sec, insn->offset);
3966                         return -1;
3967                 }
3968                 insn = next;
3969         }
3970
3971         return warnings;
3972 }
3973
3974 /*
3975  * Validate that all branches starting at 'insn->entry' encounter UNRET_END
3976  * before RET.
3977  */
3978 static int validate_unret(struct objtool_file *file)
3979 {
3980         struct instruction *insn;
3981         int ret, warnings = 0;
3982
3983         for_each_insn(file, insn) {
3984                 if (!insn->entry)
3985                         continue;
3986
3987                 ret = validate_entry(file, insn);
3988                 if (ret < 0) {
3989                         WARN_FUNC("Failed UNRET validation", insn->sec, insn->offset);
3990                         return ret;
3991                 }
3992                 warnings += ret;
3993         }
3994
3995         return warnings;
3996 }
3997
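/*
 * In a RETPOLINE build, report any remaining indirect jump or call that is
 * not annotated retpoline-safe (init text is exempt); in a RETHUNK build,
 * also report 'naked' RET instructions.
 */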
3998 static int validate_retpoline(struct objtool_file *file)
3999 {
4000         struct instruction *insn;
4001         int warnings = 0;
4002
4003         for_each_insn(file, insn) {
4004                 if (insn->type != INSN_JUMP_DYNAMIC &&
4005                     insn->type != INSN_CALL_DYNAMIC &&
4006                     insn->type != INSN_RETURN)
4007                         continue;
4008
4009                 if (insn->retpoline_safe)
4010                         continue;
4011
4012                 if (insn->sec->init)
4013                         continue;
4014
4015                 if (insn->type == INSN_RETURN) {
4016                         if (opts.rethunk) {
4017                                 WARN_FUNC("'naked' return found in RETHUNK build",
4018                                           insn->sec, insn->offset);
4019                         } else
4020                                 continue;
4021                 } else {
4022                         WARN_FUNC("indirect %s found in RETPOLINE build",
4023                                   insn->sec, insn->offset,
4024                                   insn->type == INSN_JUMP_DYNAMIC ? "jump" : "call");
4025                 }
4026
4027                 warnings++;
4028         }
4029
4030         return warnings;
4031 }
4032
4033 static bool is_kasan_insn(struct instruction *insn)
4034 {
4035         return (insn->type == INSN_CALL &&
4036                 !strcmp(insn_call_dest(insn)->name, "__asan_handle_no_return"));
4037 }
4038
4039 static bool is_ubsan_insn(struct instruction *insn)
4040 {
4041         return (insn->type == INSN_CALL &&
4042                 !strcmp(insn_call_dest(insn)->name,
4043                         "__ubsan_handle_builtin_unreachable"));
4044 }
4045
4046 static bool ignore_unreachable_insn(struct objtool_file *file, struct instruction *insn)
4047 {
4048         int i;
4049         struct instruction *prev_insn;
4050
4051         if (insn->ignore || insn->type == INSN_NOP || insn->type == INSN_TRAP)
4052                 return true;
4053
4054         /*
4055          * Ignore alternative replacement instructions.  This can happen
4056          * when a whitelisted function uses one of the ALTERNATIVE macros.
4057          */
4058         if (!strcmp(insn->sec->name, ".altinstr_replacement") ||
4059             !strcmp(insn->sec->name, ".altinstr_aux"))
4060                 return true;
4061
4062         /*
4063          * Whole archive runs might encounter dead code from weak symbols.
4064          * This is where the linker will have dropped the weak symbol in
4065          * favour of a regular symbol, but left the code in place.
4066          *
4067          * In this case we'll find a piece of code (whole function) that is not
4068          * covered by a !section symbol. Ignore them.
4069          */
4070         if (opts.link && !insn_func(insn)) {
4071                 int size = find_symbol_hole_containing(insn->sec, insn->offset);
4072                 unsigned long end = insn->offset + size;
4073
4074                 if (!size) /* not a hole */
4075                         return false;
4076
4077                 if (size < 0) /* hole until the end */
4078                         return true;
4079
4080                 sec_for_each_insn_continue(file, insn) {
4081                         /*
4082                          * If we reach a visited instruction at or before the
4083                          * end of the hole, ignore the unreachable.
4084                          */
4085                         if (insn->visited)
4086                                 return true;
4087
4088                         if (insn->offset >= end)
4089                                 break;
4090
4091                         /*
4092                          * If this hole jumps to a .cold function, mark it as ignored too.
4093                          */
4094                         if (insn->jump_dest && insn_func(insn->jump_dest) &&
4095                             strstr(insn_func(insn->jump_dest)->name, ".cold")) {
4096                                 struct instruction *dest = insn->jump_dest;
4097                                 func_for_each_insn(file, insn_func(dest), dest)
4098                                         dest->ignore = true;
4099                         }
4100                 }
4101
4102                 return false;
4103         }
4104
4105         if (!insn_func(insn))
4106                 return false;
4107
4108         if (insn_func(insn)->static_call_tramp)
4109                 return true;
4110
4111         /*
4112          * CONFIG_UBSAN_TRAP inserts a UD2 when it sees
4113          * __builtin_unreachable().  The BUG() macro has an unreachable() after
4114          * the UD2, which causes GCC's undefined trap logic to emit another UD2
4115          * (or occasionally a JMP to UD2).
4116          *
4117          * It may also insert a UD2 after calling a __noreturn function.
4118          */
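        /*
         * E.g. (illustration, not from the original source):
         *
         *	call	a_noreturn_function
         *	ud2			<- unreachable, but expected
         */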
4119         prev_insn = prev_insn_same_sec(file, insn);
4120         if ((prev_insn->dead_end ||
4121              dead_end_function(file, insn_call_dest(prev_insn))) &&
4122             (insn->type == INSN_BUG ||
4123              (insn->type == INSN_JUMP_UNCONDITIONAL &&
4124               insn->jump_dest && insn->jump_dest->type == INSN_BUG)))
4125                 return true;
4126
4127         /*
4128          * Check if this (or a subsequent) instruction is related to
4129          * CONFIG_UBSAN or CONFIG_KASAN.
4130          *
4131          * End the search at 5 instructions to avoid going into the weeds.
4132          */
4133         for (i = 0; i < 5; i++) {
4134
4135                 if (is_kasan_insn(insn) || is_ubsan_insn(insn))
4136                         return true;
4137
4138                 if (insn->type == INSN_JUMP_UNCONDITIONAL) {
4139                         if (insn->jump_dest &&
4140                             insn_func(insn->jump_dest) == insn_func(insn)) {
4141                                 insn = insn->jump_dest;
4142                                 continue;
4143                         }
4144
4145                         break;
4146                 }
4147
4148                 if (insn->offset + insn->len >= insn_func(insn)->offset + insn_func(insn)->len)
4149                         break;
4150
4151                 insn = next_insn_same_sec(file, insn);
4152         }
4153
4154         return false;
4155 }
4156
4157 static int add_prefix_symbol(struct objtool_file *file, struct symbol *func,
4158                              struct instruction *insn)
4159 {
4160         if (!opts.prefix)
4161                 return 0;
4162
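        /*
         * Walk backwards over the NOP padding preceding @func; once exactly
         * opts.prefix bytes of NOPs are found, create the prefix (__pfx_)
         * symbol covering that padding.
         */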
4163         for (;;) {
4164                 struct instruction *prev = prev_insn_same_sec(file, insn);
4165                 u64 offset;
4166
4167                 if (!prev)
4168                         break;
4169
4170                 if (prev->type != INSN_NOP)
4171                         break;
4172
4173                 offset = func->offset - prev->offset;
4174                 if (offset >= opts.prefix) {
4175                         if (offset == opts.prefix) {
4176                                 /*
4177                                  * Since the sec->symbol_list is ordered by
4178                                  * offset (see elf_add_symbol()) the added
4179                                  * symbol will not be seen by the iteration in
4180                                  * validate_section().
4181                                  *
4182                                  * Hence the lack of list_for_each_entry_safe()
4183                                  * there.
4184                                  *
4185                                  * The direct consequence is that prefix symbols
4186                                  * don't get visited (because pointless), except
4187                                  * for the logic in ignore_unreachable_insn()
4188                                  * that needs the terminating insn to be visited
4189                                  * otherwise it will report the hole.
4190                                  *
4191                                  * Hence mark the first instruction of the
4192                                  * prefix symbol as visited.
4193                                  */
4194                                 prev->visited |= VISITED_BRANCH;
4195                                 elf_create_prefix_symbol(file->elf, func, opts.prefix);
4196                         }
4197                         break;
4198                 }
4199                 insn = prev;
4200         }
4201
4202         return 0;
4203 }
4204
4205 static int validate_symbol(struct objtool_file *file, struct section *sec,
4206                            struct symbol *sym, struct insn_state *state)
4207 {
4208         struct instruction *insn;
4209         int ret;
4210
4211         if (!sym->len) {
4212                 WARN("%s() is missing an ELF size annotation", sym->name);
4213                 return 1;
4214         }
4215
4216         if (sym->pfunc != sym || sym->alias != sym)
4217                 return 0;
4218
4219         insn = find_insn(file, sec, sym->offset);
4220         if (!insn || insn->ignore || insn->visited)
4221                 return 0;
4222
4223         add_prefix_symbol(file, sym, insn);
4224
4225         state->uaccess = sym->uaccess_safe;
4226
4227         ret = validate_branch(file, insn_func(insn), insn, *state);
4228         if (ret && opts.backtrace)
4229                 BT_FUNC("<=== (sym)", insn);
4230         return ret;
4231 }
4232
4233 static int validate_section(struct objtool_file *file, struct section *sec)
4234 {
4235         struct insn_state state;
4236         struct symbol *func;
4237         int warnings = 0;
4238
4239         list_for_each_entry(func, &sec->symbol_list, list) {
4240                 if (func->type != STT_FUNC)
4241                         continue;
4242
4243                 init_insn_state(file, &state, sec);
4244                 set_func_state(&state.cfi);
4245
4246                 warnings += validate_symbol(file, sec, func, &state);
4247         }
4248
4249         return warnings;
4250 }
4251
4252 static int validate_noinstr_sections(struct objtool_file *file)
4253 {
4254         struct section *sec;
4255         int warnings = 0;
4256
4257         sec = find_section_by_name(file->elf, ".noinstr.text");
4258         if (sec) {
4259                 warnings += validate_section(file, sec);
4260                 warnings += validate_unwind_hints(file, sec);
4261         }
4262
4263         sec = find_section_by_name(file->elf, ".entry.text");
4264         if (sec) {
4265                 warnings += validate_section(file, sec);
4266                 warnings += validate_unwind_hints(file, sec);
4267         }
4268
4269         sec = find_section_by_name(file->elf, ".cpuidle.text");
4270         if (sec) {
4271                 warnings += validate_section(file, sec);
4272                 warnings += validate_unwind_hints(file, sec);
4273         }
4274
4275         return warnings;
4276 }
4277
4278 static int validate_functions(struct objtool_file *file)
4279 {
4280         struct section *sec;
4281         int warnings = 0;
4282
4283         for_each_sec(file, sec) {
4284                 if (!(sec->sh.sh_flags & SHF_EXECINSTR))
4285                         continue;
4286
4287                 warnings += validate_section(file, sec);
4288         }
4289
4290         return warnings;
4291 }
4292
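/*
 * Drop an ENDBR from the list once a legitimate reference to it has been
 * found; anything still listed afterwards was never referenced this way.
 */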
4293 static void mark_endbr_used(struct instruction *insn)
4294 {
4295         if (!list_empty(&insn->call_node))
4296                 list_del_init(&insn->call_node);
4297 }
4298
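/*
 * Check whether @insn immediately follows a symbol whose first instruction
 * is ENDBR (or is ANNOTATE_NOENDBR'd), i.e. the reference marks the end of
 * a code range rather than an indirect branch target.
 */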
4299 static bool noendbr_range(struct objtool_file *file, struct instruction *insn)
4300 {
4301         struct symbol *sym = find_symbol_containing(insn->sec, insn->offset-1);
4302         struct instruction *first;
4303
4304         if (!sym)
4305                 return false;
4306
4307         first = find_insn(file, sym->sec, sym->offset);
4308         if (!first)
4309                 return false;
4310
4311         if (first->type != INSN_ENDBR && !first->noendbr)
4312                 return false;
4313
4314         return insn->offset == sym->offset + sym->len;
4315 }
4316
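     /*
      * Check every relocation inside @insn that takes the address of text.
      * With IBT enabled, an indirect branch to anything other than ENDBR
      * raises a control-protection fault, so each target must either start
      * with ENDBR or be explicitly annotated as an exception.
      */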
4317 static int validate_ibt_insn(struct objtool_file *file, struct instruction *insn)
4318 {
4319         struct instruction *dest;
4320         struct reloc *reloc;
4321         unsigned long off;
4322         int warnings = 0;
4323
4324         /*
4325          * Looking for function pointer load relocations.  Ignore
4326          * direct/indirect branches:
4327          */
4328         switch (insn->type) {
4329         case INSN_CALL:
4330         case INSN_CALL_DYNAMIC:
4331         case INSN_JUMP_CONDITIONAL:
4332         case INSN_JUMP_UNCONDITIONAL:
4333         case INSN_JUMP_DYNAMIC:
4334         case INSN_JUMP_DYNAMIC_CONDITIONAL:
4335         case INSN_RETURN:
4336         case INSN_NOP:
4337                 return 0;
4338         default:
4339                 break;
4340         }
4341
4342         for (reloc = insn_reloc(file, insn);
4343              reloc;
4344              reloc = find_reloc_by_dest_range(file->elf, insn->sec,
4345                                               reloc->offset + 1,
4346                                               (insn->offset + insn->len) - (reloc->offset + 1))) {
4347
4348                 /*
4349                  * static_call_update() references the trampoline, which
4350                  * doesn't have (or need) ENDBR.  Skip warning in that case.
4351                  */
4352                 if (reloc->sym->static_call_tramp)
4353                         continue;
4354
4355                 off = reloc->sym->offset;
4356                 if (reloc->type == R_X86_64_PC32 || reloc->type == R_X86_64_PLT32)
4357                         off += arch_dest_reloc_offset(reloc->addend);
4358                 else
4359                         off += reloc->addend;
4360
4361                 dest = find_insn(file, reloc->sym->sec, off);
4362                 if (!dest)
4363                         continue;
4364
4365                 if (dest->type == INSN_ENDBR) {
4366                         mark_endbr_used(dest);
4367                         continue;
4368                 }
4369
4370                 if (insn_func(dest) && insn_func(dest) == insn_func(insn)) {
4371                         /*
4372                          * Any reference from a function into itself is either
4373                          * _THIS_IP_ or an IRET-to-self.
4374                          *
4375                          * There is no sane way to annotate _THIS_IP_ since the
4376                          * compiler treats the relocation as a constant and is
4377                          * happy to fold in offsets, skewing any annotation we
4378                          * do, leading to vast amounts of false-positives.
4379                          *
4380                          * There's also compiler generated _THIS_IP_ through
4381                          * KCOV and such which we have no hope of annotating.
4382                          *
4383                          * As such, blanket accept self-references without
4384                          * issue.
4385                          */
4386                         continue;
4387                 }
4388
4389                 /*
4390                  * Accept anything annotated with ANNOTATE_NOENDBR.
4391                  */
4392                 if (dest->noendbr)
4393                         continue;
4394
4395                 /*
4396                  * Accept if this is the first instruction past a symbol
4397                  * that is itself (no)endbr -- typical code-range usage.
4398                  */
4399                 if (noendbr_range(file, dest))
4400                         continue;
4401
4402                 WARN_FUNC("relocation to !ENDBR: %s",
4403                           insn->sec, insn->offset,
4404                           offstr(dest->sec, dest->offset));
4405
4406                 warnings++;
4407         }
4408
4409         return warnings;
4410 }
4411
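     /*
      * Same rule as validate_ibt_insn(), but for a relocation in a
      * non-executable section: a code address stored in data is a
      * potential indirect branch target and must point at ENDBR (or be
      * annotated noendbr).
      */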
4412 static int validate_ibt_data_reloc(struct objtool_file *file,
4413                                    struct reloc *reloc)
4414 {
4415         struct instruction *dest;
4416
4417         dest = find_insn(file, reloc->sym->sec,
4418                          reloc->sym->offset + reloc->addend);
4419         if (!dest)
4420                 return 0;
4421
4422         if (dest->type == INSN_ENDBR) {
4423                 mark_endbr_used(dest);
4424                 return 0;
4425         }
4426
4427         if (dest->noendbr)
4428                 return 0;
4429
4430         WARN_FUNC("data relocation to !ENDBR: %s",
4431                   reloc->sec->base, reloc->offset,
4432                   offstr(dest->sec, dest->offset));
4433
4434         return 1;
4435 }
4436
4437 /*
4438  * Validate IBT rules and remove used ENDBR instructions from the seal list.
4439  * Unused ENDBR instructions will be annotated for sealing (i.e., replaced with
4440  * NOPs) later, in create_ibt_endbr_seal_sections().
4441  */
4442 static int validate_ibt(struct objtool_file *file)
4443 {
4444         struct section *sec;
4445         struct reloc *reloc;
4446         struct instruction *insn;
4447         int warnings = 0;
4448
4449         for_each_insn(file, insn)
4450                 warnings += validate_ibt_insn(file, insn);
4451
4452         for_each_sec(file, sec) {
4453
4454                 /* Already done by validate_ibt_insn() */
4455                 if (sec->sh.sh_flags & SHF_EXECINSTR)
4456                         continue;
4457
4458                 if (!sec->reloc)
4459                         continue;
4460
4461                 /*
4462                  * These sections can reference text addresses, but not with
4463                  * the intent of indirectly branching to them.
4464                  */
4465                 if ((!strncmp(sec->name, ".discard", 8) &&
4466                      strcmp(sec->name, ".discard.ibt_endbr_noseal"))    ||
4467                     !strncmp(sec->name, ".debug", 6)                    ||
4468                     !strcmp(sec->name, ".altinstructions")              ||
4469                     !strcmp(sec->name, ".ibt_endbr_seal")               ||
4470                     !strcmp(sec->name, ".orc_unwind_ip")                ||
4471                     !strcmp(sec->name, ".parainstructions")             ||
4472                     !strcmp(sec->name, ".retpoline_sites")              ||
4473                     !strcmp(sec->name, ".smp_locks")                    ||
4474                     !strcmp(sec->name, ".static_call_sites")            ||
4475                     !strcmp(sec->name, "_error_injection_whitelist")    ||
4476                     !strcmp(sec->name, "_kprobe_blacklist")             ||
4477                     !strcmp(sec->name, "__bug_table")                   ||
4478                     !strcmp(sec->name, "__ex_table")                    ||
4479                     !strcmp(sec->name, "__jump_table")                  ||
4480                     !strcmp(sec->name, "__mcount_loc")                  ||
4481                     !strcmp(sec->name, ".kcfi_traps")                   ||
4482                     strstr(sec->name, "__patchable_function_entries"))
4483                         continue;
4484
4485                 list_for_each_entry(reloc, &sec->reloc->reloc_list, list)
4486                         warnings += validate_ibt_data_reloc(file, reloc);
4487         }
4488
4489         return warnings;
4490 }
4491
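     /*
      * SLS (straight-line speculation) mitigation: every RET and indirect
      * JMP must be followed by an INT3 so the CPU cannot speculatively
      * execute whatever bytes happen to come next, e.g.:
      *
      *   ret
      *   int3
      *
      * Instructions annotated retpoline-safe are exempt.
      */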
4492 static int validate_sls(struct objtool_file *file)
4493 {
4494         struct instruction *insn, *next_insn;
4495         int warnings = 0;
4496
4497         for_each_insn(file, insn) {
4498                 next_insn = next_insn_same_sec(file, insn);
4499
4500                 if (insn->retpoline_safe)
4501                         continue;
4502
4503                 switch (insn->type) {
4504                 case INSN_RETURN:
4505                         if (!next_insn || next_insn->type != INSN_TRAP) {
4506                                 WARN_FUNC("missing int3 after ret",
4507                                           insn->sec, insn->offset);
4508                                 warnings++;
4509                         }
4510
4511                         break;
4512                 case INSN_JUMP_DYNAMIC:
4513                         if (!next_insn || next_insn->type != INSN_TRAP) {
4514                                 WARN_FUNC("missing int3 after indirect jump",
4515                                           insn->sec, insn->offset);
4516                                 warnings++;
4517                         }
4518                         break;
4519                 default:
4520                         break;
4521                 }
4522         }
4523
4524         return warnings;
4525 }
4526
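     /*
      * Report the first instruction that validate_branch() never visited
      * and that ignore_unreachable_insn() cannot explain away.
      */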
4527 static int validate_reachable_instructions(struct objtool_file *file)
4528 {
4529         struct instruction *insn;
4530
4531         if (file->ignore_unreachables)
4532                 return 0;
4533
4534         for_each_insn(file, insn) {
4535                 if (insn->visited || ignore_unreachable_insn(file, insn))
4536                         continue;
4537
4538                 WARN_FUNC("unreachable instruction", insn->sec, insn->offset);
4539                 return 1;
4540         }
4541
4542         return 0;
4543 }
4544
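     /*
      * Main entry point: decode the object, run the selected validation
      * passes (stack/ORC, unwind hints, unret, IBT, SLS, ...) and then
      * generate the requested annotation sections (static calls, retpoline
      * and return sites, mcount locations, ENDBR seals, ORC data).
      */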
4545 int check(struct objtool_file *file)
4546 {
4547         int ret, warnings = 0;
4548
4549         arch_initial_func_cfi_state(&initial_func_cfi);
4550         init_cfi_state(&init_cfi);
4551         init_cfi_state(&func_cfi);
4552         set_func_state(&func_cfi);
4553
4554         if (!cfi_hash_alloc(1UL << (file->elf->symbol_bits - 3)))
4555                 goto out;
4556
4557         cfi_hash_add(&init_cfi);
4558         cfi_hash_add(&func_cfi);
4559
4560         ret = decode_sections(file);
4561         if (ret < 0)
4562                 goto out;
4563
4564         warnings += ret;
4565
4566         if (!nr_insns)
4567                 goto out;
4568
4569         if (opts.retpoline) {
4570                 ret = validate_retpoline(file);
4571                 if (ret < 0)
4572                         return ret;
4573                 warnings += ret;
4574         }
4575
4576         if (opts.stackval || opts.orc || opts.uaccess) {
4577                 ret = validate_functions(file);
4578                 if (ret < 0)
4579                         goto out;
4580                 warnings += ret;
4581
4582                 ret = validate_unwind_hints(file, NULL);
4583                 if (ret < 0)
4584                         goto out;
4585                 warnings += ret;
4586
4587                 if (!warnings) {
4588                         ret = validate_reachable_instructions(file);
4589                         if (ret < 0)
4590                                 goto out;
4591                         warnings += ret;
4592                 }
4593
4594         } else if (opts.noinstr) {
4595                 ret = validate_noinstr_sections(file);
4596                 if (ret < 0)
4597                         goto out;
4598                 warnings += ret;
4599         }
4600
4601         if (opts.unret) {
4602                 /*
4603                  * Must run after validate_branch() and friends, as it
4604                  * plays further games with insn->visited.
4605                  */
4606                 ret = validate_unret(file);
4607                 if (ret < 0)
4608                         return ret;
4609                 warnings += ret;
4610         }
4611
4612         if (opts.ibt) {
4613                 ret = validate_ibt(file);
4614                 if (ret < 0)
4615                         goto out;
4616                 warnings += ret;
4617         }
4618
4619         if (opts.sls) {
4620                 ret = validate_sls(file);
4621                 if (ret < 0)
4622                         goto out;
4623                 warnings += ret;
4624         }
4625
4626         if (opts.static_call) {
4627                 ret = create_static_call_sections(file);
4628                 if (ret < 0)
4629                         goto out;
4630                 warnings += ret;
4631         }
4632
4633         if (opts.retpoline) {
4634                 ret = create_retpoline_sites_sections(file);
4635                 if (ret < 0)
4636                         goto out;
4637                 warnings += ret;
4638         }
4639
4640         if (opts.cfi) {
4641                 ret = create_cfi_sections(file);
4642                 if (ret < 0)
4643                         goto out;
4644                 warnings += ret;
4645         }
4646
4647         if (opts.rethunk) {
4648                 ret = create_return_sites_sections(file);
4649                 if (ret < 0)
4650                         goto out;
4651                 warnings += ret;
4652
4653                 if (opts.hack_skylake) {
4654                         ret = create_direct_call_sections(file);
4655                         if (ret < 0)
4656                                 goto out;
4657                         warnings += ret;
4658                 }
4659         }
4660
4661         if (opts.mcount) {
4662                 ret = create_mcount_loc_sections(file);
4663                 if (ret < 0)
4664                         goto out;
4665                 warnings += ret;
4666         }
4667
4668         if (opts.ibt) {
4669                 ret = create_ibt_endbr_seal_sections(file);
4670                 if (ret < 0)
4671                         goto out;
4672                 warnings += ret;
4673         }
4674
4675         if (opts.orc && nr_insns) {
4676                 ret = orc_create(file);
4677                 if (ret < 0)
4678                         goto out;
4679                 warnings += ret;
4680         }
4681
4682
4683         if (opts.stats) {
4684                 printf("nr_insns_visited: %ld\n", nr_insns_visited);
4685                 printf("nr_cfi: %ld\n", nr_cfi);
4686                 printf("nr_cfi_reused: %ld\n", nr_cfi_reused);
4687                 printf("nr_cfi_cache: %ld\n", nr_cfi_cache);
4688         }
4689
4690 out:
4691         /*
4692          *  For now, don't fail the kernel build on fatal warnings.  These
4693          *  errors are still fairly common due to the growing matrix of
4694          *  supported toolchains and their recent pace of change.
4695          */
4696         return 0;
4697 }