1 // SPDX-License-Identifier: GPL-2.0-only
2 #define pr_fmt(fmt) "SMP alternatives: " fmt
4 #include <linux/module.h>
5 #include <linux/sched.h>
6 #include <linux/perf_event.h>
7 #include <linux/mutex.h>
8 #include <linux/list.h>
9 #include <linux/stringify.h>
10 #include <linux/highmem.h>
12 #include <linux/vmalloc.h>
13 #include <linux/memory.h>
14 #include <linux/stop_machine.h>
15 #include <linux/slab.h>
16 #include <linux/kdebug.h>
17 #include <linux/kprobes.h>
18 #include <linux/mmu_context.h>
19 #include <linux/bsearch.h>
20 #include <linux/sync_core.h>
21 #include <asm/text-patching.h>
22 #include <asm/alternative.h>
23 #include <asm/sections.h>
26 #include <asm/cacheflush.h>
27 #include <asm/tlbflush.h>
30 #include <asm/fixmap.h>
31 #include <asm/paravirt.h>
32 #include <asm/asm-prototypes.h>
34 int __read_mostly alternatives_patched;
36 EXPORT_SYMBOL_GPL(alternatives_patched);
38 #define MAX_PATCH_LEN (255-1)
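/* The alt_instr length fields are single bytes, hence the sub-256 cap. */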
40 static int __initdata_or_module debug_alternative;
static int __init debug_alt(char *str)
{
	debug_alternative = 1;
	return 1;
}
__setup("debug-alternative", debug_alt);
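/*
 * Booting with "debug-alternative" on the kernel command line makes the
 * DPRINTK() and DUMP_BYTES() helpers below report every patch site and
 * the bytes written there.
 */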
49 static int noreplace_smp;
static int __init setup_noreplace_smp(char *str)
{
	noreplace_smp = 1;
	return 1;
}
__setup("noreplace-smp", setup_noreplace_smp);
#define DPRINTK(fmt, args...)						\
do {									\
	if (debug_alternative)						\
		printk(KERN_DEBUG pr_fmt(fmt) "\n", ##args);		\
} while (0)
#define DUMP_BYTES(buf, len, fmt, args...)				\
do {									\
	if (unlikely(debug_alternative)) {				\
		int j;							\
									\
		if (!(len))						\
			break;						\
									\
		printk(KERN_DEBUG pr_fmt(fmt), ##args);			\
		for (j = 0; j < (len) - 1; j++)				\
			printk(KERN_CONT "%02hhx ", buf[j]);		\
		printk(KERN_CONT "%02hhx\n", buf[j]);			\
	}								\
} while (0)
static const unsigned char x86nops[] =
{
	BYTES_NOP1,
	BYTES_NOP2,
	BYTES_NOP3,
	BYTES_NOP4,
	BYTES_NOP5,
	BYTES_NOP6,
	BYTES_NOP7,
	BYTES_NOP8,
};

const unsigned char * const x86_nops[ASM_NOP_MAX+1] =
{
	NULL,
	x86nops,
	x86nops + 1,
	x86nops + 1 + 2,
	x86nops + 1 + 2 + 3,
	x86nops + 1 + 2 + 3 + 4,
	x86nops + 1 + 2 + 3 + 4 + 5,
	x86nops + 1 + 2 + 3 + 4 + 5 + 6,
	x86nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
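/*
 * So x86_nops[n] points at a single n-byte NOP; e.g. x86_nops[4] is
 * "0f 1f 40 00" (nopl 0x0(%rax)).
 */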
104 /* Use this to add nops to a buffer, then text_poke the whole buffer. */
static void __init_or_module add_nops(void *insns, unsigned int len)
{
	while (len > 0) {
		unsigned int noplen = len;
		if (noplen > ASM_NOP_MAX)
			noplen = ASM_NOP_MAX;
		memcpy(insns, x86_nops[noplen], noplen);
		insns += noplen;
		len -= noplen;
	}
}
117 extern s32 __retpoline_sites[], __retpoline_sites_end[];
118 extern s32 __return_sites[], __return_sites_end[];
119 extern s32 __cfi_sites[], __cfi_sites_end[];
120 extern s32 __ibt_endbr_seal[], __ibt_endbr_seal_end[];
121 extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
122 extern s32 __smp_locks[], __smp_locks_end[];
123 void text_poke_early(void *addr, const void *opcode, size_t len);
/*
 * Are we looking at a near JMP with a 1 or 4-byte displacement?
 */
static inline bool is_jmp(const u8 opcode)
{
	return opcode == 0xeb || opcode == 0xe9;
}
static void __init_or_module
recompute_jump(struct alt_instr *a, u8 *orig_insn, u8 *repl_insn, u8 *insn_buff)
{
	u8 *next_rip, *tgt_rip;
	s32 n_dspl, o_dspl;
	int repl_len;

	if (a->replacementlen != 5)
		return;

	o_dspl = *(s32 *)(insn_buff + 1);

	/* next_rip of the replacement JMP */
	next_rip = repl_insn + a->replacementlen;
	/* target rip of the replacement JMP */
	tgt_rip  = next_rip + o_dspl;
	n_dspl = tgt_rip - orig_insn;

	DPRINTK("target RIP: %px, new_displ: 0x%x", tgt_rip, n_dspl);

	if (tgt_rip - orig_insn >= 0) {
		if (n_dspl - 2 <= 127)
			goto two_byte_jmp;
		else
			goto five_byte_jmp;
	/* negative offset */
	} else {
		if (((n_dspl - 2) & 0xff) == (n_dspl - 2))
			goto two_byte_jmp;
		else
			goto five_byte_jmp;
	}

two_byte_jmp:
	n_dspl -= 2;

	insn_buff[0] = 0xeb;
	insn_buff[1] = (s8)n_dspl;
	add_nops(insn_buff + 2, 3);

	repl_len = 2;
	goto done;

five_byte_jmp:
	n_dspl -= 5;

	insn_buff[0] = 0xe9;
	*(s32 *)&insn_buff[1] = n_dspl;

	repl_len = 5;

done:

	DPRINTK("final displ: 0x%08x, JMP 0x%lx",
		n_dspl, (unsigned long)orig_insn + n_dspl + repl_len);
}
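/*
 * Example: a replacement "e9 <d32>" whose target ends up within s8 range of
 * the original site shrinks to "eb <d8>" plus a 3-byte NOP; otherwise the
 * 5-byte form is kept with the displacement recomputed for its new home.
 */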
/*
 * optimize_nops_range() - Optimize a sequence of single byte NOPs (0x90)
 *
 * @instr: instruction byte stream
 * @instrlen: length of the above
 * @off: offset within @instr where the first NOP has been detected
 *
 * Return: number of NOPs found (and replaced).
 */
static __always_inline int optimize_nops_range(u8 *instr, u8 instrlen, int off)
{
	unsigned long flags;
	int i = off, nnops;

	while (i < instrlen) {
		if (instr[i] != 0x90)
			break;

		i++;
	}

	nnops = i - off;

	if (nnops <= 1)
		return nnops;

	local_irq_save(flags);
	add_nops(instr + off, nnops);
	local_irq_restore(flags);

	DUMP_BYTES(instr, instrlen, "%px: [%d:%d) optimized NOPs: ", instr, off, i);

	return nnops;
}
226 * "noinline" to cause control flow change and thus invalidate I$ and
227 * cause refetch after modification.
229 static void __init_or_module noinline optimize_nops(u8 *instr, size_t len)
235 * Jump over the non-NOP insns and optimize single-byte NOPs into bigger
239 if (insn_decode_kernel(&insn, &instr[i]))
243 * See if this and any potentially following NOPs can be
246 if (insn.length == 1 && insn.opcode.bytes[0] == 0x90)
247 i += optimize_nops_range(instr, len, i);
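/*
 * E.g. three consecutive "90" bytes are rewritten as one "0f 1f 00" (NOP3),
 * which is cheaper to fetch and decode than three NOP1s.
 */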
/*
 * Replace instructions with better alternatives for this CPU type. This runs
 * before SMP is initialized to avoid SMP problems with self modifying code.
 * This implies that asymmetric systems where APs have fewer capabilities than
 * the boot processor are not handled. Tough. Make sure you disable such
 * features by hand.
 *
 * Marked "noinline" to cause control flow change and thus insn cache
 * to refetch changed I$ lines.
 */
void __init_or_module noinline apply_alternatives(struct alt_instr *start,
						  struct alt_instr *end)
{
	struct alt_instr *a;
	u8 *instr, *replacement;
	u8 insn_buff[MAX_PATCH_LEN];
273 DPRINTK("alt table %px, -> %px", start, end);
275 * The scan order should be from start to end. A later scanned
276 * alternative code can overwrite previously scanned alternative code.
277 * Some kernel functions (e.g. memcpy, memset, etc) use this order to
280 * So be careful if you want to change the scan order to any other
283 for (a = start; a < end; a++) {
284 int insn_buff_sz = 0;
285 /* Mask away "NOT" flag bit for feature to test. */
286 u16 feature = a->cpuid & ~ALTINSTR_FLAG_INV;
288 instr = (u8 *)&a->instr_offset + a->instr_offset;
289 replacement = (u8 *)&a->repl_offset + a->repl_offset;
290 BUG_ON(a->instrlen > sizeof(insn_buff));
291 BUG_ON(feature >= (NCAPINTS + NBUGINTS) * 32);
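		/*
		 * Decision table for the test below:
		 *
		 *	feature present	ALTINSTR_FLAG_INV	patch?
		 *	      Y			 0		  Y
		 *	      N			 0		  N
		 *	      Y			 1		  N
		 *	      N			 1		  Y
		 */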
295 * - feature is present
296 * - feature not present but ALTINSTR_FLAG_INV is set to mean,
297 * patch if feature is *NOT* present.
		if (!boot_cpu_has(feature) == !(a->cpuid & ALTINSTR_FLAG_INV))
			goto next;
302 DPRINTK("feat: %s%d*32+%d, old: (%pS (%px) len: %d), repl: (%px, len: %d)",
303 (a->cpuid & ALTINSTR_FLAG_INV) ? "!" : "",
306 instr, instr, a->instrlen,
307 replacement, a->replacementlen);
309 DUMP_BYTES(instr, a->instrlen, "%px: old_insn: ", instr);
310 DUMP_BYTES(replacement, a->replacementlen, "%px: rpl_insn: ", replacement);
312 memcpy(insn_buff, replacement, a->replacementlen);
313 insn_buff_sz = a->replacementlen;
		 * 0xe8 is a relative CALL; fix the offset.
318 * Instruction length is checked before the opcode to avoid
319 * accessing uninitialized bytes for zero-length replacements.
321 if (a->replacementlen == 5 && *insn_buff == 0xe8) {
322 *(s32 *)(insn_buff + 1) += replacement - instr;
323 DPRINTK("Fix CALL offset: 0x%x, CALL 0x%lx",
324 *(s32 *)(insn_buff + 1),
325 (unsigned long)instr + *(s32 *)(insn_buff + 1) + 5);
328 if (a->replacementlen && is_jmp(replacement[0]))
329 recompute_jump(a, instr, replacement, insn_buff);
		for (; insn_buff_sz < a->instrlen; insn_buff_sz++)
			insn_buff[insn_buff_sz] = 0x90;

		DUMP_BYTES(insn_buff, insn_buff_sz, "%px: final_insn: ", instr);

		text_poke_early(instr, insn_buff, insn_buff_sz);

next:
		optimize_nops(instr, a->instrlen);
	}
}
343 #if defined(CONFIG_RETPOLINE) && defined(CONFIG_OBJTOOL)
static int emit_indirect(int op, int reg, u8 *bytes)
{
	u8 modrm;
	int i = 0;

	switch (op) {
	case CALL_INSN_OPCODE:
		modrm = 0x10; /* Reg = 2; CALL r/m */
		break;

	case JMP32_INSN_OPCODE:
		modrm = 0x20; /* Reg = 4; JMP r/m */
		break;

	default:
		WARN_ON_ONCE(1);
		return -1;
	}

	if (reg >= 8) {
		bytes[i++] = 0x41; /* REX.B prefix */
		reg -= 8;
	}

	modrm |= 0xc0; /* Mod = 3 */
	modrm += reg;

	bytes[i++] = 0xff; /* opcode */
	bytes[i++] = modrm;

	return i;
}
381 static inline bool is_jcc32(struct insn *insn)
383 /* Jcc.d32 second opcode byte is in the range: 0x80-0x8f */
384 return insn->opcode.bytes[0] == 0x0f && (insn->opcode.bytes[1] & 0xf0) == 0x80;
387 static int emit_call_track_retpoline(void *addr, struct insn *insn, int reg, u8 *bytes)
389 u8 op = insn->opcode.bytes[0];
393 * Clang does 'weird' Jcc __x86_indirect_thunk_r11 conditional
394 * tail-calls. Deal with them.
396 if (is_jcc32(insn)) {
398 op = insn->opcode.bytes[1];
402 if (insn->length == 6)
403 bytes[i++] = 0x2e; /* CS-prefix */
406 case CALL_INSN_OPCODE:
407 __text_gen_insn(bytes+i, op, addr+i,
408 __x86_indirect_call_thunk_array[reg],
413 case JMP32_INSN_OPCODE:
415 __text_gen_insn(bytes+i, op, addr+i,
416 __x86_indirect_jump_thunk_array[reg],
418 i += JMP32_INSN_SIZE;
422 WARN(1, "%pS %px %*ph\n", addr, addr, 6, addr);
426 WARN_ON_ONCE(i != insn->length);
432 * Rewrite the compiler generated retpoline thunk calls.
434 * For spectre_v2=off (!X86_FEATURE_RETPOLINE), rewrite them into immediate
435 * indirect instructions, avoiding the extra indirection.
 * For example, convert:
 *
 *   CALL __x86_indirect_thunk_\reg
 *
 * into:
 *
 *   CALL *%\reg
 *
 * It also tries to inline spectre_v2=retpoline,lfence when size permits.
 */
447 static int patch_retpoline(void *addr, struct insn *insn, u8 *bytes)
449 retpoline_thunk_t *target;
453 target = addr + insn->length + insn->immediate.value;
454 reg = target - __x86_indirect_thunk_array;
456 if (WARN_ON_ONCE(reg & ~0xf))
459 /* If anyone ever does: CALL/JMP *%rsp, we're in deep trouble. */
462 if (cpu_feature_enabled(X86_FEATURE_RETPOLINE) &&
463 !cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) {
464 if (cpu_feature_enabled(X86_FEATURE_CALL_DEPTH))
465 return emit_call_track_retpoline(addr, insn, reg, bytes);
470 op = insn->opcode.bytes[0];
475 * Jcc.d32 __x86_indirect_thunk_\reg
485 if (is_jcc32(insn)) {
486 cc = insn->opcode.bytes[1] & 0xf;
487 cc ^= 1; /* invert condition */
489 bytes[i++] = 0x70 + cc; /* Jcc.d8 */
490 bytes[i++] = insn->length - 2; /* sizeof(Jcc.d8) == 2 */
492 /* Continue as if: JMP.d32 __x86_indirect_thunk_\reg */
493 op = JMP32_INSN_OPCODE;
497 * For RETPOLINE_LFENCE: prepend the indirect CALL/JMP with an LFENCE.
	if (cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) {
		bytes[i++] = 0x0f;
		bytes[i++] = 0xae;
		bytes[i++] = 0xe8; /* LFENCE */
	}
505 ret = emit_indirect(op, reg, bytes + i);
511 * The compiler is supposed to EMIT an INT3 after every unconditional
512 * JMP instruction due to AMD BTC. However, if the compiler is too old
	 * or SLS isn't enabled, we still need an INT3 after indirect JMPs
	 * even on Intel.
	 */
516 if (op == JMP32_INSN_OPCODE && i < insn->length)
517 bytes[i++] = INT3_INSN_OPCODE;
	for (; i < insn->length;)
		bytes[i++] = BYTES_NOP1;

	return i;
}
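/*
 * Example (retpolines off): "call __x86_indirect_thunk_rax" (e8 <rel32>)
 * is rewritten in place to "call *%rax" (ff d0) followed by three one-byte
 * NOPs, which the optimize_nops() pass in apply_retpolines() can coalesce.
 */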
526 * Generated by 'objtool --retpoline'.
528 void __init_or_module noinline apply_retpolines(s32 *start, s32 *end)
532 for (s = start; s < end; s++) {
533 void *addr = (void *)s + *s;
539 ret = insn_decode_kernel(&insn, addr);
540 if (WARN_ON_ONCE(ret < 0))
543 op1 = insn.opcode.bytes[0];
544 op2 = insn.opcode.bytes[1];
547 case CALL_INSN_OPCODE:
548 case JMP32_INSN_OPCODE:
551 case 0x0f: /* escape */
552 if (op2 >= 0x80 && op2 <= 0x8f)
560 DPRINTK("retpoline at: %pS (%px) len: %d to: %pS",
561 addr, addr, insn.length,
562 addr + insn.length + insn.immediate.value);
564 len = patch_retpoline(addr, &insn, bytes);
565 if (len == insn.length) {
566 optimize_nops(bytes, len);
567 DUMP_BYTES(((u8*)addr), len, "%px: orig: ", addr);
568 DUMP_BYTES(((u8*)bytes), len, "%px: repl: ", addr);
569 text_poke_early(addr, bytes, len);
574 #ifdef CONFIG_RETHUNK
576 #ifdef CONFIG_CALL_THUNKS
577 void (*x86_return_thunk)(void) __ro_after_init = &__x86_return_thunk;
 * Rewrite the compiler generated return thunk tail-calls.
 *
 * For example, convert:
 *
 *   JMP __x86_return_thunk
 *
 * into:
 *
 *   RET
 */
static int patch_return(void *addr, struct insn *insn, u8 *bytes)
{
	int i = 0;

	if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) {
		if (x86_return_thunk == __x86_return_thunk)
			return -1;

		i = JMP32_INSN_SIZE;
		__text_gen_insn(bytes, JMP32_INSN_OPCODE, addr, x86_return_thunk, i);
	} else {
		bytes[i++] = RET_INSN_OPCODE;
	}

	for (; i < insn->length;)
		bytes[i++] = INT3_INSN_OPCODE;

	return i;
}
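/*
 * Example (no return thunk needed): "jmp __x86_return_thunk" (e9 <rel32>)
 * becomes "c3" (RET) followed by four INT3 bytes as a speculation trap.
 */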
610 void __init_or_module noinline apply_returns(s32 *start, s32 *end)
614 for (s = start; s < end; s++) {
615 void *dest = NULL, *addr = (void *)s + *s;
621 ret = insn_decode_kernel(&insn, addr);
622 if (WARN_ON_ONCE(ret < 0))
625 op = insn.opcode.bytes[0];
626 if (op == JMP32_INSN_OPCODE)
627 dest = addr + insn.length + insn.immediate.value;
629 if (__static_call_fixup(addr, op, dest) ||
630 WARN_ONCE(dest != &__x86_return_thunk,
631 "missing return thunk: %pS-%pS: %*ph",
632 addr, dest, 5, addr))
635 DPRINTK("return thunk at: %pS (%px) len: %d to: %pS",
636 addr, addr, insn.length,
637 addr + insn.length + insn.immediate.value);
639 len = patch_return(addr, &insn, bytes);
640 if (len == insn.length) {
641 DUMP_BYTES(((u8*)addr), len, "%px: orig: ", addr);
642 DUMP_BYTES(((u8*)bytes), len, "%px: repl: ", addr);
643 text_poke_early(addr, bytes, len);
648 void __init_or_module noinline apply_returns(s32 *start, s32 *end) { }
649 #endif /* CONFIG_RETHUNK */
651 #else /* !CONFIG_RETPOLINE || !CONFIG_OBJTOOL */
653 void __init_or_module noinline apply_retpolines(s32 *start, s32 *end) { }
654 void __init_or_module noinline apply_returns(s32 *start, s32 *end) { }
656 #endif /* CONFIG_RETPOLINE && CONFIG_OBJTOOL */
658 #ifdef CONFIG_X86_KERNEL_IBT
660 static void poison_endbr(void *addr, bool warn)
662 u32 endbr, poison = gen_endbr_poison();
664 if (WARN_ON_ONCE(get_kernel_nofault(endbr, addr)))
	if (!is_endbr(endbr)) {
		WARN_ON_ONCE(warn);
		return;
	}

	DPRINTK("ENDBR at: %pS (%px)", addr, addr);
675 * When we have IBT, the lack of ENDBR will trigger #CP
677 DUMP_BYTES(((u8*)addr), 4, "%px: orig: ", addr);
678 DUMP_BYTES(((u8*)&poison), 4, "%px: repl: ", addr);
679 text_poke_early(addr, &poison, 4);
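	/*
	 * The poison is a benign 4-byte NOP encoding that no longer decodes
	 * as ENDBR, so stray indirect branches to this address will trigger
	 * #CP instead of being silently accepted.
	 */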
683 * Generated by: objtool --ibt
685 void __init_or_module noinline apply_ibt_endbr(s32 *start, s32 *end)
689 for (s = start; s < end; s++) {
690 void *addr = (void *)s + *s;
692 poison_endbr(addr, true);
693 if (IS_ENABLED(CONFIG_FINEIBT))
694 poison_endbr(addr - 16, false);
700 void __init_or_module noinline apply_ibt_endbr(s32 *start, s32 *end) { }
702 #endif /* CONFIG_X86_KERNEL_IBT */
704 #ifdef CONFIG_FINEIBT
713 static enum cfi_mode cfi_mode __ro_after_init = CFI_DEFAULT;
714 static bool cfi_rand __ro_after_init = true;
715 static u32 cfi_seed __ro_after_init;
718 * Re-hash the CFI hash with a boot-time seed while making sure the result is
719 * not a valid ENDBR instruction.
721 static u32 cfi_rehash(u32 hash)
724 while (unlikely(is_endbr(hash) || is_endbr(-hash))) {
733 static __init int cfi_parse_cmdline(char *str)
739 char *next = strchr(str, ',');
745 if (!strcmp(str, "auto")) {
746 cfi_mode = CFI_DEFAULT;
		} else if (!strcmp(str, "off")) {
			cfi_mode = CFI_OFF;
			cfi_rand = false;
		} else if (!strcmp(str, "kcfi")) {
			cfi_mode = CFI_KCFI;
		} else if (!strcmp(str, "fineibt")) {
			cfi_mode = CFI_FINEIBT;
		} else if (!strcmp(str, "norand")) {
			cfi_rand = false;
		} else {
			pr_err("Ignoring unknown cfi option (%s).", str);
		}

		str = next;
	}

	return 0;
}
765 early_param("cfi", cfi_parse_cmdline);
770 * __cfi_\func: __cfi_\func:
771 * movl $0x12345678,%eax // 5 endbr64 // 4
772 * nop subl $0x12345678,%r10d // 7
786 * movl $(-0x12345678),%r10d // 6 movl $0x12345678,%r10d // 6
787 * addl $-15(%r11),%r10d // 4 sub $16,%r11 // 4
788 * je 1f // 2 nop4 // 4
790 * 1: call __x86_indirect_thunk_r11 // 5 call *%r11; nop2; // 5
794 asm( ".pushsection .rodata \n"
795 "fineibt_preamble_start: \n"
797 " subl $0x12345678, %r10d \n"
798 " je fineibt_preamble_end \n"
801 "fineibt_preamble_end: \n"
805 extern u8 fineibt_preamble_start[];
806 extern u8 fineibt_preamble_end[];
808 #define fineibt_preamble_size (fineibt_preamble_end - fineibt_preamble_start)
809 #define fineibt_preamble_hash 7
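/*
 * 7 = sizeof(endbr64) (f3 0f 1e fa) + the "41 81 ea" opcode bytes of
 * "subl $imm32, %r10d"; the hash immediate starts right after.
 */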
811 asm( ".pushsection .rodata \n"
812 "fineibt_caller_start: \n"
813 " movl $0x12345678, %r10d \n"
816 "fineibt_caller_end: \n"
820 extern u8 fineibt_caller_start[];
821 extern u8 fineibt_caller_end[];
823 #define fineibt_caller_size (fineibt_caller_end - fineibt_caller_start)
824 #define fineibt_caller_hash 2
826 #define fineibt_caller_jmp (fineibt_caller_size - 2)
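/*
 * Offset 2 skips the "41 ba" of "movl $imm32, %r10d". The disabling JMP.d8
 * is also 2 bytes, so the hash stays readable at addr + 2 in both the
 * enabled and the disabled form.
 */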
static u32 decode_preamble_hash(void *addr)
{
	u8 *p = addr;

	/* b8 78 56 34 12		mov    $0x12345678,%eax */
	if (p[0] == 0xb8)
		return *(u32 *)(addr + 1);

	return 0; /* invalid hash value */
}
static u32 decode_caller_hash(void *addr)
{
	u8 *p = addr;

	/* 41 ba 78 56 34 12		mov    $0x12345678,%r10d */
	if (p[0] == 0x41 && p[1] == 0xba)
		return -*(u32 *)(addr + 2);

	/* eb 0c 78 56 34 12		jmp.d8  +12 */
	if (p[0] == JMP8_INSN_OPCODE && p[1] == fineibt_caller_jmp)
		return -*(u32 *)(addr + 2);

	return 0; /* invalid hash value */
}
854 /* .retpoline_sites */
855 static int cfi_disable_callers(s32 *start, s32 *end)
	 * Disable kCFI by patching in a JMP.d8; this leaves the hash immediate
	 * intact for later usage. Also see decode_caller_hash() and
860 * cfi_rewrite_callers().
862 const u8 jmp[] = { JMP8_INSN_OPCODE, fineibt_caller_jmp };
865 for (s = start; s < end; s++) {
866 void *addr = (void *)s + *s;
869 addr -= fineibt_caller_size;
870 hash = decode_caller_hash(addr);
871 if (!hash) /* nocfi callers */
874 text_poke_early(addr, jmp, 2);
880 static int cfi_enable_callers(s32 *start, s32 *end)
883 * Re-enable kCFI, undo what cfi_disable_callers() did.
885 const u8 mov[] = { 0x41, 0xba };
888 for (s = start; s < end; s++) {
889 void *addr = (void *)s + *s;
892 addr -= fineibt_caller_size;
893 hash = decode_caller_hash(addr);
894 if (!hash) /* nocfi callers */
897 text_poke_early(addr, mov, 2);
904 static int cfi_rand_preamble(s32 *start, s32 *end)
908 for (s = start; s < end; s++) {
909 void *addr = (void *)s + *s;
912 hash = decode_preamble_hash(addr);
913 if (WARN(!hash, "no CFI hash found at: %pS %px %*ph\n",
914 addr, addr, 5, addr))
917 hash = cfi_rehash(hash);
918 text_poke_early(addr + 1, &hash, 4);
924 static int cfi_rewrite_preamble(s32 *start, s32 *end)
928 for (s = start; s < end; s++) {
929 void *addr = (void *)s + *s;
932 hash = decode_preamble_hash(addr);
933 if (WARN(!hash, "no CFI hash found at: %pS %px %*ph\n",
934 addr, addr, 5, addr))
937 text_poke_early(addr, fineibt_preamble_start, fineibt_preamble_size);
938 WARN_ON(*(u32 *)(addr + fineibt_preamble_hash) != 0x12345678);
939 text_poke_early(addr + fineibt_preamble_hash, &hash, 4);
945 /* .retpoline_sites */
946 static int cfi_rand_callers(s32 *start, s32 *end)
950 for (s = start; s < end; s++) {
951 void *addr = (void *)s + *s;
954 addr -= fineibt_caller_size;
955 hash = decode_caller_hash(addr);
957 hash = -cfi_rehash(hash);
958 text_poke_early(addr + 2, &hash, 4);
965 static int cfi_rewrite_callers(s32 *start, s32 *end)
969 for (s = start; s < end; s++) {
970 void *addr = (void *)s + *s;
973 addr -= fineibt_caller_size;
974 hash = decode_caller_hash(addr);
976 text_poke_early(addr, fineibt_caller_start, fineibt_caller_size);
977 WARN_ON(*(u32 *)(addr + fineibt_caller_hash) != 0x12345678);
978 text_poke_early(addr + fineibt_caller_hash, &hash, 4);
980 /* rely on apply_retpolines() */
986 static void __apply_fineibt(s32 *start_retpoline, s32 *end_retpoline,
987 s32 *start_cfi, s32 *end_cfi, bool builtin)
991 if (WARN_ONCE(fineibt_preamble_size != 16,
992 "FineIBT preamble wrong size: %ld", fineibt_preamble_size))
	if (cfi_mode == CFI_DEFAULT) {
		cfi_mode = CFI_KCFI;
		if (HAS_KERNEL_IBT && cpu_feature_enabled(X86_FEATURE_IBT))
			cfi_mode = CFI_FINEIBT;
	}
1002 * Rewrite the callers to not use the __cfi_ stubs, such that we might
1003 * rewrite them. This disables all CFI. If this succeeds but any of the
1004 * later stages fails, we're without CFI.
1006 ret = cfi_disable_callers(start_retpoline, end_retpoline);
1012 cfi_seed = get_random_u32();
1014 ret = cfi_rand_preamble(start_cfi, end_cfi);
1018 ret = cfi_rand_callers(start_retpoline, end_retpoline);
1026 pr_info("Disabling CFI\n");
1030 ret = cfi_enable_callers(start_retpoline, end_retpoline);
1035 pr_info("Using kCFI\n");
1039 ret = cfi_rewrite_preamble(start_cfi, end_cfi);
1043 ret = cfi_rewrite_callers(start_retpoline, end_retpoline);
1048 pr_info("Using FineIBT CFI\n");
1056 pr_err("Something went horribly wrong trying to rewrite the CFI implementation.\n");
1061 static void __apply_fineibt(s32 *start_retpoline, s32 *end_retpoline,
1062 s32 *start_cfi, s32 *end_cfi, bool builtin)
1068 void apply_fineibt(s32 *start_retpoline, s32 *end_retpoline,
1069 s32 *start_cfi, s32 *end_cfi)
1071 return __apply_fineibt(start_retpoline, end_retpoline,
1073 /* .builtin = */ false);
static void alternatives_smp_lock(const s32 *start, const s32 *end,
				  u8 *text, u8 *text_end)
{
	const s32 *poff;

	for (poff = start; poff < end; poff++) {
		u8 *ptr = (u8 *)poff + *poff;

		if (!*poff || ptr < text || ptr >= text_end)
			continue;

		/* turn DS segment override prefix into lock prefix */
		if (*ptr == 0x3e)
			text_poke(ptr, ((unsigned char []){0xf0}), 1);
	}
}
static void alternatives_smp_unlock(const s32 *start, const s32 *end,
				    u8 *text, u8 *text_end)
{
	const s32 *poff;

	for (poff = start; poff < end; poff++) {
		u8 *ptr = (u8 *)poff + *poff;

		if (!*poff || ptr < text || ptr >= text_end)
			continue;

		/* turn lock prefix into DS segment override prefix */
		if (*ptr == 0xf0)
			text_poke(ptr, ((unsigned char []){0x3E}), 1);
	}
}
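/*
 * The %ds override (0x3e) is a no-op on these instructions, so a UP kernel
 * runs the same byte stream minus the bus lock; toggling a single byte
 * switches LOCK-prefixed atomics on and off.
 */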
struct smp_alt_module {
	/* the module owning these sites; NULL for the core kernel */
	struct module	*mod;
	char		*name;

	/* ptrs to lock prefixes */
	const s32	*locks;
	const s32	*locks_end;

	/* .text segment, needed to avoid patching init code ;) */
	u8		*text;
	u8		*text_end;

	struct list_head next;
};
1124 static LIST_HEAD(smp_alt_modules);
1125 static bool uniproc_patched = false; /* protected by text_mutex */
1127 void __init_or_module alternatives_smp_module_add(struct module *mod,
1129 void *locks, void *locks_end,
1130 void *text, void *text_end)
1132 struct smp_alt_module *smp;
1134 mutex_lock(&text_mutex);
1135 if (!uniproc_patched)
1138 if (num_possible_cpus() == 1)
1139 /* Don't bother remembering, we'll never have to undo it. */
1142 smp = kzalloc(sizeof(*smp), GFP_KERNEL);
1144 /* we'll run the (safe but slow) SMP code then ... */
1150 smp->locks_end = locks_end;
1152 smp->text_end = text_end;
1153 DPRINTK("locks %p -> %p, text %p -> %p, name %s\n",
1154 smp->locks, smp->locks_end,
1155 smp->text, smp->text_end, smp->name);
1157 list_add_tail(&smp->next, &smp_alt_modules);
1159 alternatives_smp_unlock(locks, locks_end, text, text_end);
1161 mutex_unlock(&text_mutex);
1164 void __init_or_module alternatives_smp_module_del(struct module *mod)
1166 struct smp_alt_module *item;
1168 mutex_lock(&text_mutex);
1169 list_for_each_entry(item, &smp_alt_modules, next) {
1170 if (mod != item->mod)
1172 list_del(&item->next);
1176 mutex_unlock(&text_mutex);
1179 void alternatives_enable_smp(void)
1181 struct smp_alt_module *mod;
1183 /* Why bother if there are no other CPUs? */
1184 BUG_ON(num_possible_cpus() == 1);
1186 mutex_lock(&text_mutex);
1188 if (uniproc_patched) {
1189 pr_info("switching to SMP code\n");
1190 BUG_ON(num_online_cpus() != 1);
1191 clear_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
1192 clear_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
1193 list_for_each_entry(mod, &smp_alt_modules, next)
1194 alternatives_smp_lock(mod->locks, mod->locks_end,
1195 mod->text, mod->text_end);
1196 uniproc_patched = false;
1198 mutex_unlock(&text_mutex);
1202 * Return 1 if the address range is reserved for SMP-alternatives.
1203 * Must hold text_mutex.
1205 int alternatives_text_reserved(void *start, void *end)
1207 struct smp_alt_module *mod;
1209 u8 *text_start = start;
1212 lockdep_assert_held(&text_mutex);
1214 list_for_each_entry(mod, &smp_alt_modules, next) {
1215 if (mod->text > text_end || mod->text_end < text_start)
1217 for (poff = mod->locks; poff < mod->locks_end; poff++) {
1218 const u8 *ptr = (const u8 *)poff + *poff;
1220 if (text_start <= ptr && text_end > ptr)
1227 #endif /* CONFIG_SMP */
1229 #ifdef CONFIG_PARAVIRT
1230 void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
1231 struct paravirt_patch_site *end)
1233 struct paravirt_patch_site *p;
1234 char insn_buff[MAX_PATCH_LEN];
1236 for (p = start; p < end; p++) {
1239 BUG_ON(p->len > MAX_PATCH_LEN);
1240 /* prep the buffer with the original instructions */
1241 memcpy(insn_buff, p->instr, p->len);
1242 used = paravirt_patch(p->type, insn_buff, (unsigned long)p->instr, p->len);
1244 BUG_ON(used > p->len);
1246 /* Pad the rest with nops */
1247 add_nops(insn_buff + used, p->len - used);
1248 text_poke_early(p->instr, insn_buff, p->len);
1251 extern struct paravirt_patch_site __start_parainstructions[],
1252 __stop_parainstructions[];
1253 #endif /* CONFIG_PARAVIRT */
1256 * Self-test for the INT3 based CALL emulation code.
1258 * This exercises int3_emulate_call() to make sure INT3 pt_regs are set up
1259 * properly and that there is a stack gap between the INT3 frame and the
1260 * previous context. Without this gap doing a virtual PUSH on the interrupted
1261 * stack would corrupt the INT3 IRET frame.
1263 * See entry_{32,64}.S for more details.
1267 * We define the int3_magic() function in assembly to control the calling
1268 * convention such that we can 'call' it from assembly.
1271 extern void int3_magic(unsigned int *ptr); /* defined in asm */
1274 " .pushsection .init.text, \"ax\", @progbits\n"
1275 " .type int3_magic, @function\n"
1278 " movl $1, (%" _ASM_ARG1 ")\n"
1280 " .size int3_magic, .-int3_magic\n"
1284 extern void int3_selftest_ip(void); /* defined in asm below */
1287 int3_exception_notify(struct notifier_block *self, unsigned long val, void *data)
1289 unsigned long selftest = (unsigned long)&int3_selftest_ip;
1290 struct die_args *args = data;
1291 struct pt_regs *regs = args->regs;
1293 OPTIMIZER_HIDE_VAR(selftest);
1295 if (!regs || user_mode(regs))
1298 if (val != DIE_INT3)
1301 if (regs->ip - INT3_INSN_SIZE != selftest)
1304 int3_emulate_call(regs, (unsigned long)&int3_magic);
1308 /* Must be noinline to ensure uniqueness of int3_selftest_ip. */
1309 static noinline void __init int3_selftest(void)
1311 static __initdata struct notifier_block int3_exception_nb = {
1312 .notifier_call = int3_exception_notify,
1313 .priority = INT_MAX-1, /* last */
1315 unsigned int val = 0;
1317 BUG_ON(register_die_notifier(&int3_exception_nb));
1320 * Basically: int3_magic(&val); but really complicated :-)
1322 * INT3 padded with NOP to CALL_INSN_SIZE. The int3_exception_nb
1323 * notifier above will emulate CALL for us.
1325 asm volatile ("int3_selftest_ip:\n\t"
1327 " int3; nop; nop; nop; nop\n\t"
1328 : ASM_CALL_CONSTRAINT
1329 : __ASM_SEL_RAW(a, D) (&val)
1334 unregister_die_notifier(&int3_exception_nb);
1337 void __init alternative_instructions(void)
	 * The patching is not fully atomic, so try to avoid local
	 * interruptions that might execute the code being patched.
	 * Other CPUs are not running.
1349 * Don't stop machine check exceptions while patching.
1350 * MCEs only happen when something got corrupted and in this
1351 * case we must do something about the corruption.
1352 * Ignoring it is worse than an unlikely patching race.
	 * Also machine checks tend to be broadcast and if one CPU
	 * goes into machine check the others follow quickly, so we don't
	 * expect a machine check to cause undue problems during code patching.
1360 * Paravirt patching and alternative patching can be combined to
1361 * replace a function call with a short direct code sequence (e.g.
1362 * by setting a constant return value instead of doing that in an
1363 * external function).
1364 * In order to make this work the following sequence is required:
1365 * 1. set (artificial) features depending on used paravirt
1366 * functions which can later influence alternative patching
1367 * 2. apply paravirt patching (generally replacing an indirect
1368 * function call with a direct one)
1369 * 3. apply alternative patching (e.g. replacing a direct function
1370 * call with a custom code sequence)
1371 * Doing paravirt patching after alternative patching would clobber
1372 * the optimization of the custom code with a function call again.
1377 * First patch paravirt functions, such that we overwrite the indirect
1378 * call with the direct call.
1380 apply_paravirt(__parainstructions, __parainstructions_end);
1382 __apply_fineibt(__retpoline_sites, __retpoline_sites_end,
1383 __cfi_sites, __cfi_sites_end, true);
1386 * Rewrite the retpolines, must be done before alternatives since
1387 * those can rewrite the retpoline thunks.
1389 apply_retpolines(__retpoline_sites, __retpoline_sites_end);
1390 apply_returns(__return_sites, __return_sites_end);
1393 * Then patch alternatives, such that those paravirt calls that are in
1394 * alternatives can be overwritten by their immediate fragments.
1396 apply_alternatives(__alt_instructions, __alt_instructions_end);
1399 * Now all calls are established. Apply the call thunks if
1402 callthunks_patch_builtin_calls();
1404 apply_ibt_endbr(__ibt_endbr_seal, __ibt_endbr_seal_end);
	/* Patch to UP if other CPUs are not imminent. */
1408 if (!noreplace_smp && (num_present_cpus() == 1 || setup_max_cpus <= 1)) {
1409 uniproc_patched = true;
1410 alternatives_smp_module_add(NULL, "core kernel",
1411 __smp_locks, __smp_locks_end,
1415 if (!uniproc_patched || num_possible_cpus() == 1) {
1416 free_init_pages("SMP alternatives",
1417 (unsigned long)__smp_locks,
1418 (unsigned long)__smp_locks_end);
1423 alternatives_patched = 1;
1427 * text_poke_early - Update instructions on a live kernel at boot time
1428 * @addr: address to modify
1429 * @opcode: source of the copy
1430 * @len: length to copy
1432 * When you use this code to patch more than one byte of an instruction
1433 * you need to make sure that other CPUs cannot execute this code in parallel.
1434 * Also no thread must be currently preempted in the middle of these
1435 * instructions. And on the local CPU you need to be protected against NMI or
1436 * MCE handlers seeing an inconsistent instruction while you patch.
1438 void __init_or_module text_poke_early(void *addr, const void *opcode,
1441 unsigned long flags;
1443 if (boot_cpu_has(X86_FEATURE_NX) &&
1444 is_module_text_address((unsigned long)addr)) {
1446 * Modules text is marked initially as non-executable, so the
1447 * code cannot be running and speculative code-fetches are
1448 * prevented. Just change the code.
1450 memcpy(addr, opcode, len);
1452 local_irq_save(flags);
1453 memcpy(addr, opcode, len);
1454 local_irq_restore(flags);
1458 * Could also do a CLFLUSH here to speed up CPU recovery; but
1459 * that causes hangs on some VIA CPUs.
1465 struct mm_struct *mm;
 * Using a temporary mm makes it possible to set temporary mappings that are
 * not accessible by other CPUs. Such mappings are needed to perform sensitive
 * memory writes that override the kernel memory protections (e.g., W^X),
 * without exposing the temporary page-table mappings that are required for
 * these write operations to other CPUs. Using a temporary mm also avoids TLB
 * shootdowns when the mapping is torn down.
 *
 * Context: The temporary mm needs to be used exclusively by a single core. To
 *          harden security, IRQs must be disabled while the temporary mm is
 *          loaded, thereby preventing interrupt handler bugs from overriding
 *          the kernel memory protection.
1481 static inline temp_mm_state_t use_temporary_mm(struct mm_struct *mm)
1483 temp_mm_state_t temp_state;
1485 lockdep_assert_irqs_disabled();
1488 * Make sure not to be in TLB lazy mode, as otherwise we'll end up
1489 * with a stale address space WITHOUT being in lazy mode after
1490 * restoring the previous mm.
1492 if (this_cpu_read(cpu_tlbstate_shared.is_lazy))
1493 leave_mm(smp_processor_id());
1495 temp_state.mm = this_cpu_read(cpu_tlbstate.loaded_mm);
1496 switch_mm_irqs_off(NULL, mm, current);
1499 * If breakpoints are enabled, disable them while the temporary mm is
1500 * used. Userspace might set up watchpoints on addresses that are used
1501 * in the temporary mm, which would lead to wrong signals being sent or
1504 * Note that breakpoints are not disabled selectively, which also causes
1505 * kernel breakpoints (e.g., perf's) to be disabled. This might be
1506 * undesirable, but still seems reasonable as the code that runs in the
1507 * temporary mm should be short.
1509 if (hw_breakpoint_active())
1510 hw_breakpoint_disable();
1515 static inline void unuse_temporary_mm(temp_mm_state_t prev_state)
1517 lockdep_assert_irqs_disabled();
1518 switch_mm_irqs_off(NULL, prev_state.mm, current);
1521 * Restore the breakpoints if they were disabled before the temporary mm
1524 if (hw_breakpoint_active())
1525 hw_breakpoint_restore();
1528 __ro_after_init struct mm_struct *poking_mm;
1529 __ro_after_init unsigned long poking_addr;
1531 static void text_poke_memcpy(void *dst, const void *src, size_t len)
1533 memcpy(dst, src, len);
1536 static void text_poke_memset(void *dst, const void *src, size_t len)
1538 int c = *(const int *)src;
1540 memset(dst, c, len);
1543 typedef void text_poke_f(void *dst, const void *src, size_t len);
1545 static void *__text_poke(text_poke_f func, void *addr, const void *src, size_t len)
1547 bool cross_page_boundary = offset_in_page(addr) + len > PAGE_SIZE;
1548 struct page *pages[2] = {NULL};
1549 temp_mm_state_t prev;
1550 unsigned long flags;
1556 * While boot memory allocator is running we cannot use struct pages as
1557 * they are not yet initialized. There is no way to recover.
1559 BUG_ON(!after_bootmem);
1561 if (!core_kernel_text((unsigned long)addr)) {
1562 pages[0] = vmalloc_to_page(addr);
1563 if (cross_page_boundary)
1564 pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
1566 pages[0] = virt_to_page(addr);
1567 WARN_ON(!PageReserved(pages[0]));
1568 if (cross_page_boundary)
1569 pages[1] = virt_to_page(addr + PAGE_SIZE);
	/*
	 * If something went wrong, crash and burn since recovery paths are not
	 * implemented.
	 */
	BUG_ON(!pages[0] || (cross_page_boundary && !pages[1]));
1578 * Map the page without the global bit, as TLB flushing is done with
1579 * flush_tlb_mm_range(), which is intended for non-global PTEs.
1581 pgprot = __pgprot(pgprot_val(PAGE_KERNEL) & ~_PAGE_GLOBAL);
	 * The lock is not really needed, but using it avoids open-coding.
	 */
	ptep = get_locked_pte(poking_mm, poking_addr, &ptl);

	/*
	 * This must not fail; preallocated in poking_init().
	 */
	VM_BUG_ON(!ptep);
1593 local_irq_save(flags);
1595 pte = mk_pte(pages[0], pgprot);
1596 set_pte_at(poking_mm, poking_addr, ptep, pte);
1598 if (cross_page_boundary) {
1599 pte = mk_pte(pages[1], pgprot);
1600 set_pte_at(poking_mm, poking_addr + PAGE_SIZE, ptep + 1, pte);
1604 * Loading the temporary mm behaves as a compiler barrier, which
1605 * guarantees that the PTE will be set at the time memcpy() is done.
1607 prev = use_temporary_mm(poking_mm);
1609 kasan_disable_current();
1610 func((u8 *)poking_addr + offset_in_page(addr), src, len);
1611 kasan_enable_current();
1614 * Ensure that the PTE is only cleared after the instructions of memcpy
1615 * were issued by using a compiler barrier.
1619 pte_clear(poking_mm, poking_addr, ptep);
1620 if (cross_page_boundary)
1621 pte_clear(poking_mm, poking_addr + PAGE_SIZE, ptep + 1);
1624 * Loading the previous page-table hierarchy requires a serializing
1625 * instruction that already allows the core to see the updated version.
1626 * Xen-PV is assumed to serialize execution in a similar manner.
1628 unuse_temporary_mm(prev);
	 * Flushing the TLB might involve IPIs, which would require enabled
	 * IRQs; none are needed here because the temporary mm is no longer
	 * in use on any CPU at this point.
1634 flush_tlb_mm_range(poking_mm, poking_addr, poking_addr +
1635 (cross_page_boundary ? 2 : 1) * PAGE_SIZE,
1638 if (func == text_poke_memcpy) {
1640 * If the text does not match what we just wrote then something is
1641 * fundamentally screwy; there's nothing we can really do about that.
1643 BUG_ON(memcmp(addr, src, len));
1646 local_irq_restore(flags);
1647 pte_unmap_unlock(ptep, ptl);
1652 * text_poke - Update instructions on a live kernel
1653 * @addr: address to modify
1654 * @opcode: source of the copy
1655 * @len: length to copy
1657 * Only atomic text poke/set should be allowed when not doing early patching.
 * It means the size must be writable atomically and the address must be aligned
 * in a way that permits an atomic write. It also makes sure we fit on a single
 * page.
1662 * Note that the caller must ensure that if the modified code is part of a
1663 * module, the module would not be removed during poking. This can be achieved
1664 * by registering a module notifier, and ordering module removal and patching
1667 void *text_poke(void *addr, const void *opcode, size_t len)
1669 lockdep_assert_held(&text_mutex);
1671 return __text_poke(text_poke_memcpy, addr, opcode, len);
1675 * text_poke_kgdb - Update instructions on a live kernel by kgdb
1676 * @addr: address to modify
1677 * @opcode: source of the copy
1678 * @len: length to copy
1680 * Only atomic text poke/set should be allowed when not doing early patching.
 * It means the size must be writable atomically and the address must be aligned
 * in a way that permits an atomic write. It also makes sure we fit on a single
 * page.
1685 * Context: should only be used by kgdb, which ensures no other core is running,
1686 * despite the fact it does not hold the text_mutex.
1688 void *text_poke_kgdb(void *addr, const void *opcode, size_t len)
1690 return __text_poke(text_poke_memcpy, addr, opcode, len);
1693 void *text_poke_copy_locked(void *addr, const void *opcode, size_t len,
1696 unsigned long start = (unsigned long)addr;
1699 if (WARN_ON_ONCE(!core_ok && core_kernel_text(start)))
1702 while (patched < len) {
1703 unsigned long ptr = start + patched;
1706 s = min_t(size_t, PAGE_SIZE * 2 - offset_in_page(ptr), len - patched);
1708 __text_poke(text_poke_memcpy, (void *)ptr, opcode + patched, s);
1715 * text_poke_copy - Copy instructions into (an unused part of) RX memory
1716 * @addr: address to modify
1717 * @opcode: source of the copy
1718 * @len: length to copy, could be more than 2x PAGE_SIZE
1720 * Not safe against concurrent execution; useful for JITs to dump
1721 * new code blocks into unused regions of RX memory. Can be used in
1722 * conjunction with synchronize_rcu_tasks() to wait for existing
1723 * execution to quiesce after having made sure no existing functions
1724 * pointers are live.
1726 void *text_poke_copy(void *addr, const void *opcode, size_t len)
1728 mutex_lock(&text_mutex);
1729 addr = text_poke_copy_locked(addr, opcode, len, false);
1730 mutex_unlock(&text_mutex);
1735 * text_poke_set - memset into (an unused part of) RX memory
1736 * @addr: address to modify
1737 * @c: the byte to fill the area with
1738 * @len: length to copy, could be more than 2x PAGE_SIZE
1740 * This is useful to overwrite unused regions of RX memory with illegal
1743 void *text_poke_set(void *addr, int c, size_t len)
1745 unsigned long start = (unsigned long)addr;
1748 if (WARN_ON_ONCE(core_kernel_text(start)))
1751 mutex_lock(&text_mutex);
1752 while (patched < len) {
1753 unsigned long ptr = start + patched;
1756 s = min_t(size_t, PAGE_SIZE * 2 - offset_in_page(ptr), len - patched);
1758 __text_poke(text_poke_memset, (void *)ptr, (void *)&c, s);
1761 mutex_unlock(&text_mutex);
static void do_sync_core(void *info)
{
	sync_core();
}

void text_poke_sync(void)
{
	on_each_cpu(do_sync_core, NULL, 1);
}
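/*
 * sync_core() executes a serializing operation on every CPU, forcing each
 * core to re-fetch instruction bytes that may just have been rewritten.
 */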
1775 struct text_poke_loc {
1776 /* addr := _stext + rel_addr */
1781 const u8 text[POKE_MAX_OPCODE_SIZE];
1782 /* see text_poke_bp_batch() */
1786 struct bp_patching_desc {
1787 struct text_poke_loc *vec;
1792 static struct bp_patching_desc bp_desc;
1794 static __always_inline
1795 struct bp_patching_desc *try_get_desc(void)
1797 struct bp_patching_desc *desc = &bp_desc;
1799 if (!arch_atomic_inc_not_zero(&desc->refs))
1805 static __always_inline void put_desc(void)
1807 struct bp_patching_desc *desc = &bp_desc;
1809 smp_mb__before_atomic();
1810 arch_atomic_dec(&desc->refs);
1813 static __always_inline void *text_poke_addr(struct text_poke_loc *tp)
1815 return _stext + tp->rel_addr;
1818 static __always_inline int patch_cmp(const void *key, const void *elt)
1820 struct text_poke_loc *tp = (struct text_poke_loc *) elt;
1822 if (key < text_poke_addr(tp))
1824 if (key > text_poke_addr(tp))
1829 noinstr int poke_int3_handler(struct pt_regs *regs)
1831 struct bp_patching_desc *desc;
1832 struct text_poke_loc *tp;
1836 if (user_mode(regs))
1840 * Having observed our INT3 instruction, we now must observe
1841 * bp_desc with non-zero refcount:
1843 * bp_desc.refs = 1 INT3
1845 * write INT3 if (bp_desc.refs != 0)
1849 desc = try_get_desc();
1854 * Discount the INT3. See text_poke_bp_batch().
1856 ip = (void *) regs->ip - INT3_INSN_SIZE;
1859 * Skip the binary search if there is a single member in the vector.
1861 if (unlikely(desc->nr_entries > 1)) {
1862 tp = __inline_bsearch(ip, desc->vec, desc->nr_entries,
1863 sizeof(struct text_poke_loc),
1869 if (text_poke_addr(tp) != ip)
1875 switch (tp->opcode) {
1876 case INT3_INSN_OPCODE:
1878 * Someone poked an explicit INT3, they'll want to handle it,
1883 case RET_INSN_OPCODE:
1884 int3_emulate_ret(regs);
1887 case CALL_INSN_OPCODE:
1888 int3_emulate_call(regs, (long)ip + tp->disp);
1891 case JMP32_INSN_OPCODE:
1892 case JMP8_INSN_OPCODE:
1893 int3_emulate_jmp(regs, (long)ip + tp->disp);
1907 #define TP_VEC_MAX (PAGE_SIZE / sizeof(struct text_poke_loc))
1908 static struct text_poke_loc tp_vec[TP_VEC_MAX];
1909 static int tp_vec_nr;
1912 * text_poke_bp_batch() -- update instructions on live kernel on SMP
1913 * @tp: vector of instructions to patch
1914 * @nr_entries: number of entries in the vector
1916 * Modify multi-byte instruction by using int3 breakpoint on SMP.
1917 * We completely avoid stop_machine() here, and achieve the
1918 * synchronization using int3 breakpoint.
1920 * The way it is done:
1921 * - For each entry in the vector:
1922 * - add a int3 trap to the address that will be patched
1924 * - For each entry in the vector:
1925 * - update all but the first byte of the patched range
 *	- For each entry in the vector:
 *		- replace the first byte (int3) by the first byte of the
 *		  replacing opcode
 *		- sync cores
 */
1932 static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries)
1934 unsigned char int3 = INT3_INSN_OPCODE;
1938 lockdep_assert_held(&text_mutex);
1941 bp_desc.nr_entries = nr_entries;
1944 * Corresponds to the implicit memory barrier in try_get_desc() to
1945 * ensure reading a non-zero refcount provides up to date bp_desc data.
1947 atomic_set_release(&bp_desc.refs, 1);
1950 * Corresponding read barrier in int3 notifier for making sure the
1951 * nr_entries and handler are correctly ordered wrt. patching.
1956 * First step: add a int3 trap to the address that will be patched.
1958 for (i = 0; i < nr_entries; i++) {
1959 tp[i].old = *(u8 *)text_poke_addr(&tp[i]);
1960 text_poke(text_poke_addr(&tp[i]), &int3, INT3_INSN_SIZE);
1966 * Second step: update all but the first byte of the patched range.
1968 for (do_sync = 0, i = 0; i < nr_entries; i++) {
1969 u8 old[POKE_MAX_OPCODE_SIZE] = { tp[i].old, };
1970 int len = tp[i].len;
1972 if (len - INT3_INSN_SIZE > 0) {
1973 memcpy(old + INT3_INSN_SIZE,
1974 text_poke_addr(&tp[i]) + INT3_INSN_SIZE,
1975 len - INT3_INSN_SIZE);
1976 text_poke(text_poke_addr(&tp[i]) + INT3_INSN_SIZE,
1977 (const char *)tp[i].text + INT3_INSN_SIZE,
1978 len - INT3_INSN_SIZE);
1983 * Emit a perf event to record the text poke, primarily to
1984 * support Intel PT decoding which must walk the executable code
1985 * to reconstruct the trace. The flow up to here is:
1988 * - write instruction tail
1989 * At this point the actual control flow will be through the
1990 * INT3 and handler and not hit the old or new instruction.
1991 * Intel PT outputs FUP/TIP packets for the INT3, so the flow
1992 * can still be decoded. Subsequently:
1993 * - emit RECORD_TEXT_POKE with the new instruction
1995 * - write first byte
1997 * So before the text poke event timestamp, the decoder will see
1998 * either the old instruction flow or FUP/TIP of INT3. After the
1999 * text poke event timestamp, the decoder will see either the
2000 * new instruction flow or FUP/TIP of INT3. Thus decoders can
2001 * use the timestamp as the point at which to modify the
2003 * The old instruction is recorded so that the event can be
2004 * processed forwards or backwards.
2006 perf_event_text_poke(text_poke_addr(&tp[i]), old, len,
2012 * According to Intel, this core syncing is very likely
2013 * not necessary and we'd be safe even without it. But
2014 * better safe than sorry (plus there's not only Intel).
2020 * Third step: replace the first byte (int3) by the first byte of
2023 for (do_sync = 0, i = 0; i < nr_entries; i++) {
2024 if (tp[i].text[0] == INT3_INSN_OPCODE)
2027 text_poke(text_poke_addr(&tp[i]), tp[i].text, INT3_INSN_SIZE);
2035 * Remove and wait for refs to be zero.
2037 if (!atomic_dec_and_test(&bp_desc.refs))
2038 atomic_cond_read_acquire(&bp_desc.refs, !VAL);
2041 static void text_poke_loc_init(struct text_poke_loc *tp, void *addr,
2042 const void *opcode, size_t len, const void *emulate)
2047 memcpy((void *)tp->text, opcode, len);
2051 ret = insn_decode_kernel(&insn, emulate);
2054 tp->rel_addr = addr - (void *)_stext;
2056 tp->opcode = insn.opcode.bytes[0];
2058 switch (tp->opcode) {
2059 case RET_INSN_OPCODE:
2060 case JMP32_INSN_OPCODE:
2061 case JMP8_INSN_OPCODE:
2063 * Control flow instructions without implied execution of the
2064 * next instruction can be padded with INT3.
2066 for (i = insn.length; i < len; i++)
2067 BUG_ON(tp->text[i] != INT3_INSN_OPCODE);
2071 BUG_ON(len != insn.length);
2075 switch (tp->opcode) {
2076 case INT3_INSN_OPCODE:
2077 case RET_INSN_OPCODE:
2080 case CALL_INSN_OPCODE:
2081 case JMP32_INSN_OPCODE:
2082 case JMP8_INSN_OPCODE:
2083 tp->disp = insn.immediate.value;
2086 default: /* assume NOP */
2088 case 2: /* NOP2 -- emulate as JMP8+0 */
2089 BUG_ON(memcmp(emulate, x86_nops[len], len));
2090 tp->opcode = JMP8_INSN_OPCODE;
2094 case 5: /* NOP5 -- emulate as JMP32+0 */
2095 BUG_ON(memcmp(emulate, x86_nops[len], len));
2096 tp->opcode = JMP32_INSN_OPCODE;
2100 default: /* unknown instruction */
/*
 * We hard rely on the tp_vec being ordered; ensure this is so by flushing
 * early if needed.
 */
static bool tp_order_fail(void *addr)
2113 struct text_poke_loc *tp;
2118 if (!addr) /* force */
2121 tp = &tp_vec[tp_vec_nr - 1];
2122 if ((unsigned long)text_poke_addr(tp) > (unsigned long)addr)
2128 static void text_poke_flush(void *addr)
2130 if (tp_vec_nr == TP_VEC_MAX || tp_order_fail(addr)) {
2131 text_poke_bp_batch(tp_vec, tp_vec_nr);
2136 void text_poke_finish(void)
2138 text_poke_flush(NULL);
2141 void __ref text_poke_queue(void *addr, const void *opcode, size_t len, const void *emulate)
2143 struct text_poke_loc *tp;
2145 if (unlikely(system_state == SYSTEM_BOOTING)) {
2146 text_poke_early(addr, opcode, len);
2150 text_poke_flush(addr);
2152 tp = &tp_vec[tp_vec_nr++];
2153 text_poke_loc_init(tp, addr, opcode, len, emulate);
2157 * text_poke_bp() -- update instructions on live kernel on SMP
2158 * @addr: address to patch
2159 * @opcode: opcode of new instruction
2160 * @len: length to copy
2161 * @emulate: instruction to be emulated
 * Update a single instruction with a vector on the stack, avoiding
 * dynamically allocated memory. This function should be used when it is
 * not possible to allocate memory.
 */
2167 void __ref text_poke_bp(void *addr, const void *opcode, size_t len, const void *emulate)
2169 struct text_poke_loc tp;
	if (unlikely(system_state == SYSTEM_BOOTING)) {
		text_poke_early(addr, opcode, len);
		return;
	}

	text_poke_loc_init(&tp, addr, opcode, len, emulate);
	text_poke_bp_batch(&tp, 1);
}
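/*
 * Usage sketch (hypothetical "site" and "new_target", text_mutex held):
 *
 *	u8 insn[CALL_INSN_SIZE];
 *
 *	__text_gen_insn(insn, CALL_INSN_OPCODE, site, new_target,
 *			CALL_INSN_SIZE);
 *	text_poke_bp(site, insn, CALL_INSN_SIZE, NULL);
 */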