/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_TEXT_PATCHING_H
#define _ASM_X86_TEXT_PATCHING_H

#include <linux/types.h>
#include <linux/stddef.h>
#include <asm/ptrace.h>

/*
 * Currently the max observed size in the kernel code is
 * JUMP_LABEL_NOP_SIZE/RELATIVEJUMP_SIZE, both of which are 5 bytes.
 * Raise it if needed.
 */
#define POKE_MAX_OPCODE_SIZE	5

extern void text_poke_early(void *addr, const void *opcode, size_t len);

extern void apply_relocation(u8 *buf, size_t len, u8 *dest, u8 *src, size_t src_len);

/*
 * Clear and restore the kernel write-protection flag on the local CPU.
 * Allows the kernel to edit read-only pages.
 * Side-effect: any interrupt handler running between save and restore will
 * be able to write to read-only pages.
 *
 * Warning:
 * Code patching in the UP case is safe if NMIs and MCE handlers are stopped
 * and no thread can be preempted in the instructions being modified (no iret
 * to an invalid instruction possible), or if the instructions are changed
 * from one consistent state to another consistent state atomically.
 * On the local CPU you need to be protected against NMI or MCE handlers
 * seeing an inconsistent instruction while you patch.
 */
extern void *text_poke(void *addr, const void *opcode, size_t len);
extern void text_poke_sync(void);
extern void *text_poke_kgdb(void *addr, const void *opcode, size_t len);
extern void *text_poke_copy(void *addr, const void *opcode, size_t len);
extern void *text_poke_copy_locked(void *addr, const void *opcode, size_t len, bool core_ok);
extern void *text_poke_set(void *addr, int c, size_t len);
extern int poke_int3_handler(struct pt_regs *regs);
extern void text_poke_bp(void *addr, const void *opcode, size_t len, const void *emulate);

extern void text_poke_queue(void *addr, const void *opcode, size_t len, const void *emulate);
extern void text_poke_finish(void);

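/*
 * Illustrative sketch, not part of the header proper: patching a live
 * 5-byte CALL via the INT3 machinery. "patch_site" and "new_target" are
 * hypothetical names.
 *
 *	void *code = text_gen_insn(CALL_INSN_OPCODE, patch_site, new_target);
 *	text_poke_bp(patch_site, code, CALL_INSN_SIZE, NULL);
 *
 * text_poke_bp() first pokes an INT3 over the first byte, syncs all CPUs,
 * writes the remaining bytes, syncs again, and finally restores the first
 * byte. A CPU that hits the temporary INT3 lands in poke_int3_handler(),
 * which emulates the instruction (the new one when @emulate is NULL).
 */
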
#define INT3_INSN_SIZE		1
#define INT3_INSN_OPCODE	0xCC

#define RET_INSN_SIZE		1
#define RET_INSN_OPCODE		0xC3

#define CALL_INSN_SIZE		5
#define CALL_INSN_OPCODE	0xE8

#define JMP32_INSN_SIZE		5
#define JMP32_INSN_OPCODE	0xE9

#define JMP8_INSN_SIZE		2
#define JMP8_INSN_OPCODE	0xEB

#define DISP32_SIZE		4

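/*
 * Map an opcode byte to the total instruction length used by the patching
 * code; returns 0 for opcodes not listed above.
 */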
static __always_inline int text_opcode_size(u8 opcode)
{
	int size = 0;

#define __CASE(insn)							\
	case insn##_INSN_OPCODE: size = insn##_INSN_SIZE; break

	switch (opcode) {
	__CASE(INT3);
	__CASE(RET);
	__CASE(CALL);
	__CASE(JMP32);
	__CASE(JMP8);
	}

#undef __CASE

	return size;
}

union text_poke_insn {
	u8 text[POKE_MAX_OPCODE_SIZE];
	struct {
		u8 opcode;
		s32 disp;
	} __attribute__((packed));
};

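/*
 * Layout note (illustrative): for a 5-byte CALL generated below, the union
 * overlays text[] as opcode = 0xE8 in byte 0 and disp in bytes 1-4, i.e.
 * the standard "E8 rel32" encoding, with rel32 taken relative to the end
 * of the instruction.
 */
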
static __always_inline
void __text_gen_insn(void *buf, u8 opcode, const void *addr, const void *dest, int size)
{
	union text_poke_insn *insn = buf;

	BUG_ON(size < text_opcode_size(opcode));

	/*
	 * Hide the addresses to avoid the compiler folding in constants when
	 * referencing code; these can mess up annotations like
	 * ANNOTATE_NOENDBR.
	 */
	OPTIMIZER_HIDE_VAR(insn);
	OPTIMIZER_HIDE_VAR(addr);
	OPTIMIZER_HIDE_VAR(dest);

	insn->opcode = opcode;

	if (size > 1) {
		insn->disp = (long)dest - (long)(addr + size);
		if (size == 2) {
			/*
			 * Ensure that for JMP8 the displacement
			 * actually fits in a signed byte.
			 */
			BUG_ON((insn->disp >> 31) != (insn->disp >> 7));
		}
	}
}

static __always_inline
void *text_gen_insn(u8 opcode, const void *addr, const void *dest)
{
	static union text_poke_insn insn; /* per instance */
	__text_gen_insn(&insn, opcode, addr, dest, text_opcode_size(opcode));
	return &insn.text;
}
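
/*
 * Worked example (illustrative): with addr == 0x1000 and dest == 0x2000,
 * text_gen_insn(JMP32_INSN_OPCODE, addr, dest) emits E9 FB 0F 00 00:
 * disp = 0x2000 - (0x1000 + 5) = 0xffb, relative to the end of the 5-byte
 * JMP32. The buffer is static, but since the function is __always_inline
 * each call site gets its own copy.
 */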

extern int after_bootmem;
extern __ro_after_init struct mm_struct *poking_mm;
extern __ro_after_init unsigned long poking_addr;

#ifndef CONFIG_UML_X86
static __always_inline
void int3_emulate_jmp(struct pt_regs *regs, unsigned long ip)
{
	regs->ip = ip;
}

static __always_inline
void int3_emulate_push(struct pt_regs *regs, unsigned long val)
{
	/*
	 * The int3 handler in entry_64.S adds a gap between the
	 * stack where the breakpoint happened and the saving of
	 * pt_regs. We can extend the original stack because of
	 * this gap. See the idtentry macro's create_gap option.
	 *
	 * Similarly, entry_32.S will have a gap on the stack for (any)
	 * hardware exception and pt_regs; see FIXUP_FRAME.
	 */
	regs->sp -= sizeof(unsigned long);
	*(unsigned long *)regs->sp = val;
}

static __always_inline
unsigned long int3_emulate_pop(struct pt_regs *regs)
{
	unsigned long val = *(unsigned long *)regs->sp;
	regs->sp += sizeof(unsigned long);
	return val;
}

static __always_inline
void int3_emulate_call(struct pt_regs *regs, unsigned long func)
{
	int3_emulate_push(regs, regs->ip - INT3_INSN_SIZE + CALL_INSN_SIZE);
	int3_emulate_jmp(regs, func);
}
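
/*
 * Note on the return address above (illustrative): when the INT3 traps,
 * regs->ip already points one byte past the INT3, i.e. addr + 1. The
 * emulated CALL must return to addr + CALL_INSN_SIZE, hence
 * regs->ip - INT3_INSN_SIZE + CALL_INSN_SIZE.
 */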

static __always_inline
void int3_emulate_ret(struct pt_regs *regs)
{
	unsigned long ip = int3_emulate_pop(regs);
	int3_emulate_jmp(regs, ip);
}

static __always_inline
void int3_emulate_jcc(struct pt_regs *regs, u8 cc, unsigned long ip, unsigned long disp)
{
	static const unsigned long jcc_mask[6] = {
		[0] = X86_EFLAGS_OF,
		[1] = X86_EFLAGS_CF,
		[2] = X86_EFLAGS_ZF,
		[3] = X86_EFLAGS_CF | X86_EFLAGS_ZF,
		[4] = X86_EFLAGS_SF,
		[5] = X86_EFLAGS_PF,
	};

	bool invert = cc & 1;
	bool match;

	if (cc < 0xc) {
		match = regs->flags & jcc_mask[cc >> 1];
	} else {
		match = ((regs->flags & X86_EFLAGS_SF) >> X86_EFLAGS_SF_BIT) ^
			((regs->flags & X86_EFLAGS_OF) >> X86_EFLAGS_OF_BIT);
		if (cc >= 0xe)
			match = match || (regs->flags & X86_EFLAGS_ZF);
	}

	if ((match && !invert) || (!match && invert))
		ip += disp;

	int3_emulate_jmp(regs, ip);
}
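
/*
 * Background (illustrative): @cc is the low nibble of the Jcc opcode
 * (0x70 | cc in the short form, 0x0F 0x80 | cc in the near form). Bit 0
 * inverts the test; cc >> 1 selects the flag: O, C, Z, C|Z, S, P. Then
 * 0xc/0xd test SF != OF (less) and 0xe/0xf additionally OR in ZF (less
 * or equal). E.g. cc == 0x5 is JNE: ZF selected, result inverted.
 */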

#endif /* !CONFIG_UML_X86 */

#endif /* _ASM_X86_TEXT_PATCHING_H */