| 1 | // SPDX-License-Identifier: GPL-2.0-only |
| 2 | /* |
| 3 | * BPF JIT compiler |
| 4 | * |
| 5 | * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com) |
| 6 | * Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com |
| 7 | */ |
| 8 | #include <linux/netdevice.h> |
| 9 | #include <linux/filter.h> |
| 10 | #include <linux/if_vlan.h> |
| 11 | #include <linux/bpf.h> |
| 12 | #include <linux/memory.h> |
| 13 | #include <linux/sort.h> |
| 14 | #include <asm/extable.h> |
| 15 | #include <asm/ftrace.h> |
| 16 | #include <asm/set_memory.h> |
| 17 | #include <asm/nospec-branch.h> |
| 18 | #include <asm/text-patching.h> |
| 19 | #include <asm/unwind.h> |
| 20 | #include <asm/cfi.h> |
| 21 | |
| 22 | static bool all_callee_regs_used[4] = {true, true, true, true}; |
| 23 | |
| 24 | static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len) |
| 25 | { |
| 26 | if (len == 1) |
| 27 | *ptr = bytes; |
| 28 | else if (len == 2) |
| 29 | *(u16 *)ptr = bytes; |
| 30 | else { |
| 31 | *(u32 *)ptr = bytes; |
| 32 | barrier(); |
| 33 | } |
| 34 | return ptr + len; |
| 35 | } |
| 36 | |
| 37 | #define EMIT(bytes, len) \ |
| 38 | do { prog = emit_code(prog, bytes, len); } while (0) |
| 39 | |
| 40 | #define EMIT1(b1) EMIT(b1, 1) |
| 41 | #define EMIT2(b1, b2) EMIT((b1) + ((b2) << 8), 2) |
| 42 | #define EMIT3(b1, b2, b3) EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3) |
| 43 | #define EMIT4(b1, b2, b3, b4) EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4) |
| 44 | #define EMIT5(b1, b2, b3, b4, b5) \ |
| 45 | do { EMIT1(b1); EMIT4(b2, b3, b4, b5); } while (0) |
| 46 | |
| 47 | #define EMIT1_off32(b1, off) \ |
| 48 | do { EMIT1(b1); EMIT(off, 4); } while (0) |
| 49 | #define EMIT2_off32(b1, b2, off) \ |
| 50 | do { EMIT2(b1, b2); EMIT(off, 4); } while (0) |
| 51 | #define EMIT3_off32(b1, b2, b3, off) \ |
| 52 | do { EMIT3(b1, b2, b3); EMIT(off, 4); } while (0) |
| 53 | #define EMIT4_off32(b1, b2, b3, b4, off) \ |
| 54 | do { EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0) |
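| | /* |
| | * For illustration (not part of the original source): EMIT3(0x48, 0x89, 0xE5) |
| | * packs its first argument into the low byte of a u32 (0x00E58948 here), so |
| | * emit_code() stores the bytes 48 89 e5, i.e. 'mov rbp, rsp' as used in |
| | * emit_prologue(). Note that for len == 3 emit_code() writes a full u32 but |
| | * only advances the pointer by 3, so the spare byte is overwritten by the |
| | * next EMIT. |
| | */ |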
| 55 | |
| 56 | #ifdef CONFIG_X86_KERNEL_IBT |
| 57 | #define EMIT_ENDBR() EMIT(gen_endbr(), 4) |
| 58 | #define EMIT_ENDBR_POISON() EMIT(gen_endbr_poison(), 4) |
| 59 | #else |
| 60 | #define EMIT_ENDBR() |
| 61 | #define EMIT_ENDBR_POISON() |
| 62 | #endif |
| 63 | |
| 64 | static bool is_imm8(int value) |
| 65 | { |
| 66 | return value <= 127 && value >= -128; |
| 67 | } |
| 68 | |
| 69 | /* |
| 70 | * Let us limit the positive offset to be <= 123. |
| 71 | * This is to ensure eventual jit convergence for the following patterns: |
| 72 | * ... |
| 73 | * pass4, final_proglen=4391: |
| 74 | * ... |
| 75 | * 20e: 48 85 ff test rdi,rdi |
| 76 | * 211: 74 7d je 0x290 |
| 77 | * 213: 48 8b 77 00 mov rsi,QWORD PTR [rdi+0x0] |
| 78 | * ... |
| 79 | * 289: 48 85 ff test rdi,rdi |
| 80 | * 28c: 74 17 je 0x2a5 |
| 81 | * 28e: e9 7f ff ff ff jmp 0x212 |
| 82 | * 293: bf 03 00 00 00 mov edi,0x3 |
| 83 | * Note that insn at 0x211 is 2-byte cond jump insn for offset 0x7d (125) |
| 84 | * and insn at 0x28e is 5-byte jmp insn with offset -129. |
| 85 | * |
| 86 | * pass5, final_proglen=4392: |
| 87 | * ... |
| 88 | * 20e: 48 85 ff test rdi,rdi |
| 89 | * 211: 0f 84 80 00 00 00 je 0x297 |
| 90 | * 217: 48 8b 77 00 mov rsi,QWORD PTR [rdi+0x0] |
| 91 | * ... |
| 92 | * 28d: 48 85 ff test rdi,rdi |
| 93 | * 290: 74 1a je 0x2ac |
| 94 | * 292: eb 84 jmp 0x218 |
| 95 | * 294: bf 03 00 00 00 mov edi,0x3 |
| 96 | * Note that insn at 0x211 is 6-byte cond jump insn now since its offset |
| 97 | * becomes 0x80 based on previous round (0x293 - 0x213 = 0x80). |
| 98 | * At the same time, insn at 0x292 is a 2-byte insn since its offset is |
| 99 | * -124. |
| 100 | * |
| 101 | * pass6 will repeat the same code as in pass4 and this will prevent |
| 102 | * eventual convergence. |
| 103 | * |
| 104 | * To fix this issue, we need to break je (2->6 bytes) <-> jmp (5->2 bytes) |
| 105 | * cycle in the above. In the above example je offset <= 0x7c should work. |
| 106 | * |
| 107 | * For other cases, je <-> je needs offset <= 0x7b to avoid the non-convergence |
| 108 | * issue. For jmp <-> je and jmp <-> jmp cases, jmp offset <= 0x7c is enough to |
| 109 | * avoid it. |
| 110 | * |
| 111 | * Overall, let us limit the positive offset for 8bit cond/uncond jmp insn |
| 112 | * to maximum 123 (0x7b). This way, the jit pass can eventually converge. |
| 113 | */ |
| 114 | static bool is_imm8_jmp_offset(int value) |
| 115 | { |
| 116 | return value <= 123 && value >= -128; |
| 117 | } |
| 118 | |
| 119 | static bool is_simm32(s64 value) |
| 120 | { |
| 121 | return value == (s64)(s32)value; |
| 122 | } |
| 123 | |
| 124 | static bool is_uimm32(u64 value) |
| 125 | { |
| 126 | return value == (u64)(u32)value; |
| 127 | } |
| 128 | |
| 129 | /* mov dst, src */ |
| 130 | #define EMIT_mov(DST, SRC) \ |
| 131 | do { \ |
| 132 | if (DST != SRC) \ |
| 133 | EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC)); \ |
| 134 | } while (0) |
| 135 | |
| 136 | static int bpf_size_to_x86_bytes(int bpf_size) |
| 137 | { |
| 138 | if (bpf_size == BPF_W) |
| 139 | return 4; |
| 140 | else if (bpf_size == BPF_H) |
| 141 | return 2; |
| 142 | else if (bpf_size == BPF_B) |
| 143 | return 1; |
| 144 | else if (bpf_size == BPF_DW) |
| 145 | return 4; /* imm32 */ |
| 146 | else |
| 147 | return 0; |
| 148 | } |
| 149 | |
| 150 | /* |
| 151 | * List of x86 cond jumps opcodes (. + s8) |
| 152 | * Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32) |
| 153 | */ |
| 154 | #define X86_JB 0x72 |
| 155 | #define X86_JAE 0x73 |
| 156 | #define X86_JE 0x74 |
| 157 | #define X86_JNE 0x75 |
| 158 | #define X86_JBE 0x76 |
| 159 | #define X86_JA 0x77 |
| 160 | #define X86_JL 0x7C |
| 161 | #define X86_JGE 0x7D |
| 162 | #define X86_JLE 0x7E |
| 163 | #define X86_JG 0x7F |
| 164 | |
| 165 | /* Pick a register outside of BPF range for JIT internal work */ |
| 166 | #define AUX_REG (MAX_BPF_JIT_REG + 1) |
| 167 | #define X86_REG_R9 (MAX_BPF_JIT_REG + 2) |
| 168 | #define X86_REG_R12 (MAX_BPF_JIT_REG + 3) |
| 169 | |
| 170 | /* |
| 171 | * The following table maps BPF registers to x86-64 registers. |
| 172 | * |
| 173 | * x86-64 register R12 is not mapped to any BPF register, since as a base |
| 174 | * address register in load/store instructions it always needs an extra byte |
| 175 | * of encoding and is callee saved; the JIT keeps it for internal use (X86_REG_R12, e.g. the arena base). |
| 176 | * |
| 177 | * x86-64 register R9 is not used by BPF programs, but can be used by BPF |
| 178 | * trampoline. x86-64 register R10 is used for blinding (if enabled). |
| 179 | */ |
| 180 | static const int reg2hex[] = { |
| 181 | [BPF_REG_0] = 0, /* RAX */ |
| 182 | [BPF_REG_1] = 7, /* RDI */ |
| 183 | [BPF_REG_2] = 6, /* RSI */ |
| 184 | [BPF_REG_3] = 2, /* RDX */ |
| 185 | [BPF_REG_4] = 1, /* RCX */ |
| 186 | [BPF_REG_5] = 0, /* R8 */ |
| 187 | [BPF_REG_6] = 3, /* RBX callee saved */ |
| 188 | [BPF_REG_7] = 5, /* R13 callee saved */ |
| 189 | [BPF_REG_8] = 6, /* R14 callee saved */ |
| 190 | [BPF_REG_9] = 7, /* R15 callee saved */ |
| 191 | [BPF_REG_FP] = 5, /* RBP readonly */ |
| 192 | [BPF_REG_AX] = 2, /* R10 temp register */ |
| 193 | [AUX_REG] = 3, /* R11 temp register */ |
| 194 | [X86_REG_R9] = 1, /* R9 register, 6th function argument */ |
| 195 | [X86_REG_R12] = 4, /* R12 callee saved */ |
| 196 | }; |
| 197 | |
| 198 | static const int reg2pt_regs[] = { |
| 199 | [BPF_REG_0] = offsetof(struct pt_regs, ax), |
| 200 | [BPF_REG_1] = offsetof(struct pt_regs, di), |
| 201 | [BPF_REG_2] = offsetof(struct pt_regs, si), |
| 202 | [BPF_REG_3] = offsetof(struct pt_regs, dx), |
| 203 | [BPF_REG_4] = offsetof(struct pt_regs, cx), |
| 204 | [BPF_REG_5] = offsetof(struct pt_regs, r8), |
| 205 | [BPF_REG_6] = offsetof(struct pt_regs, bx), |
| 206 | [BPF_REG_7] = offsetof(struct pt_regs, r13), |
| 207 | [BPF_REG_8] = offsetof(struct pt_regs, r14), |
| 208 | [BPF_REG_9] = offsetof(struct pt_regs, r15), |
| 209 | }; |
| 210 | |
| 211 | /* |
| 212 | * is_ereg() == true if BPF register 'reg' maps to x86-64 r8..r15 |
| 213 | * which need extra byte of encoding. |
| 214 | * rax,rcx,...,rbp have simpler encoding |
| 215 | */ |
| 216 | static bool is_ereg(u32 reg) |
| 217 | { |
| 218 | return (1 << reg) & (BIT(BPF_REG_5) | |
| 219 | BIT(AUX_REG) | |
| 220 | BIT(BPF_REG_7) | |
| 221 | BIT(BPF_REG_8) | |
| 222 | BIT(BPF_REG_9) | |
| 223 | BIT(X86_REG_R9) | |
| 224 | BIT(X86_REG_R12) | |
| 225 | BIT(BPF_REG_AX)); |
| 226 | } |
| 227 | |
| 228 | /* |
| 229 | * is_ereg_8l() == true if BPF register 'reg' is mapped to access x86-64 |
| 230 | * lower 8-bit registers dil,sil,bpl,spl,r8b..r15b, which need extra byte |
| 231 | * of encoding. al,cl,dl,bl have simpler encoding. |
| 232 | */ |
| 233 | static bool is_ereg_8l(u32 reg) |
| 234 | { |
| 235 | return is_ereg(reg) || |
| 236 | (1 << reg) & (BIT(BPF_REG_1) | |
| 237 | BIT(BPF_REG_2) | |
| 238 | BIT(BPF_REG_FP)); |
| 239 | } |
| 240 | |
| 241 | static bool is_axreg(u32 reg) |
| 242 | { |
| 243 | return reg == BPF_REG_0; |
| 244 | } |
| 245 | |
| 246 | /* Add modifiers if 'reg' maps to x86-64 registers R8..R15 */ |
| 247 | static u8 add_1mod(u8 byte, u32 reg) |
| 248 | { |
| 249 | if (is_ereg(reg)) |
| 250 | byte |= 1; |
| 251 | return byte; |
| 252 | } |
| 253 | |
| 254 | static u8 add_2mod(u8 byte, u32 r1, u32 r2) |
| 255 | { |
| 256 | if (is_ereg(r1)) |
| 257 | byte |= 1; |
| 258 | if (is_ereg(r2)) |
| 259 | byte |= 4; |
| 260 | return byte; |
| 261 | } |
| 262 | |
| 263 | static u8 add_3mod(u8 byte, u32 r1, u32 r2, u32 index) |
| 264 | { |
| 265 | if (is_ereg(r1)) |
| 266 | byte |= 1; |
| 267 | if (is_ereg(index)) |
| 268 | byte |= 2; |
| 269 | if (is_ereg(r2)) |
| 270 | byte |= 4; |
| 271 | return byte; |
| 272 | } |
| 273 | |
| 274 | /* Encode 'dst_reg' register into x86-64 opcode 'byte' */ |
| 275 | static u8 add_1reg(u8 byte, u32 dst_reg) |
| 276 | { |
| 277 | return byte + reg2hex[dst_reg]; |
| 278 | } |
| 279 | |
| 280 | /* Encode 'dst_reg' and 'src_reg' registers into x86-64 opcode 'byte' */ |
| 281 | static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg) |
| 282 | { |
| 283 | return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3); |
| 284 | } |
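| | /* |
| | * Worked example (illustrative only): EMIT_mov(BPF_REG_7, BPF_REG_2) encodes |
| | * 'mov r13, rsi': add_2mod(0x48, dst, src) sets REX.B for r13 giving 0x49, |
| | * and add_2reg(0xC0, dst, src) yields 0xC0 + 5 + (6 << 3) = 0xF5, so the |
| | * emitted bytes are 49 89 f5. |
| | */ |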
| 285 | |
| 286 | /* Some 1-byte opcodes for binary ALU operations */ |
| 287 | static u8 simple_alu_opcodes[] = { |
| 288 | [BPF_ADD] = 0x01, |
| 289 | [BPF_SUB] = 0x29, |
| 290 | [BPF_AND] = 0x21, |
| 291 | [BPF_OR] = 0x09, |
| 292 | [BPF_XOR] = 0x31, |
| 293 | [BPF_LSH] = 0xE0, |
| 294 | [BPF_RSH] = 0xE8, |
| 295 | [BPF_ARSH] = 0xF8, |
| 296 | }; |
| 297 | |
| 298 | static void jit_fill_hole(void *area, unsigned int size) |
| 299 | { |
| 300 | /* Fill whole space with INT3 instructions */ |
| 301 | memset(area, 0xcc, size); |
| 302 | } |
| 303 | |
| 304 | int bpf_arch_text_invalidate(void *dst, size_t len) |
| 305 | { |
| 306 | return IS_ERR_OR_NULL(text_poke_set(dst, 0xcc, len)); |
| 307 | } |
| 308 | |
| 309 | struct jit_context { |
| 310 | int cleanup_addr; /* Epilogue code offset */ |
| 311 | |
| 312 | /* |
| 313 | * Program specific offsets of labels in the code; these rely on the |
| 314 | * JIT doing at least 2 passes, recording the position on the first |
| 315 | * pass, only to generate the correct offset on the second pass. |
| 316 | */ |
| 317 | int tail_call_direct_label; |
| 318 | int tail_call_indirect_label; |
| 319 | }; |
| 320 | |
| 321 | /* Maximum number of bytes emitted while JITing one eBPF insn */ |
| 322 | #define BPF_MAX_INSN_SIZE 128 |
| 323 | #define BPF_INSN_SAFETY 64 |
| 324 | |
| 325 | /* Number of bytes emit_patch() needs to generate instructions */ |
| 326 | #define X86_PATCH_SIZE 5 |
| 327 | /* Number of bytes that will be skipped on tailcall */ |
| 328 | #define X86_TAIL_CALL_OFFSET (12 + ENDBR_INSN_SIZE) |
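| | /* |
| | * Breakdown of the 12 bytes (see emit_prologue()): 5 for the patchable nop |
| | * reserved for the BPF trampoline hook, 3 for 'xor rax, rax' (or nop3), and |
| | * 4 for 'push rbp; mov rbp, rsp'. ENDBR_INSN_SIZE covers the ENDBR landing |
| | * pad emitted ahead of these bytes on CONFIG_X86_KERNEL_IBT builds. |
| | */ |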
| 329 | |
| 330 | static void push_r9(u8 **pprog) |
| 331 | { |
| 332 | u8 *prog = *pprog; |
| 333 | |
| 334 | EMIT2(0x41, 0x51); /* push r9 */ |
| 335 | *pprog = prog; |
| 336 | } |
| 337 | |
| 338 | static void pop_r9(u8 **pprog) |
| 339 | { |
| 340 | u8 *prog = *pprog; |
| 341 | |
| 342 | EMIT2(0x41, 0x59); /* pop r9 */ |
| 343 | *pprog = prog; |
| 344 | } |
| 345 | |
| 346 | static void push_r12(u8 **pprog) |
| 347 | { |
| 348 | u8 *prog = *pprog; |
| 349 | |
| 350 | EMIT2(0x41, 0x54); /* push r12 */ |
| 351 | *pprog = prog; |
| 352 | } |
| 353 | |
| 354 | static void push_callee_regs(u8 **pprog, bool *callee_regs_used) |
| 355 | { |
| 356 | u8 *prog = *pprog; |
| 357 | |
| 358 | if (callee_regs_used[0]) |
| 359 | EMIT1(0x53); /* push rbx */ |
| 360 | if (callee_regs_used[1]) |
| 361 | EMIT2(0x41, 0x55); /* push r13 */ |
| 362 | if (callee_regs_used[2]) |
| 363 | EMIT2(0x41, 0x56); /* push r14 */ |
| 364 | if (callee_regs_used[3]) |
| 365 | EMIT2(0x41, 0x57); /* push r15 */ |
| 366 | *pprog = prog; |
| 367 | } |
| 368 | |
| 369 | static void pop_r12(u8 **pprog) |
| 370 | { |
| 371 | u8 *prog = *pprog; |
| 372 | |
| 373 | EMIT2(0x41, 0x5C); /* pop r12 */ |
| 374 | *pprog = prog; |
| 375 | } |
| 376 | |
| 377 | static void pop_callee_regs(u8 **pprog, bool *callee_regs_used) |
| 378 | { |
| 379 | u8 *prog = *pprog; |
| 380 | |
| 381 | if (callee_regs_used[3]) |
| 382 | EMIT2(0x41, 0x5F); /* pop r15 */ |
| 383 | if (callee_regs_used[2]) |
| 384 | EMIT2(0x41, 0x5E); /* pop r14 */ |
| 385 | if (callee_regs_used[1]) |
| 386 | EMIT2(0x41, 0x5D); /* pop r13 */ |
| 387 | if (callee_regs_used[0]) |
| 388 | EMIT1(0x5B); /* pop rbx */ |
| 389 | *pprog = prog; |
| 390 | } |
| 391 | |
| 392 | static void emit_nops(u8 **pprog, int len) |
| 393 | { |
| 394 | u8 *prog = *pprog; |
| 395 | int i, noplen; |
| 396 | |
| 397 | while (len > 0) { |
| 398 | noplen = len; |
| 399 | |
| 400 | if (noplen > ASM_NOP_MAX) |
| 401 | noplen = ASM_NOP_MAX; |
| 402 | |
| 403 | for (i = 0; i < noplen; i++) |
| 404 | EMIT1(x86_nops[noplen][i]); |
| 405 | len -= noplen; |
| 406 | } |
| 407 | |
| 408 | *pprog = prog; |
| 409 | } |
| 410 | |
| 411 | /* |
| 412 | * Emit the various CFI preambles, see asm/cfi.h and the comments about FineIBT |
| 413 | * in arch/x86/kernel/alternative.c |
| 414 | */ |
| 415 | static int emit_call(u8 **prog, void *func, void *ip); |
| 416 | |
| 417 | static void emit_fineibt(u8 **pprog, u8 *ip, u32 hash, int arity) |
| 418 | { |
| 419 | u8 *prog = *pprog; |
| 420 | |
| 421 | EMIT_ENDBR(); |
| 422 | EMIT3_off32(0x41, 0x81, 0xea, hash); /* subl $hash, %r10d */ |
| 423 | if (cfi_bhi) { |
| 424 | emit_call(&prog, __bhi_args[arity], ip + 11); |
| 425 | } else { |
| 426 | EMIT2(0x75, 0xf9); /* jne.d8 .-7 */ |
| 427 | EMIT3(0x0f, 0x1f, 0x00); /* nop3 */ |
| 428 | } |
| 429 | EMIT_ENDBR_POISON(); |
| 430 | |
| 431 | *pprog = prog; |
| 432 | } |
| 433 | |
| 434 | static void emit_kcfi(u8 **pprog, u32 hash) |
| 435 | { |
| 436 | u8 *prog = *pprog; |
| 437 | |
| 438 | EMIT1_off32(0xb8, hash); /* movl $hash, %eax */ |
| 439 | #ifdef CONFIG_CALL_PADDING |
| 440 | EMIT1(0x90); |
| 441 | EMIT1(0x90); |
| 442 | EMIT1(0x90); |
| 443 | EMIT1(0x90); |
| 444 | EMIT1(0x90); |
| 445 | EMIT1(0x90); |
| 446 | EMIT1(0x90); |
| 447 | EMIT1(0x90); |
| 448 | EMIT1(0x90); |
| 449 | EMIT1(0x90); |
| 450 | EMIT1(0x90); |
| 451 | #endif |
| 452 | EMIT_ENDBR(); |
| 453 | |
| 454 | *pprog = prog; |
| 455 | } |
| 456 | |
| 457 | static void emit_cfi(u8 **pprog, u8 *ip, u32 hash, int arity) |
| 458 | { |
| 459 | u8 *prog = *pprog; |
| 460 | |
| 461 | switch (cfi_mode) { |
| 462 | case CFI_FINEIBT: |
| 463 | emit_fineibt(&prog, ip, hash, arity); |
| 464 | break; |
| 465 | |
| 466 | case CFI_KCFI: |
| 467 | emit_kcfi(&prog, hash); |
| 468 | break; |
| 469 | |
| 470 | default: |
| 471 | EMIT_ENDBR(); |
| 472 | break; |
| 473 | } |
| 474 | |
| 475 | *pprog = prog; |
| 476 | } |
| 477 | |
| 478 | static void emit_prologue_tail_call(u8 **pprog, bool is_subprog) |
| 479 | { |
| 480 | u8 *prog = *pprog; |
| 481 | |
| 482 | if (!is_subprog) { |
| 483 | /* cmp rax, MAX_TAIL_CALL_CNT */ |
| 484 | EMIT4(0x48, 0x83, 0xF8, MAX_TAIL_CALL_CNT); |
| 485 | EMIT2(X86_JA, 6); /* ja 6 */ |
| 486 | /* rax is tail_call_cnt if <= MAX_TAIL_CALL_CNT. |
| 487 | * case1: entry of main prog. |
| 488 | * case2: tail callee of main prog. |
| 489 | */ |
| 490 | EMIT1(0x50); /* push rax */ |
| 491 | /* Make rax as tail_call_cnt_ptr. */ |
| 492 | EMIT3(0x48, 0x89, 0xE0); /* mov rax, rsp */ |
| 493 | EMIT2(0xEB, 1); /* jmp 1 */ |
| 494 | /* rax is tail_call_cnt_ptr if > MAX_TAIL_CALL_CNT. |
| 495 | * case: tail callee of subprog. |
| 496 | */ |
| 497 | EMIT1(0x50); /* push rax */ |
| 498 | /* push tail_call_cnt_ptr */ |
| 499 | EMIT1(0x50); /* push rax */ |
| 500 | } else { /* is_subprog */ |
| 501 | /* rax is tail_call_cnt_ptr. */ |
| 502 | EMIT1(0x50); /* push rax */ |
| 503 | EMIT1(0x50); /* push rax */ |
| 504 | } |
| 505 | |
| 506 | *pprog = prog; |
| 507 | } |
| 508 | |
| 509 | /* |
| 510 | * Emit x86-64 prologue code for BPF program. |
| 511 | * bpf_tail_call helper will skip the first X86_TAIL_CALL_OFFSET bytes |
| 512 | * while jumping to another program |
| 513 | */ |
| 514 | static void emit_prologue(u8 **pprog, u8 *ip, u32 stack_depth, bool ebpf_from_cbpf, |
| 515 | bool tail_call_reachable, bool is_subprog, |
| 516 | bool is_exception_cb) |
| 517 | { |
| 518 | u8 *prog = *pprog; |
| 519 | |
| 520 | if (is_subprog) { |
| 521 | emit_cfi(&prog, ip, cfi_bpf_subprog_hash, 5); |
| 522 | } else { |
| 523 | emit_cfi(&prog, ip, cfi_bpf_hash, 1); |
| 524 | } |
| 525 | /* BPF trampoline can be made to work without these nops, |
| 526 | * but let's waste 5 bytes for now and optimize later |
| 527 | */ |
| 528 | emit_nops(&prog, X86_PATCH_SIZE); |
| 529 | if (!ebpf_from_cbpf) { |
| 530 | if (tail_call_reachable && !is_subprog) |
| 531 | /* When it's the entry of the whole tailcall context, |
| 532 | * zeroing rax means initialising tail_call_cnt. |
| 533 | */ |
| 534 | EMIT3(0x48, 0x31, 0xC0); /* xor rax, rax */ |
| 535 | else |
| 536 | /* Keep the same instruction layout. */ |
| 537 | emit_nops(&prog, 3); /* nop3 */ |
| 538 | } |
| 539 | /* Exception callback receives FP as third parameter */ |
| 540 | if (is_exception_cb) { |
| 541 | EMIT3(0x48, 0x89, 0xF4); /* mov rsp, rsi */ |
| 542 | EMIT3(0x48, 0x89, 0xD5); /* mov rbp, rdx */ |
| 543 | /* The main frame must have exception_boundary as true, so we |
| 544 | * first restore those callee-saved regs from stack, before |
| 545 | * reusing the stack frame. |
| 546 | */ |
| 547 | pop_callee_regs(&prog, all_callee_regs_used); |
| 548 | pop_r12(&prog); |
| 549 | /* Reset the stack frame. */ |
| 550 | EMIT3(0x48, 0x89, 0xEC); /* mov rsp, rbp */ |
| 551 | } else { |
| 552 | EMIT1(0x55); /* push rbp */ |
| 553 | EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */ |
| 554 | } |
| 555 | |
| 556 | /* X86_TAIL_CALL_OFFSET is here */ |
| 557 | EMIT_ENDBR(); |
| 558 | |
| 559 | /* sub rsp, rounded_stack_depth */ |
| 560 | if (stack_depth) |
| 561 | EMIT3_off32(0x48, 0x81, 0xEC, round_up(stack_depth, 8)); |
| 562 | if (tail_call_reachable) |
| 563 | emit_prologue_tail_call(&prog, is_subprog); |
| 564 | *pprog = prog; |
| 565 | } |
| 566 | |
| 567 | static int emit_patch(u8 **pprog, void *func, void *ip, u8 opcode) |
| 568 | { |
| 569 | u8 *prog = *pprog; |
| 570 | s64 offset; |
| 571 | |
| 572 | offset = func - (ip + X86_PATCH_SIZE); |
| 573 | if (!is_simm32(offset)) { |
| 574 | pr_err("Target call %p is out of range\n", func); |
| 575 | return -ERANGE; |
| 576 | } |
| 577 | EMIT1_off32(opcode, offset); |
| 578 | *pprog = prog; |
| 579 | return 0; |
| 580 | } |
| 581 | |
| 582 | static int emit_call(u8 **pprog, void *func, void *ip) |
| 583 | { |
| 584 | return emit_patch(pprog, func, ip, 0xE8); |
| 585 | } |
| 586 | |
| 587 | static int emit_rsb_call(u8 **pprog, void *func, void *ip) |
| 588 | { |
| 589 | OPTIMIZER_HIDE_VAR(func); |
| 590 | ip += x86_call_depth_emit_accounting(pprog, func, ip); |
| 591 | return emit_patch(pprog, func, ip, 0xE8); |
| 592 | } |
| 593 | |
| 594 | static int emit_jump(u8 **pprog, void *func, void *ip) |
| 595 | { |
| 596 | return emit_patch(pprog, func, ip, 0xE9); |
| 597 | } |
| 598 | |
| 599 | static int __bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t, |
| 600 | void *old_addr, void *new_addr) |
| 601 | { |
| 602 | const u8 *nop_insn = x86_nops[5]; |
| 603 | u8 old_insn[X86_PATCH_SIZE]; |
| 604 | u8 new_insn[X86_PATCH_SIZE]; |
| 605 | u8 *prog; |
| 606 | int ret; |
| 607 | |
| 608 | memcpy(old_insn, nop_insn, X86_PATCH_SIZE); |
| 609 | if (old_addr) { |
| 610 | prog = old_insn; |
| 611 | ret = t == BPF_MOD_CALL ? |
| 612 | emit_call(&prog, old_addr, ip) : |
| 613 | emit_jump(&prog, old_addr, ip); |
| 614 | if (ret) |
| 615 | return ret; |
| 616 | } |
| 617 | |
| 618 | memcpy(new_insn, nop_insn, X86_PATCH_SIZE); |
| 619 | if (new_addr) { |
| 620 | prog = new_insn; |
| 621 | ret = t == BPF_MOD_CALL ? |
| 622 | emit_call(&prog, new_addr, ip) : |
| 623 | emit_jump(&prog, new_addr, ip); |
| 624 | if (ret) |
| 625 | return ret; |
| 626 | } |
| 627 | |
| 628 | ret = -EBUSY; |
| 629 | mutex_lock(&text_mutex); |
| 630 | if (memcmp(ip, old_insn, X86_PATCH_SIZE)) |
| 631 | goto out; |
| 632 | ret = 1; |
| 633 | if (memcmp(ip, new_insn, X86_PATCH_SIZE)) { |
| 634 | smp_text_poke_single(ip, new_insn, X86_PATCH_SIZE, NULL); |
| 635 | ret = 0; |
| 636 | } |
| 637 | out: |
| 638 | mutex_unlock(&text_mutex); |
| 639 | return ret; |
| 640 | } |
| 641 | |
| 642 | int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t, |
| 643 | void *old_addr, void *new_addr) |
| 644 | { |
| 645 | if (!is_kernel_text((long)ip) && |
| 646 | !is_bpf_text_address((long)ip)) |
| 647 | /* BPF poking in modules is not supported */ |
| 648 | return -EINVAL; |
| 649 | |
| 650 | /* |
| 651 | * See emit_prologue(), for IBT builds the trampoline hook is preceded |
| 652 | * with an ENDBR instruction. |
| 653 | */ |
| 654 | if (is_endbr(ip)) |
| 655 | ip += ENDBR_INSN_SIZE; |
| 656 | |
| 657 | return __bpf_arch_text_poke(ip, t, old_addr, new_addr); |
| 658 | } |
| 659 | |
| 660 | #define EMIT_LFENCE() EMIT3(0x0F, 0xAE, 0xE8) |
| 661 | |
| 662 | static void emit_indirect_jump(u8 **pprog, int reg, u8 *ip) |
| 663 | { |
| 664 | u8 *prog = *pprog; |
| 665 | |
| 666 | if (cpu_feature_enabled(X86_FEATURE_INDIRECT_THUNK_ITS)) { |
| 667 | OPTIMIZER_HIDE_VAR(reg); |
| 668 | emit_jump(&prog, its_static_thunk(reg), ip); |
| 669 | } else if (cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) { |
| 670 | EMIT_LFENCE(); |
| 671 | EMIT2(0xFF, 0xE0 + reg); |
| 672 | } else if (cpu_feature_enabled(X86_FEATURE_RETPOLINE)) { |
| 673 | OPTIMIZER_HIDE_VAR(reg); |
| 674 | if (cpu_feature_enabled(X86_FEATURE_CALL_DEPTH)) |
| 675 | emit_jump(&prog, &__x86_indirect_jump_thunk_array[reg], ip); |
| 676 | else |
| 677 | emit_jump(&prog, &__x86_indirect_thunk_array[reg], ip); |
| 678 | } else { |
| 679 | EMIT2(0xFF, 0xE0 + reg); /* jmp *%\reg */ |
| 680 | if (IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) || IS_ENABLED(CONFIG_MITIGATION_SLS)) |
| 681 | EMIT1(0xCC); /* int3 */ |
| 682 | } |
| 683 | |
| 684 | *pprog = prog; |
| 685 | } |
| 686 | |
| 687 | static void emit_return(u8 **pprog, u8 *ip) |
| 688 | { |
| 689 | u8 *prog = *pprog; |
| 690 | |
| 691 | if (cpu_wants_rethunk()) { |
| 692 | emit_jump(&prog, x86_return_thunk, ip); |
| 693 | } else { |
| 694 | EMIT1(0xC3); /* ret */ |
| 695 | if (IS_ENABLED(CONFIG_MITIGATION_SLS)) |
| 696 | EMIT1(0xCC); /* int3 */ |
| 697 | } |
| 698 | |
| 699 | *pprog = prog; |
| 700 | } |
| 701 | |
| 702 | #define BPF_TAIL_CALL_CNT_PTR_STACK_OFF(stack) (-16 - round_up(stack, 8)) |
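| | /* |
| | * Stack layout sketch (see emit_prologue_tail_call()): after 'sub rsp, |
| | * rounded_stack_depth' the prologue pushes two more slots, so |
| | * [rbp - rounded_stack_depth - 8] holds tail_call_cnt (or a duplicate of |
| | * tail_call_cnt_ptr in the subprog case) and |
| | * [rbp - rounded_stack_depth - 16] always holds tail_call_cnt_ptr, which is |
| | * the slot this macro addresses. |
| | */ |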
| 703 | |
| 704 | /* |
| 705 | * Generate the following code: |
| 706 | * |
| 707 | * ... bpf_tail_call(void *ctx, struct bpf_array *array, u64 index) ... |
| 708 | * if (index >= array->map.max_entries) |
| 709 | * goto out; |
| 710 | * if ((*tcc_ptr)++ >= MAX_TAIL_CALL_CNT) |
| 711 | * goto out; |
| 712 | * prog = array->ptrs[index]; |
| 713 | * if (prog == NULL) |
| 714 | * goto out; |
| 715 | * goto *(prog->bpf_func + prologue_size); |
| 716 | * out: |
| 717 | */ |
| 718 | static void emit_bpf_tail_call_indirect(struct bpf_prog *bpf_prog, |
| 719 | u8 **pprog, bool *callee_regs_used, |
| 720 | u32 stack_depth, u8 *ip, |
| 721 | struct jit_context *ctx) |
| 722 | { |
| 723 | int tcc_ptr_off = BPF_TAIL_CALL_CNT_PTR_STACK_OFF(stack_depth); |
| 724 | u8 *prog = *pprog, *start = *pprog; |
| 725 | int offset; |
| 726 | |
| 727 | /* |
| 728 | * rdi - pointer to ctx |
| 729 | * rsi - pointer to bpf_array |
| 730 | * rdx - index in bpf_array |
| 731 | */ |
| 732 | |
| 733 | /* |
| 734 | * if (index >= array->map.max_entries) |
| 735 | * goto out; |
| 736 | */ |
| 737 | EMIT2(0x89, 0xD2); /* mov edx, edx */ |
| 738 | EMIT3(0x39, 0x56, /* cmp dword ptr [rsi + 16], edx */ |
| 739 | offsetof(struct bpf_array, map.max_entries)); |
| 740 | |
| 741 | offset = ctx->tail_call_indirect_label - (prog + 2 - start); |
| 742 | EMIT2(X86_JBE, offset); /* jbe out */ |
| 743 | |
| 744 | /* |
| 745 | * if ((*tcc_ptr)++ >= MAX_TAIL_CALL_CNT) |
| 746 | * goto out; |
| 747 | */ |
| 748 | EMIT3_off32(0x48, 0x8B, 0x85, tcc_ptr_off); /* mov rax, qword ptr [rbp - tcc_ptr_off] */ |
| 749 | EMIT4(0x48, 0x83, 0x38, MAX_TAIL_CALL_CNT); /* cmp qword ptr [rax], MAX_TAIL_CALL_CNT */ |
| 750 | |
| 751 | offset = ctx->tail_call_indirect_label - (prog + 2 - start); |
| 752 | EMIT2(X86_JAE, offset); /* jae out */ |
| 753 | |
| 754 | /* prog = array->ptrs[index]; */ |
| 755 | EMIT4_off32(0x48, 0x8B, 0x8C, 0xD6, /* mov rcx, [rsi + rdx * 8 + offsetof(...)] */ |
| 756 | offsetof(struct bpf_array, ptrs)); |
| 757 | |
| 758 | /* |
| 759 | * if (prog == NULL) |
| 760 | * goto out; |
| 761 | */ |
| 762 | EMIT3(0x48, 0x85, 0xC9); /* test rcx,rcx */ |
| 763 | |
| 764 | offset = ctx->tail_call_indirect_label - (prog + 2 - start); |
| 765 | EMIT2(X86_JE, offset); /* je out */ |
| 766 | |
| 767 | /* Inc tail_call_cnt if the slot is populated. */ |
| 768 | EMIT4(0x48, 0x83, 0x00, 0x01); /* add qword ptr [rax], 1 */ |
| 769 | |
| 770 | if (bpf_prog->aux->exception_boundary) { |
| 771 | pop_callee_regs(&prog, all_callee_regs_used); |
| 772 | pop_r12(&prog); |
| 773 | } else { |
| 774 | pop_callee_regs(&prog, callee_regs_used); |
| 775 | if (bpf_arena_get_kern_vm_start(bpf_prog->aux->arena)) |
| 776 | pop_r12(&prog); |
| 777 | } |
| 778 | |
| 779 | /* Pop tail_call_cnt_ptr. */ |
| 780 | EMIT1(0x58); /* pop rax */ |
| 781 | /* Pop tail_call_cnt, if it's main prog. |
| 782 | * Pop tail_call_cnt_ptr, if it's subprog. |
| 783 | */ |
| 784 | EMIT1(0x58); /* pop rax */ |
| 785 | if (stack_depth) |
| 786 | EMIT3_off32(0x48, 0x81, 0xC4, /* add rsp, sd */ |
| 787 | round_up(stack_depth, 8)); |
| 788 | |
| 789 | /* goto *(prog->bpf_func + X86_TAIL_CALL_OFFSET); */ |
| 790 | EMIT4(0x48, 0x8B, 0x49, /* mov rcx, qword ptr [rcx + 32] */ |
| 791 | offsetof(struct bpf_prog, bpf_func)); |
| 792 | EMIT4(0x48, 0x83, 0xC1, /* add rcx, X86_TAIL_CALL_OFFSET */ |
| 793 | X86_TAIL_CALL_OFFSET); |
| 794 | /* |
| 795 | * Now we're ready to jump into next BPF program |
| 796 | * rdi == ctx (1st arg) |
| 797 | * rcx == prog->bpf_func + X86_TAIL_CALL_OFFSET |
| 798 | */ |
| 799 | emit_indirect_jump(&prog, 1 /* rcx */, ip + (prog - start)); |
| 800 | |
| 801 | /* out: */ |
| 802 | ctx->tail_call_indirect_label = prog - start; |
| 803 | *pprog = prog; |
| 804 | } |
| 805 | |
| 806 | static void emit_bpf_tail_call_direct(struct bpf_prog *bpf_prog, |
| 807 | struct bpf_jit_poke_descriptor *poke, |
| 808 | u8 **pprog, u8 *ip, |
| 809 | bool *callee_regs_used, u32 stack_depth, |
| 810 | struct jit_context *ctx) |
| 811 | { |
| 812 | int tcc_ptr_off = BPF_TAIL_CALL_CNT_PTR_STACK_OFF(stack_depth); |
| 813 | u8 *prog = *pprog, *start = *pprog; |
| 814 | int offset; |
| 815 | |
| 816 | /* |
| 817 | * if ((*tcc_ptr)++ >= MAX_TAIL_CALL_CNT) |
| 818 | * goto out; |
| 819 | */ |
| 820 | EMIT3_off32(0x48, 0x8B, 0x85, tcc_ptr_off); /* mov rax, qword ptr [rbp - tcc_ptr_off] */ |
| 821 | EMIT4(0x48, 0x83, 0x38, MAX_TAIL_CALL_CNT); /* cmp qword ptr [rax], MAX_TAIL_CALL_CNT */ |
| 822 | |
| 823 | offset = ctx->tail_call_direct_label - (prog + 2 - start); |
| 824 | EMIT2(X86_JAE, offset); /* jae out */ |
| 825 | |
| 826 | poke->tailcall_bypass = ip + (prog - start); |
| 827 | poke->adj_off = X86_TAIL_CALL_OFFSET; |
| 828 | poke->tailcall_target = ip + ctx->tail_call_direct_label - X86_PATCH_SIZE; |
| 829 | poke->bypass_addr = (u8 *)poke->tailcall_target + X86_PATCH_SIZE; |
| 830 | |
| 831 | emit_jump(&prog, (u8 *)poke->tailcall_target + X86_PATCH_SIZE, |
| 832 | poke->tailcall_bypass); |
| 833 | |
| 834 | /* Inc tail_call_cnt if the slot is populated. */ |
| 835 | EMIT4(0x48, 0x83, 0x00, 0x01); /* add qword ptr [rax], 1 */ |
| 836 | |
| 837 | if (bpf_prog->aux->exception_boundary) { |
| 838 | pop_callee_regs(&prog, all_callee_regs_used); |
| 839 | pop_r12(&prog); |
| 840 | } else { |
| 841 | pop_callee_regs(&prog, callee_regs_used); |
| 842 | if (bpf_arena_get_kern_vm_start(bpf_prog->aux->arena)) |
| 843 | pop_r12(&prog); |
| 844 | } |
| 845 | |
| 846 | /* Pop tail_call_cnt_ptr. */ |
| 847 | EMIT1(0x58); /* pop rax */ |
| 848 | /* Pop tail_call_cnt, if it's main prog. |
| 849 | * Pop tail_call_cnt_ptr, if it's subprog. |
| 850 | */ |
| 851 | EMIT1(0x58); /* pop rax */ |
| 852 | if (stack_depth) |
| 853 | EMIT3_off32(0x48, 0x81, 0xC4, round_up(stack_depth, 8)); |
| 854 | |
| 855 | emit_nops(&prog, X86_PATCH_SIZE); |
| 856 | |
| 857 | /* out: */ |
| 858 | ctx->tail_call_direct_label = prog - start; |
| 859 | |
| 860 | *pprog = prog; |
| 861 | } |
| 862 | |
| 863 | static void bpf_tail_call_direct_fixup(struct bpf_prog *prog) |
| 864 | { |
| 865 | struct bpf_jit_poke_descriptor *poke; |
| 866 | struct bpf_array *array; |
| 867 | struct bpf_prog *target; |
| 868 | int i, ret; |
| 869 | |
| 870 | for (i = 0; i < prog->aux->size_poke_tab; i++) { |
| 871 | poke = &prog->aux->poke_tab[i]; |
| 872 | if (poke->aux && poke->aux != prog->aux) |
| 873 | continue; |
| 874 | |
| 875 | WARN_ON_ONCE(READ_ONCE(poke->tailcall_target_stable)); |
| 876 | |
| 877 | if (poke->reason != BPF_POKE_REASON_TAIL_CALL) |
| 878 | continue; |
| 879 | |
| 880 | array = container_of(poke->tail_call.map, struct bpf_array, map); |
| 881 | mutex_lock(&array->aux->poke_mutex); |
| 882 | target = array->ptrs[poke->tail_call.key]; |
| 883 | if (target) { |
| 884 | ret = __bpf_arch_text_poke(poke->tailcall_target, |
| 885 | BPF_MOD_JUMP, NULL, |
| 886 | (u8 *)target->bpf_func + |
| 887 | poke->adj_off); |
| 888 | BUG_ON(ret < 0); |
| 889 | ret = __bpf_arch_text_poke(poke->tailcall_bypass, |
| 890 | BPF_MOD_JUMP, |
| 891 | (u8 *)poke->tailcall_target + |
| 892 | X86_PATCH_SIZE, NULL); |
| 893 | BUG_ON(ret < 0); |
| 894 | } |
| 895 | WRITE_ONCE(poke->tailcall_target_stable, true); |
| 896 | mutex_unlock(&array->aux->poke_mutex); |
| 897 | } |
| 898 | } |
| 899 | |
| 900 | static void emit_mov_imm32(u8 **pprog, bool sign_propagate, |
| 901 | u32 dst_reg, const u32 imm32) |
| 902 | { |
| 903 | u8 *prog = *pprog; |
| 904 | u8 b1, b2, b3; |
| 905 | |
| 906 | /* |
| 907 | * Optimization: if imm32 is positive, use 'mov %eax, imm32' |
| 908 | * (which zero-extends imm32) to save 2 bytes. |
| 909 | */ |
| 910 | if (sign_propagate && (s32)imm32 < 0) { |
| 911 | /* 'mov %rax, imm32' sign extends imm32 */ |
| 912 | b1 = add_1mod(0x48, dst_reg); |
| 913 | b2 = 0xC7; |
| 914 | b3 = 0xC0; |
| 915 | EMIT3_off32(b1, b2, add_1reg(b3, dst_reg), imm32); |
| 916 | goto done; |
| 917 | } |
| 918 | |
| 919 | /* |
| 920 | * Optimization: if imm32 is zero, use 'xor %eax, %eax' |
| 921 | * to save 3 bytes. |
| 922 | */ |
| 923 | if (imm32 == 0) { |
| 924 | if (is_ereg(dst_reg)) |
| 925 | EMIT1(add_2mod(0x40, dst_reg, dst_reg)); |
| 926 | b2 = 0x31; /* xor */ |
| 927 | b3 = 0xC0; |
| 928 | EMIT2(b2, add_2reg(b3, dst_reg, dst_reg)); |
| 929 | goto done; |
| 930 | } |
| 931 | |
| 932 | /* mov %eax, imm32 */ |
| 933 | if (is_ereg(dst_reg)) |
| 934 | EMIT1(add_1mod(0x40, dst_reg)); |
| 935 | EMIT1_off32(add_1reg(0xB8, dst_reg), imm32); |
| 936 | done: |
| 937 | *pprog = prog; |
| 938 | } |
| 939 | |
| 940 | static void emit_mov_imm64(u8 **pprog, u32 dst_reg, |
| 941 | const u32 imm32_hi, const u32 imm32_lo) |
| 942 | { |
| 943 | u64 imm64 = ((u64)imm32_hi << 32) | (u32)imm32_lo; |
| 944 | u8 *prog = *pprog; |
| 945 | |
| 946 | if (is_uimm32(imm64)) { |
| 947 | /* |
| 948 | * For emitting plain u32, where sign bit must not be |
| 949 | * propagated LLVM tends to load imm64 over mov32 |
| 950 | * directly, so save couple of bytes by just doing |
| 951 | * 'mov %eax, imm32' instead. |
| 952 | */ |
| 953 | emit_mov_imm32(&prog, false, dst_reg, imm32_lo); |
| 954 | } else if (is_simm32(imm64)) { |
| 955 | emit_mov_imm32(&prog, true, dst_reg, imm32_lo); |
| 956 | } else { |
| 957 | /* movabsq rax, imm64 */ |
| 958 | EMIT2(add_1mod(0x48, dst_reg), add_1reg(0xB8, dst_reg)); |
| 959 | EMIT(imm32_lo, 4); |
| 960 | EMIT(imm32_hi, 4); |
| 961 | } |
| 962 | |
| 963 | *pprog = prog; |
| 964 | } |
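| | /* |
| | * Illustrative cases for BPF_REG_0: imm64 = 0xffffffff is uimm32 and takes |
| | * the 5-byte 'mov eax, imm32' path; imm64 = -1 is simm32 and takes the |
| | * 7-byte sign-extending 'mov rax, imm32' path; imm64 = 0x100000000 is |
| | * neither, so the full 48 b8 00 00 00 00 01 00 00 00 movabs is emitted. |
| | */ |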
| 965 | |
| 966 | static void emit_mov_reg(u8 **pprog, bool is64, u32 dst_reg, u32 src_reg) |
| 967 | { |
| 968 | u8 *prog = *pprog; |
| 969 | |
| 970 | if (is64) { |
| 971 | /* mov dst, src */ |
| 972 | EMIT_mov(dst_reg, src_reg); |
| 973 | } else { |
| 974 | /* mov32 dst, src */ |
| 975 | if (is_ereg(dst_reg) || is_ereg(src_reg)) |
| 976 | EMIT1(add_2mod(0x40, dst_reg, src_reg)); |
| 977 | EMIT2(0x89, add_2reg(0xC0, dst_reg, src_reg)); |
| 978 | } |
| 979 | |
| 980 | *pprog = prog; |
| 981 | } |
| 982 | |
| 983 | static void emit_movsx_reg(u8 **pprog, int num_bits, bool is64, u32 dst_reg, |
| 984 | u32 src_reg) |
| 985 | { |
| 986 | u8 *prog = *pprog; |
| 987 | |
| 988 | if (is64) { |
| 989 | /* movs[b,w,l]q dst, src */ |
| 990 | if (num_bits == 8) |
| 991 | EMIT4(add_2mod(0x48, src_reg, dst_reg), 0x0f, 0xbe, |
| 992 | add_2reg(0xC0, src_reg, dst_reg)); |
| 993 | else if (num_bits == 16) |
| 994 | EMIT4(add_2mod(0x48, src_reg, dst_reg), 0x0f, 0xbf, |
| 995 | add_2reg(0xC0, src_reg, dst_reg)); |
| 996 | else if (num_bits == 32) |
| 997 | EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x63, |
| 998 | add_2reg(0xC0, src_reg, dst_reg)); |
| 999 | } else { |
| 1000 | /* movs[b,w]l dst, src */ |
| 1001 | if (num_bits == 8) { |
| 1002 | EMIT4(add_2mod(0x40, src_reg, dst_reg), 0x0f, 0xbe, |
| 1003 | add_2reg(0xC0, src_reg, dst_reg)); |
| 1004 | } else if (num_bits == 16) { |
| 1005 | if (is_ereg(dst_reg) || is_ereg(src_reg)) |
| 1006 | EMIT1(add_2mod(0x40, src_reg, dst_reg)); |
| 1007 | EMIT3(add_2mod(0x0f, src_reg, dst_reg), 0xbf, |
| 1008 | add_2reg(0xC0, src_reg, dst_reg)); |
| 1009 | } |
| 1010 | } |
| 1011 | |
| 1012 | *pprog = prog; |
| 1013 | } |
| 1014 | |
| 1015 | /* Emit the suffix (ModR/M etc) for addressing *(ptr_reg + off) and val_reg */ |
| 1016 | static void emit_insn_suffix(u8 **pprog, u32 ptr_reg, u32 val_reg, int off) |
| 1017 | { |
| 1018 | u8 *prog = *pprog; |
| 1019 | |
| 1020 | if (is_imm8(off)) { |
| 1021 | /* 1-byte signed displacement. |
| 1022 | * |
| 1023 | * If off == 0 we could skip this and save one extra byte, but |
| 1024 | * special case of x86 R13 which always needs an offset is not |
| 1025 | * worth the hassle |
| 1026 | */ |
| 1027 | EMIT2(add_2reg(0x40, ptr_reg, val_reg), off); |
| 1028 | } else { |
| 1029 | /* 4-byte signed displacement */ |
| 1030 | EMIT1_off32(add_2reg(0x80, ptr_reg, val_reg), off); |
| 1031 | } |
| 1032 | *pprog = prog; |
| 1033 | } |
| 1034 | |
| 1035 | static void emit_insn_suffix_SIB(u8 **pprog, u32 ptr_reg, u32 val_reg, u32 index_reg, int off) |
| 1036 | { |
| 1037 | u8 *prog = *pprog; |
| 1038 | |
| 1039 | if (is_imm8(off)) { |
| 1040 | EMIT3(add_2reg(0x44, BPF_REG_0, val_reg), add_2reg(0, ptr_reg, index_reg) /* SIB */, off); |
| 1041 | } else { |
| 1042 | EMIT2_off32(add_2reg(0x84, BPF_REG_0, val_reg), add_2reg(0, ptr_reg, index_reg) /* SIB */, off); |
| 1043 | } |
| 1044 | *pprog = prog; |
| 1045 | } |
| 1046 | |
| 1047 | /* |
| 1048 | * Emit a REX byte if it will be necessary to address these registers |
| 1049 | */ |
| 1050 | static void maybe_emit_mod(u8 **pprog, u32 dst_reg, u32 src_reg, bool is64) |
| 1051 | { |
| 1052 | u8 *prog = *pprog; |
| 1053 | |
| 1054 | if (is64) |
| 1055 | EMIT1(add_2mod(0x48, dst_reg, src_reg)); |
| 1056 | else if (is_ereg(dst_reg) || is_ereg(src_reg)) |
| 1057 | EMIT1(add_2mod(0x40, dst_reg, src_reg)); |
| 1058 | *pprog = prog; |
| 1059 | } |
| 1060 | |
| 1061 | /* |
| 1062 | * Similar version of maybe_emit_mod() for a single register |
| 1063 | */ |
| 1064 | static void maybe_emit_1mod(u8 **pprog, u32 reg, bool is64) |
| 1065 | { |
| 1066 | u8 *prog = *pprog; |
| 1067 | |
| 1068 | if (is64) |
| 1069 | EMIT1(add_1mod(0x48, reg)); |
| 1070 | else if (is_ereg(reg)) |
| 1071 | EMIT1(add_1mod(0x40, reg)); |
| 1072 | *pprog = prog; |
| 1073 | } |
| 1074 | |
| 1075 | /* LDX: dst_reg = *(u8*)(src_reg + off) */ |
| 1076 | static void emit_ldx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off) |
| 1077 | { |
| 1078 | u8 *prog = *pprog; |
| 1079 | |
| 1080 | switch (size) { |
| 1081 | case BPF_B: |
| 1082 | /* Emit 'movzx rax, byte ptr [rax + off]' */ |
| 1083 | EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6); |
| 1084 | break; |
| 1085 | case BPF_H: |
| 1086 | /* Emit 'movzx rax, word ptr [rax + off]' */ |
| 1087 | EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7); |
| 1088 | break; |
| 1089 | case BPF_W: |
| 1090 | /* Emit 'mov eax, dword ptr [rax+0x14]' */ |
| 1091 | if (is_ereg(dst_reg) || is_ereg(src_reg)) |
| 1092 | EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B); |
| 1093 | else |
| 1094 | EMIT1(0x8B); |
| 1095 | break; |
| 1096 | case BPF_DW: |
| 1097 | /* Emit 'mov rax, qword ptr [rax+0x14]' */ |
| 1098 | EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B); |
| 1099 | break; |
| 1100 | } |
| 1101 | emit_insn_suffix(&prog, src_reg, dst_reg, off); |
| 1102 | *pprog = prog; |
| 1103 | } |
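| | /* |
| | * Illustrative encoding: emit_ldx(BPF_W, BPF_REG_0, BPF_REG_1, 0x14) needs no |
| | * REX byte (rax/rdi), so it emits 8b 47 14, i.e. |
| | * 'mov eax, dword ptr [rdi + 0x14]'; with r8..r15 or a 64-bit load the REX |
| | * prefix from add_2mod() is prepended first. |
| | */ |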
| 1104 | |
| 1105 | /* LDSX: dst_reg = *(s8*)(src_reg + off) */ |
| 1106 | static void emit_ldsx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off) |
| 1107 | { |
| 1108 | u8 *prog = *pprog; |
| 1109 | |
| 1110 | switch (size) { |
| 1111 | case BPF_B: |
| 1112 | /* Emit 'movsx rax, byte ptr [rax + off]' */ |
| 1113 | EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xBE); |
| 1114 | break; |
| 1115 | case BPF_H: |
| 1116 | /* Emit 'movsx rax, word ptr [rax + off]' */ |
| 1117 | EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xBF); |
| 1118 | break; |
| 1119 | case BPF_W: |
| 1120 | /* Emit 'movsx rax, dword ptr [rax+0x14]' */ |
| 1121 | EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x63); |
| 1122 | break; |
| 1123 | } |
| 1124 | emit_insn_suffix(&prog, src_reg, dst_reg, off); |
| 1125 | *pprog = prog; |
| 1126 | } |
| 1127 | |
| 1128 | static void emit_ldx_index(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, u32 index_reg, int off) |
| 1129 | { |
| 1130 | u8 *prog = *pprog; |
| 1131 | |
| 1132 | switch (size) { |
| 1133 | case BPF_B: |
| 1134 | /* movzx rax, byte ptr [rax + r12 + off] */ |
| 1135 | EMIT3(add_3mod(0x40, src_reg, dst_reg, index_reg), 0x0F, 0xB6); |
| 1136 | break; |
| 1137 | case BPF_H: |
| 1138 | /* movzx rax, word ptr [rax + r12 + off] */ |
| 1139 | EMIT3(add_3mod(0x40, src_reg, dst_reg, index_reg), 0x0F, 0xB7); |
| 1140 | break; |
| 1141 | case BPF_W: |
| 1142 | /* mov eax, dword ptr [rax + r12 + off] */ |
| 1143 | EMIT2(add_3mod(0x40, src_reg, dst_reg, index_reg), 0x8B); |
| 1144 | break; |
| 1145 | case BPF_DW: |
| 1146 | /* mov rax, qword ptr [rax + r12 + off] */ |
| 1147 | EMIT2(add_3mod(0x48, src_reg, dst_reg, index_reg), 0x8B); |
| 1148 | break; |
| 1149 | } |
| 1150 | emit_insn_suffix_SIB(&prog, src_reg, dst_reg, index_reg, off); |
| 1151 | *pprog = prog; |
| 1152 | } |
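| | /* |
| | * Illustrative encoding for the arena case: emit_ldx_index(BPF_DW, BPF_REG_0, |
| | * BPF_REG_1, X86_REG_R12, 0) emits 4a 8b 44 27 00, i.e. |
| | * 'mov rax, qword ptr [rdi + r12*1 + 0]': REX.X (0x02) comes from the r12 |
| | * index via add_3mod() and SIB byte 0x27 encodes base = rdi, index = r12. |
| | */ |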
| 1153 | |
| 1154 | static void emit_ldx_r12(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off) |
| 1155 | { |
| 1156 | emit_ldx_index(pprog, size, dst_reg, src_reg, X86_REG_R12, off); |
| 1157 | } |
| 1158 | |
| 1159 | /* STX: *(u8*)(dst_reg + off) = src_reg */ |
| 1160 | static void emit_stx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off) |
| 1161 | { |
| 1162 | u8 *prog = *pprog; |
| 1163 | |
| 1164 | switch (size) { |
| 1165 | case BPF_B: |
| 1166 | /* Emit 'mov byte ptr [rax + off], al' */ |
| 1167 | if (is_ereg(dst_reg) || is_ereg_8l(src_reg)) |
| 1168 | /* Add extra byte for eregs or SIL,DIL,BPL in src_reg */ |
| 1169 | EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88); |
| 1170 | else |
| 1171 | EMIT1(0x88); |
| 1172 | break; |
| 1173 | case BPF_H: |
| 1174 | if (is_ereg(dst_reg) || is_ereg(src_reg)) |
| 1175 | EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89); |
| 1176 | else |
| 1177 | EMIT2(0x66, 0x89); |
| 1178 | break; |
| 1179 | case BPF_W: |
| 1180 | if (is_ereg(dst_reg) || is_ereg(src_reg)) |
| 1181 | EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89); |
| 1182 | else |
| 1183 | EMIT1(0x89); |
| 1184 | break; |
| 1185 | case BPF_DW: |
| 1186 | EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89); |
| 1187 | break; |
| 1188 | } |
| 1189 | emit_insn_suffix(&prog, dst_reg, src_reg, off); |
| 1190 | *pprog = prog; |
| 1191 | } |
| 1192 | |
| 1193 | /* STX: *(u8*)(dst_reg + index_reg + off) = src_reg */ |
| 1194 | static void emit_stx_index(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, u32 index_reg, int off) |
| 1195 | { |
| 1196 | u8 *prog = *pprog; |
| 1197 | |
| 1198 | switch (size) { |
| 1199 | case BPF_B: |
| 1200 | /* mov byte ptr [rax + r12 + off], al */ |
| 1201 | EMIT2(add_3mod(0x40, dst_reg, src_reg, index_reg), 0x88); |
| 1202 | break; |
| 1203 | case BPF_H: |
| 1204 | /* mov word ptr [rax + r12 + off], ax */ |
| 1205 | EMIT3(0x66, add_3mod(0x40, dst_reg, src_reg, index_reg), 0x89); |
| 1206 | break; |
| 1207 | case BPF_W: |
| 1208 | /* mov dword ptr [rax + r12 + off], eax */ |
| 1209 | EMIT2(add_3mod(0x40, dst_reg, src_reg, index_reg), 0x89); |
| 1210 | break; |
| 1211 | case BPF_DW: |
| 1212 | /* mov qword ptr [rax + r12 + off], rax */ |
| 1213 | EMIT2(add_3mod(0x48, dst_reg, src_reg, index_reg), 0x89); |
| 1214 | break; |
| 1215 | } |
| 1216 | emit_insn_suffix_SIB(&prog, dst_reg, src_reg, index_reg, off); |
| 1217 | *pprog = prog; |
| 1218 | } |
| 1219 | |
| 1220 | static void emit_stx_r12(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off) |
| 1221 | { |
| 1222 | emit_stx_index(pprog, size, dst_reg, src_reg, X86_REG_R12, off); |
| 1223 | } |
| 1224 | |
| 1225 | /* ST: *(u8*)(dst_reg + index_reg + off) = imm32 */ |
| 1226 | static void emit_st_index(u8 **pprog, u32 size, u32 dst_reg, u32 index_reg, int off, int imm) |
| 1227 | { |
| 1228 | u8 *prog = *pprog; |
| 1229 | |
| 1230 | switch (size) { |
| 1231 | case BPF_B: |
| 1232 | /* mov byte ptr [rax + r12 + off], imm8 */ |
| 1233 | EMIT2(add_3mod(0x40, dst_reg, 0, index_reg), 0xC6); |
| 1234 | break; |
| 1235 | case BPF_H: |
| 1236 | /* mov word ptr [rax + r12 + off], imm16 */ |
| 1237 | EMIT3(0x66, add_3mod(0x40, dst_reg, 0, index_reg), 0xC7); |
| 1238 | break; |
| 1239 | case BPF_W: |
| 1240 | /* mov dword ptr [rax + r12 + off], imm32 */ |
| 1241 | EMIT2(add_3mod(0x40, dst_reg, 0, index_reg), 0xC7); |
| 1242 | break; |
| 1243 | case BPF_DW: |
| 1244 | /* mov qword ptr [rax + r12 + off], imm32 */ |
| 1245 | EMIT2(add_3mod(0x48, dst_reg, 0, index_reg), 0xC7); |
| 1246 | break; |
| 1247 | } |
| 1248 | emit_insn_suffix_SIB(&prog, dst_reg, 0, index_reg, off); |
| 1249 | EMIT(imm, bpf_size_to_x86_bytes(size)); |
| 1250 | *pprog = prog; |
| 1251 | } |
| 1252 | |
| 1253 | static void emit_st_r12(u8 **pprog, u32 size, u32 dst_reg, int off, int imm) |
| 1254 | { |
| 1255 | emit_st_index(pprog, size, dst_reg, X86_REG_R12, off, imm); |
| 1256 | } |
| 1257 | |
| 1258 | static int emit_atomic_rmw(u8 **pprog, u32 atomic_op, |
| 1259 | u32 dst_reg, u32 src_reg, s16 off, u8 bpf_size) |
| 1260 | { |
| 1261 | u8 *prog = *pprog; |
| 1262 | |
| 1263 | EMIT1(0xF0); /* lock prefix */ |
| 1264 | |
| 1265 | maybe_emit_mod(&prog, dst_reg, src_reg, bpf_size == BPF_DW); |
| 1266 | |
| 1267 | /* emit opcode */ |
| 1268 | switch (atomic_op) { |
| 1269 | case BPF_ADD: |
| 1270 | case BPF_AND: |
| 1271 | case BPF_OR: |
| 1272 | case BPF_XOR: |
| 1273 | /* lock *(u32/u64*)(dst_reg + off) <op>= src_reg */ |
| 1274 | EMIT1(simple_alu_opcodes[atomic_op]); |
| 1275 | break; |
| 1276 | case BPF_ADD | BPF_FETCH: |
| 1277 | /* src_reg = atomic_fetch_add(dst_reg + off, src_reg); */ |
| 1278 | EMIT2(0x0F, 0xC1); |
| 1279 | break; |
| 1280 | case BPF_XCHG: |
| 1281 | /* src_reg = atomic_xchg(dst_reg + off, src_reg); */ |
| 1282 | EMIT1(0x87); |
| 1283 | break; |
| 1284 | case BPF_CMPXCHG: |
| 1285 | /* r0 = atomic_cmpxchg(dst_reg + off, r0, src_reg); */ |
| 1286 | EMIT2(0x0F, 0xB1); |
| 1287 | break; |
| 1288 | default: |
| 1289 | pr_err("bpf_jit: unknown atomic opcode %02x\n", atomic_op); |
| 1290 | return -EFAULT; |
| 1291 | } |
| 1292 | |
| 1293 | emit_insn_suffix(&prog, dst_reg, src_reg, off); |
| 1294 | |
| 1295 | *pprog = prog; |
| 1296 | return 0; |
| 1297 | } |
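| | /* |
| | * Illustrative encoding: emit_atomic_rmw(&prog, BPF_ADD | BPF_FETCH, |
| | * BPF_REG_1, BPF_REG_0, 0, BPF_DW) emits f0 48 0f c1 47 00, i.e. |
| | * 'lock xadd qword ptr [rdi + 0], rax', so the fetched old value is returned |
| | * in the source register as described above. |
| | */ |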
| 1298 | |
| 1299 | static int emit_atomic_rmw_index(u8 **pprog, u32 atomic_op, u32 size, |
| 1300 | u32 dst_reg, u32 src_reg, u32 index_reg, |
| 1301 | int off) |
| 1302 | { |
| 1303 | u8 *prog = *pprog; |
| 1304 | |
| 1305 | EMIT1(0xF0); /* lock prefix */ |
| 1306 | switch (size) { |
| 1307 | case BPF_W: |
| 1308 | EMIT1(add_3mod(0x40, dst_reg, src_reg, index_reg)); |
| 1309 | break; |
| 1310 | case BPF_DW: |
| 1311 | EMIT1(add_3mod(0x48, dst_reg, src_reg, index_reg)); |
| 1312 | break; |
| 1313 | default: |
| 1314 | pr_err("bpf_jit: 1- and 2-byte RMW atomics are not supported\n"); |
| 1315 | return -EFAULT; |
| 1316 | } |
| 1317 | |
| 1318 | /* emit opcode */ |
| 1319 | switch (atomic_op) { |
| 1320 | case BPF_ADD: |
| 1321 | case BPF_AND: |
| 1322 | case BPF_OR: |
| 1323 | case BPF_XOR: |
| 1324 | /* lock *(u32/u64*)(dst_reg + idx_reg + off) <op>= src_reg */ |
| 1325 | EMIT1(simple_alu_opcodes[atomic_op]); |
| 1326 | break; |
| 1327 | case BPF_ADD | BPF_FETCH: |
| 1328 | /* src_reg = atomic_fetch_add(dst_reg + idx_reg + off, src_reg); */ |
| 1329 | EMIT2(0x0F, 0xC1); |
| 1330 | break; |
| 1331 | case BPF_XCHG: |
| 1332 | /* src_reg = atomic_xchg(dst_reg + idx_reg + off, src_reg); */ |
| 1333 | EMIT1(0x87); |
| 1334 | break; |
| 1335 | case BPF_CMPXCHG: |
| 1336 | /* r0 = atomic_cmpxchg(dst_reg + idx_reg + off, r0, src_reg); */ |
| 1337 | EMIT2(0x0F, 0xB1); |
| 1338 | break; |
| 1339 | default: |
| 1340 | pr_err("bpf_jit: unknown atomic opcode %02x\n", atomic_op); |
| 1341 | return -EFAULT; |
| 1342 | } |
| 1343 | emit_insn_suffix_SIB(&prog, dst_reg, src_reg, index_reg, off); |
| 1344 | *pprog = prog; |
| 1345 | return 0; |
| 1346 | } |
| 1347 | |
| 1348 | static int emit_atomic_ld_st(u8 **pprog, u32 atomic_op, u32 dst_reg, |
| 1349 | u32 src_reg, s16 off, u8 bpf_size) |
| 1350 | { |
| 1351 | switch (atomic_op) { |
| 1352 | case BPF_LOAD_ACQ: |
| 1353 | /* dst_reg = smp_load_acquire(src_reg + off16) */ |
| 1354 | emit_ldx(pprog, bpf_size, dst_reg, src_reg, off); |
| 1355 | break; |
| 1356 | case BPF_STORE_REL: |
| 1357 | /* smp_store_release(dst_reg + off16, src_reg) */ |
| 1358 | emit_stx(pprog, bpf_size, dst_reg, src_reg, off); |
| 1359 | break; |
| 1360 | default: |
| 1361 | pr_err("bpf_jit: unknown atomic load/store opcode %02x\n", |
| 1362 | atomic_op); |
| 1363 | return -EFAULT; |
| 1364 | } |
| 1365 | |
| 1366 | return 0; |
| 1367 | } |
| 1368 | |
| 1369 | static int emit_atomic_ld_st_index(u8 **pprog, u32 atomic_op, u32 size, |
| 1370 | u32 dst_reg, u32 src_reg, u32 index_reg, |
| 1371 | int off) |
| 1372 | { |
| 1373 | switch (atomic_op) { |
| 1374 | case BPF_LOAD_ACQ: |
| 1375 | /* dst_reg = smp_load_acquire(src_reg + idx_reg + off16) */ |
| 1376 | emit_ldx_index(pprog, size, dst_reg, src_reg, index_reg, off); |
| 1377 | break; |
| 1378 | case BPF_STORE_REL: |
| 1379 | /* smp_store_release(dst_reg + idx_reg + off16, src_reg) */ |
| 1380 | emit_stx_index(pprog, size, dst_reg, src_reg, index_reg, off); |
| 1381 | break; |
| 1382 | default: |
| 1383 | pr_err("bpf_jit: unknown atomic load/store opcode %02x\n", |
| 1384 | atomic_op); |
| 1385 | return -EFAULT; |
| 1386 | } |
| 1387 | |
| 1388 | return 0; |
| 1389 | } |
| 1390 | |
| 1391 | #define DONT_CLEAR 1 |
| 1392 | |
| 1393 | bool ex_handler_bpf(const struct exception_table_entry *x, struct pt_regs *regs) |
| 1394 | { |
| 1395 | u32 reg = x->fixup >> 8; |
| 1396 | |
| 1397 | /* jump over faulting load and clear dest register */ |
| 1398 | if (reg != DONT_CLEAR) |
| 1399 | *(unsigned long *)((void *)regs + reg) = 0; |
| 1400 | regs->ip += x->fixup & 0xff; |
| 1401 | return true; |
| 1402 | } |
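| | /* |
| | * Per the handler above, the JIT-built fixup word is expected to carry the |
| | * number of bytes to skip past the faulting access in its low byte and the |
| | * pt_regs byte offset of the destination register to clear in the remaining |
| | * bits, with DONT_CLEAR meaning there is no register to zero. |
| | */ |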
| 1403 | |
| 1404 | static void detect_reg_usage(struct bpf_insn *insn, int insn_cnt, |
| 1405 | bool *regs_used) |
| 1406 | { |
| 1407 | int i; |
| 1408 | |
| 1409 | for (i = 1; i <= insn_cnt; i++, insn++) { |
| 1410 | if (insn->dst_reg == BPF_REG_6 || insn->src_reg == BPF_REG_6) |
| 1411 | regs_used[0] = true; |
| 1412 | if (insn->dst_reg == BPF_REG_7 || insn->src_reg == BPF_REG_7) |
| 1413 | regs_used[1] = true; |
| 1414 | if (insn->dst_reg == BPF_REG_8 || insn->src_reg == BPF_REG_8) |
| 1415 | regs_used[2] = true; |
| 1416 | if (insn->dst_reg == BPF_REG_9 || insn->src_reg == BPF_REG_9) |
| 1417 | regs_used[3] = true; |
| 1418 | } |
| 1419 | } |
| 1420 | |
| 1421 | /* emit the 3-byte VEX prefix |
| 1422 | * |
| 1423 | * r: same as rex.r, extra bit for ModRM reg field |
| 1424 | * x: same as rex.x, extra bit for SIB index field |
| 1425 | * b: same as rex.b, extra bit for ModRM r/m, or SIB base |
| 1426 | * m: opcode map select, encoding escape bytes e.g. 0x0f38 |
| 1427 | * w: same as rex.w (32 bit or 64 bit) or opcode specific |
| 1428 | * src_reg2: additional source reg (encoded as BPF reg) |
| 1429 | * l: vector length (128 bit or 256 bit) or reserved |
| 1430 | * pp: opcode prefix (none, 0x66, 0xf2 or 0xf3) |
| 1431 | */ |
| 1432 | static void emit_3vex(u8 **pprog, bool r, bool x, bool b, u8 m, |
| 1433 | bool w, u8 src_reg2, bool l, u8 pp) |
| 1434 | { |
| 1435 | u8 *prog = *pprog; |
| 1436 | const u8 b0 = 0xc4; /* first byte of 3-byte VEX prefix */ |
| 1437 | u8 b1, b2; |
| 1438 | u8 vvvv = reg2hex[src_reg2]; |
| 1439 | |
| 1440 | /* reg2hex gives only the lower 3 bit of vvvv */ |
| 1441 | if (is_ereg(src_reg2)) |
| 1442 | vvvv |= 1 << 3; |
| 1443 | |
| 1444 | /* |
| 1445 | * 2nd byte of 3-byte VEX prefix |
| 1446 | * ~ means bit inverted encoding |
| 1447 | * |
| 1448 | * 7                           0 |
| 1449 | * +---+---+---+---+---+---+---+---+ |
| 1450 | * |~R |~X |~B |         m         | |
| 1451 | * +---+---+---+---+---+---+---+---+ |
| 1452 | */ |
| 1453 | b1 = (!r << 7) | (!x << 6) | (!b << 5) | (m & 0x1f); |
| 1454 | /* |
| 1455 | * 3rd byte of 3-byte VEX prefix |
| 1456 | * |
| 1457 | * 7                           0 |
| 1458 | * +---+---+---+---+---+---+---+---+ |
| 1459 | * | W |     ~vvvv     | L |   pp  | |
| 1460 | * +---+---+---+---+---+---+---+---+ |
| 1461 | */ |
| 1462 | b2 = (w << 7) | ((~vvvv & 0xf) << 3) | (l << 2) | (pp & 3); |
| 1463 | |
| 1464 | EMIT3(b0, b1, b2); |
| 1465 | *pprog = prog; |
| 1466 | } |
| 1467 | |
| 1468 | /* emit BMI2 shift instruction */ |
| 1469 | static void emit_shiftx(u8 **pprog, u32 dst_reg, u8 src_reg, bool is64, u8 op) |
| 1470 | { |
| 1471 | u8 *prog = *pprog; |
| 1472 | bool r = is_ereg(dst_reg); |
| 1473 | u8 m = 2; /* escape code 0f38 */ |
| 1474 | |
| 1475 | emit_3vex(&prog, r, false, r, m, is64, src_reg, false, op); |
| 1476 | EMIT2(0xf7, add_2reg(0xC0, dst_reg, dst_reg)); |
| 1477 | *pprog = prog; |
| 1478 | } |
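| | /* |
| | * Illustrative encoding (assuming pp = 1, i.e. the 0x66 prefix that selects |
| | * SHLX): emit_shiftx(&prog, BPF_REG_0, BPF_REG_2, true, 1) emits |
| | * c4 e2 c9 f7 c0, i.e. 'shlx rax, rax, rsi' with the shift count taken from |
| | * the VEX.vvvv register rsi. |
| | */ |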
| 1479 | |
| 1480 | static void emit_priv_frame_ptr(u8 **pprog, void __percpu *priv_frame_ptr) |
| 1481 | { |
| 1482 | u8 *prog = *pprog; |
| 1483 | |
| 1484 | /* movabs r9, priv_frame_ptr */ |
| 1485 | emit_mov_imm64(&prog, X86_REG_R9, (__force long) priv_frame_ptr >> 32, |
| 1486 | (u32) (__force long) priv_frame_ptr); |
| 1487 | |
| 1488 | #ifdef CONFIG_SMP |
| 1489 | /* add <r9>, gs:[<off>] */ |
| 1490 | EMIT2(0x65, 0x4c); |
| 1491 | EMIT3(0x03, 0x0c, 0x25); |
| 1492 | EMIT((u32)(unsigned long)&this_cpu_off, 4); |
| 1493 | #endif |
| 1494 | |
| 1495 | *pprog = prog; |
| 1496 | } |
| 1497 | |
| 1498 | #define INSN_SZ_DIFF (((addrs[i] - addrs[i - 1]) - (prog - temp))) |
| 1499 | |
| 1500 | #define __LOAD_TCC_PTR(off) \ |
| 1501 | EMIT3_off32(0x48, 0x8B, 0x85, off) |
| 1502 | /* mov rax, qword ptr [rbp - rounded_stack_depth - 16] */ |
| 1503 | #define LOAD_TAIL_CALL_CNT_PTR(stack) \ |
| 1504 | __LOAD_TCC_PTR(BPF_TAIL_CALL_CNT_PTR_STACK_OFF(stack)) |
| 1505 | |
| 1506 | /* Memory size/value to protect private stack overflow/underflow */ |
| 1507 | #define PRIV_STACK_GUARD_SZ 8 |
| 1508 | #define PRIV_STACK_GUARD_VAL 0xEB9F12345678eb9fULL |
| 1509 | |
| 1510 | static int emit_spectre_bhb_barrier(u8 **pprog, u8 *ip, |
| 1511 | struct bpf_prog *bpf_prog) |
| 1512 | { |
| 1513 | u8 *prog = *pprog; |
| 1514 | u8 *func; |
| 1515 | |
| 1516 | if (cpu_feature_enabled(X86_FEATURE_CLEAR_BHB_LOOP)) { |
| 1517 | /* The clearing sequence clobbers eax and ecx. */ |
| 1518 | EMIT1(0x50); /* push rax */ |
| 1519 | EMIT1(0x51); /* push rcx */ |
| 1520 | ip += 2; |
| 1521 | |
| 1522 | func = (u8 *)clear_bhb_loop; |
| 1523 | ip += x86_call_depth_emit_accounting(&prog, func, ip); |
| 1524 | |
| 1525 | if (emit_call(&prog, func, ip)) |
| 1526 | return -EINVAL; |
| 1527 | EMIT1(0x59); /* pop rcx */ |
| 1528 | EMIT1(0x58); /* pop rax */ |
| 1529 | } |
| 1530 | /* Insert IBHF instruction */ |
| 1531 | if ((cpu_feature_enabled(X86_FEATURE_CLEAR_BHB_LOOP) && |
| 1532 | cpu_feature_enabled(X86_FEATURE_HYPERVISOR)) || |
| 1533 | cpu_feature_enabled(X86_FEATURE_CLEAR_BHB_HW)) { |
| 1534 | /* |
| 1535 | * Add an Indirect Branch History Fence (IBHF). IBHF acts as a |
| 1536 | * fence preventing branch history from before the fence from |
| 1537 | * affecting indirect branches after the fence. This is |
| 1538 | * specifically used in cBPF jitted code to prevent Intra-mode |
| 1539 | * BHI attacks. The IBHF instruction is designed to be a NOP on |
| 1540 | * hardware that doesn't need or support it. The REP and REX.W |
| 1541 | * prefixes are required by the microcode, and they also ensure |
| 1542 | * that the NOP is unlikely to be used in existing code. |
| 1543 | * |
| 1544 | * IBHF is not a valid instruction in 32-bit mode. |
| 1545 | */ |
| 1546 | EMIT5(0xF3, 0x48, 0x0F, 0x1E, 0xF8); /* ibhf */ |
| 1547 | } |
| 1548 | *pprog = prog; |
| 1549 | return 0; |
| 1550 | } |
| 1551 | |
| 1552 | static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image, |
| 1553 | int oldproglen, struct jit_context *ctx, bool jmp_padding) |
| 1554 | { |
| 1555 | bool tail_call_reachable = bpf_prog->aux->tail_call_reachable; |
| 1556 | struct bpf_insn *insn = bpf_prog->insnsi; |
| 1557 | bool callee_regs_used[4] = {}; |
| 1558 | int insn_cnt = bpf_prog->len; |
| 1559 | bool seen_exit = false; |
| 1560 | u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY]; |
| 1561 | void __percpu *priv_frame_ptr = NULL; |
| 1562 | u64 arena_vm_start, user_vm_start; |
| 1563 | void __percpu *priv_stack_ptr; |
| 1564 | int i, excnt = 0; |
| 1565 | int ilen, proglen = 0; |
| 1566 | u8 *prog = temp; |
| 1567 | u32 stack_depth; |
| 1568 | int err; |
| 1569 | |
| 1570 | stack_depth = bpf_prog->aux->stack_depth; |
| 1571 | priv_stack_ptr = bpf_prog->aux->priv_stack_ptr; |
| 1572 | if (priv_stack_ptr) { |
| 1573 | priv_frame_ptr = priv_stack_ptr + PRIV_STACK_GUARD_SZ + round_up(stack_depth, 8); |
| 1574 | stack_depth = 0; |
| 1575 | } |
| 1576 | |
| 1577 | arena_vm_start = bpf_arena_get_kern_vm_start(bpf_prog->aux->arena); |
| 1578 | user_vm_start = bpf_arena_get_user_vm_start(bpf_prog->aux->arena); |
| 1579 | |
| 1580 | detect_reg_usage(insn, insn_cnt, callee_regs_used); |
| 1581 | |
| 1582 | emit_prologue(&prog, image, stack_depth, |
| 1583 | bpf_prog_was_classic(bpf_prog), tail_call_reachable, |
| 1584 | bpf_is_subprog(bpf_prog), bpf_prog->aux->exception_cb); |
| 1585 | /* Exception callback will clobber callee regs for its own use, and |
| 1586 | * restore the original callee regs from main prog's stack frame. |
| 1587 | */ |
| 1588 | if (bpf_prog->aux->exception_boundary) { |
| 1589 | /* We also need to save r12, which is not mapped to any BPF |
| 1590 | * register, as we throw after entry into the kernel, which may |
| 1591 | * overwrite r12. |
| 1592 | */ |
| 1593 | push_r12(&prog); |
| 1594 | push_callee_regs(&prog, all_callee_regs_used); |
| 1595 | } else { |
| 1596 | if (arena_vm_start) |
| 1597 | push_r12(&prog); |
| 1598 | push_callee_regs(&prog, callee_regs_used); |
| 1599 | } |
| 1600 | if (arena_vm_start) |
| 1601 | emit_mov_imm64(&prog, X86_REG_R12, |
| 1602 | arena_vm_start >> 32, (u32) arena_vm_start); |
| 1603 | |
| 1604 | if (priv_frame_ptr) |
| 1605 | emit_priv_frame_ptr(&prog, priv_frame_ptr); |
| 1606 | |
| 1607 | ilen = prog - temp; |
| 1608 | if (rw_image) |
| 1609 | memcpy(rw_image + proglen, temp, ilen); |
| 1610 | proglen += ilen; |
| 1611 | addrs[0] = proglen; |
| 1612 | prog = temp; |
| 1613 | |
| 1614 | for (i = 1; i <= insn_cnt; i++, insn++) { |
| 1615 | const s32 imm32 = insn->imm; |
| 1616 | u32 dst_reg = insn->dst_reg; |
| 1617 | u32 src_reg = insn->src_reg; |
| 1618 | u8 b2 = 0, b3 = 0; |
| 1619 | u8 *start_of_ldx; |
| 1620 | s64 jmp_offset; |
| 1621 | s16 insn_off; |
| 1622 | u8 jmp_cond; |
| 1623 | u8 *func; |
| 1624 | int nops; |
| 1625 | |
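		/* With a private stack, R9 (set up by emit_priv_frame_ptr() above)
		 * takes the role of the BPF frame pointer, so redirect any
		 * BPF_REG_FP access to it.
		 */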
| 1626 | if (priv_frame_ptr) { |
| 1627 | if (src_reg == BPF_REG_FP) |
| 1628 | src_reg = X86_REG_R9; |
| 1629 | |
| 1630 | if (dst_reg == BPF_REG_FP) |
| 1631 | dst_reg = X86_REG_R9; |
| 1632 | } |
| 1633 | |
| 1634 | switch (insn->code) { |
| 1635 | /* ALU */ |
| 1636 | case BPF_ALU | BPF_ADD | BPF_X: |
| 1637 | case BPF_ALU | BPF_SUB | BPF_X: |
| 1638 | case BPF_ALU | BPF_AND | BPF_X: |
| 1639 | case BPF_ALU | BPF_OR | BPF_X: |
| 1640 | case BPF_ALU | BPF_XOR | BPF_X: |
| 1641 | case BPF_ALU64 | BPF_ADD | BPF_X: |
| 1642 | case BPF_ALU64 | BPF_SUB | BPF_X: |
| 1643 | case BPF_ALU64 | BPF_AND | BPF_X: |
| 1644 | case BPF_ALU64 | BPF_OR | BPF_X: |
| 1645 | case BPF_ALU64 | BPF_XOR | BPF_X: |
| 1646 | maybe_emit_mod(&prog, dst_reg, src_reg, |
| 1647 | BPF_CLASS(insn->code) == BPF_ALU64); |
| 1648 | b2 = simple_alu_opcodes[BPF_OP(insn->code)]; |
| 1649 | EMIT2(b2, add_2reg(0xC0, dst_reg, src_reg)); |
| 1650 | break; |
| 1651 | |
| 1652 | case BPF_ALU64 | BPF_MOV | BPF_X: |
| 1653 | if (insn_is_cast_user(insn)) { |
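				/* cast_user: rewrite the upper 32 bits of the arena
				 * pointer to user_vm_start while keeping a NULL pointer
				 * (lower 32 bits all zero) as NULL.
				 */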
| 1654 | if (dst_reg != src_reg) |
| 1655 | /* 32-bit mov */ |
| 1656 | emit_mov_reg(&prog, false, dst_reg, src_reg); |
| 1657 | /* shl dst_reg, 32 */ |
| 1658 | maybe_emit_1mod(&prog, dst_reg, true); |
| 1659 | EMIT3(0xC1, add_1reg(0xE0, dst_reg), 32); |
| 1660 | |
| 1661 | /* or dst_reg, user_vm_start */ |
| 1662 | maybe_emit_1mod(&prog, dst_reg, true); |
| 1663 | if (is_axreg(dst_reg)) |
| 1664 | EMIT1_off32(0x0D, user_vm_start >> 32); |
| 1665 | else |
| 1666 | EMIT2_off32(0x81, add_1reg(0xC8, dst_reg), user_vm_start >> 32); |
| 1667 | |
| 1668 | /* rol dst_reg, 32 */ |
| 1669 | maybe_emit_1mod(&prog, dst_reg, true); |
| 1670 | EMIT3(0xC1, add_1reg(0xC0, dst_reg), 32); |
| 1671 | |
| 1672 | /* xor r11, r11 */ |
| 1673 | EMIT3(0x4D, 0x31, 0xDB); |
| 1674 | |
/* test dst_reg32, dst_reg32; check if the lower 32 bits are zero */
| 1676 | maybe_emit_mod(&prog, dst_reg, dst_reg, false); |
| 1677 | EMIT2(0x85, add_2reg(0xC0, dst_reg, dst_reg)); |
| 1678 | |
| 1679 | /* cmove r11, dst_reg; if so, set dst_reg to zero */ |
| 1680 | /* WARNING: Intel swapped src/dst register encoding in CMOVcc !!! */ |
| 1681 | maybe_emit_mod(&prog, AUX_REG, dst_reg, true); |
| 1682 | EMIT3(0x0F, 0x44, add_2reg(0xC0, AUX_REG, dst_reg)); |
| 1683 | break; |
| 1684 | } else if (insn_is_mov_percpu_addr(insn)) { |
| 1685 | /* mov <dst>, <src> (if necessary) */ |
| 1686 | EMIT_mov(dst_reg, src_reg); |
| 1687 | #ifdef CONFIG_SMP |
| 1688 | /* add <dst>, gs:[<off>] */ |
| 1689 | EMIT2(0x65, add_1mod(0x48, dst_reg)); |
| 1690 | EMIT3(0x03, add_2reg(0x04, 0, dst_reg), 0x25); |
| 1691 | EMIT((u32)(unsigned long)&this_cpu_off, 4); |
| 1692 | #endif |
| 1693 | break; |
| 1694 | } |
| 1695 | fallthrough; |
| 1696 | case BPF_ALU | BPF_MOV | BPF_X: |
| 1697 | if (insn->off == 0) |
| 1698 | emit_mov_reg(&prog, |
| 1699 | BPF_CLASS(insn->code) == BPF_ALU64, |
| 1700 | dst_reg, src_reg); |
| 1701 | else |
| 1702 | emit_movsx_reg(&prog, insn->off, |
| 1703 | BPF_CLASS(insn->code) == BPF_ALU64, |
| 1704 | dst_reg, src_reg); |
| 1705 | break; |
| 1706 | |
| 1707 | /* neg dst */ |
| 1708 | case BPF_ALU | BPF_NEG: |
| 1709 | case BPF_ALU64 | BPF_NEG: |
| 1710 | maybe_emit_1mod(&prog, dst_reg, |
| 1711 | BPF_CLASS(insn->code) == BPF_ALU64); |
| 1712 | EMIT2(0xF7, add_1reg(0xD8, dst_reg)); |
| 1713 | break; |
| 1714 | |
| 1715 | case BPF_ALU | BPF_ADD | BPF_K: |
| 1716 | case BPF_ALU | BPF_SUB | BPF_K: |
| 1717 | case BPF_ALU | BPF_AND | BPF_K: |
| 1718 | case BPF_ALU | BPF_OR | BPF_K: |
| 1719 | case BPF_ALU | BPF_XOR | BPF_K: |
| 1720 | case BPF_ALU64 | BPF_ADD | BPF_K: |
| 1721 | case BPF_ALU64 | BPF_SUB | BPF_K: |
| 1722 | case BPF_ALU64 | BPF_AND | BPF_K: |
| 1723 | case BPF_ALU64 | BPF_OR | BPF_K: |
| 1724 | case BPF_ALU64 | BPF_XOR | BPF_K: |
| 1725 | maybe_emit_1mod(&prog, dst_reg, |
| 1726 | BPF_CLASS(insn->code) == BPF_ALU64); |
| 1727 | |
| 1728 | /* |
 * b3 holds the 'normal' opcode; b2 is the short form, only
 * valid when dst is eax/rax.
| 1731 | */ |
| 1732 | switch (BPF_OP(insn->code)) { |
| 1733 | case BPF_ADD: |
| 1734 | b3 = 0xC0; |
| 1735 | b2 = 0x05; |
| 1736 | break; |
| 1737 | case BPF_SUB: |
| 1738 | b3 = 0xE8; |
| 1739 | b2 = 0x2D; |
| 1740 | break; |
| 1741 | case BPF_AND: |
| 1742 | b3 = 0xE0; |
| 1743 | b2 = 0x25; |
| 1744 | break; |
| 1745 | case BPF_OR: |
| 1746 | b3 = 0xC8; |
| 1747 | b2 = 0x0D; |
| 1748 | break; |
| 1749 | case BPF_XOR: |
| 1750 | b3 = 0xF0; |
| 1751 | b2 = 0x35; |
| 1752 | break; |
| 1753 | } |
| 1754 | |
| 1755 | if (is_imm8(imm32)) |
| 1756 | EMIT3(0x83, add_1reg(b3, dst_reg), imm32); |
| 1757 | else if (is_axreg(dst_reg)) |
| 1758 | EMIT1_off32(b2, imm32); |
| 1759 | else |
| 1760 | EMIT2_off32(0x81, add_1reg(b3, dst_reg), imm32); |
| 1761 | break; |
| 1762 | |
| 1763 | case BPF_ALU64 | BPF_MOV | BPF_K: |
| 1764 | case BPF_ALU | BPF_MOV | BPF_K: |
| 1765 | emit_mov_imm32(&prog, BPF_CLASS(insn->code) == BPF_ALU64, |
| 1766 | dst_reg, imm32); |
| 1767 | break; |
| 1768 | |
| 1769 | case BPF_LD | BPF_IMM | BPF_DW: |
| 1770 | emit_mov_imm64(&prog, dst_reg, insn[1].imm, insn[0].imm); |
| 1771 | insn++; |
| 1772 | i++; |
| 1773 | break; |
| 1774 | |
| 1775 | /* dst %= src, dst /= src, dst %= imm32, dst /= imm32 */ |
| 1776 | case BPF_ALU | BPF_MOD | BPF_X: |
| 1777 | case BPF_ALU | BPF_DIV | BPF_X: |
| 1778 | case BPF_ALU | BPF_MOD | BPF_K: |
| 1779 | case BPF_ALU | BPF_DIV | BPF_K: |
| 1780 | case BPF_ALU64 | BPF_MOD | BPF_X: |
| 1781 | case BPF_ALU64 | BPF_DIV | BPF_X: |
| 1782 | case BPF_ALU64 | BPF_MOD | BPF_K: |
| 1783 | case BPF_ALU64 | BPF_DIV | BPF_K: { |
| 1784 | bool is64 = BPF_CLASS(insn->code) == BPF_ALU64; |
| 1785 | |
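		/* div/idiv implicitly use rdx:rax (BPF_REG_3/BPF_REG_0), so
		 * preserve both registers unless dst_reg already is one of them.
		 */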
| 1786 | if (dst_reg != BPF_REG_0) |
| 1787 | EMIT1(0x50); /* push rax */ |
| 1788 | if (dst_reg != BPF_REG_3) |
| 1789 | EMIT1(0x52); /* push rdx */ |
| 1790 | |
| 1791 | if (BPF_SRC(insn->code) == BPF_X) { |
| 1792 | if (src_reg == BPF_REG_0 || |
| 1793 | src_reg == BPF_REG_3) { |
| 1794 | /* mov r11, src_reg */ |
| 1795 | EMIT_mov(AUX_REG, src_reg); |
| 1796 | src_reg = AUX_REG; |
| 1797 | } |
| 1798 | } else { |
| 1799 | /* mov r11, imm32 */ |
| 1800 | EMIT3_off32(0x49, 0xC7, 0xC3, imm32); |
| 1801 | src_reg = AUX_REG; |
| 1802 | } |
| 1803 | |
| 1804 | if (dst_reg != BPF_REG_0) |
| 1805 | /* mov rax, dst_reg */ |
| 1806 | emit_mov_reg(&prog, is64, BPF_REG_0, dst_reg); |
| 1807 | |
| 1808 | if (insn->off == 0) { |
| 1809 | /* |
| 1810 | * xor edx, edx |
| 1811 | * equivalent to 'xor rdx, rdx', but one byte less |
| 1812 | */ |
| 1813 | EMIT2(0x31, 0xd2); |
| 1814 | |
| 1815 | /* div src_reg */ |
| 1816 | maybe_emit_1mod(&prog, src_reg, is64); |
| 1817 | EMIT2(0xF7, add_1reg(0xF0, src_reg)); |
| 1818 | } else { |
| 1819 | if (BPF_CLASS(insn->code) == BPF_ALU) |
| 1820 | EMIT1(0x99); /* cdq */ |
| 1821 | else |
| 1822 | EMIT2(0x48, 0x99); /* cqo */ |
| 1823 | |
| 1824 | /* idiv src_reg */ |
| 1825 | maybe_emit_1mod(&prog, src_reg, is64); |
| 1826 | EMIT2(0xF7, add_1reg(0xF8, src_reg)); |
| 1827 | } |
| 1828 | |
| 1829 | if (BPF_OP(insn->code) == BPF_MOD && |
| 1830 | dst_reg != BPF_REG_3) |
| 1831 | /* mov dst_reg, rdx */ |
| 1832 | emit_mov_reg(&prog, is64, dst_reg, BPF_REG_3); |
| 1833 | else if (BPF_OP(insn->code) == BPF_DIV && |
| 1834 | dst_reg != BPF_REG_0) |
| 1835 | /* mov dst_reg, rax */ |
| 1836 | emit_mov_reg(&prog, is64, dst_reg, BPF_REG_0); |
| 1837 | |
| 1838 | if (dst_reg != BPF_REG_3) |
| 1839 | EMIT1(0x5A); /* pop rdx */ |
| 1840 | if (dst_reg != BPF_REG_0) |
| 1841 | EMIT1(0x58); /* pop rax */ |
| 1842 | break; |
| 1843 | } |
| 1844 | |
| 1845 | case BPF_ALU | BPF_MUL | BPF_K: |
| 1846 | case BPF_ALU64 | BPF_MUL | BPF_K: |
| 1847 | maybe_emit_mod(&prog, dst_reg, dst_reg, |
| 1848 | BPF_CLASS(insn->code) == BPF_ALU64); |
| 1849 | |
| 1850 | if (is_imm8(imm32)) |
| 1851 | /* imul dst_reg, dst_reg, imm8 */ |
| 1852 | EMIT3(0x6B, add_2reg(0xC0, dst_reg, dst_reg), |
| 1853 | imm32); |
| 1854 | else |
| 1855 | /* imul dst_reg, dst_reg, imm32 */ |
| 1856 | EMIT2_off32(0x69, |
| 1857 | add_2reg(0xC0, dst_reg, dst_reg), |
| 1858 | imm32); |
| 1859 | break; |
| 1860 | |
| 1861 | case BPF_ALU | BPF_MUL | BPF_X: |
| 1862 | case BPF_ALU64 | BPF_MUL | BPF_X: |
| 1863 | maybe_emit_mod(&prog, src_reg, dst_reg, |
| 1864 | BPF_CLASS(insn->code) == BPF_ALU64); |
| 1865 | |
| 1866 | /* imul dst_reg, src_reg */ |
| 1867 | EMIT3(0x0F, 0xAF, add_2reg(0xC0, src_reg, dst_reg)); |
| 1868 | break; |
| 1869 | |
| 1870 | /* Shifts */ |
| 1871 | case BPF_ALU | BPF_LSH | BPF_K: |
| 1872 | case BPF_ALU | BPF_RSH | BPF_K: |
| 1873 | case BPF_ALU | BPF_ARSH | BPF_K: |
| 1874 | case BPF_ALU64 | BPF_LSH | BPF_K: |
| 1875 | case BPF_ALU64 | BPF_RSH | BPF_K: |
| 1876 | case BPF_ALU64 | BPF_ARSH | BPF_K: |
| 1877 | maybe_emit_1mod(&prog, dst_reg, |
| 1878 | BPF_CLASS(insn->code) == BPF_ALU64); |
| 1879 | |
| 1880 | b3 = simple_alu_opcodes[BPF_OP(insn->code)]; |
| 1881 | if (imm32 == 1) |
| 1882 | EMIT2(0xD1, add_1reg(b3, dst_reg)); |
| 1883 | else |
| 1884 | EMIT3(0xC1, add_1reg(b3, dst_reg), imm32); |
| 1885 | break; |
| 1886 | |
| 1887 | case BPF_ALU | BPF_LSH | BPF_X: |
| 1888 | case BPF_ALU | BPF_RSH | BPF_X: |
| 1889 | case BPF_ALU | BPF_ARSH | BPF_X: |
| 1890 | case BPF_ALU64 | BPF_LSH | BPF_X: |
| 1891 | case BPF_ALU64 | BPF_RSH | BPF_X: |
| 1892 | case BPF_ALU64 | BPF_ARSH | BPF_X: |
| 1893 | /* BMI2 shifts aren't better when shift count is already in rcx */ |
| 1894 | if (boot_cpu_has(X86_FEATURE_BMI2) && src_reg != BPF_REG_4) { |
| 1895 | /* shrx/sarx/shlx dst_reg, dst_reg, src_reg */ |
| 1896 | bool w = (BPF_CLASS(insn->code) == BPF_ALU64); |
| 1897 | u8 op; |
| 1898 | |
| 1899 | switch (BPF_OP(insn->code)) { |
| 1900 | case BPF_LSH: |
| 1901 | op = 1; /* prefix 0x66 */ |
| 1902 | break; |
| 1903 | case BPF_RSH: |
| 1904 | op = 3; /* prefix 0xf2 */ |
| 1905 | break; |
| 1906 | case BPF_ARSH: |
| 1907 | op = 2; /* prefix 0xf3 */ |
| 1908 | break; |
| 1909 | } |
| 1910 | |
| 1911 | emit_shiftx(&prog, dst_reg, src_reg, w, op); |
| 1912 | |
| 1913 | break; |
| 1914 | } |
| 1915 | |
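			/* Legacy variable-count shifts take the shift amount in cl
			 * only, so move src_reg into rcx (BPF_REG_4), preserving the
			 * previous contents of rcx or dst_reg as needed.
			 */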
| 1916 | if (src_reg != BPF_REG_4) { /* common case */ |
| 1917 | /* Check for bad case when dst_reg == rcx */ |
| 1918 | if (dst_reg == BPF_REG_4) { |
| 1919 | /* mov r11, dst_reg */ |
| 1920 | EMIT_mov(AUX_REG, dst_reg); |
| 1921 | dst_reg = AUX_REG; |
| 1922 | } else { |
| 1923 | EMIT1(0x51); /* push rcx */ |
| 1924 | } |
| 1925 | /* mov rcx, src_reg */ |
| 1926 | EMIT_mov(BPF_REG_4, src_reg); |
| 1927 | } |
| 1928 | |
| 1929 | /* shl %rax, %cl | shr %rax, %cl | sar %rax, %cl */ |
| 1930 | maybe_emit_1mod(&prog, dst_reg, |
| 1931 | BPF_CLASS(insn->code) == BPF_ALU64); |
| 1932 | |
| 1933 | b3 = simple_alu_opcodes[BPF_OP(insn->code)]; |
| 1934 | EMIT2(0xD3, add_1reg(b3, dst_reg)); |
| 1935 | |
| 1936 | if (src_reg != BPF_REG_4) { |
| 1937 | if (insn->dst_reg == BPF_REG_4) |
| 1938 | /* mov dst_reg, r11 */ |
| 1939 | EMIT_mov(insn->dst_reg, AUX_REG); |
| 1940 | else |
| 1941 | EMIT1(0x59); /* pop rcx */ |
| 1942 | } |
| 1943 | |
| 1944 | break; |
| 1945 | |
| 1946 | case BPF_ALU | BPF_END | BPF_FROM_BE: |
| 1947 | case BPF_ALU64 | BPF_END | BPF_FROM_LE: |
| 1948 | switch (imm32) { |
| 1949 | case 16: |
| 1950 | /* Emit 'ror %ax, 8' to swap lower 2 bytes */ |
| 1951 | EMIT1(0x66); |
| 1952 | if (is_ereg(dst_reg)) |
| 1953 | EMIT1(0x41); |
| 1954 | EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8); |
| 1955 | |
| 1956 | /* Emit 'movzwl eax, ax' */ |
| 1957 | if (is_ereg(dst_reg)) |
| 1958 | EMIT3(0x45, 0x0F, 0xB7); |
| 1959 | else |
| 1960 | EMIT2(0x0F, 0xB7); |
| 1961 | EMIT1(add_2reg(0xC0, dst_reg, dst_reg)); |
| 1962 | break; |
| 1963 | case 32: |
| 1964 | /* Emit 'bswap eax' to swap lower 4 bytes */ |
| 1965 | if (is_ereg(dst_reg)) |
| 1966 | EMIT2(0x41, 0x0F); |
| 1967 | else |
| 1968 | EMIT1(0x0F); |
| 1969 | EMIT1(add_1reg(0xC8, dst_reg)); |
| 1970 | break; |
| 1971 | case 64: |
| 1972 | /* Emit 'bswap rax' to swap 8 bytes */ |
| 1973 | EMIT3(add_1mod(0x48, dst_reg), 0x0F, |
| 1974 | add_1reg(0xC8, dst_reg)); |
| 1975 | break; |
| 1976 | } |
| 1977 | break; |
| 1978 | |
| 1979 | case BPF_ALU | BPF_END | BPF_FROM_LE: |
| 1980 | switch (imm32) { |
| 1981 | case 16: |
| 1982 | /* |
| 1983 | * Emit 'movzwl eax, ax' to zero extend 16-bit |
| 1984 | * into 64 bit |
| 1985 | */ |
| 1986 | if (is_ereg(dst_reg)) |
| 1987 | EMIT3(0x45, 0x0F, 0xB7); |
| 1988 | else |
| 1989 | EMIT2(0x0F, 0xB7); |
| 1990 | EMIT1(add_2reg(0xC0, dst_reg, dst_reg)); |
| 1991 | break; |
| 1992 | case 32: |
| 1993 | /* Emit 'mov eax, eax' to clear upper 32-bits */ |
| 1994 | if (is_ereg(dst_reg)) |
| 1995 | EMIT1(0x45); |
| 1996 | EMIT2(0x89, add_2reg(0xC0, dst_reg, dst_reg)); |
| 1997 | break; |
| 1998 | case 64: |
| 1999 | /* nop */ |
| 2000 | break; |
| 2001 | } |
| 2002 | break; |
| 2003 | |
| 2004 | /* speculation barrier */ |
| 2005 | case BPF_ST | BPF_NOSPEC: |
| 2006 | EMIT_LFENCE(); |
| 2007 | break; |
| 2008 | |
| 2009 | /* ST: *(u8*)(dst_reg + off) = imm */ |
| 2010 | case BPF_ST | BPF_MEM | BPF_B: |
| 2011 | if (is_ereg(dst_reg)) |
| 2012 | EMIT2(0x41, 0xC6); |
| 2013 | else |
| 2014 | EMIT1(0xC6); |
| 2015 | goto st; |
| 2016 | case BPF_ST | BPF_MEM | BPF_H: |
| 2017 | if (is_ereg(dst_reg)) |
| 2018 | EMIT3(0x66, 0x41, 0xC7); |
| 2019 | else |
| 2020 | EMIT2(0x66, 0xC7); |
| 2021 | goto st; |
| 2022 | case BPF_ST | BPF_MEM | BPF_W: |
| 2023 | if (is_ereg(dst_reg)) |
| 2024 | EMIT2(0x41, 0xC7); |
| 2025 | else |
| 2026 | EMIT1(0xC7); |
| 2027 | goto st; |
| 2028 | case BPF_ST | BPF_MEM | BPF_DW: |
| 2029 | EMIT2(add_1mod(0x48, dst_reg), 0xC7); |
| 2030 | |
| 2031 | st: if (is_imm8(insn->off)) |
| 2032 | EMIT2(add_1reg(0x40, dst_reg), insn->off); |
| 2033 | else |
| 2034 | EMIT1_off32(add_1reg(0x80, dst_reg), insn->off); |
| 2035 | |
| 2036 | EMIT(imm32, bpf_size_to_x86_bytes(BPF_SIZE(insn->code))); |
| 2037 | break; |
| 2038 | |
| 2039 | /* STX: *(u8*)(dst_reg + off) = src_reg */ |
| 2040 | case BPF_STX | BPF_MEM | BPF_B: |
| 2041 | case BPF_STX | BPF_MEM | BPF_H: |
| 2042 | case BPF_STX | BPF_MEM | BPF_W: |
| 2043 | case BPF_STX | BPF_MEM | BPF_DW: |
| 2044 | emit_stx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off); |
| 2045 | break; |
| 2046 | |
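		/* ST: *(u8*)(dst_reg + r12 + off) = imm */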
| 2047 | case BPF_ST | BPF_PROBE_MEM32 | BPF_B: |
| 2048 | case BPF_ST | BPF_PROBE_MEM32 | BPF_H: |
| 2049 | case BPF_ST | BPF_PROBE_MEM32 | BPF_W: |
| 2050 | case BPF_ST | BPF_PROBE_MEM32 | BPF_DW: |
| 2051 | start_of_ldx = prog; |
| 2052 | emit_st_r12(&prog, BPF_SIZE(insn->code), dst_reg, insn->off, insn->imm); |
| 2053 | goto populate_extable; |
| 2054 | |
| 2055 | /* LDX: dst_reg = *(u8*)(src_reg + r12 + off) */ |
| 2056 | case BPF_LDX | BPF_PROBE_MEM32 | BPF_B: |
| 2057 | case BPF_LDX | BPF_PROBE_MEM32 | BPF_H: |
| 2058 | case BPF_LDX | BPF_PROBE_MEM32 | BPF_W: |
| 2059 | case BPF_LDX | BPF_PROBE_MEM32 | BPF_DW: |
| 2060 | case BPF_STX | BPF_PROBE_MEM32 | BPF_B: |
| 2061 | case BPF_STX | BPF_PROBE_MEM32 | BPF_H: |
| 2062 | case BPF_STX | BPF_PROBE_MEM32 | BPF_W: |
| 2063 | case BPF_STX | BPF_PROBE_MEM32 | BPF_DW: |
| 2064 | start_of_ldx = prog; |
| 2065 | if (BPF_CLASS(insn->code) == BPF_LDX) |
| 2066 | emit_ldx_r12(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off); |
| 2067 | else |
| 2068 | emit_stx_r12(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off); |
| 2069 | populate_extable: |
| 2070 | { |
| 2071 | struct exception_table_entry *ex; |
| 2072 | u8 *_insn = image + proglen + (start_of_ldx - temp); |
| 2073 | s64 delta; |
| 2074 | |
| 2075 | if (!bpf_prog->aux->extable) |
| 2076 | break; |
| 2077 | |
| 2078 | if (excnt >= bpf_prog->aux->num_exentries) { |
| 2079 | pr_err("mem32 extable bug\n"); |
| 2080 | return -EFAULT; |
| 2081 | } |
| 2082 | ex = &bpf_prog->aux->extable[excnt++]; |
| 2083 | |
| 2084 | delta = _insn - (u8 *)&ex->insn; |
| 2085 | /* switch ex to rw buffer for writes */ |
| 2086 | ex = (void *)rw_image + ((void *)ex - (void *)image); |
| 2087 | |
| 2088 | ex->insn = delta; |
| 2089 | |
| 2090 | ex->data = EX_TYPE_BPF; |
| 2091 | |
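			/* Same fixup encoding as for PROBE_MEM below: the low byte is
			 * the length of the x86 insn to skip, the upper bits select the
			 * pt_regs register to clear (DONT_CLEAR for stores).
			 */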
| 2092 | ex->fixup = (prog - start_of_ldx) | |
| 2093 | ((BPF_CLASS(insn->code) == BPF_LDX ? reg2pt_regs[dst_reg] : DONT_CLEAR) << 8); |
| 2094 | } |
| 2095 | break; |
| 2096 | |
| 2097 | /* LDX: dst_reg = *(u8*)(src_reg + off) */ |
| 2098 | case BPF_LDX | BPF_MEM | BPF_B: |
| 2099 | case BPF_LDX | BPF_PROBE_MEM | BPF_B: |
| 2100 | case BPF_LDX | BPF_MEM | BPF_H: |
| 2101 | case BPF_LDX | BPF_PROBE_MEM | BPF_H: |
| 2102 | case BPF_LDX | BPF_MEM | BPF_W: |
| 2103 | case BPF_LDX | BPF_PROBE_MEM | BPF_W: |
| 2104 | case BPF_LDX | BPF_MEM | BPF_DW: |
| 2105 | case BPF_LDX | BPF_PROBE_MEM | BPF_DW: |
| 2106 | /* LDXS: dst_reg = *(s8*)(src_reg + off) */ |
| 2107 | case BPF_LDX | BPF_MEMSX | BPF_B: |
| 2108 | case BPF_LDX | BPF_MEMSX | BPF_H: |
| 2109 | case BPF_LDX | BPF_MEMSX | BPF_W: |
| 2110 | case BPF_LDX | BPF_PROBE_MEMSX | BPF_B: |
| 2111 | case BPF_LDX | BPF_PROBE_MEMSX | BPF_H: |
| 2112 | case BPF_LDX | BPF_PROBE_MEMSX | BPF_W: |
| 2113 | insn_off = insn->off; |
| 2114 | |
| 2115 | if (BPF_MODE(insn->code) == BPF_PROBE_MEM || |
| 2116 | BPF_MODE(insn->code) == BPF_PROBE_MEMSX) { |
| 2117 | /* Conservatively check that src_reg + insn->off is a kernel address: |
| 2118 | * src_reg + insn->off > TASK_SIZE_MAX + PAGE_SIZE |
| 2119 | * and |
| 2120 | * src_reg + insn->off < VSYSCALL_ADDR |
| 2121 | */ |
| 2122 | |
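			/* Both bounds are checked with a single unsigned comparison:
			 * subtracting VSYSCALL_ADDR wraps every address outside
			 * (TASK_SIZE_MAX + PAGE_SIZE, VSYSCALL_ADDR) to a value <= limit,
			 * so one JA below decides between doing the load and zeroing
			 * the destination.
			 */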
| 2123 | u64 limit = TASK_SIZE_MAX + PAGE_SIZE - VSYSCALL_ADDR; |
| 2124 | u8 *end_of_jmp; |
| 2125 | |
| 2126 | /* movabsq r10, VSYSCALL_ADDR */ |
| 2127 | emit_mov_imm64(&prog, BPF_REG_AX, (long)VSYSCALL_ADDR >> 32, |
| 2128 | (u32)(long)VSYSCALL_ADDR); |
| 2129 | |
/* mov r11, src_reg */
| 2131 | EMIT_mov(AUX_REG, src_reg); |
| 2132 | |
| 2133 | if (insn->off) { |
| 2134 | /* add r11, insn->off */ |
| 2135 | maybe_emit_1mod(&prog, AUX_REG, true); |
| 2136 | EMIT2_off32(0x81, add_1reg(0xC0, AUX_REG), insn->off); |
| 2137 | } |
| 2138 | |
| 2139 | /* sub r11, r10 */ |
| 2140 | maybe_emit_mod(&prog, AUX_REG, BPF_REG_AX, true); |
| 2141 | EMIT2(0x29, add_2reg(0xC0, AUX_REG, BPF_REG_AX)); |
| 2142 | |
| 2143 | /* movabsq r10, limit */ |
| 2144 | emit_mov_imm64(&prog, BPF_REG_AX, (long)limit >> 32, |
| 2145 | (u32)(long)limit); |
| 2146 | |
/* cmp r11, r10 */
| 2148 | maybe_emit_mod(&prog, AUX_REG, BPF_REG_AX, true); |
| 2149 | EMIT2(0x39, add_2reg(0xC0, AUX_REG, BPF_REG_AX)); |
| 2150 | |
| 2151 | /* if unsigned '>', goto load */ |
| 2152 | EMIT2(X86_JA, 0); |
| 2153 | end_of_jmp = prog; |
| 2154 | |
| 2155 | /* xor dst_reg, dst_reg */ |
| 2156 | emit_mov_imm32(&prog, false, dst_reg, 0); |
| 2157 | /* jmp byte_after_ldx */ |
| 2158 | EMIT2(0xEB, 0); |
| 2159 | |
/* populate jmp_offset for the JA above to jump to start_of_ldx */
| 2161 | start_of_ldx = prog; |
| 2162 | end_of_jmp[-1] = start_of_ldx - end_of_jmp; |
| 2163 | } |
| 2164 | if (BPF_MODE(insn->code) == BPF_PROBE_MEMSX || |
| 2165 | BPF_MODE(insn->code) == BPF_MEMSX) |
| 2166 | emit_ldsx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn_off); |
| 2167 | else |
| 2168 | emit_ldx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn_off); |
| 2169 | if (BPF_MODE(insn->code) == BPF_PROBE_MEM || |
| 2170 | BPF_MODE(insn->code) == BPF_PROBE_MEMSX) { |
| 2171 | struct exception_table_entry *ex; |
| 2172 | u8 *_insn = image + proglen + (start_of_ldx - temp); |
| 2173 | s64 delta; |
| 2174 | |
| 2175 | /* populate jmp_offset for JMP above */ |
| 2176 | start_of_ldx[-1] = prog - start_of_ldx; |
| 2177 | |
| 2178 | if (!bpf_prog->aux->extable) |
| 2179 | break; |
| 2180 | |
| 2181 | if (excnt >= bpf_prog->aux->num_exentries) { |
| 2182 | pr_err("ex gen bug\n"); |
| 2183 | return -EFAULT; |
| 2184 | } |
| 2185 | ex = &bpf_prog->aux->extable[excnt++]; |
| 2186 | |
| 2187 | delta = _insn - (u8 *)&ex->insn; |
| 2188 | if (!is_simm32(delta)) { |
| 2189 | pr_err("extable->insn doesn't fit into 32-bit\n"); |
| 2190 | return -EFAULT; |
| 2191 | } |
| 2192 | /* switch ex to rw buffer for writes */ |
| 2193 | ex = (void *)rw_image + ((void *)ex - (void *)image); |
| 2194 | |
| 2195 | ex->insn = delta; |
| 2196 | |
| 2197 | ex->data = EX_TYPE_BPF; |
| 2198 | |
| 2199 | if (dst_reg > BPF_REG_9) { |
| 2200 | pr_err("verifier error\n"); |
| 2201 | return -EFAULT; |
| 2202 | } |
| 2203 | /* |
| 2204 | * Compute size of x86 insn and its target dest x86 register. |
| 2205 | * ex_handler_bpf() will use lower 8 bits to adjust |
| 2206 | * pt_regs->ip to jump over this x86 instruction |
| 2207 | * and upper bits to figure out which pt_regs to zero out. |
 * End result: the 4-byte x86 insn "mov rbx, qword ptr [rax+0x14]"
 * will be skipped and rbx will be zero-initialized.
| 2210 | */ |
| 2211 | ex->fixup = (prog - start_of_ldx) | (reg2pt_regs[dst_reg] << 8); |
| 2212 | } |
| 2213 | break; |
| 2214 | |
| 2215 | case BPF_STX | BPF_ATOMIC | BPF_B: |
| 2216 | case BPF_STX | BPF_ATOMIC | BPF_H: |
| 2217 | if (!bpf_atomic_is_load_store(insn)) { |
| 2218 | pr_err("bpf_jit: 1- and 2-byte RMW atomics are not supported\n"); |
| 2219 | return -EFAULT; |
| 2220 | } |
| 2221 | fallthrough; |
| 2222 | case BPF_STX | BPF_ATOMIC | BPF_W: |
| 2223 | case BPF_STX | BPF_ATOMIC | BPF_DW: |
| 2224 | if (insn->imm == (BPF_AND | BPF_FETCH) || |
| 2225 | insn->imm == (BPF_OR | BPF_FETCH) || |
| 2226 | insn->imm == (BPF_XOR | BPF_FETCH)) { |
| 2227 | bool is64 = BPF_SIZE(insn->code) == BPF_DW; |
| 2228 | u32 real_src_reg = src_reg; |
| 2229 | u32 real_dst_reg = dst_reg; |
| 2230 | u8 *branch_target; |
| 2231 | |
| 2232 | /* |
| 2233 | * Can't be implemented with a single x86 insn. |
| 2234 | * Need to do a CMPXCHG loop. |
| 2235 | */ |
| 2236 | |
| 2237 | /* Will need RAX as a CMPXCHG operand so save R0 */ |
| 2238 | emit_mov_reg(&prog, true, BPF_REG_AX, BPF_REG_0); |
| 2239 | if (src_reg == BPF_REG_0) |
| 2240 | real_src_reg = BPF_REG_AX; |
| 2241 | if (dst_reg == BPF_REG_0) |
| 2242 | real_dst_reg = BPF_REG_AX; |
| 2243 | |
| 2244 | branch_target = prog; |
| 2245 | /* Load old value */ |
| 2246 | emit_ldx(&prog, BPF_SIZE(insn->code), |
| 2247 | BPF_REG_0, real_dst_reg, insn->off); |
| 2248 | /* |
| 2249 | * Perform the (commutative) operation locally, |
| 2250 | * put the result in the AUX_REG. |
| 2251 | */ |
| 2252 | emit_mov_reg(&prog, is64, AUX_REG, BPF_REG_0); |
| 2253 | maybe_emit_mod(&prog, AUX_REG, real_src_reg, is64); |
| 2254 | EMIT2(simple_alu_opcodes[BPF_OP(insn->imm)], |
| 2255 | add_2reg(0xC0, AUX_REG, real_src_reg)); |
| 2256 | /* Attempt to swap in new value */ |
| 2257 | err = emit_atomic_rmw(&prog, BPF_CMPXCHG, |
| 2258 | real_dst_reg, AUX_REG, |
| 2259 | insn->off, |
| 2260 | BPF_SIZE(insn->code)); |
| 2261 | if (WARN_ON(err)) |
| 2262 | return err; |
| 2263 | /* |
| 2264 | * ZF tells us whether we won the race. If it's |
| 2265 | * cleared we need to try again. |
| 2266 | */ |
| 2267 | EMIT2(X86_JNE, -(prog - branch_target) - 2); |
| 2268 | /* Return the pre-modification value */ |
| 2269 | emit_mov_reg(&prog, is64, real_src_reg, BPF_REG_0); |
| 2270 | /* Restore R0 after clobbering RAX */ |
| 2271 | emit_mov_reg(&prog, true, BPF_REG_0, BPF_REG_AX); |
| 2272 | break; |
| 2273 | } |
| 2274 | |
| 2275 | if (bpf_atomic_is_load_store(insn)) |
| 2276 | err = emit_atomic_ld_st(&prog, insn->imm, dst_reg, src_reg, |
| 2277 | insn->off, BPF_SIZE(insn->code)); |
| 2278 | else |
| 2279 | err = emit_atomic_rmw(&prog, insn->imm, dst_reg, src_reg, |
| 2280 | insn->off, BPF_SIZE(insn->code)); |
| 2281 | if (err) |
| 2282 | return err; |
| 2283 | break; |
| 2284 | |
| 2285 | case BPF_STX | BPF_PROBE_ATOMIC | BPF_B: |
| 2286 | case BPF_STX | BPF_PROBE_ATOMIC | BPF_H: |
| 2287 | if (!bpf_atomic_is_load_store(insn)) { |
| 2288 | pr_err("bpf_jit: 1- and 2-byte RMW atomics are not supported\n"); |
| 2289 | return -EFAULT; |
| 2290 | } |
| 2291 | fallthrough; |
| 2292 | case BPF_STX | BPF_PROBE_ATOMIC | BPF_W: |
| 2293 | case BPF_STX | BPF_PROBE_ATOMIC | BPF_DW: |
| 2294 | start_of_ldx = prog; |
| 2295 | |
| 2296 | if (bpf_atomic_is_load_store(insn)) |
| 2297 | err = emit_atomic_ld_st_index(&prog, insn->imm, |
| 2298 | BPF_SIZE(insn->code), dst_reg, |
| 2299 | src_reg, X86_REG_R12, insn->off); |
| 2300 | else |
| 2301 | err = emit_atomic_rmw_index(&prog, insn->imm, BPF_SIZE(insn->code), |
| 2302 | dst_reg, src_reg, X86_REG_R12, |
| 2303 | insn->off); |
| 2304 | if (err) |
| 2305 | return err; |
| 2306 | goto populate_extable; |
| 2307 | |
| 2308 | /* call */ |
| 2309 | case BPF_JMP | BPF_CALL: { |
| 2310 | u8 *ip = image + addrs[i - 1]; |
| 2311 | |
| 2312 | func = (u8 *) __bpf_call_base + imm32; |
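		/* ip must track the address at which the call insn itself will be
		 * emitted; every insn emitted in front of it below is accounted for
		 * so that the relative displacement is computed from the right spot.
		 */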
| 2313 | if (src_reg == BPF_PSEUDO_CALL && tail_call_reachable) { |
| 2314 | LOAD_TAIL_CALL_CNT_PTR(stack_depth); |
| 2315 | ip += 7; |
| 2316 | } |
| 2317 | if (!imm32) |
| 2318 | return -EINVAL; |
| 2319 | if (priv_frame_ptr) { |
| 2320 | push_r9(&prog); |
| 2321 | ip += 2; |
| 2322 | } |
| 2323 | ip += x86_call_depth_emit_accounting(&prog, func, ip); |
| 2324 | if (emit_call(&prog, func, ip)) |
| 2325 | return -EINVAL; |
| 2326 | if (priv_frame_ptr) |
| 2327 | pop_r9(&prog); |
| 2328 | break; |
| 2329 | } |
| 2330 | |
| 2331 | case BPF_JMP | BPF_TAIL_CALL: |
| 2332 | if (imm32) |
| 2333 | emit_bpf_tail_call_direct(bpf_prog, |
| 2334 | &bpf_prog->aux->poke_tab[imm32 - 1], |
| 2335 | &prog, image + addrs[i - 1], |
| 2336 | callee_regs_used, |
| 2337 | stack_depth, |
| 2338 | ctx); |
| 2339 | else |
| 2340 | emit_bpf_tail_call_indirect(bpf_prog, |
| 2341 | &prog, |
| 2342 | callee_regs_used, |
| 2343 | stack_depth, |
| 2344 | image + addrs[i - 1], |
| 2345 | ctx); |
| 2346 | break; |
| 2347 | |
| 2348 | /* cond jump */ |
| 2349 | case BPF_JMP | BPF_JEQ | BPF_X: |
| 2350 | case BPF_JMP | BPF_JNE | BPF_X: |
| 2351 | case BPF_JMP | BPF_JGT | BPF_X: |
| 2352 | case BPF_JMP | BPF_JLT | BPF_X: |
| 2353 | case BPF_JMP | BPF_JGE | BPF_X: |
| 2354 | case BPF_JMP | BPF_JLE | BPF_X: |
| 2355 | case BPF_JMP | BPF_JSGT | BPF_X: |
| 2356 | case BPF_JMP | BPF_JSLT | BPF_X: |
| 2357 | case BPF_JMP | BPF_JSGE | BPF_X: |
| 2358 | case BPF_JMP | BPF_JSLE | BPF_X: |
| 2359 | case BPF_JMP32 | BPF_JEQ | BPF_X: |
| 2360 | case BPF_JMP32 | BPF_JNE | BPF_X: |
| 2361 | case BPF_JMP32 | BPF_JGT | BPF_X: |
| 2362 | case BPF_JMP32 | BPF_JLT | BPF_X: |
| 2363 | case BPF_JMP32 | BPF_JGE | BPF_X: |
| 2364 | case BPF_JMP32 | BPF_JLE | BPF_X: |
| 2365 | case BPF_JMP32 | BPF_JSGT | BPF_X: |
| 2366 | case BPF_JMP32 | BPF_JSLT | BPF_X: |
| 2367 | case BPF_JMP32 | BPF_JSGE | BPF_X: |
| 2368 | case BPF_JMP32 | BPF_JSLE | BPF_X: |
| 2369 | /* cmp dst_reg, src_reg */ |
| 2370 | maybe_emit_mod(&prog, dst_reg, src_reg, |
| 2371 | BPF_CLASS(insn->code) == BPF_JMP); |
| 2372 | EMIT2(0x39, add_2reg(0xC0, dst_reg, src_reg)); |
| 2373 | goto emit_cond_jmp; |
| 2374 | |
| 2375 | case BPF_JMP | BPF_JSET | BPF_X: |
| 2376 | case BPF_JMP32 | BPF_JSET | BPF_X: |
| 2377 | /* test dst_reg, src_reg */ |
| 2378 | maybe_emit_mod(&prog, dst_reg, src_reg, |
| 2379 | BPF_CLASS(insn->code) == BPF_JMP); |
| 2380 | EMIT2(0x85, add_2reg(0xC0, dst_reg, src_reg)); |
| 2381 | goto emit_cond_jmp; |
| 2382 | |
| 2383 | case BPF_JMP | BPF_JSET | BPF_K: |
| 2384 | case BPF_JMP32 | BPF_JSET | BPF_K: |
| 2385 | /* test dst_reg, imm32 */ |
| 2386 | maybe_emit_1mod(&prog, dst_reg, |
| 2387 | BPF_CLASS(insn->code) == BPF_JMP); |
| 2388 | EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32); |
| 2389 | goto emit_cond_jmp; |
| 2390 | |
| 2391 | case BPF_JMP | BPF_JEQ | BPF_K: |
| 2392 | case BPF_JMP | BPF_JNE | BPF_K: |
| 2393 | case BPF_JMP | BPF_JGT | BPF_K: |
| 2394 | case BPF_JMP | BPF_JLT | BPF_K: |
| 2395 | case BPF_JMP | BPF_JGE | BPF_K: |
| 2396 | case BPF_JMP | BPF_JLE | BPF_K: |
| 2397 | case BPF_JMP | BPF_JSGT | BPF_K: |
| 2398 | case BPF_JMP | BPF_JSLT | BPF_K: |
| 2399 | case BPF_JMP | BPF_JSGE | BPF_K: |
| 2400 | case BPF_JMP | BPF_JSLE | BPF_K: |
| 2401 | case BPF_JMP32 | BPF_JEQ | BPF_K: |
| 2402 | case BPF_JMP32 | BPF_JNE | BPF_K: |
| 2403 | case BPF_JMP32 | BPF_JGT | BPF_K: |
| 2404 | case BPF_JMP32 | BPF_JLT | BPF_K: |
| 2405 | case BPF_JMP32 | BPF_JGE | BPF_K: |
| 2406 | case BPF_JMP32 | BPF_JLE | BPF_K: |
| 2407 | case BPF_JMP32 | BPF_JSGT | BPF_K: |
| 2408 | case BPF_JMP32 | BPF_JSLT | BPF_K: |
| 2409 | case BPF_JMP32 | BPF_JSGE | BPF_K: |
| 2410 | case BPF_JMP32 | BPF_JSLE | BPF_K: |
| 2411 | /* test dst_reg, dst_reg to save one extra byte */ |
| 2412 | if (imm32 == 0) { |
| 2413 | maybe_emit_mod(&prog, dst_reg, dst_reg, |
| 2414 | BPF_CLASS(insn->code) == BPF_JMP); |
| 2415 | EMIT2(0x85, add_2reg(0xC0, dst_reg, dst_reg)); |
| 2416 | goto emit_cond_jmp; |
| 2417 | } |
| 2418 | |
| 2419 | /* cmp dst_reg, imm8/32 */ |
| 2420 | maybe_emit_1mod(&prog, dst_reg, |
| 2421 | BPF_CLASS(insn->code) == BPF_JMP); |
| 2422 | |
| 2423 | if (is_imm8(imm32)) |
| 2424 | EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32); |
| 2425 | else |
| 2426 | EMIT2_off32(0x81, add_1reg(0xF8, dst_reg), imm32); |
| 2427 | |
| 2428 | emit_cond_jmp: /* Convert BPF opcode to x86 */ |
| 2429 | switch (BPF_OP(insn->code)) { |
| 2430 | case BPF_JEQ: |
| 2431 | jmp_cond = X86_JE; |
| 2432 | break; |
| 2433 | case BPF_JSET: |
| 2434 | case BPF_JNE: |
| 2435 | jmp_cond = X86_JNE; |
| 2436 | break; |
| 2437 | case BPF_JGT: |
| 2438 | /* GT is unsigned '>', JA in x86 */ |
| 2439 | jmp_cond = X86_JA; |
| 2440 | break; |
| 2441 | case BPF_JLT: |
| 2442 | /* LT is unsigned '<', JB in x86 */ |
| 2443 | jmp_cond = X86_JB; |
| 2444 | break; |
| 2445 | case BPF_JGE: |
| 2446 | /* GE is unsigned '>=', JAE in x86 */ |
| 2447 | jmp_cond = X86_JAE; |
| 2448 | break; |
| 2449 | case BPF_JLE: |
| 2450 | /* LE is unsigned '<=', JBE in x86 */ |
| 2451 | jmp_cond = X86_JBE; |
| 2452 | break; |
| 2453 | case BPF_JSGT: |
| 2454 | /* Signed '>', GT in x86 */ |
| 2455 | jmp_cond = X86_JG; |
| 2456 | break; |
| 2457 | case BPF_JSLT: |
| 2458 | /* Signed '<', LT in x86 */ |
| 2459 | jmp_cond = X86_JL; |
| 2460 | break; |
| 2461 | case BPF_JSGE: |
| 2462 | /* Signed '>=', GE in x86 */ |
| 2463 | jmp_cond = X86_JGE; |
| 2464 | break; |
| 2465 | case BPF_JSLE: |
| 2466 | /* Signed '<=', LE in x86 */ |
| 2467 | jmp_cond = X86_JLE; |
| 2468 | break; |
| 2469 | default: /* to silence GCC warning */ |
| 2470 | return -EFAULT; |
| 2471 | } |
| 2472 | jmp_offset = addrs[i + insn->off] - addrs[i]; |
| 2473 | if (is_imm8_jmp_offset(jmp_offset)) { |
| 2474 | if (jmp_padding) { |
| 2475 | /* To keep the jmp_offset valid, the extra bytes are |
| 2476 | * padded before the jump insn, so we subtract the |
| 2477 | * 2 bytes of jmp_cond insn from INSN_SZ_DIFF. |
| 2478 | * |
| 2479 | * If the previous pass already emits an imm8 |
| 2480 | * jmp_cond, then this BPF insn won't shrink, so |
| 2481 | * "nops" is 0. |
| 2482 | * |
| 2483 | * On the other hand, if the previous pass emits an |
| 2484 | * imm32 jmp_cond, the extra 4 bytes(*) is padded to |
| 2485 | * keep the image from shrinking further. |
| 2486 | * |
| 2487 | * (*) imm32 jmp_cond is 6 bytes, and imm8 jmp_cond |
| 2488 | * is 2 bytes, so the size difference is 4 bytes. |
| 2489 | */ |
| 2490 | nops = INSN_SZ_DIFF - 2; |
| 2491 | if (nops != 0 && nops != 4) { |
| 2492 | pr_err("unexpected jmp_cond padding: %d bytes\n", |
| 2493 | nops); |
| 2494 | return -EFAULT; |
| 2495 | } |
| 2496 | emit_nops(&prog, nops); |
| 2497 | } |
| 2498 | EMIT2(jmp_cond, jmp_offset); |
| 2499 | } else if (is_simm32(jmp_offset)) { |
| 2500 | EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset); |
| 2501 | } else { |
| 2502 | pr_err("cond_jmp gen bug %llx\n", jmp_offset); |
| 2503 | return -EFAULT; |
| 2504 | } |
| 2505 | |
| 2506 | break; |
| 2507 | |
| 2508 | case BPF_JMP | BPF_JA: |
| 2509 | case BPF_JMP32 | BPF_JA: |
| 2510 | if (BPF_CLASS(insn->code) == BPF_JMP) { |
| 2511 | if (insn->off == -1) |
| 2512 | /* -1 jmp instructions will always jump |
| 2513 | * backwards two bytes. Explicitly handling |
| 2514 | * this case avoids wasting too many passes |
| 2515 | * when there are long sequences of replaced |
| 2516 | * dead code. |
| 2517 | */ |
| 2518 | jmp_offset = -2; |
| 2519 | else |
| 2520 | jmp_offset = addrs[i + insn->off] - addrs[i]; |
| 2521 | } else { |
| 2522 | if (insn->imm == -1) |
| 2523 | jmp_offset = -2; |
| 2524 | else |
| 2525 | jmp_offset = addrs[i + insn->imm] - addrs[i]; |
| 2526 | } |
| 2527 | |
| 2528 | if (!jmp_offset) { |
| 2529 | /* |
| 2530 | * If jmp_padding is enabled, the extra nops will |
| 2531 | * be inserted. Otherwise, optimize out nop jumps. |
| 2532 | */ |
| 2533 | if (jmp_padding) { |
| 2534 | /* There are 3 possible conditions. |
| 2535 | * (1) This BPF_JA is already optimized out in |
| 2536 | * the previous run, so there is no need |
| 2537 | * to pad any extra byte (0 byte). |
| 2538 | * (2) The previous pass emits an imm8 jmp, |
| 2539 | * so we pad 2 bytes to match the previous |
| 2540 | * insn size. |
| 2541 | * (3) Similarly, the previous pass emits an |
| 2542 | * imm32 jmp, and 5 bytes is padded. |
| 2543 | */ |
| 2544 | nops = INSN_SZ_DIFF; |
| 2545 | if (nops != 0 && nops != 2 && nops != 5) { |
| 2546 | pr_err("unexpected nop jump padding: %d bytes\n", |
| 2547 | nops); |
| 2548 | return -EFAULT; |
| 2549 | } |
| 2550 | emit_nops(&prog, nops); |
| 2551 | } |
| 2552 | break; |
| 2553 | } |
| 2554 | emit_jmp: |
| 2555 | if (is_imm8_jmp_offset(jmp_offset)) { |
| 2556 | if (jmp_padding) { |
| 2557 | /* To avoid breaking jmp_offset, the extra bytes |
| 2558 | * are padded before the actual jmp insn, so |
| 2559 | * 2 bytes is subtracted from INSN_SZ_DIFF. |
| 2560 | * |
| 2561 | * If the previous pass already emits an imm8 |
| 2562 | * jmp, there is nothing to pad (0 byte). |
| 2563 | * |
| 2564 | * If it emits an imm32 jmp (5 bytes) previously |
| 2565 | * and now an imm8 jmp (2 bytes), then we pad |
| 2566 | * (5 - 2 = 3) bytes to stop the image from |
| 2567 | * shrinking further. |
| 2568 | */ |
| 2569 | nops = INSN_SZ_DIFF - 2; |
| 2570 | if (nops != 0 && nops != 3) { |
| 2571 | pr_err("unexpected jump padding: %d bytes\n", |
| 2572 | nops); |
| 2573 | return -EFAULT; |
| 2574 | } |
emit_nops(&prog, nops);
| 2576 | } |
| 2577 | EMIT2(0xEB, jmp_offset); |
| 2578 | } else if (is_simm32(jmp_offset)) { |
| 2579 | EMIT1_off32(0xE9, jmp_offset); |
| 2580 | } else { |
| 2581 | pr_err("jmp gen bug %llx\n", jmp_offset); |
| 2582 | return -EFAULT; |
| 2583 | } |
| 2584 | break; |
| 2585 | |
| 2586 | case BPF_JMP | BPF_EXIT: |
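			/* All exits share a single epilogue: the first BPF_EXIT emits
			 * it and records its offset in cleanup_addr, later exits simply
			 * jump there.
			 */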
| 2587 | if (seen_exit) { |
| 2588 | jmp_offset = ctx->cleanup_addr - addrs[i]; |
| 2589 | goto emit_jmp; |
| 2590 | } |
| 2591 | seen_exit = true; |
| 2592 | /* Update cleanup_addr */ |
| 2593 | ctx->cleanup_addr = proglen; |
| 2594 | if (bpf_prog_was_classic(bpf_prog) && |
| 2595 | !capable(CAP_SYS_ADMIN)) { |
| 2596 | u8 *ip = image + addrs[i - 1]; |
| 2597 | |
| 2598 | if (emit_spectre_bhb_barrier(&prog, ip, bpf_prog)) |
| 2599 | return -EINVAL; |
| 2600 | } |
| 2601 | if (bpf_prog->aux->exception_boundary) { |
| 2602 | pop_callee_regs(&prog, all_callee_regs_used); |
| 2603 | pop_r12(&prog); |
| 2604 | } else { |
| 2605 | pop_callee_regs(&prog, callee_regs_used); |
| 2606 | if (arena_vm_start) |
| 2607 | pop_r12(&prog); |
| 2608 | } |
| 2609 | EMIT1(0xC9); /* leave */ |
| 2610 | emit_return(&prog, image + addrs[i - 1] + (prog - temp)); |
| 2611 | break; |
| 2612 | |
| 2613 | default: |
| 2614 | /* |
 * By design the x86-64 JIT should support all BPF instructions.
 * This error will be seen if a new instruction was added
 * to the interpreter but not to the JIT, or if there is
 * junk in bpf_prog.
| 2619 | */ |
| 2620 | pr_err("bpf_jit: unknown opcode %02x\n", insn->code); |
| 2621 | return -EINVAL; |
| 2622 | } |
| 2623 | |
| 2624 | ilen = prog - temp; |
| 2625 | if (ilen > BPF_MAX_INSN_SIZE) { |
| 2626 | pr_err("bpf_jit: fatal insn size error\n"); |
| 2627 | return -EFAULT; |
| 2628 | } |
| 2629 | |
| 2630 | if (image) { |
| 2631 | /* |
| 2632 | * When populating the image, assert that: |
| 2633 | * |
| 2634 | * i) We do not write beyond the allocated space, and |
| 2635 | * ii) addrs[i] did not change from the prior run, in order |
| 2636 | * to validate assumptions made for computing branch |
| 2637 | * displacements. |
| 2638 | */ |
| 2639 | if (unlikely(proglen + ilen > oldproglen || |
| 2640 | proglen + ilen != addrs[i])) { |
| 2641 | pr_err("bpf_jit: fatal error\n"); |
| 2642 | return -EFAULT; |
| 2643 | } |
| 2644 | memcpy(rw_image + proglen, temp, ilen); |
| 2645 | } |
| 2646 | proglen += ilen; |
| 2647 | addrs[i] = proglen; |
| 2648 | prog = temp; |
| 2649 | } |
| 2650 | |
| 2651 | if (image && excnt != bpf_prog->aux->num_exentries) { |
| 2652 | pr_err("extable is not populated\n"); |
| 2653 | return -EFAULT; |
| 2654 | } |
| 2655 | return proglen; |
| 2656 | } |
| 2657 | |
| 2658 | static void clean_stack_garbage(const struct btf_func_model *m, |
| 2659 | u8 **pprog, int nr_stack_slots, |
| 2660 | int stack_size) |
| 2661 | { |
| 2662 | int arg_size, off; |
| 2663 | u8 *prog; |
| 2664 | |
/* Generally speaking, the compiler will pass the arguments
 * on-stack with a "push" instruction, which takes 8 bytes
 * on the stack. In this case, there won't be garbage values
 * while we copy the arguments from the origin stack frame to
 * the current one as BPF_DW.
 *
 * However, sometimes the compiler will only allocate 4 bytes
 * on the stack for an argument. For now, this case will only
 * happen if there is a single on-stack argument and its size
 * is not more than 4 bytes. In this case, there will be garbage
 * values in the upper 4 bytes of the 8-byte slot where we store
 * the argument in the current stack frame.
| 2677 | * |
| 2678 | * arguments on origin stack: |
| 2679 | * |
| 2680 | * stack_arg_1(4-byte) xxx(4-byte) |
| 2681 | * |
| 2682 | * what we copy: |
| 2683 | * |
| 2684 | * stack_arg_1(8-byte): stack_arg_1(origin) xxx |
| 2685 | * |
| 2686 | * and the xxx is the garbage values which we should clean here. |
| 2687 | */ |
| 2688 | if (nr_stack_slots != 1) |
| 2689 | return; |
| 2690 | |
| 2691 | /* the size of the last argument */ |
| 2692 | arg_size = m->arg_size[m->nr_args - 1]; |
| 2693 | if (arg_size <= 4) { |
| 2694 | off = -(stack_size - 4); |
| 2695 | prog = *pprog; |
| 2696 | /* mov DWORD PTR [rbp + off], 0 */ |
| 2697 | if (!is_imm8(off)) |
| 2698 | EMIT2_off32(0xC7, 0x85, off); |
| 2699 | else |
| 2700 | EMIT3(0xC7, 0x45, off); |
| 2701 | EMIT(0, 4); |
| 2702 | *pprog = prog; |
| 2703 | } |
| 2704 | } |
| 2705 | |
| 2706 | /* get the count of the regs that are used to pass arguments */ |
| 2707 | static int get_nr_used_regs(const struct btf_func_model *m) |
| 2708 | { |
| 2709 | int i, arg_regs, nr_used_regs = 0; |
| 2710 | |
| 2711 | for (i = 0; i < min_t(int, m->nr_args, MAX_BPF_FUNC_ARGS); i++) { |
| 2712 | arg_regs = (m->arg_size[i] + 7) / 8; |
| 2713 | if (nr_used_regs + arg_regs <= 6) |
| 2714 | nr_used_regs += arg_regs; |
| 2715 | |
| 2716 | if (nr_used_regs >= 6) |
| 2717 | break; |
| 2718 | } |
| 2719 | |
| 2720 | return nr_used_regs; |
| 2721 | } |
| 2722 | |
| 2723 | static void save_args(const struct btf_func_model *m, u8 **prog, |
| 2724 | int stack_size, bool for_call_origin) |
| 2725 | { |
| 2726 | int arg_regs, first_off = 0, nr_regs = 0, nr_stack_slots = 0; |
| 2727 | int i, j; |
| 2728 | |
| 2729 | /* Store function arguments to stack. |
| 2730 | * For a function that accepts two pointers the sequence will be: |
| 2731 | * mov QWORD PTR [rbp-0x10],rdi |
| 2732 | * mov QWORD PTR [rbp-0x8],rsi |
| 2733 | */ |
| 2734 | for (i = 0; i < min_t(int, m->nr_args, MAX_BPF_FUNC_ARGS); i++) { |
| 2735 | arg_regs = (m->arg_size[i] + 7) / 8; |
| 2736 | |
| 2737 | /* According to the research of Yonghong, struct members |
 * should be passed either all in registers or all on the stack.
 * Meanwhile, the compiler will pass an argument in regs only
 * if the remaining regs can hold the whole argument.
| 2741 | * |
 * The args can therefore end up out of order. For example:
| 2743 | * |
| 2744 | * struct foo_struct { |
| 2745 | * long a; |
| 2746 | * int b; |
| 2747 | * }; |
| 2748 | * int foo(char, char, char, char, char, struct foo_struct, |
| 2749 | * char); |
| 2750 | * |
 * args 1-5 and arg 7 will be passed in regs, and arg 6 will
 * be passed on the stack.
| 2753 | */ |
| 2754 | if (nr_regs + arg_regs > 6) { |
| 2755 | /* copy function arguments from origin stack frame |
| 2756 | * into current stack frame. |
| 2757 | * |
| 2758 | * The starting address of the arguments on-stack |
| 2759 | * is: |
| 2760 | * rbp + 8(push rbp) + |
| 2761 | * 8(return addr of origin call) + |
| 2762 | * 8(return addr of the caller) |
| 2763 | * which means: rbp + 24 |
| 2764 | */ |
| 2765 | for (j = 0; j < arg_regs; j++) { |
| 2766 | emit_ldx(prog, BPF_DW, BPF_REG_0, BPF_REG_FP, |
| 2767 | nr_stack_slots * 8 + 0x18); |
| 2768 | emit_stx(prog, BPF_DW, BPF_REG_FP, BPF_REG_0, |
| 2769 | -stack_size); |
| 2770 | |
| 2771 | if (!nr_stack_slots) |
| 2772 | first_off = stack_size; |
| 2773 | stack_size -= 8; |
| 2774 | nr_stack_slots++; |
| 2775 | } |
| 2776 | } else { |
/* When preparing the arguments on-stack for the origin
 * call, only copy the arguments passed on-stack into the
 * current 'stack_size' area and ignore the ones in regs.
 */
| 2781 | if (for_call_origin) { |
| 2782 | nr_regs += arg_regs; |
| 2783 | continue; |
| 2784 | } |
| 2785 | |
| 2786 | /* copy the arguments from regs into stack */ |
| 2787 | for (j = 0; j < arg_regs; j++) { |
| 2788 | emit_stx(prog, BPF_DW, BPF_REG_FP, |
| 2789 | nr_regs == 5 ? X86_REG_R9 : BPF_REG_1 + nr_regs, |
| 2790 | -stack_size); |
| 2791 | stack_size -= 8; |
| 2792 | nr_regs++; |
| 2793 | } |
| 2794 | } |
| 2795 | } |
| 2796 | |
| 2797 | clean_stack_garbage(m, prog, nr_stack_slots, first_off); |
| 2798 | } |
| 2799 | |
| 2800 | static void restore_regs(const struct btf_func_model *m, u8 **prog, |
| 2801 | int stack_size) |
| 2802 | { |
| 2803 | int i, j, arg_regs, nr_regs = 0; |
| 2804 | |
| 2805 | /* Restore function arguments from stack. |
| 2806 | * For a function that accepts two pointers the sequence will be: |
| 2807 | * EMIT4(0x48, 0x8B, 0x7D, 0xF0); mov rdi,QWORD PTR [rbp-0x10] |
| 2808 | * EMIT4(0x48, 0x8B, 0x75, 0xF8); mov rsi,QWORD PTR [rbp-0x8] |
| 2809 | * |
| 2810 | * The logic here is similar to what we do in save_args() |
| 2811 | */ |
| 2812 | for (i = 0; i < min_t(int, m->nr_args, MAX_BPF_FUNC_ARGS); i++) { |
| 2813 | arg_regs = (m->arg_size[i] + 7) / 8; |
| 2814 | if (nr_regs + arg_regs <= 6) { |
| 2815 | for (j = 0; j < arg_regs; j++) { |
| 2816 | emit_ldx(prog, BPF_DW, |
| 2817 | nr_regs == 5 ? X86_REG_R9 : BPF_REG_1 + nr_regs, |
| 2818 | BPF_REG_FP, |
| 2819 | -stack_size); |
| 2820 | stack_size -= 8; |
| 2821 | nr_regs++; |
| 2822 | } |
| 2823 | } else { |
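			/* Arguments passed on the stack do not need to be restored
			 * into registers, just skip over their slots.
			 */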
| 2824 | stack_size -= 8 * arg_regs; |
| 2825 | } |
| 2826 | |
| 2827 | if (nr_regs >= 6) |
| 2828 | break; |
| 2829 | } |
| 2830 | } |
| 2831 | |
| 2832 | static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog, |
| 2833 | struct bpf_tramp_link *l, int stack_size, |
| 2834 | int run_ctx_off, bool save_ret, |
| 2835 | void *image, void *rw_image) |
| 2836 | { |
| 2837 | u8 *prog = *pprog; |
| 2838 | u8 *jmp_insn; |
| 2839 | int ctx_cookie_off = offsetof(struct bpf_tramp_run_ctx, bpf_cookie); |
| 2840 | struct bpf_prog *p = l->link.prog; |
| 2841 | u64 cookie = l->cookie; |
| 2842 | |
| 2843 | /* mov rdi, cookie */ |
| 2844 | emit_mov_imm64(&prog, BPF_REG_1, (long) cookie >> 32, (u32) (long) cookie); |
| 2845 | |
| 2846 | /* Prepare struct bpf_tramp_run_ctx. |
| 2847 | * |
 * Stack space for bpf_tramp_run_ctx is already reserved by
 * arch_prepare_bpf_trampoline().
| 2850 | * |
| 2851 | * mov QWORD PTR [rbp - run_ctx_off + ctx_cookie_off], rdi |
| 2852 | */ |
| 2853 | emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_1, -run_ctx_off + ctx_cookie_off); |
| 2854 | |
| 2855 | /* arg1: mov rdi, progs[i] */ |
| 2856 | emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p); |
/* arg2: lea rsi, [rbp - run_ctx_off] */
| 2858 | if (!is_imm8(-run_ctx_off)) |
| 2859 | EMIT3_off32(0x48, 0x8D, 0xB5, -run_ctx_off); |
| 2860 | else |
| 2861 | EMIT4(0x48, 0x8D, 0x75, -run_ctx_off); |
| 2862 | |
| 2863 | if (emit_rsb_call(&prog, bpf_trampoline_enter(p), image + (prog - (u8 *)rw_image))) |
| 2864 | return -EINVAL; |
| 2865 | /* remember prog start time returned by __bpf_prog_enter */ |
| 2866 | emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0); |
| 2867 | |
| 2868 | /* if (__bpf_prog_enter*(prog) == 0) |
| 2869 | * goto skip_exec_of_prog; |
| 2870 | */ |
| 2871 | EMIT3(0x48, 0x85, 0xC0); /* test rax,rax */ |
| 2872 | /* emit 2 nops that will be replaced with JE insn */ |
| 2873 | jmp_insn = prog; |
| 2874 | emit_nops(&prog, 2); |
| 2875 | |
| 2876 | /* arg1: lea rdi, [rbp - stack_size] */ |
| 2877 | if (!is_imm8(-stack_size)) |
| 2878 | EMIT3_off32(0x48, 0x8D, 0xBD, -stack_size); |
| 2879 | else |
| 2880 | EMIT4(0x48, 0x8D, 0x7D, -stack_size); |
| 2881 | /* arg2: progs[i]->insnsi for interpreter */ |
| 2882 | if (!p->jited) |
| 2883 | emit_mov_imm64(&prog, BPF_REG_2, |
| 2884 | (long) p->insnsi >> 32, |
| 2885 | (u32) (long) p->insnsi); |
| 2886 | /* call JITed bpf program or interpreter */ |
| 2887 | if (emit_rsb_call(&prog, p->bpf_func, image + (prog - (u8 *)rw_image))) |
| 2888 | return -EINVAL; |
| 2889 | |
| 2890 | /* |
 * BPF_TRAMP_MODIFY_RETURN trampolines can modify the return
 * value of the previous call, which is then passed on the stack
 * to the next BPF program.
| 2894 | * |
| 2895 | * BPF_TRAMP_FENTRY trampoline may need to return the return |
| 2896 | * value of BPF_PROG_TYPE_STRUCT_OPS prog. |
| 2897 | */ |
| 2898 | if (save_ret) |
| 2899 | emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8); |
| 2900 | |
| 2901 | /* replace 2 nops with JE insn, since jmp target is known */ |
| 2902 | jmp_insn[0] = X86_JE; |
| 2903 | jmp_insn[1] = prog - jmp_insn - 2; |
| 2904 | |
| 2905 | /* arg1: mov rdi, progs[i] */ |
| 2906 | emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p); |
| 2907 | /* arg2: mov rsi, rbx <- start time in nsec */ |
| 2908 | emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6); |
| 2909 | /* arg3: lea rdx, [rbp - run_ctx_off] */ |
| 2910 | if (!is_imm8(-run_ctx_off)) |
| 2911 | EMIT3_off32(0x48, 0x8D, 0x95, -run_ctx_off); |
| 2912 | else |
| 2913 | EMIT4(0x48, 0x8D, 0x55, -run_ctx_off); |
| 2914 | if (emit_rsb_call(&prog, bpf_trampoline_exit(p), image + (prog - (u8 *)rw_image))) |
| 2915 | return -EINVAL; |
| 2916 | |
| 2917 | *pprog = prog; |
| 2918 | return 0; |
| 2919 | } |
| 2920 | |
| 2921 | static void emit_align(u8 **pprog, u32 align) |
| 2922 | { |
| 2923 | u8 *target, *prog = *pprog; |
| 2924 | |
| 2925 | target = PTR_ALIGN(prog, align); |
| 2926 | if (target != prog) |
| 2927 | emit_nops(&prog, target - prog); |
| 2928 | |
| 2929 | *pprog = prog; |
| 2930 | } |
| 2931 | |
| 2932 | static int emit_cond_near_jump(u8 **pprog, void *func, void *ip, u8 jmp_cond) |
| 2933 | { |
| 2934 | u8 *prog = *pprog; |
| 2935 | s64 offset; |
| 2936 | |
| 2937 | offset = func - (ip + 2 + 4); |
| 2938 | if (!is_simm32(offset)) { |
| 2939 | pr_err("Target %p is out of range\n", func); |
| 2940 | return -EINVAL; |
| 2941 | } |
| 2942 | EMIT2_off32(0x0F, jmp_cond + 0x10, offset); |
| 2943 | *pprog = prog; |
| 2944 | return 0; |
| 2945 | } |
| 2946 | |
| 2947 | static int invoke_bpf(const struct btf_func_model *m, u8 **pprog, |
| 2948 | struct bpf_tramp_links *tl, int stack_size, |
| 2949 | int run_ctx_off, bool save_ret, |
| 2950 | void *image, void *rw_image) |
| 2951 | { |
| 2952 | int i; |
| 2953 | u8 *prog = *pprog; |
| 2954 | |
| 2955 | for (i = 0; i < tl->nr_links; i++) { |
| 2956 | if (invoke_bpf_prog(m, &prog, tl->links[i], stack_size, |
| 2957 | run_ctx_off, save_ret, image, rw_image)) |
| 2958 | return -EINVAL; |
| 2959 | } |
| 2960 | *pprog = prog; |
| 2961 | return 0; |
| 2962 | } |
| 2963 | |
| 2964 | static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog, |
| 2965 | struct bpf_tramp_links *tl, int stack_size, |
| 2966 | int run_ctx_off, u8 **branches, |
| 2967 | void *image, void *rw_image) |
| 2968 | { |
| 2969 | u8 *prog = *pprog; |
| 2970 | int i; |
| 2971 | |
| 2972 | /* The first fmod_ret program will receive a garbage return value. |
| 2973 | * Set this to 0 to avoid confusing the program. |
| 2974 | */ |
| 2975 | emit_mov_imm32(&prog, false, BPF_REG_0, 0); |
| 2976 | emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8); |
| 2977 | for (i = 0; i < tl->nr_links; i++) { |
| 2978 | if (invoke_bpf_prog(m, &prog, tl->links[i], stack_size, run_ctx_off, true, |
| 2979 | image, rw_image)) |
| 2980 | return -EINVAL; |
| 2981 | |
| 2982 | /* mod_ret prog stored return value into [rbp - 8]. Emit: |
| 2983 | * if (*(u64 *)(rbp - 8) != 0) |
| 2984 | * goto do_fexit; |
| 2985 | */ |
| 2986 | /* cmp QWORD PTR [rbp - 0x8], 0x0 */ |
| 2987 | EMIT4(0x48, 0x83, 0x7d, 0xf8); EMIT1(0x00); |
| 2988 | |
/* Save the location of the branch and generate 6 nops
 * (4 bytes for an offset and 2 bytes for the jump). These nops
| 2991 | * are replaced with a conditional jump once do_fexit (i.e. the |
| 2992 | * start of the fexit invocation) is finalized. |
| 2993 | */ |
| 2994 | branches[i] = prog; |
| 2995 | emit_nops(&prog, 4 + 2); |
| 2996 | } |
| 2997 | |
| 2998 | *pprog = prog; |
| 2999 | return 0; |
| 3000 | } |
| 3001 | |
| 3002 | /* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */ |
| 3003 | #define LOAD_TRAMP_TAIL_CALL_CNT_PTR(stack) \ |
| 3004 | __LOAD_TCC_PTR(-round_up(stack, 8) - 8) |
| 3005 | |
| 3006 | /* Example: |
| 3007 | * __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev); |
 * its 'struct btf_func_model' will have nr_args=2
| 3009 | * The assembly code when eth_type_trans is executing after trampoline: |
| 3010 | * |
| 3011 | * push rbp |
| 3012 | * mov rbp, rsp |
| 3013 | * sub rsp, 16 // space for skb and dev |
| 3014 | * push rbx // temp regs to pass start time |
| 3015 | * mov qword ptr [rbp - 16], rdi // save skb pointer to stack |
| 3016 | * mov qword ptr [rbp - 8], rsi // save dev pointer to stack |
| 3017 | * call __bpf_prog_enter // rcu_read_lock and preempt_disable |
 * mov rbx, rax                  // remember start time if bpf stats are enabled
| 3019 | * lea rdi, [rbp - 16] // R1==ctx of bpf prog |
| 3020 | * call addr_of_jited_FENTRY_prog |
| 3021 | * movabsq rdi, 64bit_addr_of_struct_bpf_prog // unused if bpf stats are off |
| 3022 | * mov rsi, rbx // prog start time |
| 3023 | * call __bpf_prog_exit // rcu_read_unlock, preempt_enable and stats math |
| 3024 | * mov rdi, qword ptr [rbp - 16] // restore skb pointer from stack |
| 3025 | * mov rsi, qword ptr [rbp - 8] // restore dev pointer from stack |
| 3026 | * pop rbx |
| 3027 | * leave |
| 3028 | * ret |
| 3029 | * |
 * eth_type_trans has a 5-byte nop at the beginning. These 5 bytes will be
 * replaced with 'call generated_bpf_trampoline'. When it returns,
 * eth_type_trans will continue executing with the original skb and dev pointers.
| 3033 | * |
| 3034 | * The assembly code when eth_type_trans is called from trampoline: |
| 3035 | * |
| 3036 | * push rbp |
| 3037 | * mov rbp, rsp |
| 3038 | * sub rsp, 24 // space for skb, dev, return value |
| 3039 | * push rbx // temp regs to pass start time |
| 3040 | * mov qword ptr [rbp - 24], rdi // save skb pointer to stack |
| 3041 | * mov qword ptr [rbp - 16], rsi // save dev pointer to stack |
| 3042 | * call __bpf_prog_enter // rcu_read_lock and preempt_disable |
| 3043 | * mov rbx, rax // remember start time if bpf stats are enabled |
| 3044 | * lea rdi, [rbp - 24] // R1==ctx of bpf prog |
| 3045 | * call addr_of_jited_FENTRY_prog // bpf prog can access skb and dev |
| 3046 | * movabsq rdi, 64bit_addr_of_struct_bpf_prog // unused if bpf stats are off |
| 3047 | * mov rsi, rbx // prog start time |
| 3048 | * call __bpf_prog_exit // rcu_read_unlock, preempt_enable and stats math |
| 3049 | * mov rdi, qword ptr [rbp - 24] // restore skb pointer from stack |
| 3050 | * mov rsi, qword ptr [rbp - 16] // restore dev pointer from stack |
| 3051 | * call eth_type_trans+5 // execute body of eth_type_trans |
| 3052 | * mov qword ptr [rbp - 8], rax // save return value |
| 3053 | * call __bpf_prog_enter // rcu_read_lock and preempt_disable |
 * mov rbx, rax                  // remember start time if bpf stats are enabled
| 3055 | * lea rdi, [rbp - 24] // R1==ctx of bpf prog |
| 3056 | * call addr_of_jited_FEXIT_prog // bpf prog can access skb, dev, return value |
| 3057 | * movabsq rdi, 64bit_addr_of_struct_bpf_prog // unused if bpf stats are off |
| 3058 | * mov rsi, rbx // prog start time |
| 3059 | * call __bpf_prog_exit // rcu_read_unlock, preempt_enable and stats math |
| 3060 | * mov rax, qword ptr [rbp - 8] // restore eth_type_trans's return value |
| 3061 | * pop rbx |
| 3062 | * leave |
| 3063 | * add rsp, 8 // skip eth_type_trans's frame |
| 3064 | * ret // return to its caller |
| 3065 | */ |
| 3066 | static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_image, |
| 3067 | void *rw_image_end, void *image, |
| 3068 | const struct btf_func_model *m, u32 flags, |
| 3069 | struct bpf_tramp_links *tlinks, |
| 3070 | void *func_addr) |
| 3071 | { |
| 3072 | int i, ret, nr_regs = m->nr_args, stack_size = 0; |
| 3073 | int regs_off, nregs_off, ip_off, run_ctx_off, arg_stack_off, rbx_off; |
| 3074 | struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY]; |
| 3075 | struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT]; |
| 3076 | struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN]; |
| 3077 | void *orig_call = func_addr; |
| 3078 | u8 **branches = NULL; |
| 3079 | u8 *prog; |
| 3080 | bool save_ret; |
| 3081 | |
| 3082 | /* |
 * F_INDIRECT is only compatible with F_RET_FENTRY_RET; it is
 * explicitly incompatible with F_CALL_ORIG | F_SKIP_FRAME | F_IP_ARG
 * because those flags all rely on @func_addr.
| 3086 | */ |
| 3087 | WARN_ON_ONCE((flags & BPF_TRAMP_F_INDIRECT) && |
| 3088 | (flags & ~(BPF_TRAMP_F_INDIRECT | BPF_TRAMP_F_RET_FENTRY_RET))); |
| 3089 | |
| 3090 | /* extra registers for struct arguments */ |
| 3091 | for (i = 0; i < m->nr_args; i++) { |
| 3092 | if (m->arg_flags[i] & BTF_FMODEL_STRUCT_ARG) |
| 3093 | nr_regs += (m->arg_size[i] + 7) / 8 - 1; |
| 3094 | } |
| 3095 | |
| 3096 | /* x86-64 supports up to MAX_BPF_FUNC_ARGS arguments. 1-6 |
 * are passed through regs, the rest are passed on the stack.
| 3098 | */ |
| 3099 | if (nr_regs > MAX_BPF_FUNC_ARGS) |
| 3100 | return -ENOTSUPP; |
| 3101 | |
| 3102 | /* Generated trampoline stack layout: |
| 3103 | * |
| 3104 | * RBP + 8 [ return address ] |
| 3105 | * RBP + 0 [ RBP ] |
| 3106 | * |
| 3107 | * RBP - 8 [ return value ] BPF_TRAMP_F_CALL_ORIG or |
| 3108 | * BPF_TRAMP_F_RET_FENTRY_RET flags |
| 3109 | * |
| 3110 | * [ reg_argN ] always |
| 3111 | * [ ... ] |
| 3112 | * RBP - regs_off [ reg_arg1 ] program's ctx pointer |
| 3113 | * |
| 3114 | * RBP - nregs_off [ regs count ] always |
| 3115 | * |
| 3116 | * RBP - ip_off [ traced function ] BPF_TRAMP_F_IP_ARG flag |
| 3117 | * |
| 3118 | * RBP - rbx_off [ rbx value ] always |
| 3119 | * |
| 3120 | * RBP - run_ctx_off [ bpf_tramp_run_ctx ] |
| 3121 | * |
| 3122 | * [ stack_argN ] BPF_TRAMP_F_CALL_ORIG |
| 3123 | * [ ... ] |
| 3124 | * [ stack_arg2 ] |
| 3125 | * RBP - arg_stack_off [ stack_arg1 ] |
| 3126 | * RSP [ tail_call_cnt_ptr ] BPF_TRAMP_F_TAIL_CALL_CTX |
| 3127 | */ |
| 3128 | |
| 3129 | /* room for return value of orig_call or fentry prog */ |
| 3130 | save_ret = flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET); |
| 3131 | if (save_ret) |
| 3132 | stack_size += 8; |
| 3133 | |
| 3134 | stack_size += nr_regs * 8; |
| 3135 | regs_off = stack_size; |
| 3136 | |
| 3137 | /* regs count */ |
| 3138 | stack_size += 8; |
| 3139 | nregs_off = stack_size; |
| 3140 | |
| 3141 | if (flags & BPF_TRAMP_F_IP_ARG) |
| 3142 | stack_size += 8; /* room for IP address argument */ |
| 3143 | |
| 3144 | ip_off = stack_size; |
| 3145 | |
| 3146 | stack_size += 8; |
| 3147 | rbx_off = stack_size; |
| 3148 | |
| 3149 | stack_size += (sizeof(struct bpf_tramp_run_ctx) + 7) & ~0x7; |
| 3150 | run_ctx_off = stack_size; |
| 3151 | |
| 3152 | if (nr_regs > 6 && (flags & BPF_TRAMP_F_CALL_ORIG)) { |
/* the space used to pass arguments on the stack */
| 3154 | stack_size += (nr_regs - get_nr_used_regs(m)) * 8; |
/* make sure the stack pointer is 16-byte aligned if we
 * need to pass arguments on the stack, which means
 * [stack_size + 8(rbp) + 8(rip) + 8(origin rip)]
 * should be 16-byte aligned. The following code depends on
 * stack_size already being 8-byte aligned.
| 3160 | */ |
| 3161 | stack_size += (stack_size % 16) ? 0 : 8; |
| 3162 | } |
| 3163 | |
| 3164 | arg_stack_off = stack_size; |
| 3165 | |
| 3166 | if (flags & BPF_TRAMP_F_SKIP_FRAME) { |
| 3167 | /* skip the patched call instruction and point orig_call to the |
| 3168 | * actual body of the kernel function. |
| 3169 | */ |
| 3170 | if (is_endbr(orig_call)) |
| 3171 | orig_call += ENDBR_INSN_SIZE; |
| 3172 | orig_call += X86_PATCH_SIZE; |
| 3173 | } |
| 3174 | |
| 3175 | prog = rw_image; |
| 3176 | |
| 3177 | if (flags & BPF_TRAMP_F_INDIRECT) { |
| 3178 | /* |
| 3179 | * Indirect call for bpf_struct_ops |
| 3180 | */ |
| 3181 | emit_cfi(&prog, image, |
| 3182 | cfi_get_func_hash(func_addr), |
| 3183 | cfi_get_func_arity(func_addr)); |
| 3184 | } else { |
| 3185 | /* |
| 3186 | * Direct-call fentry stub, as such it needs accounting for the |
| 3187 | * __fentry__ call. |
| 3188 | */ |
| 3189 | x86_call_depth_emit_accounting(&prog, NULL, image); |
| 3190 | } |
| 3191 | EMIT1(0x55); /* push rbp */ |
| 3192 | EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */ |
| 3193 | if (!is_imm8(stack_size)) { |
| 3194 | /* sub rsp, stack_size */ |
| 3195 | EMIT3_off32(0x48, 0x81, 0xEC, stack_size); |
| 3196 | } else { |
| 3197 | /* sub rsp, stack_size */ |
| 3198 | EMIT4(0x48, 0x83, 0xEC, stack_size); |
| 3199 | } |
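| | /* |
| | * At this point the emitted trampoline prologue is, schematically: |
| | * push rbp |
| | * mov rbp, rsp |
| | * sub rsp, stack_size |
| | * optionally followed by a "push rax" that saves the tail_call_cnt_ptr |
| | * handed in by the caller when BPF_TRAMP_F_TAIL_CALL_CTX is set. |
| | */ |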
| 3200 | if (flags & BPF_TRAMP_F_TAIL_CALL_CTX) |
| 3201 | EMIT1(0x50); /* push rax */ |
| 3202 | /* mov QWORD PTR [rbp - rbx_off], rbx */ |
| 3203 | emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_6, -rbx_off); |
| 3204 | |
| 3205 | /* Store number of argument registers of the traced function: |
| 3206 | * mov rax, nr_regs |
| 3207 | * mov QWORD PTR [rbp - nregs_off], rax |
| 3208 | */ |
| 3209 | emit_mov_imm64(&prog, BPF_REG_0, 0, (u32) nr_regs); |
| 3210 | emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -nregs_off); |
| 3211 | |
| 3212 | if (flags & BPF_TRAMP_F_IP_ARG) { |
| 3213 | /* Store IP address of the traced function: |
| 3214 | * movabs rax, func_addr |
| 3215 | * mov QWORD PTR [rbp - ip_off], rax |
| 3216 | */ |
| 3217 | emit_mov_imm64(&prog, BPF_REG_0, (long) func_addr >> 32, (u32) (long) func_addr); |
| 3218 | emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -ip_off); |
| 3219 | } |
| 3220 | |
| 3221 | save_args(m, &prog, regs_off, false); |
| 3222 | |
| 3223 | if (flags & BPF_TRAMP_F_CALL_ORIG) { |
| 3224 | /* arg1: mov rdi, im */ |
| 3225 | emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im); |
| 3226 | if (emit_rsb_call(&prog, __bpf_tramp_enter, |
| 3227 | image + (prog - (u8 *)rw_image))) { |
| 3228 | ret = -EINVAL; |
| 3229 | goto cleanup; |
| 3230 | } |
| 3231 | } |
| 3232 | |
| 3233 | if (fentry->nr_links) { |
| 3234 | if (invoke_bpf(m, &prog, fentry, regs_off, run_ctx_off, |
| 3235 | flags & BPF_TRAMP_F_RET_FENTRY_RET, image, rw_image)) |
| 3236 | return -EINVAL; |
| 3237 | } |
| 3238 | |
| 3239 | if (fmod_ret->nr_links) { |
| 3240 | branches = kcalloc(fmod_ret->nr_links, sizeof(u8 *), |
| 3241 | GFP_KERNEL); |
| 3242 | if (!branches) |
| 3243 | return -ENOMEM; |
| 3244 | |
| 3245 | if (invoke_bpf_mod_ret(m, &prog, fmod_ret, regs_off, |
| 3246 | run_ctx_off, branches, image, rw_image)) { |
| 3247 | ret = -EINVAL; |
| 3248 | goto cleanup; |
| 3249 | } |
| 3250 | } |
| 3251 | |
| 3252 | if (flags & BPF_TRAMP_F_CALL_ORIG) { |
| 3253 | restore_regs(m, &prog, regs_off); |
| 3254 | save_args(m, &prog, arg_stack_off, true); |
| 3255 | |
| 3256 | if (flags & BPF_TRAMP_F_TAIL_CALL_CTX) { |
| 3257 | /* Before calling the original function, load the |
| 3258 | * tail_call_cnt_ptr from stack to rax. |
| 3259 | */ |
| 3260 | LOAD_TRAMP_TAIL_CALL_CNT_PTR(stack_size); |
| 3261 | } |
| 3262 | |
| 3263 | if (flags & BPF_TRAMP_F_ORIG_STACK) { |
| 3264 | emit_ldx(&prog, BPF_DW, BPF_REG_6, BPF_REG_FP, 8); |
| 3265 | EMIT2(0xff, 0xd3); /* call *rbx */ |
| 3266 | } else { |
| 3267 | /* call original function */ |
| 3268 | if (emit_rsb_call(&prog, orig_call, image + (prog - (u8 *)rw_image))) { |
| 3269 | ret = -EINVAL; |
| 3270 | goto cleanup; |
| 3271 | } |
| 3272 | } |
| 3273 | /* remember the return value on the stack for the bpf prog to access */ |
| 3274 | emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8); |
| 3275 | im->ip_after_call = image + (prog - (u8 *)rw_image); |
| 3276 | emit_nops(&prog, X86_PATCH_SIZE); |
| 3277 | } |
| 3278 | |
| 3279 | if (fmod_ret->nr_links) { |
| 3280 | /* From Intel 64 and IA-32 Architectures Optimization |
| 3281 | * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler |
| 3282 | * Coding Rule 11: All branch targets should be 16-byte |
| 3283 | * aligned. |
| 3284 | */ |
| 3285 | emit_align(&prog, 16); |
| 3286 | /* Update the branches saved in invoke_bpf_mod_ret with the |
| 3287 | * aligned address of do_fexit. |
| 3288 | */ |
| 3289 | for (i = 0; i < fmod_ret->nr_links; i++) { |
| 3290 | emit_cond_near_jump(&branches[i], image + (prog - (u8 *)rw_image), |
| 3291 | image + (branches[i] - (u8 *)rw_image), X86_JNE); |
| 3292 | } |
| 3293 | } |
| 3294 | |
| 3295 | if (fexit->nr_links) { |
| 3296 | if (invoke_bpf(m, &prog, fexit, regs_off, run_ctx_off, |
| 3297 | false, image, rw_image)) { |
| 3298 | ret = -EINVAL; |
| 3299 | goto cleanup; |
| 3300 | } |
| 3301 | } |
| 3302 | |
| 3303 | if (flags & BPF_TRAMP_F_RESTORE_REGS) |
| 3304 | restore_regs(m, &prog, regs_off); |
| 3305 | |
| 3306 | /* This needs to be done regardless. If there were fmod_ret programs, |
| 3307 | * the return value is only updated on the stack and still needs to be |
| 3308 | * restored to R0. |
| 3309 | */ |
| 3310 | if (flags & BPF_TRAMP_F_CALL_ORIG) { |
| 3311 | im->ip_epilogue = image + (prog - (u8 *)rw_image); |
| 3312 | /* arg1: mov rdi, im */ |
| 3313 | emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im); |
| 3314 | if (emit_rsb_call(&prog, __bpf_tramp_exit, image + (prog - (u8 *)rw_image))) { |
| 3315 | ret = -EINVAL; |
| 3316 | goto cleanup; |
| 3317 | } |
| 3318 | } else if (flags & BPF_TRAMP_F_TAIL_CALL_CTX) { |
| 3319 | /* Before running the original function, load the |
| 3320 | * tail_call_cnt_ptr from stack to rax. |
| 3321 | */ |
| 3322 | LOAD_TRAMP_TAIL_CALL_CNT_PTR(stack_size); |
| 3323 | } |
| 3324 | |
| 3325 | /* restore return value of orig_call or fentry prog back into RAX */ |
| 3326 | if (save_ret) |
| 3327 | emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8); |
| 3328 | |
| 3329 | emit_ldx(&prog, BPF_DW, BPF_REG_6, BPF_REG_FP, -rbx_off); |
| 3330 | EMIT1(0xC9); /* leave */ |
| 3331 | if (flags & BPF_TRAMP_F_SKIP_FRAME) { |
| 3332 | /* skip our return address and return to parent */ |
| 3333 | EMIT4(0x48, 0x83, 0xC4, 8); /* add rsp, 8 */ |
| 3334 | } |
| 3335 | emit_return(&prog, image + (prog - (u8 *)rw_image)); |
| 3336 | /* Make sure the trampoline generation logic doesn't overflow */ |
| 3337 | if (WARN_ON_ONCE(prog > (u8 *)rw_image_end - BPF_INSN_SAFETY)) { |
| 3338 | ret = -EFAULT; |
| 3339 | goto cleanup; |
| 3340 | } |
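| | /* Return the generated image size padded with BPF_INSN_SAFETY; |
| | * arch_bpf_trampoline_size() below relies on this as its size estimate. |
| | */ |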
| 3341 | ret = prog - (u8 *)rw_image + BPF_INSN_SAFETY; |
| 3342 | |
| 3343 | cleanup: |
| 3344 | kfree(branches); |
| 3345 | return ret; |
| 3346 | } |
| 3347 | |
| 3348 | void *arch_alloc_bpf_trampoline(unsigned int size) |
| 3349 | { |
| 3350 | return bpf_prog_pack_alloc(size, jit_fill_hole); |
| 3351 | } |
| 3352 | |
| 3353 | void arch_free_bpf_trampoline(void *image, unsigned int size) |
| 3354 | { |
| 3355 | bpf_prog_pack_free(image, size); |
| 3356 | } |
| 3357 | |
| 3358 | int arch_protect_bpf_trampoline(void *image, unsigned int size) |
| 3359 | { |
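| | /* Nothing to do: the image comes from the bpf_prog_pack allocator |
| | * (see arch_alloc_bpf_trampoline() above), which already maps it |
| | * read-only and executable. |
| | */ |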
| 3360 | return 0; |
| 3361 | } |
| 3362 | |
| 3363 | int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end, |
| 3364 | const struct btf_func_model *m, u32 flags, |
| 3365 | struct bpf_tramp_links *tlinks, |
| 3366 | void *func_addr) |
| 3367 | { |
| 3368 | void *rw_image, *tmp; |
| 3369 | int ret; |
| 3370 | u32 size = image_end - image; |
| 3371 | |
| 3372 | /* rw_image doesn't need to be in module memory range, so we can |
| 3373 | * use kvmalloc. |
| 3374 | */ |
| 3375 | rw_image = kvmalloc(size, GFP_KERNEL); |
| 3376 | if (!rw_image) |
| 3377 | return -ENOMEM; |
| 3378 | |
| 3379 | ret = __arch_prepare_bpf_trampoline(im, rw_image, rw_image + size, image, m, |
| 3380 | flags, tlinks, func_addr); |
| 3381 | if (ret < 0) |
| 3382 | goto out; |
| 3383 | |
| 3384 | tmp = bpf_arch_text_copy(image, rw_image, size); |
| 3385 | if (IS_ERR(tmp)) |
| 3386 | ret = PTR_ERR(tmp); |
| 3387 | out: |
| 3388 | kvfree(rw_image); |
| 3389 | return ret; |
| 3390 | } |
| 3391 | |
| 3392 | int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags, |
| 3393 | struct bpf_tramp_links *tlinks, void *func_addr) |
| 3394 | { |
| 3395 | struct bpf_tramp_image im; |
| 3396 | void *image; |
| 3397 | int ret; |
| 3398 | |
| 3399 | /* Allocate a temporary buffer for __arch_prepare_bpf_trampoline(). |
| 3400 | * This will NOT cause fragmentation in direct map, as we do not |
| 3401 | * call set_memory_*() on this buffer. |
| 3402 | * |
| 3403 | * We cannot use kvmalloc here, because we need image to be in |
| 3404 | * module memory range. |
| 3405 | */ |
| 3406 | image = bpf_jit_alloc_exec(PAGE_SIZE); |
| 3407 | if (!image) |
| 3408 | return -ENOMEM; |
| 3409 | |
| 3410 | ret = __arch_prepare_bpf_trampoline(&im, image, image + PAGE_SIZE, image, |
| 3411 | m, flags, tlinks, func_addr); |
| 3412 | bpf_jit_free_exec(image); |
| 3413 | return ret; |
| 3414 | } |
| 3415 | |
| 3416 | static int emit_bpf_dispatcher(u8 **pprog, int a, int b, s64 *progs, u8 *image, u8 *buf) |
| 3417 | { |
| 3418 | u8 *jg_reloc, *prog = *pprog; |
| 3419 | int pivot, err, jg_bytes = 1; |
| 3420 | s64 jg_offset; |
| 3421 | |
| 3422 | if (a == b) { |
| 3423 | /* Leaf node of recursion, i.e. not a range of indices |
| 3424 | * anymore. |
| 3425 | */ |
| 3426 | EMIT1(add_1mod(0x48, BPF_REG_3)); /* cmp rdx,func */ |
| 3427 | if (!is_simm32(progs[a])) |
| 3428 | return -1; |
| 3429 | EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3), |
| 3430 | progs[a]); |
| 3431 | err = emit_cond_near_jump(&prog, /* je func */ |
| 3432 | (void *)progs[a], image + (prog - buf), |
| 3433 | X86_JE); |
| 3434 | if (err) |
| 3435 | return err; |
| 3436 | |
| 3437 | emit_indirect_jump(&prog, 2 /* rdx */, image + (prog - buf)); |
| 3438 | |
| 3439 | *pprog = prog; |
| 3440 | return 0; |
| 3441 | } |
| 3442 | |
| 3443 | /* Not a leaf node, so we pivot, and recursively descend into |
| 3444 | * the lower and upper ranges. |
| 3445 | */ |
| 3446 | pivot = (b - a) / 2; |
| 3447 | EMIT1(add_1mod(0x48, BPF_REG_3)); /* cmp rdx,func */ |
| 3448 | if (!is_simm32(progs[a + pivot])) |
| 3449 | return -1; |
| 3450 | EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3), progs[a + pivot]); |
| 3451 | |
| 3452 | if (pivot > 2) { /* jg upper_part */ |
| 3453 | /* Require near jump. */ |
| 3454 | jg_bytes = 4; |
| 3455 | EMIT2_off32(0x0F, X86_JG + 0x10, 0); |
| 3456 | } else { |
| 3457 | EMIT2(X86_JG, 0); |
| 3458 | } |
| 3459 | jg_reloc = prog; |
| 3460 | |
| 3461 | err = emit_bpf_dispatcher(&prog, a, a + pivot, /* emit lower_part */ |
| 3462 | progs, image, buf); |
| 3463 | if (err) |
| 3464 | return err; |
| 3465 | |
| 3466 | /* From Intel 64 and IA-32 Architectures Optimization |
| 3467 | * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler |
| 3468 | * Coding Rule 11: All branch targets should be 16-byte |
| 3469 | * aligned. |
| 3470 | */ |
| 3471 | emit_align(&prog, 16); |
| 3472 | jg_offset = prog - jg_reloc; |
| 3473 | emit_code(jg_reloc - jg_bytes, jg_offset, jg_bytes); |
| 3474 | |
| 3475 | err = emit_bpf_dispatcher(&prog, a + pivot + 1, /* emit upper_part */ |
| 3476 | b, progs, image, buf); |
| 3477 | if (err) |
| 3478 | return err; |
| 3479 | |
| 3480 | *pprog = prog; |
| 3481 | return 0; |
| 3482 | } |
| 3483 | |
| 3484 | static int cmp_ips(const void *a, const void *b) |
| 3485 | { |
| 3486 | const s64 *ipa = a; |
| 3487 | const s64 *ipb = b; |
| 3488 | |
| 3489 | if (*ipa > *ipb) |
| 3490 | return 1; |
| 3491 | if (*ipa < *ipb) |
| 3492 | return -1; |
| 3493 | return 0; |
| 3494 | } |
| 3495 | |
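| | /* |
| | * Rough sketch of the emitted dispatcher for two sorted targets f0 < f1, |
| | * ignoring retpoline thunks for the indirect jump (rdx holds the target |
| | * program address): |
| | * cmp rdx, f0 ; pivot compare |
| | * jg upper |
| | * cmp rdx, f0 ; leaf of the lower half (note the duplicated compare) |
| | * je f0 |
| | * jmp *rdx |
| | * (16-byte align) |
| | * upper: |
| | * cmp rdx, f1 ; leaf of the upper half |
| | * je f1 |
| | * jmp *rdx |
| | */ |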
| 3496 | int arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_funcs) |
| 3497 | { |
| 3498 | u8 *prog = buf; |
| 3499 | |
| 3500 | sort(funcs, num_funcs, sizeof(funcs[0]), cmp_ips, NULL); |
| 3501 | return emit_bpf_dispatcher(&prog, 0, num_funcs - 1, funcs, image, buf); |
| 3502 | } |
| 3503 | |
| 3504 | static const char *bpf_get_prog_name(struct bpf_prog *prog) |
| 3505 | { |
| 3506 | if (prog->aux->ksym.prog) |
| 3507 | return prog->aux->ksym.name; |
| 3508 | return prog->aux->name; |
| 3509 | } |
| 3510 | |
| 3511 | static void priv_stack_init_guard(void __percpu *priv_stack_ptr, int alloc_size) |
| 3512 | { |
| 3513 | int cpu, underflow_idx = (alloc_size - PRIV_STACK_GUARD_SZ) >> 3; |
| 3514 | u64 *stack_ptr; |
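| | /* Per-CPU layout: a guard region at each end of the allocation, with the |
| | * program's private stack in between. Only the first u64 of each guard is |
| | * stamped here and later checked by priv_stack_check_guard(). |
| | */ |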
| 3515 | |
| 3516 | for_each_possible_cpu(cpu) { |
| 3517 | stack_ptr = per_cpu_ptr(priv_stack_ptr, cpu); |
| 3518 | stack_ptr[0] = PRIV_STACK_GUARD_VAL; |
| 3519 | stack_ptr[underflow_idx] = PRIV_STACK_GUARD_VAL; |
| 3520 | } |
| 3521 | } |
| 3522 | |
| 3523 | static void priv_stack_check_guard(void __percpu *priv_stack_ptr, int alloc_size, |
| 3524 | struct bpf_prog *prog) |
| 3525 | { |
| 3526 | int cpu, underflow_idx = (alloc_size - PRIV_STACK_GUARD_SZ) >> 3; |
| 3527 | u64 *stack_ptr; |
| 3528 | |
| 3529 | for_each_possible_cpu(cpu) { |
| 3530 | stack_ptr = per_cpu_ptr(priv_stack_ptr, cpu); |
| 3531 | if (stack_ptr[0] != PRIV_STACK_GUARD_VAL || |
| 3532 | stack_ptr[underflow_idx] != PRIV_STACK_GUARD_VAL) { |
| 3533 | pr_err("BPF private stack overflow/underflow detected for prog %s\n", |
| 3534 | bpf_get_prog_name(prog)); |
| 3535 | break; |
| 3536 | } |
| 3537 | } |
| 3538 | } |
| 3539 | |
| 3540 | struct x64_jit_data { |
| 3541 | struct bpf_binary_header *rw_header; |
| 3542 | struct bpf_binary_header *header; |
| 3543 | int *addrs; |
| 3544 | u8 *image; |
| 3545 | int proglen; |
| 3546 | struct jit_context ctx; |
| 3547 | }; |
| 3548 | |
| 3549 | #define MAX_PASSES 20 |
| 3550 | #define PADDING_PASSES (MAX_PASSES - 5) |
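| | /* |
| | * After PADDING_PASSES iterations without convergence, do_jit() is asked |
| | * to pad shrinking jump instructions with NOPs so that the image length |
| | * stabilizes and the loop is guaranteed to converge before MAX_PASSES. |
| | */ |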
| 3551 | |
| 3552 | struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) |
| 3553 | { |
| 3554 | struct bpf_binary_header *rw_header = NULL; |
| 3555 | struct bpf_binary_header *header = NULL; |
| 3556 | struct bpf_prog *tmp, *orig_prog = prog; |
| 3557 | void __percpu *priv_stack_ptr = NULL; |
| 3558 | struct x64_jit_data *jit_data; |
| 3559 | int priv_stack_alloc_sz; |
| 3560 | int proglen, oldproglen = 0; |
| 3561 | struct jit_context ctx = {}; |
| 3562 | bool tmp_blinded = false; |
| 3563 | bool extra_pass = false; |
| 3564 | bool padding = false; |
| 3565 | u8 *rw_image = NULL; |
| 3566 | u8 *image = NULL; |
| 3567 | int *addrs; |
| 3568 | int pass; |
| 3569 | int i; |
| 3570 | |
| 3571 | if (!prog->jit_requested) |
| 3572 | return orig_prog; |
| 3573 | |
| 3574 | tmp = bpf_jit_blind_constants(prog); |
| 3575 | /* |
| 3576 | * If blinding was requested and we failed during blinding, |
| 3577 | * we must fall back to the interpreter. |
| 3578 | */ |
| 3579 | if (IS_ERR(tmp)) |
| 3580 | return orig_prog; |
| 3581 | if (tmp != prog) { |
| 3582 | tmp_blinded = true; |
| 3583 | prog = tmp; |
| 3584 | } |
| 3585 | |
| 3586 | jit_data = prog->aux->jit_data; |
| 3587 | if (!jit_data) { |
| 3588 | jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL); |
| 3589 | if (!jit_data) { |
| 3590 | prog = orig_prog; |
| 3591 | goto out; |
| 3592 | } |
| 3593 | prog->aux->jit_data = jit_data; |
| 3594 | } |
| 3595 | priv_stack_ptr = prog->aux->priv_stack_ptr; |
| 3596 | if (!priv_stack_ptr && prog->aux->jits_use_priv_stack) { |
| 3597 | /* Allocate the actual private stack: the verifier-calculated |
| 3598 | * stack size plus two memory guards to catch overflow and |
| 3599 | * underflow. |
| 3600 | */ |
| 3601 | priv_stack_alloc_sz = round_up(prog->aux->stack_depth, 8) + |
| 3602 | 2 * PRIV_STACK_GUARD_SZ; |
| 3603 | priv_stack_ptr = __alloc_percpu_gfp(priv_stack_alloc_sz, 8, GFP_KERNEL); |
| 3604 | if (!priv_stack_ptr) { |
| 3605 | prog = orig_prog; |
| 3606 | goto out_priv_stack; |
| 3607 | } |
| 3608 | |
| 3609 | priv_stack_init_guard(priv_stack_ptr, priv_stack_alloc_sz); |
| 3610 | prog->aux->priv_stack_ptr = priv_stack_ptr; |
| 3611 | } |
| 3612 | addrs = jit_data->addrs; |
| 3613 | if (addrs) { |
| 3614 | ctx = jit_data->ctx; |
| 3615 | oldproglen = jit_data->proglen; |
| 3616 | image = jit_data->image; |
| 3617 | header = jit_data->header; |
| 3618 | rw_header = jit_data->rw_header; |
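| | /* rw_image is the writable counterpart of image, at the same offset |
| | * within rw_header as image is within header. |
| | */ |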
| 3619 | rw_image = (void *)rw_header + ((void *)image - (void *)header); |
| 3620 | extra_pass = true; |
| 3621 | padding = true; |
| 3622 | goto skip_init_addrs; |
| 3623 | } |
| 3624 | addrs = kvmalloc_array(prog->len + 1, sizeof(*addrs), GFP_KERNEL); |
| 3625 | if (!addrs) { |
| 3626 | prog = orig_prog; |
| 3627 | goto out_addrs; |
| 3628 | } |
| 3629 | |
| 3630 | /* |
| 3631 | * Before the first pass, make a rough estimate of addrs[]: |
| 3632 | * each BPF instruction is translated to less than 64 bytes. |
| 3633 | */ |
| 3634 | for (proglen = 0, i = 0; i <= prog->len; i++) { |
| 3635 | proglen += 64; |
| 3636 | addrs[i] = proglen; |
| 3637 | } |
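| | /* e.g. for a 3-insn program this yields addrs[] = { 64, 128, 192, 256 }, |
| | * a deliberate over-estimate that only shrinks in later passes. |
| | */ |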
| 3638 | ctx.cleanup_addr = proglen; |
| 3639 | skip_init_addrs: |
| 3640 | |
| 3641 | /* |
| 3642 | * JITed image shrinks with every pass and the loop iterates |
| 3643 | * until the image stops shrinking. Very large BPF programs |
| 3644 | * may converge only on the last pass. In such a case, do one more |
| 3645 | * pass to emit the final image. |
| 3646 | */ |
| 3647 | for (pass = 0; pass < MAX_PASSES || image; pass++) { |
| 3648 | if (!padding && pass >= PADDING_PASSES) |
| 3649 | padding = true; |
| 3650 | proglen = do_jit(prog, addrs, image, rw_image, oldproglen, &ctx, padding); |
| 3651 | if (proglen <= 0) { |
| 3652 | out_image: |
| 3653 | image = NULL; |
| 3654 | if (header) { |
| 3655 | bpf_arch_text_copy(&header->size, &rw_header->size, |
| 3656 | sizeof(rw_header->size)); |
| 3657 | bpf_jit_binary_pack_free(header, rw_header); |
| 3658 | } |
| 3659 | /* Fall back to interpreter mode */ |
| 3660 | prog = orig_prog; |
| 3661 | if (extra_pass) { |
| 3662 | prog->bpf_func = NULL; |
| 3663 | prog->jited = 0; |
| 3664 | prog->jited_len = 0; |
| 3665 | } |
| 3666 | goto out_addrs; |
| 3667 | } |
| 3668 | if (image) { |
| 3669 | if (proglen != oldproglen) { |
| 3670 | pr_err("bpf_jit: proglen=%d != oldproglen=%d\n", |
| 3671 | proglen, oldproglen); |
| 3672 | goto out_image; |
| 3673 | } |
| 3674 | break; |
| 3675 | } |
| 3676 | if (proglen == oldproglen) { |
| 3677 | /* |
| 3678 | * The number of entries in extable is the number of BPF_LDX |
| 3679 | * insns that access kernel memory via "pointer to BTF type". |
| 3680 | * The verifier changed their opcode from LDX|MEM|size |
| 3681 | * to LDX|PROBE_MEM|size to make JITing easier. |
| 3682 | */ |
| 3683 | u32 align = __alignof__(struct exception_table_entry); |
| 3684 | u32 extable_size = prog->aux->num_exentries * |
| 3685 | sizeof(struct exception_table_entry); |
| 3686 | |
| 3687 | /* allocate module memory for x86 insns and extable */ |
| 3688 | header = bpf_jit_binary_pack_alloc(roundup(proglen, align) + extable_size, |
| 3689 | &image, align, &rw_header, &rw_image, |
| 3690 | jit_fill_hole); |
| 3691 | if (!header) { |
| 3692 | prog = orig_prog; |
| 3693 | goto out_addrs; |
| 3694 | } |
| 3695 | prog->aux->extable = (void *) image + roundup(proglen, align); |
| 3696 | } |
| 3697 | oldproglen = proglen; |
| 3698 | cond_resched(); |
| 3699 | } |
| 3700 | |
| 3701 | if (bpf_jit_enable > 1) |
| 3702 | bpf_jit_dump(prog->len, proglen, pass + 1, rw_image); |
| 3703 | |
| 3704 | if (image) { |
| 3705 | if (!prog->is_func || extra_pass) { |
| 3706 | /* |
| 3707 | * bpf_jit_binary_pack_finalize fails in two scenarios: |
| 3708 | * 1) header is not pointing to proper module memory; |
| 3709 | * 2) the arch doesn't support bpf_arch_text_copy(). |
| 3710 | * |
| 3711 | * Both cases are serious bugs and justify WARN_ON. |
| 3712 | */ |
| 3713 | if (WARN_ON(bpf_jit_binary_pack_finalize(header, rw_header))) { |
| 3714 | /* header has been freed */ |
| 3715 | header = NULL; |
| 3716 | goto out_image; |
| 3717 | } |
| 3718 | |
| 3719 | bpf_tail_call_direct_fixup(prog); |
| 3720 | } else { |
| 3721 | jit_data->addrs = addrs; |
| 3722 | jit_data->ctx = ctx; |
| 3723 | jit_data->proglen = proglen; |
| 3724 | jit_data->image = image; |
| 3725 | jit_data->header = header; |
| 3726 | jit_data->rw_header = rw_header; |
| 3727 | } |
| 3728 | /* |
| 3729 | * ctx.prog_offset is used when CFI preambles put code *before* |
| 3730 | * the function. See emit_cfi(). For FineIBT specifically this code |
| 3731 | * can also be executed and bpf_prog_kallsyms_add() will |
| 3732 | * generate an additional symbol to cover this, hence also |
| 3733 | * decrement proglen. |
| 3734 | */ |
| 3735 | prog->bpf_func = (void *)image + cfi_get_offset(); |
| 3736 | prog->jited = 1; |
| 3737 | prog->jited_len = proglen - cfi_get_offset(); |
| 3738 | } else { |
| 3739 | prog = orig_prog; |
| 3740 | } |
| 3741 | |
| 3742 | if (!image || !prog->is_func || extra_pass) { |
| 3743 | if (image) |
| 3744 | bpf_prog_fill_jited_linfo(prog, addrs + 1); |
| 3745 | out_addrs: |
| 3746 | kvfree(addrs); |
| 3747 | if (!image && priv_stack_ptr) { |
| 3748 | free_percpu(priv_stack_ptr); |
| 3749 | prog->aux->priv_stack_ptr = NULL; |
| 3750 | } |
| 3751 | out_priv_stack: |
| 3752 | kfree(jit_data); |
| 3753 | prog->aux->jit_data = NULL; |
| 3754 | } |
| 3755 | out: |
| 3756 | if (tmp_blinded) |
| 3757 | bpf_jit_prog_release_other(prog, prog == orig_prog ? |
| 3758 | tmp : orig_prog); |
| 3759 | return prog; |
| 3760 | } |
| 3761 | |
| 3762 | bool bpf_jit_supports_kfunc_call(void) |
| 3763 | { |
| 3764 | return true; |
| 3765 | } |
| 3766 | |
| 3767 | void *bpf_arch_text_copy(void *dst, void *src, size_t len) |
| 3768 | { |
| 3769 | if (text_poke_copy(dst, src, len) == NULL) |
| 3770 | return ERR_PTR(-EINVAL); |
| 3771 | return dst; |
| 3772 | } |
| 3773 | |
| 3774 | /* Indicate the JIT backend supports mixing bpf2bpf and tailcalls. */ |
| 3775 | bool bpf_jit_supports_subprog_tailcalls(void) |
| 3776 | { |
| 3777 | return true; |
| 3778 | } |
| 3779 | |
| 3780 | bool bpf_jit_supports_percpu_insn(void) |
| 3781 | { |
| 3782 | return true; |
| 3783 | } |
| 3784 | |
| 3785 | void bpf_jit_free(struct bpf_prog *prog) |
| 3786 | { |
| 3787 | if (prog->jited) { |
| 3788 | struct x64_jit_data *jit_data = prog->aux->jit_data; |
| 3789 | struct bpf_binary_header *hdr; |
| 3790 | void __percpu *priv_stack_ptr; |
| 3791 | int priv_stack_alloc_sz; |
| 3792 | |
| 3793 | /* |
| 3794 | * If we fail the final pass of JIT (from jit_subprogs), |
| 3795 | * the program may not be finalized yet. Call finalize here |
| 3796 | * before freeing it. |
| 3797 | */ |
| 3798 | if (jit_data) { |
| 3799 | bpf_jit_binary_pack_finalize(jit_data->header, |
| 3800 | jit_data->rw_header); |
| 3801 | kvfree(jit_data->addrs); |
| 3802 | kfree(jit_data); |
| 3803 | } |
| 3804 | prog->bpf_func = (void *)prog->bpf_func - cfi_get_offset(); |
| 3805 | hdr = bpf_jit_binary_pack_hdr(prog); |
| 3806 | bpf_jit_binary_pack_free(hdr, NULL); |
| 3807 | priv_stack_ptr = prog->aux->priv_stack_ptr; |
| 3808 | if (priv_stack_ptr) { |
| 3809 | priv_stack_alloc_sz = round_up(prog->aux->stack_depth, 8) + |
| 3810 | 2 * PRIV_STACK_GUARD_SZ; |
| 3811 | priv_stack_check_guard(priv_stack_ptr, priv_stack_alloc_sz, prog); |
| 3812 | free_percpu(prog->aux->priv_stack_ptr); |
| 3813 | } |
| 3814 | WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(prog)); |
| 3815 | } |
| 3816 | |
| 3817 | bpf_prog_unlock_free(prog); |
| 3818 | } |
| 3819 | |
| 3820 | bool bpf_jit_supports_exceptions(void) |
| 3821 | { |
| 3822 | /* We unwind through both kernel frames (starting from within bpf_throw |
| 3823 | * call) and BPF frames. Therefore we require ORC unwinder to be enabled |
| 3824 | * to walk kernel frames and reach BPF frames in the stack trace. |
| 3825 | */ |
| 3826 | return IS_ENABLED(CONFIG_UNWINDER_ORC); |
| 3827 | } |
| 3828 | |
| 3829 | bool bpf_jit_supports_private_stack(void) |
| 3830 | { |
| 3831 | return true; |
| 3832 | } |
| 3833 | |
| 3834 | void arch_bpf_stack_walk(bool (*consume_fn)(void *cookie, u64 ip, u64 sp, u64 bp), void *cookie) |
| 3835 | { |
| 3836 | #if defined(CONFIG_UNWINDER_ORC) |
| 3837 | struct unwind_state state; |
| 3838 | unsigned long addr; |
| 3839 | |
| 3840 | for (unwind_start(&state, current, NULL, NULL); !unwind_done(&state); |
| 3841 | unwind_next_frame(&state)) { |
| 3842 | addr = unwind_get_return_address(&state); |
| 3843 | if (!addr || !consume_fn(cookie, (u64)addr, (u64)state.sp, (u64)state.bp)) |
| 3844 | break; |
| 3845 | } |
| 3846 | return; |
| 3847 | #endif |
| 3848 | WARN(1, "verification of programs using bpf_throw should have failed\n"); |
| 3849 | } |
| 3850 | |
| 3851 | void bpf_arch_poke_desc_update(struct bpf_jit_poke_descriptor *poke, |
| 3852 | struct bpf_prog *new, struct bpf_prog *old) |
| 3853 | { |
| 3854 | u8 *old_addr, *new_addr, *old_bypass_addr; |
| 3855 | int ret; |
| 3856 | |
| 3857 | old_bypass_addr = old ? NULL : poke->bypass_addr; |
| 3858 | old_addr = old ? (u8 *)old->bpf_func + poke->adj_off : NULL; |
| 3859 | new_addr = new ? (u8 *)new->bpf_func + poke->adj_off : NULL; |
| 3860 | |
| 3861 | /* |
| 3862 | * On program loading or teardown, the program's kallsym entry |
| 3863 | * might not be in place, so we use __bpf_arch_text_poke to skip |
| 3864 | * the kallsyms check. |
| 3865 | */ |
| 3866 | if (new) { |
| 3867 | ret = __bpf_arch_text_poke(poke->tailcall_target, |
| 3868 | BPF_MOD_JUMP, |
| 3869 | old_addr, new_addr); |
| 3870 | BUG_ON(ret < 0); |
| 3871 | if (!old) { |
| 3872 | ret = __bpf_arch_text_poke(poke->tailcall_bypass, |
| 3873 | BPF_MOD_JUMP, |
| 3874 | poke->bypass_addr, |
| 3875 | NULL); |
| 3876 | BUG_ON(ret < 0); |
| 3877 | } |
| 3878 | } else { |
| 3879 | ret = __bpf_arch_text_poke(poke->tailcall_bypass, |
| 3880 | BPF_MOD_JUMP, |
| 3881 | old_bypass_addr, |
| 3882 | poke->bypass_addr); |
| 3883 | BUG_ON(ret < 0); |
| 3884 | /* let other CPUs finish the execution of the program |
| 3885 | * so that it is not possible to expose them |
| 3886 | * to an invalid nop, stack unwind, nop state |
| 3887 | */ |
| 3888 | if (!ret) |
| 3889 | synchronize_rcu(); |
| 3890 | ret = __bpf_arch_text_poke(poke->tailcall_target, |
| 3891 | BPF_MOD_JUMP, |
| 3892 | old_addr, NULL); |
| 3893 | BUG_ON(ret < 0); |
| 3894 | } |
| 3895 | } |
| 3896 | |
| 3897 | bool bpf_jit_supports_arena(void) |
| 3898 | { |
| 3899 | return true; |
| 3900 | } |
| 3901 | |
| 3902 | bool bpf_jit_supports_insn(struct bpf_insn *insn, bool in_arena) |
| 3903 | { |
| 3904 | if (!in_arena) |
| 3905 | return true; |
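| | /* For arena accesses the memory operation must be a single faulting |
| | * instruction that one extable entry can fix up. and/or/xor with |
| | * BPF_FETCH are emulated with a cmpxchg loop in do_jit(), so they are |
| | * rejected below. |
| | */ |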
| 3906 | switch (insn->code) { |
| 3907 | case BPF_STX | BPF_ATOMIC | BPF_W: |
| 3908 | case BPF_STX | BPF_ATOMIC | BPF_DW: |
| 3909 | if (insn->imm == (BPF_AND | BPF_FETCH) || |
| 3910 | insn->imm == (BPF_OR | BPF_FETCH) || |
| 3911 | insn->imm == (BPF_XOR | BPF_FETCH)) |
| 3912 | return false; |
| 3913 | } |
| 3914 | return true; |
| 3915 | } |
| 3916 | |
| 3917 | bool bpf_jit_supports_ptr_xchg(void) |
| 3918 | { |
| 3919 | return true; |
| 3920 | } |
| 3921 | |
| 3922 | /* x86-64 JIT emits its own code to filter user addresses so return 0 here */ |
| 3923 | u64 bpf_arch_uaddress_limit(void) |
| 3924 | { |
| 3925 | return 0; |
| 3926 | } |
| 3927 | |
| 3928 | bool bpf_jit_supports_timed_may_goto(void) |
| 3929 | { |
| 3930 | return true; |
| 3931 | } |