From: Linus Torvalds
Date: Wed, 14 Dec 2022 23:03:00 +0000 (-0800)
Subject: Merge tag 'x86_core_for_v6.2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
X-Git-Tag: v6.2-rc1~85
X-Git-Url: https://git.kernel.dk/?a=commitdiff_plain;h=94a855111ed9106971ca2617c5d075269e6aefde;p=linux-2.6-block.git

Merge tag 'x86_core_for_v6.2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 core updates from Borislav Petkov:

 - Add the call depth tracking mitigation for Retbleed, which has been
   long in the making. It is a lighter-weight, software-only fix for
   Skylake-based cores where enabling IBRS is a big hammer that causes
   a significant performance impact.

   What it basically does is align all kernel functions to a 16-byte
   boundary and add a 16-byte padding before each function. objtool
   collects all functions' locations and, when the mitigation gets
   applied, patches in a call accounting thunk which is used to track
   the call depth of the stack at any time.

   When that call depth reaches a magic, microarchitecture-specific
   value for the Return Stack Buffer, the code stuffs that RSB and
   avoids its underflow, which could otherwise lead to the Intel
   variant of Retbleed.

   This software-only solution brings back a lot of the lost
   performance, as benchmarks suggest:

     https://lore.kernel.org/all/20220915111039.092790446@infradead.org/

   That page also contains a much more detailed explanation of the
   whole mechanism.

 - Implement a new control flow integrity scheme called FineIBT, which
   is based on the software kCFI implementation and uses hardware IBT
   support, where present, to annotate and track indirect branches
   using a hash to validate them.

 - Other misc fixes and cleanups

* tag 'x86_core_for_v6.2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (80 commits)
  x86/paravirt: Use common macro for creating simple asm paravirt functions
  x86/paravirt: Remove clobber bitmask from .parainstructions
  x86/debug: Include percpu.h in debugreg.h to get DECLARE_PER_CPU() et al
  x86/cpufeatures: Move X86_FEATURE_CALL_DEPTH from bit 18 to bit 19 of word 11, to leave space for WIP X86_FEATURE_SGX_EDECCSSA bit
  x86/Kconfig: Enable kernel IBT by default
  x86,pm: Force out-of-line memcpy()
  objtool: Fix weak hole vs prefix symbol
  objtool: Optimize elf_dirty_reloc_sym()
  x86/cfi: Add boot time hash randomization
  x86/cfi: Boot time selection of CFI scheme
  x86/ibt: Implement FineIBT
  objtool: Add --cfi to generate the .cfi_sites section
  x86: Add prefix symbols for function padding
  objtool: Add option to generate prefix symbols
  objtool: Avoid O(bloody terrible) behaviour -- an ode to libelf
  objtool: Slice up elf_create_section_symbol()
  kallsyms: Revert "Take callthunks into account"
  x86: Unconfuse CONFIG_ and X86_FEATURE_ namespaces
  x86/retpoline: Fix crash printing warning
  x86/paravirt: Fix a !PARAVIRT build warning
  ...
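To make the call depth accounting described above concrete, here is a
minimal C model of the idea. This is an illustrative sketch only: the
names, the RSB_CAPACITY value and the counter handling are assumptions
chosen for clarity -- the real mitigation lives in patched per-call-site
assembly thunks with a per-CPU depth counter, and the real stuffing
sequence is a chain of dummy calls, not a C function.

    /* Illustrative model only -- not the kernel's implementation. */
    #define RSB_CAPACITY 16    /* assumed Return Stack Buffer size */

    static int rsb_entries = RSB_CAPACITY;    /* per-CPU in the kernel */

    /*
     * Refill the RSB with benign entries so a later underflow cannot
     * make the CPU fall back to attacker-trainable predictors.
     */
    static void stuff_rsb(void)
    {
            rsb_entries = RSB_CAPACITY;
    }

    /* Models the call accounting thunk patched in front of functions. */
    static void account_call(void)
    {
            if (rsb_entries < RSB_CAPACITY)
                    rsb_entries++;
    }

    /*
     * Models the return path: once the tracked depth says the RSB is
     * about to run empty, stuff it before executing the actual ret.
     */
    static void account_return(void)
    {
            if (--rsb_entries <= 0)
                    stuff_rsb();
    }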
---

94a855111ed9106971ca2617c5d075269e6aefde
diff --cc arch/x86/crypto/sha1_ni_asm.S
index 3cae5a1bb3d6,cd943b2af2c4..cade913d4882
--- a/arch/x86/crypto/sha1_ni_asm.S
+++ b/arch/x86/crypto/sha1_ni_asm.S
@@@ -93,8 -92,7 +93,7 @@@
   * numBlocks: Number of blocks to process
   */
  .text
- 	.align 32
 -SYM_FUNC_START(sha1_ni_transform)
 +SYM_TYPED_FUNC_START(sha1_ni_transform)
  	push		%rbp
  	mov		%rsp, %rbp
  	sub		$FRAME_SIZE, %rsp
diff --cc arch/x86/crypto/sha256-avx-asm.S
index 06ea30c20828,3649370690c5..5555b5d5215a
--- a/arch/x86/crypto/sha256-avx-asm.S
+++ b/arch/x86/crypto/sha256-avx-asm.S
@@@ -347,8 -346,7 +347,7 @@@ a = TMP
  ## arg 3 : Num blocks
  ########################################################################
  .text
 -SYM_FUNC_START(sha256_transform_avx)
 +SYM_TYPED_FUNC_START(sha256_transform_avx)
- 	.align 32
  	pushq	%rbx
  	pushq	%r12
  	pushq	%r13
diff --cc arch/x86/crypto/sha256-avx2-asm.S
index 2d2be531a11e,c4c1dc5ee078..3eada9416852
--- a/arch/x86/crypto/sha256-avx2-asm.S
+++ b/arch/x86/crypto/sha256-avx2-asm.S
@@@ -524,8 -523,7 +524,7 @@@ STACK_SIZE = _CTX + _CTX_SIZ
  ## arg 3 : Num blocks
  ########################################################################
  .text
 -SYM_FUNC_START(sha256_transform_rorx)
 +SYM_TYPED_FUNC_START(sha256_transform_rorx)
- 	.align 32
  	pushq	%rbx
  	pushq	%r12
  	pushq	%r13
diff --cc arch/x86/crypto/sha256-ssse3-asm.S
index 7db28839108d,96b7dcdeaebe..959288eecc68
--- a/arch/x86/crypto/sha256-ssse3-asm.S
+++ b/arch/x86/crypto/sha256-ssse3-asm.S
@@@ -356,8 -355,7 +356,7 @@@ a = TMP
  ## arg 3 : Num blocks
  ########################################################################
  .text
 -SYM_FUNC_START(sha256_transform_ssse3)
 +SYM_TYPED_FUNC_START(sha256_transform_ssse3)
- 	.align 32
  	pushq	%rbx
  	pushq	%r12
  	pushq	%r13
diff --cc arch/x86/crypto/sha256_ni_asm.S
index 47f93937f798,b3f1a1a12027..537b6dcd7ed8
--- a/arch/x86/crypto/sha256_ni_asm.S
+++ b/arch/x86/crypto/sha256_ni_asm.S
@@@ -97,8 -96,7 +97,7 @@@
   */
  
  .text
- 	.align 32
 -SYM_FUNC_START(sha256_ni_transform)
 +SYM_TYPED_FUNC_START(sha256_ni_transform)
  	shl		$6, NUM_BLKS		/*  convert to bytes */
  	jz		.Ldone_hash
diff --cc arch/x86/crypto/sm3-avx-asm_64.S
index 8fc5ac681fd6,b28d804ee10d..503bab450a91
--- a/arch/x86/crypto/sm3-avx-asm_64.S
+++ b/arch/x86/crypto/sm3-avx-asm_64.S
@@@ -328,8 -327,7 +328,7 @@@
   * void sm3_transform_avx(struct sm3_state *state,
   *                        const u8 *data, int nblocks);
   */
- .align 16
 -SYM_FUNC_START(sm3_transform_avx)
 +SYM_TYPED_FUNC_START(sm3_transform_avx)
  	/* input:
  	 *	%rdi: ctx, CTX
  	 *	%rsi: data (64*nblks bytes)
diff --cc arch/x86/crypto/sm4-aesni-avx-asm_64.S
index 22b6560eb9e1,e13c8537b2ec..e2668d2fe6ce
--- a/arch/x86/crypto/sm4-aesni-avx-asm_64.S
+++ b/arch/x86/crypto/sm4-aesni-avx-asm_64.S
@@@ -420,8 -415,7 +416,7 @@@ SYM_FUNC_END(sm4_aesni_avx_crypt8
   * void sm4_aesni_avx_ctr_enc_blk8(const u32 *rk, u8 *dst,
   *                                 const u8 *src, u8 *iv)
   */
- .align 8
 -SYM_FUNC_START(sm4_aesni_avx_ctr_enc_blk8)
 +SYM_TYPED_FUNC_START(sm4_aesni_avx_ctr_enc_blk8)
  	/* input:
  	 *	%rdi: round key array, CTX
  	 *	%rsi: dst (8 blocks)
@@@ -495,8 -489,7 +490,7 @@@ SYM_FUNC_END(sm4_aesni_avx_ctr_enc_blk8
   * void sm4_aesni_avx_cbc_dec_blk8(const u32 *rk, u8 *dst,
   *                                 const u8 *src, u8 *iv)
   */
- .align 8
 -SYM_FUNC_START(sm4_aesni_avx_cbc_dec_blk8)
 +SYM_TYPED_FUNC_START(sm4_aesni_avx_cbc_dec_blk8)
  	/* input:
  	 *	%rdi: round key array, CTX
  	 *	%rsi: dst (8 blocks)
@@@ -545,8 -538,7 +539,7 @@@ SYM_FUNC_END(sm4_aesni_avx_cbc_dec_blk8
   * void sm4_aesni_avx_cfb_dec_blk8(const u32 *rk, u8 *dst,
   *                                 const u8 *src, u8 *iv)
   */
- .align 8
 -SYM_FUNC_START(sm4_aesni_avx_cfb_dec_blk8)
 +SYM_TYPED_FUNC_START(sm4_aesni_avx_cfb_dec_blk8)
  	/* input:
  	 *	%rdi: round key array, CTX
  	 *	%rsi: dst (8 blocks)
diff --cc arch/x86/crypto/sm4-aesni-avx2-asm_64.S
index 23ee39a8ada8,2212705f7da6..98ede9459287
--- a/arch/x86/crypto/sm4-aesni-avx2-asm_64.S
+++ b/arch/x86/crypto/sm4-aesni-avx2-asm_64.S
@@@ -282,8 -278,7 +279,7 @@@ SYM_FUNC_END(__sm4_crypt_blk16
   * void sm4_aesni_avx2_ctr_enc_blk16(const u32 *rk, u8 *dst,
   *                                   const u8 *src, u8 *iv)
   */
- .align 8
 -SYM_FUNC_START(sm4_aesni_avx2_ctr_enc_blk16)
 +SYM_TYPED_FUNC_START(sm4_aesni_avx2_ctr_enc_blk16)
  	/* input:
  	 *	%rdi: round key array, CTX
  	 *	%rsi: dst (16 blocks)
@@@ -395,8 -390,7 +391,7 @@@ SYM_FUNC_END(sm4_aesni_avx2_ctr_enc_blk
   * void sm4_aesni_avx2_cbc_dec_blk16(const u32 *rk, u8 *dst,
   *                                   const u8 *src, u8 *iv)
   */
- .align 8
 -SYM_FUNC_START(sm4_aesni_avx2_cbc_dec_blk16)
 +SYM_TYPED_FUNC_START(sm4_aesni_avx2_cbc_dec_blk16)
  	/* input:
  	 *	%rdi: round key array, CTX
  	 *	%rsi: dst (16 blocks)
@@@ -449,8 -443,7 +444,7 @@@ SYM_FUNC_END(sm4_aesni_avx2_cbc_dec_blk
   * void sm4_aesni_avx2_cfb_dec_blk16(const u32 *rk, u8 *dst,
   *                                   const u8 *src, u8 *iv)
   */
- .align 8
 -SYM_FUNC_START(sm4_aesni_avx2_cfb_dec_blk16)
 +SYM_TYPED_FUNC_START(sm4_aesni_avx2_cfb_dec_blk16)
  	/* input:
  	 *	%rdi: round key array, CTX
  	 *	%rsi: dst (16 blocks)
diff --cc arch/x86/entry/entry_64_compat.S
index 59b93901660d,bc45ea7d08ee..70150298f8bd
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@@ -58,10 -58,10 +58,10 @@@ SYM_CODE_START(entry_SYSENTER_compat
  	SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
  	popq	%rax
  
- 	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp
+ 	movq	PER_CPU_VAR(pcpu_hot + X86_top_of_stack), %rsp
  
  	/* Construct struct pt_regs on stack */
- 	pushq	$__USER32_DS		/* pt_regs->ss */
+ 	pushq	$__USER_DS		/* pt_regs->ss */
  	pushq	$0			/* pt_regs->sp = 0 (placeholder) */
  
  	/*
diff --cc arch/x86/include/asm/cpufeatures.h
index c9f4730bb113,864c9b0dda68..2dd2691b5ee1
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@@ -304,10 -304,8 +304,9 @@@
  #define X86_FEATURE_UNRET		(11*32+15) /* "" AMD BTB untrain return */
  #define X86_FEATURE_USE_IBPB_FW		(11*32+16) /* "" Use IBPB during runtime firmware calls */
  #define X86_FEATURE_RSB_VMEXIT_LITE	(11*32+17) /* "" Fill RSB on VM exit when EIBRS is enabled */
 -/* Hole left for X86_FEATURE_SGX_EDECCSSA */
 +#define X86_FEATURE_SGX_EDECCSSA	(11*32+18) /* "" SGX EDECCSSA user leaf function */
- 
- 
+ #define X86_FEATURE_CALL_DEPTH		(11*32+19) /* "" Call depth tracking for RSB stuffing */
 +#define X86_FEATURE_MSR_TSX_CTRL	(11*32+20) /* "" MSR IA32_TSX_CTRL (Intel) implemented */
  
  /* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */
  #define X86_FEATURE_AVX_VNNI		(12*32+ 4) /* AVX VNNI instructions */
diff --cc arch/x86/include/asm/disabled-features.h
index c862552d7d6d,bbb03b25263e..c44b56f7ffba
--- a/arch/x86/include/asm/disabled-features.h
+++ b/arch/x86/include/asm/disabled-features.h
@@@ -104,10 -104,11 +110,11 @@@
  #define DISABLED_MASK5	0
  #define DISABLED_MASK6	0
  #define DISABLED_MASK7	(DISABLE_PTI)
- #define DISABLED_MASK8	(DISABLE_TDX_GUEST)
+ #define DISABLED_MASK8	(DISABLE_XENPV|DISABLE_TDX_GUEST)
  #define DISABLED_MASK9	(DISABLE_SGX)
  #define DISABLED_MASK10	0
- #define DISABLED_MASK11	(DISABLE_RETPOLINE|DISABLE_RETHUNK|DISABLE_UNRET)
+ #define DISABLED_MASK11	(DISABLE_RETPOLINE|DISABLE_RETHUNK|DISABLE_UNRET| \
+ 			 DISABLE_CALL_DEPTH_TRACKING)
  #define DISABLED_MASK12	0
  #define DISABLED_MASK13	0
  #define DISABLED_MASK14	0
diff --cc arch/x86/kernel/alternative.c
index a9bea860e22a,aa7f791585c5..23cbfa8d34c5
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@@ -602,24 -687,373 +687,373 @@@ void __init_or_module noinline apply_ib
  	s32 *s;
  
  	for (s = start; s < end; s++) {
- 		u32 endbr, poison = gen_endbr_poison();
  		void *addr = (void *)s + *s;
  
- 		if (WARN_ON_ONCE(get_kernel_nofault(endbr, addr)))
+ 		poison_endbr(addr, true);
+ 		if (IS_ENABLED(CONFIG_FINEIBT))
+ 			poison_endbr(addr - 16, false);
+ 	}
+ }
+ 
+ #else
+ 
 -void __init_or_module noinline apply_ibt_endbr(s32 *start, s32 *end) { }
++void __init_or_module apply_ibt_endbr(s32 *start, s32 *end) { }
+ 
+ #endif /* CONFIG_X86_KERNEL_IBT */
+ 
+ #ifdef CONFIG_FINEIBT
+ 
+ enum cfi_mode {
+ 	CFI_DEFAULT,
+ 	CFI_OFF,
+ 	CFI_KCFI,
+ 	CFI_FINEIBT,
+ };
+ 
+ static enum cfi_mode cfi_mode __ro_after_init = CFI_DEFAULT;
+ static bool cfi_rand __ro_after_init = true;
+ static u32 cfi_seed __ro_after_init;
+ 
+ /*
+  * Re-hash the CFI hash with a boot-time seed while making sure the result is
+  * not a valid ENDBR instruction.
+  */
+ static u32 cfi_rehash(u32 hash)
+ {
+ 	hash ^= cfi_seed;
+ 	while (unlikely(is_endbr(hash) || is_endbr(-hash))) {
+ 		bool lsb = hash & 1;
+ 		hash >>= 1;
+ 		if (lsb)
+ 			hash ^= 0x80200003;
+ 	}
+ 	return hash;
+ }
+ 
+ static __init int cfi_parse_cmdline(char *str)
+ {
+ 	if (!str)
+ 		return -EINVAL;
+ 
+ 	while (str) {
+ 		char *next = strchr(str, ',');
+ 		if (next) {
+ 			*next = 0;
+ 			next++;
+ 		}
+ 
+ 		if (!strcmp(str, "auto")) {
+ 			cfi_mode = CFI_DEFAULT;
+ 		} else if (!strcmp(str, "off")) {
+ 			cfi_mode = CFI_OFF;
+ 			cfi_rand = false;
+ 		} else if (!strcmp(str, "kcfi")) {
+ 			cfi_mode = CFI_KCFI;
+ 		} else if (!strcmp(str, "fineibt")) {
+ 			cfi_mode = CFI_FINEIBT;
+ 		} else if (!strcmp(str, "norand")) {
+ 			cfi_rand = false;
+ 		} else {
+ 			pr_err("Ignoring unknown cfi option (%s).", str);
+ 		}
+ 
+ 		str = next;
+ 	}
+ 
+ 	return 0;
+ }
+ early_param("cfi", cfi_parse_cmdline);
+ 
+ /*
+  * kCFI						FineIBT
+  *
+  * __cfi_\func:					__cfi_\func:
+  *	movl   $0x12345678,%eax		// 5	     endbr64			// 4
+  *	nop					     subl   $0x12345678,%r10d	// 7
+  *	nop					     jz     1f			// 2
+  *	nop					     ud2			// 2
+  *	nop					1:   nop			// 1
+  *	nop
+  *	nop
+  *	nop
+  *	nop
+  *	nop
+  *	nop
+  *	nop
+  *
+  *
+  * caller:					caller:
+  *	movl	$(-0x12345678),%r10d	// 6	movl   $0x12345678,%r10d	// 6
+  *	addl	$-15(%r11),%r10d	// 4	sub    $16,%r11			// 4
+  *	je	1f			// 2	nop4				// 4
+  *	ud2				// 2
+  * 1:	call	__x86_indirect_thunk_r11 // 5	call   *%r11; nop2;		// 5
+  *
+  */
+ 
+ asm(	".pushsection .rodata			\n"
+ 	"fineibt_preamble_start:		\n"
+ 	"	endbr64				\n"
+ 	"	subl	$0x12345678, %r10d	\n"
+ 	"	je	fineibt_preamble_end	\n"
+ 	"	ud2				\n"
+ 	"	nop				\n"
+ 	"fineibt_preamble_end:			\n"
+ 	".popsection\n"
+ );
+ 
+ extern u8 fineibt_preamble_start[];
+ extern u8 fineibt_preamble_end[];
+ 
+ #define fineibt_preamble_size (fineibt_preamble_end - fineibt_preamble_start)
+ #define fineibt_preamble_hash 7
+ 
+ asm(	".pushsection .rodata			\n"
+ 	"fineibt_caller_start:			\n"
+ 	"	movl	$0x12345678, %r10d	\n"
+ 	"	sub	$16, %r11		\n"
+ 	ASM_NOP4
+ 	"fineibt_caller_end:			\n"
+ 	".popsection				\n"
+ );
+ 
+ extern u8 fineibt_caller_start[];
+ extern u8 fineibt_caller_end[];
+ 
+ #define fineibt_caller_size (fineibt_caller_end - fineibt_caller_start)
+ #define fineibt_caller_hash 2
+ 
+ #define fineibt_caller_jmp (fineibt_caller_size - 2)
+ 
+ static u32 decode_preamble_hash(void *addr)
+ {
+ 	u8 *p = addr;
+ 
+ 	/* b8 78 56 34 12		mov  $0x12345678,%eax */
+ 	if (p[0] == 0xb8)
+ 		return *(u32 *)(addr + 1);
+ 
+ 	return 0; /* invalid hash value */
+ }
+ 
+ static u32 decode_caller_hash(void *addr)
+ {
+ 	u8 *p = addr;
+ 
+ 	/* 41 ba 78 56 34 12		mov  $0x12345678,%r10d */
+ 	if (p[0] == 0x41 && p[1] == 0xba)
+ 		return -*(u32 *)(addr + 2);
+ 
+ 	/* e8 0c 78 56 34 12		jmp.d8  +12 */
+ 	if (p[0] == JMP8_INSN_OPCODE && p[1] == fineibt_caller_jmp)
+ 		return -*(u32 *)(addr + 2);
+ 
+ 	return 0; /* invalid hash value */
+ }
+ 
+ /* .retpoline_sites */
+ static int cfi_disable_callers(s32 *start, s32 *end)
+ {
+ 	/*
+ 	 * Disable kCFI by patching in a JMP.d8, this leaves the hash immediate
+ 	 * in tact for later usage. Also see decode_caller_hash() and
+ 	 * cfi_rewrite_callers().
+ 	 */
+ 	const u8 jmp[] = { JMP8_INSN_OPCODE, fineibt_caller_jmp };
+ 	s32 *s;
+ 
+ 	for (s = start; s < end; s++) {
+ 		void *addr = (void *)s + *s;
+ 		u32 hash;
+ 
+ 		addr -= fineibt_caller_size;
+ 		hash = decode_caller_hash(addr);
+ 		if (!hash) /* nocfi callers */
  			continue;
  
- 		if (WARN_ON_ONCE(!is_endbr(endbr)))
+ 		text_poke_early(addr, jmp, 2);
+ 	}
+ 
+ 	return 0;
+ }
+ 
+ static int cfi_enable_callers(s32 *start, s32 *end)
+ {
+ 	/*
+ 	 * Re-enable kCFI, undo what cfi_disable_callers() did.
+ 	 */
+ 	const u8 mov[] = { 0x41, 0xba };
+ 	s32 *s;
+ 
+ 	for (s = start; s < end; s++) {
+ 		void *addr = (void *)s + *s;
+ 		u32 hash;
+ 
+ 		addr -= fineibt_caller_size;
+ 		hash = decode_caller_hash(addr);
+ 		if (!hash) /* nocfi callers */
  			continue;
  
- 		DPRINTK("ENDBR at: %pS (%px)", addr, addr);
+ 		text_poke_early(addr, mov, 2);
+ 	}
  
- 	/*
- 	 * When we have IBT, the lack of ENDBR will trigger #CP
- 	 */
- 	DUMP_BYTES(((u8*)addr), 4, "%px: orig: ", addr);
- 	DUMP_BYTES(((u8*)&poison), 4, "%px: repl: ", addr);
- 	text_poke_early(addr, &poison, 4);
+ 	return 0;
+ }
+ 
+ /* .cfi_sites */
+ static int cfi_rand_preamble(s32 *start, s32 *end)
+ {
+ 	s32 *s;
+ 
+ 	for (s = start; s < end; s++) {
+ 		void *addr = (void *)s + *s;
+ 		u32 hash;
+ 
+ 		hash = decode_preamble_hash(addr);
+ 		if (WARN(!hash, "no CFI hash found at: %pS %px %*ph\n",
+ 			 addr, addr, 5, addr))
+ 			return -EINVAL;
+ 
+ 		hash = cfi_rehash(hash);
+ 		text_poke_early(addr + 1, &hash, 4);
+ 	}
+ 
+ 	return 0;
+ }
+ 
+ static int cfi_rewrite_preamble(s32 *start, s32 *end)
+ {
+ 	s32 *s;
+ 
+ 	for (s = start; s < end; s++) {
+ 		void *addr = (void *)s + *s;
+ 		u32 hash;
+ 
+ 		hash = decode_preamble_hash(addr);
+ 		if (WARN(!hash, "no CFI hash found at: %pS %px %*ph\n",
+ 			 addr, addr, 5, addr))
+ 			return -EINVAL;
+ 
+ 		text_poke_early(addr, fineibt_preamble_start, fineibt_preamble_size);
+ 		WARN_ON(*(u32 *)(addr + fineibt_preamble_hash) != 0x12345678);
+ 		text_poke_early(addr + fineibt_preamble_hash, &hash, 4);
+ 	}
+ 
+ 	return 0;
+ }
+ 
+ /* .retpoline_sites */
+ static int cfi_rand_callers(s32 *start, s32 *end)
+ {
+ 	s32 *s;
+ 
+ 	for (s = start; s < end; s++) {
+ 		void *addr = (void *)s + *s;
+ 		u32 hash;
+ 
+ 		addr -= fineibt_caller_size;
+ 		hash = decode_caller_hash(addr);
+ 		if (hash) {
+ 			hash = -cfi_rehash(hash);
+ 			text_poke_early(addr + 2, &hash, 4);
+ 		}
+ 	}
+ 
+ 	return 0;
+ }
+ 
+ static int cfi_rewrite_callers(s32 *start, s32 *end)
+ {
+ 	s32 *s;
+ 
+ 	for (s = start; s < end; s++) {
+ 		void *addr = (void *)s + *s;
+ 		u32 hash;
+ 
+ 		addr -= fineibt_caller_size;
+ 		hash = decode_caller_hash(addr);
+ 		if (hash) {
+ 			text_poke_early(addr, fineibt_caller_start, fineibt_caller_size);
+ 			WARN_ON(*(u32 *)(addr + fineibt_caller_hash) != 0x12345678);
+ 			text_poke_early(addr + fineibt_caller_hash, &hash, 4);
+ 		}
+ 		/* rely on apply_retpolines() */
+ 	}
+ 
+ 	return 0;
+ }
+ 
+ static void __apply_fineibt(s32 *start_retpoline, s32 *end_retpoline,
+ 			    s32 *start_cfi, s32 *end_cfi, bool builtin)
+ {
+ 	int ret;
+ 
+ 	if (WARN_ONCE(fineibt_preamble_size != 16,
+ 		      "FineIBT preamble wrong size: %ld", fineibt_preamble_size))
+ 		return;
+ 
+ 	if (cfi_mode == CFI_DEFAULT) {
+ 		cfi_mode = CFI_KCFI;
+ 		if (HAS_KERNEL_IBT && cpu_feature_enabled(X86_FEATURE_IBT))
+ 			cfi_mode = CFI_FINEIBT;
+ 	}
+ 
+ 	/*
+ 	 * Rewrite the callers to not use the __cfi_ stubs, such that we might
+ 	 * rewrite them. This disables all CFI. If this succeeds but any of the
+ 	 * later stages fails, we're without CFI.
+ 	 */
+ 	ret = cfi_disable_callers(start_retpoline, end_retpoline);
+ 	if (ret)
+ 		goto err;
+ 
+ 	if (cfi_rand) {
+ 		if (builtin)
+ 			cfi_seed = get_random_u32();
+ 
+ 		ret = cfi_rand_preamble(start_cfi, end_cfi);
+ 		if (ret)
+ 			goto err;
+ 
+ 		ret = cfi_rand_callers(start_retpoline, end_retpoline);
+ 		if (ret)
+ 			goto err;
  	}
+ 
+ 	switch (cfi_mode) {
+ 	case CFI_OFF:
+ 		if (builtin)
+ 			pr_info("Disabling CFI\n");
+ 		return;
+ 
+ 	case CFI_KCFI:
+ 		ret = cfi_enable_callers(start_retpoline, end_retpoline);
+ 		if (ret)
+ 			goto err;
+ 
+ 		if (builtin)
+ 			pr_info("Using kCFI\n");
+ 		return;
+ 
+ 	case CFI_FINEIBT:
+ 		ret = cfi_rewrite_preamble(start_cfi, end_cfi);
+ 		if (ret)
+ 			goto err;
+ 
+ 		ret = cfi_rewrite_callers(start_retpoline, end_retpoline);
+ 		if (ret)
+ 			goto err;
+ 
+ 		if (builtin)
+ 			pr_info("Using FineIBT CFI\n");
+ 		return;
+ 
+ 	default:
+ 		break;
+ 	}
+ 
+ err:
+ 	pr_err("Something went horribly wrong trying to rewrite the CFI implementation.\n");
  }
  
  #else
diff --cc arch/x86/kernel/module.c
index d85a6980e263,0142982e94c5..705fb2a41d7d
--- a/arch/x86/kernel/module.c
+++ b/arch/x86/kernel/module.c
@@@ -251,9 -252,10 +252,10 @@@ int module_finalize(const Elf_Ehdr *hdr
  		    const Elf_Shdr *sechdrs,
  		    struct module *me)
  {
- 	const Elf_Shdr *s, *text = NULL, *alt = NULL, *locks = NULL,
+ 	const Elf_Shdr *s, *alt = NULL, *locks = NULL,
  		*para = NULL, *orc = NULL, *orc_ip = NULL,
- 		*retpolines = NULL, *returns = NULL, *ibt_endbr = NULL;
+ 		*retpolines = NULL, *returns = NULL, *ibt_endbr = NULL,
+ 		*calls = NULL, *cfi = NULL;
  	char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
  
  	for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
diff --cc arch/x86/kernel/setup_percpu.c
index b26123c90b4f,c2fc4c41c164..c242dc47e9cb
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@@ -22,10 -21,8 +22,7 @@@
  #include
  #include
  #include
 -#include
  
- DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
- EXPORT_PER_CPU_SYMBOL(cpu_number);
- 
  #ifdef CONFIG_X86_64
  #define BOOT_PERCPU_OFFSET	((unsigned long)__per_cpu_load)
  #else
diff --cc arch/x86/net/bpf_jit_comp.c
index 36ffe67ad6e5,f46b62029d91..b808be77635e
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@@ -1917,7 -1869,7 +1935,7 @@@ static int invoke_bpf_prog(const struc
  	/* arg2: lea rsi, [rbp - ctx_cookie_off] */
  	EMIT4(0x48, 0x8D, 0x75, -run_ctx_off);
  
- 	if (emit_call(&prog, bpf_trampoline_enter(p), prog))
 -	if (emit_rsb_call(&prog, enter, prog))
++	if (emit_rsb_call(&prog, bpf_trampoline_enter(p), prog))
  		return -EINVAL;
  	/* remember prog start time returned by __bpf_prog_enter */
  	emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0);
@@@ -1962,7 -1914,7 +1980,7 @@@
  	emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6);
  	/* arg3: lea rdx, [rbp - run_ctx_off] */
  	EMIT4(0x48, 0x8D, 0x55, -run_ctx_off);
- 	if (emit_call(&prog, bpf_trampoline_exit(p), prog))
 -	if (emit_rsb_call(&prog, exit, prog))
++	if (emit_rsb_call(&prog, bpf_trampoline_exit(p), prog))
  		return -EINVAL;
  
  	*pprog = prog;
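
As a rough C model of what both CFI schemes in the alternative.c hunk
above implement in machine code, consider the sketch below. The names
(struct target, indirect_call) are invented for illustration; in
reality the compare is emitted by the compiler at every indirect call
site (kCFI) or patched into the 16-byte preamble in front of the callee
(FineIBT, with the caller passing the hash in %r10d), and the failure
path is a ud2 trap, not a library call.

    #include <stdint.h>
    #include <stdlib.h>

    /* Models a function whose type hash is stored in the bytes of its
     * __cfi_<func> preamble, right before the entry point. */
    struct target {
            uint32_t cfi_hash;      /* hash of the function's type signature */
            void (*entry)(void);
    };

    static void indirect_call(struct target *t, uint32_t expected)
    {
            /* kCFI: the caller compares the hash stored ahead of the
             * callee against the hash it expects for this call's type;
             * FineIBT moves this compare into the callee's preamble. */
            if (t->cfi_hash != expected)
                    abort();        /* models the ud2 trap */
            t->entry();
    }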
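For reference, the cfi_parse_cmdline() hunk above wires the scheme
selection up to a boot-time knob. The accepted values are exactly the
strings it compares against, given comma-separated on the kernel
command line:

    cfi=auto        # default: FineIBT when kernel IBT is usable, else kCFI
    cfi=kcfi        # force the software-only kCFI scheme
    cfi=fineibt     # force FineIBT
    cfi=norand      # keep CFI but skip boot-time hash randomization
    cfi=off         # disable CFI entirely (also disables randomization)

Options combine left to right, e.g. "cfi=kcfi,norand".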