Merge tag 'x86-core-2024-03-11' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
author    Linus Torvalds <torvalds@linux-foundation.org>
          Tue, 12 Mar 2024 02:53:15 +0000 (19:53 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Tue, 12 Mar 2024 02:53:15 +0000 (19:53 -0700)
Pull core x86 updates from Ingo Molnar:

 - The biggest change is the rework of the percpu code, to support the
   'Named Address Spaces' GCC feature, by Uros Bizjak:

      - This allows C code to access GS- and FS-segment-relative memory
        via variables declared with such attributes, letting the
        compiler optimize those accesses better than the previous
        inline assembly code could (see the sketch below).

      - The series also includes a number of micro-optimizations of
        various percpu access methods, plus cleanups of %gs accesses
        in assembly code.

      - These changes have been exposed to linux-next testing for the
        last ~5 months, with no known regressions in this area.
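
      - As a rough illustration only - this is not the kernel's percpu
        implementation, and the offset and helper name below are made
        up - a %gs-relative access expressed with GCC's __seg_gs
        qualifier looks roughly like this in C:

            /* Hypothetical per-CPU slot at a fixed offset from the %gs base */
            #define MY_PCPU_COUNTER_OFF     0x40

            static inline unsigned long pcpu_counter_read(void)
            {
                    const unsigned long __seg_gs *p =
                            (const unsigned long __seg_gs *)MY_PCPU_COUNTER_OFF;

                    /*
                     * Compiles to a plain "movq %gs:0x40, %rax" that the
                     * compiler can cache, combine and reorder, unlike an
                     * opaque asm() access.
                     */
                    return *p;
            }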

 - Fix/clean up __switch_to()'s broken but accidentally working handling
   of FPU switching - which also generates better code

 - Propagate more RIP-relative addressing in assembly code, to generate
   slightly better code (see the sketch below this item)
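
   As a rough illustration - 'my_symbol' and the helpers below are
   hypothetical, not code from this merge - the same symbol address
   taken via an absolute reference versus a %rip-relative one:

       extern unsigned long my_symbol;

       static inline unsigned long *my_symbol_addr_abs(void)
       {
               unsigned long *p;

               /* movabsq $sym, %reg: 10-byte encoding, needs an
                  absolute relocation */
               asm ("movabsq $my_symbol, %0" : "=r" (p));
               return p;
       }

       static inline unsigned long *my_symbol_addr_rip(void)
       {
               unsigned long *p;

               /* leaq sym(%rip), %reg: 7-byte encoding,
                  position-independent */
               asm ("leaq my_symbol(%%rip), %0" : "=r" (p));
               return p;
       }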

 - Rework the CPU mitigations Kconfig space to be less idiosyncratic, to
   make it easier for distros to follow & maintain these options

 - Rework the x86 idle code to cure RCU violations and to clean up the
   logic

 - Clean up the vDSO Makefile logic

 - Misc cleanups and fixes

* tag 'x86-core-2024-03-11' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (52 commits)
  x86/idle: Select idle routine only once
  x86/idle: Let prefer_mwait_c1_over_halt() return bool
  x86/idle: Cleanup idle_setup()
  x86/idle: Clean up idle selection
  x86/idle: Sanitize X86_BUG_AMD_E400 handling
  sched/idle: Conditionally handle tick broadcast in default_idle_call()
  x86: Increase brk randomness entropy for 64-bit systems
  x86/vdso: Move vDSO to mmap region
  x86/vdso/kbuild: Group non-standard build attributes and primary object file rules together
  x86/vdso: Fix rethunk patching for vdso-image-{32,64}.o
  x86/retpoline: Ensure default return thunk isn't used at runtime
  x86/vdso: Use CONFIG_COMPAT_32 to specify vdso32
  x86/vdso: Use $(addprefix ) instead of $(foreach )
  x86/vdso: Simplify obj-y addition
  x86/vdso: Consolidate targets and clean-files
  x86/bugs: Rename CONFIG_RETHUNK              => CONFIG_MITIGATION_RETHUNK
  x86/bugs: Rename CONFIG_CPU_SRSO             => CONFIG_MITIGATION_SRSO
  x86/bugs: Rename CONFIG_CPU_IBRS_ENTRY       => CONFIG_MITIGATION_IBRS_ENTRY
  x86/bugs: Rename CONFIG_CPU_UNRET_ENTRY      => CONFIG_MITIGATION_UNRET_ENTRY
  x86/bugs: Rename CONFIG_SLS                  => CONFIG_MITIGATION_SLS
  ...

30 files changed:
Documentation/admin-guide/kernel-parameters.txt
arch/x86/Kconfig
arch/x86/Makefile
arch/x86/entry/calling.h
arch/x86/entry/entry.S
arch/x86/entry/entry_32.S
arch/x86/entry/entry_64.S
arch/x86/include/asm/disabled-features.h
arch/x86/include/asm/nospec-branch.h
arch/x86/include/asm/processor.h
arch/x86/include/asm/uaccess_64.h
arch/x86/kernel/callthunks.c
arch/x86/kernel/cpu/amd.c
arch/x86/kernel/cpu/bugs.c
arch/x86/kernel/cpu/common.c
arch/x86/kernel/head_64.S
arch/x86/kernel/process.c
arch/x86/kernel/process_64.c
arch/x86/kernel/traps.c
arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/svm/svm.c
arch/x86/kvm/vmx/vmx.c
arch/x86/mm/Makefile
include/linux/compiler-gcc.h
include/linux/cpu.h
include/linux/tick.h
kernel/sched/idle.c
kernel/trace/ring_buffer.c
net/netfilter/nft_ct.c
tools/arch/x86/include/asm/disabled-features.h

Simple merge
Simple merge
Simple merge
index 0033790499245e3df5f10496986badbe0150aac2,582731f74dc87d497ddddb408c920abb9c8c4e72..d9feadffa972dadf0e2dcf5804ac50f9827d6c07
@@@ -6,10 -6,9 +6,12 @@@
  #include <linux/export.h>
  #include <linux/linkage.h>
  #include <asm/msr-index.h>
 +#include <asm/unwind_hints.h>
 +#include <asm/segment.h>
 +#include <asm/cache.h>
  
+ #include "calling.h"
  .pushsection .noinstr.text, "ax"
  
  SYM_FUNC_START(entry_ibpb)
@@@ -24,22 -23,4 +26,23 @@@ EXPORT_SYMBOL_GPL(entry_ibpb)
  
  .popsection
  
 +/*
 + * Define the VERW operand that is disguised as entry code so that
 + * it can be referenced with KPTI enabled. This ensures VERW can be
 + * used late in exit-to-user path after page tables are switched.
 + */
 +.pushsection .entry.text, "ax"
 +
 +.align L1_CACHE_BYTES, 0xcc
 +SYM_CODE_START_NOALIGN(mds_verw_sel)
 +      UNWIND_HINT_UNDEFINED
 +      ANNOTATE_NOENDBR
 +      .word __KERNEL_DS
 +.align L1_CACHE_BYTES, 0xcc
 +SYM_CODE_END(mds_verw_sel);
 +/* For KVM */
 +EXPORT_SYMBOL_GPL(mds_verw_sel);
 +
 +.popsection
 +
+ THUNK warn_thunk_thunk, __warn_thunk
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
index 6121c2b42ecfe518b9cee51f1d5c899700484420,f0166b31a803856e0b9e4f501d71bfdfee997662..b8441147eb5e84bc6b75648a1228c16850cd5897
@@@ -933,19 -909,19 +909,19 @@@ static __cpuidle void mwait_idle(void
        __current_clr_polling();
  }
  
- void select_idle_routine(const struct cpuinfo_x86 *c)
+ void __init select_idle_routine(void)
  {
- #ifdef CONFIG_SMP
-       if (boot_option_idle_override == IDLE_POLL && __max_threads_per_core > 1)
-               pr_warn_once("WARNING: polling idle and HT enabled, performance may degrade\n");
- #endif
-       if (x86_idle_set() || boot_option_idle_override == IDLE_POLL)
+       if (boot_option_idle_override == IDLE_POLL) {
 -              if (IS_ENABLED(CONFIG_SMP) && smp_num_siblings > 1)
++              if (IS_ENABLED(CONFIG_SMP) && __max_threads_per_core > 1)
+                       pr_warn_once("WARNING: polling idle and HT enabled, performance may degrade\n");
+               return;
+       }
+       /* Required to guard against xen_set_default_idle() */
+       if (x86_idle_set())
                return;
  
-       if (boot_cpu_has_bug(X86_BUG_AMD_E400)) {
-               pr_info("using AMD E400 aware idle routine\n");
-               static_call_update(x86_idle, amd_e400_idle);
-       } else if (prefer_mwait_c1_over_halt(c)) {
+       if (prefer_mwait_c1_over_halt()) {
                pr_info("using mwait in idle threads\n");
                static_call_update(x86_idle, mwait_idle);
        } else if (cpu_feature_enabled(X86_FEATURE_TDX_GUEST)) {
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge