Merge tag 'x86_misc_for_v5.19_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
author: Linus Torvalds <torvalds@linux-foundation.org>
Tue, 24 May 2022 02:32:59 +0000 (19:32 -0700)
committer: Linus Torvalds <torvalds@linux-foundation.org>
Tue, 24 May 2022 02:32:59 +0000 (19:32 -0700)
Pull misc x86 updates from Borislav Petkov:
 "A variety of fixes which don't fit any other tip bucket:

   - Remove unnecessary function export

   - Correct asm constraint

   - Fix __setup handlers retval"

* tag 'x86_misc_for_v5.19_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/mm: Cleanup the control_va_addr_alignment() __setup handler
  x86: Fix return value of __setup handlers
  x86/delay: Fix the wrong asm constraint in delay_loop()
  x86/amd_nb: Unexport amd_cache_northbridges()

1  2 
arch/x86/kernel/apic/apic.c
arch/x86/kernel/cpu/intel.c

index 9b87ec6acdec31c084ae7c2209d98404ab0eb7dd,ed7d9cf71f68deb122f2c086718b5f45cfe5ce6a..189d3a5e471adc43c44b7d6aa8d2805a0a9a475b
@@@ -170,7 -170,7 +170,7 @@@ static __init int setup_apicpmtimer(cha
  {
        apic_calibrate_pmtmr = 1;
        notsc_setup(NULL);
-       return 0;
+       return 1;
  }
  __setup("apicpmtimer", setup_apicpmtimer);
  #endif
@@@ -320,9 -320,6 +320,9 @@@ int lapic_get_maxlvt(void
  #define APIC_DIVISOR 16
  #define TSC_DIVISOR  8
  
 +/* i82489DX specific */
 +#define               I82489DX_BASE_DIVIDER           (((0x2) << 18))
 +
  /*
   * This function sets up the local APIC timer, with a timeout of
   * 'clocks' APIC bus clock. During calibration we actually call
@@@ -343,14 -340,8 +343,14 @@@ static void __setup_APIC_LVTT(unsigned 
        else if (boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER))
                lvtt_value |= APIC_LVT_TIMER_TSCDEADLINE;
  
 +      /*
 +       * The i82489DX APIC uses bit 18 and 19 for the base divider.  This
 +       * overlaps with bit 18 on integrated APICs, but is not documented
 +       * in the SDM. No problem though. i82489DX equipped systems do not
 +       * have TSC deadline timer.
 +       */
        if (!lapic_is_integrated())
 -              lvtt_value |= SET_APIC_TIMER_BASE(APIC_TIMER_BASE_DIV);
 +              lvtt_value |= I82489DX_BASE_DIVIDER;
  
        if (!irqen)
                lvtt_value |= APIC_LVT_MASKED;
@@@ -1428,21 -1419,22 +1428,21 @@@ void __init apic_intr_mode_init(void
                return;
        case APIC_VIRTUAL_WIRE:
                pr_info("APIC: Switch to virtual wire mode setup\n");
 -              default_setup_apic_routing();
                break;
        case APIC_VIRTUAL_WIRE_NO_CONFIG:
                pr_info("APIC: Switch to virtual wire mode setup with no configuration\n");
                upmode = true;
 -              default_setup_apic_routing();
                break;
        case APIC_SYMMETRIC_IO:
                pr_info("APIC: Switch to symmetric I/O mode setup\n");
 -              default_setup_apic_routing();
                break;
        case APIC_SYMMETRIC_IO_NO_ROUTING:
                pr_info("APIC: Switch to symmetric I/O mode setup in no SMP routine\n");
                break;
        }
  
 +      default_setup_apic_routing();
 +
        if (x86_platform.apic_post_init)
                x86_platform.apic_post_init();
  
@@@ -2559,16 -2551,6 +2559,16 @@@ u32 x86_msi_msg_get_destid(struct msi_m
  }
  EXPORT_SYMBOL_GPL(x86_msi_msg_get_destid);
  
 +#ifdef CONFIG_X86_64
 +void __init acpi_wake_cpu_handler_update(wakeup_cpu_handler handler)
 +{
 +      struct apic **drv;
 +
 +      for (drv = __apicdrivers; drv < __apicdrivers_end; drv++)
 +              (*drv)->wakeup_secondary_cpu_64 = handler;
 +}
 +#endif
 +
  /*
   * Override the generic EOI implementation with an optimized version.
   * Only called during early boot when only one CPU is active and with
index e6c37f38c5ea4b7f246a25f429a2f0b78d70b348,350c247def370e7770f98197acb1cfc49fd96329..7860241a67db0ba79c5398054f8b23557dd09a3f
@@@ -7,13 -7,10 +7,13 @@@
  #include <linux/smp.h>
  #include <linux/sched.h>
  #include <linux/sched/clock.h>
 +#include <linux/semaphore.h>
  #include <linux/thread_info.h>
  #include <linux/init.h>
  #include <linux/uaccess.h>
 +#include <linux/workqueue.h>
  #include <linux/delay.h>
 +#include <linux/cpuhotplug.h>
  
  #include <asm/cpufeature.h>
  #include <asm/msr.h>
@@@ -94,7 -91,7 +94,7 @@@ static bool ring3mwait_disabled __read_
  static int __init ring3mwait_disable(char *__unused)
  {
        ring3mwait_disabled = true;
-       return 0;
+       return 1;
  }
  __setup("ring3mwait=disable", ring3mwait_disable);
  
@@@ -720,6 -717,13 +720,6 @@@ static void init_intel(struct cpuinfo_x
  
        init_intel_misc_features(c);
  
 -      if (tsx_ctrl_state == TSX_CTRL_ENABLE)
 -              tsx_enable();
 -      else if (tsx_ctrl_state == TSX_CTRL_DISABLE)
 -              tsx_disable();
 -      else if (tsx_ctrl_state == TSX_CTRL_RTM_ALWAYS_ABORT)
 -              tsx_clear_cpuid();
 -
        split_lock_init();
        bus_lock_init();
  
@@@ -1002,8 -1006,6 +1002,8 @@@ static const struct 
  
  static struct ratelimit_state bld_ratelimit;
  
 +static DEFINE_SEMAPHORE(buslock_sem);
 +
  static inline bool match_option(const char *arg, int arglen, const char *opt)
  {
        int len = strlen(opt), ratelimit;
@@@ -1114,52 -1116,18 +1114,52 @@@ static void split_lock_init(void
                split_lock_verify_msr(sld_state != sld_off);
  }
  
 +static void __split_lock_reenable(struct work_struct *work)
 +{
 +      sld_update_msr(true);
 +      up(&buslock_sem);
 +}
 +
 +/*
 + * If a CPU goes offline with pending delayed work to re-enable split lock
 + * detection then the delayed work will be executed on some other CPU. That
 + * handles releasing the buslock_sem, but because it executes on a
 + * different CPU probably won't re-enable split lock detection. This is a
 + * problem on HT systems since the sibling CPU on the same core may then be
 + * left running with split lock detection disabled.
 + *
 + * Unconditionally re-enable detection here.
 + */
 +static int splitlock_cpu_offline(unsigned int cpu)
 +{
 +      sld_update_msr(true);
 +
 +      return 0;
 +}
 +
 +static DECLARE_DELAYED_WORK(split_lock_reenable, __split_lock_reenable);
 +
  static void split_lock_warn(unsigned long ip)
  {
 -      pr_warn_ratelimited("#AC: %s/%d took a split_lock trap at address: 0x%lx\n",
 -                          current->comm, current->pid, ip);
 +      int cpu;
  
 -      /*
 -       * Disable the split lock detection for this task so it can make
 -       * progress and set TIF_SLD so the detection is re-enabled via
 -       * switch_to_sld() when the task is scheduled out.
 -       */
 +      if (!current->reported_split_lock)
 +              pr_warn_ratelimited("#AC: %s/%d took a split_lock trap at address: 0x%lx\n",
 +                                  current->comm, current->pid, ip);
 +      current->reported_split_lock = 1;
 +
 +      /* misery factor #1, sleep 10ms before trying to execute split lock */
 +      if (msleep_interruptible(10) > 0)
 +              return;
 +      /* Misery factor #2, only allow one buslocked disabled core at a time */
 +      if (down_interruptible(&buslock_sem) == -EINTR)
 +              return;
 +      cpu = get_cpu();
 +      schedule_delayed_work_on(cpu, &split_lock_reenable, 2);
 +
 +      /* Disable split lock detection on this CPU to make progress */
        sld_update_msr(false);
 -      set_tsk_thread_flag(current, TIF_SLD);
 +      put_cpu();
  }
  
  bool handle_guest_split_lock(unsigned long ip)
@@@ -1232,6 -1200,18 +1232,6 @@@ void handle_bus_lock(struct pt_regs *re
        }
  }
  
 -/*
 - * This function is called only when switching between tasks with
 - * different split-lock detection modes. It sets the MSR for the
 - * mode of the new task. This is right most of the time, but since
 - * the MSR is shared by hyperthreads on a physical core there can
 - * be glitches when the two threads need different modes.
 - */
 -void switch_to_sld(unsigned long tifn)
 -{
 -      sld_update_msr(!(tifn & _TIF_SLD));
 -}
 -
  /*
   * Bits in the IA32_CORE_CAPABILITIES are not architectural, so they should
   * only be trusted if it is confirmed that a CPU model implements a
@@@ -1257,7 -1237,6 +1257,7 @@@ static const struct x86_cpu_id split_lo
        X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X,    1),
        X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE,           1),
        X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L,         1),
 +      X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE,          1),
        {}
  };
  
@@@ -1302,14 -1281,10 +1302,14 @@@ static void sld_state_show(void
                pr_info("disabled\n");
                break;
        case sld_warn:
 -              if (boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT))
 +              if (boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT)) {
                        pr_info("#AC: crashing the kernel on kernel split_locks and warning on user-space split_locks\n");
 -              else if (boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT))
 +                      if (cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
 +                                            "x86/splitlock", NULL, splitlock_cpu_offline) < 0)
 +                              pr_warn("No splitlock CPU offline handler\n");
 +              } else if (boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT)) {
                        pr_info("#DB: warning on user-space bus_locks\n");
 +              }
                break;
        case sld_fatal:
                if (boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT)) {