x86/microcode: Make the late update update_lock a raw lock for RT
Author:     Scott Wood <swood@redhat.com>
AuthorDate: Thu, 24 May 2018 15:44:20 +0000 (10:44 -0500)
Committer:  Thomas Gleixner <tglx@linutronix.de>
CommitDate: Sun, 27 May 2018 19:50:09 +0000 (21:50 +0200)
__reload_late() is called from stop_machine context and thus cannot
acquire a non-raw spinlock on PREEMPT_RT: there spinlock_t becomes a
sleeping lock, and sleeping is not allowed while the stop machinery
runs with preemption and interrupts disabled. Make update_lock a
raw_spinlock_t, which keeps spinning semantics on RT.
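
For illustration only, a minimal sketch of the underlying pattern: a
stop_machine() callback runs with preemption and interrupts disabled,
so any lock it takes must be a raw_spinlock_t. The lock and function
names below are hypothetical, not the ones in microcode/core.c.

  #include <linux/cpumask.h>
  #include <linux/spinlock.h>
  #include <linux/stop_machine.h>

  /* Raw lock: stays a spinning lock even where PREEMPT_RT turns
   * ordinary spinlock_t into a sleeping lock. */
  static DEFINE_RAW_SPINLOCK(example_lock);

  /* Runs via stop_machine() with preemption and interrupts
   * disabled -- sleeping here is not allowed. */
  static int example_stop_fn(void *data)
  {
          raw_spin_lock(&example_lock);
          /* ... work serialized across CPUs ... */
          raw_spin_unlock(&example_lock);
          return 0;
  }

  static int example_run(void)
  {
          /* Run the callback on every online CPU; the others spin
           * with interrupts off until all callbacks complete. */
          return stop_machine(example_stop_fn, NULL, cpu_online_mask);
  }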

Signed-off-by: Scott Wood <swood@redhat.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Ashok Raj <ashok.raj@intel.com>
Cc: Clark Williams <williams@redhat.com>
Cc: Pei Zhang <pezhang@redhat.com>
Cc: x86-ml <x86@kernel.org>
Link: http://lkml.kernel.org/r/20180524154420.24455-1-swood@redhat.com

diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index 77e201301528817e32537194cf5e075b29aa9686..08286269fd2411799304a9a5f004615111674b12 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -70,7 +70,7 @@ static DEFINE_MUTEX(microcode_mutex);
 /*
  * Serialize late loading so that CPUs get updated one-by-one.
  */
-static DEFINE_SPINLOCK(update_lock);
+static DEFINE_RAW_SPINLOCK(update_lock);
 
 struct ucode_cpu_info          ucode_cpu_info[NR_CPUS];
 
@@ -560,9 +560,9 @@ static int __reload_late(void *info)
        if (__wait_for_cpus(&late_cpus_in, NSEC_PER_SEC))
                return -1;
 
-       spin_lock(&update_lock);
+       raw_spin_lock(&update_lock);
        apply_microcode_local(&err);
-       spin_unlock(&update_lock);
+       raw_spin_unlock(&update_lock);
 
        /* siblings return UCODE_OK because their engine got updated already */
        if (err > UCODE_NFOUND) {