x86: Convert some slow-path static_cpu_has() callers to boot_cpu_has()
author	Borislav Petkov <bp@suse.de>
Fri, 29 Mar 2019 18:52:59 +0000 (19:52 +0100)
committer	Borislav Petkov <bp@suse.de>
Mon, 8 Apr 2019 10:13:34 +0000 (12:13 +0200)
Using static_cpu_has() is pointless on those paths: its alternatives-based
patching only pays off on hot paths, while these are slow/init paths where a
plain bitmap test is enough. Convert them to the boot_cpu_has() variant.

No functional changes.
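For a rough picture of what the cheaper helper costs, here is a minimal,
self-contained userspace sketch (not the kernel's actual definitions; the
names sketch_boot_cpu_has, NCAPWORDS, FEATURE_XSAVES, the word count and the
bit value are illustrative assumptions). boot_cpu_has() boils down to a
load-and-test of one bit in the boot CPU's capability bitmap, whereas
static_cpu_has() exists so hot paths can have that test patched into a
static branch by alternatives at boot -- a benefit that is wasted on paths
like the ones touched here.

  #include <stdbool.h>
  #include <stdio.h>

  #define NCAPWORDS       20                  /* illustrative number of capability words */
  #define FEATURE_XSAVES  (10 * 32 + 3)       /* illustrative feature bit number */

  /* stand-in for the boot CPU's capability bitmap */
  static unsigned int x86_capability[NCAPWORDS];

  /*
   * What boot_cpu_has() amounts to: an ordinary bit test at run time,
   * cheap enough for any path that is not performance critical.
   */
  static bool sketch_boot_cpu_has(int bit)
  {
          return x86_capability[bit / 32] & (1u << (bit % 32));
  }

  int main(void)
  {
          /* pretend the boot CPU advertised XSAVES */
          x86_capability[FEATURE_XSAVES / 32] |= 1u << (FEATURE_XSAVES % 32);

          printf("XSAVES: %s\n",
                 sketch_boot_cpu_has(FEATURE_XSAVES) ? "yes" : "no");
          return 0;
  }

Until alternatives run, static_cpu_has() falls back to the same kind of bit
test anyway; afterwards it becomes a patched direct branch, which only
matters when the check sits on a hot path.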

Reported-by: Nadav Amit <nadav.amit@gmail.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Rik van Riel <riel@surriel.com>
Reviewed-by: Juergen Gross <jgross@suse.com> # for paravirt
Cc: Aubrey Li <aubrey.li@intel.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Dominik Brodowski <linux@dominikbrodowski.net>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jann Horn <jannh@google.com>
Cc: Joerg Roedel <jroedel@suse.de>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Thomas Lendacky <Thomas.Lendacky@amd.com>
Cc: linux-edac@vger.kernel.org
Cc: Masami Hiramatsu <mhiramat@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tony Luck <tony.luck@intel.com>
Cc: virtualization@lists.linux-foundation.org
Cc: x86@kernel.org
Link: https://lkml.kernel.org/r/20190330112022.28888-3-bp@alien8.de
arch/x86/include/asm/fpu/internal.h
arch/x86/kernel/apic/apic_numachip.c
arch/x86/kernel/cpu/aperfmperf.c
arch/x86/kernel/cpu/common.c
arch/x86/kernel/cpu/mce/inject.c
arch/x86/kernel/cpu/proc.c
arch/x86/kernel/ldt.c
arch/x86/kernel/paravirt.c
arch/x86/kernel/process.c
arch/x86/kernel/reboot.c
arch/x86/kernel/vm86_32.c

diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index fb04a3ded7ddb2ab284404f0caf0f1e6b1af23aa..745a19d34f23f245d17fc50e13786d4a6ca6d34a 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -253,7 +253,7 @@ static inline void copy_xregs_to_kernel_booting(struct xregs_state *xstate)
 
        WARN_ON(system_state != SYSTEM_BOOTING);
 
-       if (static_cpu_has(X86_FEATURE_XSAVES))
+       if (boot_cpu_has(X86_FEATURE_XSAVES))
                XSTATE_OP(XSAVES, xstate, lmask, hmask, err);
        else
                XSTATE_OP(XSAVE, xstate, lmask, hmask, err);
@@ -275,7 +275,7 @@ static inline void copy_kernel_to_xregs_booting(struct xregs_state *xstate)
 
        WARN_ON(system_state != SYSTEM_BOOTING);
 
-       if (static_cpu_has(X86_FEATURE_XSAVES))
+       if (boot_cpu_has(X86_FEATURE_XSAVES))
                XSTATE_OP(XRSTORS, xstate, lmask, hmask, err);
        else
                XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
@@ -497,8 +497,7 @@ static inline void fpregs_activate(struct fpu *fpu)
  *  - switch_fpu_finish() restores the new state as
  *    necessary.
  */
-static inline void
-switch_fpu_prepare(struct fpu *old_fpu, int cpu)
+static inline void switch_fpu_prepare(struct fpu *old_fpu, int cpu)
 {
        if (static_cpu_has(X86_FEATURE_FPU) && old_fpu->initialized) {
                if (!copy_fpregs_to_fpstate(old_fpu))
diff --git a/arch/x86/kernel/apic/apic_numachip.c b/arch/x86/kernel/apic/apic_numachip.c
index 78778b54f904a8f15c24fa257a4699b232d3f842..a5464b8b6c464d117d8d2e03c8273bc21531f412 100644
--- a/arch/x86/kernel/apic/apic_numachip.c
+++ b/arch/x86/kernel/apic/apic_numachip.c
@@ -175,7 +175,7 @@ static void fixup_cpu_id(struct cpuinfo_x86 *c, int node)
        this_cpu_write(cpu_llc_id, node);
 
        /* Account for nodes per socket in multi-core-module processors */
-       if (static_cpu_has(X86_FEATURE_NODEID_MSR)) {
+       if (boot_cpu_has(X86_FEATURE_NODEID_MSR)) {
                rdmsrl(MSR_FAM10H_NODE_ID, val);
                nodes = ((val >> 3) & 7) + 1;
        }
diff --git a/arch/x86/kernel/cpu/aperfmperf.c b/arch/x86/kernel/cpu/aperfmperf.c
index 804c49493938bfc06a8a5f91507a36f993b467b9..64d5aec24203fcebd54563248a8fad3ae130b470 100644
--- a/arch/x86/kernel/cpu/aperfmperf.c
+++ b/arch/x86/kernel/cpu/aperfmperf.c
@@ -83,7 +83,7 @@ unsigned int aperfmperf_get_khz(int cpu)
        if (!cpu_khz)
                return 0;
 
-       if (!static_cpu_has(X86_FEATURE_APERFMPERF))
+       if (!boot_cpu_has(X86_FEATURE_APERFMPERF))
                return 0;
 
        aperfmperf_snapshot_cpu(cpu, ktime_get(), true);
@@ -99,7 +99,7 @@ void arch_freq_prepare_all(void)
        if (!cpu_khz)
                return;
 
-       if (!static_cpu_has(X86_FEATURE_APERFMPERF))
+       if (!boot_cpu_has(X86_FEATURE_APERFMPERF))
                return;
 
        for_each_online_cpu(cpu)
@@ -115,7 +115,7 @@ unsigned int arch_freq_get_on_cpu(int cpu)
        if (!cpu_khz)
                return 0;
 
-       if (!static_cpu_has(X86_FEATURE_APERFMPERF))
+       if (!boot_cpu_has(X86_FEATURE_APERFMPERF))
                return 0;
 
        if (aperfmperf_snapshot_cpu(cpu, ktime_get(), true))
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index cb28e98a0659abc5051b1e7fcb936b692a1a1264..95a5faf3a6a0fc23b404b423d71c271e1eb4c766 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1668,7 +1668,7 @@ static void setup_getcpu(int cpu)
        unsigned long cpudata = vdso_encode_cpunode(cpu, early_cpu_to_node(cpu));
        struct desc_struct d = { };
 
-       if (static_cpu_has(X86_FEATURE_RDTSCP))
+       if (boot_cpu_has(X86_FEATURE_RDTSCP))
                write_rdtscp_aux(cpudata);
 
        /* Store CPU and node number in limit. */
diff --git a/arch/x86/kernel/cpu/mce/inject.c b/arch/x86/kernel/cpu/mce/inject.c
index 8492ef7d9015086fb44e08ec532438bf43056d5c..3da9a8823e4787f060e06e4b5172f6bbe2209688 100644
--- a/arch/x86/kernel/cpu/mce/inject.c
+++ b/arch/x86/kernel/cpu/mce/inject.c
@@ -528,7 +528,7 @@ static void do_inject(void)
         * only on the node base core. Refer to D18F3x44[NbMcaToMstCpuEn] for
         * Fam10h and later BKDGs.
         */
-       if (static_cpu_has(X86_FEATURE_AMD_DCM) &&
+       if (boot_cpu_has(X86_FEATURE_AMD_DCM) &&
            b == 4 &&
            boot_cpu_data.x86 < 0x17) {
                toggle_nb_mca_mst_cpu(amd_get_nb_id(cpu));
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
index 2c8522a39ed5dbc388bada821ed144f2435adac2..cb2e49810d687fe67ae304edcb480469b95480b7 100644
--- a/arch/x86/kernel/cpu/proc.c
+++ b/arch/x86/kernel/cpu/proc.c
@@ -35,11 +35,11 @@ static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c)
                   "fpu_exception\t: %s\n"
                   "cpuid level\t: %d\n"
                   "wp\t\t: yes\n",
-                  static_cpu_has_bug(X86_BUG_FDIV) ? "yes" : "no",
-                  static_cpu_has_bug(X86_BUG_F00F) ? "yes" : "no",
-                  static_cpu_has_bug(X86_BUG_COMA) ? "yes" : "no",
-                  static_cpu_has(X86_FEATURE_FPU) ? "yes" : "no",
-                  static_cpu_has(X86_FEATURE_FPU) ? "yes" : "no",
+                  boot_cpu_has_bug(X86_BUG_FDIV) ? "yes" : "no",
+                  boot_cpu_has_bug(X86_BUG_F00F) ? "yes" : "no",
+                  boot_cpu_has_bug(X86_BUG_COMA) ? "yes" : "no",
+                  boot_cpu_has(X86_FEATURE_FPU) ? "yes" : "no",
+                  boot_cpu_has(X86_FEATURE_FPU) ? "yes" : "no",
                   c->cpuid_level);
 }
 #else
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index 6135ae8ce0364772f5cc72f73b4bb8f2ad3a8d9e..b2463fcb20a8116921203fb246d3b7ffa0ef1e88 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -113,7 +113,7 @@ static void do_sanity_check(struct mm_struct *mm,
                 * tables.
                 */
                WARN_ON(!had_kernel_mapping);
-               if (static_cpu_has(X86_FEATURE_PTI))
+               if (boot_cpu_has(X86_FEATURE_PTI))
                        WARN_ON(!had_user_mapping);
        } else {
                /*
@@ -121,7 +121,7 @@ static void do_sanity_check(struct mm_struct *mm,
                 * Sync the pgd to the usermode tables.
                 */
                WARN_ON(had_kernel_mapping);
-               if (static_cpu_has(X86_FEATURE_PTI))
+               if (boot_cpu_has(X86_FEATURE_PTI))
                        WARN_ON(had_user_mapping);
        }
 }
@@ -156,7 +156,7 @@ static void map_ldt_struct_to_user(struct mm_struct *mm)
        k_pmd = pgd_to_pmd_walk(k_pgd, LDT_BASE_ADDR);
        u_pmd = pgd_to_pmd_walk(u_pgd, LDT_BASE_ADDR);
 
-       if (static_cpu_has(X86_FEATURE_PTI) && !mm->context.ldt)
+       if (boot_cpu_has(X86_FEATURE_PTI) && !mm->context.ldt)
                set_pmd(u_pmd, *k_pmd);
 }
 
@@ -181,7 +181,7 @@ static void map_ldt_struct_to_user(struct mm_struct *mm)
 {
        pgd_t *pgd = pgd_offset(mm, LDT_BASE_ADDR);
 
-       if (static_cpu_has(X86_FEATURE_PTI) && !mm->context.ldt)
+       if (boot_cpu_has(X86_FEATURE_PTI) && !mm->context.ldt)
                set_pgd(kernel_to_user_pgdp(pgd), *pgd);
 }
 
@@ -208,7 +208,7 @@ map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
        spinlock_t *ptl;
        int i, nr_pages;
 
-       if (!static_cpu_has(X86_FEATURE_PTI))
+       if (!boot_cpu_has(X86_FEATURE_PTI))
                return 0;
 
        /*
@@ -271,7 +271,7 @@ static void unmap_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt)
                return;
 
        /* LDT map/unmap is only required for PTI */
-       if (!static_cpu_has(X86_FEATURE_PTI))
+       if (!boot_cpu_has(X86_FEATURE_PTI))
                return;
 
        nr_pages = DIV_ROUND_UP(ldt->nr_entries * LDT_ENTRY_SIZE, PAGE_SIZE);
@@ -311,7 +311,7 @@ static void free_ldt_pgtables(struct mm_struct *mm)
        unsigned long start = LDT_BASE_ADDR;
        unsigned long end = LDT_END_ADDR;
 
-       if (!static_cpu_has(X86_FEATURE_PTI))
+       if (!boot_cpu_has(X86_FEATURE_PTI))
                return;
 
        tlb_gather_mmu(&tlb, mm, start, end);
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index c0e0101133f352ba6a8ac8369eef15a3e5301be3..7bbaa6baf37f9b9ada524cf8f07385cf23717739 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -121,7 +121,7 @@ DEFINE_STATIC_KEY_TRUE(virt_spin_lock_key);
 
 void __init native_pv_lock_init(void)
 {
-       if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
+       if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
                static_branch_disable(&virt_spin_lock_key);
 }
 
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 58ac7be52c7a6df944dca7305492b8ce70ed8d8e..16a7113e91c5af6d1556371b52ec50e479ee7e95 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -236,7 +236,7 @@ static int get_cpuid_mode(void)
 
 static int set_cpuid_mode(struct task_struct *task, unsigned long cpuid_enabled)
 {
-       if (!static_cpu_has(X86_FEATURE_CPUID_FAULT))
+       if (!boot_cpu_has(X86_FEATURE_CPUID_FAULT))
                return -ENODEV;
 
        if (cpuid_enabled)
@@ -666,7 +666,7 @@ static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c)
        if (c->x86_vendor != X86_VENDOR_INTEL)
                return 0;
 
-       if (!cpu_has(c, X86_FEATURE_MWAIT) || static_cpu_has_bug(X86_BUG_MONITOR))
+       if (!cpu_has(c, X86_FEATURE_MWAIT) || boot_cpu_has_bug(X86_BUG_MONITOR))
                return 0;
 
        return 1;
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 725624b6c0c05cdc0c94175214a7ce796df47eee..d62ebbc5ec783240e61b54b321e6c1f3b496adad 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -108,7 +108,7 @@ void __noreturn machine_real_restart(unsigned int type)
        write_cr3(real_mode_header->trampoline_pgd);
 
        /* Exiting long mode will fail if CR4.PCIDE is set. */
-       if (static_cpu_has(X86_FEATURE_PCID))
+       if (boot_cpu_has(X86_FEATURE_PCID))
                cr4_clear_bits(X86_CR4_PCIDE);
 #endif
 
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index a092b6b40c6b5113f95d374fbbc966717ddf8ca4..6a38717d179c4bbb35d39badb2fa2ee53d9ee71e 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -369,7 +369,7 @@ static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus)
        preempt_disable();
        tsk->thread.sp0 += 16;
 
-       if (static_cpu_has(X86_FEATURE_SEP)) {
+       if (boot_cpu_has(X86_FEATURE_SEP)) {
                tsk->thread.sysenter_cs = 0;
                refresh_sysenter_cs(&tsk->thread);
        }