x86/cpufeature: Remove unused and seldom-used cpu_has_xx macros
author    Borislav Petkov <bp@suse.de>
          Mon, 7 Dec 2015 09:39:41 +0000 (10:39 +0100)
committer Thomas Gleixner <tglx@linutronix.de>
          Sat, 19 Dec 2015 10:49:55 +0000 (11:49 +0100)
These macros are clumsy and code should use static_cpu_has_safe() or
boot_cpu_has() instead. Kill the unused and least-used ones.

The remaining ones need more careful inspection before they can be
converted; that work is on the TODO list.
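
For reference, a minimal sketch of the conversion pattern, built from
call sites in this series (the alternatives-patched static_cpu_has_safe()
for fast paths, plain boot_cpu_has() everywhere else):

	/* Before: opaque wrapper macro */
	if (cpu_has_mtrr)
		get_fixed_ranges(mtrr_state.fixed_ranges);

	/* After: test the boot CPU's feature bit directly */
	if (boot_cpu_has(X86_FEATURE_MTRR))
		get_fixed_ranges(mtrr_state.fixed_ranges);

	/* After, in a hot path: the branch is patched in at boot */
	if (static_cpu_has_safe(X86_FEATURE_XMM4_2))
		return 0;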

Signed-off-by: Borislav Petkov <bp@suse.de>
Link: http://lkml.kernel.org/r/1449481182-27541-4-git-send-email-bp@alien8.de
Cc: David Sterba <dsterba@suse.com>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Matt Mackall <mpm@selenic.com>
Cc: Chris Mason <clm@fb.com>
Cc: Josef Bacik <jbacik@fb.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
24 files changed:
arch/x86/crypto/chacha20_glue.c
arch/x86/crypto/crc32c-intel_glue.c
arch/x86/include/asm/cmpxchg_32.h
arch/x86/include/asm/cmpxchg_64.h
arch/x86/include/asm/cpufeature.h
arch/x86/include/asm/xor_32.h
arch/x86/kernel/cpu/amd.c
arch/x86/kernel/cpu/common.c
arch/x86/kernel/cpu/intel.c
arch/x86/kernel/cpu/intel_cacheinfo.c
arch/x86/kernel/cpu/mtrr/generic.c
arch/x86/kernel/cpu/mtrr/main.c
arch/x86/kernel/cpu/perf_event_amd.c
arch/x86/kernel/cpu/perf_event_amd_uncore.c
arch/x86/kernel/fpu/init.c
arch/x86/kernel/hw_breakpoint.c
arch/x86/kernel/smpboot.c
arch/x86/kernel/vm86_32.c
arch/x86/mm/setup_nx.c
drivers/char/hw_random/via-rng.c
drivers/crypto/padlock-aes.c
drivers/crypto/padlock-sha.c
drivers/iommu/intel_irq_remapping.c
fs/btrfs/disk-io.c

arch/x86/crypto/chacha20_glue.c
index 722bacea040e71f4cae3769e38605e51a6fd8f82..8baaff5af0b572b27e9c488083d2d751216775d9 100644
@@ -125,7 +125,7 @@ static struct crypto_alg alg = {
 
 static int __init chacha20_simd_mod_init(void)
 {
-       if (!cpu_has_ssse3)
+       if (!boot_cpu_has(X86_FEATURE_SSSE3))
                return -ENODEV;
 
 #ifdef CONFIG_AS_AVX2
arch/x86/crypto/crc32c-intel_glue.c
index 81a595d75cf5959bbcae8c2096ebdf0f538bf7f9..0e9871693f2469d3106f57ab6dc94d85a3f16146 100644
@@ -257,7 +257,7 @@ static int __init crc32c_intel_mod_init(void)
        if (!x86_match_cpu(crc32c_cpu_id))
                return -ENODEV;
 #ifdef CONFIG_X86_64
-       if (cpu_has_pclmulqdq) {
+       if (boot_cpu_has(X86_FEATURE_PCLMULQDQ)) {
                alg.update = crc32c_pcl_intel_update;
                alg.finup = crc32c_pcl_intel_finup;
                alg.digest = crc32c_pcl_intel_digest;
arch/x86/include/asm/cmpxchg_32.h
index f7e142926481b6fce09ce3f2d5ddc7f5869b86d8..e4959d023af84857c7b184b1c64d9d715f318a29 100644
@@ -109,6 +109,6 @@ static inline u64 __cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new)
 
 #endif
 
-#define system_has_cmpxchg_double() cpu_has_cx8
+#define system_has_cmpxchg_double() boot_cpu_has(X86_FEATURE_CX8)
 
 #endif /* _ASM_X86_CMPXCHG_32_H */
arch/x86/include/asm/cmpxchg_64.h
index 1af94697aae510fb8eee5699b96895622975f5ec..caa23a34c963ae34e1492c19633c53019d308caf 100644
@@ -18,6 +18,6 @@ static inline void set_64bit(volatile u64 *ptr, u64 val)
        cmpxchg_local((ptr), (o), (n));                                 \
 })
 
-#define system_has_cmpxchg_double() cpu_has_cx16
+#define system_has_cmpxchg_double() boot_cpu_has(X86_FEATURE_CX16)
 
 #endif /* _ASM_X86_CMPXCHG_64_H */
arch/x86/include/asm/cpufeature.h
index 35401fef0d75aa6f7210b97d7dd55ae9a8571b5a..144b042c0872d03bb57f5f5bfb3d20257f763c2d 100644
@@ -385,58 +385,29 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
 } while (0)
 
 #define cpu_has_fpu            boot_cpu_has(X86_FEATURE_FPU)
-#define cpu_has_de             boot_cpu_has(X86_FEATURE_DE)
 #define cpu_has_pse            boot_cpu_has(X86_FEATURE_PSE)
 #define cpu_has_tsc            boot_cpu_has(X86_FEATURE_TSC)
 #define cpu_has_pge            boot_cpu_has(X86_FEATURE_PGE)
 #define cpu_has_apic           boot_cpu_has(X86_FEATURE_APIC)
-#define cpu_has_sep            boot_cpu_has(X86_FEATURE_SEP)
-#define cpu_has_mtrr           boot_cpu_has(X86_FEATURE_MTRR)
-#define cpu_has_mmx            boot_cpu_has(X86_FEATURE_MMX)
 #define cpu_has_fxsr           boot_cpu_has(X86_FEATURE_FXSR)
 #define cpu_has_xmm            boot_cpu_has(X86_FEATURE_XMM)
 #define cpu_has_xmm2           boot_cpu_has(X86_FEATURE_XMM2)
-#define cpu_has_xmm3           boot_cpu_has(X86_FEATURE_XMM3)
-#define cpu_has_ssse3          boot_cpu_has(X86_FEATURE_SSSE3)
 #define cpu_has_aes            boot_cpu_has(X86_FEATURE_AES)
 #define cpu_has_avx            boot_cpu_has(X86_FEATURE_AVX)
 #define cpu_has_avx2           boot_cpu_has(X86_FEATURE_AVX2)
-#define cpu_has_ht             boot_cpu_has(X86_FEATURE_HT)
-#define cpu_has_nx             boot_cpu_has(X86_FEATURE_NX)
-#define cpu_has_xstore         boot_cpu_has(X86_FEATURE_XSTORE)
-#define cpu_has_xstore_enabled boot_cpu_has(X86_FEATURE_XSTORE_EN)
-#define cpu_has_xcrypt         boot_cpu_has(X86_FEATURE_XCRYPT)
-#define cpu_has_xcrypt_enabled boot_cpu_has(X86_FEATURE_XCRYPT_EN)
-#define cpu_has_ace2           boot_cpu_has(X86_FEATURE_ACE2)
-#define cpu_has_ace2_enabled   boot_cpu_has(X86_FEATURE_ACE2_EN)
-#define cpu_has_phe            boot_cpu_has(X86_FEATURE_PHE)
-#define cpu_has_phe_enabled    boot_cpu_has(X86_FEATURE_PHE_EN)
-#define cpu_has_pmm            boot_cpu_has(X86_FEATURE_PMM)
-#define cpu_has_pmm_enabled    boot_cpu_has(X86_FEATURE_PMM_EN)
-#define cpu_has_ds             boot_cpu_has(X86_FEATURE_DS)
-#define cpu_has_pebs           boot_cpu_has(X86_FEATURE_PEBS)
 #define cpu_has_clflush                boot_cpu_has(X86_FEATURE_CLFLUSH)
-#define cpu_has_bts            boot_cpu_has(X86_FEATURE_BTS)
 #define cpu_has_gbpages                boot_cpu_has(X86_FEATURE_GBPAGES)
 #define cpu_has_arch_perfmon   boot_cpu_has(X86_FEATURE_ARCH_PERFMON)
 #define cpu_has_pat            boot_cpu_has(X86_FEATURE_PAT)
-#define cpu_has_xmm4_1         boot_cpu_has(X86_FEATURE_XMM4_1)
-#define cpu_has_xmm4_2         boot_cpu_has(X86_FEATURE_XMM4_2)
 #define cpu_has_x2apic         boot_cpu_has(X86_FEATURE_X2APIC)
 #define cpu_has_xsave          boot_cpu_has(X86_FEATURE_XSAVE)
-#define cpu_has_xsaveopt       boot_cpu_has(X86_FEATURE_XSAVEOPT)
 #define cpu_has_xsaves         boot_cpu_has(X86_FEATURE_XSAVES)
 #define cpu_has_osxsave                boot_cpu_has(X86_FEATURE_OSXSAVE)
 #define cpu_has_hypervisor     boot_cpu_has(X86_FEATURE_HYPERVISOR)
-#define cpu_has_pclmulqdq      boot_cpu_has(X86_FEATURE_PCLMULQDQ)
-#define cpu_has_perfctr_core   boot_cpu_has(X86_FEATURE_PERFCTR_CORE)
-#define cpu_has_perfctr_nb     boot_cpu_has(X86_FEATURE_PERFCTR_NB)
-#define cpu_has_perfctr_l2     boot_cpu_has(X86_FEATURE_PERFCTR_L2)
-#define cpu_has_cx8            boot_cpu_has(X86_FEATURE_CX8)
-#define cpu_has_cx16           boot_cpu_has(X86_FEATURE_CX16)
-#define cpu_has_eager_fpu      boot_cpu_has(X86_FEATURE_EAGER_FPU)
-#define cpu_has_topoext                boot_cpu_has(X86_FEATURE_TOPOEXT)
-#define cpu_has_bpext          boot_cpu_has(X86_FEATURE_BPEXT)
+/*
+ * Do not add any more of those clumsy macros - use static_cpu_has_safe() for
+ * fast paths and boot_cpu_has() otherwise!
+ */
 
 #if __GNUC__ >= 4
 extern void warn_pre_alternatives(void);
arch/x86/include/asm/xor_32.h
index 5a08bc8bff33934e10b4b9afe8e3236ac8c5ce93..c54beb44c4c1f20e4dda33fb901d17b2850fecf5 100644
@@ -553,7 +553,7 @@ do {                                                        \
        if (cpu_has_xmm) {                              \
                xor_speed(&xor_block_pIII_sse);         \
                xor_speed(&xor_block_sse_pf64);         \
-       } else if (cpu_has_mmx) {                       \
+       } else if (boot_cpu_has(X86_FEATURE_MMX)) {     \
                xor_speed(&xor_block_pII_mmx);          \
                xor_speed(&xor_block_p5_mmx);           \
        } else {                                        \
arch/x86/kernel/cpu/amd.c
index a8816b3251620c941f543e595e949aa60c43467d..34c3ad608dd4094b555c643edf6b29c18459a30f 100644
@@ -304,7 +304,7 @@ static void amd_get_topology(struct cpuinfo_x86 *c)
        int cpu = smp_processor_id();
 
        /* get information required for multi-node processors */
-       if (cpu_has_topoext) {
+       if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
                u32 eax, ebx, ecx, edx;
 
                cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
@@ -922,7 +922,7 @@ static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
 
 void set_dr_addr_mask(unsigned long mask, int dr)
 {
-       if (!cpu_has_bpext)
+       if (!boot_cpu_has(X86_FEATURE_BPEXT))
                return;
 
        switch (dr) {
arch/x86/kernel/cpu/common.c
index e14d5bd8671f37fa934de7f57164c8eaa6ded27e..4d5279c95d5faea67c9a7c2a3d900a722f543b7a 100644
@@ -1445,7 +1445,9 @@ void cpu_init(void)
 
        printk(KERN_INFO "Initializing CPU#%d\n", cpu);
 
-       if (cpu_feature_enabled(X86_FEATURE_VME) || cpu_has_tsc || cpu_has_de)
+       if (cpu_feature_enabled(X86_FEATURE_VME) ||
+           cpu_has_tsc ||
+           boot_cpu_has(X86_FEATURE_DE))
                cr4_clear_bits(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
 
        load_current_idt();
arch/x86/kernel/cpu/intel.c
index 209ac1e7d1f03664010955dea71d953f6a396b5b..565648bc1a0aef6c3cf60da92ec9fb60a2408c90 100644
@@ -445,7 +445,8 @@ static void init_intel(struct cpuinfo_x86 *c)
 
        if (cpu_has_xmm2)
                set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
-       if (cpu_has_ds) {
+
+       if (boot_cpu_has(X86_FEATURE_DS)) {
                unsigned int l1;
                rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
                if (!(l1 & (1<<11)))
arch/x86/kernel/cpu/intel_cacheinfo.c
index e38d338a64475a82feb198c7169e54b8d6d7100a..0b6c52388cf484f809a7f328f475424dd262417c 100644
@@ -591,7 +591,7 @@ cpuid4_cache_lookup_regs(int index, struct _cpuid4_info_regs *this_leaf)
        unsigned                edx;
 
        if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
-               if (cpu_has_topoext)
+               if (boot_cpu_has(X86_FEATURE_TOPOEXT))
                        cpuid_count(0x8000001d, index, &eax.full,
                                    &ebx.full, &ecx.full, &edx);
                else
@@ -637,7 +637,7 @@ static int find_num_cache_leaves(struct cpuinfo_x86 *c)
 void init_amd_cacheinfo(struct cpuinfo_x86 *c)
 {
 
-       if (cpu_has_topoext) {
+       if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
                num_cache_leaves = find_num_cache_leaves(c);
        } else if (c->extended_cpuid_level >= 0x80000006) {
                if (cpuid_edx(0x80000006) & 0xf000)
@@ -809,7 +809,7 @@ static int __cache_amd_cpumap_setup(unsigned int cpu, int index,
        struct cacheinfo *this_leaf;
        int i, sibling;
 
-       if (cpu_has_topoext) {
+       if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
                unsigned int apicid, nshared, first, last;
 
                this_leaf = this_cpu_ci->info_list + index;
arch/x86/kernel/cpu/mtrr/generic.c
index 3b533cf37c745c9ecfc81fc5fde94bc46f84e1b5..c870af1610083ec3dda7cb61b966860c9a224374 100644
@@ -349,7 +349,7 @@ static void get_fixed_ranges(mtrr_type *frs)
 
 void mtrr_save_fixed_ranges(void *info)
 {
-       if (cpu_has_mtrr)
+       if (boot_cpu_has(X86_FEATURE_MTRR))
                get_fixed_ranges(mtrr_state.fixed_ranges);
 }
 
arch/x86/kernel/cpu/mtrr/main.c
index f891b4750f04c00b296b84598aa396bcbe9724c7..5c3d149ee91cb1f6c87ff6ad1853a38adfce82da 100644
@@ -682,7 +682,7 @@ void __init mtrr_bp_init(void)
 
        phys_addr = 32;
 
-       if (cpu_has_mtrr) {
+       if (boot_cpu_has(X86_FEATURE_MTRR)) {
                mtrr_if = &generic_mtrr_ops;
                size_or_mask = SIZE_OR_MASK_BITS(36);
                size_and_mask = 0x00f00000;
arch/x86/kernel/cpu/perf_event_amd.c
index 1cee5d2d7eceafde5d5545dac8227c55f8097386..3ea177cb7366572984b2f8a33993cdc4fce7cfa6 100644
@@ -160,7 +160,7 @@ static inline int amd_pmu_addr_offset(int index, bool eventsel)
        if (offset)
                return offset;
 
-       if (!cpu_has_perfctr_core)
+       if (!boot_cpu_has(X86_FEATURE_PERFCTR_CORE))
                offset = index;
        else
                offset = index << 1;
@@ -652,7 +652,7 @@ static __initconst const struct x86_pmu amd_pmu = {
 
 static int __init amd_core_pmu_init(void)
 {
-       if (!cpu_has_perfctr_core)
+       if (!boot_cpu_has(X86_FEATURE_PERFCTR_CORE))
                return 0;
 
        switch (boot_cpu_data.x86) {
arch/x86/kernel/cpu/perf_event_amd_uncore.c
index cc6cedb8f25d5da565f09c612eb612a0eb7db701..49742746a6c963c4a86c08c773430087225fec53 100644
@@ -523,10 +523,10 @@ static int __init amd_uncore_init(void)
        if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
                goto fail_nodev;
 
-       if (!cpu_has_topoext)
+       if (!boot_cpu_has(X86_FEATURE_TOPOEXT))
                goto fail_nodev;
 
-       if (cpu_has_perfctr_nb) {
+       if (boot_cpu_has(X86_FEATURE_PERFCTR_NB)) {
                amd_uncore_nb = alloc_percpu(struct amd_uncore *);
                if (!amd_uncore_nb) {
                        ret = -ENOMEM;
@@ -540,7 +540,7 @@ static int __init amd_uncore_init(void)
                ret = 0;
        }
 
-       if (cpu_has_perfctr_l2) {
+       if (boot_cpu_has(X86_FEATURE_PERFCTR_L2)) {
                amd_uncore_l2 = alloc_percpu(struct amd_uncore *);
                if (!amd_uncore_l2) {
                        ret = -ENOMEM;
@@ -583,10 +583,11 @@ fail_online:
 
        /* amd_uncore_nb/l2 should have been freed by cleanup_cpu_online */
        amd_uncore_nb = amd_uncore_l2 = NULL;
-       if (cpu_has_perfctr_l2)
+
+       if (boot_cpu_has(X86_FEATURE_PERFCTR_L2))
                perf_pmu_unregister(&amd_l2_pmu);
 fail_l2:
-       if (cpu_has_perfctr_nb)
+       if (boot_cpu_has(X86_FEATURE_PERFCTR_NB))
                perf_pmu_unregister(&amd_nb_pmu);
        if (amd_uncore_l2)
                free_percpu(amd_uncore_l2);
arch/x86/kernel/fpu/init.c
index be39b5fde4b9619a566eb5deee4ee9456a3f5cc5..22abea04731e63848be5af40916339eb722ec1c4 100644
@@ -12,7 +12,7 @@
  */
 static void fpu__init_cpu_ctx_switch(void)
 {
-       if (!cpu_has_eager_fpu)
+       if (!boot_cpu_has(X86_FEATURE_EAGER_FPU))
                stts();
        else
                clts();
@@ -287,7 +287,7 @@ static void __init fpu__init_system_ctx_switch(void)
        current_thread_info()->status = 0;
 
        /* Auto enable eagerfpu for xsaveopt */
-       if (cpu_has_xsaveopt && eagerfpu != DISABLE)
+       if (boot_cpu_has(X86_FEATURE_XSAVEOPT) && eagerfpu != DISABLE)
                eagerfpu = ENABLE;
 
        if (xfeatures_mask & XFEATURE_MASK_EAGER) {
arch/x86/kernel/hw_breakpoint.c
index 50a3fad5b89f1ff42b05b4f7eeb60ea735d7daaa..2bcfb5f2bc449c3717308c9a75676af3b07e796f 100644
@@ -300,6 +300,10 @@ static int arch_build_bp_info(struct perf_event *bp)
                        return -EINVAL;
                if (bp->attr.bp_addr & (bp->attr.bp_len - 1))
                        return -EINVAL;
+
+               if (!boot_cpu_has(X86_FEATURE_BPEXT))
+                       return -EOPNOTSUPP;
+
                /*
                 * It's impossible to use a range breakpoint to fake out
                 * user vs kernel detection because bp_len - 1 can't
@@ -307,8 +311,6 @@ static int arch_build_bp_info(struct perf_event *bp)
                 * breakpoints, then we'll have to check for kprobe-blacklisted
                 * addresses anywhere in the range.
                 */
-               if (!cpu_has_bpext)
-                       return -EOPNOTSUPP;
                info->mask = bp->attr.bp_len - 1;
                info->len = X86_BREAKPOINT_LEN_1;
        }
arch/x86/kernel/smpboot.c
index f2281e9cfdbe04edaa404b6f9d6159a0df1faa0f..24d57f77b3c19615840ac4f09c8c0fd299864698 100644
@@ -304,7 +304,7 @@ do {                                                                        \
 
 static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
 {
-       if (cpu_has_topoext) {
+       if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
                int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
 
                if (c->phys_proc_id == o->phys_proc_id &&
arch/x86/kernel/vm86_32.c
index 5246193519614dbd8d3a602544e8117376df05f7..483231ebbb0b2e254bbd749997e7b3feea62bb44 100644
@@ -357,8 +357,10 @@ static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus)
        tss = &per_cpu(cpu_tss, get_cpu());
        /* make room for real-mode segments */
        tsk->thread.sp0 += 16;
-       if (cpu_has_sep)
+
+       if (static_cpu_has_safe(X86_FEATURE_SEP))
                tsk->thread.sysenter_cs = 0;
+
        load_sp0(tss, &tsk->thread);
        put_cpu();
 
arch/x86/mm/setup_nx.c
index 90555bf60aa45dce625c0171e4783e6014529490..92e2eacb33216821e8b18821834c7ce972571f29 100644
@@ -31,7 +31,7 @@ early_param("noexec", noexec_setup);
 
 void x86_configure_nx(void)
 {
-       if (cpu_has_nx && !disable_nx)
+       if (boot_cpu_has(X86_FEATURE_NX) && !disable_nx)
                __supported_pte_mask |= _PAGE_NX;
        else
                __supported_pte_mask &= ~_PAGE_NX;
@@ -39,7 +39,7 @@ void x86_configure_nx(void)
 
 void __init x86_report_nx(void)
 {
-       if (!cpu_has_nx) {
+       if (!boot_cpu_has(X86_FEATURE_NX)) {
                printk(KERN_NOTICE "Notice: NX (Execute Disable) protection "
                       "missing in CPU!\n");
        } else {
drivers/char/hw_random/via-rng.c
index 0c98a9d51a2494e6a49ef49e6bfb557cefca1974..44ce806069444712ce9d6f883c2c3e3d3b8354a3 100644
@@ -140,7 +140,7 @@ static int via_rng_init(struct hwrng *rng)
         * RNG configuration like it used to be the case in this
         * register */
        if ((c->x86 == 6) && (c->x86_model >= 0x0f)) {
-               if (!cpu_has_xstore_enabled) {
+               if (!boot_cpu_has(X86_FEATURE_XSTORE_EN)) {
                        pr_err(PFX "can't enable hardware RNG "
                                "if XSTORE is not enabled\n");
                        return -ENODEV;
@@ -200,8 +200,9 @@ static int __init mod_init(void)
 {
        int err;
 
-       if (!cpu_has_xstore)
+       if (!boot_cpu_has(X86_FEATURE_XSTORE))
                return -ENODEV;
+
        pr_info("VIA RNG detected\n");
        err = hwrng_register(&via_rng);
        if (err) {
drivers/crypto/padlock-aes.c
index da2d6777bd092f0a373e14cc960514b8a4f5d148..97a364694bfced664b33498505fa6510d8979d7c 100644
@@ -515,7 +515,7 @@ static int __init padlock_init(void)
        if (!x86_match_cpu(padlock_cpu_id))
                return -ENODEV;
 
-       if (!cpu_has_xcrypt_enabled) {
+       if (!boot_cpu_has(X86_FEATURE_XCRYPT_EN)) {
                printk(KERN_NOTICE PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
                return -ENODEV;
        }
drivers/crypto/padlock-sha.c
index 4e154c9b92064bb1fbafeb805bcb77f9cc2d1bdf..8c5f90647b7a773112365e8a86ad979edaaed190 100644
@@ -540,7 +540,7 @@ static int __init padlock_init(void)
        struct shash_alg *sha1;
        struct shash_alg *sha256;
 
-       if (!x86_match_cpu(padlock_sha_ids) || !cpu_has_phe_enabled)
+       if (!x86_match_cpu(padlock_sha_ids) || !boot_cpu_has(X86_FEATURE_PHE_EN))
                return -ENODEV;
 
        /* Register the newly added algorithm module if on *
drivers/iommu/intel_irq_remapping.c
index 1fae1881648c5a87e9071d1dc9ade123baab0252..c12ba4516df25b7201731b44ef4a175c78f2d0e0 100644
@@ -753,7 +753,7 @@ static inline void set_irq_posting_cap(void)
                 * should have X86_FEATURE_CX16 support, this has been confirmed
                 * with Intel hardware guys.
                 */
-               if ( cpu_has_cx16 )
+               if (boot_cpu_has(X86_FEATURE_CX16))
                        intel_irq_remap_ops.capability |= 1 << IRQ_POSTING_CAP;
 
                for_each_iommu(iommu, drhd)
fs/btrfs/disk-io.c
index 974be09e7556ca3f342cac89357364f4ce3cc016..42a378a4eefb4cd198c0d328eecaec293735f3af 100644
@@ -923,7 +923,7 @@ static int check_async_write(struct inode *inode, unsigned long bio_flags)
        if (bio_flags & EXTENT_BIO_TREE_LOG)
                return 0;
 #ifdef CONFIG_X86
-       if (cpu_has_xmm4_2)
+       if (static_cpu_has_safe(X86_FEATURE_XMM4_2))
                return 0;
 #endif
        return 1;