x86/msr: Rename 'rdmsrl()' to 'rdmsrq()'
author		Ingo Molnar <mingo@kernel.org>
		Wed, 9 Apr 2025 20:28:54 +0000 (22:28 +0200)
committer	Ingo Molnar <mingo@kernel.org>
		Thu, 10 Apr 2025 09:58:27 +0000 (11:58 +0200)
Suggested-by: "H. Peter Anvin" <hpa@zytor.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Juergen Gross <jgross@suse.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Xin Li <xin@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
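
The new 'q' suffix follows the x86 convention for 64-bit quadword operands; the old 'l' suffix read as a 32-bit 'long' even though the macro has always assigned the full 64-bit MSR value. A minimal caller-side sketch of the rename, assuming a kernel build context (the wrapper function name is hypothetical; the MSR constant and the macro semantics are taken from the diffs below):

	/* Sketch only: hypothetical wrapper illustrating the rename. */
	#include <linux/types.h>
	#include <asm/msr.h>

	static u64 read_apic_base(void)
	{
		u64 val;

		/* previously: rdmsrl(MSR_IA32_APICBASE, val) */
		rdmsrq(MSR_IA32_APICBASE, val);
		return val;
	}

The conversion is purely mechanical: every rdmsrl(msr, val) call site becomes rdmsrq(msr, val) with identical arguments and behavior, as the 101-file diff below shows.
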
101 files changed:
arch/x86/coco/sev/core.c
arch/x86/events/amd/brs.c
arch/x86/events/amd/core.c
arch/x86/events/amd/ibs.c
arch/x86/events/amd/lbr.c
arch/x86/events/amd/power.c
arch/x86/events/amd/uncore.c
arch/x86/events/core.c
arch/x86/events/intel/core.c
arch/x86/events/intel/cstate.c
arch/x86/events/intel/knc.c
arch/x86/events/intel/lbr.c
arch/x86/events/intel/p4.c
arch/x86/events/intel/p6.c
arch/x86/events/intel/pt.c
arch/x86/events/intel/uncore.c
arch/x86/events/intel/uncore_nhmex.c
arch/x86/events/intel/uncore_snb.c
arch/x86/events/intel/uncore_snbep.c
arch/x86/events/msr.c
arch/x86/events/perf_event.h
arch/x86/events/rapl.c
arch/x86/events/zhaoxin/core.c
arch/x86/hyperv/hv_apic.c
arch/x86/hyperv/hv_init.c
arch/x86/hyperv/hv_spinlock.c
arch/x86/include/asm/apic.h
arch/x86/include/asm/debugreg.h
arch/x86/include/asm/fsgsbase.h
arch/x86/include/asm/kvm_host.h
arch/x86/include/asm/msr.h
arch/x86/include/asm/paravirt.h
arch/x86/kernel/apic/apic.c
arch/x86/kernel/apic/apic_numachip.c
arch/x86/kernel/cet.c
arch/x86/kernel/cpu/amd.c
arch/x86/kernel/cpu/aperfmperf.c
arch/x86/kernel/cpu/bugs.c
arch/x86/kernel/cpu/bus_lock.c
arch/x86/kernel/cpu/common.c
arch/x86/kernel/cpu/hygon.c
arch/x86/kernel/cpu/intel.c
arch/x86/kernel/cpu/intel_epb.c
arch/x86/kernel/cpu/mce/amd.c
arch/x86/kernel/cpu/mce/core.c
arch/x86/kernel/cpu/mce/inject.c
arch/x86/kernel/cpu/mce/intel.c
arch/x86/kernel/cpu/mshyperv.c
arch/x86/kernel/cpu/resctrl/core.c
arch/x86/kernel/cpu/resctrl/monitor.c
arch/x86/kernel/cpu/resctrl/rdtgroup.c
arch/x86/kernel/cpu/topology.c
arch/x86/kernel/cpu/topology_amd.c
arch/x86/kernel/cpu/tsx.c
arch/x86/kernel/cpu/umwait.c
arch/x86/kernel/fpu/core.c
arch/x86/kernel/hpet.c
arch/x86/kernel/kvm.c
arch/x86/kernel/mmconf-fam10h_64.c
arch/x86/kernel/process.c
arch/x86/kernel/process_64.c
arch/x86/kernel/shstk.c
arch/x86/kernel/traps.c
arch/x86/kernel/tsc.c
arch/x86/kernel/tsc_sync.c
arch/x86/kvm/svm/svm.c
arch/x86/kvm/vmx/nested.c
arch/x86/kvm/vmx/pmu_intel.c
arch/x86/kvm/vmx/sgx.c
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/x86.c
arch/x86/lib/insn-eval.c
arch/x86/mm/pat/memtype.c
arch/x86/pci/amd_bus.c
arch/x86/platform/olpc/olpc-xo1-rtc.c
arch/x86/power/cpu.c
arch/x86/realmode/init.c
arch/x86/virt/svm/sev.c
arch/x86/xen/suspend.c
drivers/cpufreq/acpi-cpufreq.c
drivers/cpufreq/amd-pstate.c
drivers/cpufreq/e_powersaver.c
drivers/cpufreq/intel_pstate.c
drivers/cpufreq/longhaul.c
drivers/cpufreq/powernow-k7.c
drivers/edac/amd64_edac.c
drivers/idle/intel_idle.c
drivers/mtd/nand/raw/cs553x_nand.c
drivers/platform/x86/intel/ifs/load.c
drivers/platform/x86/intel/ifs/runtest.c
drivers/platform/x86/intel/pmc/cnp.c
drivers/platform/x86/intel/speed_select_if/isst_if_mbox_msr.c
drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c
drivers/platform/x86/intel_ips.c
drivers/thermal/intel/intel_hfi.c
drivers/thermal/intel/therm_throt.c
drivers/video/fbdev/geode/gxfb_core.c
drivers/video/fbdev/geode/lxfb_ops.c
drivers/video/fbdev/geode/suspend_gx.c
drivers/video/fbdev/geode/video_gx.c
include/hyperv/hvgdk_mini.h

index b0c1a7a574974a297b3078a9c9de942e1dc3434b..b18a33fe8dd348a58339f362cc5e23aec2e4c36b 100644 (file)
@@ -3278,7 +3278,7 @@ void __init snp_secure_tsc_init(void)
                return;
 
        setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ);
-       rdmsrl(MSR_AMD64_GUEST_TSC_FREQ, tsc_freq_mhz);
+       rdmsrq(MSR_AMD64_GUEST_TSC_FREQ, tsc_freq_mhz);
        snp_tsc_freq_khz = (unsigned long)(tsc_freq_mhz * 1000);
 
        x86_platform.calibrate_cpu = securetsc_get_tsc_khz;
index ec34274633824e7a850d44872cebb07f7191e500..86ef0fe8f43530be3b2d1ad04f9cf16143571888 100644 (file)
@@ -325,7 +325,7 @@ void amd_brs_drain(void)
                u32 brs_idx = tos - i;
                u64 from, to;
 
-               rdmsrl(brs_to(brs_idx), to);
+               rdmsrq(brs_to(brs_idx), to);
 
                /* Entry does not belong to us (as marked by kernel) */
                if (to == BRS_POISON)
@@ -341,7 +341,7 @@ void amd_brs_drain(void)
                if (!amd_brs_match_plm(event, to))
                        continue;
 
-               rdmsrl(brs_from(brs_idx), from);
+               rdmsrq(brs_from(brs_idx), from);
 
                perf_clear_branch_entry_bitfields(br+nr);
 
index 30d6ceb4c8ad49fc0cf0bd83eacd2729712cf279..94d6d2428f2a122c3d08398cbc21a87b19573c9d 100644 (file)
@@ -659,7 +659,7 @@ static inline u64 amd_pmu_get_global_status(void)
        u64 status;
 
        /* PerfCntrGlobalStatus is read-only */
-       rdmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_STATUS, status);
+       rdmsrq(MSR_AMD64_PERF_CNTR_GLOBAL_STATUS, status);
 
        return status;
 }
@@ -679,7 +679,7 @@ static bool amd_pmu_test_overflow_topbit(int idx)
 {
        u64 counter;
 
-       rdmsrl(x86_pmu_event_addr(idx), counter);
+       rdmsrq(x86_pmu_event_addr(idx), counter);
 
        return !(counter & BIT_ULL(x86_pmu.cntval_bits - 1));
 }
index 0252b7ea8bcac25f292274909a8bb6a185efc37e..ca6cc90f4719e02e715f8986d03322a23389c66f 100644 (file)
@@ -424,7 +424,7 @@ perf_ibs_event_update(struct perf_ibs *perf_ibs, struct perf_event *event,
         * prev count manually on overflow.
         */
        while (!perf_event_try_update(event, count, 64)) {
-               rdmsrl(event->hw.config_base, *config);
+               rdmsrq(event->hw.config_base, *config);
                count = perf_ibs->get_count(*config);
        }
 }
@@ -513,7 +513,7 @@ static void perf_ibs_stop(struct perf_event *event, int flags)
        if (!stopping && (hwc->state & PERF_HES_UPTODATE))
                return;
 
-       rdmsrl(hwc->config_base, config);
+       rdmsrq(hwc->config_base, config);
 
        if (stopping) {
                /*
@@ -1256,7 +1256,7 @@ fail:
        hwc = &event->hw;
        msr = hwc->config_base;
        buf = ibs_data.regs;
-       rdmsrl(msr, *buf);
+       rdmsrq(msr, *buf);
        if (!(*buf++ & perf_ibs->valid_mask))
                goto fail;
 
@@ -1274,7 +1274,7 @@ fail:
        offset_max = perf_ibs_get_offset_max(perf_ibs, event, check_rip);
 
        do {
-               rdmsrl(msr + offset, *buf++);
+               rdmsrq(msr + offset, *buf++);
                size++;
                offset = find_next_bit(perf_ibs->offset_mask,
                                       perf_ibs->offset_max,
@@ -1304,17 +1304,17 @@ fail:
        if (event->attr.sample_type & PERF_SAMPLE_RAW) {
                if (perf_ibs == &perf_ibs_op) {
                        if (ibs_caps & IBS_CAPS_BRNTRGT) {
-                               rdmsrl(MSR_AMD64_IBSBRTARGET, *buf++);
+                               rdmsrq(MSR_AMD64_IBSBRTARGET, *buf++);
                                br_target_idx = size;
                                size++;
                        }
                        if (ibs_caps & IBS_CAPS_OPDATA4) {
-                               rdmsrl(MSR_AMD64_IBSOPDATA4, *buf++);
+                               rdmsrq(MSR_AMD64_IBSOPDATA4, *buf++);
                                size++;
                        }
                }
                if (perf_ibs == &perf_ibs_fetch && (ibs_caps & IBS_CAPS_FETCHCTLEXTD)) {
-                       rdmsrl(MSR_AMD64_ICIBSEXTDCTL, *buf++);
+                       rdmsrq(MSR_AMD64_ICIBSEXTDCTL, *buf++);
                        size++;
                }
        }
@@ -1565,7 +1565,7 @@ static inline int ibs_eilvt_valid(void)
 
        preempt_disable();
 
-       rdmsrl(MSR_AMD64_IBSCTL, val);
+       rdmsrq(MSR_AMD64_IBSCTL, val);
        offset = val & IBSCTL_LVT_OFFSET_MASK;
 
        if (!(val & IBSCTL_LVT_OFFSET_VALID)) {
@@ -1680,7 +1680,7 @@ static inline int get_ibs_lvt_offset(void)
 {
        u64 val;
 
-       rdmsrl(MSR_AMD64_IBSCTL, val);
+       rdmsrq(MSR_AMD64_IBSCTL, val);
        if (!(val & IBSCTL_LVT_OFFSET_VALID))
                return -EINVAL;
 
index c06ccca96851f3367b5839ae24c43e5005d4dca4..a1aecf5c04a0eaa98b164126779174ff98d54dde 100644 (file)
@@ -73,7 +73,7 @@ static __always_inline u64 amd_pmu_lbr_get_from(unsigned int idx)
 {
        u64 val;
 
-       rdmsrl(MSR_AMD_SAMP_BR_FROM + idx * 2, val);
+       rdmsrq(MSR_AMD_SAMP_BR_FROM + idx * 2, val);
 
        return val;
 }
@@ -82,7 +82,7 @@ static __always_inline u64 amd_pmu_lbr_get_to(unsigned int idx)
 {
        u64 val;
 
-       rdmsrl(MSR_AMD_SAMP_BR_FROM + idx * 2 + 1, val);
+       rdmsrq(MSR_AMD_SAMP_BR_FROM + idx * 2 + 1, val);
 
        return val;
 }
@@ -400,11 +400,11 @@ void amd_pmu_lbr_enable_all(void)
        }
 
        if (cpu_feature_enabled(X86_FEATURE_AMD_LBR_PMC_FREEZE)) {
-               rdmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl);
+               rdmsrq(MSR_IA32_DEBUGCTLMSR, dbg_ctl);
                wrmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
        }
 
-       rdmsrl(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg);
+       rdmsrq(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg);
        wrmsrl(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg | DBG_EXTN_CFG_LBRV2EN);
 }
 
index 37d5b380516ec4ecd85a9828273b778d7de9d29b..7e96f4652230ba4ef748c6cff68e76ef17e26979 100644 (file)
@@ -48,8 +48,8 @@ static void event_update(struct perf_event *event)
 
        prev_pwr_acc = hwc->pwr_acc;
        prev_ptsc = hwc->ptsc;
-       rdmsrl(MSR_F15H_CU_PWR_ACCUMULATOR, new_pwr_acc);
-       rdmsrl(MSR_F15H_PTSC, new_ptsc);
+       rdmsrq(MSR_F15H_CU_PWR_ACCUMULATOR, new_pwr_acc);
+       rdmsrq(MSR_F15H_PTSC, new_ptsc);
 
        /*
         * Calculate the CU power consumption over a time period, the unit of
@@ -75,8 +75,8 @@ static void __pmu_event_start(struct perf_event *event)
 
        event->hw.state = 0;
 
-       rdmsrl(MSR_F15H_PTSC, event->hw.ptsc);
-       rdmsrl(MSR_F15H_CU_PWR_ACCUMULATOR, event->hw.pwr_acc);
+       rdmsrq(MSR_F15H_PTSC, event->hw.ptsc);
+       rdmsrq(MSR_F15H_CU_PWR_ACCUMULATOR, event->hw.pwr_acc);
 }
 
 static void pmu_event_start(struct perf_event *event, int mode)
index 49c26ce2b115229be977c0d9e7488921db2fca75..4ddf5429a0637aa34652d2fcba446416a5c56153 100644 (file)
@@ -106,7 +106,7 @@ static void amd_uncore_read(struct perf_event *event)
         * read counts directly from the corresponding PERF_CTR.
         */
        if (hwc->event_base_rdpmc < 0)
-               rdmsrl(hwc->event_base, new);
+               rdmsrq(hwc->event_base, new);
        else
                rdpmcl(hwc->event_base_rdpmc, new);
 
index 6866cc5acb0b57f2995b059382440fc36f6eba57..a27f2e680070aaaab6a00ac0da8e835ca19ff806 100644 (file)
@@ -693,7 +693,7 @@ void x86_pmu_disable_all(void)
 
                if (!test_bit(idx, cpuc->active_mask))
                        continue;
-               rdmsrl(x86_pmu_config_addr(idx), val);
+               rdmsrq(x86_pmu_config_addr(idx), val);
                if (!(val & ARCH_PERFMON_EVENTSEL_ENABLE))
                        continue;
                val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
@@ -1550,10 +1550,10 @@ void perf_event_print_debug(void)
                return;
 
        if (x86_pmu.version >= 2) {
-               rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
-               rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
-               rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
-               rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);
+               rdmsrq(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
+               rdmsrq(MSR_CORE_PERF_GLOBAL_STATUS, status);
+               rdmsrq(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
+               rdmsrq(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);
 
                pr_info("\n");
                pr_info("CPU#%d: ctrl:       %016llx\n", cpu, ctrl);
@@ -1561,19 +1561,19 @@ void perf_event_print_debug(void)
                pr_info("CPU#%d: overflow:   %016llx\n", cpu, overflow);
                pr_info("CPU#%d: fixed:      %016llx\n", cpu, fixed);
                if (pebs_constraints) {
-                       rdmsrl(MSR_IA32_PEBS_ENABLE, pebs);
+                       rdmsrq(MSR_IA32_PEBS_ENABLE, pebs);
                        pr_info("CPU#%d: pebs:       %016llx\n", cpu, pebs);
                }
                if (x86_pmu.lbr_nr) {
-                       rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
+                       rdmsrq(MSR_IA32_DEBUGCTLMSR, debugctl);
                        pr_info("CPU#%d: debugctl:   %016llx\n", cpu, debugctl);
                }
        }
        pr_info("CPU#%d: active:     %016llx\n", cpu, *(u64 *)cpuc->active_mask);
 
        for_each_set_bit(idx, cntr_mask, X86_PMC_IDX_MAX) {
-               rdmsrl(x86_pmu_config_addr(idx), pmc_ctrl);
-               rdmsrl(x86_pmu_event_addr(idx), pmc_count);
+               rdmsrq(x86_pmu_config_addr(idx), pmc_ctrl);
+               rdmsrq(x86_pmu_event_addr(idx), pmc_count);
 
                prev_left = per_cpu(pmc_prev_left[idx], cpu);
 
@@ -1587,7 +1587,7 @@ void perf_event_print_debug(void)
        for_each_set_bit(idx, fixed_cntr_mask, X86_PMC_IDX_MAX) {
                if (fixed_counter_disabled(idx, cpuc->pmu))
                        continue;
-               rdmsrl(x86_pmu_fixed_ctr_addr(idx), pmc_count);
+               rdmsrq(x86_pmu_fixed_ctr_addr(idx), pmc_count);
 
                pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
                        cpu, idx, pmc_count);
index 09d2d66c9f21fafdb704c29696c7e0bcf4e21822..852b1ea9bc1707b13022f005481e2943b4ca3278 100644 (file)
@@ -2489,7 +2489,7 @@ static inline u64 intel_pmu_get_status(void)
 {
        u64 status;
 
-       rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
+       rdmsrq(MSR_CORE_PERF_GLOBAL_STATUS, status);
 
        return status;
 }
@@ -5054,7 +5054,7 @@ static void update_pmu_cap(struct x86_hybrid_pmu *pmu)
 
        if (!intel_pmu_broken_perf_cap()) {
                /* Perf Metric (Bit 15) and PEBS via PT (Bit 16) are hybrid enumeration */
-               rdmsrl(MSR_IA32_PERF_CAPABILITIES, pmu->intel_cap.capabilities);
+               rdmsrq(MSR_IA32_PERF_CAPABILITIES, pmu->intel_cap.capabilities);
        }
 }
 
@@ -5202,7 +5202,7 @@ static void intel_pmu_cpu_starting(int cpu)
        if (!is_hybrid() && x86_pmu.intel_cap.perf_metrics) {
                union perf_capabilities perf_cap;
 
-               rdmsrl(MSR_IA32_PERF_CAPABILITIES, perf_cap.capabilities);
+               rdmsrq(MSR_IA32_PERF_CAPABILITIES, perf_cap.capabilities);
                if (!perf_cap.perf_metrics) {
                        x86_pmu.intel_cap.perf_metrics = 0;
                        x86_pmu.intel_ctrl &= ~(1ULL << GLOBAL_CTRL_EN_PERF_METRICS);
@@ -5627,7 +5627,7 @@ static bool check_msr(unsigned long msr, u64 mask)
 
        /*
         * Quirk only affects validation in wrmsr(), so wrmsrl()'s value
-        * should equal rdmsrl()'s even with the quirk.
+        * should equal rdmsrq()'s even with the quirk.
         */
        if (val_new != val_tmp)
                return false;
@@ -6642,7 +6642,7 @@ __init int intel_pmu_init(void)
        if (boot_cpu_has(X86_FEATURE_PDCM)) {
                u64 capabilities;
 
-               rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
+               rdmsrq(MSR_IA32_PERF_CAPABILITIES, capabilities);
                x86_pmu.intel_cap.capabilities = capabilities;
        }
 
index ae4ec16156bb0f63869bc44c00df5b79f54512a9..56b1c391ccc731e0526edeb4ab556ca0c1f9c061 100644 (file)
@@ -320,7 +320,7 @@ static inline u64 cstate_pmu_read_counter(struct perf_event *event)
 {
        u64 val;
 
-       rdmsrl(event->hw.event_base, val);
+       rdmsrq(event->hw.event_base, val);
        return val;
 }
 
index 034a1f6a457c6985e44efd2a800db85cc562555b..0ee32a0b5bfdebd9781e9c8e8c49ce01f9575945 100644 (file)
@@ -159,7 +159,7 @@ static void knc_pmu_disable_all(void)
 {
        u64 val;
 
-       rdmsrl(MSR_KNC_IA32_PERF_GLOBAL_CTRL, val);
+       rdmsrq(MSR_KNC_IA32_PERF_GLOBAL_CTRL, val);
        val &= ~(KNC_ENABLE_COUNTER0|KNC_ENABLE_COUNTER1);
        wrmsrl(MSR_KNC_IA32_PERF_GLOBAL_CTRL, val);
 }
@@ -168,7 +168,7 @@ static void knc_pmu_enable_all(int added)
 {
        u64 val;
 
-       rdmsrl(MSR_KNC_IA32_PERF_GLOBAL_CTRL, val);
+       rdmsrq(MSR_KNC_IA32_PERF_GLOBAL_CTRL, val);
        val |= (KNC_ENABLE_COUNTER0|KNC_ENABLE_COUNTER1);
        wrmsrl(MSR_KNC_IA32_PERF_GLOBAL_CTRL, val);
 }
@@ -200,7 +200,7 @@ static inline u64 knc_pmu_get_status(void)
 {
        u64 status;
 
-       rdmsrl(MSR_KNC_IA32_PERF_GLOBAL_STATUS, status);
+       rdmsrq(MSR_KNC_IA32_PERF_GLOBAL_STATUS, status);
 
        return status;
 }
index f44c3d866f248ca7a91d19e12babbc4bcc0024ce..2b33aacb598385770119ca2c99c1c0c88ad01d50 100644 (file)
@@ -139,7 +139,7 @@ static void __intel_pmu_lbr_enable(bool pmi)
        if (!static_cpu_has(X86_FEATURE_ARCH_LBR) && !pmi && cpuc->lbr_sel)
                wrmsrl(MSR_LBR_SELECT, lbr_select);
 
-       rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
+       rdmsrq(MSR_IA32_DEBUGCTLMSR, debugctl);
        orig_debugctl = debugctl;
 
        if (!static_cpu_has(X86_FEATURE_ARCH_LBR))
@@ -209,7 +209,7 @@ static inline u64 intel_pmu_lbr_tos(void)
 {
        u64 tos;
 
-       rdmsrl(x86_pmu.lbr_tos, tos);
+       rdmsrq(x86_pmu.lbr_tos, tos);
        return tos;
 }
 
@@ -302,7 +302,7 @@ static __always_inline u64 rdlbr_from(unsigned int idx, struct lbr_entry *lbr)
        if (lbr)
                return lbr->from;
 
-       rdmsrl(x86_pmu.lbr_from + idx, val);
+       rdmsrq(x86_pmu.lbr_from + idx, val);
 
        return lbr_from_signext_quirk_rd(val);
 }
@@ -314,7 +314,7 @@ static __always_inline u64 rdlbr_to(unsigned int idx, struct lbr_entry *lbr)
        if (lbr)
                return lbr->to;
 
-       rdmsrl(x86_pmu.lbr_to + idx, val);
+       rdmsrq(x86_pmu.lbr_to + idx, val);
 
        return val;
 }
@@ -326,7 +326,7 @@ static __always_inline u64 rdlbr_info(unsigned int idx, struct lbr_entry *lbr)
        if (lbr)
                return lbr->info;
 
-       rdmsrl(x86_pmu.lbr_info + idx, val);
+       rdmsrq(x86_pmu.lbr_info + idx, val);
 
        return val;
 }
@@ -475,7 +475,7 @@ void intel_pmu_lbr_save(void *ctx)
        task_ctx->tos = tos;
 
        if (cpuc->lbr_select)
-               rdmsrl(MSR_LBR_SELECT, task_ctx->lbr_sel);
+               rdmsrq(MSR_LBR_SELECT, task_ctx->lbr_sel);
 }
 
 static void intel_pmu_arch_lbr_save(void *ctx)
@@ -752,7 +752,7 @@ void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc)
                        u64     lbr;
                } msr_lastbranch;
 
-               rdmsrl(x86_pmu.lbr_from + lbr_idx, msr_lastbranch.lbr);
+               rdmsrq(x86_pmu.lbr_from + lbr_idx, msr_lastbranch.lbr);
 
                perf_clear_branch_entry_bitfields(br);
 
index c85a9fc4435545c5cba1fe5d272d36e36f61685f..a0eb63b6c85986b8224450fd31042d6f057c32a8 100644 (file)
@@ -859,7 +859,7 @@ static inline int p4_pmu_clear_cccr_ovf(struct hw_perf_event *hwc)
        u64 v;
 
        /* an official way for overflow indication */
-       rdmsrl(hwc->config_base, v);
+       rdmsrq(hwc->config_base, v);
        if (v & P4_CCCR_OVF) {
                wrmsrl(hwc->config_base, v & ~P4_CCCR_OVF);
                return 1;
@@ -872,7 +872,7 @@ static inline int p4_pmu_clear_cccr_ovf(struct hw_perf_event *hwc)
         * the counter has reached zero value and continued counting before
         * real NMI signal was received:
         */
-       rdmsrl(hwc->event_base, v);
+       rdmsrq(hwc->event_base, v);
        if (!(v & ARCH_P4_UNFLAGGED_BIT))
                return 1;
 
index 65b45e9d7016ac335e359de1b8a60bba30b24e9f..701f8a8ab0bac6edcc36a68c25f667fe27c8767c 100644 (file)
@@ -142,7 +142,7 @@ static void p6_pmu_disable_all(void)
        u64 val;
 
        /* p6 only has one enable register */
-       rdmsrl(MSR_P6_EVNTSEL0, val);
+       rdmsrq(MSR_P6_EVNTSEL0, val);
        val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
        wrmsrl(MSR_P6_EVNTSEL0, val);
 }
@@ -152,7 +152,7 @@ static void p6_pmu_enable_all(int added)
        unsigned long val;
 
        /* p6 only has one enable register */
-       rdmsrl(MSR_P6_EVNTSEL0, val);
+       rdmsrq(MSR_P6_EVNTSEL0, val);
        val |= ARCH_PERFMON_EVENTSEL_ENABLE;
        wrmsrl(MSR_P6_EVNTSEL0, val);
 }
index fa37565f6418632c18cd21eb3f7428e86ee72f15..b8212d8d017411cb388e4c26769690ba46df0582 100644 (file)
@@ -194,7 +194,7 @@ static int __init pt_pmu_hw_init(void)
        int ret;
        long i;
 
-       rdmsrl(MSR_PLATFORM_INFO, reg);
+       rdmsrq(MSR_PLATFORM_INFO, reg);
        pt_pmu.max_nonturbo_ratio = (reg & 0xff00) >> 8;
 
        /*
@@ -230,7 +230,7 @@ static int __init pt_pmu_hw_init(void)
                 * "IA32_VMX_MISC[bit 14]" being 1 means PT can trace
                 * post-VMXON.
                 */
-               rdmsrl(MSR_IA32_VMX_MISC, reg);
+               rdmsrq(MSR_IA32_VMX_MISC, reg);
                if (reg & BIT(14))
                        pt_pmu.vmx = true;
        }
@@ -926,7 +926,7 @@ static void pt_handle_status(struct pt *pt)
        int advance = 0;
        u64 status;
 
-       rdmsrl(MSR_IA32_RTIT_STATUS, status);
+       rdmsrq(MSR_IA32_RTIT_STATUS, status);
 
        if (status & RTIT_STATUS_ERROR) {
                pr_err_ratelimited("ToPA ERROR encountered, trying to recover\n");
@@ -985,12 +985,12 @@ static void pt_read_offset(struct pt_buffer *buf)
        struct topa_page *tp;
 
        if (!buf->single) {
-               rdmsrl(MSR_IA32_RTIT_OUTPUT_BASE, pt->output_base);
+               rdmsrq(MSR_IA32_RTIT_OUTPUT_BASE, pt->output_base);
                tp = phys_to_virt(pt->output_base);
                buf->cur = &tp->topa;
        }
 
-       rdmsrl(MSR_IA32_RTIT_OUTPUT_MASK, pt->output_mask);
+       rdmsrq(MSR_IA32_RTIT_OUTPUT_MASK, pt->output_mask);
        /* offset within current output region */
        buf->output_off = pt->output_mask >> 32;
        /* index of current output region within this table */
@@ -1611,7 +1611,7 @@ static void pt_event_start(struct perf_event *event, int mode)
                         * PMI might have just cleared these, so resume_allowed
                         * must be checked again also.
                         */
-                       rdmsrl(MSR_IA32_RTIT_STATUS, status);
+                       rdmsrq(MSR_IA32_RTIT_STATUS, status);
                        if (!(status & (RTIT_STATUS_TRIGGEREN |
                                        RTIT_STATUS_ERROR |
                                        RTIT_STATUS_STOPPED)) &&
index a34e50fc4a8fc723111e1ba065c74de8d8fbd2d1..d6070529c58e752d0ac81ad620e8e89596b84662 100644 (file)
@@ -150,7 +150,7 @@ u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *eve
 {
        u64 count;
 
-       rdmsrl(event->hw.event_base, count);
+       rdmsrq(event->hw.event_base, count);
 
        return count;
 }
index 466833478e81fb6ae624d980b9d9bbe581711a20..e6fccad9708df764dcf4e5320fe5f1a7ed961e77 100644 (file)
@@ -214,7 +214,7 @@ static void nhmex_uncore_msr_disable_box(struct intel_uncore_box *box)
        u64 config;
 
        if (msr) {
-               rdmsrl(msr, config);
+               rdmsrq(msr, config);
                config &= ~((1ULL << uncore_num_counters(box)) - 1);
                /* WBox has a fixed counter */
                if (uncore_msr_fixed_ctl(box))
@@ -229,7 +229,7 @@ static void nhmex_uncore_msr_enable_box(struct intel_uncore_box *box)
        u64 config;
 
        if (msr) {
-               rdmsrl(msr, config);
+               rdmsrq(msr, config);
                config |= (1ULL << uncore_num_counters(box)) - 1;
                /* WBox has a fixed counter */
                if (uncore_msr_fixed_ctl(box))
index edb7fd50efe021ce5e065274b70578514c01e653..66dadf58799b05ea9ed888eb67d9abcc573b7676 100644 (file)
@@ -504,7 +504,7 @@ static int icl_get_cbox_num(void)
 {
        u64 num_boxes;
 
-       rdmsrl(ICL_UNC_CBO_CONFIG, num_boxes);
+       rdmsrq(ICL_UNC_CBO_CONFIG, num_boxes);
 
        return num_boxes & ICL_UNC_NUM_CBO_MASK;
 }
index 60973c209c0e641cd133963f347ffcd1460ba4d0..e498485e0ae9acc4365d801921494f66d3b5cbd2 100644 (file)
@@ -618,7 +618,7 @@ static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box)
 
        msr = uncore_msr_box_ctl(box);
        if (msr) {
-               rdmsrl(msr, config);
+               rdmsrq(msr, config);
                config |= SNBEP_PMON_BOX_CTL_FRZ;
                wrmsrl(msr, config);
        }
@@ -631,7 +631,7 @@ static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box)
 
        msr = uncore_msr_box_ctl(box);
        if (msr) {
-               rdmsrl(msr, config);
+               rdmsrq(msr, config);
                config &= ~SNBEP_PMON_BOX_CTL_FRZ;
                wrmsrl(msr, config);
        }
@@ -6572,7 +6572,7 @@ void spr_uncore_cpu_init(void)
                 * of UNCORE_SPR_CHA) is incorrect on some SPR variants because of a
                 * firmware bug. Using the value from SPR_MSR_UNC_CBO_CONFIG to replace it.
                 */
-               rdmsrl(SPR_MSR_UNC_CBO_CONFIG, num_cbo);
+               rdmsrq(SPR_MSR_UNC_CBO_CONFIG, num_cbo);
                /*
                 * The MSR doesn't work on the EMR XCC, but the firmware bug doesn't impact
                 * the EMR XCC. Don't let the value from the MSR replace the existing value.
index 45b1866ff0513d2920c5371bbdb206e12a0ac2ed..8970ecef87c535ee06adb0e7a0a251356edff007 100644 (file)
@@ -231,7 +231,7 @@ static inline u64 msr_read_counter(struct perf_event *event)
        u64 now;
 
        if (event->hw.event_base)
-               rdmsrl(event->hw.event_base, now);
+               rdmsrq(event->hw.event_base, now);
        else
                now = rdtsc_ordered();
 
index 2c0ce0e9545e50ddfa217cf9e9a2dac48d30ae6e..34b6d35e76e8c4db25a061ae4872e4db6ec5913f 100644 (file)
@@ -1394,11 +1394,11 @@ static __always_inline void __amd_pmu_lbr_disable(void)
 {
        u64 dbg_ctl, dbg_extn_cfg;
 
-       rdmsrl(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg);
+       rdmsrq(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg);
        wrmsrl(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg & ~DBG_EXTN_CFG_LBRV2EN);
 
        if (cpu_feature_enabled(X86_FEATURE_AMD_LBR_PMC_FREEZE)) {
-               rdmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl);
+               rdmsrq(MSR_IA32_DEBUGCTLMSR, dbg_ctl);
                wrmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl & ~DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
        }
 }
@@ -1543,7 +1543,7 @@ static __always_inline void __intel_pmu_lbr_disable(void)
 {
        u64 debugctl;
 
-       rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
+       rdmsrq(MSR_IA32_DEBUGCTLMSR, debugctl);
        debugctl &= ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
        wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
 }
index 8ddace8cea96c36643d54440d53b941e0571771a..b7c256a2e20186bf1501c2cc3bc2325974df2fbf 100644 (file)
@@ -192,7 +192,7 @@ static inline unsigned int get_rapl_pmu_idx(int cpu, int scope)
 static inline u64 rapl_read_counter(struct perf_event *event)
 {
        u64 raw;
-       rdmsrl(event->hw.event_base, raw);
+       rdmsrq(event->hw.event_base, raw);
        return raw;
 }
 
@@ -221,7 +221,7 @@ static u64 rapl_event_update(struct perf_event *event)
 
        prev_raw_count = local64_read(&hwc->prev_count);
        do {
-               rdmsrl(event->hw.event_base, new_raw_count);
+               rdmsrq(event->hw.event_base, new_raw_count);
        } while (!local64_try_cmpxchg(&hwc->prev_count,
                                      &prev_raw_count, new_raw_count));
 
@@ -610,7 +610,7 @@ static int rapl_check_hw_unit(void)
        u64 msr_rapl_power_unit_bits;
        int i;
 
-       /* protect rdmsrl() to handle virtualization */
+       /* protect rdmsrq() to handle virtualization */
        if (rdmsrl_safe(rapl_model->msr_power_unit, &msr_rapl_power_unit_bits))
                return -1;
        for (i = 0; i < NR_RAPL_PKG_DOMAINS; i++)
index 2fd9b0cf9a5e574fd6728088cf4b55ad8299f76f..67ff23f6bcbafa654f21ac044afdbdb79bdbad5b 100644 (file)
@@ -266,7 +266,7 @@ static inline u64 zhaoxin_pmu_get_status(void)
 {
        u64 status;
 
-       rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
+       rdmsrq(MSR_CORE_PERF_GLOBAL_STATUS, status);
 
        return status;
 }
@@ -293,7 +293,7 @@ static void zhaoxin_pmu_disable_fixed(struct hw_perf_event *hwc)
 
        mask = 0xfULL << (idx * 4);
 
-       rdmsrl(hwc->config_base, ctrl_val);
+       rdmsrq(hwc->config_base, ctrl_val);
        ctrl_val &= ~mask;
        wrmsrl(hwc->config_base, ctrl_val);
 }
@@ -329,7 +329,7 @@ static void zhaoxin_pmu_enable_fixed(struct hw_perf_event *hwc)
        bits <<= (idx * 4);
        mask = 0xfULL << (idx * 4);
 
-       rdmsrl(hwc->config_base, ctrl_val);
+       rdmsrq(hwc->config_base, ctrl_val);
        ctrl_val &= ~mask;
        ctrl_val |= bits;
        wrmsrl(hwc->config_base, ctrl_val);
index 6d91ac5f983606c6df4ca2cac0886271c6569f55..d1d6aa5fb17bea6c76cac3ef44e0df6d18b2fbc1 100644 (file)
@@ -37,7 +37,7 @@ static u64 hv_apic_icr_read(void)
 {
        u64 reg_val;
 
-       rdmsrl(HV_X64_MSR_ICR, reg_val);
+       rdmsrq(HV_X64_MSR_ICR, reg_val);
        return reg_val;
 }
 
index ddeb40930bc8027e06fe586eabb9d82b86dc54b8..7c58affa129b7ecb96b0d08d6e085db6b514446f 100644 (file)
@@ -62,7 +62,7 @@ static int hyperv_init_ghcb(void)
         * returned by MSR_AMD64_SEV_ES_GHCB is above shared
         * memory boundary and map it here.
         */
-       rdmsrl(MSR_AMD64_SEV_ES_GHCB, ghcb_gpa);
+       rdmsrq(MSR_AMD64_SEV_ES_GHCB, ghcb_gpa);
 
        /* Mask out vTOM bit. ioremap_cache() maps decrypted */
        ghcb_gpa &= ~ms_hyperv.shared_gpa_boundary;
@@ -95,7 +95,7 @@ static int hv_cpu_init(unsigned int cpu)
                 * For root partition we get the hypervisor provided VP assist
                 * page, instead of allocating a new page.
                 */
-               rdmsrl(HV_X64_MSR_VP_ASSIST_PAGE, msr.as_uint64);
+               rdmsrq(HV_X64_MSR_VP_ASSIST_PAGE, msr.as_uint64);
                *hvp = memremap(msr.pfn << HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_SHIFT,
                                PAGE_SIZE, MEMREMAP_WB);
        } else {
@@ -140,7 +140,7 @@ static void hv_reenlightenment_notify(struct work_struct *dummy)
 {
        struct hv_tsc_emulation_status emu_status;
 
-       rdmsrl(HV_X64_MSR_TSC_EMULATION_STATUS, *(u64 *)&emu_status);
+       rdmsrq(HV_X64_MSR_TSC_EMULATION_STATUS, *(u64 *)&emu_status);
 
        /* Don't issue the callback if TSC accesses are not emulated */
        if (hv_reenlightenment_cb && emu_status.inprogress)
@@ -153,11 +153,11 @@ void hyperv_stop_tsc_emulation(void)
        u64 freq;
        struct hv_tsc_emulation_status emu_status;
 
-       rdmsrl(HV_X64_MSR_TSC_EMULATION_STATUS, *(u64 *)&emu_status);
+       rdmsrq(HV_X64_MSR_TSC_EMULATION_STATUS, *(u64 *)&emu_status);
        emu_status.inprogress = 0;
        wrmsrl(HV_X64_MSR_TSC_EMULATION_STATUS, *(u64 *)&emu_status);
 
-       rdmsrl(HV_X64_MSR_TSC_FREQUENCY, freq);
+       rdmsrq(HV_X64_MSR_TSC_FREQUENCY, freq);
        tsc_khz = div64_u64(freq, 1000);
 }
 EXPORT_SYMBOL_GPL(hyperv_stop_tsc_emulation);
@@ -217,7 +217,7 @@ void clear_hv_tscchange_cb(void)
        if (!hv_reenlightenment_available())
                return;
 
-       rdmsrl(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *(u64 *)&re_ctrl);
+       rdmsrq(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *(u64 *)&re_ctrl);
        re_ctrl.enabled = 0;
        wrmsrl(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *(u64 *)&re_ctrl);
 
@@ -251,7 +251,7 @@ static int hv_cpu_die(unsigned int cpu)
                         */
                        memunmap(hv_vp_assist_page[cpu]);
                        hv_vp_assist_page[cpu] = NULL;
-                       rdmsrl(HV_X64_MSR_VP_ASSIST_PAGE, msr.as_uint64);
+                       rdmsrq(HV_X64_MSR_VP_ASSIST_PAGE, msr.as_uint64);
                        msr.enable = 0;
                }
                wrmsrl(HV_X64_MSR_VP_ASSIST_PAGE, msr.as_uint64);
@@ -260,7 +260,7 @@ static int hv_cpu_die(unsigned int cpu)
        if (hv_reenlightenment_cb == NULL)
                return 0;
 
-       rdmsrl(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *((u64 *)&re_ctrl));
+       rdmsrq(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *((u64 *)&re_ctrl));
        if (re_ctrl.target_vp == hv_vp_index[cpu]) {
                /*
                 * Reassign reenlightenment notifications to some other online
@@ -331,7 +331,7 @@ static int hv_suspend(void)
        hv_hypercall_pg = NULL;
 
        /* Disable the hypercall page in the hypervisor */
-       rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
+       rdmsrq(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
        hypercall_msr.enable = 0;
        wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
 
@@ -348,7 +348,7 @@ static void hv_resume(void)
        WARN_ON(ret);
 
        /* Re-enable the hypercall page */
-       rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
+       rdmsrq(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
        hypercall_msr.enable = 1;
        hypercall_msr.guest_physical_address =
                vmalloc_to_pfn(hv_hypercall_pg_saved);
@@ -515,7 +515,7 @@ void __init hyperv_init(void)
        if (hv_hypercall_pg == NULL)
                goto clean_guest_os_id;
 
-       rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
+       rdmsrq(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
        hypercall_msr.enable = 1;
 
        if (hv_root_partition()) {
@@ -667,7 +667,7 @@ void hyperv_report_panic(struct pt_regs *regs, long err, bool in_die)
                return;
        panic_reported = true;
 
-       rdmsrl(HV_X64_MSR_GUEST_OS_ID, guest_id);
+       rdmsrq(HV_X64_MSR_GUEST_OS_ID, guest_id);
 
        wrmsrl(HV_X64_MSR_CRASH_P0, err);
        wrmsrl(HV_X64_MSR_CRASH_P1, guest_id);
@@ -701,7 +701,7 @@ bool hv_is_hyperv_initialized(void)
         * that the hypercall page is setup
         */
        hypercall_msr.as_uint64 = 0;
-       rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
+       rdmsrq(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
 
        return hypercall_msr.enable;
 }
index 151e851bef09dab2790d440da064046b10df2c14..626f6d4d625309292d90141e16394c19aa56cf21 100644 (file)
@@ -39,18 +39,18 @@ static void hv_qlock_wait(u8 *byte, u8 val)
         * To prevent a race against the unlock path it is required to
         * disable interrupts before accessing the HV_X64_MSR_GUEST_IDLE
         * MSR. Otherwise, if the IPI from hv_qlock_kick() arrives between
-        * the lock value check and the rdmsrl() then the vCPU might be put
+        * the lock value check and the rdmsrq() then the vCPU might be put
         * into 'idle' state by the hypervisor and kept in that state for
         * an unspecified amount of time.
         */
        local_irq_save(flags);
        /*
-        * Only issue the rdmsrl() when the lock state has not changed.
+        * Only issue the rdmsrq() when the lock state has not changed.
         */
        if (READ_ONCE(*byte) == val) {
                unsigned long msr_val;
 
-               rdmsrl(HV_X64_MSR_GUEST_IDLE, msr_val);
+               rdmsrq(HV_X64_MSR_GUEST_IDLE, msr_val);
 
                (void)msr_val;
        }
index c903d358405d389b808c6075cf6bfa6032710c2a..45819cfc043b4d419691950a723805e2c246021c 100644 (file)
@@ -224,7 +224,7 @@ static inline u32 native_apic_msr_read(u32 reg)
        if (reg == APIC_DFR)
                return -1;
 
-       rdmsrl(APIC_BASE_MSR + (reg >> 4), msr);
+       rdmsrq(APIC_BASE_MSR + (reg >> 4), msr);
        return (u32)msr;
 }
 
@@ -237,7 +237,7 @@ static inline u64 native_x2apic_icr_read(void)
 {
        unsigned long val;
 
-       rdmsrl(APIC_BASE_MSR + (APIC_ICR >> 4), val);
+       rdmsrq(APIC_BASE_MSR + (APIC_ICR >> 4), val);
        return val;
 }
 
index fdbbbfec745aa58855641ae59e17a3a50fc2696a..0e703c0ef192cdc392e92ab59e5c67180ab225e8 100644 (file)
@@ -169,7 +169,7 @@ static inline unsigned long get_debugctlmsr(void)
        if (boot_cpu_data.x86 < 6)
                return 0;
 #endif
-       rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
+       rdmsrq(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
 
        return debugctlmsr;
 }
index 02f239569b93d3ec0b9a36280cf9684bc2c7be75..d7708f2fd68fedc994c59572105dfe1b5cb3f7d6 100644 (file)
@@ -60,7 +60,7 @@ static inline unsigned long x86_fsbase_read_cpu(void)
        if (boot_cpu_has(X86_FEATURE_FSGSBASE))
                fsbase = rdfsbase();
        else
-               rdmsrl(MSR_FS_BASE, fsbase);
+               rdmsrq(MSR_FS_BASE, fsbase);
 
        return fsbase;
 }
index a884ab544335e7753b160e3a8d41d2512f2cd2cc..9af20b3c0f1db3e2931a25f2e3cfb0e05a6d6864 100644 (file)
@@ -2272,7 +2272,7 @@ static inline unsigned long read_msr(unsigned long msr)
 {
        u64 value;
 
-       rdmsrl(msr, value);
+       rdmsrq(msr, value);
        return value;
 }
 #endif
index 2f5a661719a403ed8f44b44b04e71ba6f66a0fe1..b35513a81b240f387b7cb3724bdd071056f09f3d 100644 (file)
@@ -255,7 +255,7 @@ static inline void wrmsr(u32 msr, u32 low, u32 high)
        native_write_msr(msr, low, high);
 }
 
-#define rdmsrl(msr, val)                       \
+#define rdmsrq(msr, val)                       \
        ((val) = native_read_msr((msr)))
 
 static inline void wrmsrl(u32 msr, u64 val)
@@ -352,7 +352,7 @@ static inline int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
 }
 static inline int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
 {
-       rdmsrl(msr_no, *q);
+       rdmsrq(msr_no, *q);
        return 0;
 }
 static inline int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
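
The hunk above renames the native definition in <asm/msr.h>. As a caller-side contrast with the two-register rdmsr(msr, lo, hi) form, which this commit leaves untouched, a sketch assuming a kernel context (the helper name is hypothetical):

	#include <linux/types.h>
	#include <asm/msr.h>

	/* Sketch: hypothetical helper contrasting the two read forms. */
	static u64 read_platform_info(void)
	{
		u32 lo, hi;
		u64 full;

		rdmsr(MSR_PLATFORM_INFO, lo, hi);	/* EDX:EAX as two u32 halves */
		rdmsrq(MSR_PLATFORM_INFO, full);	/* the same value as one u64 */

		return full;	/* equals ((u64)hi << 32) | lo */
	}
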
index c270ca050781330412499af6d93888e3c630fbb4..1cda83bbd5b76cb54c76a913e809337743b44862 100644 (file)
@@ -209,7 +209,7 @@ do {                                                \
        paravirt_write_msr(msr, val1, val2);    \
 } while (0)
 
-#define rdmsrl(msr, val)                       \
+#define rdmsrq(msr, val)                       \
 do {                                           \
        val = paravirt_read_msr(msr);           \
 } while (0)
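
Both backends are renamed in lockstep, so callers stay oblivious to whether the native or the paravirt read path is compiled in. A sketch under the assumption that <asm/msr.h> selects the <asm/paravirt.h> definition when CONFIG_PARAVIRT_XXL is enabled (the function name is hypothetical):

	#include <linux/types.h>
	#include <asm/msr.h>	/* paravirt variant under CONFIG_PARAVIRT_XXL */

	/* Sketch: the macro name is the stable API; the backend is a build-time choice. */
	static u64 read_spec_ctrl(void)
	{
		u64 val;

		rdmsrq(MSR_IA32_SPEC_CTRL, val);	/* native_read_msr() or paravirt_read_msr() */
		return val;
	}
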
index 62584a34793115d6ce5238ea3ffc9074edab6c35..dc9af058e5a9bb1b96b0b8b693d1a0207983b394 100644 (file)
@@ -1694,7 +1694,7 @@ static bool x2apic_hw_locked(void)
 
        x86_arch_cap_msr = x86_read_arch_cap_msr();
        if (x86_arch_cap_msr & ARCH_CAP_XAPIC_DISABLE) {
-               rdmsrl(MSR_IA32_XAPIC_DISABLE_STATUS, msr);
+               rdmsrq(MSR_IA32_XAPIC_DISABLE_STATUS, msr);
                return (msr & LEGACY_XAPIC_DISABLED);
        }
        return false;
@@ -1707,7 +1707,7 @@ static void __x2apic_disable(void)
        if (!boot_cpu_has(X86_FEATURE_APIC))
                return;
 
-       rdmsrl(MSR_IA32_APICBASE, msr);
+       rdmsrq(MSR_IA32_APICBASE, msr);
        if (!(msr & X2APIC_ENABLE))
                return;
        /* Disable xapic and x2apic first and then reenable xapic mode */
@@ -1720,7 +1720,7 @@ static void __x2apic_enable(void)
 {
        u64 msr;
 
-       rdmsrl(MSR_IA32_APICBASE, msr);
+       rdmsrq(MSR_IA32_APICBASE, msr);
        if (msr & X2APIC_ENABLE)
                return;
        wrmsrl(MSR_IA32_APICBASE, msr | X2APIC_ENABLE);
index 16410f087b7af4f8390ee4973d9ee4bbd5679827..fcfef701c17a2dc7adfea97d5dd7801cad67d561 100644 (file)
@@ -31,7 +31,7 @@ static u32 numachip1_get_apic_id(u32 x)
        unsigned int id = (x >> 24) & 0xff;
 
        if (static_cpu_has(X86_FEATURE_NODEID_MSR)) {
-               rdmsrl(MSR_FAM10H_NODE_ID, value);
+               rdmsrq(MSR_FAM10H_NODE_ID, value);
                id |= (value << 2) & 0xff00;
        }
 
@@ -42,7 +42,7 @@ static u32 numachip2_get_apic_id(u32 x)
 {
        u64 mcfg;
 
-       rdmsrl(MSR_FAM10H_MMIO_CONF_BASE, mcfg);
+       rdmsrq(MSR_FAM10H_MMIO_CONF_BASE, mcfg);
        return ((mcfg >> (28 - 8)) & 0xfff00) | (x >> 24);
 }
 
@@ -150,7 +150,7 @@ static void fixup_cpu_id(struct cpuinfo_x86 *c, int node)
 
        /* Account for nodes per socket in multi-core-module processors */
        if (boot_cpu_has(X86_FEATURE_NODEID_MSR)) {
-               rdmsrl(MSR_FAM10H_NODE_ID, val);
+               rdmsrq(MSR_FAM10H_NODE_ID, val);
                nodes = ((val >> 3) & 7) + 1;
        }
 
index 303bf74d175b30c7045974386b945fed3fec8cff..d897aadd1d44a94ae8118850d12748fad1f5a9e3 100644 (file)
@@ -55,7 +55,7 @@ static void do_user_cp_fault(struct pt_regs *regs, unsigned long error_code)
         * will be whatever is live in userspace. So read the SSP before enabling
         * interrupts so locking the fpregs to do it later is not required.
         */
-       rdmsrl(MSR_IA32_PL3_SSP, ssp);
+       rdmsrq(MSR_IA32_PL3_SSP, ssp);
 
        cond_local_irq_enable(regs);
 
index b2ab2369c9cb4e71e813a558bc814834af18f86f..7c7eca7451b6b79f8bdcefaff37885908b0748a8 100644 (file)
@@ -383,7 +383,7 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
                    (c->x86 == 0x10 && c->x86_model >= 0x2)) {
                        u64 val;
 
-                       rdmsrl(MSR_K7_HWCR, val);
+                       rdmsrq(MSR_K7_HWCR, val);
                        if (!(val & BIT(24)))
                                pr_warn(FW_BUG "TSC doesn't count with P0 frequency!\n");
                }
@@ -508,7 +508,7 @@ static void early_detect_mem_encrypt(struct cpuinfo_x86 *c)
         */
        if (cpu_has(c, X86_FEATURE_SME) || cpu_has(c, X86_FEATURE_SEV)) {
                /* Check if memory encryption is enabled */
-               rdmsrl(MSR_AMD64_SYSCFG, msr);
+               rdmsrq(MSR_AMD64_SYSCFG, msr);
                if (!(msr & MSR_AMD64_SYSCFG_MEM_ENCRYPT))
                        goto clear_all;
 
@@ -525,7 +525,7 @@ static void early_detect_mem_encrypt(struct cpuinfo_x86 *c)
                if (!sme_me_mask)
                        setup_clear_cpu_cap(X86_FEATURE_SME);
 
-               rdmsrl(MSR_K7_HWCR, msr);
+               rdmsrq(MSR_K7_HWCR, msr);
                if (!(msr & MSR_K7_HWCR_SMMLOCK))
                        goto clear_sev;
 
@@ -1014,7 +1014,7 @@ static void init_amd(struct cpuinfo_x86 *c)
        init_amd_cacheinfo(c);
 
        if (cpu_has(c, X86_FEATURE_SVM)) {
-               rdmsrl(MSR_VM_CR, vm_cr);
+               rdmsrq(MSR_VM_CR, vm_cr);
                if (vm_cr & SVM_VM_CR_SVM_DIS_MASK) {
                        pr_notice_once("SVM disabled (by BIOS) in MSR_VM_CR\n");
                        clear_cpu_cap(c, X86_FEATURE_SVM);
index 6cf31a1649c4b96a5fa20816e980c708b342c799..dca78650d99e2647eb92753a5212b3339259bd65 100644 (file)
@@ -40,8 +40,8 @@ static void init_counter_refs(void)
 {
        u64 aperf, mperf;
 
-       rdmsrl(MSR_IA32_APERF, aperf);
-       rdmsrl(MSR_IA32_MPERF, mperf);
+       rdmsrq(MSR_IA32_APERF, aperf);
+       rdmsrq(MSR_IA32_MPERF, mperf);
 
        this_cpu_write(cpu_samples.aperf, aperf);
        this_cpu_write(cpu_samples.mperf, mperf);
@@ -474,8 +474,8 @@ void arch_scale_freq_tick(void)
        if (!cpu_feature_enabled(X86_FEATURE_APERFMPERF))
                return;
 
-       rdmsrl(MSR_IA32_APERF, aperf);
-       rdmsrl(MSR_IA32_MPERF, mperf);
+       rdmsrq(MSR_IA32_APERF, aperf);
+       rdmsrq(MSR_IA32_MPERF, mperf);
        acnt = aperf - s->aperf;
        mcnt = mperf - s->mperf;
 
index 4386aa6c69e12c9a8d66758e9f7cfff816ccbbe3..78ffe0ebdff250d4f68b2c8c813f9b39b7b30162 100644 (file)
@@ -140,7 +140,7 @@ void __init cpu_select_mitigations(void)
         * init code as it is not enumerated and depends on the family.
         */
        if (cpu_feature_enabled(X86_FEATURE_MSR_SPEC_CTRL)) {
-               rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+               rdmsrq(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
 
                /*
                 * Previously running kernel (kexec), may have some controls
@@ -656,7 +656,7 @@ void update_srbds_msr(void)
        if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL))
                return;
 
-       rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
+       rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
 
        switch (srbds_mitigation) {
        case SRBDS_MITIGATION_OFF:
@@ -776,7 +776,7 @@ void update_gds_msr(void)
 
        switch (gds_mitigation) {
        case GDS_MITIGATION_OFF:
-               rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
+               rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
                mcu_ctrl |= GDS_MITG_DIS;
                break;
        case GDS_MITIGATION_FULL_LOCKED:
@@ -786,7 +786,7 @@ void update_gds_msr(void)
                 * CPUs.
                 */
        case GDS_MITIGATION_FULL:
-               rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
+               rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
                mcu_ctrl &= ~GDS_MITG_DIS;
                break;
        case GDS_MITIGATION_FORCE:
@@ -802,7 +802,7 @@ void update_gds_msr(void)
         * GDS_MITG_DIS will be ignored if this processor is locked but the boot
         * processor was not.
         */
-       rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl_after);
+       rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl_after);
        WARN_ON_ONCE(mcu_ctrl != mcu_ctrl_after);
 }
 
@@ -841,7 +841,7 @@ static void __init gds_select_mitigation(void)
        if (gds_mitigation == GDS_MITIGATION_FORCE)
                gds_mitigation = GDS_MITIGATION_FULL;
 
-       rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
+       rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
        if (mcu_ctrl & GDS_MITG_LOCKED) {
                if (gds_mitigation == GDS_MITIGATION_OFF)
                        pr_warn("Mitigation locked. Disable failed.\n");
index 237faf7e700cb4a97a59a94374367c3e9390577b..725072b8281603fbbcaab6a158fc95e4c36b6ba3 100644 (file)
@@ -103,7 +103,7 @@ static bool split_lock_verify_msr(bool on)
                ctrl &= ~MSR_TEST_CTRL_SPLIT_LOCK_DETECT;
        if (wrmsrl_safe(MSR_TEST_CTRL, ctrl))
                return false;
-       rdmsrl(MSR_TEST_CTRL, tmp);
+       rdmsrq(MSR_TEST_CTRL, tmp);
        return ctrl == tmp;
 }
 
@@ -137,7 +137,7 @@ static void __init __split_lock_setup(void)
                return;
        }
 
-       rdmsrl(MSR_TEST_CTRL, msr_test_ctrl_cache);
+       rdmsrq(MSR_TEST_CTRL, msr_test_ctrl_cache);
 
        if (!split_lock_verify_msr(true)) {
                pr_info("MSR access failed: Disabled\n");
@@ -297,7 +297,7 @@ void bus_lock_init(void)
        if (!boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT))
                return;
 
-       rdmsrl(MSR_IA32_DEBUGCTLMSR, val);
+       rdmsrq(MSR_IA32_DEBUGCTLMSR, val);
 
        if ((boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT) &&
            (sld_state == sld_warn || sld_state == sld_fatal)) ||
@@ -375,7 +375,7 @@ static void __init split_lock_setup(struct cpuinfo_x86 *c)
         * MSR_IA32_CORE_CAPS_SPLIT_LOCK_DETECT is.  All CPUs that set
         * it have split lock detection.
         */
-       rdmsrl(MSR_IA32_CORE_CAPS, ia32_core_caps);
+       rdmsrq(MSR_IA32_CORE_CAPS, ia32_core_caps);
        if (ia32_core_caps & MSR_IA32_CORE_CAPS_SPLIT_LOCK_DETECT)
                goto supported;
 
index 12126adbc3a9a7e2b767e024ececd3940098f3e8..c3c0ba239828c51690aa5920ac1ff701eee46bf1 100644 (file)
@@ -562,7 +562,7 @@ __noendbr u64 ibt_save(bool disable)
        u64 msr = 0;
 
        if (cpu_feature_enabled(X86_FEATURE_IBT)) {
-               rdmsrl(MSR_IA32_S_CET, msr);
+               rdmsrq(MSR_IA32_S_CET, msr);
                if (disable)
                        wrmsrl(MSR_IA32_S_CET, msr & ~CET_ENDBR_EN);
        }
@@ -575,7 +575,7 @@ __noendbr void ibt_restore(u64 save)
        u64 msr;
 
        if (cpu_feature_enabled(X86_FEATURE_IBT)) {
-               rdmsrl(MSR_IA32_S_CET, msr);
+               rdmsrq(MSR_IA32_S_CET, msr);
                msr &= ~CET_ENDBR_EN;
                msr |= (save & CET_ENDBR_EN);
                wrmsrl(MSR_IA32_S_CET, msr);
@@ -1288,7 +1288,7 @@ u64 x86_read_arch_cap_msr(void)
        u64 x86_arch_cap_msr = 0;
 
        if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
-               rdmsrl(MSR_IA32_ARCH_CAPABILITIES, x86_arch_cap_msr);
+               rdmsrq(MSR_IA32_ARCH_CAPABILITIES, x86_arch_cap_msr);
 
        return x86_arch_cap_msr;
 }
@@ -1749,10 +1749,10 @@ static bool detect_null_seg_behavior(void)
         */
 
        unsigned long old_base, tmp;
-       rdmsrl(MSR_FS_BASE, old_base);
+       rdmsrq(MSR_FS_BASE, old_base);
        wrmsrl(MSR_FS_BASE, 1);
        loadsegment(fs, 0);
-       rdmsrl(MSR_FS_BASE, tmp);
+       rdmsrq(MSR_FS_BASE, tmp);
        wrmsrl(MSR_FS_BASE, old_base);
        return tmp == 0;
 }
index 6af4a4a90a521f2c0d3b5d7eca328d95cd82e8af..10eeda3ba6ae032d10a6fbb8a2775d0f882902c3 100644 (file)
@@ -96,7 +96,7 @@ static void bsp_init_hygon(struct cpuinfo_x86 *c)
        if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
                u64 val;
 
-               rdmsrl(MSR_K7_HWCR, val);
+               rdmsrq(MSR_K7_HWCR, val);
                if (!(val & BIT(24)))
                        pr_warn(FW_BUG "TSC doesn't count with P0 frequency!\n");
        }
@@ -194,7 +194,7 @@ static void init_hygon(struct cpuinfo_x86 *c)
        init_hygon_cacheinfo(c);
 
        if (cpu_has(c, X86_FEATURE_SVM)) {
-               rdmsrl(MSR_VM_CR, vm_cr);
+               rdmsrq(MSR_VM_CR, vm_cr);
                if (vm_cr & SVM_VM_CR_SVM_DIS_MASK) {
                        pr_notice_once("SVM disabled (by BIOS) in MSR_VM_CR\n");
                        clear_cpu_cap(c, X86_FEATURE_SVM);
index cdc9813871ef144b091eeb0616b7851ba4ff3ecb..e9a6e0d00819b4cb1ec3b9b6a745d3b5c6c5e824 100644 (file)
@@ -157,7 +157,7 @@ static void detect_tme_early(struct cpuinfo_x86 *c)
        u64 tme_activate;
        int keyid_bits;
 
-       rdmsrl(MSR_IA32_TME_ACTIVATE, tme_activate);
+       rdmsrq(MSR_IA32_TME_ACTIVATE, tme_activate);
 
        if (!TME_ACTIVATE_LOCKED(tme_activate) || !TME_ACTIVATE_ENABLED(tme_activate)) {
                pr_info_once("x86/tme: not enabled by BIOS\n");
@@ -299,7 +299,7 @@ static void early_init_intel(struct cpuinfo_x86 *c)
         * string flag and enhanced fast string capabilities accordingly.
         */
        if (c->x86_vfm >= INTEL_PENTIUM_M_DOTHAN) {
-               rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
+               rdmsrq(MSR_IA32_MISC_ENABLE, misc_enable);
                if (misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING) {
                        /* X86_FEATURE_ERMS is set based on CPUID */
                        set_cpu_cap(c, X86_FEATURE_REP_GOOD);
index 30b1d63b97f3a1b3463cd16f1dc90286f6d24cc4..6e6af07fc61f4740b3ddc9270cd3613094629a2c 100644 (file)
@@ -79,7 +79,7 @@ static int intel_epb_save(void)
 {
        u64 epb;
 
-       rdmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb);
+       rdmsrq(MSR_IA32_ENERGY_PERF_BIAS, epb);
        /*
         * Ensure that saved_epb will always be nonzero after this write even if
         * the EPB value read from the MSR is 0.
@@ -94,7 +94,7 @@ static void intel_epb_restore(void)
        u64 val = this_cpu_read(saved_epb);
        u64 epb;
 
-       rdmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb);
+       rdmsrq(MSR_IA32_ENERGY_PERF_BIAS, epb);
        if (val) {
                val &= EPB_MASK;
        } else {
index 1075a90141daedc3d259882678c8cbe0ebb66e04..c29165f41bb6c3843a8171ce6438aab191758d29 100644 (file)
@@ -662,7 +662,7 @@ static void disable_err_thresholding(struct cpuinfo_x86 *c, unsigned int bank)
                return;
        }
 
-       rdmsrl(MSR_K7_HWCR, hwcr);
+       rdmsrq(MSR_K7_HWCR, hwcr);
 
        /* McStatusWrEn has to be set */
        need_toggle = !(hwcr & BIT(18));
@@ -805,12 +805,12 @@ static void __log_error(unsigned int bank, u64 status, u64 addr, u64 misc)
        }
 
        if (mce_flags.smca) {
-               rdmsrl(MSR_AMD64_SMCA_MCx_IPID(bank), m->ipid);
+               rdmsrq(MSR_AMD64_SMCA_MCx_IPID(bank), m->ipid);
 
                if (m->status & MCI_STATUS_SYNDV) {
-                       rdmsrl(MSR_AMD64_SMCA_MCx_SYND(bank), m->synd);
-                       rdmsrl(MSR_AMD64_SMCA_MCx_SYND1(bank), err.vendor.amd.synd1);
-                       rdmsrl(MSR_AMD64_SMCA_MCx_SYND2(bank), err.vendor.amd.synd2);
+                       rdmsrq(MSR_AMD64_SMCA_MCx_SYND(bank), m->synd);
+                       rdmsrq(MSR_AMD64_SMCA_MCx_SYND1(bank), err.vendor.amd.synd1);
+                       rdmsrq(MSR_AMD64_SMCA_MCx_SYND2(bank), err.vendor.amd.synd2);
                }
        }
 
@@ -834,12 +834,12 @@ _log_error_bank(unsigned int bank, u32 msr_stat, u32 msr_addr, u64 misc)
 {
        u64 status, addr = 0;
 
-       rdmsrl(msr_stat, status);
+       rdmsrq(msr_stat, status);
        if (!(status & MCI_STATUS_VAL))
                return false;
 
        if (status & MCI_STATUS_ADDRV)
-               rdmsrl(msr_addr, addr);
+               rdmsrq(msr_addr, addr);
 
        __log_error(bank, status, addr, misc);
 
index f6fd71b64b6638f077a30756e74444b577bf3f7b..a71228d9a4f41806a516ba532b9fcb122ddfbc0a 100644 (file)
@@ -1822,7 +1822,7 @@ static void __mcheck_cpu_cap_init(void)
        u64 cap;
        u8 b;
 
-       rdmsrl(MSR_IA32_MCG_CAP, cap);
+       rdmsrq(MSR_IA32_MCG_CAP, cap);
 
        b = cap & MCG_BANKCNT_MASK;
 
@@ -1863,7 +1863,7 @@ static void __mcheck_cpu_init_generic(void)
 
        cr4_set_bits(X86_CR4_MCE);
 
-       rdmsrl(MSR_IA32_MCG_CAP, cap);
+       rdmsrq(MSR_IA32_MCG_CAP, cap);
        if (cap & MCG_CTL_P)
                wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);
 }
@@ -1905,7 +1905,7 @@ static void __mcheck_cpu_check_banks(void)
                if (!b->init)
                        continue;
 
-               rdmsrl(mca_msr_reg(i, MCA_CTL), msrval);
+               rdmsrq(mca_msr_reg(i, MCA_CTL), msrval);
                b->init = !!msrval;
        }
 }
index 06e3cf7229ce919f774ccd031a5d81f885370000..08d52af7a8ada515159441bf1b34fc689229c9c6 100644 (file)
@@ -741,7 +741,7 @@ static void check_hw_inj_possible(void)
                u64 status = MCI_STATUS_VAL, ipid;
 
                /* Check whether bank is populated */
-               rdmsrl(MSR_AMD64_SMCA_MCx_IPID(bank), ipid);
+               rdmsrq(MSR_AMD64_SMCA_MCx_IPID(bank), ipid);
                if (!ipid)
                        continue;
 
index f863df0ff42ce87a9e9a08149bf73380ffd30ed8..9fda956d4e4833ee859557d0cadca3afc49c8944 100644 (file)
@@ -94,7 +94,7 @@ static bool cmci_supported(int *banks)
        if (!boot_cpu_has(X86_FEATURE_APIC) || lapic_get_maxlvt() < 6)
                return false;
 
-       rdmsrl(MSR_IA32_MCG_CAP, cap);
+       rdmsrq(MSR_IA32_MCG_CAP, cap);
        *banks = min_t(unsigned, MAX_NR_BANKS, cap & MCG_BANKCNT_MASK);
        return !!(cap & MCG_CMCI_P);
 }
@@ -106,7 +106,7 @@ static bool lmce_supported(void)
        if (mca_cfg.lmce_disabled)
                return false;
 
-       rdmsrl(MSR_IA32_MCG_CAP, tmp);
+       rdmsrq(MSR_IA32_MCG_CAP, tmp);
 
        /*
         * LMCE depends on recovery support in the processor. Hence both
@@ -123,7 +123,7 @@ static bool lmce_supported(void)
         * WARN if the MSR isn't locked as init_ia32_feat_ctl() unconditionally
         * locks the MSR in the event that it wasn't already locked by BIOS.
         */
-       rdmsrl(MSR_IA32_FEAT_CTL, tmp);
+       rdmsrq(MSR_IA32_FEAT_CTL, tmp);
        if (WARN_ON_ONCE(!(tmp & FEAT_CTL_LOCKED)))
                return false;
 
@@ -141,7 +141,7 @@ static void cmci_set_threshold(int bank, int thresh)
        u64 val;
 
        raw_spin_lock_irqsave(&cmci_discover_lock, flags);
-       rdmsrl(MSR_IA32_MCx_CTL2(bank), val);
+       rdmsrq(MSR_IA32_MCx_CTL2(bank), val);
        val &= ~MCI_CTL2_CMCI_THRESHOLD_MASK;
        wrmsrl(MSR_IA32_MCx_CTL2(bank), val | thresh);
        raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
@@ -184,7 +184,7 @@ static bool cmci_skip_bank(int bank, u64 *val)
        if (test_bit(bank, mce_banks_ce_disabled))
                return true;
 
-       rdmsrl(MSR_IA32_MCx_CTL2(bank), *val);
+       rdmsrq(MSR_IA32_MCx_CTL2(bank), *val);
 
        /* Already owned by someone else? */
        if (*val & MCI_CTL2_CMCI_EN) {
@@ -233,7 +233,7 @@ static void cmci_claim_bank(int bank, u64 val, int bios_zero_thresh, int *bios_w
 
        val |= MCI_CTL2_CMCI_EN;
        wrmsrl(MSR_IA32_MCx_CTL2(bank), val);
-       rdmsrl(MSR_IA32_MCx_CTL2(bank), val);
+       rdmsrq(MSR_IA32_MCx_CTL2(bank), val);
 
        /* If the enable bit did not stick, this bank should be polled. */
        if (!(val & MCI_CTL2_CMCI_EN)) {
@@ -324,7 +324,7 @@ static void __cmci_disable_bank(int bank)
 
        if (!test_bit(bank, this_cpu_ptr(mce_banks_owned)))
                return;
-       rdmsrl(MSR_IA32_MCx_CTL2(bank), val);
+       rdmsrq(MSR_IA32_MCx_CTL2(bank), val);
        val &= ~MCI_CTL2_CMCI_EN;
        wrmsrl(MSR_IA32_MCx_CTL2(bank), val);
        __clear_bit(bank, this_cpu_ptr(mce_banks_owned));
@@ -430,7 +430,7 @@ void intel_init_lmce(void)
        if (!lmce_supported())
                return;
 
-       rdmsrl(MSR_IA32_MCG_EXT_CTL, val);
+       rdmsrq(MSR_IA32_MCG_EXT_CTL, val);
 
        if (!(val & MCG_EXT_CTL_LMCE_EN))
                wrmsrl(MSR_IA32_MCG_EXT_CTL, val | MCG_EXT_CTL_LMCE_EN);
@@ -443,7 +443,7 @@ void intel_clear_lmce(void)
        if (!lmce_supported())
                return;
 
-       rdmsrl(MSR_IA32_MCG_EXT_CTL, val);
+       rdmsrq(MSR_IA32_MCG_EXT_CTL, val);
        val &= ~MCG_EXT_CTL_LMCE_EN;
        wrmsrl(MSR_IA32_MCG_EXT_CTL, val);
 }
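
Nearly every hunk in this file follows the same read-modify-write idiom: rdmsrq() fetches the current value, a bit is set or cleared, and wrmsrl() writes it back (the wrmsrl() -> wrmsrq() rename is a separate pass). A minimal sketch of that idiom against the msr device, assuming an O_RDWR descriptor; msr_set_clear() is a hypothetical helper, and 0x4d0 / bit 0 are MSR_IA32_MCG_EXT_CTL / MCG_EXT_CTL_LMCE_EN as in intel_init_lmce() above.

/* Sketch of the rdmsrq()/wrmsrl() read-modify-write idiom; 'fd' is an
 * open /dev/cpu/N/msr descriptor and the helper name is hypothetical. */
#include <stdint.h>
#include <unistd.h>

static int msr_set_clear(int fd, uint32_t msr, uint64_t set, uint64_t clear)
{
        uint64_t val;

        if (pread(fd, &val, sizeof(val), msr) != sizeof(val))
                return -1;
        val &= ~clear;
        val |= set;
        if (pwrite(fd, &val, sizeof(val), msr) != sizeof(val))
                return -1;
        return 0;
}

/* e.g. mirroring intel_init_lmce(): set LMCE_EN (bit 0) in
 * MSR_IA32_MCG_EXT_CTL (0x4d0):  msr_set_clear(fd, 0x4d0, 1, 0); */
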
index 3e2533954675a9a541ae9cf9cd6629420614672d..b1b9b4c4997e32c51493237965392ccc3e01fd91 100644 (file)
@@ -70,7 +70,7 @@ u64 hv_get_non_nested_msr(unsigned int reg)
        if (hv_is_synic_msr(reg) && ms_hyperv.paravisor_present)
                hv_ivm_msr_read(reg, &value);
        else
-               rdmsrl(reg, value);
+               rdmsrq(reg, value);
        return value;
 }
 EXPORT_SYMBOL_GPL(hv_get_non_nested_msr);
@@ -345,7 +345,7 @@ static unsigned long hv_get_tsc_khz(void)
 {
        unsigned long freq;
 
-       rdmsrl(HV_X64_MSR_TSC_FREQUENCY, freq);
+       rdmsrq(HV_X64_MSR_TSC_FREQUENCY, freq);
 
        return freq / 1000;
 }
@@ -541,7 +541,7 @@ static void __init ms_hyperv_init_platform(void)
                 */
                u64     hv_lapic_frequency;
 
-               rdmsrl(HV_X64_MSR_APIC_FREQUENCY, hv_lapic_frequency);
+               rdmsrq(HV_X64_MSR_APIC_FREQUENCY, hv_lapic_frequency);
                hv_lapic_frequency = div_u64(hv_lapic_frequency, HZ);
                lapic_timer_period = hv_lapic_frequency;
                pr_info("Hyper-V: LAPIC Timer Frequency: %#x\n",
index cf29681d01e04e308dc9963b2714bff8ebf72222..76c887ba9e9ce7de9581bd3795bb60c3d042a986 100644 (file)
@@ -148,7 +148,7 @@ static inline void cache_alloc_hsw_probe(void)
        if (wrmsrl_safe(MSR_IA32_L3_CBM_BASE, max_cbm))
                return;
 
-       rdmsrl(MSR_IA32_L3_CBM_BASE, l3_cbm_0);
+       rdmsrq(MSR_IA32_L3_CBM_BASE, l3_cbm_0);
 
        /* If all the bits were set in MSR, return success */
        if (l3_cbm_0 != max_cbm)
index a93ed7d2a1602956bf2e6a42f41f2fbb129e4bcd..f73a74945ffa86d6075f747fa5aeb817d83a8f2e 100644 (file)
@@ -238,7 +238,7 @@ static int __rmid_read_phys(u32 prmid, enum resctrl_event_id eventid, u64 *val)
         * are error bits.
         */
        wrmsr(MSR_IA32_QM_EVTSEL, eventid, prmid);
-       rdmsrl(MSR_IA32_QM_CTR, msr_val);
+       rdmsrq(MSR_IA32_QM_CTR, msr_val);
 
        if (msr_val & RMID_VAL_ERROR)
                return -EIO;
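
The __rmid_read_phys() hunk is the two-MSR handshake behind resctrl monitoring: the event id goes into the low half of MSR_IA32_QM_EVTSEL and the RMID into the high half (hence the two-argument wrmsr()), after which MSR_IA32_QM_CTR returns the count with error/unavailable flags in the top two bits. A sketch of that decode, with MSR numbers as I read them from msr-index.h and the usual assumed helpers:

/* Sketch of the QM_EVTSEL/QM_CTR handshake; rdmsr64/wrmsr64 are
 * assumed helpers, MSR numbers and flag bits per my reading of
 * msr-index.h (0xc8d/0xc8e, bits 63/62). */
#include <stdint.h>

#define MSR_IA32_QM_EVTSEL      0x0c8d
#define MSR_IA32_QM_CTR         0x0c8e
#define RMID_VAL_ERROR          (1ULL << 63)
#define RMID_VAL_UNAVAIL        (1ULL << 62)

extern void wrmsr64(uint32_t msr, uint64_t val);
extern uint64_t rdmsr64(uint32_t msr);

static int qm_read(uint32_t rmid, uint32_t evtid, uint64_t *count)
{
        uint64_t val;

        wrmsr64(MSR_IA32_QM_EVTSEL, ((uint64_t)rmid << 32) | evtid);
        val = rdmsr64(MSR_IA32_QM_CTR);

        if (val & (RMID_VAL_ERROR | RMID_VAL_UNAVAIL))
                return -1;              /* kernel returns -EIO / -EINVAL */
        *count = val;
        return 0;
}
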
index 93ec829015f1346fb66d8f08fefa76fbb5ce60ab..3be9866be0e71f7ce0e4c1823debfa65bd499083 100644 (file)
@@ -1635,7 +1635,7 @@ void resctrl_arch_mon_event_config_read(void *_config_info)
                pr_warn_once("Invalid event id %d\n", config_info->evtid);
                return;
        }
-       rdmsrl(MSR_IA32_EVT_CFG_BASE + index, msrval);
+       rdmsrq(MSR_IA32_EVT_CFG_BASE + index, msrval);
 
        /* Report only the valid event configuration bits */
        config_info->mon_config = msrval & MAX_EVT_CONFIG_BITS;
index 01456236a6dd340f6fff334a98206d5bf3ed6b3a..6e1885dece0fd4fef87f822bebde21685aae39a2 100644 (file)
@@ -154,7 +154,7 @@ static __init bool check_for_real_bsp(u32 apic_id)
         * kernel must rely on the firmware enumeration order.
         */
        if (has_apic_base) {
-               rdmsrl(MSR_IA32_APICBASE, msr);
+               rdmsrq(MSR_IA32_APICBASE, msr);
                is_bsp = !!(msr & MSR_IA32_APICBASE_BSP);
        }
 
index 03b3c9c3a45e2ef16449c666793a2c91378f5542..535dcf511096fe19995b10e7215e0f83f3463b3a 100644 (file)
@@ -133,7 +133,7 @@ static void parse_fam10h_node_id(struct topo_scan *tscan)
        if (!boot_cpu_has(X86_FEATURE_NODEID_MSR))
                return;
 
-       rdmsrl(MSR_FAM10H_NODE_ID, nid.msr);
+       rdmsrq(MSR_FAM10H_NODE_ID, nid.msr);
        store_node(tscan, nid.nodes_per_pkg + 1, nid.node_id);
        tscan->c->topo.llc_id = nid.node_id;
 }
@@ -160,7 +160,7 @@ static void topoext_fixup(struct topo_scan *tscan)
        if (msr_set_bit(0xc0011005, 54) <= 0)
                return;
 
-       rdmsrl(0xc0011005, msrval);
+       rdmsrq(0xc0011005, msrval);
        if (msrval & BIT_64(54)) {
                set_cpu_cap(c, X86_FEATURE_TOPOEXT);
                pr_info_once(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
index b31ee4f1657a82d15e7df449fbcca1952f8bec56..c3aaa6815c0bc13179795437bd75883e8aff5b37 100644 (file)
@@ -24,7 +24,7 @@ static void tsx_disable(void)
 {
        u64 tsx;
 
-       rdmsrl(MSR_IA32_TSX_CTRL, tsx);
+       rdmsrq(MSR_IA32_TSX_CTRL, tsx);
 
        /* Force all transactions to immediately abort */
        tsx |= TSX_CTRL_RTM_DISABLE;
@@ -44,7 +44,7 @@ static void tsx_enable(void)
 {
        u64 tsx;
 
-       rdmsrl(MSR_IA32_TSX_CTRL, tsx);
+       rdmsrq(MSR_IA32_TSX_CTRL, tsx);
 
        /* Enable the RTM feature in the cpu */
        tsx &= ~TSX_CTRL_RTM_DISABLE;
@@ -115,11 +115,11 @@ static void tsx_clear_cpuid(void)
         */
        if (boot_cpu_has(X86_FEATURE_RTM_ALWAYS_ABORT) &&
            boot_cpu_has(X86_FEATURE_TSX_FORCE_ABORT)) {
-               rdmsrl(MSR_TSX_FORCE_ABORT, msr);
+               rdmsrq(MSR_TSX_FORCE_ABORT, msr);
                msr |= MSR_TFA_TSX_CPUID_CLEAR;
                wrmsrl(MSR_TSX_FORCE_ABORT, msr);
        } else if (cpu_feature_enabled(X86_FEATURE_MSR_TSX_CTRL)) {
-               rdmsrl(MSR_IA32_TSX_CTRL, msr);
+               rdmsrq(MSR_IA32_TSX_CTRL, msr);
                msr |= TSX_CTRL_CPUID_CLEAR;
                wrmsrl(MSR_IA32_TSX_CTRL, msr);
        }
@@ -146,7 +146,7 @@ static void tsx_dev_mode_disable(void)
            !cpu_feature_enabled(X86_FEATURE_SRBDS_CTRL))
                return;
 
-       rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_opt_ctrl);
+       rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_opt_ctrl);
 
        if (mcu_opt_ctrl & RTM_ALLOW) {
                mcu_opt_ctrl &= ~RTM_ALLOW;
index 2293efd6ffa696c32998f8dfc1f2d063b3ffbfc6..0050eae153bbbba51144408ef330c9712becf688 100644 (file)
@@ -214,7 +214,7 @@ static int __init umwait_init(void)
         * changed. This is the only place where orig_umwait_control_cached
         * is modified.
         */
-       rdmsrl(MSR_IA32_UMWAIT_CONTROL, orig_umwait_control_cached);
+       rdmsrq(MSR_IA32_UMWAIT_CONTROL, orig_umwait_control_cached);
 
        ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "umwait:online",
                                umwait_cpu_online, umwait_cpu_offline);
index 91d6341f281f80e5dd4653116ce1b3833e2eabfe..985dfffe28c16c8eb1caae3325b2001ee92b55b6 100644 (file)
@@ -327,7 +327,7 @@ void fpu_sync_guest_vmexit_xfd_state(void)
 
        lockdep_assert_irqs_disabled();
        if (fpu_state_size_dynamic()) {
-               rdmsrl(MSR_IA32_XFD, fps->xfd);
+               rdmsrq(MSR_IA32_XFD, fps->xfd);
                __this_cpu_write(xfd_state, fps->xfd);
        }
 }
index 7f4b2966e15cb047ffc54a74d9e770f209cfba60..cc5d122322164b49d6052e6422b54e873919f688 100644 (file)
@@ -970,7 +970,7 @@ static bool __init hpet_is_pc10_damaged(void)
                return false;
 
        /* Check whether PC10 is enabled in PKG C-state limit */
-       rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, pcfg);
+       rdmsrq(MSR_PKG_CST_CONFIG_CONTROL, pcfg);
        if ((pcfg & 0xF) < 8)
                return false;
 
index 3be9b3342c672fd8efbf3a20bc9901756a902bba..f7aa39cd56ecce4eda610e4456d66c0f08ad1cb1 100644 (file)
@@ -728,7 +728,7 @@ static int kvm_suspend(void)
 
 #ifdef CONFIG_ARCH_CPUIDLE_HALTPOLL
        if (kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL))
-               rdmsrl(MSR_KVM_POLL_CONTROL, val);
+               rdmsrq(MSR_KVM_POLL_CONTROL, val);
        has_guest_poll = !(val & 1);
 #endif
        return 0;
index 1f54eedc3015e9bc6ac6c250bfe5b2c8c778ba0c..5c6a55f8a7fe288f4e42901273fe94691c8c6908 100644 (file)
@@ -97,7 +97,7 @@ static void get_fam10h_pci_mmconf_base(void)
 
        /* SYS_CFG */
        address = MSR_AMD64_SYSCFG;
-       rdmsrl(address, val);
+       rdmsrq(address, val);
 
        /* TOP_MEM2 is not enabled? */
        if (!(val & (1<<21))) {
@@ -105,7 +105,7 @@ static void get_fam10h_pci_mmconf_base(void)
        } else {
                /* TOP_MEM2 */
                address = MSR_K8_TOP_MEM2;
-               rdmsrl(address, val);
+               rdmsrq(address, val);
                tom2 = max(val & 0xffffff800000ULL, 1ULL << 32);
        }
 
@@ -177,7 +177,7 @@ void fam10h_check_enable_mmcfg(void)
                return;
 
        address = MSR_FAM10H_MMIO_CONF_BASE;
-       rdmsrl(address, val);
+       rdmsrq(address, val);
 
        /* try to make sure that AP's setting is identical to BSP setting */
        if (val & FAM10H_MMIO_CONF_ENABLE) {
index 962c3ce39323e7bdb75ee1920e4bea936c83d028..197be718e964a90983ef000b5828655f31c59f71 100644 (file)
@@ -710,7 +710,7 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p)
            arch_has_block_step()) {
                unsigned long debugctl, msk;
 
-               rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
+               rdmsrq(MSR_IA32_DEBUGCTLMSR, debugctl);
                debugctl &= ~DEBUGCTLMSR_BTF;
                msk = tifn & _TIF_BLOCKSTEP;
                debugctl |= (msk >> TIF_BLOCKSTEP) << DEBUGCTLMSR_BTF_SHIFT;
index 7196ca7048be0cfbb57d13f0f142ea4f90b48634..422c5dad1046a2b4b18f84fa9324d8d0042d6eb7 100644 (file)
@@ -95,8 +95,8 @@ void __show_regs(struct pt_regs *regs, enum show_regs_mode mode,
                return;
 
        if (mode == SHOW_REGS_USER) {
-               rdmsrl(MSR_FS_BASE, fs);
-               rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);
+               rdmsrq(MSR_FS_BASE, fs);
+               rdmsrq(MSR_KERNEL_GS_BASE, shadowgs);
                printk("%sFS:  %016lx GS:  %016lx\n",
                       log_lvl, fs, shadowgs);
                return;
@@ -107,9 +107,9 @@ void __show_regs(struct pt_regs *regs, enum show_regs_mode mode,
        asm("movl %%fs,%0" : "=r" (fsindex));
        asm("movl %%gs,%0" : "=r" (gsindex));
 
-       rdmsrl(MSR_FS_BASE, fs);
-       rdmsrl(MSR_GS_BASE, gs);
-       rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);
+       rdmsrq(MSR_FS_BASE, fs);
+       rdmsrq(MSR_GS_BASE, gs);
+       rdmsrq(MSR_KERNEL_GS_BASE, shadowgs);
 
        cr0 = read_cr0();
        cr2 = read_cr2();
@@ -195,7 +195,7 @@ static noinstr unsigned long __rdgsbase_inactive(void)
                native_swapgs();
        } else {
                instrumentation_begin();
-               rdmsrl(MSR_KERNEL_GS_BASE, gsbase);
+               rdmsrq(MSR_KERNEL_GS_BASE, gsbase);
                instrumentation_end();
        }
 
@@ -463,7 +463,7 @@ unsigned long x86_gsbase_read_cpu_inactive(void)
                gsbase = __rdgsbase_inactive();
                local_irq_restore(flags);
        } else {
-               rdmsrl(MSR_KERNEL_GS_BASE, gsbase);
+               rdmsrq(MSR_KERNEL_GS_BASE, gsbase);
        }
 
        return gsbase;
index 059685612362d7b1865eabf400888fbfa0659c1e..3d1ba20f840f7a5421c1968e5fc6b3d8c2ea7a7f 100644 (file)
@@ -239,7 +239,7 @@ static unsigned long get_user_shstk_addr(void)
 
        fpregs_lock_and_load();
 
-       rdmsrl(MSR_IA32_PL3_SSP, ssp);
+       rdmsrq(MSR_IA32_PL3_SSP, ssp);
 
        fpregs_unlock();
 
@@ -460,7 +460,7 @@ static int wrss_control(bool enable)
                return 0;
 
        fpregs_lock_and_load();
-       rdmsrl(MSR_IA32_U_CET, msrval);
+       rdmsrq(MSR_IA32_U_CET, msrval);
 
        if (enable) {
                features_set(ARCH_SHSTK_WRSS);
index 9f88b8a78e50912790077cff0f940c7f601aff33..71b14675e50ed659cef5f0086579e03e227ac1da 100644 (file)
@@ -1120,7 +1120,7 @@ static noinstr void exc_debug_kernel(struct pt_regs *regs, unsigned long dr6)
                 */
                unsigned long debugctl;
 
-               rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
+               rdmsrq(MSR_IA32_DEBUGCTLMSR, debugctl);
                debugctl |= DEBUGCTLMSR_BTF;
                wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
        }
@@ -1386,7 +1386,7 @@ static bool handle_xfd_event(struct pt_regs *regs)
        if (!IS_ENABLED(CONFIG_X86_64) || !cpu_feature_enabled(X86_FEATURE_XFD))
                return false;
 
-       rdmsrl(MSR_IA32_XFD_ERR, xfd_err);
+       rdmsrq(MSR_IA32_XFD_ERR, xfd_err);
        if (!xfd_err)
                return false;
 
index 88e5a4ed9db3a7e530d03e7da5ad7a1895ec4b64..160fef71b9a3e771e9ec8fa28ec13450ccbeedaa 100644 (file)
@@ -1098,7 +1098,7 @@ static void __init detect_art(void)
        if (art_base_clk.denominator < ART_MIN_DENOMINATOR)
                return;
 
-       rdmsrl(MSR_IA32_TSC_ADJUST, art_base_clk.offset);
+       rdmsrq(MSR_IA32_TSC_ADJUST, art_base_clk.offset);
 
        /* Make this sticky over multiple CPU init calls */
        setup_force_cpu_cap(X86_FEATURE_ART);
index 4334033658edfbbe71ec48bda9f8fbcda2749c33..8260391998285e184c22f55568390df491967820 100644 (file)
@@ -65,7 +65,7 @@ void tsc_verify_tsc_adjust(bool resume)
 
        adj->nextcheck = jiffies + HZ;
 
-       rdmsrl(MSR_IA32_TSC_ADJUST, curval);
+       rdmsrq(MSR_IA32_TSC_ADJUST, curval);
        if (adj->adjusted == curval)
                return;
 
@@ -165,7 +165,7 @@ bool __init tsc_store_and_check_tsc_adjust(bool bootcpu)
        if (check_tsc_unstable())
                return false;
 
-       rdmsrl(MSR_IA32_TSC_ADJUST, bootval);
+       rdmsrq(MSR_IA32_TSC_ADJUST, bootval);
        cur->bootval = bootval;
        cur->nextcheck = jiffies + HZ;
        tsc_sanitize_first_cpu(cur, bootval, smp_processor_id(), bootcpu);
@@ -187,7 +187,7 @@ bool tsc_store_and_check_tsc_adjust(bool bootcpu)
        if (!boot_cpu_has(X86_FEATURE_TSC_ADJUST))
                return false;
 
-       rdmsrl(MSR_IA32_TSC_ADJUST, bootval);
+       rdmsrq(MSR_IA32_TSC_ADJUST, bootval);
        cur->bootval = bootval;
        cur->nextcheck = jiffies + HZ;
        cur->warned = false;
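
Both tsc_sync.c hunks snapshot MSR_IA32_TSC_ADJUST and compare it with the value the kernel believes it last wrote; if firmware (typically SMM) moved it, the kernel warns and restores it. A minimal sketch of that compare-and-restore step, with the struct simplified and rdmsr64/wrmsr64 again standing in for the kernel accessors:

/* Sketch of the TSC_ADJUST verification: re-read the MSR and put it
 * back if something changed it behind the kernel's back.  Types are
 * simplified; 0x3b is MSR_IA32_TSC_ADJUST. */
#include <stdbool.h>
#include <stdint.h>

#define MSR_IA32_TSC_ADJUST     0x0000003b

extern uint64_t rdmsr64(uint32_t msr);
extern void wrmsr64(uint32_t msr, uint64_t val);

struct tsc_adjust { int64_t adjusted; bool warned; };

static void verify_tsc_adjust(struct tsc_adjust *adj)
{
        int64_t cur = (int64_t)rdmsr64(MSR_IA32_TSC_ADJUST);

        if (cur == adj->adjusted)
                return;                 /* still what we last wrote */

        if (!adj->warned)
                adj->warned = true;     /* kernel pr_warn()s once here */

        wrmsr64(MSR_IA32_TSC_ADJUST, (uint64_t)adj->adjusted);
}
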
index d5d0c5c3300bc168283c95f32614c95b6825af8c..21c3563547f2efba153162277fc8988970a36075 100644 (file)
@@ -580,7 +580,7 @@ static inline void kvm_cpu_svm_disable(void)
        uint64_t efer;
 
        wrmsrl(MSR_VM_HSAVE_PA, 0);
-       rdmsrl(MSR_EFER, efer);
+       rdmsrq(MSR_EFER, efer);
        if (efer & EFER_SVME) {
                /*
                 * Force GIF=1 prior to disabling SVM, e.g. to ensure INIT and
@@ -619,7 +619,7 @@ static int svm_enable_virtualization_cpu(void)
        uint64_t efer;
        int me = raw_smp_processor_id();
 
-       rdmsrl(MSR_EFER, efer);
+       rdmsrq(MSR_EFER, efer);
        if (efer & EFER_SVME)
                return -EBUSY;
 
@@ -5232,7 +5232,7 @@ static __init void svm_adjust_mmio_mask(void)
                return;
 
        /* If memory encryption is not enabled, use existing mask */
-       rdmsrl(MSR_AMD64_SYSCFG, msr);
+       rdmsrq(MSR_AMD64_SYSCFG, msr);
        if (!(msr & MSR_AMD64_SYSCFG_MEM_ENCRYPT))
                return;
 
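All three SVM hunks gate on the same bit: EFER.SVME, which says whether SVM is already enabled on this CPU (kvm_cpu_svm_disable() only tears down state if it is set; svm_enable_virtualization_cpu() returns -EBUSY when another hypervisor owns it). For reference — EFER is MSR 0xc0000080 and SVME is bit 12 — a one-line check in the same style might look like this sketch:

#include <stdbool.h>
#include <stdint.h>

#define MSR_EFER        0xc0000080
#define EFER_SVME       (1ULL << 12)    /* SVM enable */

extern uint64_t rdmsr64(uint32_t msr);  /* assumed helper, as above */

static bool svm_already_enabled(void)
{
        return (rdmsr64(MSR_EFER) & EFER_SVME) != 0;
}
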
index 5504d9e9fd32cb630a3cde2d22ffd40a7a1b4ab9..a7fea622e204d567527eb144dbc87fd0f9b39933 100644 (file)
@@ -7202,8 +7202,8 @@ static void nested_vmx_setup_cr_fixed(struct nested_vmx_msrs *msrs)
        msrs->cr4_fixed0 = VMXON_CR4_ALWAYSON;
 
        /* These MSRs specify bits which the guest must keep fixed off. */
-       rdmsrl(MSR_IA32_VMX_CR0_FIXED1, msrs->cr0_fixed1);
-       rdmsrl(MSR_IA32_VMX_CR4_FIXED1, msrs->cr4_fixed1);
+       rdmsrq(MSR_IA32_VMX_CR0_FIXED1, msrs->cr0_fixed1);
+       rdmsrq(MSR_IA32_VMX_CR4_FIXED1, msrs->cr4_fixed1);
 
        if (vmx_umip_emulated())
                msrs->cr4_fixed1 |= X86_CR4_UMIP;
index 77012b2eca0e9a5a8671fa43a59906a4cd7f07c5..f0f02eee48cb02f63fa2e5f71175da41d1c7cccb 100644 (file)
@@ -279,7 +279,7 @@ static bool intel_pmu_handle_lbr_msrs_access(struct kvm_vcpu *vcpu,
        local_irq_disable();
        if (lbr_desc->event->state == PERF_EVENT_STATE_ACTIVE) {
                if (read)
-                       rdmsrl(index, msr_info->data);
+                       rdmsrq(index, msr_info->data);
                else
                        wrmsrl(index, msr_info->data);
                __set_bit(INTEL_PMC_IDX_FIXED_VLBR, vcpu_to_pmu(vcpu)->pmc_in_use);
index 9961e07cf071aa6e4be8590927b2651c59a03209..3cf4ef659dbdbcb11db4833e2539f3048c300a2c 100644 (file)
@@ -418,9 +418,9 @@ void setup_default_sgx_lepubkeyhash(void)
                sgx_pubkey_hash[3] = 0xd4f8c05909f9bb3bULL;
        } else {
                /* MSR_IA32_SGXLEPUBKEYHASH0 is read above */
-               rdmsrl(MSR_IA32_SGXLEPUBKEYHASH1, sgx_pubkey_hash[1]);
-               rdmsrl(MSR_IA32_SGXLEPUBKEYHASH2, sgx_pubkey_hash[2]);
-               rdmsrl(MSR_IA32_SGXLEPUBKEYHASH3, sgx_pubkey_hash[3]);
+               rdmsrq(MSR_IA32_SGXLEPUBKEYHASH1, sgx_pubkey_hash[1]);
+               rdmsrq(MSR_IA32_SGXLEPUBKEYHASH2, sgx_pubkey_hash[2]);
+               rdmsrq(MSR_IA32_SGXLEPUBKEYHASH3, sgx_pubkey_hash[3]);
        }
 }
 
index 5c5766467a61d434ba2baa79a5faba99bcbd9997..3e0762a6939beaa84038ac365a49bbb2a7d2b94d 100644 (file)
@@ -1206,13 +1206,13 @@ static inline void pt_save_msr(struct pt_ctx *ctx, u32 addr_range)
 {
        u32 i;
 
-       rdmsrl(MSR_IA32_RTIT_STATUS, ctx->status);
-       rdmsrl(MSR_IA32_RTIT_OUTPUT_BASE, ctx->output_base);
-       rdmsrl(MSR_IA32_RTIT_OUTPUT_MASK, ctx->output_mask);
-       rdmsrl(MSR_IA32_RTIT_CR3_MATCH, ctx->cr3_match);
+       rdmsrq(MSR_IA32_RTIT_STATUS, ctx->status);
+       rdmsrq(MSR_IA32_RTIT_OUTPUT_BASE, ctx->output_base);
+       rdmsrq(MSR_IA32_RTIT_OUTPUT_MASK, ctx->output_mask);
+       rdmsrq(MSR_IA32_RTIT_CR3_MATCH, ctx->cr3_match);
        for (i = 0; i < addr_range; i++) {
-               rdmsrl(MSR_IA32_RTIT_ADDR0_A + i * 2, ctx->addr_a[i]);
-               rdmsrl(MSR_IA32_RTIT_ADDR0_B + i * 2, ctx->addr_b[i]);
+               rdmsrq(MSR_IA32_RTIT_ADDR0_A + i * 2, ctx->addr_a[i]);
+               rdmsrq(MSR_IA32_RTIT_ADDR0_B + i * 2, ctx->addr_b[i]);
        }
 }
 
@@ -1225,7 +1225,7 @@ static void pt_guest_enter(struct vcpu_vmx *vmx)
         * GUEST_IA32_RTIT_CTL is already set in the VMCS.
         * Save host state before VM entry.
         */
-       rdmsrl(MSR_IA32_RTIT_CTL, vmx->pt_desc.host.ctl);
+       rdmsrq(MSR_IA32_RTIT_CTL, vmx->pt_desc.host.ctl);
        if (vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) {
                wrmsrl(MSR_IA32_RTIT_CTL, 0);
                pt_save_msr(&vmx->pt_desc.host, vmx->pt_desc.num_address_ranges);
@@ -1362,7 +1362,7 @@ static void vmx_prepare_switch_to_host(struct vcpu_vmx *vmx)
        ++vmx->vcpu.stat.host_state_reload;
 
 #ifdef CONFIG_X86_64
-       rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
+       rdmsrq(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
 #endif
        if (host_state->ldt_sel || (host_state->gs_sel & 7)) {
                kvm_load_ldt(host_state->ldt_sel);
@@ -1394,7 +1394,7 @@ static u64 vmx_read_guest_kernel_gs_base(struct vcpu_vmx *vmx)
 {
        preempt_disable();
        if (vmx->guest_state_loaded)
-               rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
+               rdmsrq(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
        preempt_enable();
        return vmx->msr_guest_kernel_gs_base;
 }
@@ -2574,7 +2574,7 @@ static u64 adjust_vmx_controls64(u64 ctl_opt, u32 msr)
 {
        u64 allowed;
 
-       rdmsrl(msr, allowed);
+       rdmsrq(msr, allowed);
 
        return  ctl_opt & allowed;
 }
@@ -2746,7 +2746,7 @@ static int setup_vmcs_config(struct vmcs_config *vmcs_conf,
                break;
        }
 
-       rdmsrl(MSR_IA32_VMX_BASIC, basic_msr);
+       rdmsrq(MSR_IA32_VMX_BASIC, basic_msr);
 
        /* IA-32 SDM Vol 3B: VMCS size is never greater than 4kB. */
        if (vmx_basic_vmcs_size(basic_msr) > PAGE_SIZE)
@@ -2766,7 +2766,7 @@ static int setup_vmcs_config(struct vmcs_config *vmcs_conf,
        if (vmx_basic_vmcs_mem_type(basic_msr) != X86_MEMTYPE_WB)
                return -EIO;
 
-       rdmsrl(MSR_IA32_VMX_MISC, misc_msr);
+       rdmsrq(MSR_IA32_VMX_MISC, misc_msr);
 
        vmcs_conf->basic = basic_msr;
        vmcs_conf->pin_based_exec_ctrl = _pin_based_exec_control;
@@ -4391,7 +4391,7 @@ void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
        if (!IS_ENABLED(CONFIG_IA32_EMULATION) && !IS_ENABLED(CONFIG_X86_32))
                vmcs_writel(HOST_IA32_SYSENTER_ESP, 0);
 
-       rdmsrl(MSR_IA32_SYSENTER_EIP, tmpl);
+       rdmsrq(MSR_IA32_SYSENTER_EIP, tmpl);
        vmcs_writel(HOST_IA32_SYSENTER_EIP, tmpl);   /* 22.2.3 */
 
        if (vmcs_config.vmexit_ctrl & VM_EXIT_LOAD_IA32_PAT) {
@@ -7052,7 +7052,7 @@ static void handle_nm_fault_irqoff(struct kvm_vcpu *vcpu)
         * the #NM exception.
         */
        if (is_xfd_nm_fault(vcpu))
-               rdmsrl(MSR_IA32_XFD_ERR, vcpu->arch.guest_fpu.xfd_err);
+               rdmsrq(MSR_IA32_XFD_ERR, vcpu->arch.guest_fpu.xfd_err);
 }
 
 static void handle_exception_irqoff(struct kvm_vcpu *vcpu, u32 intr_info)
@@ -7959,7 +7959,7 @@ static __init u64 vmx_get_perf_capabilities(void)
                return 0;
 
        if (boot_cpu_has(X86_FEATURE_PDCM))
-               rdmsrl(MSR_IA32_PERF_CAPABILITIES, host_perf_cap);
+               rdmsrq(MSR_IA32_PERF_CAPABILITIES, host_perf_cap);
 
        if (!cpu_feature_enabled(X86_FEATURE_ARCH_LBR)) {
                x86_perf_get_lbr(&vmx_lbr_caps);
@@ -8508,7 +8508,7 @@ __init int vmx_hardware_setup(void)
                kvm_enable_efer_bits(EFER_NX);
 
        if (boot_cpu_has(X86_FEATURE_MPX)) {
-               rdmsrl(MSR_IA32_BNDCFGS, host_bndcfgs);
+               rdmsrq(MSR_IA32_BNDCFGS, host_bndcfgs);
                WARN_ONCE(host_bndcfgs, "BNDCFGS in host will be lost");
        }
 
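setup_vmcs_config() above consumes packed capability MSRs, and the two -EIO checks decode fields of MSR_IA32_VMX_BASIC. As a sketch — field positions per my reading of the SDM's VMX capability layout, so treat them as assumptions — the VMCS region size lives in bits 44:32 and the required VMCS memory type in bits 53:50, where 6 means write-back:

/* Sketch of the MSR_IA32_VMX_BASIC decode behind the -EIO checks. */
#include <stdint.h>

#define VMX_BASIC_VMCS_SIZE(b)  (((b) >> 32) & 0x1fff)  /* bits 44:32 */
#define VMX_BASIC_MEM_TYPE(b)   (((b) >> 50) & 0xf)     /* bits 53:50 */
#define X86_MEMTYPE_WB          6

static int vmx_basic_ok(uint64_t basic_msr)
{
        return VMX_BASIC_VMCS_SIZE(basic_msr) <= 4096 &&
               VMX_BASIC_MEM_TYPE(basic_msr) == X86_MEMTYPE_WB;
}
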
index c841817a914a3442179077cad579863c692d4065..c96c2ca7bb61f7c9efea36138fed370e0f6f659b 100644 (file)
@@ -9773,12 +9773,12 @@ int kvm_x86_vendor_init(struct kvm_x86_init_ops *ops)
        rdmsrl_safe(MSR_EFER, &kvm_host.efer);
 
        if (boot_cpu_has(X86_FEATURE_XSAVES))
-               rdmsrl(MSR_IA32_XSS, kvm_host.xss);
+               rdmsrq(MSR_IA32_XSS, kvm_host.xss);
 
        kvm_init_pmu_capability(ops->pmu_ops);
 
        if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
-               rdmsrl(MSR_IA32_ARCH_CAPABILITIES, kvm_host.arch_capabilities);
+               rdmsrq(MSR_IA32_ARCH_CAPABILITIES, kvm_host.arch_capabilities);
 
        r = ops->hardware_setup();
        if (r != 0)
index 98631c0e7a11f887307e484db9ae8bc643ffaa72..da5af3cc25b14b6b7e50021d678ce1ab0c5edb68 100644 (file)
@@ -702,16 +702,16 @@ unsigned long insn_get_seg_base(struct pt_regs *regs, int seg_reg_idx)
                unsigned long base;
 
                if (seg_reg_idx == INAT_SEG_REG_FS) {
-                       rdmsrl(MSR_FS_BASE, base);
+                       rdmsrq(MSR_FS_BASE, base);
                } else if (seg_reg_idx == INAT_SEG_REG_GS) {
                        /*
                         * swapgs was called at the kernel entry point. Thus,
                         * MSR_KERNEL_GS_BASE will have the user-space GS base.
                         */
                        if (user_mode(regs))
-                               rdmsrl(MSR_KERNEL_GS_BASE, base);
+                               rdmsrq(MSR_KERNEL_GS_BASE, base);
                        else
-                               rdmsrl(MSR_GS_BASE, base);
+                               rdmsrq(MSR_GS_BASE, base);
                } else {
                        base = 0;
                }
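
The insn-eval.c hunk encodes the swapgs contract: while the CPU runs kernel code, MSR_GS_BASE holds the kernel's GS base and MSR_KERNEL_GS_BASE holds the user one, so user_mode(regs) picks the MSR to read. The user-space view of the same state is arch_prctl(); a small sketch (Linux-only, x86-64):

/* Sketch: querying the FS/GS bases that the kernel reads via
 * MSR_FS_BASE / MSR_KERNEL_GS_BASE in the hunk above. */
#include <stdint.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <asm/prctl.h>          /* ARCH_GET_FS, ARCH_GET_GS */

int main(void)
{
        uint64_t fsbase = 0, gsbase = 0;

        syscall(SYS_arch_prctl, ARCH_GET_FS, &fsbase);
        syscall(SYS_arch_prctl, ARCH_GET_GS, &gsbase);
        printf("fs base %#llx, gs base %#llx\n",
               (unsigned long long)fsbase, (unsigned long long)gsbase);
        return 0;
}
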
index 72d8cbc611583228bc86333ca1b128c47e7eefec..ce6105e649449cc740b51ce99227d6b60bf716a9 100644 (file)
@@ -256,7 +256,7 @@ void __init pat_bp_init(void)
        if (!cpu_feature_enabled(X86_FEATURE_PAT))
                pat_disable("PAT not supported by the CPU.");
        else
-               rdmsrl(MSR_IA32_CR_PAT, pat_msr_val);
+               rdmsrq(MSR_IA32_CR_PAT, pat_msr_val);
 
        if (!pat_msr_val) {
                pat_disable("PAT support disabled by the firmware.");
index 631512f7ec85fc6cc8ec163e5d09e7307d47a916..b92917c052f37117b41268c6223a0d078341b213 100644 (file)
@@ -202,7 +202,7 @@ static int __init early_root_info_init(void)
 
        /* need to take out [0, TOM) for RAM*/
        address = MSR_K8_TOP_MEM1;
-       rdmsrl(address, val);
+       rdmsrq(address, val);
        end = (val & 0xffffff800000ULL);
        printk(KERN_INFO "TOM: %016llx aka %lldM\n", end, end>>20);
        if (end < (1ULL<<32))
@@ -293,12 +293,12 @@ static int __init early_root_info_init(void)
        /* need to take out [4G, TOM2) for RAM*/
        /* SYS_CFG */
        address = MSR_AMD64_SYSCFG;
-       rdmsrl(address, val);
+       rdmsrq(address, val);
        /* TOP_MEM2 is enabled? */
        if (val & (1<<21)) {
                /* TOP_MEM2 */
                address = MSR_K8_TOP_MEM2;
-               rdmsrl(address, val);
+               rdmsrq(address, val);
                end = (val & 0xffffff800000ULL);
                printk(KERN_INFO "TOM2: %016llx aka %lldM\n", end, end>>20);
                subtract_range(range, RANGE_NUM, 1ULL<<32, end);
@@ -341,7 +341,7 @@ static int amd_bus_cpu_online(unsigned int cpu)
 {
        u64 reg;
 
-       rdmsrl(MSR_AMD64_NB_CFG, reg);
+       rdmsrq(MSR_AMD64_NB_CFG, reg);
        if (!(reg & ENABLE_CF8_EXT_CFG)) {
                reg |= ENABLE_CF8_EXT_CFG;
                wrmsrl(MSR_AMD64_NB_CFG, reg);
index 57f210cda761b0521458467543861326b7eda82b..ee77d57bcab7af24b3f790efd2cdfb83e0b84764 100644 (file)
@@ -64,9 +64,9 @@ static int __init xo1_rtc_init(void)
        of_node_put(node);
 
        pr_info("olpc-xo1-rtc: Initializing OLPC XO-1 RTC\n");
-       rdmsrl(MSR_RTC_DOMA_OFFSET, rtc_info.rtc_day_alarm);
-       rdmsrl(MSR_RTC_MONA_OFFSET, rtc_info.rtc_mon_alarm);
-       rdmsrl(MSR_RTC_CEN_OFFSET, rtc_info.rtc_century);
+       rdmsrq(MSR_RTC_DOMA_OFFSET, rtc_info.rtc_day_alarm);
+       rdmsrq(MSR_RTC_MONA_OFFSET, rtc_info.rtc_mon_alarm);
+       rdmsrq(MSR_RTC_CEN_OFFSET, rtc_info.rtc_century);
 
        r = platform_device_register(&xo1_rtc_device);
        if (r)
index 08e76a5ca1553d30c829fdbb451ea4d71a95b1bb..874fe980c4bdaa031a5b0562d77b3df61d5f46c6 100644 (file)
@@ -44,7 +44,7 @@ static void msr_save_context(struct saved_context *ctxt)
 
        while (msr < end) {
                if (msr->valid)
-                       rdmsrl(msr->info.msr_no, msr->info.reg.q);
+                       rdmsrq(msr->info.msr_no, msr->info.reg.q);
                msr++;
        }
 }
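
msr_save_context() is table-driven: each entry that probed as valid at boot gets its 64-bit value snapshotted into the union's .q member, and resume replays the table with the write side. A condensed sketch of the pattern with simplified types (not the kernel's structs):

/* Sketch of the table-driven MSR save/restore pattern above;
 * rdmsr64/wrmsr64 are assumed helpers, types simplified. */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

extern uint64_t rdmsr64(uint32_t msr);
extern void wrmsr64(uint32_t msr, uint64_t val);

struct saved_msr {
        bool valid;             /* probed once: does this MSR exist? */
        uint32_t msr_no;
        uint64_t q;             /* 64-bit snapshot */
};

static void msrs_save(struct saved_msr *m, size_t n)
{
        for (size_t i = 0; i < n; i++)
                if (m[i].valid)
                        m[i].q = rdmsr64(m[i].msr_no);
}

static void msrs_restore(const struct saved_msr *m, size_t n)
{
        for (size_t i = 0; i < n; i++)
                if (m[i].valid)
                        wrmsr64(m[i].msr_no, m[i].q);
}
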
@@ -110,12 +110,12 @@ static void __save_processor_state(struct saved_context *ctxt)
        savesegment(ds, ctxt->ds);
        savesegment(es, ctxt->es);
 
-       rdmsrl(MSR_FS_BASE, ctxt->fs_base);
-       rdmsrl(MSR_GS_BASE, ctxt->kernelmode_gs_base);
-       rdmsrl(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base);
+       rdmsrq(MSR_FS_BASE, ctxt->fs_base);
+       rdmsrq(MSR_GS_BASE, ctxt->kernelmode_gs_base);
+       rdmsrq(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base);
        mtrr_save_fixed_ranges(NULL);
 
-       rdmsrl(MSR_EFER, ctxt->efer);
+       rdmsrq(MSR_EFER, ctxt->efer);
 #endif
 
        /*
index f9bc444a3064d3f1266242022c0b3efc5c44afaf..263787b4800c4dfb9736372a56f16c4d7c91648e 100644 (file)
@@ -145,7 +145,7 @@ static void __init setup_real_mode(void)
         * Some AMD processors will #GP(0) if EFER.LMA is set in WRMSR
         * so we need to mask it out.
         */
-       rdmsrl(MSR_EFER, efer);
+       rdmsrq(MSR_EFER, efer);
        trampoline_header->efer = efer & ~EFER_LMA;
 
        trampoline_header->start = (u64) secondary_startup_64;
index fc473ca12c44df51b12ef1b57bfe198a42c8786c..9ddd10265de7e35db3780833e38a8a70a06ea655 100644 (file)
@@ -136,7 +136,7 @@ static int __mfd_enable(unsigned int cpu)
        if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
                return 0;
 
-       rdmsrl(MSR_AMD64_SYSCFG, val);
+       rdmsrq(MSR_AMD64_SYSCFG, val);
 
        val |= MSR_AMD64_SYSCFG_MFDM;
 
@@ -157,7 +157,7 @@ static int __snp_enable(unsigned int cpu)
        if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
                return 0;
 
-       rdmsrl(MSR_AMD64_SYSCFG, val);
+       rdmsrq(MSR_AMD64_SYSCFG, val);
 
        val |= MSR_AMD64_SYSCFG_SNP_EN;
        val |= MSR_AMD64_SYSCFG_SNP_VMPL_EN;
@@ -522,7 +522,7 @@ int __init snp_rmptable_init(void)
         * Check if SEV-SNP is already enabled, this can happen in case of
         * kexec boot.
         */
-       rdmsrl(MSR_AMD64_SYSCFG, val);
+       rdmsrq(MSR_AMD64_SYSCFG, val);
        if (val & MSR_AMD64_SYSCFG_SNP_EN)
                goto skip_enable;
 
@@ -576,8 +576,8 @@ static bool probe_contiguous_rmptable_info(void)
 {
        u64 rmp_sz, rmp_base, rmp_end;
 
-       rdmsrl(MSR_AMD64_RMP_BASE, rmp_base);
-       rdmsrl(MSR_AMD64_RMP_END, rmp_end);
+       rdmsrq(MSR_AMD64_RMP_BASE, rmp_base);
+       rdmsrq(MSR_AMD64_RMP_END, rmp_end);
 
        if (!(rmp_base & RMP_ADDR_MASK) || !(rmp_end & RMP_ADDR_MASK)) {
                pr_err("Memory for the RMP table has not been reserved by BIOS\n");
@@ -610,13 +610,13 @@ static bool probe_segmented_rmptable_info(void)
        unsigned int eax, ebx, segment_shift, segment_shift_min, segment_shift_max;
        u64 rmp_base, rmp_end;
 
-       rdmsrl(MSR_AMD64_RMP_BASE, rmp_base);
+       rdmsrq(MSR_AMD64_RMP_BASE, rmp_base);
        if (!(rmp_base & RMP_ADDR_MASK)) {
                pr_err("Memory for the RMP table has not been reserved by BIOS\n");
                return false;
        }
 
-       rdmsrl(MSR_AMD64_RMP_END, rmp_end);
+       rdmsrq(MSR_AMD64_RMP_END, rmp_end);
        WARN_ONCE(rmp_end & RMP_ADDR_MASK,
                  "Segmented RMP enabled but RMP_END MSR is non-zero\n");
 
@@ -652,7 +652,7 @@ static bool probe_segmented_rmptable_info(void)
 bool snp_probe_rmptable_info(void)
 {
        if (cpu_feature_enabled(X86_FEATURE_SEGMENTED_RMP))
-               rdmsrl(MSR_AMD64_RMP_CFG, rmp_cfg);
+               rdmsrq(MSR_AMD64_RMP_CFG, rmp_cfg);
 
        if (rmp_cfg & MSR_AMD64_SEG_RMP_ENABLED)
                return probe_segmented_rmptable_info();
index 77a6ea1c60e49272ed001465ce0b7827ec44aaa3..cacac3d75f32025c4ab48cb9527388a2feb832d4 100644 (file)
@@ -55,7 +55,7 @@ static void xen_vcpu_notify_suspend(void *data)
        tick_suspend_local();
 
        if (xen_pv_domain() && boot_cpu_has(X86_FEATURE_SPEC_CTRL)) {
-               rdmsrl(MSR_IA32_SPEC_CTRL, tmp);
+               rdmsrq(MSR_IA32_SPEC_CTRL, tmp);
                this_cpu_write(spec_ctrl, tmp);
                wrmsrl(MSR_IA32_SPEC_CTRL, 0);
        }
index 924314cdeebcec468c23863cb6e0ba6dc44551c1..8e31c25c090523a274d4ec79c0889ae8555b03c5 100644 (file)
@@ -110,7 +110,7 @@ static int boost_set_msr(bool enable)
                return -EINVAL;
        }
 
-       rdmsrl(msr_addr, val);
+       rdmsrq(msr_addr, val);
 
        if (enable)
                val &= ~msr_mask;
index 6789eed1bb5ba00a6519a2c4c6d110c8f7abe27f..2b673ac0781f089af9936bddf4c12442b17b2556 100644 (file)
@@ -518,8 +518,8 @@ static inline bool amd_pstate_sample(struct amd_cpudata *cpudata)
        unsigned long flags;
 
        local_irq_save(flags);
-       rdmsrl(MSR_IA32_APERF, aperf);
-       rdmsrl(MSR_IA32_MPERF, mperf);
+       rdmsrq(MSR_IA32_APERF, aperf);
+       rdmsrq(MSR_IA32_MPERF, mperf);
        tsc = rdtsc();
 
        if (cpudata->prev.mperf == mperf || cpudata->prev.tsc == tsc) {
index d23a97ba64783fb7ce74491702b981c5ef670fd7..6d6afcda3681bd5d6194e5a220fcc48844aa377f 100644 (file)
@@ -225,12 +225,12 @@ static int eps_cpu_init(struct cpufreq_policy *policy)
                return -ENODEV;
        }
        /* Enable Enhanced PowerSaver */
-       rdmsrl(MSR_IA32_MISC_ENABLE, val);
+       rdmsrq(MSR_IA32_MISC_ENABLE, val);
        if (!(val & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) {
                val |= MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP;
                wrmsrl(MSR_IA32_MISC_ENABLE, val);
                /* Can be locked at 0 */
-               rdmsrl(MSR_IA32_MISC_ENABLE, val);
+               rdmsrq(MSR_IA32_MISC_ENABLE, val);
                if (!(val & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) {
                        pr_info("Can't enable Enhanced PowerSaver\n");
                        return -ENODEV;
index 4aad79d26c64f59f45e5aeb7c8bdc25094c6a07b..d66a4741fe9f01eb9b56f2d1d2e968b18eaeb863 100644 (file)
@@ -598,7 +598,7 @@ static bool turbo_is_disabled(void)
 {
        u64 misc_en;
 
-       rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
+       rdmsrq(MSR_IA32_MISC_ENABLE, misc_en);
 
        return !!(misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
 }
@@ -1285,7 +1285,7 @@ static void set_power_ctl_ee_state(bool input)
        u64 power_ctl;
 
        mutex_lock(&intel_pstate_driver_lock);
-       rdmsrl(MSR_IA32_POWER_CTL, power_ctl);
+       rdmsrq(MSR_IA32_POWER_CTL, power_ctl);
        if (input) {
                power_ctl &= ~BIT(MSR_IA32_POWER_CTL_BIT_EE);
                power_ctl_ee_state = POWER_CTL_EE_ENABLE;
@@ -1703,7 +1703,7 @@ static ssize_t show_energy_efficiency(struct kobject *kobj, struct kobj_attribut
        u64 power_ctl;
        int enable;
 
-       rdmsrl(MSR_IA32_POWER_CTL, power_ctl);
+       rdmsrq(MSR_IA32_POWER_CTL, power_ctl);
        enable = !!(power_ctl & BIT(MSR_IA32_POWER_CTL_BIT_EE));
        return sprintf(buf, "%d\n", !enable);
 }
@@ -1990,7 +1990,7 @@ static int atom_get_min_pstate(int not_used)
 {
        u64 value;
 
-       rdmsrl(MSR_ATOM_CORE_RATIOS, value);
+       rdmsrq(MSR_ATOM_CORE_RATIOS, value);
        return (value >> 8) & 0x7F;
 }
 
@@ -1998,7 +1998,7 @@ static int atom_get_max_pstate(int not_used)
 {
        u64 value;
 
-       rdmsrl(MSR_ATOM_CORE_RATIOS, value);
+       rdmsrq(MSR_ATOM_CORE_RATIOS, value);
        return (value >> 16) & 0x7F;
 }
 
@@ -2006,7 +2006,7 @@ static int atom_get_turbo_pstate(int not_used)
 {
        u64 value;
 
-       rdmsrl(MSR_ATOM_CORE_TURBO_RATIOS, value);
+       rdmsrq(MSR_ATOM_CORE_TURBO_RATIOS, value);
        return value & 0x7F;
 }
 
@@ -2041,7 +2041,7 @@ static int silvermont_get_scaling(void)
        static int silvermont_freq_table[] = {
                83300, 100000, 133300, 116700, 80000};
 
-       rdmsrl(MSR_FSB_FREQ, value);
+       rdmsrq(MSR_FSB_FREQ, value);
        i = value & 0x7;
        WARN_ON(i > 4);
 
@@ -2057,7 +2057,7 @@ static int airmont_get_scaling(void)
                83300, 100000, 133300, 116700, 80000,
                93300, 90000, 88900, 87500};
 
-       rdmsrl(MSR_FSB_FREQ, value);
+       rdmsrq(MSR_FSB_FREQ, value);
        i = value & 0xF;
        WARN_ON(i > 8);
 
@@ -2068,7 +2068,7 @@ static void atom_get_vid(struct cpudata *cpudata)
 {
        u64 value;
 
-       rdmsrl(MSR_ATOM_CORE_VIDS, value);
+       rdmsrq(MSR_ATOM_CORE_VIDS, value);
        cpudata->vid.min = int_tofp((value >> 8) & 0x7f);
        cpudata->vid.max = int_tofp((value >> 16) & 0x7f);
        cpudata->vid.ratio = div_fp(
@@ -2076,7 +2076,7 @@ static void atom_get_vid(struct cpudata *cpudata)
                int_tofp(cpudata->pstate.max_pstate -
                        cpudata->pstate.min_pstate));
 
-       rdmsrl(MSR_ATOM_CORE_TURBO_VIDS, value);
+       rdmsrq(MSR_ATOM_CORE_TURBO_VIDS, value);
        cpudata->vid.turbo = value & 0x7f;
 }
 
@@ -2425,8 +2425,8 @@ static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time)
        u64 tsc;
 
        local_irq_save(flags);
-       rdmsrl(MSR_IA32_APERF, aperf);
-       rdmsrl(MSR_IA32_MPERF, mperf);
+       rdmsrq(MSR_IA32_APERF, aperf);
+       rdmsrq(MSR_IA32_MPERF, mperf);
        tsc = rdtsc();
        if (cpu->prev_mperf == mperf || cpu->prev_tsc == tsc) {
                local_irq_restore(flags);
@@ -3573,7 +3573,7 @@ static bool __init intel_pstate_platform_pwr_mgmt_exists(void)
 
        id = x86_match_cpu(intel_pstate_cpu_oob_ids);
        if (id) {
-               rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr);
+               rdmsrq(MSR_MISC_PWR_MGMT, misc_pwr);
                if (misc_pwr & BITMASK_OOB) {
                        pr_debug("Bit 8 or 18 in the MISC_PWR_MGMT MSR set\n");
                        pr_debug("P states are controlled in Out of Band mode by the firmware/hardware\n");
@@ -3629,7 +3629,7 @@ static bool intel_pstate_hwp_is_enabled(void)
 {
        u64 value;
 
-       rdmsrl(MSR_PM_ENABLE, value);
+       rdmsrq(MSR_PM_ENABLE, value);
        return !!(value & 0x1);
 }
 
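The APERF/MPERF pairs read above feed the usual effective-frequency calculation: both counters are sampled back-to-back with interrupts off (hence local_irq_save()), and the ratio of their deltas scales the guaranteed frequency. A worked sketch of the arithmetic, assuming the counters did not wrap between samples:

/* Sketch: effective frequency from two APERF/MPERF samples — the
 * arithmetic behind the sampling hunks above.  base_khz is the
 * guaranteed (non-turbo) frequency. */
#include <stdint.h>

static uint64_t effective_khz(uint64_t base_khz,
                              uint64_t aperf0, uint64_t mperf0,
                              uint64_t aperf1, uint64_t mperf1)
{
        uint64_t da = aperf1 - aperf0;  /* cycles at actual frequency */
        uint64_t dm = mperf1 - mperf0;  /* cycles at guaranteed freq. */

        return dm ? base_khz * da / dm : 0;
}

/* e.g. base 2400000 kHz with deltas 3000000/2000000 -> 3600000 kHz:
 * the core averaged 3.6 GHz over the interval. */
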
index 68ccd73c8129cc98dc5166d638ee6c90d2a65b3e..d3146f09fe8070a0bb49ecb869bae12e6e65a73d 100644 (file)
@@ -136,7 +136,7 @@ static void do_longhaul1(unsigned int mults_index)
 {
        union msr_bcr2 bcr2;
 
-       rdmsrl(MSR_VIA_BCR2, bcr2.val);
+       rdmsrq(MSR_VIA_BCR2, bcr2.val);
        /* Enable software clock multiplier */
        bcr2.bits.ESOFTBF = 1;
        bcr2.bits.CLOCKMUL = mults_index & 0xff;
@@ -151,7 +151,7 @@ static void do_longhaul1(unsigned int mults_index)
 
        /* Disable software clock multiplier */
        local_irq_disable();
-       rdmsrl(MSR_VIA_BCR2, bcr2.val);
+       rdmsrq(MSR_VIA_BCR2, bcr2.val);
        bcr2.bits.ESOFTBF = 0;
        wrmsrl(MSR_VIA_BCR2, bcr2.val);
 }
@@ -164,7 +164,7 @@ static void do_powersaver(int cx_address, unsigned int mults_index,
        union msr_longhaul longhaul;
        u32 t;
 
-       rdmsrl(MSR_VIA_LONGHAUL, longhaul.val);
+       rdmsrq(MSR_VIA_LONGHAUL, longhaul.val);
        /* Setup new frequency */
        if (!revid_errata)
                longhaul.bits.RevisionKey = longhaul.bits.RevisionID;
@@ -534,7 +534,7 @@ static void longhaul_setup_voltagescaling(void)
        unsigned int j, speed, pos, kHz_step, numvscales;
        int min_vid_speed;
 
-       rdmsrl(MSR_VIA_LONGHAUL, longhaul.val);
+       rdmsrq(MSR_VIA_LONGHAUL, longhaul.val);
        if (!(longhaul.bits.RevisionID & 1)) {
                pr_info("Voltage scaling not supported by CPU\n");
                return;
index fb2197dc170f496ec16abd075052deabeaaf06cc..dbbc27cb9ce2ff271d379900f270b14fb855918a 100644 (file)
@@ -219,7 +219,7 @@ static void change_FID(int fid)
 {
        union msr_fidvidctl fidvidctl;
 
-       rdmsrl(MSR_K7_FID_VID_CTL, fidvidctl.val);
+       rdmsrq(MSR_K7_FID_VID_CTL, fidvidctl.val);
        if (fidvidctl.bits.FID != fid) {
                fidvidctl.bits.SGTC = latency;
                fidvidctl.bits.FID = fid;
@@ -234,7 +234,7 @@ static void change_VID(int vid)
 {
        union msr_fidvidctl fidvidctl;
 
-       rdmsrl(MSR_K7_FID_VID_CTL, fidvidctl.val);
+       rdmsrq(MSR_K7_FID_VID_CTL, fidvidctl.val);
        if (fidvidctl.bits.VID != vid) {
                fidvidctl.bits.SGTC = latency;
                fidvidctl.bits.VID = vid;
@@ -260,7 +260,7 @@ static int powernow_target(struct cpufreq_policy *policy, unsigned int index)
        fid = powernow_table[index].driver_data & 0xFF;
        vid = (powernow_table[index].driver_data & 0xFF00) >> 8;
 
-       rdmsrl(MSR_K7_FID_VID_STATUS, fidvidstatus.val);
+       rdmsrq(MSR_K7_FID_VID_STATUS, fidvidstatus.val);
        cfid = fidvidstatus.bits.CFID;
        freqs.old = fsb * fid_codes[cfid] / 10;
 
@@ -557,7 +557,7 @@ static unsigned int powernow_get(unsigned int cpu)
 
        if (cpu)
                return 0;
-       rdmsrl(MSR_K7_FID_VID_STATUS, fidvidstatus.val);
+       rdmsrq(MSR_K7_FID_VID_STATUS, fidvidstatus.val);
        cfid = fidvidstatus.bits.CFID;
 
        return fsb * fid_codes[cfid] / 10;
@@ -598,7 +598,7 @@ static int powernow_cpu_init(struct cpufreq_policy *policy)
        if (policy->cpu != 0)
                return -ENODEV;
 
-       rdmsrl(MSR_K7_FID_VID_STATUS, fidvidstatus.val);
+       rdmsrq(MSR_K7_FID_VID_STATUS, fidvidstatus.val);
 
        recalibrate_cpu_khz();
 
index 90f0eb7cc5b9bd36754a600c110724188b5b44b9..db758aa900b055a49f01b0e7149aac090cc14c3b 100644 (file)
@@ -2942,13 +2942,13 @@ static void dct_read_mc_regs(struct amd64_pvt *pvt)
         * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since
         * those are Read-As-Zero.
         */
-       rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem);
+       rdmsrq(MSR_K8_TOP_MEM1, pvt->top_mem);
        edac_dbg(0, "  TOP_MEM:  0x%016llx\n", pvt->top_mem);
 
        /* Check first whether TOP_MEM2 is enabled: */
-       rdmsrl(MSR_AMD64_SYSCFG, msr_val);
+       rdmsrq(MSR_AMD64_SYSCFG, msr_val);
        if (msr_val & BIT(21)) {
-               rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2);
+               rdmsrq(MSR_K8_TOP_MEM2, pvt->top_mem2);
                edac_dbg(0, "  TOP_MEM2: 0x%016llx\n", pvt->top_mem2);
        } else {
                edac_dbg(0, "  TOP_MEM2 disabled\n");
index 976f5be54e36d011bce59a66e76c1a8e5ccea883..6e4b5b33ba8105c608c0a3634fa58c79b531f31a 100644 (file)
@@ -1928,35 +1928,35 @@ static void __init bxt_idle_state_table_update(void)
        unsigned long long msr;
        unsigned int usec;
 
-       rdmsrl(MSR_PKGC6_IRTL, msr);
+       rdmsrq(MSR_PKGC6_IRTL, msr);
        usec = irtl_2_usec(msr);
        if (usec) {
                bxt_cstates[2].exit_latency = usec;
                bxt_cstates[2].target_residency = usec;
        }
 
-       rdmsrl(MSR_PKGC7_IRTL, msr);
+       rdmsrq(MSR_PKGC7_IRTL, msr);
        usec = irtl_2_usec(msr);
        if (usec) {
                bxt_cstates[3].exit_latency = usec;
                bxt_cstates[3].target_residency = usec;
        }
 
-       rdmsrl(MSR_PKGC8_IRTL, msr);
+       rdmsrq(MSR_PKGC8_IRTL, msr);
        usec = irtl_2_usec(msr);
        if (usec) {
                bxt_cstates[4].exit_latency = usec;
                bxt_cstates[4].target_residency = usec;
        }
 
-       rdmsrl(MSR_PKGC9_IRTL, msr);
+       rdmsrq(MSR_PKGC9_IRTL, msr);
        usec = irtl_2_usec(msr);
        if (usec) {
                bxt_cstates[5].exit_latency = usec;
                bxt_cstates[5].target_residency = usec;
        }
 
-       rdmsrl(MSR_PKGC10_IRTL, msr);
+       rdmsrq(MSR_PKGC10_IRTL, msr);
        usec = irtl_2_usec(msr);
        if (usec) {
                bxt_cstates[6].exit_latency = usec;
@@ -1984,7 +1984,7 @@ static void __init sklh_idle_state_table_update(void)
        if ((mwait_substates & (0xF << 28)) == 0)
                return;
 
-       rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr);
+       rdmsrq(MSR_PKG_CST_CONFIG_CONTROL, msr);
 
        /* PC10 is not enabled in PKG C-state limit */
        if ((msr & 0xF) != 8)
@@ -1996,7 +1996,7 @@ static void __init sklh_idle_state_table_update(void)
        /* if SGX is present */
        if (ebx & (1 << 2)) {
 
-               rdmsrl(MSR_IA32_FEAT_CTL, msr);
+               rdmsrq(MSR_IA32_FEAT_CTL, msr);
 
                /* if SGX is enabled */
                if (msr & (1 << 18))
@@ -2015,7 +2015,7 @@ static void __init skx_idle_state_table_update(void)
 {
        unsigned long long msr;
 
-       rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr);
+       rdmsrq(MSR_PKG_CST_CONFIG_CONTROL, msr);
 
        /*
         * 000b: C0/C1 (no package C-state support)
@@ -2068,7 +2068,7 @@ static void __init spr_idle_state_table_update(void)
         * C6. However, if PC6 is disabled, we update the numbers to match
         * core C6.
         */
-       rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr);
+       rdmsrq(MSR_PKG_CST_CONFIG_CONTROL, msr);
 
        /* Limit value 2 and above allow for PC6. */
        if ((msr & 0x7) < 2) {
@@ -2241,7 +2241,7 @@ static void auto_demotion_disable(void)
 {
        unsigned long long msr_bits;
 
-       rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr_bits);
+       rdmsrq(MSR_PKG_CST_CONFIG_CONTROL, msr_bits);
        msr_bits &= ~auto_demotion_disable_flags;
        wrmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr_bits);
 }
@@ -2250,7 +2250,7 @@ static void c1e_promotion_enable(void)
 {
        unsigned long long msr_bits;
 
-       rdmsrl(MSR_IA32_POWER_CTL, msr_bits);
+       rdmsrq(MSR_IA32_POWER_CTL, msr_bits);
        msr_bits |= 0x2;
        wrmsrl(MSR_IA32_POWER_CTL, msr_bits);
 }
@@ -2259,7 +2259,7 @@ static void c1e_promotion_disable(void)
 {
        unsigned long long msr_bits;
 
-       rdmsrl(MSR_IA32_POWER_CTL, msr_bits);
+       rdmsrq(MSR_IA32_POWER_CTL, msr_bits);
        msr_bits &= ~0x2;
        wrmsrl(MSR_IA32_POWER_CTL, msr_bits);
 }
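
The five MSR_PKGCx_IRTL reads above all pass through irtl_2_usec(): the IRTL MSRs pack an interrupt response time limit as a 10-bit count (bits 9:0) scaled by a 3-bit unit selector (bits 12:10). A sketch of that decode — the ns-per-unit table is my reading of intel_idle's irtl_2_usec(), an assumption rather than a spec quote:

/* Sketch of the IRTL decode used by the MSR_PKGCx_IRTL hunks above;
 * the ns-per-unit table is assumed from intel_idle, not the SDM. */
#include <stdint.h>

static const uint32_t irtl_ns_units[] = {
        1, 32, 1024, 32768, 1048576, 33554432, 0, 0,
};

static uint64_t irtl_to_usec(uint64_t irtl)
{
        uint64_t ns = (uint64_t)irtl_ns_units[(irtl >> 10) & 0x7] *
                      (irtl & 0x3ff);

        return ns / 1000;       /* kernel rounds via div_u64() */
}
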
index 341318024a19313621ea27a574438f1b534b2261..ec95d787001bd5d823d8a0e342795656f899b0c4 100644 (file)
@@ -351,20 +351,20 @@ static int __init cs553x_init(void)
                return -ENXIO;
 
        /* If it doesn't have the CS553[56], abort */
-       rdmsrl(MSR_DIVIL_GLD_CAP, val);
+       rdmsrq(MSR_DIVIL_GLD_CAP, val);
        val &= ~0xFFULL;
        if (val != CAP_CS5535 && val != CAP_CS5536)
                return -ENXIO;
 
        /* If it doesn't have the NAND controller enabled, abort */
-       rdmsrl(MSR_DIVIL_BALL_OPTS, val);
+       rdmsrq(MSR_DIVIL_BALL_OPTS, val);
        if (val & PIN_OPT_IDE) {
                pr_info("CS553x NAND controller: Flash I/O not enabled in MSR_DIVIL_BALL_OPTS.\n");
                return -ENXIO;
        }
 
        for (i = 0; i < NR_CS553X_CONTROLLERS; i++) {
-               rdmsrl(MSR_DIVIL_LBAR_FLSH0 + i, val);
+               rdmsrq(MSR_DIVIL_LBAR_FLSH0 + i, val);
 
                if ((val & (FLSH_LBAR_EN|FLSH_NOR_NAND)) == (FLSH_LBAR_EN|FLSH_NOR_NAND))
                        err = cs553x_init_one(i, !!(val & FLSH_MEM_IO), val & 0xFFFFFFFF);
index de54bd1a5970bf7c078c4a2d55558c5c7887dd0e..d3e48669a01d5cd95f7d241d66c7b2db2e20ccc9 100644 (file)
@@ -128,7 +128,7 @@ static void copy_hashes_authenticate_chunks(struct work_struct *work)
        msrs = ifs_get_test_msrs(dev);
        /* run scan hash copy */
        wrmsrl(msrs->copy_hashes, ifs_hash_ptr);
-       rdmsrl(msrs->copy_hashes_status, hashes_status.data);
+       rdmsrq(msrs->copy_hashes_status, hashes_status.data);
 
        /* enumerate the scan image information */
        num_chunks = hashes_status.num_chunks;
@@ -150,7 +150,7 @@ static void copy_hashes_authenticate_chunks(struct work_struct *work)
                linear_addr |= i;
 
                wrmsrl(msrs->copy_chunks, linear_addr);
-               rdmsrl(msrs->copy_chunks_status, chunk_status.data);
+               rdmsrq(msrs->copy_chunks_status, chunk_status.data);
 
                ifsd->valid_chunks = chunk_status.valid_chunks;
                err_code = chunk_status.error_code;
@@ -196,7 +196,7 @@ static int copy_hashes_authenticate_chunks_gen2(struct device *dev)
 
        if (need_copy_scan_hashes(ifsd)) {
                wrmsrl(msrs->copy_hashes, ifs_hash_ptr);
-               rdmsrl(msrs->copy_hashes_status, hashes_status.data);
+               rdmsrq(msrs->copy_hashes_status, hashes_status.data);
 
                /* enumerate the scan image information */
                chunk_size = hashes_status.chunk_size * SZ_1K;
@@ -217,7 +217,7 @@ static int copy_hashes_authenticate_chunks_gen2(struct device *dev)
 
        if (ifsd->generation >= IFS_GEN_STRIDE_AWARE) {
                wrmsrl(msrs->test_ctrl, INVALIDATE_STRIDE);
-               rdmsrl(msrs->copy_chunks_status, chunk_status.data);
+               rdmsrq(msrs->copy_chunks_status, chunk_status.data);
                if (chunk_status.valid_chunks != 0) {
                        dev_err(dev, "Couldn't invalidate installed stride - %d\n",
                                chunk_status.valid_chunks);
@@ -240,7 +240,7 @@ static int copy_hashes_authenticate_chunks_gen2(struct device *dev)
                        local_irq_disable();
                        wrmsrl(msrs->copy_chunks, (u64)chunk_table);
                        local_irq_enable();
-                       rdmsrl(msrs->copy_chunks_status, chunk_status.data);
+                       rdmsrq(msrs->copy_chunks_status, chunk_status.data);
                        err_code = chunk_status.error_code;
                } while (err_code == AUTH_INTERRUPTED_ERROR && --retry_count);
 
index f978dd05d4d8b2ed117762b4cf1af6fdc9582f4e..8e669540a0a7ecae473b3482d9dcfa477ea6c57b 100644 (file)
@@ -210,7 +210,7 @@ static int doscan(void *data)
         * are processed in a single pass) before it retires.
         */
        wrmsrl(MSR_ACTIVATE_SCAN, params->activate->data);
-       rdmsrl(MSR_SCAN_STATUS, status.data);
+       rdmsrq(MSR_SCAN_STATUS, status.data);
 
        trace_ifs_status(ifsd->cur_batch, start, stop, status.data);
 
@@ -323,7 +323,7 @@ static int do_array_test(void *data)
        if (cpu == first) {
                wrmsrl(MSR_ARRAY_BIST, command->data);
                /* Pass back the result of the test */
-               rdmsrl(MSR_ARRAY_BIST, command->data);
+               rdmsrq(MSR_ARRAY_BIST, command->data);
        }
 
        return 0;
@@ -375,7 +375,7 @@ static int do_array_test_gen1(void *status)
 
        if (cpu == first) {
                wrmsrl(MSR_ARRAY_TRIGGER, ARRAY_GEN1_TEST_ALL_ARRAYS);
-               rdmsrl(MSR_ARRAY_STATUS, *((u64 *)status));
+               rdmsrq(MSR_ARRAY_STATUS, *((u64 *)status));
        }
 
        return 0;
@@ -527,7 +527,7 @@ static int dosbaf(void *data)
         * during the "execution" of the WRMSR.
         */
        wrmsrl(MSR_ACTIVATE_SBAF, run_params->activate->data);
-       rdmsrl(MSR_SBAF_STATUS, status.data);
+       rdmsrq(MSR_SBAF_STATUS, status.data);
        trace_ifs_sbaf(ifsd->cur_batch, *run_params->activate, status);
 
        /* Pass back the result of the test */
index 2c5af158bbe2e85008106089b091b14cf0068385..aa9aa1cc244a326541207b02513c382e1e827c17 100644 (file)
@@ -227,7 +227,7 @@ static void disable_c1_auto_demote(void *unused)
        int cpunum = smp_processor_id();
        u64 val;
 
-       rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, val);
+       rdmsrq(MSR_PKG_CST_CONFIG_CONTROL, val);
        per_cpu(pkg_cst_config, cpunum) = val;
        val &= ~NHM_C1_AUTO_DEMOTE;
        wrmsrl(MSR_PKG_CST_CONFIG_CONTROL, val);
index c4b7af00352bcc790426de0faae834ddaec343b4..20624fcb96752d700217fa4f9d90876e21e143bd 100644 (file)
@@ -39,7 +39,7 @@ static int isst_if_send_mbox_cmd(u8 command, u8 sub_command, u32 parameter,
        /* Poll for rb bit == 0 */
        retries = OS_MAILBOX_RETRY_COUNT;
        do {
-               rdmsrl(MSR_OS_MAILBOX_INTERFACE, data);
+               rdmsrq(MSR_OS_MAILBOX_INTERFACE, data);
                if (data & BIT_ULL(MSR_OS_MAILBOX_BUSY_BIT)) {
                        ret = -EBUSY;
                        continue;
@@ -64,7 +64,7 @@ static int isst_if_send_mbox_cmd(u8 command, u8 sub_command, u32 parameter,
        /* Poll for rb bit == 0 */
        retries = OS_MAILBOX_RETRY_COUNT;
        do {
-               rdmsrl(MSR_OS_MAILBOX_INTERFACE, data);
+               rdmsrq(MSR_OS_MAILBOX_INTERFACE, data);
                if (data & BIT_ULL(MSR_OS_MAILBOX_BUSY_BIT)) {
                        ret = -EBUSY;
                        continue;
@@ -74,7 +74,7 @@ static int isst_if_send_mbox_cmd(u8 command, u8 sub_command, u32 parameter,
                        return -ENXIO;
 
                if (response_data) {
-                       rdmsrl(MSR_OS_MAILBOX_DATA, data);
+                       rdmsrq(MSR_OS_MAILBOX_DATA, data);
                        *response_data = data;
                }
                ret = 0;
index 9978cdd1985185d947179bd350aa48edaf03481c..0b8ef0cfaf80ec2e2e533ac8cf29c6906412b783 100644 (file)
@@ -556,7 +556,7 @@ static bool disable_dynamic_sst_features(void)
 {
        u64 value;
 
-       rdmsrl(MSR_PM_ENABLE, value);
+       rdmsrq(MSR_PM_ENABLE, value);
        return !(value & 0x1);
 }
 
index 5d717b1c23cf8ad21f9d3afc3129da27f66af10b..74b2a87000224558163897a7617f1795da79e5dc 100644 (file)
@@ -370,7 +370,7 @@ static void ips_cpu_raise(struct ips_driver *ips)
        if (!ips->cpu_turbo_enabled)
                return;
 
-       rdmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override);
+       rdmsrq(TURBO_POWER_CURRENT_LIMIT, turbo_override);
 
        cur_tdp_limit = turbo_override & TURBO_TDP_MASK;
        new_tdp_limit = cur_tdp_limit + 8; /* 1W increase */
@@ -405,7 +405,7 @@ static void ips_cpu_lower(struct ips_driver *ips)
        u64 turbo_override;
        u16 cur_limit, new_limit;
 
-       rdmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override);
+       rdmsrq(TURBO_POWER_CURRENT_LIMIT, turbo_override);
 
        cur_limit = turbo_override & TURBO_TDP_MASK;
        new_limit = cur_limit - 8; /* 1W decrease */
@@ -437,7 +437,7 @@ static void do_enable_cpu_turbo(void *data)
 {
        u64 perf_ctl;
 
-       rdmsrl(IA32_PERF_CTL, perf_ctl);
+       rdmsrq(IA32_PERF_CTL, perf_ctl);
        if (perf_ctl & IA32_PERF_TURBO_DIS) {
                perf_ctl &= ~IA32_PERF_TURBO_DIS;
                wrmsrl(IA32_PERF_CTL, perf_ctl);
@@ -475,7 +475,7 @@ static void do_disable_cpu_turbo(void *data)
 {
        u64 perf_ctl;
 
-       rdmsrl(IA32_PERF_CTL, perf_ctl);
+       rdmsrq(IA32_PERF_CTL, perf_ctl);
        if (!(perf_ctl & IA32_PERF_TURBO_DIS)) {
                perf_ctl |= IA32_PERF_TURBO_DIS;
                wrmsrl(IA32_PERF_CTL, perf_ctl);
@@ -1215,7 +1215,7 @@ static int cpu_clamp_show(struct seq_file *m, void *data)
        u64 turbo_override;
        int tdp, tdc;
 
-       rdmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override);
+       rdmsrq(TURBO_POWER_CURRENT_LIMIT, turbo_override);
 
        tdp = (int)(turbo_override & TURBO_TDP_MASK);
        tdc = (int)((turbo_override & TURBO_TDC_MASK) >> TURBO_TDC_SHIFT);
@@ -1290,7 +1290,7 @@ static struct ips_mcp_limits *ips_detect_cpu(struct ips_driver *ips)
                return NULL;
        }
 
-       rdmsrl(IA32_MISC_ENABLE, misc_en);
+       rdmsrq(IA32_MISC_ENABLE, misc_en);
        /*
         * If the turbo enable bit isn't set, we shouldn't try to enable/disable
         * turbo manually or we'll get an illegal MSR access, even though
@@ -1312,7 +1312,7 @@ static struct ips_mcp_limits *ips_detect_cpu(struct ips_driver *ips)
                return NULL;
        }
 
-       rdmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_power);
+       rdmsrq(TURBO_POWER_CURRENT_LIMIT, turbo_power);
        tdp = turbo_power & TURBO_TDP_MASK;
 
        /* Sanity check TDP against CPU */
@@ -1496,7 +1496,7 @@ static int ips_probe(struct pci_dev *dev, const struct pci_device_id *id)
         * Check PLATFORM_INFO MSR to make sure this chip is
         * turbo capable.
         */
-       rdmsrl(PLATFORM_INFO, platform_info);
+       rdmsrq(PLATFORM_INFO, platform_info);
        if (!(platform_info & PLATFORM_TDP)) {
                dev_err(&dev->dev, "platform indicates TDP override unavailable, aborting\n");
                return -ENODEV;
@@ -1529,7 +1529,7 @@ static int ips_probe(struct pci_dev *dev, const struct pci_device_id *id)
        ips->mgta_val = thm_readw(THM_MGTA);
 
        /* Save turbo limits & ratios */
-       rdmsrl(TURBO_POWER_CURRENT_LIMIT, ips->orig_turbo_limit);
+       rdmsrq(TURBO_POWER_CURRENT_LIMIT, ips->orig_turbo_limit);
 
        ips_disable_cpu_turbo(ips);
        ips->cpu_turbo_enabled = false;
@@ -1596,7 +1596,7 @@ static void ips_remove(struct pci_dev *dev)
        if (ips->gpu_turbo_disable)
                symbol_put(i915_gpu_turbo_disable);
 
-       rdmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override);
+       rdmsrq(TURBO_POWER_CURRENT_LIMIT, turbo_override);
        turbo_override &= ~(TURBO_TDC_OVR_EN | TURBO_TDP_OVR_EN);
        wrmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override);
        wrmsrl(TURBO_POWER_CURRENT_LIMIT, ips->orig_turbo_limit);
index 5b18a46a10b06df0eb776247d706bcc1d82071ef..2aaac15748a5f4525bdded909c20e60b57cc263f 100644 (file)
@@ -284,7 +284,7 @@ void intel_hfi_process_event(__u64 pkg_therm_status_msr_val)
        if (!raw_spin_trylock(&hfi_instance->event_lock))
                return;
 
-       rdmsrl(MSR_IA32_PACKAGE_THERM_STATUS, msr);
+       rdmsrq(MSR_IA32_PACKAGE_THERM_STATUS, msr);
        hfi = msr & PACKAGE_THERM_STATUS_HFI_UPDATED;
        if (!hfi) {
                raw_spin_unlock(&hfi_instance->event_lock);
@@ -356,7 +356,7 @@ static void hfi_enable(void)
 {
        u64 msr_val;
 
-       rdmsrl(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val);
+       rdmsrq(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val);
        msr_val |= HW_FEEDBACK_CONFIG_HFI_ENABLE_BIT;
        wrmsrl(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val);
 }
@@ -377,7 +377,7 @@ static void hfi_disable(void)
        u64 msr_val;
        int i;
 
-       rdmsrl(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val);
+       rdmsrq(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val);
        msr_val &= ~HW_FEEDBACK_CONFIG_HFI_ENABLE_BIT;
        wrmsrl(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val);
 
@@ -388,7 +388,7 @@ static void hfi_disable(void)
         * memory.
         */
        for (i = 0; i < 2000; i++) {
-               rdmsrl(MSR_IA32_PACKAGE_THERM_STATUS, msr_val);
+               rdmsrq(MSR_IA32_PACKAGE_THERM_STATUS, msr_val);
                if (msr_val & PACKAGE_THERM_STATUS_HFI_UPDATED)
                        break;
 
index e69868e868eb9e9abd0a2e826dbf75913605b4bc..f296f35ef7d4dc3becea9e860119d521523b3415 100644 (file)
@@ -287,7 +287,7 @@ static void get_therm_status(int level, bool *proc_hot, u8 *temp)
        else
                msr = MSR_IA32_PACKAGE_THERM_STATUS;
 
-       rdmsrl(msr, msr_val);
+       rdmsrq(msr, msr_val);
        if (msr_val & THERM_STATUS_PROCHOT_LOG)
                *proc_hot = true;
        else
@@ -654,7 +654,7 @@ void intel_thermal_interrupt(void)
        if (static_cpu_has(X86_FEATURE_HWP))
                notify_hwp_interrupt();
 
-       rdmsrl(MSR_IA32_THERM_STATUS, msr_val);
+       rdmsrq(MSR_IA32_THERM_STATUS, msr_val);
 
        /* Check for violation of core thermal thresholds*/
        notify_thresholds(msr_val);
@@ -669,7 +669,7 @@ void intel_thermal_interrupt(void)
                                        CORE_LEVEL);
 
        if (this_cpu_has(X86_FEATURE_PTS)) {
-               rdmsrl(MSR_IA32_PACKAGE_THERM_STATUS, msr_val);
+               rdmsrq(MSR_IA32_PACKAGE_THERM_STATUS, msr_val);
                /* check violations of package thermal thresholds */
                notify_package_thresholds(msr_val);
                therm_throt_process(msr_val & PACKAGE_THERM_STATUS_PROCHOT,
diff --git a/drivers/video/fbdev/geode/gxfb_core.c b/drivers/video/fbdev/geode/gxfb_core.c
index af996634c1a9c0f43377b9d9cb1320313e700827..2b27d6540805e75554c424316f768e429e1d6e1f 100644
@@ -377,7 +377,7 @@ static int gxfb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
        /* Figure out if this is a TFT or CRT part */
 
-       rdmsrl(MSR_GX_GLD_MSR_CONFIG, val);
+       rdmsrq(MSR_GX_GLD_MSR_CONFIG, val);
 
        if ((val & MSR_GX_GLD_MSR_CONFIG_FP) == MSR_GX_GLD_MSR_CONFIG_FP)
                par->enable_crt = 0;
diff --git a/drivers/video/fbdev/geode/lxfb_ops.c b/drivers/video/fbdev/geode/lxfb_ops.c
index 32baaf59fcf78ad096d5fc4801e05a2d4d77dad6..5402c000ce8e2f4472f1cd455bf2f87f192f055b 100644
@@ -358,7 +358,7 @@ void lx_set_mode(struct fb_info *info)
 
        /* Set output mode */
 
-       rdmsrl(MSR_LX_GLD_MSR_CONFIG, msrval);
+       rdmsrq(MSR_LX_GLD_MSR_CONFIG, msrval);
        msrval &= ~MSR_LX_GLD_MSR_CONFIG_FMT;
 
        if (par->output & OUTPUT_PANEL) {
@@ -419,7 +419,7 @@ void lx_set_mode(struct fb_info *info)
 
        /* Set default watermark values */
 
-       rdmsrl(MSR_LX_SPARE_MSR, msrval);
+       rdmsrq(MSR_LX_SPARE_MSR, msrval);
 
        msrval &= ~(MSR_LX_SPARE_MSR_DIS_CFIFO_HGO
                        | MSR_LX_SPARE_MSR_VFIFO_ARB_SEL
@@ -591,10 +591,10 @@ static void lx_save_regs(struct lxfb_par *par)
        } while ((i & GP_BLT_STATUS_PB) || !(i & GP_BLT_STATUS_CE));
 
        /* save MSRs */
-       rdmsrl(MSR_LX_MSR_PADSEL, par->msr.padsel);
-       rdmsrl(MSR_GLCP_DOTPLL, par->msr.dotpll);
-       rdmsrl(MSR_LX_GLD_MSR_CONFIG, par->msr.dfglcfg);
-       rdmsrl(MSR_LX_SPARE_MSR, par->msr.dcspare);
+       rdmsrq(MSR_LX_MSR_PADSEL, par->msr.padsel);
+       rdmsrq(MSR_GLCP_DOTPLL, par->msr.dotpll);
+       rdmsrq(MSR_LX_GLD_MSR_CONFIG, par->msr.dfglcfg);
+       rdmsrq(MSR_LX_SPARE_MSR, par->msr.dcspare);
 
        write_dc(par, DC_UNLOCK, DC_UNLOCK_UNLOCK);
 
diff --git a/drivers/video/fbdev/geode/suspend_gx.c b/drivers/video/fbdev/geode/suspend_gx.c
index 8c49d4e98772253b6602ea295b8804369251c147..ed4997724063a6e555fa090b55cd99c2fc62a333 100644
@@ -21,8 +21,8 @@ static void gx_save_regs(struct gxfb_par *par)
        } while (i & (GP_BLT_STATUS_BLT_PENDING | GP_BLT_STATUS_BLT_BUSY));
 
        /* save MSRs */
-       rdmsrl(MSR_GX_MSR_PADSEL, par->msr.padsel);
-       rdmsrl(MSR_GLCP_DOTPLL, par->msr.dotpll);
+       rdmsrq(MSR_GX_MSR_PADSEL, par->msr.padsel);
+       rdmsrq(MSR_GLCP_DOTPLL, par->msr.dotpll);
 
        write_dc(par, DC_UNLOCK, DC_UNLOCK_UNLOCK);
 
@@ -43,14 +43,14 @@ static void gx_set_dotpll(uint32_t dotpll_hi)
        uint32_t dotpll_lo;
        int i;
 
-       rdmsrl(MSR_GLCP_DOTPLL, dotpll_lo);
+       rdmsrq(MSR_GLCP_DOTPLL, dotpll_lo);
        dotpll_lo |= MSR_GLCP_DOTPLL_DOTRESET;
        dotpll_lo &= ~MSR_GLCP_DOTPLL_BYPASS;
        wrmsr(MSR_GLCP_DOTPLL, dotpll_lo, dotpll_hi);
 
        /* wait for the PLL to lock */
        for (i = 0; i < 200; i++) {
-               rdmsrl(MSR_GLCP_DOTPLL, dotpll_lo);
+               rdmsrq(MSR_GLCP_DOTPLL, dotpll_lo);
                if (dotpll_lo & MSR_GLCP_DOTPLL_LOCK)
                        break;
                udelay(1);
diff --git a/drivers/video/fbdev/geode/video_gx.c b/drivers/video/fbdev/geode/video_gx.c
index 91dac2aa247cc0f77fbf8276f602ea08440c6c98..9bee01751f47ffc0dd1a36238cfd52ce16bce235 100644
@@ -142,8 +142,8 @@ void gx_set_dclk_frequency(struct fb_info *info)
                }
        }
 
-       rdmsrl(MSR_GLCP_SYS_RSTPLL, sys_rstpll);
-       rdmsrl(MSR_GLCP_DOTPLL, dotpll);
+       rdmsrq(MSR_GLCP_SYS_RSTPLL, sys_rstpll);
+       rdmsrq(MSR_GLCP_DOTPLL, dotpll);
 
        /* Program new M, N and P. */
        dotpll &= 0x00000000ffffffffull;
@@ -167,7 +167,7 @@ void gx_set_dclk_frequency(struct fb_info *info)
 
        /* Wait for LOCK bit. */
        do {
-               rdmsrl(MSR_GLCP_DOTPLL, dotpll);
+               rdmsrq(MSR_GLCP_DOTPLL, dotpll);
        } while (timeout-- && !(dotpll & MSR_GLCP_DOTPLL_LOCK));
 }
 
@@ -180,7 +180,7 @@ gx_configure_tft(struct fb_info *info)
 
        /* Set up the DF pad select MSR */
 
-       rdmsrl(MSR_GX_MSR_PADSEL, val);
+       rdmsrq(MSR_GX_MSR_PADSEL, val);
        val &= ~MSR_GX_MSR_PADSEL_MASK;
        val |= MSR_GX_MSR_PADSEL_TFT;
        wrmsrl(MSR_GX_MSR_PADSEL, val);
diff --git a/include/hyperv/hvgdk_mini.h b/include/hyperv/hvgdk_mini.h
index abf0bd76e3703a1e00768bde621ee522cd4721cb..53699750db86d5dd420244f7aead4f3b64c43cd5 100644
@@ -1013,7 +1013,7 @@ enum hv_register_name {
 
 /*
  * To support arch-generic code calling hv_set/get_register:
- * - On x86, HV_MSR_ indicates an MSR accessed via rdmsrl/wrmsrl
+ * - On x86, HV_MSR_ indicates an MSR accessed via rdmsrq/wrmsrl
  * - On ARM, HV_MSR_ indicates a VP register accessed via hypercall
  */
 #define HV_MSR_CRASH_P0                (HV_X64_MSR_CRASH_P0)
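
For reference, a minimal sketch of the read-modify-write pattern the converted call sites above follow. MSR_EXAMPLE, MSR_EXAMPLE_ENABLE_BIT and example_enable_feature() are hypothetical names, not part of this patch; note that the write side is deliberately left as wrmsrl() by this patch, as the hunks above show:

	#include <asm/msr.h>

	/* Hypothetical example mirroring the converted call sites. */
	static void example_enable_feature(void)
	{
		u64 msr_val;

		rdmsrq(MSR_EXAMPLE, msr_val);		/* read the full 64-bit MSR value */
		msr_val |= MSR_EXAMPLE_ENABLE_BIT;	/* set the (hypothetical) enable bit */
		wrmsrl(MSR_EXAMPLE, msr_val);		/* write back; wrmsrl() is unchanged here */
	}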