x86/msr: Convert __rdmsr() uses to native_rdmsrq() uses
author	Xin Li (Intel) <xin@zytor.com>
Sun, 27 Apr 2025 09:20:21 +0000 (02:20 -0700)
committer	Ingo Molnar <mingo@kernel.org>
Fri, 2 May 2025 08:36:35 +0000 (10:36 +0200)
__rdmsr() is the lowest-level MSR read API, with native_rdmsr()
and native_rdmsrq() serving as higher-level wrappers around it:

  #define native_rdmsr(msr, val1, val2)                   \
  do {                                                    \
          u64 __val = __rdmsr((msr));                     \
          (void)((val1) = (u32)__val);                    \
          (void)((val2) = (u32)(__val >> 32));            \
  } while (0)

  static __always_inline u64 native_rdmsrq(u32 msr)
  {
          return __rdmsr(msr);
  }

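Both wrappers ultimately boil down to the raw RDMSR instruction.  As
a rough sketch (not the exact kernel source; the real __rdmsr() also
carries an exception table entry for handling faulting reads), the
primitive underneath looks roughly like this, with the hypothetical
name __rdmsr_sketch() used purely for illustration:

  static __always_inline u64 __rdmsr_sketch(u32 msr)
  {
          u32 low, high;

          /* Raw RDMSR: MSR index in ECX, result returned in EDX:EAX */
          asm volatile("rdmsr" : "=a" (low), "=d" (high) : "c" (msr));

          /* Combine EDX:EAX into a single 64-bit value */
          return low | ((u64)high << 32);
  }
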
However, __rdmsr() is still used directly in a number of places.

MSR APIs are designed for different scenarios, such as native or
pvops, with or without trace, and safe or non-safe.  Unfortunately,
the current MSR API names do not adequately reflect these factors,
making it challenging to select the most appropriate API for
various situations.

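As an illustration only (not code from this patch, and assuming the
pvops-aware rdmsrq() helper from the same rename series), the
practical difference is that the generic accessor may be redirected
through paravirt ops and traced, while the native variant always
executes the RDMSR instruction directly, which is what noinstr and
early boot code needs:

  u64 val;

  /* pvops-aware: may go through the paravirt layer and be traced */
  rdmsrq(MSR_IA32_MCG_CAP, val);

  /* native: always the raw RDMSR instruction, no pvops, no tracing */
  val = native_rdmsrq(MSR_IA32_MCG_CAP);
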
To pave the way for improving MSR API names, convert __rdmsr()
uses to native_rdmsrq() to ensure consistent usage.  Later, these
APIs can be renamed to better reflect their implications, such as
native or pvops, with or without trace, and safe or non-safe.

No functional change intended.

Signed-off-by: Xin Li (Intel) <xin@zytor.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Sean Christopherson <seanjc@google.com>
Cc: Stefano Stabellini <sstabellini@kernel.org>
Cc: Uros Bizjak <ubizjak@gmail.com>
Cc: Vitaly Kuznetsov <vkuznets@redhat.com>
Link: https://lore.kernel.org/r/20250427092027.1598740-10-xin@zytor.com
arch/x86/coco/sev/core.c
arch/x86/events/amd/brs.c
arch/x86/hyperv/hv_vtl.c
arch/x86/hyperv/ivm.c
arch/x86/include/asm/mshyperv.h
arch/x86/kernel/cpu/common.c
arch/x86/kernel/cpu/mce/core.c
arch/x86/kernel/cpu/resctrl/pseudo_lock.c
arch/x86/kvm/vmx/vmx.c
arch/x86/mm/mem_encrypt_identity.c

index 85b16a0ee4176ed08ef6eaeb7c0d65aaf21b0291..ff82151f77188b792c884c2f7c60b27321bc03b9 100644
--- a/arch/x86/coco/sev/core.c
+++ b/arch/x86/coco/sev/core.c
@@ -277,7 +277,7 @@ static noinstr struct ghcb *__sev_get_ghcb(struct ghcb_state *state)
 
 static inline u64 sev_es_rd_ghcb_msr(void)
 {
-       return __rdmsr(MSR_AMD64_SEV_ES_GHCB);
+       return native_rdmsrq(MSR_AMD64_SEV_ES_GHCB);
 }
 
 static __always_inline void sev_es_wr_ghcb_msr(u64 val)
index 3f5ecfd80d1ec30caa503c9b9168f351b5fd1f13..06f35a6b58a5b228996df99ba3bb62a2bc28cf82 100644
--- a/arch/x86/events/amd/brs.c
+++ b/arch/x86/events/amd/brs.c
@@ -49,7 +49,7 @@ static __always_inline void set_debug_extn_cfg(u64 val)
 
 static __always_inline u64 get_debug_extn_cfg(void)
 {
-       return __rdmsr(MSR_AMD_DBG_EXTN_CFG);
+       return native_rdmsrq(MSR_AMD_DBG_EXTN_CFG);
 }
 
 static bool __init amd_brs_detect(void)
index 079b276e5f300cb788667963b4720ebfab56ce3c..4580936dcb03de0087a68cf2add5e7a79ecc4daf 100644
--- a/arch/x86/hyperv/hv_vtl.c
+++ b/arch/x86/hyperv/hv_vtl.c
@@ -150,11 +150,11 @@ static int hv_vtl_bringup_vcpu(u32 target_vp_index, int cpu, u64 eip_ignored)
        input->vp_context.rip = rip;
        input->vp_context.rsp = rsp;
        input->vp_context.rflags = 0x0000000000000002;
-       input->vp_context.efer = __rdmsr(MSR_EFER);
+       input->vp_context.efer = native_rdmsrq(MSR_EFER);
        input->vp_context.cr0 = native_read_cr0();
        input->vp_context.cr3 = __native_read_cr3();
        input->vp_context.cr4 = native_read_cr4();
-       input->vp_context.msr_cr_pat = __rdmsr(MSR_IA32_CR_PAT);
+       input->vp_context.msr_cr_pat = native_rdmsrq(MSR_IA32_CR_PAT);
        input->vp_context.idtr.limit = idt_ptr.size;
        input->vp_context.idtr.base = idt_ptr.address;
        input->vp_context.gdtr.limit = gdt_ptr.size;
index 8209de792388058558daebc34e9aa4bd10ee9250..09a165a3c41e98db7e922cada68e4f24c15da26d 100644
--- a/arch/x86/hyperv/ivm.c
+++ b/arch/x86/hyperv/ivm.c
@@ -111,7 +111,7 @@ u64 hv_ghcb_hypercall(u64 control, void *input, void *output, u32 input_size)
 
 static inline u64 rd_ghcb_msr(void)
 {
-       return __rdmsr(MSR_AMD64_SEV_ES_GHCB);
+       return native_rdmsrq(MSR_AMD64_SEV_ES_GHCB);
 }
 
 static inline void wr_ghcb_msr(u64 val)
index 15d00dace70fa8a90e86bde45ad984d8f90e910d..778444310cfbfe631771aea701f14954b05e1209 100644
--- a/arch/x86/include/asm/mshyperv.h
+++ b/arch/x86/include/asm/mshyperv.h
@@ -305,7 +305,7 @@ void hv_set_non_nested_msr(unsigned int reg, u64 value);
 
 static __always_inline u64 hv_raw_get_msr(unsigned int reg)
 {
-       return __rdmsr(reg);
+       return native_rdmsrq(reg);
 }
 
 #else /* CONFIG_HYPERV */
index 079ded4eeb86fba893f5b51bf123946492112af0..cefc99990bde2207d1d7db15ed8d195f2500295c 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -164,7 +164,7 @@ static void ppin_init(struct cpuinfo_x86 *c)
 
        /* Is the enable bit set? */
        if (val & 2UL) {
-               c->ppin = __rdmsr(info->msr_ppin);
+               c->ppin = native_rdmsrq(info->msr_ppin);
                set_cpu_cap(c, info->feature);
                return;
        }
index 96db2fd03e665966958a33b515ac7f5f19d007d7..e9b3c5d4a52ed32165547e54db378d6683d119ca 100644
--- a/arch/x86/kernel/cpu/mce/core.c
+++ b/arch/x86/kernel/cpu/mce/core.c
@@ -121,7 +121,7 @@ void mce_prep_record_common(struct mce *m)
 {
        m->cpuid        = cpuid_eax(1);
        m->cpuvendor    = boot_cpu_data.x86_vendor;
-       m->mcgcap       = __rdmsr(MSR_IA32_MCG_CAP);
+       m->mcgcap       = native_rdmsrq(MSR_IA32_MCG_CAP);
        /* need the internal __ version to avoid deadlocks */
        m->time         = __ktime_get_real_seconds();
 }
@@ -1298,7 +1298,7 @@ static noinstr bool mce_check_crashing_cpu(void)
            (crashing_cpu != -1 && crashing_cpu != cpu)) {
                u64 mcgstatus;
 
-               mcgstatus = __rdmsr(MSR_IA32_MCG_STATUS);
+               mcgstatus = native_rdmsrq(MSR_IA32_MCG_STATUS);
 
                if (boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN) {
                        if (mcgstatus & MCG_STATUS_LMCES)
index 6e5edd76086e5e19b4314d99d5ef46ba30d55d7d..324bd4919300cc606b149f35c253acbf8e5657c9 100644
--- a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
+++ b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
@@ -482,7 +482,7 @@ int resctrl_arch_pseudo_lock_fn(void *_plr)
         * the buffer and evict pseudo-locked memory read earlier from the
         * cache.
         */
-       saved_msr = __rdmsr(MSR_MISC_FEATURE_CONTROL);
+       saved_msr = native_rdmsrq(MSR_MISC_FEATURE_CONTROL);
        native_wrmsrq(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits);
        closid_p = this_cpu_read(pqr_state.cur_closid);
        rmid_p = this_cpu_read(pqr_state.cur_rmid);
index d8412cfdb18e8befd8892cd80144343fec129679..63de5f6051e5d1c54b6883b04d25e2796a726e8f 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -381,7 +381,7 @@ static __always_inline void vmx_disable_fb_clear(struct vcpu_vmx *vmx)
        if (!vmx->disable_fb_clear)
                return;
 
-       msr = __rdmsr(MSR_IA32_MCU_OPT_CTRL);
+       msr = native_rdmsrq(MSR_IA32_MCU_OPT_CTRL);
        msr |= FB_CLEAR_DIS;
        native_wrmsrq(MSR_IA32_MCU_OPT_CTRL, msr);
        /* Cache the MSR value to avoid reading it later */
@@ -7308,7 +7308,7 @@ void noinstr vmx_spec_ctrl_restore_host(struct vcpu_vmx *vmx,
                return;
 
        if (flags & VMX_RUN_SAVE_SPEC_CTRL)
-               vmx->spec_ctrl = __rdmsr(MSR_IA32_SPEC_CTRL);
+               vmx->spec_ctrl = native_rdmsrq(MSR_IA32_SPEC_CTRL);
 
        /*
         * If the guest/host SPEC_CTRL values differ, restore the host value.
index 5eecdd92da10500c867bc35957ce9b5ac16b144d..25f6677e85751fca94031b240c73669c390d014e 100644
--- a/arch/x86/mm/mem_encrypt_identity.c
+++ b/arch/x86/mm/mem_encrypt_identity.c
@@ -526,7 +526,7 @@ void __head sme_enable(struct boot_params *bp)
        me_mask = 1UL << (ebx & 0x3f);
 
        /* Check the SEV MSR whether SEV or SME is enabled */
-       RIP_REL_REF(sev_status) = msr = __rdmsr(MSR_AMD64_SEV);
+       RIP_REL_REF(sev_status) = msr = native_rdmsrq(MSR_AMD64_SEV);
        feature_mask = (msr & MSR_AMD64_SEV_ENABLED) ? AMD_SEV_BIT : AMD_SME_BIT;
 
        /*
@@ -557,7 +557,7 @@ void __head sme_enable(struct boot_params *bp)
                        return;
 
                /* For SME, check the SYSCFG MSR */
-               msr = __rdmsr(MSR_AMD64_SYSCFG);
+               msr = native_rdmsrq(MSR_AMD64_SYSCFG);
                if (!(msr & MSR_AMD64_SYSCFG_MEM_ENCRYPT))
                        return;
        }