hpa found that pmu_msr_write() is actually a completely pointless
function:
https://lore.kernel.org/lkml/0ec48b84-d158-47c6-b14c-3563fd14bcc4@zytor.com/
all it does is shuffle some arguments, then call pmu_msr_chk_emulated(),
and if that returns true AND the emulated flag is clear, it does *exactly
the same thing* that the calling code would have done if pmu_msr_write()
itself had returned false.
And pmu_msr_read() does the equivalent stupidity.
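To make the duplication concrete, here is a stand-alone sketch of the old
call pattern. This is not kernel code: pmu_msr_chk_emulated()'s internals
and the MSR numbers below are made-up stand-ins, only the call structure
mirrors the Xen code being changed:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static void native_write_msr(uint32_t msr, uint32_t low, uint32_t high)
{
        printf("native write: MSR %#x = %#llx\n", (unsigned int)msr,
               (unsigned long long)(((uint64_t)high << 32) | low));
}

/* Stand-in: pretend MSRs 0xc1..0xc8 belong to the PMU and only 0xc1 is emulated. */
static bool pmu_msr_chk_emulated(uint32_t msr, uint64_t *val, bool is_read,
                                 bool *emulated)
{
        (void)val; (void)is_read;       /* unused in this sketch */

        if (msr < 0xc1 || msr > 0xc8)
                return false;           /* not a PMU MSR at all */

        *emulated = (msr == 0xc1);
        return true;
}

/* Old-style helper: performs the native write itself when not emulated ... */
static bool pmu_msr_write(uint32_t msr, uint32_t low, uint32_t high)
{
        uint64_t val = ((uint64_t)high << 32) | low;
        bool emulated;

        if (!pmu_msr_chk_emulated(msr, &val, false, &emulated))
                return false;

        if (!emulated)
                native_write_msr(msr, low, high);       /* duplicates the caller */

        return true;
}

/* ... while the caller already has exactly the same fallback path. */
static void xen_do_write_msr(uint32_t msr, uint32_t low, uint32_t high)
{
        if (!pmu_msr_write(msr, low, high))
                native_write_msr(msr, low, high);
}

int main(void)
{
        xen_do_write_msr(0x10, 1, 0);   /* non-PMU MSR: native write via the caller */
        xen_do_write_msr(0xc5, 2, 0);   /* PMU MSR, not emulated: native write via the helper */
        xen_do_write_msr(0xc1, 3, 0);   /* emulated: no native write at all */
        return 0;
}

Either way a non-emulated MSR reaches native_write_msr() exactly once,
which is why the helper can simply report whether the MSR was emulated
and leave the native access to its caller.
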
Remove the calls to native_{read,write}_msr{,_safe}() within
pmu_msr_{read,write}(). Instead reuse the existing calling code
that decides whether to call native_{read,write}_msr{,_safe}() based
on the return value from pmu_msr_{read,write}(). Consequently,
eliminate the need to pass an error pointer to pmu_msr_{read,write}().
While at it, rename the helpers to pmu_msr_{read,write}_emulated() to
reflect that they now only handle the emulated case, and refactor
pmu_msr_write_emulated() to take the MSR value as a single u64 argument
instead of two u32 halves: the split arguments were only needed for the
native_write_msr{,_safe}() calls, which are now gone.
Suggested-by: H. Peter Anvin (Intel) <hpa@zytor.com>
Signed-off-by: Xin Li (Intel) <xin@zytor.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Juergen Gross <jgross@suse.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Sean Christopherson <seanjc@google.com>
Cc: Stefano Stabellini <sstabellini@kernel.org>
Cc: Uros Bizjak <ubizjak@gmail.com>
Cc: Vitaly Kuznetsov <vkuznets@redhat.com>
Link: https://lore.kernel.org/r/20250427092027.1598740-11-xin@zytor.com

 {
         u64 val = 0;    /* Avoid uninitialized value for safe variant. */
-        if (pmu_msr_read(msr, &val, err))
+        if (pmu_msr_read_emulated(msr, &val))
                 return val;
         if (err)

 static void xen_do_write_msr(unsigned int msr, unsigned int low,
                              unsigned int high, int *err)
 {
+        u64 val;
+
         switch (msr) {
         case MSR_FS_BASE:
                 set_seg(SEGBASE_FS, low, high, err);
                 break;
         default:
-                if (!pmu_msr_write(msr, low, high, err)) {
+                val = (u64)high << 32 | low;
+
+                if (!pmu_msr_write_emulated(msr, val)) {
                         if (err)
                                 *err = native_write_msr_safe(msr, low, high);
                         else
                                 native_write_msr(msr, low, high);
                 }

-bool pmu_msr_read(unsigned int msr, uint64_t *val, int *err)
+bool pmu_msr_read_emulated(u32 msr, u64 *val)
 {
         bool emulated;
-        if (!pmu_msr_chk_emulated(msr, val, true, &emulated))
-                return false;
-
-        if (!emulated) {
-                *val = err ? native_read_msr_safe(msr, err)
-                           : native_read_msr(msr);
-        }
-
-        return true;
+        return pmu_msr_chk_emulated(msr, val, true, &emulated) && emulated;
 }

-bool pmu_msr_write(unsigned int msr, uint32_t low, uint32_t high, int *err)
+bool pmu_msr_write_emulated(u32 msr, u64 val)
 {
-        uint64_t val = ((uint64_t)high << 32) | low;
         bool emulated;
-        if (!pmu_msr_chk_emulated(msr, &val, false, &emulated))
-                return false;
-
-        if (!emulated) {
-                if (err)
-                        *err = native_write_msr_safe(msr, low, high);
-                else
-                        native_write_msr(msr, low, high);
-        }
-
-        return true;
+        return pmu_msr_chk_emulated(msr, &val, false, &emulated) && emulated;
 }
 static u64 xen_amd_read_pmc(int counter)

 static inline void xen_pmu_init(int cpu) {}
 static inline void xen_pmu_finish(int cpu) {}
 #endif
-bool pmu_msr_read(unsigned int msr, uint64_t *val, int *err);
-bool pmu_msr_write(unsigned int msr, uint32_t low, uint32_t high, int *err);
+bool pmu_msr_read_emulated(u32 msr, u64 *val);
+bool pmu_msr_write_emulated(u32 msr, u64 val);
 int pmu_apic_update(uint32_t reg);
 u64 xen_read_pmc(int counter);