x86/paravirt: Add paravirt_{read,write}_msr()
author Andy Lutomirski <luto@kernel.org>
Sat, 2 Apr 2016 14:01:38 +0000 (07:01 -0700)
committer Ingo Molnar <mingo@kernel.org>
Wed, 13 Apr 2016 09:37:46 +0000 (11:37 +0200)
This adds paravirt callbacks for unsafe MSR access.  On native, they
call native_{read,write}_msr().  On Xen, they use xen_{read,write}_msr_safe().

Nothing uses them yet for ease of bisection.  The next patch will
use them in rdmsrl(), wrmsrl(), etc.
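
As a rough, hypothetical sketch (not part of this patch; the exact wiring
belongs to that follow-up), the unchecked accessors in <asm/msr.h> could be
routed through the new callbacks along these lines when CONFIG_PARAVIRT is
enabled:

	/* Hypothetical follow-up wiring, for illustration only */
	#define rdmsr(msr, low, high)				\
	do {							\
		u64 __val = paravirt_read_msr(msr);		\
		(low) = (u32)__val;				\
		(high) = (u32)(__val >> 32);			\
	} while (0)

	#define rdmsrl(msr, val)				\
		((val) = paravirt_read_msr(msr))

	static inline void wrmsrl(unsigned int msr, u64 val)
	{
		paravirt_write_msr(msr, (u32)val, (u32)(val >> 32));
	}

With something like this in place, existing rdmsrl()/wrmsrl() callers would
pick up the paravirt behaviour without any change at the call sites.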

I intentionally didn't make them warn on #GP on Xen.  I think that
should be done separately by the Xen maintainers.

Tested-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Signed-off-by: Andy Lutomirski <luto@kernel.org>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Arjan van de Ven <arjan@linux.intel.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: KVM list <kvm@vger.kernel.org>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: xen-devel <Xen-devel@lists.xen.org>
Link: http://lkml.kernel.org/r/880eebc5dcd2ad9f310d41345f82061ea500e9fa.1459605520.git.luto@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
arch/x86/include/asm/msr.h
arch/x86/include/asm/paravirt.h
arch/x86/include/asm/paravirt_types.h
arch/x86/kernel/paravirt.c
arch/x86/xen/enlighten.c

diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index 25f169c6eb953021b46707a596be071d54f4e6be..00050c034a13825bfe774204061a1def235f5755 100644
@@ -111,8 +111,9 @@ static inline unsigned long long native_read_msr_safe(unsigned int msr,
        return EAX_EDX_VAL(val, low, high);
 }
 
-static inline void native_write_msr(unsigned int msr,
-                                   unsigned low, unsigned high)
+/* Can be uninlined because referenced by paravirt */
+notrace static inline void native_write_msr(unsigned int msr,
+                                           unsigned low, unsigned high)
 {
        asm volatile("1: wrmsr\n"
                     "2:\n"
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 81ef2d5c2a24878d288c034b052f4785ef1b287c..97839fa8b8aae7f18b8f530a797c39e05fd59993 100644
@@ -130,6 +130,17 @@ static inline void wbinvd(void)
 
 #define get_kernel_rpl()  (pv_info.kernel_rpl)
 
+static inline u64 paravirt_read_msr(unsigned msr)
+{
+       return PVOP_CALL1(u64, pv_cpu_ops.read_msr, msr);
+}
+
+static inline void paravirt_write_msr(unsigned msr,
+                                     unsigned low, unsigned high)
+{
+       return PVOP_VCALL3(pv_cpu_ops.write_msr, msr, low, high);
+}
+
 static inline u64 paravirt_read_msr_safe(unsigned msr, int *err)
 {
        return PVOP_CALL2(u64, pv_cpu_ops.read_msr_safe, msr, err);
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 09c9e1dd81ce9c59a1419c332c46be5426ad8f88..b4a23eafa1b95e2e0d63f42a54c8725732e99df8 100644
@@ -155,8 +155,14 @@ struct pv_cpu_ops {
        void (*cpuid)(unsigned int *eax, unsigned int *ebx,
                      unsigned int *ecx, unsigned int *edx);
 
-       /* MSR operations.
-          err = 0/-EIO.  wrmsr returns 0/-EIO. */
+       /* Unsafe MSR operations.  These will warn or panic on failure. */
+       u64 (*read_msr)(unsigned int msr);
+       void (*write_msr)(unsigned int msr, unsigned low, unsigned high);
+
+       /*
+        * Safe MSR operations.
+        * read sets err to 0 or -EIO.  write returns 0 or -EIO.
+        */
        u64 (*read_msr_safe)(unsigned int msr, int *err);
        int (*write_msr_safe)(unsigned int msr, unsigned low, unsigned high);
 
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 8aad95478ae54e4223118e7e86d2d266216a1022..f9583917c7c4f440456074efff1179e54b38e558 100644
@@ -339,6 +339,8 @@ __visible struct pv_cpu_ops pv_cpu_ops = {
        .write_cr8 = native_write_cr8,
 #endif
        .wbinvd = native_wbinvd,
+       .read_msr = native_read_msr,
+       .write_msr = native_write_msr,
        .read_msr_safe = native_read_msr_safe,
        .write_msr_safe = native_write_msr_safe,
        .read_pmc = native_read_pmc,
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 13f756fdcb3341d8022e4cbadfa9a38014af6ee2..6ab672233ac9861d35d5ee53aebcf84286f46f0c 100644
@@ -1092,6 +1092,26 @@ static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high)
        return ret;
 }
 
+static u64 xen_read_msr(unsigned int msr)
+{
+       /*
+        * This will silently swallow a #GP from RDMSR.  It may be worth
+        * changing that.
+        */
+       int err;
+
+       return xen_read_msr_safe(msr, &err);
+}
+
+static void xen_write_msr(unsigned int msr, unsigned low, unsigned high)
+{
+       /*
+        * This will silently swallow a #GP from WRMSR.  It may be worth
+        * changing that.
+        */
+       xen_write_msr_safe(msr, low, high);
+}
+
 void xen_setup_shared_info(void)
 {
        if (!xen_feature(XENFEAT_auto_translated_physmap)) {
@@ -1222,6 +1242,9 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = {
 
        .wbinvd = native_wbinvd,
 
+       .read_msr = xen_read_msr,
+       .write_msr = xen_write_msr,
+
        .read_msr_safe = xen_read_msr_safe,
        .write_msr_safe = xen_write_msr_safe,