KVM: arm64: Rewrite system register accessors to read/write functions
author Christoffer Dall <cdall@cs.columbia.edu>
Wed, 16 Mar 2016 14:38:53 +0000 (15:38 +0100)
committer Marc Zyngier <marc.zyngier@arm.com>
Mon, 19 Mar 2018 10:53:16 +0000 (10:53 +0000)
Currently we access the system registers array via the vcpu_sys_reg()
macro.  However, we are about to change the behavior to sometimes
modify the register file directly, so let's change this to two
primitives:

 * Accessor macros vcpu_write_sys_reg() and vcpu_read_sys_reg()
 * Direct array access macro __vcpu_sys_reg()

The accessor macros should be used in places where the code needs to
access the currently loaded VCPU's state as observed by the guest.  For
example, when trapping on cache-related registers, a write to a system
register should go directly to the VCPU version of the register.
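
As an illustration (a sketch only, not part of the patch, mirroring
the kvm_vcpu_set_be() hunk below), a read-modify-write through the
accessors looks like:

	u64 sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);
	sctlr |= (1 << 25);	/* SCTLR_EL1.EE */
	vcpu_write_sys_reg(vcpu, sctlr, SCTLR_EL1);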

The direct array access macro can be used in places where the VCPU is
known to never be running (for example userspace access) or for
registers which are never context switched (for example all the PMU
system registers).
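
Likewise, a sketch of a direct access for a PMU register that is only
ever emulated and never context switched (val is assumed to be in
scope):

	__vcpu_sys_reg(vcpu, PMCR_EL0) = val;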

This rewrites all users of vcpu_sys_reg() to one of the macros described
above.

No functional change.

Acked-by: Marc Zyngier <marc.zyngier@arm.com>
Reviewed-by: Andrew Jones <drjones@redhat.com>
Signed-off-by: Christoffer Dall <cdall@cs.columbia.edu>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
arch/arm64/include/asm/kvm_emulate.h
arch/arm64/include/asm/kvm_host.h
arch/arm64/include/asm/kvm_mmu.h
arch/arm64/kvm/debug.c
arch/arm64/kvm/inject_fault.c
arch/arm64/kvm/sys_regs.c
arch/arm64/kvm/sys_regs.h
arch/arm64/kvm/sys_regs_generic_v8.c
virt/kvm/arm/pmu.c

diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 3cc535591bdf9b59eea588f26dc4909040823357..d313aaae5c380ad8c0c3954df3ec2fd089511ce2 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -290,15 +290,18 @@ static inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
 
 static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
 {
-       return vcpu_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
+       return vcpu_read_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
 }
 
 static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
 {
-       if (vcpu_mode_is_32bit(vcpu))
+       if (vcpu_mode_is_32bit(vcpu)) {
                *vcpu_cpsr(vcpu) |= COMPAT_PSR_E_BIT;
-       else
-               vcpu_sys_reg(vcpu, SCTLR_EL1) |= (1 << 25);
+       } else {
+               u64 sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);
+               sctlr |= (1 << 25);
+               vcpu_write_sys_reg(vcpu, sctlr, SCTLR_EL1);
+       }
 }
 
 static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
@@ -306,7 +309,7 @@ static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
        if (vcpu_mode_is_32bit(vcpu))
                return !!(*vcpu_cpsr(vcpu) & COMPAT_PSR_E_BIT);
 
-       return !!(vcpu_sys_reg(vcpu, SCTLR_EL1) & (1 << 25));
+       return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & (1 << 25));
 }
 
 static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 9001fd0890c9062b7d5cc11a887197a4d63c8bdb..179bb9d5760b2308320e3af4d7ebf32ac60222fe 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -287,7 +287,18 @@ struct kvm_vcpu_arch {
 };
 
 #define vcpu_gp_regs(v)                (&(v)->arch.ctxt.gp_regs)
-#define vcpu_sys_reg(v,r)      ((v)->arch.ctxt.sys_regs[(r)])
+
+/*
+ * Only use __vcpu_sys_reg if you know you want the memory backed version of a
+ * register, and not the one most recently accessed by a running VCPU.  For
+ * example, for userspace access or for system registers that are never context
+ * switched, but only emulated.
+ */
+#define __vcpu_sys_reg(v,r)    ((v)->arch.ctxt.sys_regs[(r)])
+
+#define vcpu_read_sys_reg(v,r) __vcpu_sys_reg(v,r)
+#define vcpu_write_sys_reg(v,n,r)      do { __vcpu_sys_reg(v,r) = n; } while (0)
+
 /*
  * CP14 and CP15 live in the same array, as they are backed by the
  * same system registers.
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 7faed6e48b46212709485b7225c512f3fb99831e..cffa34e237183b0d3de1bb56065c3b3c0e1a8023 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -249,7 +249,7 @@ struct kvm;
 
 static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
 {
-       return (vcpu_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
+       return (vcpu_read_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
 }
 
 static inline void __clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size)
diff --git a/arch/arm64/kvm/debug.c b/arch/arm64/kvm/debug.c
index feedb877cff8c091648e51c54f52eef1e806d336..a1f4ebdfe6d3fbe59ac5d22a139b9f159b7cd48f 100644
--- a/arch/arm64/kvm/debug.c
+++ b/arch/arm64/kvm/debug.c
@@ -46,7 +46,9 @@ static DEFINE_PER_CPU(u32, mdcr_el2);
  */
 static void save_guest_debug_regs(struct kvm_vcpu *vcpu)
 {
-       vcpu->arch.guest_debug_preserved.mdscr_el1 = vcpu_sys_reg(vcpu, MDSCR_EL1);
+       u64 val = vcpu_read_sys_reg(vcpu, MDSCR_EL1);
+
+       vcpu->arch.guest_debug_preserved.mdscr_el1 = val;
 
        trace_kvm_arm_set_dreg32("Saved MDSCR_EL1",
                                vcpu->arch.guest_debug_preserved.mdscr_el1);
@@ -54,10 +56,12 @@ static void save_guest_debug_regs(struct kvm_vcpu *vcpu)
 
 static void restore_guest_debug_regs(struct kvm_vcpu *vcpu)
 {
-       vcpu_sys_reg(vcpu, MDSCR_EL1) = vcpu->arch.guest_debug_preserved.mdscr_el1;
+       u64 val = vcpu->arch.guest_debug_preserved.mdscr_el1;
+
+       vcpu_write_sys_reg(vcpu, val, MDSCR_EL1);
 
        trace_kvm_arm_set_dreg32("Restored MDSCR_EL1",
-                               vcpu_sys_reg(vcpu, MDSCR_EL1));
+                               vcpu_read_sys_reg(vcpu, MDSCR_EL1));
 }
 
 /**
@@ -108,6 +112,7 @@ void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu)
 void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
 {
        bool trap_debug = !(vcpu->arch.debug_flags & KVM_ARM64_DEBUG_DIRTY);
+       unsigned long mdscr;
 
        trace_kvm_arm_setup_debug(vcpu, vcpu->guest_debug);
 
@@ -152,9 +157,13 @@ void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
                 */
                if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
                        *vcpu_cpsr(vcpu) |=  DBG_SPSR_SS;
-                       vcpu_sys_reg(vcpu, MDSCR_EL1) |= DBG_MDSCR_SS;
+                       mdscr = vcpu_read_sys_reg(vcpu, MDSCR_EL1);
+                       mdscr |= DBG_MDSCR_SS;
+                       vcpu_write_sys_reg(vcpu, mdscr, MDSCR_EL1);
                } else {
-                       vcpu_sys_reg(vcpu, MDSCR_EL1) &= ~DBG_MDSCR_SS;
+                       mdscr = vcpu_read_sys_reg(vcpu, MDSCR_EL1);
+                       mdscr &= ~DBG_MDSCR_SS;
+                       vcpu_write_sys_reg(vcpu, mdscr, MDSCR_EL1);
                }
 
                trace_kvm_arm_set_dreg32("SPSR_EL2", *vcpu_cpsr(vcpu));
@@ -170,7 +179,9 @@ void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
                 */
                if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW) {
                        /* Enable breakpoints/watchpoints */
-                       vcpu_sys_reg(vcpu, MDSCR_EL1) |= DBG_MDSCR_MDE;
+                       mdscr = vcpu_read_sys_reg(vcpu, MDSCR_EL1);
+                       mdscr |= DBG_MDSCR_MDE;
+                       vcpu_write_sys_reg(vcpu, mdscr, MDSCR_EL1);
 
                        vcpu->arch.debug_ptr = &vcpu->arch.external_debug_state;
                        vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
@@ -194,12 +205,11 @@ void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
                vcpu->arch.mdcr_el2 |= MDCR_EL2_TDA;
 
        /* If KDE or MDE are set, perform a full save/restore cycle. */
-       if ((vcpu_sys_reg(vcpu, MDSCR_EL1) & DBG_MDSCR_KDE) ||
-           (vcpu_sys_reg(vcpu, MDSCR_EL1) & DBG_MDSCR_MDE))
+       if (vcpu_read_sys_reg(vcpu, MDSCR_EL1) & (DBG_MDSCR_KDE | DBG_MDSCR_MDE))
                vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
 
        trace_kvm_arm_set_dreg32("MDCR_EL2", vcpu->arch.mdcr_el2);
-       trace_kvm_arm_set_dreg32("MDSCR_EL1", vcpu_sys_reg(vcpu, MDSCR_EL1));
+       trace_kvm_arm_set_dreg32("MDSCR_EL1", vcpu_read_sys_reg(vcpu, MDSCR_EL1));
 }
 
 void kvm_arm_clear_debug(struct kvm_vcpu *vcpu)
diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
index 30a3f58cdb7bb19b2ee1b0fb031f8afcb52f2838..63dba401fc7d234b26977b6c1972f034c2e3c2ae 100644
--- a/arch/arm64/kvm/inject_fault.c
+++ b/arch/arm64/kvm/inject_fault.c
@@ -58,7 +58,7 @@ static u64 get_except_vector(struct kvm_vcpu *vcpu, enum exception_type type)
                exc_offset = LOWER_EL_AArch32_VECTOR;
        }
 
-       return vcpu_sys_reg(vcpu, VBAR_EL1) + exc_offset + type;
+       return vcpu_read_sys_reg(vcpu, VBAR_EL1) + exc_offset + type;
 }
 
 static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr)
@@ -73,7 +73,7 @@ static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr
        *vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64;
        *vcpu_spsr(vcpu) = cpsr;
 
-       vcpu_sys_reg(vcpu, FAR_EL1) = addr;
+       vcpu_write_sys_reg(vcpu, addr, FAR_EL1);
 
        /*
         * Build an {i,d}abort, depending on the level and the
@@ -94,7 +94,7 @@ static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr
        if (!is_iabt)
                esr |= ESR_ELx_EC_DABT_LOW << ESR_ELx_EC_SHIFT;
 
-       vcpu_sys_reg(vcpu, ESR_EL1) = esr | ESR_ELx_FSC_EXTABT;
+       vcpu_write_sys_reg(vcpu, esr | ESR_ELx_FSC_EXTABT, ESR_EL1);
 }
 
 static void inject_undef64(struct kvm_vcpu *vcpu)
@@ -115,7 +115,7 @@ static void inject_undef64(struct kvm_vcpu *vcpu)
        if (kvm_vcpu_trap_il_is32bit(vcpu))
                esr |= ESR_ELx_IL;
 
-       vcpu_sys_reg(vcpu, ESR_EL1) = esr;
+       vcpu_write_sys_reg(vcpu, esr, ESR_EL1);
 }
 
 /**
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 691f81c31018795ab6ebff1aada3814aa4ea610f..7514db002430cb3827152d5aed3e71624dc819fd 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -133,14 +133,14 @@ static bool access_vm_reg(struct kvm_vcpu *vcpu,
        if (!p->is_aarch32 || !p->is_32bit) {
                val = p->regval;
        } else {
-               val = vcpu_sys_reg(vcpu, reg);
+               val = vcpu_read_sys_reg(vcpu, reg);
                if (r->reg % 2)
                        val = (p->regval << 32) | (u64)lower_32_bits(val);
                else
                        val = ((u64)upper_32_bits(val) << 32) |
                                lower_32_bits(p->regval);
        }
-       vcpu_sys_reg(vcpu, reg) = val;
+       vcpu_write_sys_reg(vcpu, val, reg);
 
        kvm_toggle_cache(vcpu, was_enabled);
        return true;
@@ -249,10 +249,10 @@ static bool trap_debug_regs(struct kvm_vcpu *vcpu,
                            const struct sys_reg_desc *r)
 {
        if (p->is_write) {
-               vcpu_sys_reg(vcpu, r->reg) = p->regval;
+               vcpu_write_sys_reg(vcpu, p->regval, r->reg);
                vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
        } else {
-               p->regval = vcpu_sys_reg(vcpu, r->reg);
+               p->regval = vcpu_read_sys_reg(vcpu, r->reg);
        }
 
        trace_trap_reg(__func__, r->reg, p->is_write, p->regval);
@@ -465,7 +465,8 @@ static void reset_wcr(struct kvm_vcpu *vcpu,
 
 static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 {
-       vcpu_sys_reg(vcpu, AMAIR_EL1) = read_sysreg(amair_el1);
+       u64 amair = read_sysreg(amair_el1);
+       vcpu_write_sys_reg(vcpu, amair, AMAIR_EL1);
 }
 
 static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
@@ -482,7 +483,7 @@ static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
        mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0);
        mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1);
        mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2);
-       vcpu_sys_reg(vcpu, MPIDR_EL1) = (1ULL << 31) | mpidr;
+       vcpu_write_sys_reg(vcpu, (1ULL << 31) | mpidr, MPIDR_EL1);
 }
 
 static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
@@ -496,12 +497,12 @@ static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
         */
        val = ((pmcr & ~ARMV8_PMU_PMCR_MASK)
               | (ARMV8_PMU_PMCR_MASK & 0xdecafbad)) & (~ARMV8_PMU_PMCR_E);
-       vcpu_sys_reg(vcpu, PMCR_EL0) = val;
+       __vcpu_sys_reg(vcpu, PMCR_EL0) = val;
 }
 
 static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags)
 {
-       u64 reg = vcpu_sys_reg(vcpu, PMUSERENR_EL0);
+       u64 reg = __vcpu_sys_reg(vcpu, PMUSERENR_EL0);
        bool enabled = (reg & flags) || vcpu_mode_priv(vcpu);
 
        if (!enabled)
@@ -543,14 +544,14 @@ static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 
        if (p->is_write) {
                /* Only update writeable bits of PMCR */
-               val = vcpu_sys_reg(vcpu, PMCR_EL0);
+               val = __vcpu_sys_reg(vcpu, PMCR_EL0);
                val &= ~ARMV8_PMU_PMCR_MASK;
                val |= p->regval & ARMV8_PMU_PMCR_MASK;
-               vcpu_sys_reg(vcpu, PMCR_EL0) = val;
+               __vcpu_sys_reg(vcpu, PMCR_EL0) = val;
                kvm_pmu_handle_pmcr(vcpu, val);
        } else {
                /* PMCR.P & PMCR.C are RAZ */
-               val = vcpu_sys_reg(vcpu, PMCR_EL0)
+               val = __vcpu_sys_reg(vcpu, PMCR_EL0)
                      & ~(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C);
                p->regval = val;
        }
@@ -568,10 +569,10 @@ static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
                return false;
 
        if (p->is_write)
-               vcpu_sys_reg(vcpu, PMSELR_EL0) = p->regval;
+               __vcpu_sys_reg(vcpu, PMSELR_EL0) = p->regval;
        else
                /* return PMSELR.SEL field */
-               p->regval = vcpu_sys_reg(vcpu, PMSELR_EL0)
+               p->regval = __vcpu_sys_reg(vcpu, PMSELR_EL0)
                            & ARMV8_PMU_COUNTER_MASK;
 
        return true;
@@ -604,7 +605,7 @@ static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx)
 {
        u64 pmcr, val;
 
-       pmcr = vcpu_sys_reg(vcpu, PMCR_EL0);
+       pmcr = __vcpu_sys_reg(vcpu, PMCR_EL0);
        val = (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
        if (idx >= val && idx != ARMV8_PMU_CYCLE_IDX) {
                kvm_inject_undefined(vcpu);
@@ -629,7 +630,7 @@ static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
                        if (pmu_access_event_counter_el0_disabled(vcpu))
                                return false;
 
-                       idx = vcpu_sys_reg(vcpu, PMSELR_EL0)
+                       idx = __vcpu_sys_reg(vcpu, PMSELR_EL0)
                              & ARMV8_PMU_COUNTER_MASK;
                } else if (r->Op2 == 0) {
                        /* PMCCNTR_EL0 */
@@ -684,7 +685,7 @@ static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 
        if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) {
                /* PMXEVTYPER_EL0 */
-               idx = vcpu_sys_reg(vcpu, PMSELR_EL0) & ARMV8_PMU_COUNTER_MASK;
+               idx = __vcpu_sys_reg(vcpu, PMSELR_EL0) & ARMV8_PMU_COUNTER_MASK;
                reg = PMEVTYPER0_EL0 + idx;
        } else if (r->CRn == 14 && (r->CRm & 12) == 12) {
                idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
@@ -702,9 +703,9 @@ static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 
        if (p->is_write) {
                kvm_pmu_set_counter_event_type(vcpu, p->regval, idx);
-               vcpu_sys_reg(vcpu, reg) = p->regval & ARMV8_PMU_EVTYPE_MASK;
+               __vcpu_sys_reg(vcpu, reg) = p->regval & ARMV8_PMU_EVTYPE_MASK;
        } else {
-               p->regval = vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_MASK;
+               p->regval = __vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_MASK;
        }
 
        return true;
@@ -726,15 +727,15 @@ static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
                val = p->regval & mask;
                if (r->Op2 & 0x1) {
                        /* accessing PMCNTENSET_EL0 */
-                       vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val;
+                       __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val;
                        kvm_pmu_enable_counter(vcpu, val);
                } else {
                        /* accessing PMCNTENCLR_EL0 */
-                       vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val;
+                       __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val;
                        kvm_pmu_disable_counter(vcpu, val);
                }
        } else {
-               p->regval = vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask;
+               p->regval = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask;
        }
 
        return true;
@@ -758,12 +759,12 @@ static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 
                if (r->Op2 & 0x1)
                        /* accessing PMINTENSET_EL1 */
-                       vcpu_sys_reg(vcpu, PMINTENSET_EL1) |= val;
+                       __vcpu_sys_reg(vcpu, PMINTENSET_EL1) |= val;
                else
                        /* accessing PMINTENCLR_EL1 */
-                       vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= ~val;
+                       __vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= ~val;
        } else {
-               p->regval = vcpu_sys_reg(vcpu, PMINTENSET_EL1) & mask;
+               p->regval = __vcpu_sys_reg(vcpu, PMINTENSET_EL1) & mask;
        }
 
        return true;
@@ -783,12 +784,12 @@ static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
        if (p->is_write) {
                if (r->CRm & 0x2)
                        /* accessing PMOVSSET_EL0 */
-                       vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= (p->regval & mask);
+                       __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= (p->regval & mask);
                else
                        /* accessing PMOVSCLR_EL0 */
-                       vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= ~(p->regval & mask);
+                       __vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= ~(p->regval & mask);
        } else {
-               p->regval = vcpu_sys_reg(vcpu, PMOVSSET_EL0) & mask;
+               p->regval = __vcpu_sys_reg(vcpu, PMOVSSET_EL0) & mask;
        }
 
        return true;
@@ -825,10 +826,10 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
                        return false;
                }
 
-               vcpu_sys_reg(vcpu, PMUSERENR_EL0) = p->regval
-                                                   & ARMV8_PMU_USERENR_MASK;
+               __vcpu_sys_reg(vcpu, PMUSERENR_EL0) =
+                              p->regval & ARMV8_PMU_USERENR_MASK;
        } else {
-               p->regval = vcpu_sys_reg(vcpu, PMUSERENR_EL0)
+               p->regval = __vcpu_sys_reg(vcpu, PMUSERENR_EL0)
                            & ARMV8_PMU_USERENR_MASK;
        }
 
@@ -2230,7 +2231,7 @@ int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg
        if (r->get_user)
                return (r->get_user)(vcpu, r, reg, uaddr);
 
-       return reg_to_user(uaddr, &vcpu_sys_reg(vcpu, r->reg), reg->id);
+       return reg_to_user(uaddr, &__vcpu_sys_reg(vcpu, r->reg), reg->id);
 }
 
 int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
@@ -2251,7 +2252,7 @@ int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg
        if (r->set_user)
                return (r->set_user)(vcpu, r, reg, uaddr);
 
-       return reg_from_user(&vcpu_sys_reg(vcpu, r->reg), uaddr, reg->id);
+       return reg_from_user(&__vcpu_sys_reg(vcpu, r->reg), uaddr, reg->id);
 }
 
 static unsigned int num_demux_regs(void)
@@ -2457,6 +2458,6 @@ void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
        reset_sys_reg_descs(vcpu, table, num);
 
        for (num = 1; num < NR_SYS_REGS; num++)
-               if (vcpu_sys_reg(vcpu, num) == 0x4242424242424242)
-                       panic("Didn't reset vcpu_sys_reg(%zi)", num);
+               if (__vcpu_sys_reg(vcpu, num) == 0x4242424242424242)
+                       panic("Didn't reset __vcpu_sys_reg(%zi)", num);
 }
diff --git a/arch/arm64/kvm/sys_regs.h b/arch/arm64/kvm/sys_regs.h
index 060f5348ef2502e38e30971d721d7fd598ed7135..cd710f8b63e0f9a33c8a39819428ae07fcf22eeb 100644
--- a/arch/arm64/kvm/sys_regs.h
+++ b/arch/arm64/kvm/sys_regs.h
@@ -89,14 +89,14 @@ static inline void reset_unknown(struct kvm_vcpu *vcpu,
 {
        BUG_ON(!r->reg);
        BUG_ON(r->reg >= NR_SYS_REGS);
-       vcpu_sys_reg(vcpu, r->reg) = 0x1de7ec7edbadc0deULL;
+       __vcpu_sys_reg(vcpu, r->reg) = 0x1de7ec7edbadc0deULL;
 }
 
 static inline void reset_val(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 {
        BUG_ON(!r->reg);
        BUG_ON(r->reg >= NR_SYS_REGS);
-       vcpu_sys_reg(vcpu, r->reg) = r->val;
+       __vcpu_sys_reg(vcpu, r->reg) = r->val;
 }
 
 static inline int cmp_sys_reg(const struct sys_reg_desc *i1,
diff --git a/arch/arm64/kvm/sys_regs_generic_v8.c b/arch/arm64/kvm/sys_regs_generic_v8.c
index 969ade1d333daca184df02f01b2c7856f5b7332a..ddb8497d18d61b4a6da5c99aaac49a4e77e9a1d6 100644
--- a/arch/arm64/kvm/sys_regs_generic_v8.c
+++ b/arch/arm64/kvm/sys_regs_generic_v8.c
@@ -38,13 +38,13 @@ static bool access_actlr(struct kvm_vcpu *vcpu,
        if (p->is_write)
                return ignore_write(vcpu, p);
 
-       p->regval = vcpu_sys_reg(vcpu, ACTLR_EL1);
+       p->regval = vcpu_read_sys_reg(vcpu, ACTLR_EL1);
        return true;
 }
 
 static void reset_actlr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 {
-       vcpu_sys_reg(vcpu, ACTLR_EL1) = read_sysreg(actlr_el1);
+       __vcpu_sys_reg(vcpu, ACTLR_EL1) = read_sysreg(actlr_el1);
 }
 
 /*
diff --git a/virt/kvm/arm/pmu.c b/virt/kvm/arm/pmu.c
index 8a9c42366db7df0874fe3fb4347de4bb4174575c..1c5b76c46e26670eb676c6bacd516ab209026e9f 100644
--- a/virt/kvm/arm/pmu.c
+++ b/virt/kvm/arm/pmu.c
@@ -37,7 +37,7 @@ u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
 
        reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
              ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + select_idx;
-       counter = vcpu_sys_reg(vcpu, reg);
+       counter = __vcpu_sys_reg(vcpu, reg);
 
        /* The real counter value is equal to the value of counter register plus
         * the value perf event counts.
@@ -61,7 +61,7 @@ void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
 
        reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
              ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + select_idx;
-       vcpu_sys_reg(vcpu, reg) += (s64)val - kvm_pmu_get_counter_value(vcpu, select_idx);
+       __vcpu_sys_reg(vcpu, reg) += (s64)val - kvm_pmu_get_counter_value(vcpu, select_idx);
 }
 
 /**
@@ -78,7 +78,7 @@ static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc)
                counter = kvm_pmu_get_counter_value(vcpu, pmc->idx);
                reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
                       ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + pmc->idx;
-               vcpu_sys_reg(vcpu, reg) = counter;
+               __vcpu_sys_reg(vcpu, reg) = counter;
                perf_event_disable(pmc->perf_event);
                perf_event_release_kernel(pmc->perf_event);
                pmc->perf_event = NULL;
@@ -125,7 +125,7 @@ void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu)
 
 u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
 {
-       u64 val = vcpu_sys_reg(vcpu, PMCR_EL0) >> ARMV8_PMU_PMCR_N_SHIFT;
+       u64 val = __vcpu_sys_reg(vcpu, PMCR_EL0) >> ARMV8_PMU_PMCR_N_SHIFT;
 
        val &= ARMV8_PMU_PMCR_N_MASK;
        if (val == 0)
@@ -147,7 +147,7 @@ void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val)
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
        struct kvm_pmc *pmc;
 
-       if (!(vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) || !val)
+       if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) || !val)
                return;
 
        for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
@@ -193,10 +193,10 @@ static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
 {
        u64 reg = 0;
 
-       if ((vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E)) {
-               reg = vcpu_sys_reg(vcpu, PMOVSSET_EL0);
-               reg &= vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
-               reg &= vcpu_sys_reg(vcpu, PMINTENSET_EL1);
+       if ((__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E)) {
+               reg = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
+               reg &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
+               reg &= __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
                reg &= kvm_pmu_valid_counter_mask(vcpu);
        }
 
@@ -295,7 +295,7 @@ static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
        struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
        int idx = pmc->idx;
 
-       vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(idx);
+       __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(idx);
 
        if (kvm_pmu_overflow_status(vcpu)) {
                kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
@@ -316,19 +316,19 @@ void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
        if (val == 0)
                return;
 
-       enable = vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
+       enable = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
        for (i = 0; i < ARMV8_PMU_CYCLE_IDX; i++) {
                if (!(val & BIT(i)))
                        continue;
-               type = vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i)
+               type = __vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i)
                       & ARMV8_PMU_EVTYPE_EVENT;
                if ((type == ARMV8_PMUV3_PERFCTR_SW_INCR)
                    && (enable & BIT(i))) {
-                       reg = vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1;
+                       reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1;
                        reg = lower_32_bits(reg);
-                       vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) = reg;
+                       __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) = reg;
                        if (!reg)
-                               vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i);
+                               __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i);
                }
        }
 }
@@ -348,7 +348,7 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
        mask = kvm_pmu_valid_counter_mask(vcpu);
        if (val & ARMV8_PMU_PMCR_E) {
                kvm_pmu_enable_counter(vcpu,
-                               vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask);
+                      __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask);
        } else {
                kvm_pmu_disable_counter(vcpu, mask);
        }
@@ -369,8 +369,8 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
 
 static bool kvm_pmu_counter_is_enabled(struct kvm_vcpu *vcpu, u64 select_idx)
 {
-       return (vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) &&
-              (vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & BIT(select_idx));
+       return (__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) &&
+              (__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & BIT(select_idx));
 }
 
 /**