Merge tag 'kvmarm-fixes-6.3-4' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm.git
author	Paolo Bonzini <pbonzini@redhat.com>
Fri, 21 Apr 2023 23:19:02 +0000 (19:19 -0400)
committer	Paolo Bonzini <pbonzini@redhat.com>
Fri, 21 Apr 2023 23:19:02 +0000 (19:19 -0400)
KVM/arm64 fixes for 6.3, part #4

 - Plug a buffer overflow due to the use of the user-provided register
   width for firmware regs. Outright reject accesses where the
   user register width does not match the kernel representation.

 - Protect non-atomic RMW operations on vCPU flags against preemption,
   as an update to the flags by an intervening preemption could be lost.

arch/arm64/include/asm/kvm_host.h
arch/arm64/kvm/hypercalls.c

index bcd774d74f3494563582c52827926e4c03468ebe..3dd691c85ca0d81d0c4e6897dcdc91a81cd82581 100644 (file)
@@ -576,9 +576,22 @@ struct kvm_vcpu_arch {
        ({                                                      \
                __build_check_flag(v, flagset, f, m);           \
                                                                \
-               v->arch.flagset & (m);                          \
+               READ_ONCE(v->arch.flagset) & (m);               \
        })
 
+/*
+ * Note that the set/clear accessors must be preempt-safe in order to
+ * avoid nesting them with load/put which also manipulate flags...
+ */
+#ifdef __KVM_NVHE_HYPERVISOR__
+/* the nVHE hypervisor is always non-preemptible */
+#define __vcpu_flags_preempt_disable()
+#define __vcpu_flags_preempt_enable()
+#else
+#define __vcpu_flags_preempt_disable() preempt_disable()
+#define __vcpu_flags_preempt_enable()  preempt_enable()
+#endif
+
 #define __vcpu_set_flag(v, flagset, f, m)                      \
        do {                                                    \
                typeof(v->arch.flagset) *fset;                  \
@@ -586,9 +599,11 @@ struct kvm_vcpu_arch {
                __build_check_flag(v, flagset, f, m);           \
                                                                \
                fset = &v->arch.flagset;                        \
+               __vcpu_flags_preempt_disable();                 \
                if (HWEIGHT(m) > 1)                             \
                        *fset &= ~(m);                          \
                *fset |= (f);                                   \
+               __vcpu_flags_preempt_enable();                  \
        } while (0)
 
 #define __vcpu_clear_flag(v, flagset, f, m)                    \
@@ -598,7 +613,9 @@ struct kvm_vcpu_arch {
                __build_check_flag(v, flagset, f, m);           \
                                                                \
                fset = &v->arch.flagset;                        \
+               __vcpu_flags_preempt_disable();                 \
                *fset &= ~(m);                                  \
+               __vcpu_flags_preempt_enable();                  \
        } while (0)
 
 #define vcpu_get_flag(v, ...)  __vcpu_get_flag((v), __VA_ARGS__)
index 5da884e11337a6d420e3dc71456b469057300d1c..c4b4678bc4a4580c6d9fd924cb66656d3c87cd98 100644 (file)
@@ -397,6 +397,8 @@ int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
        u64 val;
        int wa_level;
 
+       if (KVM_REG_SIZE(reg->id) != sizeof(val))
+               return -ENOENT;
        if (copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id)))
                return -EFAULT;