arm/arm64: KVM: enable kernel side of GICv3 emulation
author: Andre Przywara <andre.przywara@arm.com>
Tue, 3 Jun 2014 08:26:03 +0000 (10:26 +0200)
committer: Christoffer Dall <christoffer.dall@linaro.org>
Tue, 20 Jan 2015 17:25:32 +0000 (18:25 +0100)
With all the necessary GICv3 emulation code in place, we can now
connect the code to the GICv3 backend in the kernel.
The LR register handling is different depending on the emulated GIC
model, so provide different implementations for each.
Also allow non-v2-compatible GICv3 implementations (which don't
provide MMIO regions for the virtual CPU interface in the DT), but
restrict those hosts to support GICv3 guests only.
If the device tree provides a GICv2-compatible GICV resource entry,
but it is faulty, just disable the GICv2 emulation and let the
user use at least the GICv3 emulation for guests.
To provide proper support for the legacy KVM_CREATE_IRQCHIP ioctl,
note virtual GICv2 compatibility in struct vgic_params and use it
on creating a VGICv2.

Signed-off-by: Andre Przywara <andre.przywara@arm.com>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
include/kvm/arm_vgic.h
virt/kvm/arm/vgic-v2.c
virt/kvm/arm/vgic-v3.c
virt/kvm/arm/vgic.c

index b9b2e05a39ef8c9808d3b5c7e4f885d7499cf8a4..39039d5f09a8f60c5f6b6d66b804cb2fb3a634ed 100644 (file)
@@ -134,6 +134,8 @@ struct vgic_params {
        /* Virtual control interface base address */
        void __iomem    *vctrl_base;
        int             max_gic_vcpus;
+       /* Only needed for the legacy KVM_CREATE_IRQCHIP */
+       bool            can_emulate_gicv2;
 };
 
 struct vgic_vm_ops {
index e8b82b289844c3d56c70553ab9bb315a3d7bfc16..a0a7b5d1a0703a00f81c9421856c0fdf1b1f6aaa 100644 (file)
@@ -229,6 +229,7 @@ int vgic_v2_probe(struct device_node *vgic_node,
                goto out_unmap;
        }
 
+       vgic->can_emulate_gicv2 = true;
        kvm_register_device_ops(&kvm_arm_vgic_v2_ops, KVM_DEV_TYPE_ARM_VGIC_V2);
 
        vgic->vcpu_base = vcpu_res.start;
index 52490480b6f96f1f2be543743a918d9b52ddae4c..3a62d8a9a2c6fce0cc2f94a6e558115785c03811 100644 (file)
@@ -34,6 +34,7 @@
 #define GICH_LR_VIRTUALID              (0x3ffUL << 0)
 #define GICH_LR_PHYSID_CPUID_SHIFT     (10)
 #define GICH_LR_PHYSID_CPUID           (7UL << GICH_LR_PHYSID_CPUID_SHIFT)
+#define ICH_LR_VIRTUALID_MASK          (BIT_ULL(32) - 1)
 
 /*
  * LRs are stored in reverse order in memory. make sure we index them
@@ -48,12 +49,17 @@ static struct vgic_lr vgic_v3_get_lr(const struct kvm_vcpu *vcpu, int lr)
        struct vgic_lr lr_desc;
        u64 val = vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[LR_INDEX(lr)];
 
-       lr_desc.irq     = val & GICH_LR_VIRTUALID;
-       if (lr_desc.irq <= 15)
-               lr_desc.source  = (val >> GICH_LR_PHYSID_CPUID_SHIFT) & 0x7;
+       if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
+               lr_desc.irq = val & ICH_LR_VIRTUALID_MASK;
        else
-               lr_desc.source = 0;
-       lr_desc.state   = 0;
+               lr_desc.irq = val & GICH_LR_VIRTUALID;
+
+       lr_desc.source = 0;
+       if (lr_desc.irq <= 15 &&
+           vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2)
+               lr_desc.source = (val >> GICH_LR_PHYSID_CPUID_SHIFT) & 0x7;
+
+       lr_desc.state = 0;
 
        if (val & ICH_LR_PENDING_BIT)
                lr_desc.state |= LR_STATE_PENDING;
@@ -68,8 +74,20 @@ static struct vgic_lr vgic_v3_get_lr(const struct kvm_vcpu *vcpu, int lr)
 static void vgic_v3_set_lr(struct kvm_vcpu *vcpu, int lr,
                           struct vgic_lr lr_desc)
 {
-       u64 lr_val = (((u32)lr_desc.source << GICH_LR_PHYSID_CPUID_SHIFT) |
-                     lr_desc.irq);
+       u64 lr_val;
+
+       lr_val = lr_desc.irq;
+
+       /*
+        * Currently all guest IRQs are Group1, as Group0 would result
+        * in a FIQ in the guest, which it wouldn't expect.
+        * Eventually we want to make this configurable, so we may revisit
+        * this in the future.
+        */
+       if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
+               lr_val |= ICH_LR_GROUP;
+       else
+               lr_val |= (u32)lr_desc.source << GICH_LR_PHYSID_CPUID_SHIFT;
 
        if (lr_desc.state & LR_STATE_PENDING)
                lr_val |= ICH_LR_PENDING_BIT;
@@ -154,7 +172,15 @@ static void vgic_v3_enable(struct kvm_vcpu *vcpu)
         */
        vgic_v3->vgic_vmcr = 0;
 
-       vgic_v3->vgic_sre = 0;
+       /*
+        * If we are emulating a GICv3, we do it in a non-GICv2-compatible
+        * way, so we force SRE to 1 to demonstrate this to the guest.
+        * This goes with the spec allowing the value to be RAO/WI.
+        */
+       if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
+               vgic_v3->vgic_sre = ICC_SRE_EL1_SRE;
+       else
+               vgic_v3->vgic_sre = 0;
 
        /* Get the show on the road... */
        vgic_v3->vgic_hcr = ICH_HCR_EN;
@@ -209,34 +235,34 @@ int vgic_v3_probe(struct device_node *vgic_node,
         * maximum of 16 list registers. Just ignore bit 4...
         */
        vgic->nr_lr = (ich_vtr_el2 & 0xf) + 1;
+       vgic->can_emulate_gicv2 = false;
 
        if (of_property_read_u32(vgic_node, "#redistributor-regions", &gicv_idx))
                gicv_idx = 1;
 
        gicv_idx += 3; /* Also skip GICD, GICC, GICH */
        if (of_address_to_resource(vgic_node, gicv_idx, &vcpu_res)) {
-               kvm_err("Cannot obtain GICV region\n");
-               ret = -ENXIO;
-               goto out;
-       }
-
-       if (!PAGE_ALIGNED(vcpu_res.start)) {
-               kvm_err("GICV physical address 0x%llx not page aligned\n",
+               kvm_info("GICv3: no GICV resource entry\n");
+               vgic->vcpu_base = 0;
+       } else if (!PAGE_ALIGNED(vcpu_res.start)) {
+               pr_warn("GICV physical address 0x%llx not page aligned\n",
                        (unsigned long long)vcpu_res.start);
-               ret = -ENXIO;
-               goto out;
-       }
-
-       if (!PAGE_ALIGNED(resource_size(&vcpu_res))) {
-               kvm_err("GICV size 0x%llx not a multiple of page size 0x%lx\n",
+               vgic->vcpu_base = 0;
+       } else if (!PAGE_ALIGNED(resource_size(&vcpu_res))) {
+               pr_warn("GICV size 0x%llx not a multiple of page size 0x%lx\n",
                        (unsigned long long)resource_size(&vcpu_res),
                        PAGE_SIZE);
-               ret = -ENXIO;
-               goto out;
+               vgic->vcpu_base = 0;
+       } else {
+               vgic->vcpu_base = vcpu_res.start;
+               vgic->can_emulate_gicv2 = true;
+               kvm_register_device_ops(&kvm_arm_vgic_v2_ops,
+                                       KVM_DEV_TYPE_ARM_VGIC_V2);
        }
-       kvm_register_device_ops(&kvm_arm_vgic_v2_ops, KVM_DEV_TYPE_ARM_VGIC_V2);
+       if (vgic->vcpu_base == 0)
+               kvm_info("disabling GICv2 emulation\n");
+       kvm_register_device_ops(&kvm_arm_vgic_v3_ops, KVM_DEV_TYPE_ARM_VGIC_V3);
 
-       vgic->vcpu_base = vcpu_res.start;
        vgic->vctrl_base = NULL;
        vgic->type = VGIC_V3;
        vgic->max_gic_vcpus = KVM_MAX_VCPUS;
index 6d23e57c356168dbf7f291bdb20d4826c58aa4b7..2efba823137512a691e0438270366d41ea22613d 100644 (file)
@@ -1550,6 +1550,11 @@ static int init_vgic_model(struct kvm *kvm, int type)
        case KVM_DEV_TYPE_ARM_VGIC_V2:
                vgic_v2_init_emulation(kvm);
                break;
+#ifdef CONFIG_ARM_GIC_V3
+       case KVM_DEV_TYPE_ARM_VGIC_V3:
+               vgic_v3_init_emulation(kvm);
+               break;
+#endif
        default:
                return -ENODEV;
        }
@@ -1572,6 +1577,15 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)
                goto out;
        }
 
+       /*
+        * This function is also called by the KVM_CREATE_IRQCHIP handler,
+        * which had no chance yet to check the availability of the GICv2
+        * emulation. So check this here again. KVM_CREATE_DEVICE does
+        * the proper checks already.
+        */
+       if (type == KVM_DEV_TYPE_ARM_VGIC_V2 && !vgic->can_emulate_gicv2)
+               return -ENODEV;
+
        /*
         * Any time a vcpu is run, vcpu_load is called which tries to grab the
         * vcpu->mutex.  By grabbing the vcpu->mutex of all VCPUs we ensure