KVM: vgic-v4: Track the number of VLPIs per vcpu
author Marc Zyngier <maz@kernel.org>
Thu, 7 Nov 2019 16:04:11 +0000 (16:04 +0000)
committer Marc Zyngier <maz@kernel.org>
Fri, 8 Nov 2019 11:13:24 +0000 (11:13 +0000)
In order to find out whether a vcpu is likely to be the target of
VLPIs (and to further optimize the way we deal with those), let's
track the number of VLPIs a vcpu can receive.

This is implemented with an atomic variable that is incremented or
decremented whenever a VLPI is mapped, unmapped or moved to another vcpu.
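
The counter is not consumed in this patch; the point is that a later
change can cheaply test whether a vcpu currently has any VLPIs mapped
to it. As a rough sketch only (the helper name vcpu_has_vlpis is made
up for illustration and is not part of this series), such a check
could look like:

	/*
	 * Illustrative sketch: reads the per-vPE counter added by this
	 * patch. Relies on atomic_read() from <linux/atomic.h> and on
	 * the vgic_v3 state reachable from struct kvm_vcpu, as used in
	 * the hunks below.
	 */
	static inline bool vcpu_has_vlpis(struct kvm_vcpu *vcpu)
	{
		return atomic_read(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count) != 0;
	}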

Signed-off-by: Marc Zyngier <maz@kernel.org>
Reviewed-by: Zenghui Yu <yuzenghui@huawei.com>
Reviewed-by: Christoffer Dall <christoffer.dall@arm.com>
Link: https://lore.kernel.org/r/20191107160412.30301-2-maz@kernel.org
include/linux/irqchip/arm-gic-v4.h
virt/kvm/arm/vgic/vgic-init.c
virt/kvm/arm/vgic/vgic-its.c
virt/kvm/arm/vgic/vgic-v4.c

index ab1396afe08ac56e2421d37e58a4bdfa068c100c..5dbcfc65f21ec0e977c28fd227b626ae1eadef3e 100644 (file)
@@ -32,6 +32,8 @@ struct its_vm {
 struct its_vpe {
        struct page             *vpt_page;
        struct its_vm           *its_vm;
+       /* per-vPE VLPI tracking */
+       atomic_t                vlpi_count;
        /* Doorbell interrupt */
        int                     irq;
        irq_hw_number_t         vpe_db_lpi;
index 6f50c429196de19c4be0e359172632666181951d..b3c5de48064c91b3a0b32fae72590708f7783695 100644 (file)
@@ -203,6 +203,7 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
 
        INIT_LIST_HEAD(&vgic_cpu->ap_list_head);
        raw_spin_lock_init(&vgic_cpu->ap_list_lock);
+       atomic_set(&vgic_cpu->vgic_v3.its_vpe.vlpi_count, 0);
 
        /*
         * Enable and configure all SGIs to be edge-triggered and
index 2be6b66b3856dfc0fa1512b1d67c259ba194d01b..98c7360d9fb700703bd5f328e9e85be262ca22e3 100644 (file)
@@ -360,7 +360,10 @@ static int update_affinity(struct vgic_irq *irq, struct kvm_vcpu *vcpu)
                if (ret)
                        return ret;
 
+               if (map.vpe)
+                       atomic_dec(&map.vpe->vlpi_count);
                map.vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
+               atomic_inc(&map.vpe->vlpi_count);
 
                ret = its_map_vlpi(irq->host_irq, &map);
        }
index 0965fb0c427acd6608f627e5d14a8c233248fb0a..46f875589c472d4181623bbe75412b9bc31bdab2 100644 (file)
@@ -309,6 +309,7 @@ int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int virq,
 
        irq->hw         = true;
        irq->host_irq   = virq;
+       atomic_inc(&map.vpe->vlpi_count);
 
 out:
        mutex_unlock(&its->its_lock);
@@ -342,6 +343,7 @@ int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int virq,
 
        WARN_ON(!(irq->hw && irq->host_irq == virq));
        if (irq->hw) {
+               atomic_dec(&irq->target_vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count);
                irq->hw = false;
                ret = its_unmap_vlpi(virq);
        }