// SPDX-License-Identifier: GPL-2.0-only
/*
 * VGIC MMIO handling functions
 */

#include <linux/bitops.h>
#include <linux/bsearch.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <kvm/iodev.h>
#include <kvm/arm_arch_timer.h>
#include <kvm/arm_vgic.h>

#include "vgic.h"
#include "vgic-mmio.h"

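/*
 * Most handlers below share one pattern: VGIC_ADDR_TO_INTID() turns the
 * byte offset of the access into the first interrupt ID covered by the
 * register, given how many bits each interrupt occupies in that
 * register. As a worked example (for illustration): in a 1-bit-per-IRQ
 * register range such as GICD_ISENABLER, a 4-byte access at byte
 * offset 4 starts at INTID 4 * 8 / 1 = 32 and covers len * 8 = 32
 * interrupts. Every vgic_get_irq() reference taken in these loops is
 * dropped again with vgic_put_irq() before the handler returns.
 */
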
unsigned long vgic_mmio_read_raz(struct kvm_vcpu *vcpu,
				 gpa_t addr, unsigned int len)
{
	return 0;
}

unsigned long vgic_mmio_read_rao(struct kvm_vcpu *vcpu,
				 gpa_t addr, unsigned int len)
{
	return -1UL;
}

void vgic_mmio_write_wi(struct kvm_vcpu *vcpu, gpa_t addr,
			unsigned int len, unsigned long val)
{
	/* Ignore */
}

int vgic_mmio_uaccess_write_wi(struct kvm_vcpu *vcpu, gpa_t addr,
			       unsigned int len, unsigned long val)
{
	/* Ignore */
	return 0;
}

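/*
 * The handlers above implement the usual GIC register conventions:
 * RAZ (read-as-zero), RAO (read-as-one, returning all ones regardless
 * of access width) and WI (write-ignored) for reserved or
 * unimplemented register space. The uaccess variant reports success
 * so that a userspace save/restore touching such a register does not
 * fail spuriously.
 */
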
unsigned long vgic_mmio_read_group(struct kvm_vcpu *vcpu,
				   gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	/* Loop over all IRQs affected by this read */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		if (irq->group)
			value |= BIT(i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}

void vgic_mmio_write_group(struct kvm_vcpu *vcpu, gpa_t addr,
			   unsigned int len, unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		irq->group = !!(val & BIT(i));
		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}
}

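/*
 * Note on the write path above: the group bit (one bit per interrupt,
 * as in GICD_IGROUPR) can affect whether and how a pending interrupt
 * is signalled, so the update is followed by vgic_queue_irq_unlock()
 * rather than a plain unlock, re-evaluating whether the IRQ now needs
 * to be queued to a VCPU.
 */
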
/*
 * Read accesses to both GICD_ICENABLER and GICD_ISENABLER return the value
 * of the enabled bit, so there is only one function for both here.
 */
unsigned long vgic_mmio_read_enable(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	/* Loop over all IRQs affected by this read */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		if (irq->enabled)
			value |= (1U << i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}

void vgic_mmio_write_senable(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		irq->enabled = true;
		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}
}

void vgic_mmio_write_cenable(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);

		irq->enabled = false;

		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

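/*
 * The asymmetry between the two writers above is deliberate: setting
 * the enable bit may make an already-pending interrupt deliverable, so
 * vgic_mmio_write_senable() ends with vgic_queue_irq_unlock(), while
 * clearing it can never require queueing, so vgic_mmio_write_cenable()
 * simply drops the lock. Both are set/clear registers: only the bits
 * set in the written value take effect, hence for_each_set_bit().
 */
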
unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;
	unsigned long flags;

	/* Loop over all IRQs affected by this read */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		if (irq_is_pending(irq))
			value |= (1U << i);
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}

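/*
 * irq_is_pending() folds both pending sources into one bit: for edge
 * interrupts only the latched state counts, while for level interrupts
 * the latch is OR'ed with the current line level. The lock is held
 * across the check so each reported bit is a consistent snapshot.
 */
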
/*
 * This function will return the VCPU that performed the MMIO access and
 * trapped from within the VM, and will return NULL if this is a userspace
 * access.
 *
 * We can disable preemption locally around accessing the per-CPU variable,
 * and use the resolved vcpu pointer after enabling preemption again, because
 * even if the current thread is migrated to another CPU, reading the per-CPU
 * value later will give us the same value as we update the per-CPU variable
 * in the preempt notifier handlers.
 */
static struct kvm_vcpu *vgic_get_mmio_requester_vcpu(void)
{
	struct kvm_vcpu *vcpu;

	preempt_disable();
	vcpu = kvm_arm_get_running_vcpu();
	preempt_enable();
	return vcpu;
}

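/*
 * Callers below use the NULL return as an "is_uaccess" flag: the same
 * handlers back both guest traps (where a running VCPU exists) and KVM
 * device attribute accesses from userspace (where none does), and
 * hardware-mapped interrupts must be treated differently in the two
 * cases.
 */
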
/* Must be called with irq->irq_lock held */
static void vgic_hw_irq_spending(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
				 bool is_uaccess)
{
	if (is_uaccess)
		return;

	irq->pending_latch = true;
	vgic_irq_set_phys_active(irq, true);
}

void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
			      gpa_t addr, unsigned int len,
			      unsigned long val)
{
	bool is_uaccess = !vgic_get_mmio_requester_vcpu();
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		if (irq->hw)
			vgic_hw_irq_spending(vcpu, irq, is_uaccess);
		else
			irq->pending_latch = true;
		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

/* Must be called with irq->irq_lock held */
static void vgic_hw_irq_cpending(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
				 bool is_uaccess)
{
	if (is_uaccess)
		return;

	irq->pending_latch = false;

	/*
	 * We don't want the guest to effectively mask the physical
	 * interrupt by doing a write to SPENDR followed by a write to
	 * CPENDR for HW interrupts, so we clear the active state on
	 * the physical side if the virtual interrupt is not active.
	 * This may lead to taking an additional interrupt on the
	 * host, but that should not be a problem as the worst that
	 * can happen is an additional vgic injection. We also clear
	 * the pending state to maintain proper semantics for edge HW
	 * interrupts.
	 */
	vgic_irq_set_phys_pending(irq, false);
	if (!irq->active)
		vgic_irq_set_phys_active(irq, false);
}

void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
			      gpa_t addr, unsigned int len,
			      unsigned long val)
{
	bool is_uaccess = !vgic_get_mmio_requester_vcpu();
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);

		if (irq->hw)
			vgic_hw_irq_cpending(vcpu, irq, is_uaccess);
		else
			irq->pending_latch = false;

		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

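/*
 * Worked example of the hazard handled in vgic_hw_irq_cpending(), for
 * illustration: the guest writes GICD_ISPENDR for a HW-mapped IRQ
 * (which sets the physical interrupt active, preventing it from firing
 * again) and then writes GICD_ICPENDR before the virtual interrupt was
 * ever activated. Without clearing the physical active state here, the
 * physical interrupt would stay masked indefinitely, since no
 * deactivation for it is forthcoming.
 */
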
unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	/* Loop over all IRQs affected by this read */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		if (irq->active)
			value |= (1U << i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}

/* Must be called with irq->irq_lock held */
static void vgic_hw_irq_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
				      bool active, bool is_uaccess)
{
	if (is_uaccess)
		return;

	irq->active = active;
	vgic_irq_set_phys_active(irq, active);
}

static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
				    bool active)
{
	unsigned long flags;
	struct kvm_vcpu *requester_vcpu = vgic_get_mmio_requester_vcpu();

	raw_spin_lock_irqsave(&irq->irq_lock, flags);

	if (irq->hw) {
		vgic_hw_irq_change_active(vcpu, irq, active, !requester_vcpu);
	} else {
		u32 model = vcpu->kvm->arch.vgic.vgic_model;
		u8 active_source;

		irq->active = active;

		/*
		 * The GICv2 architecture indicates that the source CPUID for
		 * an SGI should be provided during an EOI which implies that
		 * the active state is stored somewhere, but at the same time
		 * this state is not architecturally exposed anywhere and we
		 * have no way of knowing the right source.
		 *
		 * This may lead to a VCPU not being able to receive
		 * additional instances of a particular SGI after migration
		 * for a GICv2 VM on some GIC implementations. Oh well.
		 */
		active_source = (requester_vcpu) ? requester_vcpu->vcpu_id : 0;

		if (model == KVM_DEV_TYPE_ARM_VGIC_V2 &&
		    active && vgic_irq_is_sgi(irq->intid))
			irq->active_source = active_source;
	}

	if (irq->active)
		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
	else
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
}

/*
 * If we are fiddling with an IRQ's active state, we have to make sure the IRQ
 * is not queued on some running VCPU's LRs, because then the change to the
 * active state can be overwritten when the VCPU's state is synced coming back
 * from the guest.
 *
 * For shared interrupts, we have to stop all the VCPUs because interrupts can
 * be migrated while we don't hold the IRQ locks and we don't want to be
 * chasing moving targets.
 *
 * For private interrupts we don't have to do anything because userspace
 * accesses to the VGIC state already require all VCPUs to be stopped, and
 * only the VCPU itself can modify its private interrupts active state, which
 * guarantees that the VCPU is not running.
 */
static void vgic_change_active_prepare(struct kvm_vcpu *vcpu, u32 intid)
{
	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
	    intid > VGIC_NR_PRIVATE_IRQS)
		kvm_arm_halt_guest(vcpu->kvm);
}

/* See vgic_change_active_prepare */
static void vgic_change_active_finish(struct kvm_vcpu *vcpu, u32 intid)
{
	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
	    intid > VGIC_NR_PRIVATE_IRQS)
		kvm_arm_resume_guest(vcpu->kvm);
}

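/*
 * The intended calling pattern, as used by the trap handlers below:
 *
 *	mutex_lock(&vcpu->kvm->lock);
 *	vgic_change_active_prepare(vcpu, intid);   // may halt all VCPUs
 *	... modify the active state ...
 *	vgic_change_active_finish(vcpu, intid);    // resume the VCPUs
 *	mutex_unlock(&vcpu->kvm->lock);
 *
 * with kvm->lock serializing overlapping prepare/finish sequences.
 */
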
static void __vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
				      gpa_t addr, unsigned int len,
				      unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		vgic_mmio_change_active(vcpu, irq, false);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);

	mutex_lock(&vcpu->kvm->lock);
	vgic_change_active_prepare(vcpu, intid);

	__vgic_mmio_write_cactive(vcpu, addr, len, val);

	vgic_change_active_finish(vcpu, intid);
	mutex_unlock(&vcpu->kvm->lock);
}

int vgic_mmio_uaccess_write_cactive(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len,
				    unsigned long val)
{
	__vgic_mmio_write_cactive(vcpu, addr, len, val);
	return 0;
}

static void __vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
				      gpa_t addr, unsigned int len,
				      unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		vgic_mmio_change_active(vcpu, irq, true);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);

	mutex_lock(&vcpu->kvm->lock);
	vgic_change_active_prepare(vcpu, intid);

	__vgic_mmio_write_sactive(vcpu, addr, len, val);

	vgic_change_active_finish(vcpu, intid);
	mutex_unlock(&vcpu->kvm->lock);
}

int vgic_mmio_uaccess_write_sactive(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len,
				    unsigned long val)
{
	__vgic_mmio_write_sactive(vcpu, addr, len, val);
	return 0;
}

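/*
 * The uaccess variants skip the prepare/finish dance: as noted above,
 * userspace accesses to the VGIC state already require all VCPUs to be
 * stopped, so there are no live LRs to race with.
 */
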
unsigned long vgic_mmio_read_priority(struct kvm_vcpu *vcpu,
				      gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
	int i;
	u64 val = 0;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		val |= (u64)irq->priority << (i * 8);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return val;
}

/*
 * We currently don't handle changing the priority of an interrupt that
 * is already pending on a VCPU. If there is a need for this, we would
 * need to make this VCPU exit and re-evaluate the priorities, potentially
 * leading to this interrupt getting presented now to the guest (if it has
 * been masked by the priority mask before).
 */
void vgic_mmio_write_priority(struct kvm_vcpu *vcpu,
			      gpa_t addr, unsigned int len,
			      unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
	int i;
	unsigned long flags;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		/* Narrow the priority range to what we actually support */
		irq->priority = (val >> (i * 8)) & GENMASK(7, 8 - VGIC_PRI_BITS);
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}
}

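/*
 * Worked example of the narrowing above, for illustration: with
 * VGIC_PRI_BITS == 5, GENMASK(7, 3) == 0xf8, so a guest-written
 * priority of 0xab is stored as 0xa8 - the top VGIC_PRI_BITS bits are
 * kept and the rest read back as zero, just like on hardware that only
 * implements five priority bits.
 */
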
unsigned long vgic_mmio_read_config(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 2);
	u32 value = 0;
	int i;

	for (i = 0; i < len * 4; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		if (irq->config == VGIC_CONFIG_EDGE)
			value |= (2U << (i * 2));

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}

void vgic_mmio_write_config(struct kvm_vcpu *vcpu,
			    gpa_t addr, unsigned int len,
			    unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 2);
	int i;
	unsigned long flags;

	for (i = 0; i < len * 4; i++) {
		struct vgic_irq *irq;

		/*
		 * The configuration cannot be changed for SGIs in general,
		 * for PPIs this is IMPLEMENTATION DEFINED. The arch timer
		 * code relies on PPIs being level triggered, so we also
		 * make them read-only here.
		 */
		if (intid + i < VGIC_NR_PRIVATE_IRQS)
			continue;

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		raw_spin_lock_irqsave(&irq->irq_lock, flags);

		if (test_bit(i * 2 + 1, &val))
			irq->config = VGIC_CONFIG_EDGE;
		else
			irq->config = VGIC_CONFIG_LEVEL;

		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

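/*
 * GICD_ICFGR encodes two bits per interrupt, of which only the upper
 * bit is meaningful: 0b1x means edge-triggered, 0b0x means
 * level-sensitive. That is why the read side reports (2U << (i * 2))
 * for edge interrupts and the write side only tests bit (i * 2 + 1).
 */
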
u64 vgic_read_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid)
{
	int i;
	u64 val = 0;
	int nr_irqs = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;

	for (i = 0; i < 32; i++) {
		struct vgic_irq *irq;

		if ((intid + i) < VGIC_NR_SGIS || (intid + i) >= nr_irqs)
			continue;

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		if (irq->config == VGIC_CONFIG_LEVEL && irq->line_level)
			val |= (1U << i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return val;
}

void vgic_write_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid,
				    const u64 val)
{
	int i;
	int nr_irqs = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;
	unsigned long flags;

	for (i = 0; i < 32; i++) {
		struct vgic_irq *irq;
		bool new_level;

		if ((intid + i) < VGIC_NR_SGIS || (intid + i) >= nr_irqs)
			continue;

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		/*
		 * Line level is set irrespective of irq type
		 * (level or edge) to avoid dependency that VM should
		 * restore irq config before line level.
		 */
		new_level = !!(val & (1U << i));
		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		irq->line_level = new_level;
		if (new_level)
			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
		else
			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}
}

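/*
 * These two helpers back the KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO device
 * attribute, which lets userspace save and restore the line level of
 * 32 interrupts at a time; SGIs are skipped since they have no line
 * level to migrate.
 */
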
static int match_region(const void *key, const void *elt)
{
	const unsigned int offset = (unsigned long)key;
	const struct vgic_register_region *region = elt;

	if (offset < region->reg_offset)
		return -1;

	if (offset >= region->reg_offset + region->len)
		return 1;

	return 0;
}

const struct vgic_register_region *
vgic_find_mmio_region(const struct vgic_register_region *regions,
		      int nr_regions, unsigned int offset)
{
	return bsearch((void *)(uintptr_t)offset, regions, nr_regions,
		       sizeof(regions[0]), match_region);
}

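/*
 * Note that bsearch() only works on arrays sorted by the compare key,
 * so the per-model register tables passed in here must be laid out in
 * ascending reg_offset order.
 */
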
void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_set_vmcr(vcpu, vmcr);
	else
		vgic_v3_set_vmcr(vcpu, vmcr);
}

void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_get_vmcr(vcpu, vmcr);
	else
		vgic_v3_get_vmcr(vcpu, vmcr);
}

/*
 * kvm_mmio_read_buf() returns a value in a format where it can be converted
 * to a byte array and be directly observed as the guest wanted it to appear
 * in memory if it had done the store itself, which is LE for the GIC, as the
 * guest knows the GIC is always LE.
 *
 * We convert this value to the CPUs native format to deal with it as a data
 * value.
 */
unsigned long vgic_data_mmio_bus_to_host(const void *val, unsigned int len)
{
	unsigned long data = kvm_mmio_read_buf(val, len);

	switch (len) {
	case 1:
		return data;
	case 2:
		return le16_to_cpu(data);
	case 4:
		return le32_to_cpu(data);
	default:
		return le64_to_cpu(data);
	}
}

/*
 * kvm_mmio_write_buf() expects a value in a format such that if converted to
 * a byte array it is observed as the guest would see it if it could perform
 * the load directly. Since the GIC is LE, and the guest knows this, the
 * guest expects a value in little endian format.
 *
 * We convert the data value from the CPUs native format to LE so that the
 * value is returned in the proper format.
 */
void vgic_data_host_to_mmio_bus(void *buf, unsigned int len,
				unsigned long data)
{
	switch (len) {
	case 1:
		break;
	case 2:
		data = cpu_to_le16(data);
		break;
	case 4:
		data = cpu_to_le32(data);
		break;
	default:
		data = cpu_to_le64(data);
	}

	kvm_mmio_write_buf(buf, len, data);
}

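/*
 * Concrete example, for illustration: a big-endian host handling a
 * 4-byte guest read that produced the host value 0x12345678 runs it
 * through cpu_to_le32(), so the guest observes the byte sequence
 * 0x78 0x56 0x34 0x12 - the layout it expects from a little-endian
 * GIC. On a little-endian host both conversions are no-ops.
 */
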
static struct vgic_io_device *kvm_to_vgic_iodev(const struct kvm_io_device *dev)
{
	return container_of(dev, struct vgic_io_device, dev);
}

static bool check_region(const struct kvm *kvm,
			 const struct vgic_register_region *region,
			 gpa_t addr, int len)
{
	int flags, nr_irqs = kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;

	switch (len) {
	case sizeof(u8):
		flags = VGIC_ACCESS_8bit;
		break;
	case sizeof(u32):
		flags = VGIC_ACCESS_32bit;
		break;
	case sizeof(u64):
		flags = VGIC_ACCESS_64bit;
		break;
	default:
		return false;
	}

	if ((region->access_flags & flags) && IS_ALIGNED(addr, len)) {
		if (!region->bits_per_irq)
			return true;

		/* Do we access a non-allocated IRQ? */
		return VGIC_ADDR_TO_INTID(addr, region->bits_per_irq) < nr_irqs;
	}

	return false;
}

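/*
 * Example of a rejected access, for illustration: a 2-byte access
 * never matches the 8/32/64-bit cases and is refused outright, and a
 * 4-byte access to a region that only advertises VGIC_ACCESS_32bit is
 * still refused if it is not 4-byte aligned or lands beyond the last
 * allocated INTID.
 */
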
const struct vgic_register_region *
vgic_get_mmio_region(struct kvm_vcpu *vcpu, struct vgic_io_device *iodev,
		     gpa_t addr, int len)
{
	const struct vgic_register_region *region;

	region = vgic_find_mmio_region(iodev->regions, iodev->nr_regions,
				       addr - iodev->base_addr);
	if (!region || !check_region(vcpu->kvm, region, addr, len))
		return NULL;

	return region;
}

static int vgic_uaccess_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			     gpa_t addr, u32 *val)
{
	struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
	const struct vgic_register_region *region;
	struct kvm_vcpu *r_vcpu;

	region = vgic_get_mmio_region(vcpu, iodev, addr, sizeof(u32));
	if (!region) {
		*val = 0;
		return 0;
	}

	r_vcpu = iodev->redist_vcpu ? iodev->redist_vcpu : vcpu;
	if (region->uaccess_read)
		*val = region->uaccess_read(r_vcpu, addr, sizeof(u32));
	else
		*val = region->read(r_vcpu, addr, sizeof(u32));

	return 0;
}

static int vgic_uaccess_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			      gpa_t addr, const u32 *val)
{
	struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
	const struct vgic_register_region *region;
	struct kvm_vcpu *r_vcpu;

	region = vgic_get_mmio_region(vcpu, iodev, addr, sizeof(u32));
	if (!region)
		return -EINVAL;

	r_vcpu = iodev->redist_vcpu ? iodev->redist_vcpu : vcpu;
	if (region->uaccess_write)
		return region->uaccess_write(r_vcpu, addr, sizeof(u32), *val);

	region->write(r_vcpu, addr, sizeof(u32), *val);
	return 0;
}

/*
 * Userland access to VGIC registers.
 */
int vgic_uaccess(struct kvm_vcpu *vcpu, struct vgic_io_device *dev,
		 bool is_write, int offset, u32 *val)
{
	if (is_write)
		return vgic_uaccess_write(vcpu, &dev->dev, offset, val);
	else
		return vgic_uaccess_read(vcpu, &dev->dev, offset, val);
}

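/*
 * Userspace accesses are always 32 bits wide, which is why the helpers
 * above hardcode sizeof(u32): the KVM device attribute API for the
 * VGIC transfers one 32-bit register value per call. Regions can
 * provide dedicated uaccess_read/uaccess_write handlers when
 * save/restore semantics need to differ from what a guest-visible
 * access would do.
 */
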
static int dispatch_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			      gpa_t addr, int len, void *val)
{
	struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
	const struct vgic_register_region *region;
	unsigned long data = 0;

	region = vgic_get_mmio_region(vcpu, iodev, addr, len);
	if (!region) {
		memset(val, 0, len);
		return 0;
	}

	switch (iodev->iodev_type) {
	case IODEV_CPUIF:
		data = region->read(vcpu, addr, len);
		break;
	case IODEV_DIST:
		data = region->read(vcpu, addr, len);
		break;
	case IODEV_REDIST:
		data = region->read(iodev->redist_vcpu, addr, len);
		break;
	case IODEV_ITS:
		data = region->its_read(vcpu->kvm, iodev->its, addr, len);
		break;
	}

	vgic_data_host_to_mmio_bus(val, len, data);
	return 0;
}

static int dispatch_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			       gpa_t addr, int len, const void *val)
{
	struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
	const struct vgic_register_region *region;
	unsigned long data = vgic_data_mmio_bus_to_host(val, len);

	region = vgic_get_mmio_region(vcpu, iodev, addr, len);
	if (!region)
		return 0;

	switch (iodev->iodev_type) {
	case IODEV_CPUIF:
		region->write(vcpu, addr, len, data);
		break;
	case IODEV_DIST:
		region->write(vcpu, addr, len, data);
		break;
	case IODEV_REDIST:
		region->write(iodev->redist_vcpu, addr, len, data);
		break;
	case IODEV_ITS:
		region->its_write(vcpu->kvm, iodev->its, addr, len, data);
		break;
	}

	return 0;
}

struct kvm_io_device_ops kvm_io_gic_ops = {
	.read = dispatch_mmio_read,
	.write = dispatch_mmio_write,
};

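/*
 * Reads of unmapped or invalid offsets return zeroes and such writes
 * are silently dropped: the dispatchers above always report success to
 * the KVM I/O bus rather than injecting an abort, matching the RAZ/WI
 * behaviour of unimplemented GIC register space.
 */
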
int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
			     enum vgic_type type)
{
	struct vgic_io_device *io_device = &kvm->arch.vgic.dist_iodev;
	int ret = 0;
	unsigned int len;

	switch (type) {
	case VGIC_V2:
		len = vgic_v2_init_dist_iodev(io_device);
		break;
	case VGIC_V3:
		len = vgic_v3_init_dist_iodev(io_device);
		break;
	default:
		BUG_ON(1);
	}

	io_device->base_addr = dist_base_address;
	io_device->iodev_type = IODEV_DIST;
	io_device->redist_vcpu = NULL;

	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, dist_base_address,
				      len, &io_device->dev);
	mutex_unlock(&kvm->slots_lock);

	return ret;
}
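
/*
 * This is intended to be called once userspace has set the distributor
 * base address: from then on, guest accesses in the range
 * [dist_base_address, dist_base_address + len) trap into
 * dispatch_mmio_read()/dispatch_mmio_write() via the KVM_MMIO_BUS.
 */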