// SPDX-License-Identifier: GPL-2.0-only
/*
 * VGIC MMIO handling functions
 */

#include <linux/bitops.h>
#include <linux/bsearch.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <kvm/iodev.h>
#include <kvm/arm_arch_timer.h>
#include <kvm/arm_vgic.h>

#include "vgic.h"
#include "vgic-mmio.h"

unsigned long vgic_mmio_read_raz(struct kvm_vcpu *vcpu,
				 gpa_t addr, unsigned int len)
{
	return 0;
}

unsigned long vgic_mmio_read_rao(struct kvm_vcpu *vcpu,
				 gpa_t addr, unsigned int len)
{
	return -1UL;
}

void vgic_mmio_write_wi(struct kvm_vcpu *vcpu, gpa_t addr,
			unsigned int len, unsigned long val)
{
	/* Ignore */
}

int vgic_mmio_uaccess_write_wi(struct kvm_vcpu *vcpu, gpa_t addr,
			       unsigned int len, unsigned long val)
{
	/* Ignore */
	return 0;
}

unsigned long vgic_mmio_read_group(struct kvm_vcpu *vcpu,
				   gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	/* Loop over all IRQs affected by this read */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		if (irq->group)
			value |= BIT(i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}

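/*
 * Illustrative note (not from the original source): assuming the usual
 * definition of VGIC_ADDR_TO_INTID() as the bit offset (byte offset * 8)
 * divided by the number of state bits per IRQ, a range with 1 bit per IRQ
 * maps a 4-byte read at offset 0x4 (e.g. GICD_IGROUPR1) to INTID 32, so
 * the loop above assembles one group bit for each of INTIDs 32..63.
 */
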
static void vgic_update_vsgi(struct vgic_irq *irq)
{
	WARN_ON(its_prop_update_vsgi(irq->host_irq, irq->priority, irq->group));
}

void vgic_mmio_write_group(struct kvm_vcpu *vcpu, gpa_t addr,
			   unsigned int len, unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		irq->group = !!(val & BIT(i));
		if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
			vgic_update_vsgi(irq);
			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		} else {
			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
		}

		vgic_put_irq(vcpu->kvm, irq);
	}
}

/*
 * Read accesses to both GICD_ICENABLER and GICD_ISENABLER return the value
 * of the enabled bit, so there is only one function for both here.
 */
unsigned long vgic_mmio_read_enable(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	/* Loop over all IRQs affected by this read */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		if (irq->enabled)
			value |= (1U << i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}

void vgic_mmio_write_senable(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
			if (!irq->enabled) {
				struct irq_data *data;

				irq->enabled = true;
				data = &irq_to_desc(irq->host_irq)->irq_data;
				while (irqd_irq_disabled(data))
					enable_irq(irq->host_irq);
			}

			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
			vgic_put_irq(vcpu->kvm, irq);

			continue;
		} else if (vgic_irq_is_mapped_level(irq)) {
			bool was_high = irq->line_level;

			/*
			 * We need to update the state of the interrupt because
			 * the guest might have changed the state of the device
			 * while the interrupt was disabled at the VGIC level.
			 */
			irq->line_level = vgic_get_phys_line_level(irq);
			/*
			 * Deactivate the physical interrupt so the GIC will let
			 * us know when it is asserted again.
			 */
			if (!irq->active && was_high && !irq->line_level)
				vgic_irq_set_phys_active(irq, false);
		}

		irq->enabled = true;
		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}
}

void vgic_mmio_write_cenable(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		if (irq->hw && vgic_irq_is_sgi(irq->intid) && irq->enabled)
			disable_irq_nosync(irq->host_irq);

		irq->enabled = false;

		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

int vgic_uaccess_write_senable(struct kvm_vcpu *vcpu,
			       gpa_t addr, unsigned int len,
			       unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		irq->enabled = true;
		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return 0;
}

int vgic_uaccess_write_cenable(struct kvm_vcpu *vcpu,
			       gpa_t addr, unsigned int len,
			       unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		irq->enabled = false;
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return 0;
}

unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	/* Loop over all IRQs affected by this read */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		unsigned long flags;
		bool val;

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
			int err;

			val = false;
			err = irq_get_irqchip_state(irq->host_irq,
						    IRQCHIP_STATE_PENDING,
						    &val);
			WARN_RATELIMIT(err, "IRQ %d", irq->host_irq);
		} else {
			val = irq_is_pending(irq);
		}

		value |= ((u32)val << i);
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}

/* Must be called with irq->irq_lock held */
static void vgic_hw_irq_spending(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
				 bool is_uaccess)
{
	if (is_uaccess)
		return;

	irq->pending_latch = true;
	vgic_irq_set_phys_active(irq, true);
}

static bool is_vgic_v2_sgi(struct kvm_vcpu *vcpu, struct vgic_irq *irq)
{
	return (vgic_irq_is_sgi(irq->intid) &&
		vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2);
}

void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
			      gpa_t addr, unsigned int len,
			      unsigned long val)
{
	bool is_uaccess = !kvm_get_running_vcpu();
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		/* GICD_ISPENDR0 SGI bits are WI */
		if (is_vgic_v2_sgi(vcpu, irq)) {
			vgic_put_irq(vcpu->kvm, irq);
			continue;
		}

		raw_spin_lock_irqsave(&irq->irq_lock, flags);

		if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
			/* HW SGI? Ask the GIC to inject it */
			int err;
			err = irq_set_irqchip_state(irq->host_irq,
						    IRQCHIP_STATE_PENDING,
						    true);
			WARN_RATELIMIT(err, "IRQ %d", irq->host_irq);

			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
			vgic_put_irq(vcpu->kvm, irq);

			continue;
		}

		if (irq->hw)
			vgic_hw_irq_spending(vcpu, irq, is_uaccess);
		else
			irq->pending_latch = true;
		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

/* Must be called with irq->irq_lock held */
static void vgic_hw_irq_cpending(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
				 bool is_uaccess)
{
	if (is_uaccess)
		return;

	irq->pending_latch = false;

	/*
	 * We don't want the guest to effectively mask the physical
	 * interrupt by doing a write to SPENDR followed by a write to
	 * CPENDR for HW interrupts, so we clear the active state on
	 * the physical side if the virtual interrupt is not active.
	 * This may lead to taking an additional interrupt on the
	 * host, but that should not be a problem as the worst that
	 * can happen is an additional vgic injection. We also clear
	 * the pending state to maintain proper semantics for edge HW
	 * interrupts.
	 */
	vgic_irq_set_phys_pending(irq, false);
	if (!irq->active)
		vgic_irq_set_phys_active(irq, false);
}

void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
			      gpa_t addr, unsigned int len,
			      unsigned long val)
{
	bool is_uaccess = !kvm_get_running_vcpu();
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		/* GICD_ICPENDR0 SGI bits are WI */
		if (is_vgic_v2_sgi(vcpu, irq)) {
			vgic_put_irq(vcpu->kvm, irq);
			continue;
		}

		raw_spin_lock_irqsave(&irq->irq_lock, flags);

		if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
			/* HW SGI? Ask the GIC to clear its pending bit */
			int err;
			err = irq_set_irqchip_state(irq->host_irq,
						    IRQCHIP_STATE_PENDING,
						    false);
			WARN_RATELIMIT(err, "IRQ %d", irq->host_irq);

			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
			vgic_put_irq(vcpu->kvm, irq);

			continue;
		}

		if (irq->hw)
			vgic_hw_irq_cpending(vcpu, irq, is_uaccess);
		else
			irq->pending_latch = false;

		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

/*
 * If we are fiddling with an IRQ's active state, we have to make sure the IRQ
 * is not queued on some running VCPU's LRs, because then the change to the
 * active state can be overwritten when the VCPU's state is synced coming back
 * from the guest.
 *
 * For shared interrupts as well as GICv3 private interrupts, we have to
 * stop all the VCPUs because interrupts can be migrated while we don't hold
 * the IRQ locks and we don't want to be chasing moving targets.
 *
 * For GICv2 private interrupts we don't have to do anything because
 * userspace accesses to the VGIC state already require all VCPUs to be
 * stopped, and only the VCPU itself can modify its private interrupts
 * active state, which guarantees that the VCPU is not running.
 */
static void vgic_access_active_prepare(struct kvm_vcpu *vcpu, u32 intid)
{
	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
	    intid >= VGIC_NR_PRIVATE_IRQS)
		kvm_arm_halt_guest(vcpu->kvm);
}

/* See vgic_access_active_prepare */
static void vgic_access_active_finish(struct kvm_vcpu *vcpu, u32 intid)
{
	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
	    intid >= VGIC_NR_PRIVATE_IRQS)
		kvm_arm_resume_guest(vcpu->kvm);
}

static unsigned long __vgic_mmio_read_active(struct kvm_vcpu *vcpu,
					     gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	/* Loop over all IRQs affected by this read */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		/*
		 * Even for HW interrupts, don't evaluate the HW state as
		 * all the guest is interested in is the virtual state.
		 */
		if (irq->active)
			value |= (1U << i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}

unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 val;

	mutex_lock(&vcpu->kvm->lock);
	vgic_access_active_prepare(vcpu, intid);

	val = __vgic_mmio_read_active(vcpu, addr, len);

	vgic_access_active_finish(vcpu, intid);
	mutex_unlock(&vcpu->kvm->lock);

	return val;
}

unsigned long vgic_uaccess_read_active(struct kvm_vcpu *vcpu,
				       gpa_t addr, unsigned int len)
{
	return __vgic_mmio_read_active(vcpu, addr, len);
}

/* Must be called with irq->irq_lock held */
static void vgic_hw_irq_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
				      bool active, bool is_uaccess)
{
	if (is_uaccess)
		return;

	irq->active = active;
	vgic_irq_set_phys_active(irq, active);
}

static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
				    bool active)
{
	unsigned long flags;
	struct kvm_vcpu *requester_vcpu = kvm_get_running_vcpu();

	raw_spin_lock_irqsave(&irq->irq_lock, flags);

	if (irq->hw && !vgic_irq_is_sgi(irq->intid)) {
		vgic_hw_irq_change_active(vcpu, irq, active, !requester_vcpu);
	} else if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
		/*
		 * GICv4.1 VSGI feature doesn't track an active state,
		 * so let's not kid ourselves, there is nothing we can
		 * do about it.
		 */
		irq->active = false;
	} else {
		u32 model = vcpu->kvm->arch.vgic.vgic_model;
		u8 active_source;

		irq->active = active;

		/*
		 * The GICv2 architecture indicates that the source CPUID for
		 * an SGI should be provided during an EOI which implies that
		 * the active state is stored somewhere, but at the same time
		 * this state is not architecturally exposed anywhere and we
		 * have no way of knowing the right source.
		 *
		 * This may lead to a VCPU not being able to receive
		 * additional instances of a particular SGI after migration
		 * for a GICv2 VM on some GIC implementations. Oh well.
		 */
		active_source = (requester_vcpu) ? requester_vcpu->vcpu_id : 0;

		if (model == KVM_DEV_TYPE_ARM_VGIC_V2 &&
		    active && vgic_irq_is_sgi(irq->intid))
			irq->active_source = active_source;
	}

	if (irq->active)
		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
	else
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
}

static void __vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
				      gpa_t addr, unsigned int len,
				      unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		vgic_mmio_change_active(vcpu, irq, false);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);

	mutex_lock(&vcpu->kvm->lock);
	vgic_access_active_prepare(vcpu, intid);

	__vgic_mmio_write_cactive(vcpu, addr, len, val);

	vgic_access_active_finish(vcpu, intid);
	mutex_unlock(&vcpu->kvm->lock);
}

int vgic_mmio_uaccess_write_cactive(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len,
				    unsigned long val)
{
	__vgic_mmio_write_cactive(vcpu, addr, len, val);
	return 0;
}

static void __vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
				      gpa_t addr, unsigned int len,
				      unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		vgic_mmio_change_active(vcpu, irq, true);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);

	mutex_lock(&vcpu->kvm->lock);
	vgic_access_active_prepare(vcpu, intid);

	__vgic_mmio_write_sactive(vcpu, addr, len, val);

	vgic_access_active_finish(vcpu, intid);
	mutex_unlock(&vcpu->kvm->lock);
}

int vgic_mmio_uaccess_write_sactive(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len,
				    unsigned long val)
{
	__vgic_mmio_write_sactive(vcpu, addr, len, val);
	return 0;
}

unsigned long vgic_mmio_read_priority(struct kvm_vcpu *vcpu,
				      gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
	int i;
	u64 val = 0;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		val |= (u64)irq->priority << (i * 8);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return val;
}

/*
 * We currently don't handle changing the priority of an interrupt that
 * is already pending on a VCPU. If there is a need for this, we would
 * need to make this VCPU exit and re-evaluate the priorities, potentially
 * leading to this interrupt getting presented now to the guest (if it has
 * been masked by the priority mask before).
 */
void vgic_mmio_write_priority(struct kvm_vcpu *vcpu,
			      gpa_t addr, unsigned int len,
			      unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
	int i;
	unsigned long flags;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		/* Narrow the priority range to what we actually support */
		irq->priority = (val >> (i * 8)) & GENMASK(7, 8 - VGIC_PRI_BITS);
		if (irq->hw && vgic_irq_is_sgi(irq->intid))
			vgic_update_vsgi(irq);
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}
}

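/*
 * Worked example (not from the original source): assuming VGIC_PRI_BITS
 * is 5, GENMASK(7, 3) is 0xf8, so a guest write of 0xab to a
 * GICD_IPRIORITYR byte is stored as 0xa8. The low three priority bits
 * behave as RAZ/WI, matching a GIC that implements 32 priority levels.
 */
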
unsigned long vgic_mmio_read_config(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 2);
	u32 value = 0;
	int i;

	for (i = 0; i < len * 4; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		if (irq->config == VGIC_CONFIG_EDGE)
			value |= (2U << (i * 2));

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}

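/*
 * Worked example (not from the original source): GICD_ICFGR uses a 2-bit
 * field per interrupt, of which only the upper bit is meaningful, so a
 * 32-bit read covers 16 INTIDs. If only the first and fourth INTIDs of
 * the window are edge triggered, the register reads back as
 * (2U << 0) | (2U << 6) = 0x82.
 */
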
void vgic_mmio_write_config(struct kvm_vcpu *vcpu,
			    gpa_t addr, unsigned int len,
			    unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 2);
	int i;
	unsigned long flags;

	for (i = 0; i < len * 4; i++) {
		struct vgic_irq *irq;

		/*
		 * The configuration cannot be changed for SGIs in general,
		 * for PPIs this is IMPLEMENTATION DEFINED. The arch timer
		 * code relies on PPIs being level triggered, so we also
		 * make them read-only here.
		 */
		if (intid + i < VGIC_NR_PRIVATE_IRQS)
			continue;

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		raw_spin_lock_irqsave(&irq->irq_lock, flags);

		if (test_bit(i * 2 + 1, &val))
			irq->config = VGIC_CONFIG_EDGE;
		else
			irq->config = VGIC_CONFIG_LEVEL;

		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

u64 vgic_read_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid)
{
	int i;
	u64 val = 0;
	int nr_irqs = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;

	for (i = 0; i < 32; i++) {
		struct vgic_irq *irq;

		if ((intid + i) < VGIC_NR_SGIS || (intid + i) >= nr_irqs)
			continue;

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		if (irq->config == VGIC_CONFIG_LEVEL && irq->line_level)
			val |= (1U << i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return val;
}

void vgic_write_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid,
				    const u64 val)
{
	int i;
	int nr_irqs = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;
	unsigned long flags;

	for (i = 0; i < 32; i++) {
		struct vgic_irq *irq;
		bool new_level;

		if ((intid + i) < VGIC_NR_SGIS || (intid + i) >= nr_irqs)
			continue;

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		/*
		 * Line level is set irrespective of irq type
		 * (level or edge) to avoid dependency that VM should
		 * restore irq config before line level.
		 */
		new_level = !!(val & (1U << i));
		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		irq->line_level = new_level;
		if (new_level)
			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
		else
			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}
}

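/*
 * Illustrative note (not from the original source): the two helpers above
 * back the KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO device attribute, which lets
 * userspace save and restore the line level of level-triggered interrupts
 * in 32-IRQ chunks; SGIs and unallocated INTIDs are simply skipped.
 */
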
static int match_region(const void *key, const void *elt)
{
	const unsigned int offset = (unsigned long)key;
	const struct vgic_register_region *region = elt;

	if (offset < region->reg_offset)
		return -1;

	if (offset >= region->reg_offset + region->len)
		return 1;

	return 0;
}

const struct vgic_register_region *
vgic_find_mmio_region(const struct vgic_register_region *regions,
		      int nr_regions, unsigned int offset)
{
	return bsearch((void *)(uintptr_t)offset, regions, nr_regions,
		       sizeof(regions[0]), match_region);
}

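/*
 * Illustrative example (not from the original source): the regions array
 * is sorted by reg_offset, so bsearch() with match_region() performs a
 * range lookup. Given entries covering offsets [0x0, 0x10) and
 * [0x80, 0x100), a lookup of offset 0x84 returns the second entry, while
 * offset 0x20 matches no entry and the function returns NULL.
 */
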
void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_set_vmcr(vcpu, vmcr);
	else
		vgic_v3_set_vmcr(vcpu, vmcr);
}

void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_get_vmcr(vcpu, vmcr);
	else
		vgic_v3_get_vmcr(vcpu, vmcr);
}

/*
 * kvm_mmio_read_buf() returns a value in a format where it can be converted
 * to a byte array and be directly observed as the guest wanted it to appear
 * in memory if it had done the store itself, which is LE for the GIC, as the
 * guest knows the GIC is always LE.
 *
 * We convert this value to the CPUs native format to deal with it as a data
 * value.
 */
unsigned long vgic_data_mmio_bus_to_host(const void *val, unsigned int len)
{
	unsigned long data = kvm_mmio_read_buf(val, len);

	switch (len) {
	case 1:
		return data;
	case 2:
		return le16_to_cpu(data);
	case 4:
		return le32_to_cpu(data);
	default:
		return le64_to_cpu(data);
	}
}

/*
 * kvm_mmio_write_buf() expects a value in a format such that if converted to
 * a byte array it is observed as the guest would see it if it could perform
 * the load directly. Since the GIC is LE, and the guest knows this, the
 * guest expects a value in little endian format.
 *
 * We convert the data value from the CPUs native format to LE so that the
 * value is returned in the proper format.
 */
void vgic_data_host_to_mmio_bus(void *buf, unsigned int len,
				unsigned long data)
{
	switch (len) {
	case 1:
		break;
	case 2:
		data = cpu_to_le16(data);
		break;
	case 4:
		data = cpu_to_le32(data);
		break;
	default:
		data = cpu_to_le64(data);
	}

	kvm_mmio_write_buf(buf, len, data);
}

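/*
 * Worked example (not from the original source): a guest 32-bit store of
 * 0x01020304 arrives on the MMIO bus as the byte sequence 04 03 02 01.
 * vgic_data_mmio_bus_to_host() turns that back into the native value
 * 0x01020304 (a no-op on a little-endian host, a byte swap on a
 * big-endian one), and vgic_data_host_to_mmio_bus() performs the exact
 * inverse when the guest reads.
 */
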
static struct vgic_io_device *kvm_to_vgic_iodev(const struct kvm_io_device *dev)
{
	return container_of(dev, struct vgic_io_device, dev);
}

static bool check_region(const struct kvm *kvm,
			 const struct vgic_register_region *region,
			 gpa_t addr, int len)
{
	int flags, nr_irqs = kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;

	switch (len) {
	case sizeof(u8):
		flags = VGIC_ACCESS_8bit;
		break;
	case sizeof(u32):
		flags = VGIC_ACCESS_32bit;
		break;
	case sizeof(u64):
		flags = VGIC_ACCESS_64bit;
		break;
	default:
		return false;
	}

	if ((region->access_flags & flags) && IS_ALIGNED(addr, len)) {
		if (!region->bits_per_irq)
			return true;

		/* Do we access a non-allocated IRQ? */
		return VGIC_ADDR_TO_INTID(addr, region->bits_per_irq) < nr_irqs;
	}

	return false;
}

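/*
 * Illustrative example (not from the original source): for a VM with
 * nr_spis = 64, nr_irqs is 96. A 4-byte access at offset 0xc of a
 * 1-bit-per-IRQ range resolves to INTID 96 and is rejected as hitting a
 * non-allocated IRQ, while the same access at offset 0x8 (INTID 64)
 * passes the check.
 */
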
const struct vgic_register_region *
vgic_get_mmio_region(struct kvm_vcpu *vcpu, struct vgic_io_device *iodev,
		     gpa_t addr, int len)
{
	const struct vgic_register_region *region;

	region = vgic_find_mmio_region(iodev->regions, iodev->nr_regions,
				       addr - iodev->base_addr);
	if (!region || !check_region(vcpu->kvm, region, addr, len))
		return NULL;

	return region;
}

static int vgic_uaccess_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			     gpa_t addr, u32 *val)
{
	struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
	const struct vgic_register_region *region;
	struct kvm_vcpu *r_vcpu;

	region = vgic_get_mmio_region(vcpu, iodev, addr, sizeof(u32));
	if (!region) {
		*val = 0;
		return 0;
	}

	r_vcpu = iodev->redist_vcpu ? iodev->redist_vcpu : vcpu;
	if (region->uaccess_read)
		*val = region->uaccess_read(r_vcpu, addr, sizeof(u32));
	else
		*val = region->read(r_vcpu, addr, sizeof(u32));

	return 0;
}

static int vgic_uaccess_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			      gpa_t addr, const u32 *val)
{
	struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
	const struct vgic_register_region *region;
	struct kvm_vcpu *r_vcpu;

	region = vgic_get_mmio_region(vcpu, iodev, addr, sizeof(u32));
	if (!region)
		return -EINVAL;

	r_vcpu = iodev->redist_vcpu ? iodev->redist_vcpu : vcpu;
	if (region->uaccess_write)
		return region->uaccess_write(r_vcpu, addr, sizeof(u32), *val);

	region->write(r_vcpu, addr, sizeof(u32), *val);
	return 0;
}

/*
 * Userland access to VGIC registers.
 */
int vgic_uaccess(struct kvm_vcpu *vcpu, struct vgic_io_device *dev,
		 bool is_write, int offset, u32 *val)
{
	if (is_write)
		return vgic_uaccess_write(vcpu, &dev->dev, offset, val);
	else
		return vgic_uaccess_read(vcpu, &dev->dev, offset, val);
}

static int dispatch_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			      gpa_t addr, int len, void *val)
{
	struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
	const struct vgic_register_region *region;
	unsigned long data = 0;

	region = vgic_get_mmio_region(vcpu, iodev, addr, len);
	if (!region) {
		memset(val, 0, len);
		return 0;
	}

	switch (iodev->iodev_type) {
	case IODEV_CPUIF:
		data = region->read(vcpu, addr, len);
		break;
	case IODEV_DIST:
		data = region->read(vcpu, addr, len);
		break;
	case IODEV_REDIST:
		data = region->read(iodev->redist_vcpu, addr, len);
		break;
	case IODEV_ITS:
		data = region->its_read(vcpu->kvm, iodev->its, addr, len);
		break;
	}

	vgic_data_host_to_mmio_bus(val, len, data);
	return 0;
}

static int dispatch_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			       gpa_t addr, int len, const void *val)
{
	struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
	const struct vgic_register_region *region;
	unsigned long data = vgic_data_mmio_bus_to_host(val, len);

	region = vgic_get_mmio_region(vcpu, iodev, addr, len);
	if (!region)
		return 0;

	switch (iodev->iodev_type) {
	case IODEV_CPUIF:
		region->write(vcpu, addr, len, data);
		break;
	case IODEV_DIST:
		region->write(vcpu, addr, len, data);
		break;
	case IODEV_REDIST:
		region->write(iodev->redist_vcpu, addr, len, data);
		break;
	case IODEV_ITS:
		region->its_write(vcpu->kvm, iodev->its, addr, len, data);
		break;
	}

	return 0;
}

struct kvm_io_device_ops kvm_io_gic_ops = {
	.read = dispatch_mmio_read,
	.write = dispatch_mmio_write,
};

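/*
 * Illustrative note (not from the original source): this registration is
 * expected to run once the guest's distributor base address is known,
 * i.e. from the vgic resource-mapping path, so that guest accesses to the
 * distributor frame are routed to the dispatch handlers above.
 */
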
int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
			     enum vgic_type type)
{
	struct vgic_io_device *io_device = &kvm->arch.vgic.dist_iodev;
	unsigned int len;
	int ret = 0;

	switch (type) {
	case VGIC_V2:
		len = vgic_v2_init_dist_iodev(io_device);
		break;
	case VGIC_V3:
		len = vgic_v3_init_dist_iodev(io_device);
		break;
	default:
		BUG_ON(1);
	}

	io_device->base_addr = dist_base_address;
	io_device->iodev_type = IODEV_DIST;
	io_device->redist_vcpu = NULL;

	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, dist_base_address,
				      len, &io_device->dev);
	mutex_unlock(&kvm->slots_lock);

	return ret;
}