// SPDX-License-Identifier: GPL-2.0-only
/*
 * GICv3 ITS emulation
 *
 * Copyright (C) 2015,2016 ARM Ltd.
 * Author: Andre Przywara <andre.przywara@arm.com>
 */

#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/uaccess.h>
#include <linux/list_sort.h>

#include <linux/irqchip/arm-gic-v3.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>

#include "vgic.h"
#include "vgic-mmio.h"
static int vgic_its_save_tables_v0(struct vgic_its *its);
static int vgic_its_restore_tables_v0(struct vgic_its *its);
static int vgic_its_commit_v0(struct vgic_its *its);
static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
                             struct kvm_vcpu *filter_vcpu, bool needs_inv);
/*
 * Creates a new (reference to a) struct vgic_irq for a given LPI.
 * If this LPI is already mapped on another ITS, we increase its refcount
 * and return a pointer to the existing structure.
 * If this is a "new" LPI, we allocate and initialize a new struct vgic_irq.
 * This function returns a pointer to the _unlocked_ structure.
 */
static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
                                     struct kvm_vcpu *vcpu)
{
        struct vgic_dist *dist = &kvm->arch.vgic;
        struct vgic_irq *irq = vgic_get_irq(kvm, NULL, intid), *oldirq;
        unsigned long flags;
        int ret;

        /* In this case there is no put, since we keep the reference. */
        if (irq)
                return irq;

        irq = kzalloc(sizeof(struct vgic_irq), GFP_KERNEL_ACCOUNT);
        if (!irq)
                return ERR_PTR(-ENOMEM);

        INIT_LIST_HEAD(&irq->lpi_list);
        INIT_LIST_HEAD(&irq->ap_list);
        raw_spin_lock_init(&irq->irq_lock);

        irq->config = VGIC_CONFIG_EDGE;
        kref_init(&irq->refcount);
        irq->intid = intid;
        irq->target_vcpu = vcpu;
        irq->group = 1;

        raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
        /*
         * There could be a race with another vgic_add_lpi(), so we need to
         * check that we don't add a second list entry with the same LPI.
         */
        list_for_each_entry(oldirq, &dist->lpi_list_head, lpi_list) {
                if (oldirq->intid != intid)
                        continue;

                /* Someone was faster with adding this LPI, let's use that. */
                kfree(irq);
                irq = oldirq;

                /*
                 * This increases the refcount, the caller is expected to
                 * call vgic_put_irq() on the returned pointer once it's
                 * finished with the IRQ.
                 */
                vgic_get_irq_kref(irq);

                goto out_unlock;
        }

        list_add_tail(&irq->lpi_list, &dist->lpi_list_head);
        dist->lpi_list_count++;

out_unlock:
        raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
        /*
         * We "cache" the configuration table entries in our struct vgic_irq's.
         * However we only have those structs for mapped IRQs, so we read in
         * the respective config data from memory here upon mapping the LPI.
         *
         * Should any of these fail, behave as if we couldn't create the LPI
         * by dropping the refcount and returning the error.
         */
        ret = update_lpi_config(kvm, irq, NULL, false);
        if (ret) {
                vgic_put_irq(kvm, irq);
                return ERR_PTR(ret);
        }

        ret = vgic_v3_lpi_sync_pending_status(kvm, irq);
        if (ret) {
                vgic_put_irq(kvm, irq);
                return ERR_PTR(ret);
        }

        return irq;
}
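/*
 * A minimal caller sketch (illustrative, not part of this file): whether
 * vgic_add_lpi() returned a fresh or an existing vgic_irq, the caller owns
 * one reference and drops it with vgic_put_irq() when done.
 *
 *      struct vgic_irq *irq = vgic_add_lpi(kvm, intid, vcpu);
 *
 *      if (IS_ERR(irq))
 *              return PTR_ERR(irq);
 *      ... use irq ...
 *      vgic_put_irq(kvm, irq);
 */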
struct its_device {
        struct list_head dev_list;

        /* the head for the list of ITTEs */
        struct list_head itt_head;
        u32 num_eventid_bits;
        gpa_t itt_addr;
        u32 device_id;
};

#define COLLECTION_NOT_MAPPED ((u32)~0)

struct its_collection {
        struct list_head coll_list;

        u32 collection_id;
        u32 target_addr;
};

#define its_is_collection_mapped(coll) ((coll) && \
        ((coll)->target_addr != COLLECTION_NOT_MAPPED))

struct its_ite {
        struct list_head ite_list;

        struct vgic_irq *irq;
        struct its_collection *collection;
        u32 event_id;
};

struct vgic_translation_cache_entry {
        struct list_head        entry;
        phys_addr_t             db;
        u32                     devid;
        u32                     eventid;
        struct vgic_irq         *irq;
};
/**
 * struct vgic_its_abi - ITS abi ops and settings
 * @cte_esz: collection table entry size
 * @dte_esz: device table entry size
 * @ite_esz: interrupt translation table entry size
 * @save_tables: save the ITS tables into guest RAM
 * @restore_tables: restore the ITS internal structs from tables
 *  stored in guest RAM
 * @commit: initialize the registers which expose the ABI settings,
 *  especially the entry sizes
 */
struct vgic_its_abi {
        int cte_esz;
        int dte_esz;
        int ite_esz;
        int (*save_tables)(struct vgic_its *its);
        int (*restore_tables)(struct vgic_its *its);
        int (*commit)(struct vgic_its *its);
};

#define ABI_0_ESZ       8
#define ESZ_MAX         ABI_0_ESZ
static const struct vgic_its_abi its_table_abi_versions[] = {
        [0] = {
         .cte_esz = ABI_0_ESZ,
         .dte_esz = ABI_0_ESZ,
         .ite_esz = ABI_0_ESZ,
         .save_tables = vgic_its_save_tables_v0,
         .restore_tables = vgic_its_restore_tables_v0,
         .commit = vgic_its_commit_v0,
        },
};

#define NR_ITS_ABIS     ARRAY_SIZE(its_table_abi_versions)

inline const struct vgic_its_abi *vgic_its_get_abi(struct vgic_its *its)
{
        return &its_table_abi_versions[its->abi_rev];
}
static int vgic_its_set_abi(struct vgic_its *its, u32 rev)
{
        const struct vgic_its_abi *abi;

        its->abi_rev = rev;
        abi = vgic_its_get_abi(its);
        return abi->commit(its);
}
/*
 * Finds and returns a device in the device table for an ITS.
 * Must be called with the its_lock mutex held.
 */
static struct its_device *find_its_device(struct vgic_its *its, u32 device_id)
{
        struct its_device *device;

        list_for_each_entry(device, &its->device_list, dev_list)
                if (device_id == device->device_id)
                        return device;

        return NULL;
}
/*
 * Finds and returns an interrupt translation table entry (ITTE) for a given
 * Device ID/Event ID pair on an ITS.
 * Must be called with the its_lock mutex held.
 */
static struct its_ite *find_ite(struct vgic_its *its, u32 device_id,
                                u32 event_id)
{
        struct its_device *device;
        struct its_ite *ite;

        device = find_its_device(its, device_id);
        if (device == NULL)
                return NULL;

        list_for_each_entry(ite, &device->itt_head, ite_list)
                if (ite->event_id == event_id)
                        return ite;

        return NULL;
}
/* To be used as an iterator, this macro misses the enclosing parentheses */
#define for_each_lpi_its(dev, ite, its) \
        list_for_each_entry(dev, &(its)->device_list, dev_list) \
                list_for_each_entry(ite, &(dev)->itt_head, ite_list)

#define GIC_LPI_OFFSET 8192

#define VITS_TYPER_IDBITS 16
#define VITS_TYPER_DEVBITS 16
#define VITS_DTE_MAX_DEVID_OFFSET       (BIT(14) - 1)
#define VITS_ITE_MAX_EVENTID_OFFSET     (BIT(16) - 1)
/*
 * Finds and returns a collection in the ITS collection table.
 * Must be called with the its_lock mutex held.
 */
static struct its_collection *find_collection(struct vgic_its *its, int coll_id)
{
        struct its_collection *collection;

        list_for_each_entry(collection, &its->collection_list, coll_list) {
                if (coll_id == collection->collection_id)
                        return collection;
        }

        return NULL;
}
#define LPI_PROP_ENABLE_BIT(p)  ((p) & LPI_PROP_ENABLED)
#define LPI_PROP_PRIORITY(p)    ((p) & 0xfc)
/*
 * Reads the configuration data for a given LPI from guest memory and
 * updates the fields in struct vgic_irq.
 * If filter_vcpu is not NULL, applies only if the IRQ is targeting this
 * VCPU. Unconditionally applies if filter_vcpu is NULL.
 */
static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
                             struct kvm_vcpu *filter_vcpu, bool needs_inv)
{
        u64 propbase = GICR_PROPBASER_ADDRESS(kvm->arch.vgic.propbaser);
        u8 prop;
        int ret;
        unsigned long flags;

        ret = kvm_read_guest_lock(kvm, propbase + irq->intid - GIC_LPI_OFFSET,
                                  &prop, 1);

        if (ret)
                return ret;

        raw_spin_lock_irqsave(&irq->irq_lock, flags);

        if (!filter_vcpu || filter_vcpu == irq->target_vcpu) {
                irq->priority = LPI_PROP_PRIORITY(prop);
                irq->enabled = LPI_PROP_ENABLE_BIT(prop);

                if (!irq->hw) {
                        vgic_queue_irq_unlock(kvm, irq, flags);
                        return 0;
                }
        }

        raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

        if (irq->hw)
                return its_prop_update_vlpi(irq->host_irq, prop, needs_inv);

        return 0;
}
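/*
 * For reference, a sketch of the one-byte property table entry decoded
 * above (matching the LPI_PROP_* masks; GICv3 LPI configuration):
 *
 *      bit  0     enable (LPI_PROP_ENABLED)
 *      bit  1     reserved
 *      bits 7:2   priority (so priorities only come in steps of 4)
 *
 * e.g. a property byte of 0xa1 yields irq->priority == 0xa0 and
 * irq->enabled == true.
 */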
/*
 * Create a snapshot of the current LPIs targeting @vcpu, so that we can
 * enumerate those LPIs without holding any lock.
 * Returns their number and puts the kmalloc'ed array into intid_ptr.
 */
int vgic_copy_lpi_list(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 **intid_ptr)
{
        struct vgic_dist *dist = &kvm->arch.vgic;
        struct vgic_irq *irq;
        unsigned long flags;
        u32 *intids;
        int irq_count, i = 0;

        /*
         * There is an obvious race between allocating the array and LPIs
         * being mapped/unmapped. If we ended up here as a result of a
         * command, we're safe (locks are held, preventing another
         * command). If coming from another path (such as enabling LPIs),
         * we must be careful not to overrun the array.
         */
        irq_count = READ_ONCE(dist->lpi_list_count);
        intids = kmalloc_array(irq_count, sizeof(intids[0]), GFP_KERNEL_ACCOUNT);
        if (!intids)
                return -ENOMEM;

        raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
        list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
                if (i == irq_count)
                        break;
                /* We don't need to "get" the IRQ, as we hold the list lock. */
                if (vcpu && irq->target_vcpu != vcpu)
                        continue;
                intids[i++] = irq->intid;
        }
        raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);

        *intid_ptr = intids;
        return i;
}
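/*
 * A hedged usage sketch (hypothetical caller, not from this file): the
 * returned array is owned by the caller and must be kfree()d, as the
 * in-tree callers (e.g. its_sync_lpi_pending_table()) do.
 *
 *      u32 *intids;
 *      int nr = vgic_copy_lpi_list(kvm, vcpu, &intids);
 *
 *      if (nr < 0)
 *              return nr;
 *      for (i = 0; i < nr; i++)
 *              ... inspect intids[i] without holding lpi_list_lock ...
 *      kfree(intids);
 */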
static int update_affinity(struct vgic_irq *irq, struct kvm_vcpu *vcpu)
{
        int ret = 0;
        unsigned long flags;

        raw_spin_lock_irqsave(&irq->irq_lock, flags);
        irq->target_vcpu = vcpu;
        raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

        if (irq->hw) {
                struct its_vlpi_map map;

                ret = its_get_vlpi(irq->host_irq, &map);
                if (ret)
                        return ret;

                if (map.vpe)
                        atomic_dec(&map.vpe->vlpi_count);
                map.vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
                atomic_inc(&map.vpe->vlpi_count);

                ret = its_map_vlpi(irq->host_irq, &map);
        }

        return ret;
}
static struct kvm_vcpu *collection_to_vcpu(struct kvm *kvm,
                                           struct its_collection *col)
{
        return kvm_get_vcpu_by_id(kvm, col->target_addr);
}
/*
 * Promotes the ITS view of affinity of an ITTE (which redistributor this LPI
 * is targeting) to the VGIC's view, which deals with target VCPUs.
 * Needs to be called whenever either the collection for an LPI has
 * changed or the collection itself got retargeted.
 */
static void update_affinity_ite(struct kvm *kvm, struct its_ite *ite)
{
        struct kvm_vcpu *vcpu;

        if (!its_is_collection_mapped(ite->collection))
                return;

        vcpu = collection_to_vcpu(kvm, ite->collection);
        update_affinity(ite->irq, vcpu);
}
/*
 * Updates the target VCPU for every LPI targeting this collection.
 * Must be called with the its_lock mutex held.
 */
static void update_affinity_collection(struct kvm *kvm, struct vgic_its *its,
                                       struct its_collection *coll)
{
        struct its_device *device;
        struct its_ite *ite;

        for_each_lpi_its(device, ite, its) {
                if (ite->collection != coll)
                        continue;

                update_affinity_ite(kvm, ite);
        }
}
static u32 max_lpis_propbaser(u64 propbaser)
{
        int nr_idbits = (propbaser & 0x1f) + 1;

        return 1U << min(nr_idbits, INTERRUPT_ID_BITS_ITS);
}
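/*
 * Worked example: PROPBASER.IDbits (bits [4:0]) encodes the number of
 * interrupt ID bits minus one. A field value of 13 gives 2^14 = 16384
 * interrupt IDs, i.e. LPIs 8192..16383 once GIC_LPI_OFFSET is accounted
 * for. The result is clamped to INTERRUPT_ID_BITS_ITS (16), matching
 * VITS_TYPER_IDBITS above.
 */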
/*
 * Sync the pending table pending bit of LPIs targeting @vcpu
 * with our own data structures. This relies on the LPI being
 * mapped before.
 */
static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
{
        gpa_t pendbase = GICR_PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser);
        struct vgic_irq *irq;
        int last_byte_offset = -1;
        int ret = 0;
        u32 *intids;
        int nr_irqs, i;
        unsigned long flags;
        u8 pendmask;

        nr_irqs = vgic_copy_lpi_list(vcpu->kvm, vcpu, &intids);
        if (nr_irqs < 0)
                return nr_irqs;

        for (i = 0; i < nr_irqs; i++) {
                int byte_offset, bit_nr;

                byte_offset = intids[i] / BITS_PER_BYTE;
                bit_nr = intids[i] % BITS_PER_BYTE;

                /*
                 * For contiguously allocated LPIs chances are we just read
                 * this very same byte in the last iteration. Reuse that.
                 */
                if (byte_offset != last_byte_offset) {
                        ret = kvm_read_guest_lock(vcpu->kvm,
                                                  pendbase + byte_offset,
                                                  &pendmask, 1);
                        if (ret) {
                                kfree(intids);
                                return ret;
                        }
                        last_byte_offset = byte_offset;
                }

                irq = vgic_get_irq(vcpu->kvm, NULL, intids[i]);
                if (!irq)
                        continue;

                raw_spin_lock_irqsave(&irq->irq_lock, flags);
                irq->pending_latch = pendmask & (1U << bit_nr);
                vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
                vgic_put_irq(vcpu->kvm, irq);
        }

        kfree(intids);

        return ret;
}
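/*
 * Worked example of the pending table indexing above: each LPI is one bit
 * in the table at PENDBASER, so for intid 8197:
 *
 *      byte_offset = 8197 / 8 = 1024
 *      bit_nr      = 8197 % 8 = 5
 *
 * i.e. bit 5 of the byte at pendbase + 1024 holds its pending state.
 */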
static unsigned long vgic_mmio_read_its_typer(struct kvm *kvm,
                                              struct vgic_its *its,
                                              gpa_t addr, unsigned int len)
{
        const struct vgic_its_abi *abi = vgic_its_get_abi(its);
        u64 reg = GITS_TYPER_PLPIS;

        /*
         * We use linear CPU numbers for redistributor addressing,
         * so GITS_TYPER.PTA is 0.
         * Also we force all PROPBASER registers to be the same, so
         * CommonLPIAff is 0 as well.
         * To avoid memory waste in the guest, we keep the number of IDBits and
         * DevBits low - at least for the time being.
         */
        reg |= GIC_ENCODE_SZ(VITS_TYPER_DEVBITS, 5) << GITS_TYPER_DEVBITS_SHIFT;
        reg |= GIC_ENCODE_SZ(VITS_TYPER_IDBITS, 5) << GITS_TYPER_IDBITS_SHIFT;
        reg |= GIC_ENCODE_SZ(abi->ite_esz, 4) << GITS_TYPER_ITT_ENTRY_SIZE_SHIFT;

        return extract_bytes(reg, addr & 7, len);
}
static unsigned long vgic_mmio_read_its_iidr(struct kvm *kvm,
                                             struct vgic_its *its,
                                             gpa_t addr, unsigned int len)
{
        u32 val;

        val = (its->abi_rev << GITS_IIDR_REV_SHIFT) & GITS_IIDR_REV_MASK;
        val |= (PRODUCT_ID_KVM << GITS_IIDR_PRODUCTID_SHIFT) | IMPLEMENTER_ARM;
        return val;
}
static int vgic_mmio_uaccess_write_its_iidr(struct kvm *kvm,
                                            struct vgic_its *its,
                                            gpa_t addr, unsigned int len,
                                            unsigned long val)
{
        u32 rev = GITS_IIDR_REV(val);

        if (rev >= NR_ITS_ABIS)
                return -EINVAL;
        return vgic_its_set_abi(its, rev);
}
static unsigned long vgic_mmio_read_its_idregs(struct kvm *kvm,
                                               struct vgic_its *its,
                                               gpa_t addr, unsigned int len)
{
        switch (addr & 0xffff) {
        case GITS_PIDR0:
                return 0x92;    /* part number, bits[7:0] */
        case GITS_PIDR1:
                return 0xb4;    /* part number, bits[11:8] */
        case GITS_PIDR2:
                return GIC_PIDR2_ARCH_GICv3 | 0x0b;
        case GITS_PIDR4:
                return 0x40;    /* This is a 64K software visible page */
        /* The following are the ID registers for (any) GIC. */
        case GITS_CIDR0:
                return 0x0d;
        case GITS_CIDR1:
                return 0xf0;
        case GITS_CIDR2:
                return 0x05;
        case GITS_CIDR3:
                return 0xb1;
        }

        return 0;
}
static struct vgic_irq *__vgic_its_check_cache(struct vgic_dist *dist,
                                               phys_addr_t db,
                                               u32 devid, u32 eventid)
{
        struct vgic_translation_cache_entry *cte;

        list_for_each_entry(cte, &dist->lpi_translation_cache, entry) {
                /*
                 * If we hit a NULL entry, there is nothing after this
                 * point.
                 */
                if (!cte->irq)
                        break;

                if (cte->db != db || cte->devid != devid ||
                    cte->eventid != eventid)
                        continue;

                /*
                 * Move this entry to the head, as it is the most
                 * recently used.
                 */
                if (!list_is_first(&cte->entry, &dist->lpi_translation_cache))
                        list_move(&cte->entry, &dist->lpi_translation_cache);

                return cte->irq;
        }

        return NULL;
}
static struct vgic_irq *vgic_its_check_cache(struct kvm *kvm, phys_addr_t db,
                                             u32 devid, u32 eventid)
{
        struct vgic_dist *dist = &kvm->arch.vgic;
        struct vgic_irq *irq;
        unsigned long flags;

        raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);

        irq = __vgic_its_check_cache(dist, db, devid, eventid);
        if (irq)
                vgic_get_irq_kref(irq);

        raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);

        return irq;
}
static void vgic_its_cache_translation(struct kvm *kvm, struct vgic_its *its,
                                       u32 devid, u32 eventid,
                                       struct vgic_irq *irq)
{
        struct vgic_dist *dist = &kvm->arch.vgic;
        struct vgic_translation_cache_entry *cte;
        unsigned long flags;
        phys_addr_t db;

        /* Do not cache a directly injected interrupt */
        if (irq->hw)
                return;

        raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);

        if (unlikely(list_empty(&dist->lpi_translation_cache)))
                goto out;

        /*
         * We could have raced with another CPU caching the same
         * translation behind our back, so let's check it is not in
         * there already.
         */
        db = its->vgic_its_base + GITS_TRANSLATER;
        if (__vgic_its_check_cache(dist, db, devid, eventid))
                goto out;

        /* Always reuse the last entry (LRU policy) */
        cte = list_last_entry(&dist->lpi_translation_cache,
                              typeof(*cte), entry);

        /*
         * Caching the translation implies having an extra reference
         * to the interrupt, so drop the potential reference on what
         * was in the cache, and increment it on the new interrupt.
         */
        if (cte->irq)
                __vgic_put_lpi_locked(kvm, cte->irq);

        vgic_get_irq_kref(irq);

        cte->db         = db;
        cte->devid      = devid;
        cte->eventid    = eventid;
        cte->irq        = irq;

        /* Move the new translation to the head of the list */
        list_move(&cte->entry, &dist->lpi_translation_cache);

out:
        raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
}
void vgic_its_invalidate_cache(struct kvm *kvm)
{
        struct vgic_dist *dist = &kvm->arch.vgic;
        struct vgic_translation_cache_entry *cte;
        unsigned long flags;

        raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);

        list_for_each_entry(cte, &dist->lpi_translation_cache, entry) {
                /*
                 * If we hit a NULL entry, there is nothing after this
                 * point.
                 */
                if (!cte->irq)
                        break;

                __vgic_put_lpi_locked(kvm, cte->irq);
                cte->irq = NULL;
        }

        raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
}
int vgic_its_resolve_lpi(struct kvm *kvm, struct vgic_its *its,
                         u32 devid, u32 eventid, struct vgic_irq **irq)
{
        struct kvm_vcpu *vcpu;
        struct its_ite *ite;

        if (!its->enabled)
                return -EBUSY;

        ite = find_ite(its, devid, eventid);
        if (!ite || !its_is_collection_mapped(ite->collection))
                return E_ITS_INT_UNMAPPED_INTERRUPT;

        vcpu = collection_to_vcpu(kvm, ite->collection);
        if (!vcpu)
                return E_ITS_INT_UNMAPPED_INTERRUPT;

        if (!vgic_lpis_enabled(vcpu))
                return -EBUSY;

        vgic_its_cache_translation(kvm, its, devid, eventid, ite->irq);

        *irq = ite->irq;
        return 0;
}
struct vgic_its *vgic_msi_to_its(struct kvm *kvm, struct kvm_msi *msi)
{
        u64 address;
        struct kvm_io_device *kvm_io_dev;
        struct vgic_io_device *iodev;

        if (!vgic_has_its(kvm))
                return ERR_PTR(-ENODEV);

        if (!(msi->flags & KVM_MSI_VALID_DEVID))
                return ERR_PTR(-EINVAL);

        address = (u64)msi->address_hi << 32 | msi->address_lo;

        kvm_io_dev = kvm_io_bus_get_dev(kvm, KVM_MMIO_BUS, address);
        if (!kvm_io_dev)
                return ERR_PTR(-EINVAL);

        if (kvm_io_dev->ops != &kvm_io_gic_ops)
                return ERR_PTR(-EINVAL);

        iodev = container_of(kvm_io_dev, struct vgic_io_device, dev);
        if (iodev->iodev_type != IODEV_ITS)
                return ERR_PTR(-EINVAL);

        return iodev->its;
}
/*
 * Find the target VCPU and the LPI number for a given devid/eventid pair
 * and make this IRQ pending, possibly injecting it.
 * Must be called with the its_lock mutex held.
 * Returns 0 on success, a positive error value for any ITS mapping
 * related errors and negative error values for generic errors.
 */
static int vgic_its_trigger_msi(struct kvm *kvm, struct vgic_its *its,
                                u32 devid, u32 eventid)
{
        struct vgic_irq *irq = NULL;
        unsigned long flags;
        int err;

        err = vgic_its_resolve_lpi(kvm, its, devid, eventid, &irq);
        if (err)
                return err;

        if (irq->hw)
                return irq_set_irqchip_state(irq->host_irq,
                                             IRQCHIP_STATE_PENDING, true);

        raw_spin_lock_irqsave(&irq->irq_lock, flags);
        irq->pending_latch = true;
        vgic_queue_irq_unlock(kvm, irq, flags);

        return 0;
}
int vgic_its_inject_cached_translation(struct kvm *kvm, struct kvm_msi *msi)
{
        struct vgic_irq *irq;
        unsigned long flags;
        phys_addr_t db;

        db = (u64)msi->address_hi << 32 | msi->address_lo;
        irq = vgic_its_check_cache(kvm, db, msi->devid, msi->data);
        if (!irq)
                return -EWOULDBLOCK;

        raw_spin_lock_irqsave(&irq->irq_lock, flags);
        irq->pending_latch = true;
        vgic_queue_irq_unlock(kvm, irq, flags);
        vgic_put_irq(kvm, irq);

        return 0;
}
/*
 * Queries the KVM IO bus framework to get the ITS pointer from the given
 * doorbell address.
 * We then call vgic_its_trigger_msi() with the decoded data.
 * According to the KVM_SIGNAL_MSI API description returns 1 on success.
 */
int vgic_its_inject_msi(struct kvm *kvm, struct kvm_msi *msi)
{
        struct vgic_its *its;
        int ret;

        if (!vgic_its_inject_cached_translation(kvm, msi))
                return 1;

        its = vgic_msi_to_its(kvm, msi);
        if (IS_ERR(its))
                return PTR_ERR(its);

        mutex_lock(&its->its_lock);
        ret = vgic_its_trigger_msi(kvm, its, msi->devid, msi->data);
        mutex_unlock(&its->its_lock);

        if (ret < 0)
                return ret;

        /*
         * KVM_SIGNAL_MSI demands a return value > 0 for success and 0
         * if the guest has blocked the MSI. So we map any LPI mapping
         * related error to that.
         */
        if (ret)
                return 0;
        return 1;
}
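/*
 * For context, a sketch of the userspace side (assumed VMM code, not part
 * of this file): a VMM reaches vgic_its_inject_msi() through the
 * KVM_SIGNAL_MSI ioctl with KVM_MSI_VALID_DEVID set.
 *
 *      struct kvm_msi msi = {
 *              .address_lo = (u32)doorbell,
 *              .address_hi = (u32)(doorbell >> 32),
 *              .data  = eventid,
 *              .devid = devid,
 *              .flags = KVM_MSI_VALID_DEVID,
 *      };
 *
 *      ret = ioctl(vm_fd, KVM_SIGNAL_MSI, &msi);  (> 0 delivered, 0 blocked)
 */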
/* Requires the its_lock to be held. */
static void its_free_ite(struct kvm *kvm, struct its_ite *ite)
{
        list_del(&ite->ite_list);

        /* This put matches the get in vgic_add_lpi. */
        if (ite->irq) {
                if (ite->irq->hw)
                        WARN_ON(its_unmap_vlpi(ite->irq->host_irq));

                vgic_put_irq(kvm, ite->irq);
        }

        kfree(ite);
}
static u64 its_cmd_mask_field(u64 *its_cmd, int word, int shift, int size)
{
        return (le64_to_cpu(its_cmd[word]) >> shift) & (BIT_ULL(size) - 1);
}

#define its_cmd_get_command(cmd)        its_cmd_mask_field(cmd, 0,  0,  8)
#define its_cmd_get_deviceid(cmd)       its_cmd_mask_field(cmd, 0, 32, 32)
#define its_cmd_get_size(cmd)           (its_cmd_mask_field(cmd, 1,  0,  5) + 1)
#define its_cmd_get_id(cmd)             its_cmd_mask_field(cmd, 1,  0, 32)
#define its_cmd_get_physical_id(cmd)    its_cmd_mask_field(cmd, 1, 32, 32)
#define its_cmd_get_collection(cmd)     its_cmd_mask_field(cmd, 2,  0, 16)
#define its_cmd_get_ittaddr(cmd)        (its_cmd_mask_field(cmd, 2,  8, 44) << 8)
#define its_cmd_get_target_addr(cmd)    its_cmd_mask_field(cmd, 2, 16, 32)
#define its_cmd_get_validbit(cmd)       its_cmd_mask_field(cmd, 2, 63,  1)
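/*
 * Worked example of the accessors above: an ITS command is four
 * little-endian 64-bit words. For MAPTI (per the GICv3 command layout):
 *
 *      word 0: DeviceID in [63:32], command opcode 0x0a in [7:0]
 *      word 1: pINTID in [63:32], EventID in [31:0]
 *      word 2: ICID in [15:0]
 *
 * so its_cmd_get_physical_id(cmd) == its_cmd_mask_field(cmd, 1, 32, 32)
 * extracts bits [63:32] of word 1.
 */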
/*
 * The DISCARD command frees an Interrupt Translation Table Entry (ITTE).
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_discard(struct kvm *kvm, struct vgic_its *its,
                                       u64 *its_cmd)
{
        u32 device_id = its_cmd_get_deviceid(its_cmd);
        u32 event_id = its_cmd_get_id(its_cmd);
        struct its_ite *ite;

        ite = find_ite(its, device_id, event_id);
        if (ite && its_is_collection_mapped(ite->collection)) {
                /*
                 * Though the spec talks about removing the pending state, we
                 * don't bother here since we clear the ITTE anyway and the
                 * pending state is a property of the ITTE struct.
                 */
                vgic_its_invalidate_cache(kvm);

                its_free_ite(kvm, ite);
                return 0;
        }

        return E_ITS_DISCARD_UNMAPPED_INTERRUPT;
}
/*
 * The MOVI command moves an ITTE to a different collection.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_movi(struct kvm *kvm, struct vgic_its *its,
                                    u64 *its_cmd)
{
        u32 device_id = its_cmd_get_deviceid(its_cmd);
        u32 event_id = its_cmd_get_id(its_cmd);
        u32 coll_id = its_cmd_get_collection(its_cmd);
        struct kvm_vcpu *vcpu;
        struct its_ite *ite;
        struct its_collection *collection;

        ite = find_ite(its, device_id, event_id);
        if (!ite)
                return E_ITS_MOVI_UNMAPPED_INTERRUPT;

        if (!its_is_collection_mapped(ite->collection))
                return E_ITS_MOVI_UNMAPPED_COLLECTION;

        collection = find_collection(its, coll_id);
        if (!its_is_collection_mapped(collection))
                return E_ITS_MOVI_UNMAPPED_COLLECTION;

        ite->collection = collection;
        vcpu = collection_to_vcpu(kvm, collection);

        vgic_its_invalidate_cache(kvm);

        return update_affinity(ite->irq, vcpu);
}
static bool __is_visible_gfn_locked(struct vgic_its *its, gpa_t gpa)
{
        gfn_t gfn = gpa >> PAGE_SHIFT;
        int idx;
        bool ret;

        idx = srcu_read_lock(&its->dev->kvm->srcu);
        ret = kvm_is_visible_gfn(its->dev->kvm, gfn);
        srcu_read_unlock(&its->dev->kvm->srcu, idx);
        return ret;
}
/*
 * Check whether an ID can be stored into the corresponding guest table.
 * For a direct table this is pretty easy, but gets a bit nasty for
 * indirect tables. We check whether the resulting guest physical address
 * is actually valid (covered by a memslot and guest accessible).
 * For this we have to read the respective first level entry.
 */
static bool vgic_its_check_id(struct vgic_its *its, u64 baser, u32 id,
                              gpa_t *eaddr)
{
        int l1_tbl_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;
        u64 indirect_ptr, type = GITS_BASER_TYPE(baser);
        phys_addr_t base = GITS_BASER_ADDR_48_to_52(baser);
        int esz = GITS_BASER_ENTRY_SIZE(baser);
        int index;

        switch (type) {
        case GITS_BASER_TYPE_DEVICE:
                if (id >= BIT_ULL(VITS_TYPER_DEVBITS))
                        return false;
                break;
        case GITS_BASER_TYPE_COLLECTION:
                /* as GITS_TYPER.CIL == 0, ITS supports 16-bit collection ID */
                if (id >= BIT_ULL(16))
                        return false;
                break;
        default:
                return false;
        }

        if (!(baser & GITS_BASER_INDIRECT)) {
                phys_addr_t addr;

                if (id >= (l1_tbl_size / esz))
                        return false;

                addr = base + id * esz;

                if (eaddr)
                        *eaddr = addr;

                return __is_visible_gfn_locked(its, addr);
        }

        /* calculate and check the index into the 1st level */
        index = id / (SZ_64K / esz);
        if (index >= (l1_tbl_size / sizeof(u64)))
                return false;

        /* Each 1st level entry is represented by a 64-bit value. */
        if (kvm_read_guest_lock(its->dev->kvm,
                                base + index * sizeof(indirect_ptr),
                                &indirect_ptr, sizeof(indirect_ptr)))
                return false;

        indirect_ptr = le64_to_cpu(indirect_ptr);

        /* check the valid bit of the first level entry */
        if (!(indirect_ptr & BIT_ULL(63)))
                return false;

        /* Mask the guest physical address and calculate the frame number. */
        indirect_ptr &= GENMASK_ULL(51, 16);

        /* Find the address of the actual entry */
        index = id % (SZ_64K / esz);
        indirect_ptr += index * esz;

        if (eaddr)
                *eaddr = indirect_ptr;

        return __is_visible_gfn_locked(its, indirect_ptr);
}
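/*
 * Worked example for the indirect case above, assuming esz == 8 and 64K
 * pages: each second-level page holds SZ_64K / 8 = 8192 entries, so for
 * id == 20000:
 *
 *      L1 index = 20000 / 8192 = 2   (one u64 per L1 entry)
 *      L2 index = 20000 % 8192 = 3616
 *
 * and the entry lives at (L1[2] & GENMASK_ULL(51, 16)) + 3616 * 8,
 * provided L1[2] has its valid bit (63) set.
 */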
/*
 * Check whether an event ID can be stored in the corresponding Interrupt
 * Translation Table, which starts at device->itt_addr.
 */
static bool vgic_its_check_event_id(struct vgic_its *its, struct its_device *device,
                u32 event_id)
{
        const struct vgic_its_abi *abi = vgic_its_get_abi(its);
        int ite_esz = abi->ite_esz;
        gpa_t gpa;

        /* max table size is: BIT_ULL(device->num_eventid_bits) * ite_esz */
        if (event_id >= BIT_ULL(device->num_eventid_bits))
                return false;

        gpa = device->itt_addr + event_id * ite_esz;
        return __is_visible_gfn_locked(its, gpa);
}
/*
 * Add a new collection into the ITS collection table.
 * Returns 0 on success, and a negative error value for generic errors.
 */
static int vgic_its_alloc_collection(struct vgic_its *its,
                                     struct its_collection **colp,
                                     u32 coll_id)
{
        struct its_collection *collection;

        collection = kzalloc(sizeof(*collection), GFP_KERNEL_ACCOUNT);
        if (!collection)
                return -ENOMEM;

        collection->collection_id = coll_id;
        collection->target_addr = COLLECTION_NOT_MAPPED;

        list_add_tail(&collection->coll_list, &its->collection_list);
        *colp = collection;

        return 0;
}
static void vgic_its_free_collection(struct vgic_its *its, u32 coll_id)
{
        struct its_collection *collection;
        struct its_device *device;
        struct its_ite *ite;

        /*
         * Clearing the mapping for that collection ID removes the
         * entry from the list. If there wasn't any before, we can
         * go home early.
         */
        collection = find_collection(its, coll_id);
        if (!collection)
                return;

        for_each_lpi_its(device, ite, its)
                if (ite->collection &&
                    ite->collection->collection_id == coll_id)
                        ite->collection = NULL;

        list_del(&collection->coll_list);
        kfree(collection);
}
/* Must be called with its_lock mutex held */
static struct its_ite *vgic_its_alloc_ite(struct its_device *device,
                                          struct its_collection *collection,
                                          u32 event_id)
{
        struct its_ite *ite;

        ite = kzalloc(sizeof(*ite), GFP_KERNEL_ACCOUNT);
        if (!ite)
                return ERR_PTR(-ENOMEM);

        ite->event_id   = event_id;
        ite->collection = collection;

        list_add_tail(&ite->ite_list, &device->itt_head);
        return ite;
}
/*
 * The MAPTI and MAPI commands map LPIs to ITTEs.
 * Must be called with its_lock mutex held.
 */
static int vgic_its_cmd_handle_mapi(struct kvm *kvm, struct vgic_its *its,
                                    u64 *its_cmd)
{
        u32 device_id = its_cmd_get_deviceid(its_cmd);
        u32 event_id = its_cmd_get_id(its_cmd);
        u32 coll_id = its_cmd_get_collection(its_cmd);
        struct its_ite *ite;
        struct kvm_vcpu *vcpu = NULL;
        struct its_device *device;
        struct its_collection *collection, *new_coll = NULL;
        struct vgic_irq *irq;
        int lpi_nr;

        device = find_its_device(its, device_id);
        if (!device)
                return E_ITS_MAPTI_UNMAPPED_DEVICE;

        if (!vgic_its_check_event_id(its, device, event_id))
                return E_ITS_MAPTI_ID_OOR;

        if (its_cmd_get_command(its_cmd) == GITS_CMD_MAPTI)
                lpi_nr = its_cmd_get_physical_id(its_cmd);
        else
                lpi_nr = event_id;
        if (lpi_nr < GIC_LPI_OFFSET ||
            lpi_nr >= max_lpis_propbaser(kvm->arch.vgic.propbaser))
                return E_ITS_MAPTI_PHYSICALID_OOR;

        /* If there is an existing mapping, behavior is UNPREDICTABLE. */
        if (find_ite(its, device_id, event_id))
                return 0;

        collection = find_collection(its, coll_id);
        if (!collection) {
                int ret;

                if (!vgic_its_check_id(its, its->baser_coll_table, coll_id, NULL))
                        return E_ITS_MAPC_COLLECTION_OOR;

                ret = vgic_its_alloc_collection(its, &collection, coll_id);
                if (ret)
                        return ret;
                new_coll = collection;
        }

        ite = vgic_its_alloc_ite(device, collection, event_id);
        if (IS_ERR(ite)) {
                if (new_coll)
                        vgic_its_free_collection(its, coll_id);
                return PTR_ERR(ite);
        }

        if (its_is_collection_mapped(collection))
                vcpu = collection_to_vcpu(kvm, collection);

        irq = vgic_add_lpi(kvm, lpi_nr, vcpu);
        if (IS_ERR(irq)) {
                if (new_coll)
                        vgic_its_free_collection(its, coll_id);
                its_free_ite(kvm, ite);
                return PTR_ERR(irq);
        }
        ite->irq = irq;

        return 0;
}
/* Requires the its_lock to be held. */
static void vgic_its_free_device(struct kvm *kvm, struct its_device *device)
{
        struct its_ite *ite, *temp;

        /*
         * The spec says that unmapping a device with still valid
         * ITTEs associated is UNPREDICTABLE. We remove all ITTEs,
         * since we cannot leave the memory unreferenced.
         */
        list_for_each_entry_safe(ite, temp, &device->itt_head, ite_list)
                its_free_ite(kvm, ite);

        vgic_its_invalidate_cache(kvm);

        list_del(&device->dev_list);
        kfree(device);
}
/* its lock must be held */
static void vgic_its_free_device_list(struct kvm *kvm, struct vgic_its *its)
{
        struct its_device *cur, *temp;

        list_for_each_entry_safe(cur, temp, &its->device_list, dev_list)
                vgic_its_free_device(kvm, cur);
}

/* its lock must be held */
static void vgic_its_free_collection_list(struct kvm *kvm, struct vgic_its *its)
{
        struct its_collection *cur, *temp;

        list_for_each_entry_safe(cur, temp, &its->collection_list, coll_list)
                vgic_its_free_collection(its, cur->collection_id);
}
/* Must be called with its_lock mutex held */
static struct its_device *vgic_its_alloc_device(struct vgic_its *its,
                                                u32 device_id, gpa_t itt_addr,
                                                u8 num_eventid_bits)
{
        struct its_device *device;

        device = kzalloc(sizeof(*device), GFP_KERNEL_ACCOUNT);
        if (!device)
                return ERR_PTR(-ENOMEM);

        device->device_id = device_id;
        device->itt_addr = itt_addr;
        device->num_eventid_bits = num_eventid_bits;
        INIT_LIST_HEAD(&device->itt_head);

        list_add_tail(&device->dev_list, &its->device_list);
        return device;
}
/*
 * MAPD maps or unmaps a device ID to Interrupt Translation Tables (ITTs).
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_mapd(struct kvm *kvm, struct vgic_its *its,
                                    u64 *its_cmd)
{
        u32 device_id = its_cmd_get_deviceid(its_cmd);
        bool valid = its_cmd_get_validbit(its_cmd);
        u8 num_eventid_bits = its_cmd_get_size(its_cmd);
        gpa_t itt_addr = its_cmd_get_ittaddr(its_cmd);
        struct its_device *device;

        if (!vgic_its_check_id(its, its->baser_device_table, device_id, NULL))
                return E_ITS_MAPD_DEVICE_OOR;

        if (valid && num_eventid_bits > VITS_TYPER_IDBITS)
                return E_ITS_MAPD_ITTSIZE_OOR;

        device = find_its_device(its, device_id);

        /*
         * The spec says that calling MAPD on an already mapped device
         * invalidates all cached data for this device. We implement this
         * by removing the mapping and re-establishing it.
         */
        if (device)
                vgic_its_free_device(kvm, device);

        /*
         * The spec does not say whether unmapping a not-mapped device
         * is an error, so we are done in any case.
         */
        if (!valid)
                return 0;

        device = vgic_its_alloc_device(its, device_id, itt_addr,
                                       num_eventid_bits);

        return PTR_ERR_OR_ZERO(device);
}
/*
 * The MAPC command maps collection IDs to redistributors.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_mapc(struct kvm *kvm, struct vgic_its *its,
                                    u64 *its_cmd)
{
        u16 coll_id;
        struct its_collection *collection;
        bool valid;

        valid = its_cmd_get_validbit(its_cmd);
        coll_id = its_cmd_get_collection(its_cmd);

        if (!valid) {
                vgic_its_free_collection(its, coll_id);
                vgic_its_invalidate_cache(kvm);
        } else {
                struct kvm_vcpu *vcpu;

                vcpu = kvm_get_vcpu_by_id(kvm, its_cmd_get_target_addr(its_cmd));
                if (!vcpu)
                        return E_ITS_MAPC_PROCNUM_OOR;

                collection = find_collection(its, coll_id);

                if (!collection) {
                        int ret;

                        if (!vgic_its_check_id(its, its->baser_coll_table,
                                                coll_id, NULL))
                                return E_ITS_MAPC_COLLECTION_OOR;

                        ret = vgic_its_alloc_collection(its, &collection,
                                                        coll_id);
                        if (ret)
                                return ret;
                        collection->target_addr = vcpu->vcpu_id;
                } else {
                        collection->target_addr = vcpu->vcpu_id;
                        update_affinity_collection(kvm, its, collection);
                }
        }

        return 0;
}
/*
 * The CLEAR command removes the pending state for a particular LPI.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_clear(struct kvm *kvm, struct vgic_its *its,
                                     u64 *its_cmd)
{
        u32 device_id = its_cmd_get_deviceid(its_cmd);
        u32 event_id = its_cmd_get_id(its_cmd);
        struct its_ite *ite;

        ite = find_ite(its, device_id, event_id);
        if (!ite)
                return E_ITS_CLEAR_UNMAPPED_INTERRUPT;

        ite->irq->pending_latch = false;

        if (ite->irq->hw)
                return irq_set_irqchip_state(ite->irq->host_irq,
                                             IRQCHIP_STATE_PENDING, false);

        return 0;
}
int vgic_its_inv_lpi(struct kvm *kvm, struct vgic_irq *irq)
{
        return update_lpi_config(kvm, irq, NULL, true);
}
/*
 * The INV command syncs the configuration bits from the memory table.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_inv(struct kvm *kvm, struct vgic_its *its,
                                   u64 *its_cmd)
{
        u32 device_id = its_cmd_get_deviceid(its_cmd);
        u32 event_id = its_cmd_get_id(its_cmd);
        struct its_ite *ite;

        ite = find_ite(its, device_id, event_id);
        if (!ite)
                return E_ITS_INV_UNMAPPED_INTERRUPT;

        return vgic_its_inv_lpi(kvm, ite->irq);
}
/**
 * vgic_its_invall - invalidate all LPIs targeting a given vcpu
 * @vcpu: the vcpu for which the RD is targeted by an invalidation
 *
 * Contrary to the INVALL command, this targets a RD instead of a
 * collection, and we don't need to hold the its_lock, since no ITS is
 * involved here.
 */
int vgic_its_invall(struct kvm_vcpu *vcpu)
{
        struct kvm *kvm = vcpu->kvm;
        int irq_count, i = 0;
        u32 *intids;

        irq_count = vgic_copy_lpi_list(kvm, vcpu, &intids);
        if (irq_count < 0)
                return irq_count;

        for (i = 0; i < irq_count; i++) {
                struct vgic_irq *irq = vgic_get_irq(kvm, NULL, intids[i]);
                if (!irq)
                        continue;
                update_lpi_config(kvm, irq, vcpu, false);
                vgic_put_irq(kvm, irq);
        }

        kfree(intids);

        if (vcpu->arch.vgic_cpu.vgic_v3.its_vpe.its_vm)
                its_invall_vpe(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe);

        return 0;
}
/*
 * The INVALL command requests flushing of all IRQ data in this collection.
 * Find the VCPU mapped to that collection, then iterate over the VM's list
 * of mapped LPIs and update the configuration for each IRQ which targets
 * the specified vcpu. The configuration will be read from the in-memory
 * configuration table.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_invall(struct kvm *kvm, struct vgic_its *its,
                                      u64 *its_cmd)
{
        u32 coll_id = its_cmd_get_collection(its_cmd);
        struct its_collection *collection;
        struct kvm_vcpu *vcpu;

        collection = find_collection(its, coll_id);
        if (!its_is_collection_mapped(collection))
                return E_ITS_INVALL_UNMAPPED_COLLECTION;

        vcpu = collection_to_vcpu(kvm, collection);
        vgic_its_invall(vcpu);

        return 0;
}
/*
 * The MOVALL command moves the pending state of all IRQs targeting one
 * redistributor to another. We don't hold the pending state in the VCPUs,
 * but in the IRQs instead, so there is really not much to do for us here.
 * However the spec says that no IRQ must target the old redistributor
 * afterwards, so we make sure that no LPI is using the associated target_vcpu.
 * This command affects all LPIs in the system that target that redistributor.
 */
static int vgic_its_cmd_handle_movall(struct kvm *kvm, struct vgic_its *its,
                                      u64 *its_cmd)
{
        struct kvm_vcpu *vcpu1, *vcpu2;
        struct vgic_irq *irq;
        u32 *intids;
        int irq_count, i;

        /* We advertise GITS_TYPER.PTA==0, making the address the vcpu ID */
        vcpu1 = kvm_get_vcpu_by_id(kvm, its_cmd_get_target_addr(its_cmd));
        vcpu2 = kvm_get_vcpu_by_id(kvm, its_cmd_mask_field(its_cmd, 3, 16, 32));

        if (!vcpu1 || !vcpu2)
                return E_ITS_MOVALL_PROCNUM_OOR;

        if (vcpu1 == vcpu2)
                return 0;

        irq_count = vgic_copy_lpi_list(kvm, vcpu1, &intids);
        if (irq_count < 0)
                return irq_count;

        for (i = 0; i < irq_count; i++) {
                irq = vgic_get_irq(kvm, NULL, intids[i]);
                if (!irq)
                        continue;

                update_affinity(irq, vcpu2);

                vgic_put_irq(kvm, irq);
        }

        vgic_its_invalidate_cache(kvm);

        kfree(intids);
        return 0;
}
/*
 * The INT command injects the LPI associated with that DevID/EvID pair.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_int(struct kvm *kvm, struct vgic_its *its,
                                   u64 *its_cmd)
{
        u32 msi_data = its_cmd_get_id(its_cmd);
        u64 msi_devid = its_cmd_get_deviceid(its_cmd);

        return vgic_its_trigger_msi(kvm, its, msi_devid, msi_data);
}
/*
 * This function is called with the its_cmd lock held, but the ITS data
 * structure lock dropped.
 */
static int vgic_its_handle_command(struct kvm *kvm, struct vgic_its *its,
                                   u64 *its_cmd)
{
        int ret = -ENODEV;

        mutex_lock(&its->its_lock);
        switch (its_cmd_get_command(its_cmd)) {
        case GITS_CMD_MAPD:
                ret = vgic_its_cmd_handle_mapd(kvm, its, its_cmd);
                break;
        case GITS_CMD_MAPC:
                ret = vgic_its_cmd_handle_mapc(kvm, its, its_cmd);
                break;
        case GITS_CMD_MAPI:
                ret = vgic_its_cmd_handle_mapi(kvm, its, its_cmd);
                break;
        case GITS_CMD_MAPTI:
                ret = vgic_its_cmd_handle_mapi(kvm, its, its_cmd);
                break;
        case GITS_CMD_MOVI:
                ret = vgic_its_cmd_handle_movi(kvm, its, its_cmd);
                break;
        case GITS_CMD_DISCARD:
                ret = vgic_its_cmd_handle_discard(kvm, its, its_cmd);
                break;
        case GITS_CMD_CLEAR:
                ret = vgic_its_cmd_handle_clear(kvm, its, its_cmd);
                break;
        case GITS_CMD_MOVALL:
                ret = vgic_its_cmd_handle_movall(kvm, its, its_cmd);
                break;
        case GITS_CMD_INT:
                ret = vgic_its_cmd_handle_int(kvm, its, its_cmd);
                break;
        case GITS_CMD_INV:
                ret = vgic_its_cmd_handle_inv(kvm, its, its_cmd);
                break;
        case GITS_CMD_INVALL:
                ret = vgic_its_cmd_handle_invall(kvm, its, its_cmd);
                break;
        case GITS_CMD_SYNC:
                /* we ignore this command: we are in sync all of the time */
                ret = 0;
                break;
        }
        mutex_unlock(&its->its_lock);

        return ret;
}
static u64 vgic_sanitise_its_baser(u64 reg)
{
        reg = vgic_sanitise_field(reg, GITS_BASER_SHAREABILITY_MASK,
                                  GITS_BASER_SHAREABILITY_SHIFT,
                                  vgic_sanitise_shareability);
        reg = vgic_sanitise_field(reg, GITS_BASER_INNER_CACHEABILITY_MASK,
                                  GITS_BASER_INNER_CACHEABILITY_SHIFT,
                                  vgic_sanitise_inner_cacheability);
        reg = vgic_sanitise_field(reg, GITS_BASER_OUTER_CACHEABILITY_MASK,
                                  GITS_BASER_OUTER_CACHEABILITY_SHIFT,
                                  vgic_sanitise_outer_cacheability);

        /* We support only one (ITS) page size: 64K */
        reg = (reg & ~GITS_BASER_PAGE_SIZE_MASK) | GITS_BASER_PAGE_SIZE_64K;

        return reg;
}

static u64 vgic_sanitise_its_cbaser(u64 reg)
{
        reg = vgic_sanitise_field(reg, GITS_CBASER_SHAREABILITY_MASK,
                                  GITS_CBASER_SHAREABILITY_SHIFT,
                                  vgic_sanitise_shareability);
        reg = vgic_sanitise_field(reg, GITS_CBASER_INNER_CACHEABILITY_MASK,
                                  GITS_CBASER_INNER_CACHEABILITY_SHIFT,
                                  vgic_sanitise_inner_cacheability);
        reg = vgic_sanitise_field(reg, GITS_CBASER_OUTER_CACHEABILITY_MASK,
                                  GITS_CBASER_OUTER_CACHEABILITY_SHIFT,
                                  vgic_sanitise_outer_cacheability);

        /* Sanitise the physical address to be 64k aligned. */
        reg &= ~GENMASK_ULL(15, 12);

        return reg;
}
static unsigned long vgic_mmio_read_its_cbaser(struct kvm *kvm,
                                               struct vgic_its *its,
                                               gpa_t addr, unsigned int len)
{
        return extract_bytes(its->cbaser, addr & 7, len);
}

static void vgic_mmio_write_its_cbaser(struct kvm *kvm, struct vgic_its *its,
                                       gpa_t addr, unsigned int len,
                                       unsigned long val)
{
        /* When GITS_CTLR.Enable is 1, this register is RO. */
        if (its->enabled)
                return;

        mutex_lock(&its->cmd_lock);
        its->cbaser = update_64bit_reg(its->cbaser, addr & 7, len, val);
        its->cbaser = vgic_sanitise_its_cbaser(its->cbaser);
        its->creadr = 0;
        /*
         * CWRITER is architecturally UNKNOWN on reset, but we need to reset
         * it to CREADR to make sure we start with an empty command buffer.
         */
        its->cwriter = its->creadr;
        mutex_unlock(&its->cmd_lock);
}
#define ITS_CMD_BUFFER_SIZE(baser)      ((((baser) & 0xff) + 1) << 12)
#define ITS_CMD_SIZE                    32
#define ITS_CMD_OFFSET(reg)             ((reg) & GENMASK(19, 5))
/* Must be called with the cmd_lock held. */
static void vgic_its_process_commands(struct kvm *kvm, struct vgic_its *its)
{
        gpa_t cbaser;
        u64 cmd_buf[4];

        /* Commands are only processed when the ITS is enabled. */
        if (!its->enabled)
                return;

        cbaser = GITS_CBASER_ADDRESS(its->cbaser);

        while (its->cwriter != its->creadr) {
                int ret = kvm_read_guest_lock(kvm, cbaser + its->creadr,
                                              cmd_buf, ITS_CMD_SIZE);
                /*
                 * If kvm_read_guest() fails, this could be due to the guest
                 * programming a bogus value in CBASER or something else going
                 * wrong from which we cannot easily recover.
                 * According to section 6.3.2 in the GICv3 spec we can just
                 * ignore that command then.
                 */
                if (!ret)
                        vgic_its_handle_command(kvm, its, cmd_buf);

                its->creadr += ITS_CMD_SIZE;
                if (its->creadr == ITS_CMD_BUFFER_SIZE(its->cbaser))
                        its->creadr = 0;
        }
}
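/*
 * Worked example of the ring sizing above: ITS_CMD_BUFFER_SIZE() decodes
 * GITS_CBASER.Size (bits [7:0]) as the number of 4K pages minus one. A
 * Size field of 1 therefore gives (1 + 1) << 12 = 8K of command queue,
 * i.e. room for 8192 / 32 = 256 commands before CREADR wraps back to 0.
 */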
/*
 * By writing to CWRITER the guest announces new commands to be processed.
 * To avoid any races in the first place, we take the its_cmd lock, which
 * protects our ring buffer variables, so that there is only one user
 * per ITS handling commands at a given time.
 */
static void vgic_mmio_write_its_cwriter(struct kvm *kvm, struct vgic_its *its,
                                        gpa_t addr, unsigned int len,
                                        unsigned long val)
{
        u64 reg;

        if (!its)
                return;

        mutex_lock(&its->cmd_lock);

        reg = update_64bit_reg(its->cwriter, addr & 7, len, val);
        reg = ITS_CMD_OFFSET(reg);
        if (reg >= ITS_CMD_BUFFER_SIZE(its->cbaser)) {
                mutex_unlock(&its->cmd_lock);
                return;
        }
        its->cwriter = reg;

        vgic_its_process_commands(kvm, its);

        mutex_unlock(&its->cmd_lock);
}
static unsigned long vgic_mmio_read_its_cwriter(struct kvm *kvm,
                                                struct vgic_its *its,
                                                gpa_t addr, unsigned int len)
{
        return extract_bytes(its->cwriter, addr & 0x7, len);
}

static unsigned long vgic_mmio_read_its_creadr(struct kvm *kvm,
                                               struct vgic_its *its,
                                               gpa_t addr, unsigned int len)
{
        return extract_bytes(its->creadr, addr & 0x7, len);
}

static int vgic_mmio_uaccess_write_its_creadr(struct kvm *kvm,
                                              struct vgic_its *its,
                                              gpa_t addr, unsigned int len,
                                              unsigned long val)
{
        u32 cmd_offset;
        int ret = 0;

        mutex_lock(&its->cmd_lock);

        if (its->enabled) {
                ret = -EBUSY;
                goto out;
        }

        cmd_offset = ITS_CMD_OFFSET(val);
        if (cmd_offset >= ITS_CMD_BUFFER_SIZE(its->cbaser)) {
                ret = -EINVAL;
                goto out;
        }

        its->creadr = cmd_offset;
out:
        mutex_unlock(&its->cmd_lock);
        return ret;
}
#define BASER_INDEX(addr) (((addr) / sizeof(u64)) & 0x7)
static unsigned long vgic_mmio_read_its_baser(struct kvm *kvm,
                                              struct vgic_its *its,
                                              gpa_t addr, unsigned int len)
{
        u64 reg;

        switch (BASER_INDEX(addr)) {
        case 0:
                reg = its->baser_device_table;
                break;
        case 1:
                reg = its->baser_coll_table;
                break;
        default:
                reg = 0;
                break;
        }

        return extract_bytes(reg, addr & 7, len);
}
#define GITS_BASER_RO_MASK      (GENMASK_ULL(52, 48) | GENMASK_ULL(58, 56))
static void vgic_mmio_write_its_baser(struct kvm *kvm,
                                      struct vgic_its *its,
                                      gpa_t addr, unsigned int len,
                                      unsigned long val)
{
        const struct vgic_its_abi *abi = vgic_its_get_abi(its);
        u64 entry_size, table_type;
        u64 reg, *regptr, clearbits = 0;

        /* When GITS_CTLR.Enable is 1, we ignore write accesses. */
        if (its->enabled)
                return;

        switch (BASER_INDEX(addr)) {
        case 0:
                regptr = &its->baser_device_table;
                entry_size = abi->dte_esz;
                table_type = GITS_BASER_TYPE_DEVICE;
                break;
        case 1:
                regptr = &its->baser_coll_table;
                entry_size = abi->cte_esz;
                table_type = GITS_BASER_TYPE_COLLECTION;
                clearbits = GITS_BASER_INDIRECT;
                break;
        default:
                return;
        }

        reg = update_64bit_reg(*regptr, addr & 7, len, val);
        reg &= ~GITS_BASER_RO_MASK;
        reg &= ~clearbits;

        reg |= (entry_size - 1) << GITS_BASER_ENTRY_SIZE_SHIFT;
        reg |= table_type << GITS_BASER_TYPE_SHIFT;
        reg = vgic_sanitise_its_baser(reg);

        *regptr = reg;

        if (!(reg & GITS_BASER_VALID)) {
                /* Take the its_lock to prevent a race with a save/restore */
                mutex_lock(&its->its_lock);
                switch (table_type) {
                case GITS_BASER_TYPE_DEVICE:
                        vgic_its_free_device_list(kvm, its);
                        break;
                case GITS_BASER_TYPE_COLLECTION:
                        vgic_its_free_collection_list(kvm, its);
                        break;
                }
                mutex_unlock(&its->its_lock);
        }
}
static unsigned long vgic_mmio_read_its_ctlr(struct kvm *vcpu,
                                             struct vgic_its *its,
                                             gpa_t addr, unsigned int len)
{
        u32 reg = 0;

        mutex_lock(&its->cmd_lock);
        if (its->creadr == its->cwriter)
                reg |= GITS_CTLR_QUIESCENT;
        if (its->enabled)
                reg |= GITS_CTLR_ENABLE;
        mutex_unlock(&its->cmd_lock);

        return reg;
}
static void vgic_mmio_write_its_ctlr(struct kvm *kvm, struct vgic_its *its,
                                     gpa_t addr, unsigned int len,
                                     unsigned long val)
{
        mutex_lock(&its->cmd_lock);

        /*
         * It is UNPREDICTABLE to enable the ITS if any of the CBASER or
         * device/collection BASER are invalid
         */
        if (!its->enabled && (val & GITS_CTLR_ENABLE) &&
                (!(its->baser_device_table & GITS_BASER_VALID) ||
                 !(its->baser_coll_table & GITS_BASER_VALID) ||
                 !(its->cbaser & GITS_CBASER_VALID)))
                goto out;

        its->enabled = !!(val & GITS_CTLR_ENABLE);
        if (!its->enabled)
                vgic_its_invalidate_cache(kvm);

        /*
         * Try to process any pending commands. This function bails out early
         * if the ITS is disabled or no commands have been queued.
         */
        vgic_its_process_commands(kvm, its);

out:
        mutex_unlock(&its->cmd_lock);
}
#define REGISTER_ITS_DESC(off, rd, wr, length, acc)             \
{                                                               \
        .reg_offset = off,                                      \
        .len = length,                                          \
        .access_flags = acc,                                    \
        .its_read = rd,                                         \
        .its_write = wr,                                        \
}

#define REGISTER_ITS_DESC_UACCESS(off, rd, wr, uwr, length, acc)\
{                                                               \
        .reg_offset = off,                                      \
        .len = length,                                          \
        .access_flags = acc,                                    \
        .its_read = rd,                                         \
        .its_write = wr,                                        \
        .uaccess_its_write = uwr,                               \
}

static void its_mmio_write_wi(struct kvm *kvm, struct vgic_its *its,
                              gpa_t addr, unsigned int len, unsigned long val)
{
        /* Ignore */
}
static struct vgic_register_region its_registers[] = {
        REGISTER_ITS_DESC(GITS_CTLR,
                vgic_mmio_read_its_ctlr, vgic_mmio_write_its_ctlr, 4,
                VGIC_ACCESS_32bit),
        REGISTER_ITS_DESC_UACCESS(GITS_IIDR,
                vgic_mmio_read_its_iidr, its_mmio_write_wi,
                vgic_mmio_uaccess_write_its_iidr, 4,
                VGIC_ACCESS_32bit),
        REGISTER_ITS_DESC(GITS_TYPER,
                vgic_mmio_read_its_typer, its_mmio_write_wi, 8,
                VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
        REGISTER_ITS_DESC(GITS_CBASER,
                vgic_mmio_read_its_cbaser, vgic_mmio_write_its_cbaser, 8,
                VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
        REGISTER_ITS_DESC(GITS_CWRITER,
                vgic_mmio_read_its_cwriter, vgic_mmio_write_its_cwriter, 8,
                VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
        REGISTER_ITS_DESC_UACCESS(GITS_CREADR,
                vgic_mmio_read_its_creadr, its_mmio_write_wi,
                vgic_mmio_uaccess_write_its_creadr, 8,
                VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
        REGISTER_ITS_DESC(GITS_BASER,
                vgic_mmio_read_its_baser, vgic_mmio_write_its_baser, 0x40,
                VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
        REGISTER_ITS_DESC(GITS_IDREGS_BASE,
                vgic_mmio_read_its_idregs, its_mmio_write_wi, 0x30,
                VGIC_ACCESS_32bit),
};
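/*
 * A quick sanity check of the table above against the 64K register frame:
 * GITS_BASER with length 0x40 covers the eight 64-bit GITS_BASER<n>
 * registers, and GITS_IDREGS_BASE (0xffd0) with length 0x30 covers the
 * twelve 32-bit ID registers up to the end of the frame at 0x10000.
 */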
/* This is called on setting the LPI enable bit in the redistributor. */
void vgic_enable_lpis(struct kvm_vcpu *vcpu)
{
        if (!(vcpu->arch.vgic_cpu.pendbaser & GICR_PENDBASER_PTZ))
                its_sync_lpi_pending_table(vcpu);
}
static int vgic_register_its_iodev(struct kvm *kvm, struct vgic_its *its,
                                   u64 addr)
{
        struct vgic_io_device *iodev = &its->iodev;
        int ret;

        mutex_lock(&kvm->slots_lock);
        if (!IS_VGIC_ADDR_UNDEF(its->vgic_its_base)) {
                ret = -EBUSY;
                goto out;
        }

        its->vgic_its_base = addr;
        iodev->regions = its_registers;
        iodev->nr_regions = ARRAY_SIZE(its_registers);
        kvm_iodevice_init(&iodev->dev, &kvm_io_gic_ops);

        iodev->base_addr = its->vgic_its_base;
        iodev->iodev_type = IODEV_ITS;
        iodev->its = its;
        ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, iodev->base_addr,
                                      KVM_VGIC_V3_ITS_SIZE, &iodev->dev);
out:
        mutex_unlock(&kvm->slots_lock);

        return ret;
}
/* Default is 16 cached LPIs per vcpu */
#define LPI_DEFAULT_PCPU_CACHE_SIZE     16

void vgic_lpi_translation_cache_init(struct kvm *kvm)
{
        struct vgic_dist *dist = &kvm->arch.vgic;
        unsigned int sz;
        int i;

        if (!list_empty(&dist->lpi_translation_cache))
                return;

        sz = atomic_read(&kvm->online_vcpus) * LPI_DEFAULT_PCPU_CACHE_SIZE;

        for (i = 0; i < sz; i++) {
                struct vgic_translation_cache_entry *cte;

                /* An allocation failure is not fatal */
                cte = kzalloc(sizeof(*cte), GFP_KERNEL_ACCOUNT);
                if (WARN_ON(!cte))
                        break;

                INIT_LIST_HEAD(&cte->entry);
                list_add(&cte->entry, &dist->lpi_translation_cache);
        }
}
void vgic_lpi_translation_cache_destroy(struct kvm *kvm)
{
        struct vgic_dist *dist = &kvm->arch.vgic;
        struct vgic_translation_cache_entry *cte, *tmp;

        vgic_its_invalidate_cache(kvm);

        list_for_each_entry_safe(cte, tmp,
                                 &dist->lpi_translation_cache, entry) {
                list_del(&cte->entry);
                kfree(cte);
        }
}
#define INITIAL_BASER_VALUE                                               \
        (GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWb)                | \
         GIC_BASER_CACHEABILITY(GITS_BASER, OUTER, SameAsInner)         | \
         GIC_BASER_SHAREABILITY(GITS_BASER, InnerShareable)             | \
         GITS_BASER_PAGE_SIZE_64K)

#define INITIAL_PROPBASER_VALUE                                           \
        (GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWb)            | \
         GIC_BASER_CACHEABILITY(GICR_PROPBASER, OUTER, SameAsInner)     | \
         GIC_BASER_SHAREABILITY(GICR_PROPBASER, InnerShareable))
static int vgic_its_create(struct kvm_device *dev, u32 type)
{
        int ret;
        struct vgic_its *its;

        if (type != KVM_DEV_TYPE_ARM_VGIC_ITS)
                return -ENODEV;

        its = kzalloc(sizeof(struct vgic_its), GFP_KERNEL_ACCOUNT);
        if (!its)
                return -ENOMEM;

        mutex_lock(&dev->kvm->arch.config_lock);

        if (vgic_initialized(dev->kvm)) {
                ret = vgic_v4_init(dev->kvm);
                if (ret < 0) {
                        mutex_unlock(&dev->kvm->arch.config_lock);
                        kfree(its);
                        return ret;
                }

                vgic_lpi_translation_cache_init(dev->kvm);
        }

        mutex_init(&its->its_lock);
        mutex_init(&its->cmd_lock);

        /* Yep, even more trickery for lock ordering... */
#ifdef CONFIG_LOCKDEP
        mutex_lock(&its->cmd_lock);
        mutex_lock(&its->its_lock);
        mutex_unlock(&its->its_lock);
        mutex_unlock(&its->cmd_lock);
#endif

        its->vgic_its_base = VGIC_ADDR_UNDEF;

        INIT_LIST_HEAD(&its->device_list);
        INIT_LIST_HEAD(&its->collection_list);

        dev->kvm->arch.vgic.msis_require_devid = true;
        dev->kvm->arch.vgic.has_its = true;
        its->enabled = false;
        its->dev = dev;

        its->baser_device_table = INITIAL_BASER_VALUE                   |
                ((u64)GITS_BASER_TYPE_DEVICE << GITS_BASER_TYPE_SHIFT);
        its->baser_coll_table = INITIAL_BASER_VALUE |
                ((u64)GITS_BASER_TYPE_COLLECTION << GITS_BASER_TYPE_SHIFT);
        dev->kvm->arch.vgic.propbaser = INITIAL_PROPBASER_VALUE;

        dev->private = its;

        ret = vgic_its_set_abi(its, NR_ITS_ABIS - 1);

        mutex_unlock(&dev->kvm->arch.config_lock);

        return ret;
}
static void vgic_its_destroy(struct kvm_device *kvm_dev)
{
        struct kvm *kvm = kvm_dev->kvm;
        struct vgic_its *its = kvm_dev->private;

        mutex_lock(&its->its_lock);

        vgic_its_free_device_list(kvm, its);
        vgic_its_free_collection_list(kvm, its);

        mutex_unlock(&its->its_lock);
        kfree(its);
        kfree(kvm_dev); /* alloc by kvm_ioctl_create_device, free by .destroy */
}
static int vgic_its_has_attr_regs(struct kvm_device *dev,
                                  struct kvm_device_attr *attr)
{
        const struct vgic_register_region *region;
        gpa_t offset = attr->attr;
        int align;

        align = (offset < GITS_TYPER) || (offset >= GITS_PIDR4) ? 0x3 : 0x7;

        if (offset & align)
                return -EINVAL;

        region = vgic_find_mmio_region(its_registers,
                                       ARRAY_SIZE(its_registers),
                                       offset);
        if (!region)
                return -ENXIO;

        return 0;
}
static int vgic_its_attr_regs_access(struct kvm_device *dev,
                                     struct kvm_device_attr *attr,
                                     u64 *reg, bool is_write)
{
        const struct vgic_register_region *region;
        struct vgic_its *its;
        gpa_t addr, offset;
        unsigned int len;
        int align, ret = 0;

        its = dev->private;
        offset = attr->attr;

        /*
         * Although the spec supports upper/lower 32-bit accesses to
         * 64-bit ITS registers, the userspace ABI requires 64-bit
         * accesses to all 64-bit wide registers. We therefore only
         * support 32-bit accesses to GITS_CTLR, GITS_IIDR and GITS ID
         * registers
         */
        if ((offset < GITS_TYPER) || (offset >= GITS_PIDR4))
                align = 0x3;
        else
                align = 0x7;

        if (offset & align)
                return -EINVAL;

        mutex_lock(&dev->kvm->lock);

        if (!lock_all_vcpus(dev->kvm)) {
                mutex_unlock(&dev->kvm->lock);
                return -EBUSY;
        }

        mutex_lock(&dev->kvm->arch.config_lock);

        if (IS_VGIC_ADDR_UNDEF(its->vgic_its_base)) {
                ret = -ENXIO;
                goto out;
        }

        region = vgic_find_mmio_region(its_registers,
                                       ARRAY_SIZE(its_registers),
                                       offset);
        if (!region) {
                ret = -ENXIO;
                goto out;
        }

        addr = its->vgic_its_base + offset;

        len = region->access_flags & VGIC_ACCESS_64bit ? 8 : 4;

        if (is_write) {
                if (region->uaccess_its_write)
                        ret = region->uaccess_its_write(dev->kvm, its, addr,
                                                        len, *reg);
                else
                        region->its_write(dev->kvm, its, addr, len, *reg);
        } else {
                *reg = region->its_read(dev->kvm, its, addr, len);
        }
out:
        mutex_unlock(&dev->kvm->arch.config_lock);
        unlock_all_vcpus(dev->kvm);
        mutex_unlock(&dev->kvm->lock);
        return ret;
}
static u32 compute_next_devid_offset(struct list_head *h,
                                     struct its_device *dev)
{
        struct its_device *next;
        u32 next_offset;

        if (list_is_last(&dev->dev_list, h))
                return 0;
        next = list_next_entry(dev, dev_list);
        next_offset = next->device_id - dev->device_id;

        return min_t(u32, next_offset, VITS_DTE_MAX_DEVID_OFFSET);
}

static u32 compute_next_eventid_offset(struct list_head *h, struct its_ite *ite)
{
        struct its_ite *next;
        u32 next_offset;

        if (list_is_last(&ite->ite_list, h))
                return 0;
        next = list_next_entry(ite, ite_list);
        next_offset = next->event_id - ite->event_id;

        return min_t(u32, next_offset, VITS_ITE_MAX_EVENTID_OFFSET);
}
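/*
 * Worked example: with mapped device IDs {0, 5, 6}, the saved entry for
 * device 0 carries next_offset = 5 and the one for device 5 carries
 * next_offset = 1; the last entry stores 0. Offsets are clamped to
 * VITS_DTE_MAX_DEVID_OFFSET / VITS_ITE_MAX_EVENTID_OFFSET so they fit
 * the table encoding in guest RAM used by the save/restore code below.
 */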
/**
 * entry_fn_t - Callback called on a table entry restore path
 * @its: its handle
 * @id: id of the entry
 * @entry: pointer to the entry
 * @opaque: pointer to an opaque data
 *
 * Return: < 0 on error, 0 if last element was identified, id offset to next
 * element otherwise
 */
typedef int (*entry_fn_t)(struct vgic_its *its, u32 id, void *entry,
                          void *opaque);
/**
 * scan_its_table - Scan a contiguous table in guest RAM and applies a function
 * to each entry
 *
 * @its: its handle
 * @base: base gpa of the table
 * @size: size of the table in bytes
 * @esz: entry size in bytes
 * @start_id: the ID of the first entry in the table
 * (non zero for 2d level tables)
 * @fn: function to apply on each entry
 * @opaque: pointer to opaque data
 *
 * Return: < 0 on error, 0 if last element was identified, 1 otherwise
 * (the last element may not be found on second level tables)
 */
static int scan_its_table(struct vgic_its *its, gpa_t base, int size, u32 esz,
                          int start_id, entry_fn_t fn, void *opaque)
{
        struct kvm *kvm = its->dev->kvm;
        unsigned long len = size;
        int id = start_id;
        gpa_t gpa = base;
        char entry[ESZ_MAX];
        int ret;

        memset(entry, 0, esz);

        while (len > 0) {
                int next_offset;
                size_t byte_offset;

                ret = kvm_read_guest_lock(kvm, gpa, entry, esz);
                if (ret)
                        return ret;

                next_offset = fn(its, id, entry, opaque);
                if (next_offset <= 0)
                        return next_offset;

                byte_offset = next_offset * esz;
                if (byte_offset >= len)
                        break;

                id += next_offset;
                gpa += byte_offset;
                len -= byte_offset;
        }
        return 1;
}
/**
 * vgic_its_save_ite - Save an interrupt translation entry at @gpa
 */
static int vgic_its_save_ite(struct vgic_its *its, struct its_device *dev,
                             struct its_ite *ite, gpa_t gpa, int ite_esz)
{
        struct kvm *kvm = its->dev->kvm;
        u32 next_offset;
        u64 val;

        next_offset = compute_next_eventid_offset(&dev->itt_head, ite);
        val = ((u64)next_offset << KVM_ITS_ITE_NEXT_SHIFT) |
               ((u64)ite->irq->intid << KVM_ITS_ITE_PINTID_SHIFT) |
                ite->collection->collection_id;
        val = cpu_to_le64(val);
        return vgic_write_guest_lock(kvm, gpa, &val, ite_esz);
}
/**
 * vgic_its_restore_ite - restore an interrupt translation entry
 * @event_id: id used for indexing
 * @ptr: pointer to the ITE entry
 * @opaque: pointer to the its_device
 */
static int vgic_its_restore_ite(struct vgic_its *its, u32 event_id,
                                void *ptr, void *opaque)
{
        struct its_device *dev = opaque;
        struct its_collection *collection;
        struct kvm *kvm = its->dev->kvm;
        struct kvm_vcpu *vcpu = NULL;
        u64 val;
        u64 *p = (u64 *)ptr;
        struct vgic_irq *irq;
        u32 coll_id, lpi_id;
        struct its_ite *ite;
        u32 offset;

        val = *p;

        val = le64_to_cpu(val);

        coll_id = val & KVM_ITS_ITE_ICID_MASK;
        lpi_id = (val & KVM_ITS_ITE_PINTID_MASK) >> KVM_ITS_ITE_PINTID_SHIFT;

        if (!lpi_id)
                return 1; /* invalid entry, no choice but to scan next entry */

        if (lpi_id < VGIC_MIN_LPI)
                return -EINVAL;

        offset = val >> KVM_ITS_ITE_NEXT_SHIFT;
        if (event_id + offset >= BIT_ULL(dev->num_eventid_bits))
                return -EINVAL;

        collection = find_collection(its, coll_id);
        if (!collection)
                return -EINVAL;

        if (!vgic_its_check_event_id(its, dev, event_id))
                return -EINVAL;

        ite = vgic_its_alloc_ite(dev, collection, event_id);
        if (IS_ERR(ite))
                return PTR_ERR(ite);

        if (its_is_collection_mapped(collection))
                vcpu = kvm_get_vcpu_by_id(kvm, collection->target_addr);

        irq = vgic_add_lpi(kvm, lpi_id, vcpu);
        if (IS_ERR(irq)) {
                its_free_ite(kvm, ite);
                return PTR_ERR(irq);
        }
        ite->irq = irq;

        return offset;
}
static int vgic_its_ite_cmp(void *priv, const struct list_head *a,
                            const struct list_head *b)
{
        struct its_ite *itea = container_of(a, struct its_ite, ite_list);
        struct its_ite *iteb = container_of(b, struct its_ite, ite_list);

        if (itea->event_id < iteb->event_id)
                return -1;
        else
                return 1;
}
static int vgic_its_save_itt(struct vgic_its *its, struct its_device *device)
{
        const struct vgic_its_abi *abi = vgic_its_get_abi(its);
        gpa_t base = device->itt_addr;
        struct its_ite *ite;
        int ret;
        int ite_esz = abi->ite_esz;

        list_sort(NULL, &device->itt_head, vgic_its_ite_cmp);

        list_for_each_entry(ite, &device->itt_head, ite_list) {
                gpa_t gpa = base + ite->event_id * ite_esz;

                /*
                 * If an LPI carries the HW bit, this means that this
                 * interrupt is controlled by GICv4, and we do not
                 * have direct access to that state without GICv4.1.
                 * Let's simply fail the save operation...
                 */
                if (ite->irq->hw && !kvm_vgic_global_state.has_gicv4_1)
                        return -EACCES;

                ret = vgic_its_save_ite(its, device, ite, gpa, ite_esz);
                if (ret)
                        return ret;
        }
        return 0;
}
/**
 * vgic_its_restore_itt - restore the ITT of a device
 *
 * @its: its handle
 * @dev: device handle
 *
 * Return 0 on success, < 0 on error
 */
static int vgic_its_restore_itt(struct vgic_its *its, struct its_device *dev)
{
        const struct vgic_its_abi *abi = vgic_its_get_abi(its);
        gpa_t base = dev->itt_addr;
        int ret;
        int ite_esz = abi->ite_esz;
        size_t max_size = BIT_ULL(dev->num_eventid_bits) * ite_esz;

        ret = scan_its_table(its, base, max_size, ite_esz, 0,
                             vgic_its_restore_ite, dev);

        /* scan_its_table returns +1 if all ITEs are invalid */
        if (ret > 0)
                ret = 0;

        return ret;
}
/**
 * vgic_its_save_dte - Save a device table entry at a given GPA
 *
 * @its: ITS handle
 * @dev: ITS device
 * @ptr: GPA
 */
static int vgic_its_save_dte(struct vgic_its *its, struct its_device *dev,
                             gpa_t ptr, int dte_esz)
{
        struct kvm *kvm = its->dev->kvm;
        u64 val, itt_addr_field;
        u32 next_offset;

        itt_addr_field = dev->itt_addr >> 8;
        next_offset = compute_next_devid_offset(&its->device_list, dev);
        val = (1ULL << KVM_ITS_DTE_VALID_SHIFT |
               ((u64)next_offset << KVM_ITS_DTE_NEXT_SHIFT) |
               (itt_addr_field << KVM_ITS_DTE_ITTADDR_SHIFT) |
                (dev->num_eventid_bits - 1));
        val = cpu_to_le64(val);
        return vgic_write_guest_lock(kvm, ptr, &val, dte_esz);
}
/**
 * vgic_its_restore_dte - restore a device table entry
 *
 * @its: its handle
 * @id: device id the DTE corresponds to
 * @ptr: kernel VA where the 8 byte DTE is located
 * @opaque: unused
 *
 * Return: < 0 on error, 0 if the dte is the last one, id offset to the
 * next dte otherwise
 */
static int vgic_its_restore_dte(struct vgic_its *its, u32 id,
                                void *ptr, void *opaque)
{
        struct its_device *dev;
        u64 baser = its->baser_device_table;
        gpa_t itt_addr;
        u8 num_eventid_bits;
        u64 entry = *(u64 *)ptr;
        bool valid;
        u32 offset;
        int ret;

        entry = le64_to_cpu(entry);

        valid = entry >> KVM_ITS_DTE_VALID_SHIFT;
        num_eventid_bits = (entry & KVM_ITS_DTE_SIZE_MASK) + 1;
        itt_addr = ((entry & KVM_ITS_DTE_ITTADDR_MASK)
                        >> KVM_ITS_DTE_ITTADDR_SHIFT) << 8;

        if (!valid)
                return 1;

        /* dte entry is valid */
        offset = (entry & KVM_ITS_DTE_NEXT_MASK) >> KVM_ITS_DTE_NEXT_SHIFT;

        if (!vgic_its_check_id(its, baser, id, NULL))
                return -EINVAL;

        dev = vgic_its_alloc_device(its, id, itt_addr, num_eventid_bits);
        if (IS_ERR(dev))
                return PTR_ERR(dev);

        ret = vgic_its_restore_itt(its, dev);
        if (ret) {
                vgic_its_free_device(its->dev->kvm, dev);
                return ret;
        }

        return offset;
}
static int vgic_its_device_cmp(void *priv, const struct list_head *a,
                               const struct list_head *b)
{
        struct its_device *deva = container_of(a, struct its_device, dev_list);
        struct its_device *devb = container_of(b, struct its_device, dev_list);

        if (deva->device_id < devb->device_id)
                return -1;
        else
                return 1;
}
/**
 * vgic_its_save_device_tables - Save the device table and all ITT
 * into guest RAM
 *
 * L1/L2 handling is hidden by vgic_its_check_id() helper which directly
 * returns the GPA of the device entry
 */
static int vgic_its_save_device_tables(struct vgic_its *its)
{
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	u64 baser = its->baser_device_table;
	struct its_device *dev;
	int dte_esz = abi->dte_esz;

	if (!(baser & GITS_BASER_VALID))
		return 0;

	list_sort(NULL, &its->device_list, vgic_its_device_cmp);

	list_for_each_entry(dev, &its->device_list, dev_list) {
		int ret;
		gpa_t eaddr;

		if (!vgic_its_check_id(its, baser,
				       dev->device_id, &eaddr))
			return -EINVAL;

		ret = vgic_its_save_itt(its, dev);
		if (ret)
			return ret;

		ret = vgic_its_save_dte(its, dev, eaddr, dte_esz);
		if (ret)
			return ret;
	}
	return 0;
}
/**
 * handle_l1_dte - callback used for L1 device table entries (2 stage case)
 *
 * @its: its handle
 * @id: index of the entry in the L1 table
 * @addr: kernel VA
 * @opaque: unused
 *
 * L1 table entries are scanned by steps of 1 entry
 * Return < 0 if error, 0 if last dte was found when scanning the L2
 * table, +1 otherwise (meaning next L1 entry must be scanned)
 */
static int handle_l1_dte(struct vgic_its *its, u32 id, void *addr,
			 void *opaque)
{
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	int l2_start_id = id * (SZ_64K / abi->dte_esz);
	u64 entry = *(u64 *)addr;
	int dte_esz = abi->dte_esz;
	gpa_t gpa;
	int ret;

	entry = le64_to_cpu(entry);

	if (!(entry & KVM_ITS_L1E_VALID_MASK))
		return 1;

	gpa = entry & KVM_ITS_L1E_ADDR_MASK;

	ret = scan_its_table(its, gpa, SZ_64K, dte_esz,
			     l2_start_id, vgic_its_restore_dte, NULL);

	return ret;
}
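/*
 * Editorial note (sketch): with the v0 ABI's 8-byte DTEs, one 64K L2
 * page covers SZ_64K / 8 = 8192 device IDs, so L1 entry @id maps the
 * device ID range [id * 8192, id * 8192 + 8191] -- which is exactly
 * the l2_start_id computation above.
 */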
/**
 * vgic_its_restore_device_tables - Restore the device table and all ITT
 * from guest RAM to internal data structs
 */
static int vgic_its_restore_device_tables(struct vgic_its *its)
{
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	u64 baser = its->baser_device_table;
	int l1_esz, ret;
	int l1_tbl_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;
	gpa_t l1_gpa;

	if (!(baser & GITS_BASER_VALID))
		return 0;

	l1_gpa = GITS_BASER_ADDR_48_to_52(baser);

	if (baser & GITS_BASER_INDIRECT) {
		l1_esz = GITS_LVL1_ENTRY_SIZE;
		ret = scan_its_table(its, l1_gpa, l1_tbl_size, l1_esz, 0,
				     handle_l1_dte, NULL);
	} else {
		l1_esz = abi->dte_esz;
		ret = scan_its_table(its, l1_gpa, l1_tbl_size, l1_esz, 0,
				     vgic_its_restore_dte, NULL);
	}

	/* scan_its_table returns +1 if all entries are invalid */
	if (ret > 0)
		ret = 0;

	if (ret < 0)
		vgic_its_free_device_list(its->dev->kvm, its);

	return ret;
}
static int vgic_its_save_cte(struct vgic_its *its,
			     struct its_collection *collection,
			     gpa_t gpa, int esz)
{
	u64 val;

	val = (1ULL << KVM_ITS_CTE_VALID_SHIFT |
	       ((u64)collection->target_addr << KVM_ITS_CTE_RDBASE_SHIFT) |
	       collection->collection_id);
	val = cpu_to_le64(val);
	return vgic_write_guest_lock(its->dev->kvm, gpa, &val, esz);
}
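/*
 * Editorial sketch, not part of the upstream file: the collection table
 * entry built above, assuming the KVM_ITS_CTE_* definitions from vgic.h
 * (VALID at bit 63, RDBASE holding the target vcpu_id from bit 16, ICID
 * in bits 15:0):
 *
 *  63          16 15       0
 * +---+----------+----------+
 * | V |  RDBASE  |   ICID   |
 * +---+----------+----------+
 */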
/*
 * Restore a collection entry into the ITS collection table.
 * Return +1 on success, 0 if the entry was invalid (which should be
 * interpreted as end-of-table), and a negative error value for generic errors.
 */
static int vgic_its_restore_cte(struct vgic_its *its, gpa_t gpa, int esz)
{
	struct its_collection *collection;
	struct kvm *kvm = its->dev->kvm;
	u32 target_addr, coll_id;
	u64 val;
	int ret;

	BUG_ON(esz > sizeof(val));
	ret = kvm_read_guest_lock(kvm, gpa, &val, esz);
	if (ret)
		return ret;
	val = le64_to_cpu(val);
	if (!(val & KVM_ITS_CTE_VALID_MASK))
		return 0;

	target_addr = (u32)(val >> KVM_ITS_CTE_RDBASE_SHIFT);
	coll_id = val & KVM_ITS_CTE_ICID_MASK;

	if (target_addr != COLLECTION_NOT_MAPPED &&
	    !kvm_get_vcpu_by_id(kvm, target_addr))
		return -EINVAL;

	collection = find_collection(its, coll_id);
	if (collection)
		return -EEXIST;

	if (!vgic_its_check_id(its, its->baser_coll_table, coll_id, NULL))
		return -EINVAL;

	ret = vgic_its_alloc_collection(its, &collection, coll_id);
	if (ret)
		return ret;
	collection->target_addr = target_addr;
	return 1;
}
/**
 * vgic_its_save_collection_table - Save the collection table into
 * guest RAM
 */
static int vgic_its_save_collection_table(struct vgic_its *its)
{
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	u64 baser = its->baser_coll_table;
	gpa_t gpa = GITS_BASER_ADDR_48_to_52(baser);
	struct its_collection *collection;
	u64 val;
	size_t max_size, filled = 0;
	int ret, cte_esz = abi->cte_esz;

	if (!(baser & GITS_BASER_VALID))
		return 0;

	max_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;

	list_for_each_entry(collection, &its->collection_list, coll_list) {
		ret = vgic_its_save_cte(its, collection, gpa, cte_esz);
		if (ret)
			return ret;
		gpa += cte_esz;
		filled += cte_esz;
	}

	if (filled == max_size)
		return 0;

	/*
	 * table is not fully filled, add a last dummy element
	 * with valid bit unset
	 */
	val = 0;
	BUG_ON(cte_esz > sizeof(val));
	ret = vgic_write_guest_lock(its->dev->kvm, gpa, &val, cte_esz);
	return ret;
}
/**
 * vgic_its_restore_collection_table - reads the collection table
 * in guest memory and restores the ITS internal state. Requires the
 * BASER registers to be restored before.
 */
static int vgic_its_restore_collection_table(struct vgic_its *its)
{
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	u64 baser = its->baser_coll_table;
	int cte_esz = abi->cte_esz;
	size_t max_size, read = 0;
	gpa_t gpa;
	int ret;

	if (!(baser & GITS_BASER_VALID))
		return 0;

	gpa = GITS_BASER_ADDR_48_to_52(baser);

	max_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;

	while (read < max_size) {
		ret = vgic_its_restore_cte(its, gpa, cte_esz);
		if (ret <= 0)
			break;
		gpa += cte_esz;
		read += cte_esz;
	}

	if (ret > 0)
		return 0;

	if (ret < 0)
		vgic_its_free_collection_list(its->dev->kvm, its);

	return ret;
}
/**
 * vgic_its_save_tables_v0 - Save the ITS tables into guest RAM
 * according to v0 ABI
 */
static int vgic_its_save_tables_v0(struct vgic_its *its)
{
	int ret;

	ret = vgic_its_save_device_tables(its);
	if (ret)
		return ret;

	return vgic_its_save_collection_table(its);
}
/**
 * vgic_its_restore_tables_v0 - Restore the ITS tables from guest RAM
 * to internal data structs according to V0 ABI
 */
static int vgic_its_restore_tables_v0(struct vgic_its *its)
{
	int ret;

	ret = vgic_its_restore_collection_table(its);
	if (ret)
		return ret;

	ret = vgic_its_restore_device_tables(its);
	if (ret)
		vgic_its_free_collection_list(its->dev->kvm, its);
	return ret;
}
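/*
 * Editorial note: the restore order deliberately mirrors the save path
 * in reverse. Collections are restored first because the ITEs created
 * while restoring the device tables look up their collection by ID; if
 * the device table restore then fails, the collection list is torn down
 * so the ITS is left empty rather than half-populated.
 */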
static int vgic_its_commit_v0(struct vgic_its *its)
{
	const struct vgic_its_abi *abi;

	abi = vgic_its_get_abi(its);
	its->baser_coll_table &= ~GITS_BASER_ENTRY_SIZE_MASK;
	its->baser_device_table &= ~GITS_BASER_ENTRY_SIZE_MASK;

	its->baser_coll_table |= (GIC_ENCODE_SZ(abi->cte_esz, 5)
					<< GITS_BASER_ENTRY_SIZE_SHIFT);

	its->baser_device_table |= (GIC_ENCODE_SZ(abi->dte_esz, 5)
					<< GITS_BASER_ENTRY_SIZE_SHIFT);
	return 0;
}
static void vgic_its_reset(struct kvm *kvm, struct vgic_its *its)
{
	/* We need to keep the ABI specific field values */
	its->baser_coll_table &= ~GITS_BASER_VALID;
	its->baser_device_table &= ~GITS_BASER_VALID;
	its->cbaser = 0;
	its->creadr = 0;
	its->cwriter = 0;
	its->enabled = 0;
	vgic_its_free_device_list(kvm, its);
	vgic_its_free_collection_list(kvm, its);
}
static int vgic_its_has_attr(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR:
		switch (attr->attr) {
		case KVM_VGIC_ITS_ADDR_TYPE:
			return 0;
		}
		break;
	case KVM_DEV_ARM_VGIC_GRP_CTRL:
		switch (attr->attr) {
		case KVM_DEV_ARM_VGIC_CTRL_INIT:
			return 0;
		case KVM_DEV_ARM_ITS_CTRL_RESET:
			return 0;
		case KVM_DEV_ARM_ITS_SAVE_TABLES:
			return 0;
		case KVM_DEV_ARM_ITS_RESTORE_TABLES:
			return 0;
		}
		break;
	case KVM_DEV_ARM_VGIC_GRP_ITS_REGS:
		return vgic_its_has_attr_regs(dev, attr);
	}
	return -ENXIO;
}
static int vgic_its_ctrl(struct kvm *kvm, struct vgic_its *its, u64 attr)
{
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	int ret = 0;

	if (attr == KVM_DEV_ARM_VGIC_CTRL_INIT) /* Nothing to do */
		return 0;

	mutex_lock(&kvm->lock);

	if (!lock_all_vcpus(kvm)) {
		mutex_unlock(&kvm->lock);
		return -EBUSY;
	}

	mutex_lock(&kvm->arch.config_lock);
	mutex_lock(&its->its_lock);

	switch (attr) {
	case KVM_DEV_ARM_ITS_CTRL_RESET:
		vgic_its_reset(kvm, its);
		break;
	case KVM_DEV_ARM_ITS_SAVE_TABLES:
		ret = abi->save_tables(its);
		break;
	case KVM_DEV_ARM_ITS_RESTORE_TABLES:
		ret = abi->restore_tables(its);
		break;
	}

	mutex_unlock(&its->its_lock);
	mutex_unlock(&kvm->arch.config_lock);
	unlock_all_vcpus(kvm);
	mutex_unlock(&kvm->lock);
	return ret;
}
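/*
 * Editorial note: the locking in vgic_its_ctrl() above follows the
 * hierarchy kvm->lock -> all vCPU mutexes -> kvm->arch.config_lock ->
 * its->its_lock. Taking every vCPU mutex guarantees no vCPU can run,
 * and thus no ITS command can race, while the tables are copied to or
 * from guest memory.
 */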
/**
 * kvm_arch_allow_write_without_running_vcpu - allow writing guest memory
 * without the running VCPU when dirty ring is enabled.
 *
 * The running VCPU is required to track dirty guest pages when dirty ring
 * is enabled. Otherwise, the backup bitmap should be used to track the
 * dirty guest pages. When vgic/its tables are being saved, the backup
 * bitmap is used to track the dirty guest pages, since no VCPU is
 * running during that period.
 */
bool kvm_arch_allow_write_without_running_vcpu(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;

	return dist->table_write_in_progress;
}
static int vgic_its_set_attr(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	struct vgic_its *its = dev->private;
	int ret;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		unsigned long type = (unsigned long)attr->attr;
		u64 addr;

		if (type != KVM_VGIC_ITS_ADDR_TYPE)
			return -ENODEV;

		if (copy_from_user(&addr, uaddr, sizeof(addr)))
			return -EFAULT;

		ret = vgic_check_iorange(dev->kvm, its->vgic_its_base,
					 addr, SZ_64K, KVM_VGIC_V3_ITS_SIZE);
		if (ret)
			return ret;

		return vgic_register_its_iodev(dev->kvm, its, addr);
	}
	case KVM_DEV_ARM_VGIC_GRP_CTRL:
		return vgic_its_ctrl(dev->kvm, its, attr->attr);
	case KVM_DEV_ARM_VGIC_GRP_ITS_REGS: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		u64 reg;

		if (get_user(reg, uaddr))
			return -EFAULT;

		return vgic_its_attr_regs_access(dev, attr, &reg, true);
	}
	}
	return -ENXIO;
}
static int vgic_its_get_attr(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR: {
		struct vgic_its *its = dev->private;
		u64 addr = its->vgic_its_base;
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		unsigned long type = (unsigned long)attr->attr;

		if (type != KVM_VGIC_ITS_ADDR_TYPE)
			return -ENODEV;

		if (copy_to_user(uaddr, &addr, sizeof(addr)))
			return -EFAULT;
		break;
	}
	case KVM_DEV_ARM_VGIC_GRP_ITS_REGS: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		u64 reg;
		int ret;

		ret = vgic_its_attr_regs_access(dev, attr, &reg, false);
		if (ret)
			return ret;
		return put_user(reg, uaddr);
	}
	default:
		return -ENXIO;
	}

	return 0;
}
static struct kvm_device_ops kvm_arm_vgic_its_ops = {
	.name = "kvm-arm-vgic-its",
	.create = vgic_its_create,
	.destroy = vgic_its_destroy,
	.set_attr = vgic_its_set_attr,
	.get_attr = vgic_its_get_attr,
	.has_attr = vgic_its_has_attr,
};

int kvm_vgic_register_its_device(void)
{
	return kvm_register_device_ops(&kvm_arm_vgic_its_ops,
				       KVM_DEV_TYPE_ARM_VGIC_ITS);
}
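/*
 * Editorial sketch of how userspace drives this device; the ioctls are
 * the standard KVM device API, but the snippet is an illustration, not
 * code from this file:
 *
 *	struct kvm_create_device cd = {
 *		.type = KVM_DEV_TYPE_ARM_VGIC_ITS,
 *	};
 *	ioctl(vm_fd, KVM_CREATE_DEVICE, &cd);	// cd.fd now refers to the ITS
 *
 *	u64 its_base = 0x8080000;		// example GPA, 64K aligned
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_ARM_VGIC_GRP_ADDR,
 *		.attr  = KVM_VGIC_ITS_ADDR_TYPE,
 *		.addr  = (u64)&its_base,
 *	};
 *	ioctl(cd.fd, KVM_SET_DEVICE_ATTR, &attr);
 *
 * Migration then uses KVM_DEV_ARM_VGIC_GRP_CTRL with
 * KVM_DEV_ARM_ITS_SAVE_TABLES / KVM_DEV_ARM_ITS_RESTORE_TABLES.
 */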