/*
 * GICv3 ITS emulation
 *
 * Copyright (C) 2015,2016 ARM Ltd.
 * Author: Andre Przywara <andre.przywara@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/uaccess.h>

#include <linux/irqchip/arm-gic-v3.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>

#include "vgic.h"
#include "vgic-mmio.h"
/*
 * Creates a new (reference to a) struct vgic_irq for a given LPI.
 * If this LPI is already mapped on another ITS, we increase its refcount
 * and return a pointer to the existing structure.
 * If this is a "new" LPI, we allocate and initialize a new struct vgic_irq.
 * This function returns a pointer to the _unlocked_ structure.
 */
static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct vgic_irq *irq = vgic_get_irq(kvm, NULL, intid), *oldirq;

	/* In this case there is no put, since we keep the reference. */
	if (irq)
		return irq;

	irq = kzalloc(sizeof(struct vgic_irq), GFP_KERNEL);
	if (!irq)
		return NULL;

	INIT_LIST_HEAD(&irq->lpi_list);
	INIT_LIST_HEAD(&irq->ap_list);
	spin_lock_init(&irq->irq_lock);

	irq->config = VGIC_CONFIG_EDGE;
	kref_init(&irq->refcount);
	irq->intid = intid;

	spin_lock(&dist->lpi_list_lock);

	/*
	 * There could be a race with another vgic_add_lpi(), so we need to
	 * check that we don't add a second list entry with the same LPI.
	 */
	list_for_each_entry(oldirq, &dist->lpi_list_head, lpi_list) {
		if (oldirq->intid != intid)
			continue;

		/* Someone was faster with adding this LPI, let's use that. */
		kfree(irq);
		irq = oldirq;

		/*
		 * This increases the refcount, the caller is expected to
		 * call vgic_put_irq() on the returned pointer once it's
		 * finished with the IRQ.
		 */
		kref_get(&irq->refcount);

		goto out_unlock;
	}

	list_add_tail(&irq->lpi_list, &dist->lpi_list_head);
	dist->lpi_list_count++;

out_unlock:
	spin_unlock(&dist->lpi_list_lock);

	return irq;
}
struct its_device {
	struct list_head dev_list;

	/* the head for the list of ITTEs */
	struct list_head itt_head;
	u32 device_id;
};

#define COLLECTION_NOT_MAPPED ((u32)~0)

struct its_collection {
	struct list_head coll_list;

	u32 collection_id;
	u32 target_addr;
};

#define its_is_collection_mapped(coll) ((coll) && \
	((coll)->target_addr != COLLECTION_NOT_MAPPED))

struct its_itte {
	struct list_head itte_list;

	struct vgic_irq *irq;
	struct its_collection *collection;
	u32 lpi;
	u32 event_id;
};
/*
 * Finds and returns a device in the device table for an ITS.
 * Must be called with the its_lock mutex held.
 */
static struct its_device *find_its_device(struct vgic_its *its, u32 device_id)
{
	struct its_device *device;

	list_for_each_entry(device, &its->device_list, dev_list)
		if (device_id == device->device_id)
			return device;

	return NULL;
}
/*
 * Finds and returns an interrupt translation table entry (ITTE) for a given
 * Device ID/Event ID pair on an ITS.
 * Must be called with the its_lock mutex held.
 */
static struct its_itte *find_itte(struct vgic_its *its, u32 device_id,
				  u32 event_id)
{
	struct its_device *device;
	struct its_itte *itte;

	device = find_its_device(its, device_id);
	if (device == NULL)
		return NULL;

	list_for_each_entry(itte, &device->itt_head, itte_list)
		if (itte->event_id == event_id)
			return itte;

	return NULL;
}
/* To be used as an iterator, this macro misses the enclosing parentheses */
#define for_each_lpi_its(dev, itte, its) \
	list_for_each_entry(dev, &(its)->device_list, dev_list) \
		list_for_each_entry(itte, &(dev)->itt_head, itte_list)
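/*
 * A minimal usage sketch: without the enclosing parentheses the macro
 * expands to two nested list_for_each_entry() loops, so the statement
 * (or braced block) that follows runs once per ITTE on this ITS, e.g.:
 *
 *	for_each_lpi_its(device, itte, its)
 *		if (itte->collection)
 *			do_something(itte);
 *
 * (do_something() is just a placeholder here.)
 */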
/*
 * We only implement 48 bits of PA at the moment, although the ITS
 * supports more. Let's be restrictive here.
 */
#define BASER_ADDRESS(x)	((x) & GENMASK_ULL(47, 16))
#define CBASER_ADDRESS(x)	((x) & GENMASK_ULL(47, 12))
#define PENDBASER_ADDRESS(x)	((x) & GENMASK_ULL(47, 16))
#define PROPBASER_ADDRESS(x)	((x) & GENMASK_ULL(47, 12))
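
/*
 * INTIDs below 8192 are used for SGIs, PPIs, SPIs and special values;
 * the GICv3 architecture reserves everything from 8192 upwards for LPIs,
 * hence the offset below into the property and pending tables.
 */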
#define GIC_LPI_OFFSET		8192
/*
 * Finds and returns a collection in the ITS collection table.
 * Must be called with the its_lock mutex held.
 */
static struct its_collection *find_collection(struct vgic_its *its, int coll_id)
{
	struct its_collection *collection;

	list_for_each_entry(collection, &its->collection_list, coll_list) {
		if (coll_id == collection->collection_id)
			return collection;
	}

	return NULL;
}
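/*
 * Each LPI has one configuration byte in the guest's property table:
 * bits[7:2] hold the priority and bit[0] the enable bit (LPI_PROP_ENABLED),
 * as defined by the GICv3 architecture. The masks below extract those.
 */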
#define LPI_PROP_ENABLE_BIT(p)	((p) & LPI_PROP_ENABLED)
#define LPI_PROP_PRIORITY(p)	((p) & 0xfc)
/*
 * Reads the configuration data for a given LPI from guest memory and
 * updates the fields in struct vgic_irq.
 * If filter_vcpu is not NULL, applies only if the IRQ is targeting this
 * VCPU. Unconditionally applies if filter_vcpu is NULL.
 */
static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
			     struct kvm_vcpu *filter_vcpu)
{
	u64 propbase = PROPBASER_ADDRESS(kvm->arch.vgic.propbaser);
	u8 prop;
	int ret;

	ret = kvm_read_guest(kvm, propbase + irq->intid - GIC_LPI_OFFSET,
			     &prop, 1);
	if (ret)
		return ret;

	spin_lock(&irq->irq_lock);

	if (!filter_vcpu || filter_vcpu == irq->target_vcpu) {
		irq->priority = LPI_PROP_PRIORITY(prop);
		irq->enabled = LPI_PROP_ENABLE_BIT(prop);

		vgic_queue_irq_unlock(kvm, irq);
	} else {
		spin_unlock(&irq->irq_lock);
	}

	return 0;
}
/*
 * Create a snapshot of the current LPI list, so that we can enumerate all
 * LPIs without holding any lock.
 * Returns the array length and puts the kmalloc'ed array into intid_ptr.
 */
static int vgic_copy_lpi_list(struct kvm *kvm, u32 **intid_ptr)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct vgic_irq *irq;
	u32 *intids;
	int irq_count = dist->lpi_list_count, i = 0;

	/*
	 * We use the current value of the list length, which may change
	 * after the kmalloc. We don't care, because the guest shouldn't
	 * change anything while the command handling is still running,
	 * and in the worst case we would miss a new IRQ, which one wouldn't
	 * expect to be covered by this command anyway.
	 */
	intids = kmalloc_array(irq_count, sizeof(intids[0]), GFP_KERNEL);
	if (!intids)
		return -ENOMEM;

	spin_lock(&dist->lpi_list_lock);
	list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
		/* We don't need to "get" the IRQ, as we hold the list lock. */
		intids[i] = irq->intid;
		if (++i == irq_count)
			break;
	}
	spin_unlock(&dist->lpi_list_lock);

	*intid_ptr = intids;
	return i;
}
/*
 * Promotes the ITS view of affinity of an ITTE (which redistributor this LPI
 * is targeting) to the VGIC's view, which deals with target VCPUs.
 * Needs to be called whenever either the collection for an LPI has
 * changed or the collection itself got retargeted.
 */
static void update_affinity_itte(struct kvm *kvm, struct its_itte *itte)
{
	struct kvm_vcpu *vcpu;

	if (!its_is_collection_mapped(itte->collection))
		return;

	vcpu = kvm_get_vcpu(kvm, itte->collection->target_addr);

	spin_lock(&itte->irq->irq_lock);
	itte->irq->target_vcpu = vcpu;
	spin_unlock(&itte->irq->irq_lock);
}
/*
 * Updates the target VCPU for every LPI targeting this collection.
 * Must be called with the its_lock mutex held.
 */
static void update_affinity_collection(struct kvm *kvm, struct vgic_its *its,
				       struct its_collection *coll)
{
	struct its_device *device;
	struct its_itte *itte;

	for_each_lpi_its(device, itte, its) {
		if (!itte->collection || coll != itte->collection)
			continue;

		update_affinity_itte(kvm, itte);
	}
}
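/*
 * GICR_PROPBASER.IDbits (bits[4:0]) encodes the number of interrupt ID
 * bits minus one; for example a value of 0xf means 16 ID bits, i.e.
 * 65536 INTIDs, of which everything from GIC_LPI_OFFSET upwards is an LPI.
 * We cap this at INTERRUPT_ID_BITS_ITS, the limit this emulation supports.
 */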
static u32 max_lpis_propbaser(u64 propbaser)
{
	int nr_idbits = (propbaser & 0x1f) + 1;

	return 1U << min(nr_idbits, INTERRUPT_ID_BITS_ITS);
}
/*
 * Scan the whole LPI pending table and sync the pending bit in there
 * with our own data structures. This relies on the LPI being
 * mapped before.
 */
static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
{
	gpa_t pendbase = PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser);
	struct vgic_irq *irq;
	int last_byte_offset = -1;
	int ret = 0;
	u32 *intids;
	int nr_irqs, i;

	nr_irqs = vgic_copy_lpi_list(vcpu->kvm, &intids);
	if (nr_irqs < 0)
		return nr_irqs;

	for (i = 0; i < nr_irqs; i++) {
		int byte_offset, bit_nr;
		u8 pendmask;

		byte_offset = intids[i] / BITS_PER_BYTE;
		bit_nr = intids[i] % BITS_PER_BYTE;

		/*
		 * For contiguously allocated LPIs chances are we just read
		 * this very same byte in the last iteration. Reuse that.
		 */
		if (byte_offset != last_byte_offset) {
			ret = kvm_read_guest(vcpu->kvm, pendbase + byte_offset,
					     &pendmask, 1);
			if (ret) {
				kfree(intids);
				return ret;
			}
			last_byte_offset = byte_offset;
		}

		irq = vgic_get_irq(vcpu->kvm, NULL, intids[i]);
		spin_lock(&irq->irq_lock);
		irq->pending = pendmask & (1U << bit_nr);
		vgic_queue_irq_unlock(vcpu->kvm, irq);
		vgic_put_irq(vcpu->kvm, irq);
	}

	kfree(intids);

	return ret;
}
static unsigned long vgic_mmio_read_its_ctlr(struct kvm *kvm,
					     struct vgic_its *its,
					     gpa_t addr, unsigned int len)
{
	u32 reg = 0;

	mutex_lock(&its->cmd_lock);
	if (its->creadr == its->cwriter)
		reg |= GITS_CTLR_QUIESCENT;
	if (its->enabled)
		reg |= GITS_CTLR_ENABLE;
	mutex_unlock(&its->cmd_lock);

	return reg;
}
static void vgic_mmio_write_its_ctlr(struct kvm *kvm, struct vgic_its *its,
				     gpa_t addr, unsigned int len,
				     unsigned long val)
{
	its->enabled = !!(val & GITS_CTLR_ENABLE);
}
static unsigned long vgic_mmio_read_its_typer(struct kvm *kvm,
					      struct vgic_its *its,
					      gpa_t addr, unsigned int len)
{
	u64 reg = GITS_TYPER_PLPIS;

	/*
	 * We use linear CPU numbers for redistributor addressing,
	 * so GITS_TYPER.PTA is 0.
	 * Also we force all PROPBASER registers to be the same, so
	 * CommonLPIAff is 0 as well.
	 * To avoid memory waste in the guest, we keep the number of IDBits and
	 * DevBits low - at least for the time being.
	 */
	reg |= 0x0f << GITS_TYPER_DEVBITS_SHIFT;
	reg |= 0x0f << GITS_TYPER_IDBITS_SHIFT;

	return extract_bytes(reg, addr & 7, len);
}
static unsigned long vgic_mmio_read_its_iidr(struct kvm *kvm,
					     struct vgic_its *its,
					     gpa_t addr, unsigned int len)
{
	return (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
}
static unsigned long vgic_mmio_read_its_idregs(struct kvm *kvm,
					       struct vgic_its *its,
					       gpa_t addr, unsigned int len)
{
	switch (addr & 0xffff) {
	case GITS_PIDR0:
		return 0x92;	/* part number, bits[7:0] */
	case GITS_PIDR1:
		return 0xb4;	/* part number, bits[11:8] */
	case GITS_PIDR2:
		return GIC_PIDR2_ARCH_GICv3 | 0x0b;
	case GITS_PIDR4:
		return 0x40;	/* This is a 64K software visible page */
	/* The following are the ID registers for (any) GIC. */
	case GITS_CIDR0:
		return 0x0d;
	case GITS_CIDR1:
		return 0xf0;
	case GITS_CIDR2:
		return 0x05;
	case GITS_CIDR3:
		return 0xb1;
	}

	return 0;
}
/* Requires the its_lock to be held. */
static void its_free_itte(struct kvm *kvm, struct its_itte *itte)
{
	list_del(&itte->itte_list);

	/* This put matches the get in vgic_add_lpi. */
	vgic_put_irq(kvm, itte->irq);

	kfree(itte);
}
static u64 its_cmd_mask_field(u64 *its_cmd, int word, int shift, int size)
{
	return (le64_to_cpu(its_cmd[word]) >> shift) & (BIT_ULL(size) - 1);
}
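/*
 * Each ITS command is 32 bytes long: four little-endian 64-bit doublewords
 * (words 0..3 below). The accessors follow the GICv3 command layout; for
 * MAPTI, for instance, the DeviceID lives in word 0 bits[63:32], the
 * EventID in word 1 bits[31:0], the physical LPI number in word 1
 * bits[63:32] and the collection ID in word 2 bits[15:0].
 */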
#define its_cmd_get_command(cmd)	its_cmd_mask_field(cmd, 0,  0,  8)
#define its_cmd_get_deviceid(cmd)	its_cmd_mask_field(cmd, 0, 32, 32)
#define its_cmd_get_id(cmd)		its_cmd_mask_field(cmd, 1,  0, 32)
#define its_cmd_get_physical_id(cmd)	its_cmd_mask_field(cmd, 1, 32, 32)
#define its_cmd_get_collection(cmd)	its_cmd_mask_field(cmd, 2,  0, 16)
#define its_cmd_get_target_addr(cmd)	its_cmd_mask_field(cmd, 2, 16, 32)
#define its_cmd_get_validbit(cmd)	its_cmd_mask_field(cmd, 2, 63,  1)
/*
 * The DISCARD command frees an Interrupt Translation Table Entry (ITTE).
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_discard(struct kvm *kvm, struct vgic_its *its,
				       u64 *its_cmd)
{
	u32 device_id = its_cmd_get_deviceid(its_cmd);
	u32 event_id = its_cmd_get_id(its_cmd);
	struct its_itte *itte;

	itte = find_itte(its, device_id, event_id);
	if (itte && itte->collection) {
		/*
		 * Though the spec talks about removing the pending state, we
		 * don't bother here since we clear the ITTE anyway and the
		 * pending state is a property of the ITTE struct.
		 */
		its_free_itte(kvm, itte);
		return 0;
	}

	return E_ITS_DISCARD_UNMAPPED_INTERRUPT;
}
/*
 * The MOVI command moves an ITTE to a different collection.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_movi(struct kvm *kvm, struct vgic_its *its,
				    u64 *its_cmd)
{
	u32 device_id = its_cmd_get_deviceid(its_cmd);
	u32 event_id = its_cmd_get_id(its_cmd);
	u32 coll_id = its_cmd_get_collection(its_cmd);
	struct kvm_vcpu *vcpu;
	struct its_itte *itte;
	struct its_collection *collection;

	itte = find_itte(its, device_id, event_id);
	if (!itte)
		return E_ITS_MOVI_UNMAPPED_INTERRUPT;

	if (!its_is_collection_mapped(itte->collection))
		return E_ITS_MOVI_UNMAPPED_COLLECTION;

	collection = find_collection(its, coll_id);
	if (!its_is_collection_mapped(collection))
		return E_ITS_MOVI_UNMAPPED_COLLECTION;

	itte->collection = collection;
	vcpu = kvm_get_vcpu(kvm, collection->target_addr);

	spin_lock(&itte->irq->irq_lock);
	itte->irq->target_vcpu = vcpu;
	spin_unlock(&itte->irq->irq_lock);

	return 0;
}
static void vgic_its_init_collection(struct vgic_its *its,
				     struct its_collection *collection,
				     u32 coll_id)
{
	collection->collection_id = coll_id;
	collection->target_addr = COLLECTION_NOT_MAPPED;

	list_add_tail(&collection->coll_list, &its->collection_list);
}
/*
 * The MAPTI and MAPI commands map LPIs to ITTEs.
 * Must be called with its_lock mutex held.
 */
static int vgic_its_cmd_handle_mapi(struct kvm *kvm, struct vgic_its *its,
				    u64 *its_cmd, u8 subcmd)
{
	u32 device_id = its_cmd_get_deviceid(its_cmd);
	u32 event_id = its_cmd_get_id(its_cmd);
	u32 coll_id = its_cmd_get_collection(its_cmd);
	struct its_itte *itte;
	struct its_device *device;
	struct its_collection *collection, *new_coll = NULL;
	int lpi_nr;

	device = find_its_device(its, device_id);
	if (!device)
		return E_ITS_MAPTI_UNMAPPED_DEVICE;

	collection = find_collection(its, coll_id);
	if (!collection) {
		new_coll = kzalloc(sizeof(struct its_collection), GFP_KERNEL);
		if (!new_coll)
			return -ENOMEM;
	}

	if (subcmd == GITS_CMD_MAPTI)
		lpi_nr = its_cmd_get_physical_id(its_cmd);
	else
		lpi_nr = event_id;
	if (lpi_nr < GIC_LPI_OFFSET ||
	    lpi_nr >= max_lpis_propbaser(kvm->arch.vgic.propbaser)) {
		kfree(new_coll);
		return E_ITS_MAPTI_PHYSICALID_OOR;
	}

	itte = find_itte(its, device_id, event_id);
	if (!itte) {
		itte = kzalloc(sizeof(struct its_itte), GFP_KERNEL);
		if (!itte) {
			kfree(new_coll);
			return -ENOMEM;
		}

		itte->event_id = event_id;
		list_add_tail(&itte->itte_list, &device->itt_head);
	}

	if (!collection) {
		collection = new_coll;
		vgic_its_init_collection(its, collection, coll_id);
	}

	itte->collection = collection;
	itte->lpi = lpi_nr;
	itte->irq = vgic_add_lpi(kvm, lpi_nr);
	update_affinity_itte(kvm, itte);

	/*
	 * We "cache" the configuration table entries in our struct vgic_irq
	 * instances. However we only have those structs for mapped IRQs, so
	 * we read in the respective config data from memory here upon
	 * mapping the LPI.
	 */
	update_lpi_config(kvm, itte->irq, NULL);

	return 0;
}
/* Requires the its_lock to be held. */
static void vgic_its_unmap_device(struct kvm *kvm, struct its_device *device)
{
	struct its_itte *itte, *temp;

	/*
	 * The spec says that unmapping a device with still valid
	 * ITTEs associated is UNPREDICTABLE. We remove all ITTEs,
	 * since we cannot leave the memory unreferenced.
	 */
	list_for_each_entry_safe(itte, temp, &device->itt_head, itte_list)
		its_free_itte(kvm, itte);

	list_del(&device->dev_list);
	kfree(device);
}
/*
 * Check whether a device ID can be stored into the guest device tables.
 * For a direct table this is pretty easy, but gets a bit nasty for
 * indirect tables. We check whether the resulting guest physical address
 * is actually valid (covered by a memslot and guest accessible).
 * For this we have to read the respective first level entry.
 */
static bool vgic_its_check_device_id(struct kvm *kvm, struct vgic_its *its,
				     int device_id)
{
	u64 r = its->baser_device_table;
	int nr_entries = GITS_BASER_NR_PAGES(r) * SZ_64K;
	int index;
	u64 indirect_ptr;
	gfn_t gfn;

	if (!(r & GITS_BASER_INDIRECT))
		return device_id < (nr_entries / GITS_BASER_ENTRY_SIZE(r));

	/* calculate and check the index into the 1st level */
	index = device_id / (SZ_64K / GITS_BASER_ENTRY_SIZE(r));
	if (index >= (nr_entries / sizeof(u64)))
		return false;

	/* Each 1st level entry is represented by a 64-bit value. */
	if (kvm_read_guest(kvm,
			   BASER_ADDRESS(r) + index * sizeof(indirect_ptr),
			   &indirect_ptr, sizeof(indirect_ptr)))
		return false;

	indirect_ptr = le64_to_cpu(indirect_ptr);

	/* check the valid bit of the first level entry */
	if (!(indirect_ptr & BIT_ULL(63)))
		return false;

	/*
	 * Mask the guest physical address and calculate the frame number.
	 * Any address beyond our supported 48 bits of PA will be caught
	 * by the actual check in the final step.
	 */
	gfn = (indirect_ptr & GENMASK_ULL(51, 16)) >> PAGE_SHIFT;

	return kvm_is_visible_gfn(kvm, gfn);
}
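/*
 * A worked example for the indirect case above, assuming the 8-byte entry
 * size this emulation advertises: one 64K second-level page holds
 * 64K/8 = 8192 device table entries, so the first-level index is
 * device_id / 8192, and each 64K first-level page holds 8192 u64 entries.
 * Note that nr_entries above counts bytes, despite its name.
 */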
/*
 * MAPD maps or unmaps a device ID to Interrupt Translation Tables (ITTs).
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_mapd(struct kvm *kvm, struct vgic_its *its,
				    u64 *its_cmd)
{
	u32 device_id = its_cmd_get_deviceid(its_cmd);
	bool valid = its_cmd_get_validbit(its_cmd);
	struct its_device *device;

	if (!vgic_its_check_device_id(kvm, its, device_id))
		return E_ITS_MAPD_DEVICE_OOR;

	device = find_its_device(its, device_id);

	/*
	 * The spec says that calling MAPD on an already mapped device
	 * invalidates all cached data for this device. We implement this
	 * by removing the mapping and re-establishing it.
	 */
	if (device)
		vgic_its_unmap_device(kvm, device);

	/*
	 * The spec does not say whether unmapping a not-mapped device
	 * is an error, so we are done in any case.
	 */
	if (!valid)
		return 0;

	device = kzalloc(sizeof(struct its_device), GFP_KERNEL);
	if (!device)
		return -ENOMEM;

	device->device_id = device_id;
	INIT_LIST_HEAD(&device->itt_head);

	list_add_tail(&device->dev_list, &its->device_list);

	return 0;
}
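/*
 * A sizing example for the helper below, assuming the 8-byte entry size
 * set up at ITS creation time: one 64K page of collection table yields
 * 8192 collection IDs, and GITS_BASER allows up to 256 such pages.
 */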
static int vgic_its_nr_collection_ids(struct vgic_its *its)
{
	u64 r = its->baser_coll_table;

	return (GITS_BASER_NR_PAGES(r) * SZ_64K) / GITS_BASER_ENTRY_SIZE(r);
}
/*
 * The MAPC command maps collection IDs to redistributors.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_mapc(struct kvm *kvm, struct vgic_its *its,
				    u64 *its_cmd)
{
	u16 coll_id;
	u32 target_addr;
	struct its_collection *collection;
	bool valid;

	valid = its_cmd_get_validbit(its_cmd);
	coll_id = its_cmd_get_collection(its_cmd);
	target_addr = its_cmd_get_target_addr(its_cmd);

	if (target_addr >= atomic_read(&kvm->online_vcpus))
		return E_ITS_MAPC_PROCNUM_OOR;

	if (coll_id >= vgic_its_nr_collection_ids(its))
		return E_ITS_MAPC_COLLECTION_OOR;

	collection = find_collection(its, coll_id);

	if (!valid) {
		struct its_device *device;
		struct its_itte *itte;

		/*
		 * Clearing the mapping for that collection ID removes the
		 * entry from the list. If there wasn't any before, we can
		 * go home early.
		 */
		if (!collection)
			return 0;

		for_each_lpi_its(device, itte, its)
			if (itte->collection &&
			    itte->collection->collection_id == coll_id)
				itte->collection = NULL;

		list_del(&collection->coll_list);
		kfree(collection);
	} else {
		if (!collection) {
			collection = kzalloc(sizeof(struct its_collection),
					     GFP_KERNEL);
			if (!collection)
				return -ENOMEM;

			vgic_its_init_collection(its, collection, coll_id);
			collection->target_addr = target_addr;
		} else {
			collection->target_addr = target_addr;
			update_affinity_collection(kvm, its, collection);
		}
	}

	return 0;
}
/*
 * The CLEAR command removes the pending state for a particular LPI.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_clear(struct kvm *kvm, struct vgic_its *its,
				     u64 *its_cmd)
{
	u32 device_id = its_cmd_get_deviceid(its_cmd);
	u32 event_id = its_cmd_get_id(its_cmd);
	struct its_itte *itte;

	itte = find_itte(its, device_id, event_id);
	if (!itte)
		return E_ITS_CLEAR_UNMAPPED_INTERRUPT;

	itte->irq->pending = false;

	return 0;
}
/*
 * The INV command syncs the configuration bits from the memory table.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_inv(struct kvm *kvm, struct vgic_its *its,
				   u64 *its_cmd)
{
	u32 device_id = its_cmd_get_deviceid(its_cmd);
	u32 event_id = its_cmd_get_id(its_cmd);
	struct its_itte *itte;

	itte = find_itte(its, device_id, event_id);
	if (!itte)
		return E_ITS_INV_UNMAPPED_INTERRUPT;

	return update_lpi_config(kvm, itte->irq, NULL);
}
/*
 * The INVALL command requests flushing of all IRQ data in this collection.
 * Find the VCPU mapped to that collection, then iterate over the VM's list
 * of mapped LPIs and update the configuration for each IRQ which targets
 * the specified VCPU. The configuration will be read from the in-memory
 * configuration table.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_invall(struct kvm *kvm, struct vgic_its *its,
				      u64 *its_cmd)
{
	u32 coll_id = its_cmd_get_collection(its_cmd);
	struct its_collection *collection;
	struct kvm_vcpu *vcpu;
	struct vgic_irq *irq;
	u32 *intids;
	int irq_count, i;

	collection = find_collection(its, coll_id);
	if (!its_is_collection_mapped(collection))
		return E_ITS_INVALL_UNMAPPED_COLLECTION;

	vcpu = kvm_get_vcpu(kvm, collection->target_addr);

	irq_count = vgic_copy_lpi_list(kvm, &intids);
	if (irq_count < 0)
		return irq_count;

	for (i = 0; i < irq_count; i++) {
		irq = vgic_get_irq(kvm, NULL, intids[i]);
		if (!irq)
			continue;
		update_lpi_config(kvm, irq, vcpu);
		vgic_put_irq(kvm, irq);
	}

	kfree(intids);

	return 0;
}
/*
 * The MOVALL command moves the pending state of all IRQs targeting one
 * redistributor to another. We don't hold the pending state in the VCPUs,
 * but in the IRQs instead, so there is really not much to do for us here.
 * However the spec says that no IRQ must target the old redistributor
 * afterwards, so we make sure that no LPI is using the associated target_vcpu.
 * This command affects all LPIs in the system that target that redistributor.
 */
static int vgic_its_cmd_handle_movall(struct kvm *kvm, struct vgic_its *its,
				      u64 *its_cmd)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	u32 target1_addr = its_cmd_get_target_addr(its_cmd);
	u32 target2_addr = its_cmd_mask_field(its_cmd, 3, 16, 32);
	struct kvm_vcpu *vcpu1, *vcpu2;
	struct vgic_irq *irq;

	if (target1_addr >= atomic_read(&kvm->online_vcpus) ||
	    target2_addr >= atomic_read(&kvm->online_vcpus))
		return E_ITS_MOVALL_PROCNUM_OOR;

	if (target1_addr == target2_addr)
		return 0;

	vcpu1 = kvm_get_vcpu(kvm, target1_addr);
	vcpu2 = kvm_get_vcpu(kvm, target2_addr);

	spin_lock(&dist->lpi_list_lock);

	list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
		spin_lock(&irq->irq_lock);

		if (irq->target_vcpu == vcpu1)
			irq->target_vcpu = vcpu2;

		spin_unlock(&irq->irq_lock);
	}

	spin_unlock(&dist->lpi_list_lock);

	return 0;
}
/*
 * This function is called with the its_cmd lock held, but the ITS data
 * structure lock dropped.
 */
static int vgic_its_handle_command(struct kvm *kvm, struct vgic_its *its,
				   u64 *its_cmd)
{
	u8 cmd = its_cmd_get_command(its_cmd);
	int ret = -ENODEV;

	mutex_lock(&its->its_lock);
	switch (cmd) {
	case GITS_CMD_MAPD:
		ret = vgic_its_cmd_handle_mapd(kvm, its, its_cmd);
		break;
	case GITS_CMD_MAPC:
		ret = vgic_its_cmd_handle_mapc(kvm, its, its_cmd);
		break;
	case GITS_CMD_MAPI:
		ret = vgic_its_cmd_handle_mapi(kvm, its, its_cmd, cmd);
		break;
	case GITS_CMD_MAPTI:
		ret = vgic_its_cmd_handle_mapi(kvm, its, its_cmd, cmd);
		break;
	case GITS_CMD_MOVI:
		ret = vgic_its_cmd_handle_movi(kvm, its, its_cmd);
		break;
	case GITS_CMD_DISCARD:
		ret = vgic_its_cmd_handle_discard(kvm, its, its_cmd);
		break;
	case GITS_CMD_CLEAR:
		ret = vgic_its_cmd_handle_clear(kvm, its, its_cmd);
		break;
	case GITS_CMD_MOVALL:
		ret = vgic_its_cmd_handle_movall(kvm, its, its_cmd);
		break;
	case GITS_CMD_INV:
		ret = vgic_its_cmd_handle_inv(kvm, its, its_cmd);
		break;
	case GITS_CMD_INVALL:
		ret = vgic_its_cmd_handle_invall(kvm, its, its_cmd);
		break;
	case GITS_CMD_SYNC:
		/* we ignore this command: we are in sync all of the time */
		ret = 0;
		break;
	}
	mutex_unlock(&its->its_lock);

	return ret;
}
static u64 vgic_sanitise_its_baser(u64 reg)
{
	reg = vgic_sanitise_field(reg, GITS_BASER_SHAREABILITY_MASK,
				  GITS_BASER_SHAREABILITY_SHIFT,
				  vgic_sanitise_shareability);
	reg = vgic_sanitise_field(reg, GITS_BASER_INNER_CACHEABILITY_MASK,
				  GITS_BASER_INNER_CACHEABILITY_SHIFT,
				  vgic_sanitise_inner_cacheability);
	reg = vgic_sanitise_field(reg, GITS_BASER_OUTER_CACHEABILITY_MASK,
				  GITS_BASER_OUTER_CACHEABILITY_SHIFT,
				  vgic_sanitise_outer_cacheability);

	/* Bits 15:12 contain bits 51:48 of the PA, which we don't support. */
	reg &= ~GENMASK_ULL(15, 12);

	/* We support only one (ITS) page size: 64K */
	reg = (reg & ~GITS_BASER_PAGE_SIZE_MASK) | GITS_BASER_PAGE_SIZE_64K;

	return reg;
}
static u64 vgic_sanitise_its_cbaser(u64 reg)
{
	reg = vgic_sanitise_field(reg, GITS_CBASER_SHAREABILITY_MASK,
				  GITS_CBASER_SHAREABILITY_SHIFT,
				  vgic_sanitise_shareability);
	reg = vgic_sanitise_field(reg, GITS_CBASER_INNER_CACHEABILITY_MASK,
				  GITS_CBASER_INNER_CACHEABILITY_SHIFT,
				  vgic_sanitise_inner_cacheability);
	reg = vgic_sanitise_field(reg, GITS_CBASER_OUTER_CACHEABILITY_MASK,
				  GITS_CBASER_OUTER_CACHEABILITY_SHIFT,
				  vgic_sanitise_outer_cacheability);

	/*
	 * Sanitise the physical address to be 64k aligned.
	 * Also limit the physical addresses to 48 bits.
	 */
	reg &= ~(GENMASK_ULL(51, 48) | GENMASK_ULL(15, 12));

	return reg;
}
static unsigned long vgic_mmio_read_its_cbaser(struct kvm *kvm,
					       struct vgic_its *its,
					       gpa_t addr, unsigned int len)
{
	return extract_bytes(its->cbaser, addr & 7, len);
}
static void vgic_mmio_write_its_cbaser(struct kvm *kvm, struct vgic_its *its,
				       gpa_t addr, unsigned int len,
				       unsigned long val)
{
	/* When GITS_CTLR.Enable is 1, this register is RO. */
	if (its->enabled)
		return;

	mutex_lock(&its->cmd_lock);
	its->cbaser = update_64bit_reg(its->cbaser, addr & 7, len, val);
	its->cbaser = vgic_sanitise_its_cbaser(its->cbaser);
	its->creadr = 0;
	/*
	 * CWRITER is architecturally UNKNOWN on reset, but we need to reset
	 * it to CREADR to make sure we start with an empty command buffer.
	 */
	its->cwriter = its->creadr;
	mutex_unlock(&its->cmd_lock);
}
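/*
 * The command buffer sizing below follows the GICv3 spec: GITS_CBASER.Size
 * (bits[7:0]) holds the number of 4K pages minus one, so for example
 * Size == 1 yields an 8K buffer holding 8192/32 = 256 commands, and the
 * maximum is 256 pages = 1MB, i.e. 32768 commands of 32 bytes each.
 */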
#define ITS_CMD_BUFFER_SIZE(baser)	((((baser) & 0xff) + 1) << 12)
#define ITS_CMD_SIZE			32
#define ITS_CMD_OFFSET(reg)		((reg) & GENMASK(19, 5))
/*
 * By writing to CWRITER the guest announces new commands to be processed.
 * To avoid any races in the first place, we take the its_cmd lock, which
 * protects our ring buffer variables, so that there is only one user
 * per ITS handling commands at a given time.
 */
static void vgic_mmio_write_its_cwriter(struct kvm *kvm, struct vgic_its *its,
					gpa_t addr, unsigned int len,
					unsigned long val)
{
	gpa_t cbaser;
	u64 cmd_buf[4];
	u32 reg;

	if (!its)
		return;

	mutex_lock(&its->cmd_lock);

	reg = update_64bit_reg(its->cwriter, addr & 7, len, val);
	reg = ITS_CMD_OFFSET(reg);
	if (reg >= ITS_CMD_BUFFER_SIZE(its->cbaser)) {
		mutex_unlock(&its->cmd_lock);
		return;
	}

	its->cwriter = reg;
	cbaser = CBASER_ADDRESS(its->cbaser);

	while (its->cwriter != its->creadr) {
		int ret = kvm_read_guest(kvm, cbaser + its->creadr,
					 cmd_buf, ITS_CMD_SIZE);
		/*
		 * If kvm_read_guest() fails, this could be due to the guest
		 * programming a bogus value in CBASER or something else going
		 * wrong from which we cannot easily recover.
		 * According to section 6.3.2 in the GICv3 spec we can just
		 * ignore that command then.
		 */
		if (!ret)
			vgic_its_handle_command(kvm, its, cmd_buf);

		its->creadr += ITS_CMD_SIZE;
		if (its->creadr == ITS_CMD_BUFFER_SIZE(its->cbaser))
			its->creadr = 0;
	}

	mutex_unlock(&its->cmd_lock);
}
static unsigned long vgic_mmio_read_its_cwriter(struct kvm *kvm,
						struct vgic_its *its,
						gpa_t addr, unsigned int len)
{
	return extract_bytes(its->cwriter, addr & 0x7, len);
}

static unsigned long vgic_mmio_read_its_creadr(struct kvm *kvm,
					       struct vgic_its *its,
					       gpa_t addr, unsigned int len)
{
	return extract_bytes(its->creadr, addr & 0x7, len);
}
#define BASER_INDEX(addr) (((addr) / sizeof(u64)) & 0x7)
static unsigned long vgic_mmio_read_its_baser(struct kvm *kvm,
					      struct vgic_its *its,
					      gpa_t addr, unsigned int len)
{
	u64 reg;

	switch (BASER_INDEX(addr)) {
	case 0:
		reg = its->baser_device_table;
		break;
	case 1:
		reg = its->baser_coll_table;
		break;
	default:
		reg = 0;
		break;
	}

	return extract_bytes(reg, addr & 7, len);
}
#define GITS_BASER_RO_MASK	(GENMASK_ULL(52, 48) | GENMASK_ULL(58, 56))
static void vgic_mmio_write_its_baser(struct kvm *kvm,
				      struct vgic_its *its,
				      gpa_t addr, unsigned int len,
				      unsigned long val)
{
	u64 entry_size, device_type;
	u64 reg, *regptr, clearbits = 0;

	/* When GITS_CTLR.Enable is 1, we ignore write accesses. */
	if (its->enabled)
		return;

	switch (BASER_INDEX(addr)) {
	case 0:
		regptr = &its->baser_device_table;
		entry_size = 8;
		device_type = GITS_BASER_TYPE_DEVICE;
		break;
	case 1:
		regptr = &its->baser_coll_table;
		entry_size = 8;
		device_type = GITS_BASER_TYPE_COLLECTION;
		clearbits = GITS_BASER_INDIRECT;
		break;
	default:
		return;
	}

	reg = update_64bit_reg(*regptr, addr & 7, len, val);
	reg &= ~GITS_BASER_RO_MASK;
	reg &= ~clearbits;

	reg |= (entry_size - 1) << GITS_BASER_ENTRY_SIZE_SHIFT;
	reg |= device_type << GITS_BASER_TYPE_SHIFT;

	reg = vgic_sanitise_its_baser(reg);

	*regptr = reg;
}
#define REGISTER_ITS_DESC(off, rd, wr, length, acc)		\
{								\
	.reg_offset = off,					\
	.len = length,						\
	.access_flags = acc,					\
	.its_read = rd,						\
	.its_write = wr,					\
}

static void its_mmio_write_wi(struct kvm *kvm, struct vgic_its *its,
			      gpa_t addr, unsigned int len, unsigned long val)
{
	/* Ignore */
}
static struct vgic_register_region its_registers[] = {
	REGISTER_ITS_DESC(GITS_CTLR,
		vgic_mmio_read_its_ctlr, vgic_mmio_write_its_ctlr, 4,
		VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC(GITS_IIDR,
		vgic_mmio_read_its_iidr, its_mmio_write_wi, 4,
		VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC(GITS_TYPER,
		vgic_mmio_read_its_typer, its_mmio_write_wi, 8,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC(GITS_CBASER,
		vgic_mmio_read_its_cbaser, vgic_mmio_write_its_cbaser, 8,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC(GITS_CWRITER,
		vgic_mmio_read_its_cwriter, vgic_mmio_write_its_cwriter, 8,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC(GITS_CREADR,
		vgic_mmio_read_its_creadr, its_mmio_write_wi, 8,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC(GITS_BASER,
		vgic_mmio_read_its_baser, vgic_mmio_write_its_baser, 0x40,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC(GITS_IDREGS_BASE,
		vgic_mmio_read_its_idregs, its_mmio_write_wi, 0x30,
		VGIC_ACCESS_32bit),
};
/* This is called on setting the LPI enable bit in the redistributor. */
void vgic_enable_lpis(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->arch.vgic_cpu.pendbaser & GICR_PENDBASER_PTZ))
		its_sync_lpi_pending_table(vcpu);
}
static int vgic_its_init_its(struct kvm *kvm, struct vgic_its *its)
{
	struct vgic_io_device *iodev = &its->iodev;
	int ret;

	if (its->initialized)
		return 0;

	if (IS_VGIC_ADDR_UNDEF(its->vgic_its_base))
		return -ENXIO;

	iodev->regions = its_registers;
	iodev->nr_regions = ARRAY_SIZE(its_registers);
	kvm_iodevice_init(&iodev->dev, &kvm_io_gic_ops);

	iodev->base_addr = its->vgic_its_base;
	iodev->iodev_type = IODEV_ITS;
	iodev->its = its;
	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, iodev->base_addr,
				      KVM_VGIC_V3_ITS_SIZE, &iodev->dev);
	mutex_unlock(&kvm->slots_lock);

	if (!ret)
		its->initialized = true;

	return ret;
}
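/*
 * Note on the entry size encoding below: GITS_BASER.Entry_Size holds the
 * size of a table entry in bytes minus one, so (8ULL - 1) advertises the
 * 8-byte entries this emulation uses for both table types.
 */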
#define INITIAL_BASER_VALUE						  \
	(GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWb)		| \
	 GIC_BASER_CACHEABILITY(GITS_BASER, OUTER, SameAsInner)	| \
	 GIC_BASER_SHAREABILITY(GITS_BASER, InnerShareable)		| \
	 ((8ULL - 1) << GITS_BASER_ENTRY_SIZE_SHIFT)			| \
	 GITS_BASER_PAGE_SIZE_64K)
#define INITIAL_PROPBASER_VALUE						  \
	(GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWb)		| \
	 GIC_BASER_CACHEABILITY(GICR_PROPBASER, OUTER, SameAsInner)	| \
	 GIC_BASER_SHAREABILITY(GICR_PROPBASER, InnerShareable))
static int vgic_its_create(struct kvm_device *dev, u32 type)
{
	struct vgic_its *its;

	if (type != KVM_DEV_TYPE_ARM_VGIC_ITS)
		return -ENODEV;

	its = kzalloc(sizeof(struct vgic_its), GFP_KERNEL);
	if (!its)
		return -ENOMEM;

	mutex_init(&its->its_lock);
	mutex_init(&its->cmd_lock);

	its->vgic_its_base = VGIC_ADDR_UNDEF;

	INIT_LIST_HEAD(&its->device_list);
	INIT_LIST_HEAD(&its->collection_list);

	dev->kvm->arch.vgic.has_its = true;
	its->initialized = false;
	its->enabled = false;

	its->baser_device_table = INITIAL_BASER_VALUE			|
		((u64)GITS_BASER_TYPE_DEVICE << GITS_BASER_TYPE_SHIFT);
	its->baser_coll_table = INITIAL_BASER_VALUE			|
		((u64)GITS_BASER_TYPE_COLLECTION << GITS_BASER_TYPE_SHIFT);
	dev->kvm->arch.vgic.propbaser = INITIAL_PROPBASER_VALUE;

	dev->private = its;

	return 0;
}
static void vgic_its_destroy(struct kvm_device *kvm_dev)
{
	struct kvm *kvm = kvm_dev->kvm;
	struct vgic_its *its = kvm_dev->private;
	struct its_device *dev;
	struct its_itte *itte;
	struct list_head *dev_cur, *dev_temp;
	struct list_head *cur, *temp;

	/*
	 * We may end up here without the lists ever having been initialized.
	 * Check this and bail out early to avoid dereferencing a NULL pointer.
	 */
	if (!its->device_list.next)
		return;

	mutex_lock(&its->its_lock);
	list_for_each_safe(dev_cur, dev_temp, &its->device_list) {
		dev = container_of(dev_cur, struct its_device, dev_list);
		list_for_each_safe(cur, temp, &dev->itt_head) {
			itte = container_of(cur, struct its_itte, itte_list);
			its_free_itte(kvm, itte);
		}
		list_del(dev_cur);
		kfree(dev);
	}

	list_for_each_safe(cur, temp, &its->collection_list) {
		list_del(cur);
		kfree(container_of(cur, struct its_collection, coll_list));
	}
	mutex_unlock(&its->its_lock);

	kfree(its);
}
static int vgic_its_has_attr(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR:
		switch (attr->attr) {
		case KVM_VGIC_ITS_ADDR_TYPE:
			return 0;
		}
		break;
	case KVM_DEV_ARM_VGIC_GRP_CTRL:
		switch (attr->attr) {
		case KVM_DEV_ARM_VGIC_CTRL_INIT:
			return 0;
		}
		break;
	}
	return -ENXIO;
}
static int vgic_its_set_attr(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	struct vgic_its *its = dev->private;
	int ret;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		unsigned long type = (unsigned long)attr->attr;
		u64 addr;

		if (type != KVM_VGIC_ITS_ADDR_TYPE)
			return -ENODEV;

		if (its->initialized)
			return -EBUSY;

		if (copy_from_user(&addr, uaddr, sizeof(addr)))
			return -EFAULT;

		ret = vgic_check_ioaddr(dev->kvm, &its->vgic_its_base,
					addr, SZ_64K);
		if (ret)
			return ret;

		its->vgic_its_base = addr;

		return 0;
	}
	case KVM_DEV_ARM_VGIC_GRP_CTRL:
		switch (attr->attr) {
		case KVM_DEV_ARM_VGIC_CTRL_INIT:
			return vgic_its_init_its(dev->kvm, its);
		}
		break;
	}
	return -ENXIO;
}
static int vgic_its_get_attr(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR: {
		struct vgic_its *its = dev->private;
		u64 addr = its->vgic_its_base;
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		unsigned long type = (unsigned long)attr->attr;

		if (type != KVM_VGIC_ITS_ADDR_TYPE)
			return -ENODEV;

		if (copy_to_user(uaddr, &addr, sizeof(addr)))
			return -EFAULT;
		break;
	}
	default:
		return -ENXIO;
	}

	return 0;
}
static struct kvm_device_ops kvm_arm_vgic_its_ops = {
	.name = "kvm-arm-vgic-its",
	.create = vgic_its_create,
	.destroy = vgic_its_destroy,
	.set_attr = vgic_its_set_attr,
	.get_attr = vgic_its_get_attr,
	.has_attr = vgic_its_has_attr,
};
int kvm_vgic_register_its_device(void)
{
	return kvm_register_device_ops(&kvm_arm_vgic_its_ops,
				       KVM_DEV_TYPE_ARM_VGIC_ITS);
}