Merge branch 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git...
[linux-2.6-block.git] / drivers / irqchip / irq-gic-v3-its.c
index 284738add89b3b94f77ad257fd314bc89a423cae..e8d89343d6139424e2caf0baa112955b7005b2e4 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2013, 2014 ARM Limited, All Rights Reserved.
+ * Copyright (C) 2013-2017 ARM Limited, All Rights Reserved.
  * Author: Marc Zyngier <marc.zyngier@arm.com>
  *
  * This program is free software; you can redistribute it and/or modify
@@ -36,6 +36,7 @@
 
 #include <linux/irqchip.h>
 #include <linux/irqchip/arm-gic-v3.h>
+#include <linux/irqchip/arm-gic-v4.h>
 
 #include <asm/cputype.h>
 #include <asm/exception.h>
 
 #define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING    (1 << 0)
 
+static u32 lpi_id_bits;
+
+/*
+ * We allocate memory for PROPBASE to cover 2 ^ lpi_id_bits LPIs to
+ * deal with (one configuration byte per interrupt). PENDBASE has to
+ * be 64kB aligned (one bit per LPI, plus 8192 bits for SPI/PPI/SGI).
+ */
+#define LPI_NRBITS             lpi_id_bits
+#define LPI_PROPBASE_SZ                ALIGN(BIT(LPI_NRBITS), SZ_64K)
+#define LPI_PENDBASE_SZ                ALIGN(BIT(LPI_NRBITS) / 8, SZ_64K)
+
+#define LPI_PROP_DEFAULT_PRIO  0xa0
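+
+/*
+ * Illustrative sizing, assuming lpi_id_bits == 16 (not part of the
+ * original patch):
+ *   LPI_PROPBASE_SZ = ALIGN(1 << 16, SZ_64K)       = 64kB
+ *   LPI_PENDBASE_SZ = ALIGN((1 << 16) / 8, SZ_64K) = 64kB
+ * The extra 8192 SPI/PPI/SGI bits fit in the slack created by the
+ * 64kB alignment.
+ */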
+
 /*
  * Collection structure - just an ID, and a redistributor address to
  * ping. We use one per CPU as a bag of interrupts assigned to this
@@ -88,6 +102,7 @@ struct its_node {
        u32                     ite_size;
        u32                     device_ids;
        int                     numa_node;
+       bool                    is_v4;
 };
 
 #define ITS_ITT_ALIGN          SZ_256
@@ -100,11 +115,17 @@ struct event_lpi_map {
        u16                     *col_map;
        irq_hw_number_t         lpi_base;
        int                     nr_lpis;
+       struct mutex            vlpi_lock;
+       struct its_vm           *vm;
+       struct its_vlpi_map     *vlpi_maps;
+       int                     nr_vlpis;
 };
 
 /*
- * The ITS view of a device - belongs to an ITS, a collection, owns an
- * interrupt translation table, and a list of interrupts.
+ * The ITS view of a device - belongs to an ITS, owns an interrupt
+ * translation table, and a list of interrupts.  If some of its
+ * LPIs are injected into a guest (GICv4), the event_map.vm field
+ * indicates which one.
  */
 struct its_device {
        struct list_head        entry;
@@ -115,13 +136,33 @@ struct its_device {
        u32                     device_id;
 };
 
+static struct {
+       raw_spinlock_t          lock;
+       struct its_device       *dev;
+       struct its_vpe          **vpes;
+       int                     next_victim;
+} vpe_proxy;
+
 static LIST_HEAD(its_nodes);
 static DEFINE_SPINLOCK(its_lock);
 static struct rdists *gic_rdists;
 static struct irq_domain *its_parent;
 
+/*
+ * We can have a maximum of 16 ITSs in the whole system if we're
+ * using the ITSList mechanism.
+ */
+#define ITS_LIST_MAX           16
+
+static unsigned long its_list_map;
+static u16 vmovp_seq_num;
+static DEFINE_RAW_SPINLOCK(vmovp_lock);
+
+static DEFINE_IDA(its_vpeid_ida);
+
 #define gic_data_rdist()               (raw_cpu_ptr(gic_rdists->rdist))
 #define gic_data_rdist_rd_base()       (gic_data_rdist()->rd_base)
+#define gic_data_rdist_vlpi_base()     (gic_data_rdist_rd_base() + SZ_128K)
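+/*
+ * Note (our reading, not from the patch): a GICv4 redistributor is
+ * four 64kB frames (RD, SGI, then the vLPI control pages), so the
+ * vLPI registers live at rd_base + 2 * 64kB.
+ */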
 
 static struct its_collection *dev_event_to_col(struct its_device *its_dev,
                                               u32 event)
@@ -142,6 +183,11 @@ struct its_cmd_desc {
                        u32 event_id;
                } its_inv_cmd;
 
+               struct {
+                       struct its_device *dev;
+                       u32 event_id;
+               } its_clear_cmd;
+
                struct {
                        struct its_device *dev;
                        u32 event_id;
@@ -177,6 +223,38 @@ struct its_cmd_desc {
                struct {
                        struct its_collection *col;
                } its_invall_cmd;
+
+               struct {
+                       struct its_vpe *vpe;
+               } its_vinvall_cmd;
+
+               struct {
+                       struct its_vpe *vpe;
+                       struct its_collection *col;
+                       bool valid;
+               } its_vmapp_cmd;
+
+               struct {
+                       struct its_vpe *vpe;
+                       struct its_device *dev;
+                       u32 virt_id;
+                       u32 event_id;
+                       bool db_enabled;
+               } its_vmapti_cmd;
+
+               struct {
+                       struct its_vpe *vpe;
+                       struct its_device *dev;
+                       u32 event_id;
+                       bool db_enabled;
+               } its_vmovi_cmd;
+
+               struct {
+                       struct its_vpe *vpe;
+                       struct its_collection *col;
+                       u16 seq_num;
+                       u16 its_list;
+               } its_vmovp_cmd;
        };
 };
 
@@ -193,6 +271,9 @@ struct its_cmd_block {
 typedef struct its_collection *(*its_cmd_builder_t)(struct its_cmd_block *,
                                                    struct its_cmd_desc *);
 
+typedef struct its_vpe *(*its_cmd_vbuilder_t)(struct its_cmd_block *,
+                                             struct its_cmd_desc *);
+
 static void its_mask_encode(u64 *raw_cmd, u64 val, int h, int l)
 {
        u64 mask = GENMASK_ULL(h, l);
@@ -245,6 +326,46 @@ static void its_encode_collection(struct its_cmd_block *cmd, u16 col)
        its_mask_encode(&cmd->raw_cmd[2], col, 15, 0);
 }
 
+static void its_encode_vpeid(struct its_cmd_block *cmd, u16 vpeid)
+{
+       its_mask_encode(&cmd->raw_cmd[1], vpeid, 47, 32);
+}
+
+static void its_encode_virt_id(struct its_cmd_block *cmd, u32 virt_id)
+{
+       its_mask_encode(&cmd->raw_cmd[2], virt_id, 31, 0);
+}
+
+static void its_encode_db_phys_id(struct its_cmd_block *cmd, u32 db_phys_id)
+{
+       its_mask_encode(&cmd->raw_cmd[2], db_phys_id, 63, 32);
+}
+
+static void its_encode_db_valid(struct its_cmd_block *cmd, bool db_valid)
+{
+       its_mask_encode(&cmd->raw_cmd[2], db_valid, 0, 0);
+}
+
+static void its_encode_seq_num(struct its_cmd_block *cmd, u16 seq_num)
+{
+       its_mask_encode(&cmd->raw_cmd[0], seq_num, 47, 32);
+}
+
+static void its_encode_its_list(struct its_cmd_block *cmd, u16 its_list)
+{
+       its_mask_encode(&cmd->raw_cmd[1], its_list, 15, 0);
+}
+
+static void its_encode_vpt_addr(struct its_cmd_block *cmd, u64 vpt_pa)
+{
+       its_mask_encode(&cmd->raw_cmd[3], vpt_pa >> 16, 50, 16);
+}
+
+static void its_encode_vpt_size(struct its_cmd_block *cmd, u8 vpt_size)
+{
+       its_mask_encode(&cmd->raw_cmd[3], vpt_size, 4, 0);
+}
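+
+/*
+ * Worked example (illustrative only): its_encode_vpt_addr() stores
+ * vpt_pa >> 16 at bits [50:16] of raw_cmd[3], i.e. PA bits [50:16]
+ * end up in place and bits [15:0] are implicitly zero - hence the
+ * requirement for a 64kB-aligned pending table.
+ */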
+
 static inline void its_fixup_cmd(struct its_cmd_block *cmd)
 {
        /* Let's fixup BE commands */
@@ -358,6 +479,40 @@ static struct its_collection *its_build_inv_cmd(struct its_cmd_block *cmd,
        return col;
 }
 
+static struct its_collection *its_build_int_cmd(struct its_cmd_block *cmd,
+                                               struct its_cmd_desc *desc)
+{
+       struct its_collection *col;
+
+       col = dev_event_to_col(desc->its_int_cmd.dev,
+                              desc->its_int_cmd.event_id);
+
+       its_encode_cmd(cmd, GITS_CMD_INT);
+       its_encode_devid(cmd, desc->its_int_cmd.dev->device_id);
+       its_encode_event_id(cmd, desc->its_int_cmd.event_id);
+
+       its_fixup_cmd(cmd);
+
+       return col;
+}
+
+static struct its_collection *its_build_clear_cmd(struct its_cmd_block *cmd,
+                                                 struct its_cmd_desc *desc)
+{
+       struct its_collection *col;
+
+       col = dev_event_to_col(desc->its_clear_cmd.dev,
+                              desc->its_clear_cmd.event_id);
+
+       its_encode_cmd(cmd, GITS_CMD_CLEAR);
+       its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id);
+       its_encode_event_id(cmd, desc->its_clear_cmd.event_id);
+
+       its_fixup_cmd(cmd);
+
+       return col;
+}
+
 static struct its_collection *its_build_invall_cmd(struct its_cmd_block *cmd,
                                                   struct its_cmd_desc *desc)
 {
@@ -369,6 +524,94 @@ static struct its_collection *its_build_invall_cmd(struct its_cmd_block *cmd,
        return NULL;
 }
 
+static struct its_vpe *its_build_vinvall_cmd(struct its_cmd_block *cmd,
+                                            struct its_cmd_desc *desc)
+{
+       its_encode_cmd(cmd, GITS_CMD_VINVALL);
+       its_encode_vpeid(cmd, desc->its_vinvall_cmd.vpe->vpe_id);
+
+       its_fixup_cmd(cmd);
+
+       return desc->its_vinvall_cmd.vpe;
+}
+
+static struct its_vpe *its_build_vmapp_cmd(struct its_cmd_block *cmd,
+                                          struct its_cmd_desc *desc)
+{
+       unsigned long vpt_addr;
+
+       vpt_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page));
+
+       its_encode_cmd(cmd, GITS_CMD_VMAPP);
+       its_encode_vpeid(cmd, desc->its_vmapp_cmd.vpe->vpe_id);
+       its_encode_valid(cmd, desc->its_vmapp_cmd.valid);
+       its_encode_target(cmd, desc->its_vmapp_cmd.col->target_address);
+       its_encode_vpt_addr(cmd, vpt_addr);
+       its_encode_vpt_size(cmd, LPI_NRBITS - 1);
+
+       its_fixup_cmd(cmd);
+
+       return desc->its_vmapp_cmd.vpe;
+}
+
+static struct its_vpe *its_build_vmapti_cmd(struct its_cmd_block *cmd,
+                                           struct its_cmd_desc *desc)
+{
+       u32 db;
+
+       if (desc->its_vmapti_cmd.db_enabled)
+               db = desc->its_vmapti_cmd.vpe->vpe_db_lpi;
+       else
+               db = 1023;
+
+       its_encode_cmd(cmd, GITS_CMD_VMAPTI);
+       its_encode_devid(cmd, desc->its_vmapti_cmd.dev->device_id);
+       its_encode_vpeid(cmd, desc->its_vmapti_cmd.vpe->vpe_id);
+       its_encode_event_id(cmd, desc->its_vmapti_cmd.event_id);
+       its_encode_db_phys_id(cmd, db);
+       its_encode_virt_id(cmd, desc->its_vmapti_cmd.virt_id);
+
+       its_fixup_cmd(cmd);
+
+       return desc->its_vmapti_cmd.vpe;
+}
+
+static struct its_vpe *its_build_vmovi_cmd(struct its_cmd_block *cmd,
+                                          struct its_cmd_desc *desc)
+{
+       u32 db;
+
+       if (desc->its_vmovi_cmd.db_enabled)
+               db = desc->its_vmovi_cmd.vpe->vpe_db_lpi;
+       else
+               db = 1023;
+
+       its_encode_cmd(cmd, GITS_CMD_VMOVI);
+       its_encode_devid(cmd, desc->its_vmovi_cmd.dev->device_id);
+       its_encode_vpeid(cmd, desc->its_vmovi_cmd.vpe->vpe_id);
+       its_encode_event_id(cmd, desc->its_vmovi_cmd.event_id);
+       its_encode_db_phys_id(cmd, db);
+       its_encode_db_valid(cmd, true);
+
+       its_fixup_cmd(cmd);
+
+       return desc->its_vmovi_cmd.vpe;
+}
+
+static struct its_vpe *its_build_vmovp_cmd(struct its_cmd_block *cmd,
+                                          struct its_cmd_desc *desc)
+{
+       its_encode_cmd(cmd, GITS_CMD_VMOVP);
+       its_encode_seq_num(cmd, desc->its_vmovp_cmd.seq_num);
+       its_encode_its_list(cmd, desc->its_vmovp_cmd.its_list);
+       its_encode_vpeid(cmd, desc->its_vmovp_cmd.vpe->vpe_id);
+       its_encode_target(cmd, desc->its_vmovp_cmd.col->target_address);
+
+       its_fixup_cmd(cmd);
+
+       return desc->its_vmovp_cmd.vpe;
+}
+
 static u64 its_cmd_ptr_to_offset(struct its_node *its,
                                 struct its_cmd_block *ptr)
 {
@@ -453,7 +696,13 @@ static void its_wait_for_range_completion(struct its_node *its,
 
        while (1) {
                rd_idx = readl_relaxed(its->base + GITS_CREADR);
-               if (rd_idx >= to_idx || rd_idx < from_idx)
+
+               /* Direct case */
+               if (from_idx < to_idx && rd_idx >= to_idx)
+                       break;
+
+               /* Wrapped case */
+               if (from_idx >= to_idx && rd_idx >= to_idx && rd_idx < from_idx)
                        break;
 
                count--;
@@ -466,42 +715,84 @@ static void its_wait_for_range_completion(struct its_node *its,
        }
 }
 
-static void its_send_single_command(struct its_node *its,
-                                   its_cmd_builder_t builder,
-                                   struct its_cmd_desc *desc)
+/* Warning, macro hell follows */
+#define BUILD_SINGLE_CMD_FUNC(name, buildtype, synctype, buildfn)      \
+void name(struct its_node *its,                                                \
+         buildtype builder,                                            \
+         struct its_cmd_desc *desc)                                    \
+{                                                                      \
+       struct its_cmd_block *cmd, *sync_cmd, *next_cmd;                \
+       synctype *sync_obj;                                             \
+       unsigned long flags;                                            \
+                                                                       \
+       raw_spin_lock_irqsave(&its->lock, flags);                       \
+                                                                       \
+       cmd = its_allocate_entry(its);                                  \
+       if (!cmd) {             /* We're soooooo screwed... */          \
+               raw_spin_unlock_irqrestore(&its->lock, flags);          \
+               return;                                                 \
+       }                                                               \
+       sync_obj = builder(cmd, desc);                                  \
+       its_flush_cmd(its, cmd);                                        \
+                                                                       \
+       if (sync_obj) {                                                 \
+               sync_cmd = its_allocate_entry(its);                     \
+               if (!sync_cmd)                                          \
+                       goto post;                                      \
+                                                                       \
+               buildfn(sync_cmd, sync_obj);                            \
+               its_flush_cmd(its, sync_cmd);                           \
+       }                                                               \
+                                                                       \
+post:                                                                  \
+       next_cmd = its_post_commands(its);                              \
+       raw_spin_unlock_irqrestore(&its->lock, flags);                  \
+                                                                       \
+       its_wait_for_range_completion(its, cmd, next_cmd);              \
+}
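+
+/*
+ * Expansion sketch (for illustration, not part of the patch):
+ *   BUILD_SINGLE_CMD_FUNC(its_send_single_command, its_cmd_builder_t,
+ *                         struct its_collection, its_build_sync_cmd)
+ * emits a function that queues one command, chains a SYNC aimed at
+ * the collection the builder returned, and then waits for the queue
+ * to drain; the vcommand flavour does the same with VSYNC and a vPE.
+ */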
+
+static void its_build_sync_cmd(struct its_cmd_block *sync_cmd,
+                              struct its_collection *sync_col)
+{
+       its_encode_cmd(sync_cmd, GITS_CMD_SYNC);
+       its_encode_target(sync_cmd, sync_col->target_address);
+
+       its_fixup_cmd(sync_cmd);
+}
+
+static BUILD_SINGLE_CMD_FUNC(its_send_single_command, its_cmd_builder_t,
+                            struct its_collection, its_build_sync_cmd)
+
+static void its_build_vsync_cmd(struct its_cmd_block *sync_cmd,
+                               struct its_vpe *sync_vpe)
+{
+       its_encode_cmd(sync_cmd, GITS_CMD_VSYNC);
+       its_encode_vpeid(sync_cmd, sync_vpe->vpe_id);
+
+       its_fixup_cmd(sync_cmd);
+}
+
+static BUILD_SINGLE_CMD_FUNC(its_send_single_vcommand, its_cmd_vbuilder_t,
+                            struct its_vpe, its_build_vsync_cmd)
+
+static void its_send_int(struct its_device *dev, u32 event_id)
 {
-       struct its_cmd_block *cmd, *sync_cmd, *next_cmd;
-       struct its_collection *sync_col;
-       unsigned long flags;
+       struct its_cmd_desc desc;
 
-       raw_spin_lock_irqsave(&its->lock, flags);
+       desc.its_int_cmd.dev = dev;
+       desc.its_int_cmd.event_id = event_id;
 
-       cmd = its_allocate_entry(its);
-       if (!cmd) {             /* We're soooooo screewed... */
-               pr_err_ratelimited("ITS can't allocate, dropping command\n");
-               raw_spin_unlock_irqrestore(&its->lock, flags);
-               return;
-       }
-       sync_col = builder(cmd, desc);
-       its_flush_cmd(its, cmd);
+       its_send_single_command(dev->its, its_build_int_cmd, &desc);
+}
 
-       if (sync_col) {
-               sync_cmd = its_allocate_entry(its);
-               if (!sync_cmd) {
-                       pr_err_ratelimited("ITS can't SYNC, skipping\n");
-                       goto post;
-               }
-               its_encode_cmd(sync_cmd, GITS_CMD_SYNC);
-               its_encode_target(sync_cmd, sync_col->target_address);
-               its_fixup_cmd(sync_cmd);
-               its_flush_cmd(its, sync_cmd);
-       }
+static void its_send_clear(struct its_device *dev, u32 event_id)
+{
+       struct its_cmd_desc desc;
 
-post:
-       next_cmd = its_post_commands(its);
-       raw_spin_unlock_irqrestore(&its->lock, flags);
+       desc.its_clear_cmd.dev = dev;
+       desc.its_clear_cmd.event_id = event_id;
 
-       its_wait_for_range_completion(its, cmd, next_cmd);
+       its_send_single_command(dev->its, its_build_clear_cmd, &desc);
 }
 
 static void its_send_inv(struct its_device *dev, u32 event_id)
@@ -577,6 +868,106 @@ static void its_send_invall(struct its_node *its, struct its_collection *col)
        its_send_single_command(its, its_build_invall_cmd, &desc);
 }
 
+static void its_send_vmapti(struct its_device *dev, u32 id)
+{
+       struct its_vlpi_map *map = &dev->event_map.vlpi_maps[id];
+       struct its_cmd_desc desc;
+
+       desc.its_vmapti_cmd.vpe = map->vpe;
+       desc.its_vmapti_cmd.dev = dev;
+       desc.its_vmapti_cmd.virt_id = map->vintid;
+       desc.its_vmapti_cmd.event_id = id;
+       desc.its_vmapti_cmd.db_enabled = map->db_enabled;
+
+       its_send_single_vcommand(dev->its, its_build_vmapti_cmd, &desc);
+}
+
+static void its_send_vmovi(struct its_device *dev, u32 id)
+{
+       struct its_vlpi_map *map = &dev->event_map.vlpi_maps[id];
+       struct its_cmd_desc desc;
+
+       desc.its_vmovi_cmd.vpe = map->vpe;
+       desc.its_vmovi_cmd.dev = dev;
+       desc.its_vmovi_cmd.event_id = id;
+       desc.its_vmovi_cmd.db_enabled = map->db_enabled;
+
+       its_send_single_vcommand(dev->its, its_build_vmovi_cmd, &desc);
+}
+
+static void its_send_vmapp(struct its_vpe *vpe, bool valid)
+{
+       struct its_cmd_desc desc;
+       struct its_node *its;
+
+       desc.its_vmapp_cmd.vpe = vpe;
+       desc.its_vmapp_cmd.valid = valid;
+
+       list_for_each_entry(its, &its_nodes, entry) {
+               if (!its->is_v4)
+                       continue;
+
+               desc.its_vmapp_cmd.col = &its->collections[vpe->col_idx];
+               its_send_single_vcommand(its, its_build_vmapp_cmd, &desc);
+       }
+}
+
+static void its_send_vmovp(struct its_vpe *vpe)
+{
+       struct its_cmd_desc desc;
+       struct its_node *its;
+       unsigned long flags;
+       int col_id = vpe->col_idx;
+
+       desc.its_vmovp_cmd.vpe = vpe;
+       desc.its_vmovp_cmd.its_list = (u16)its_list_map;
+
+       if (!its_list_map) {
+               its = list_first_entry(&its_nodes, struct its_node, entry);
+               desc.its_vmovp_cmd.seq_num = 0;
+               desc.its_vmovp_cmd.col = &its->collections[col_id];
+               its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
+               return;
+       }
+
+       /*
+        * Yet another marvel of the architecture. If using the
+        * its_list "feature", we need to make sure that all ITSs
+        * receive all VMOVP commands in the same order. The only way
+        * to guarantee this is to make vmovp a serialization point.
+        *
+        * Wall <-- Head.
+        */
+       raw_spin_lock_irqsave(&vmovp_lock, flags);
+
+       desc.its_vmovp_cmd.seq_num = vmovp_seq_num++;
+
+       /* Emit VMOVPs */
+       list_for_each_entry(its, &its_nodes, entry) {
+               if (!its->is_v4)
+                       continue;
+
+               desc.its_vmovp_cmd.col = &its->collections[col_id];
+               its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
+       }
+
+       raw_spin_unlock_irqrestore(&vmovp_lock, flags);
+}
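+
+/*
+ * Illustrative only: with two v4 ITSs, both VMOVP commands for one
+ * move share a single vmovp_seq_num taken under vmovp_lock, so every
+ * ITS observes concurrent moves in the same global order even though
+ * the commands sit in separate queues.
+ */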
+
+static void its_send_vinvall(struct its_vpe *vpe)
+{
+       struct its_cmd_desc desc;
+       struct its_node *its;
+
+       desc.its_vinvall_cmd.vpe = vpe;
+
+       list_for_each_entry(its, &its_nodes, entry) {
+               if (!its->is_v4)
+                       continue;
+               its_send_single_vcommand(its, its_build_vinvall_cmd, &desc);
+       }
+}
+
 /*
  * irqchip functions - assumes MSI, mostly.
  */
@@ -587,17 +978,26 @@ static inline u32 its_get_event_id(struct irq_data *d)
        return d->hwirq - its_dev->event_map.lpi_base;
 }
 
-static void lpi_set_config(struct irq_data *d, bool enable)
+static void lpi_write_config(struct irq_data *d, u8 clr, u8 set)
 {
-       struct its_device *its_dev = irq_data_get_irq_chip_data(d);
-       irq_hw_number_t hwirq = d->hwirq;
-       u32 id = its_get_event_id(d);
-       u8 *cfg = page_address(gic_rdists->prop_page) + hwirq - 8192;
+       irq_hw_number_t hwirq;
+       struct page *prop_page;
+       u8 *cfg;
 
-       if (enable)
-               *cfg |= LPI_PROP_ENABLED;
-       else
-               *cfg &= ~LPI_PROP_ENABLED;
+       if (irqd_is_forwarded_to_vcpu(d)) {
+               struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+               u32 event = its_get_event_id(d);
+
+               prop_page = its_dev->event_map.vm->vprop_page;
+               hwirq = its_dev->event_map.vlpi_maps[event].vintid;
+       } else {
+               prop_page = gic_rdists->prop_page;
+               hwirq = d->hwirq;
+       }
+
+       cfg = page_address(prop_page) + hwirq - 8192;
+       *cfg &= ~clr;
+       *cfg |= set | LPI_PROP_GROUP1;
 
        /*
         * Make the above write visible to the redistributors.
@@ -608,17 +1008,53 @@ static void lpi_set_config(struct irq_data *d, bool enable)
                gic_flush_dcache_to_poc(cfg, sizeof(*cfg));
        else
                dsb(ishst);
-       its_send_inv(its_dev, id);
+}
+
+static void lpi_update_config(struct irq_data *d, u8 clr, u8 set)
+{
+       struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+
+       lpi_write_config(d, clr, set);
+       its_send_inv(its_dev, its_get_event_id(d));
+}
+
+static void its_vlpi_set_doorbell(struct irq_data *d, bool enable)
+{
+       struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+       u32 event = its_get_event_id(d);
+
+       if (its_dev->event_map.vlpi_maps[event].db_enabled == enable)
+               return;
+
+       its_dev->event_map.vlpi_maps[event].db_enabled = enable;
+
+       /*
+        * More fun with the architecture:
+        *
+        * Ideally, we'd issue a VMAPTI to set the doorbell to its LPI
+        * value or to 1023, depending on the enable bit. But that
+        * would be issuing a mapping for an /existing/ DevID+EventID
+        * pair, which is UNPREDICTABLE. Instead, let's issue a VMOVI
+        * to the /same/ vPE, using this opportunity to adjust the
+        * doorbell. Mouahahahaha. We loves it, Precious.
+        */
+       its_send_vmovi(its_dev, event);
 }
 
 static void its_mask_irq(struct irq_data *d)
 {
-       lpi_set_config(d, false);
+       if (irqd_is_forwarded_to_vcpu(d))
+               its_vlpi_set_doorbell(d, false);
+
+       lpi_update_config(d, LPI_PROP_ENABLED, 0);
 }
 
 static void its_unmask_irq(struct irq_data *d)
 {
-       lpi_set_config(d, true);
+       if (irqd_is_forwarded_to_vcpu(d))
+               its_vlpi_set_doorbell(d, true);
+
+       lpi_update_config(d, 0, LPI_PROP_ENABLED);
 }
 
 static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
@@ -630,6 +1066,10 @@ static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
        struct its_collection *target_col;
        u32 id = its_get_event_id(d);
 
+       /* A forwarded interrupt should use irq_set_vcpu_affinity */
+       if (irqd_is_forwarded_to_vcpu(d))
+               return -EINVAL;
+
        /* lpi cannot be routed to a redistributor that is on a foreign node */
        if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
                if (its_dev->its->numa_node >= 0) {
@@ -649,6 +1089,7 @@ static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
                target_col = &its_dev->its->collections[cpu];
                its_send_movi(its_dev, target_col, id);
                its_dev->event_map.col_map[id] = cpu;
+               irq_data_update_effective_affinity(d, cpumask_of(cpu));
        }
 
        return IRQ_SET_MASK_OK_DONE;
@@ -670,6 +1111,179 @@ static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
        iommu_dma_map_msi_msg(d->irq, msg);
 }
 
+static int its_irq_set_irqchip_state(struct irq_data *d,
+                                    enum irqchip_irq_state which,
+                                    bool state)
+{
+       struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+       u32 event = its_get_event_id(d);
+
+       if (which != IRQCHIP_STATE_PENDING)
+               return -EINVAL;
+
+       if (state)
+               its_send_int(its_dev, event);
+       else
+               its_send_clear(its_dev, event);
+
+       return 0;
+}
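+
+/*
+ * Illustrative caller (not part of this patch):
+ *   irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING, true);
+ * ends up here and emits an INT command for the matching
+ * DevID/EventID pair; passing false emits CLEAR instead.
+ */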
+
+static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info)
+{
+       struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+       u32 event = its_get_event_id(d);
+       int ret = 0;
+
+       if (!info->map)
+               return -EINVAL;
+
+       mutex_lock(&its_dev->event_map.vlpi_lock);
+
+       if (!its_dev->event_map.vm) {
+               struct its_vlpi_map *maps;
+
+               maps = kzalloc(sizeof(*maps) * its_dev->event_map.nr_lpis,
+                              GFP_KERNEL);
+               if (!maps) {
+                       ret = -ENOMEM;
+                       goto out;
+               }
+
+               its_dev->event_map.vm = info->map->vm;
+               its_dev->event_map.vlpi_maps = maps;
+       } else if (its_dev->event_map.vm != info->map->vm) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       /* Get our private copy of the mapping information */
+       its_dev->event_map.vlpi_maps[event] = *info->map;
+
+       if (irqd_is_forwarded_to_vcpu(d)) {
+               /* Already mapped, move it around */
+               its_send_vmovi(its_dev, event);
+       } else {
+               /* Drop the physical mapping */
+               its_send_discard(its_dev, event);
+
+               /* and install the virtual one */
+               its_send_vmapti(its_dev, event);
+               irqd_set_forwarded_to_vcpu(d);
+
+               /* Increment the number of VLPIs */
+               its_dev->event_map.nr_vlpis++;
+       }
+
+out:
+       mutex_unlock(&its_dev->event_map.vlpi_lock);
+       return ret;
+}
+
+static int its_vlpi_get(struct irq_data *d, struct its_cmd_info *info)
+{
+       struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+       u32 event = its_get_event_id(d);
+       int ret = 0;
+
+       mutex_lock(&its_dev->event_map.vlpi_lock);
+
+       if (!its_dev->event_map.vm ||
+           !its_dev->event_map.vlpi_maps[event].vm) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       /* Copy our mapping information to the incoming request */
+       *info->map = its_dev->event_map.vlpi_maps[event];
+
+out:
+       mutex_unlock(&its_dev->event_map.vlpi_lock);
+       return ret;
+}
+
+static int its_vlpi_unmap(struct irq_data *d)
+{
+       struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+       u32 event = its_get_event_id(d);
+       int ret = 0;
+
+       mutex_lock(&its_dev->event_map.vlpi_lock);
+
+       if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       /* Drop the virtual mapping */
+       its_send_discard(its_dev, event);
+
+       /* and restore the physical one */
+       irqd_clr_forwarded_to_vcpu(d);
+       its_send_mapti(its_dev, d->hwirq, event);
+       lpi_update_config(d, 0xff, (LPI_PROP_DEFAULT_PRIO |
+                                   LPI_PROP_ENABLED |
+                                   LPI_PROP_GROUP1));
+
+       /*
+        * Drop the refcount and make the device available again if
+        * this was the last VLPI.
+        */
+       if (!--its_dev->event_map.nr_vlpis) {
+               its_dev->event_map.vm = NULL;
+               kfree(its_dev->event_map.vlpi_maps);
+       }
+
+out:
+       mutex_unlock(&its_dev->event_map.vlpi_lock);
+       return ret;
+}
+
+static int its_vlpi_prop_update(struct irq_data *d, struct its_cmd_info *info)
+{
+       struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+
+       if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d))
+               return -EINVAL;
+
+       if (info->cmd_type == PROP_UPDATE_AND_INV_VLPI)
+               lpi_update_config(d, 0xff, info->config);
+       else
+               lpi_write_config(d, 0xff, info->config);
+       its_vlpi_set_doorbell(d, !!(info->config & LPI_PROP_ENABLED));
+
+       return 0;
+}
+
+static int its_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
+{
+       struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+       struct its_cmd_info *info = vcpu_info;
+
+       /* Need a v4 ITS */
+       if (!its_dev->its->is_v4)
+               return -EINVAL;
+
+       /* Unmap request? */
+       if (!info)
+               return its_vlpi_unmap(d);
+
+       switch (info->cmd_type) {
+       case MAP_VLPI:
+               return its_vlpi_map(d, info);
+
+       case GET_VLPI:
+               return its_vlpi_get(d, info);
+
+       case PROP_UPDATE_VLPI:
+       case PROP_UPDATE_AND_INV_VLPI:
+               return its_vlpi_prop_update(d, info);
+
+       default:
+               return -EINVAL;
+       }
+}
+
 static struct irq_chip its_irq_chip = {
        .name                   = "ITS",
        .irq_mask               = its_mask_irq,
@@ -677,6 +1291,8 @@ static struct irq_chip its_irq_chip = {
        .irq_eoi                = irq_chip_eoi_parent,
        .irq_set_affinity       = its_set_affinity,
        .irq_compose_msi_msg    = its_irq_compose_msi_msg,
+       .irq_set_irqchip_state  = its_irq_set_irqchip_state,
+       .irq_set_vcpu_affinity  = its_irq_set_vcpu_affinity,
 };
 
 /*
@@ -695,7 +1311,6 @@ static struct irq_chip its_irq_chip = {
 
 static unsigned long *lpi_bitmap;
 static u32 lpi_chunks;
-static u32 lpi_id_bits;
 static DEFINE_SPINLOCK(lpi_lock);
 
 static int its_lpi_to_chunk(int lpi)
@@ -766,16 +1381,15 @@ out:
        return bitmap;
 }
 
-static void its_lpi_free(struct event_lpi_map *map)
+static void its_lpi_free_chunks(unsigned long *bitmap, int base, int nr_ids)
 {
-       int base = map->lpi_base;
-       int nr_ids = map->nr_lpis;
        int lpi;
 
        spin_lock(&lpi_lock);
 
        for (lpi = base; lpi < (base + nr_ids); lpi += IRQS_PER_CHUNK) {
                int chunk = its_lpi_to_chunk(lpi);
+
                BUG_ON(chunk > lpi_chunks);
                if (test_bit(chunk, lpi_bitmap)) {
                        clear_bit(chunk, lpi_bitmap);
@@ -786,28 +1400,40 @@ static void its_lpi_free(struct event_lpi_map *map)
 
        spin_unlock(&lpi_lock);
 
-       kfree(map->lpi_map);
-       kfree(map->col_map);
+       kfree(bitmap);
 }
 
-/*
- * We allocate memory for PROPBASE to cover 2 ^ lpi_id_bits LPIs to
- * deal with (one configuration byte per interrupt). PENDBASE has to
- * be 64kB aligned (one bit per LPI, plus 8192 bits for SPI/PPI/SGI).
- */
-#define LPI_NRBITS             lpi_id_bits
-#define LPI_PROPBASE_SZ                ALIGN(BIT(LPI_NRBITS), SZ_64K)
-#define LPI_PENDBASE_SZ                ALIGN(BIT(LPI_NRBITS) / 8, SZ_64K)
+static struct page *its_allocate_prop_table(gfp_t gfp_flags)
+{
+       struct page *prop_page;
 
-#define LPI_PROP_DEFAULT_PRIO  0xa0
+       prop_page = alloc_pages(gfp_flags, get_order(LPI_PROPBASE_SZ));
+       if (!prop_page)
+               return NULL;
+
+       /* Priority 0xa0, Group-1, disabled */
+       memset(page_address(prop_page),
+              LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1,
+              LPI_PROPBASE_SZ);
+
+       /* Make sure the GIC will observe the written configuration */
+       gic_flush_dcache_to_poc(page_address(prop_page), LPI_PROPBASE_SZ);
+
+       return prop_page;
+}
+
+static void its_free_prop_table(struct page *prop_page)
+{
+       free_pages((unsigned long)page_address(prop_page),
+                  get_order(LPI_PROPBASE_SZ));
+}
 
 static int __init its_alloc_lpi_tables(void)
 {
        phys_addr_t paddr;
 
        lpi_id_bits = min_t(u32, gic_rdists->id_bits, ITS_MAX_LPI_NRBITS);
-       gic_rdists->prop_page = alloc_pages(GFP_NOWAIT,
-                                          get_order(LPI_PROPBASE_SZ));
+       gic_rdists->prop_page = its_allocate_prop_table(GFP_NOWAIT);
        if (!gic_rdists->prop_page) {
                pr_err("Failed to allocate PROPBASE\n");
                return -ENOMEM;
@@ -816,14 +1442,6 @@ static int __init its_alloc_lpi_tables(void)
        paddr = page_to_phys(gic_rdists->prop_page);
        pr_info("GIC: using LPI property table @%pa\n", &paddr);
 
-       /* Priority 0xa0, Group-1, disabled */
-       memset(page_address(gic_rdists->prop_page),
-              LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1,
-              LPI_PROPBASE_SZ);
-
-       /* Make sure the GIC will observe the written configuration */
-       gic_flush_dcache_to_poc(page_address(gic_rdists->prop_page), LPI_PROPBASE_SZ);
-
        return its_lpi_init(lpi_id_bits);
 }
 
@@ -962,10 +1580,13 @@ retry_baser:
        return 0;
 }
 
-static bool its_parse_baser_device(struct its_node *its, struct its_baser *baser,
-                                  u32 psz, u32 *order)
+static bool its_parse_indirect_baser(struct its_node *its,
+                                    struct its_baser *baser,
+                                    u32 psz, u32 *order)
 {
-       u64 esz = GITS_BASER_ENTRY_SIZE(its_read_baser(its, baser));
+       u64 tmp = its_read_baser(its, baser);
+       u64 type = GITS_BASER_TYPE(tmp);
+       u64 esz = GITS_BASER_ENTRY_SIZE(tmp);
        u64 val = GITS_BASER_InnerShareable | GITS_BASER_RaWaWb;
        u32 ids = its->device_ids;
        u32 new_order = *order;
@@ -1004,8 +1625,9 @@ static bool its_parse_baser_device(struct its_node *its, struct its_baser *baser
        if (new_order >= MAX_ORDER) {
                new_order = MAX_ORDER - 1;
                ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / (int)esz);
-               pr_warn("ITS@%pa: Device Table too large, reduce ids %u->%u\n",
-                       &its->phys_base, its->device_ids, ids);
+               pr_warn("ITS@%pa: %s Table too large, reduce ids %u->%u\n",
+                       &its->phys_base, its_base_type_string[type],
+                       its->device_ids, ids);
        }
 
        *order = new_order;
@@ -1053,11 +1675,16 @@ static int its_alloc_tables(struct its_node *its)
                u32 order = get_order(psz);
                bool indirect = false;
 
-               if (type == GITS_BASER_TYPE_NONE)
+               switch (type) {
+               case GITS_BASER_TYPE_NONE:
                        continue;
 
-               if (type == GITS_BASER_TYPE_DEVICE)
-                       indirect = its_parse_baser_device(its, baser, psz, &order);
+               case GITS_BASER_TYPE_DEVICE:
+               case GITS_BASER_TYPE_VCPU:
+                       indirect = its_parse_indirect_baser(its, baser,
+                                                           psz, &order);
+                       break;
+               }
 
                err = its_setup_baser(its, baser, cache, shr, psz, order, indirect);
                if (err < 0) {
@@ -1084,6 +1711,30 @@ static int its_alloc_collections(struct its_node *its)
        return 0;
 }
 
+static struct page *its_allocate_pending_table(gfp_t gfp_flags)
+{
+       struct page *pend_page;
+       /*
+        * The pending pages have to be at least 64kB aligned,
+        * hence the 'max(LPI_PENDBASE_SZ, SZ_64K)' below.
+        */
+       pend_page = alloc_pages(gfp_flags | __GFP_ZERO,
+                               get_order(max_t(u32, LPI_PENDBASE_SZ, SZ_64K)));
+       if (!pend_page)
+               return NULL;
+
+       /* Make sure the GIC will observe the zero-ed page */
+       gic_flush_dcache_to_poc(page_address(pend_page), LPI_PENDBASE_SZ);
+
+       return pend_page;
+}
+
+static void its_free_pending_table(struct page *pt)
+{
+       free_pages((unsigned long)page_address(pt),
+                  get_order(max_t(u32, LPI_PENDBASE_SZ, SZ_64K)));
+}
+
 static void its_cpu_init_lpis(void)
 {
        void __iomem *rbase = gic_data_rdist_rd_base();
@@ -1094,21 +1745,14 @@ static void its_cpu_init_lpis(void)
        pend_page = gic_data_rdist()->pend_page;
        if (!pend_page) {
                phys_addr_t paddr;
-               /*
-                * The pending pages have to be at least 64kB aligned,
-                * hence the 'max(LPI_PENDBASE_SZ, SZ_64K)' below.
-                */
-               pend_page = alloc_pages(GFP_NOWAIT | __GFP_ZERO,
-                                       get_order(max_t(u32, LPI_PENDBASE_SZ, SZ_64K)));
+
+               pend_page = its_allocate_pending_table(GFP_NOWAIT);
                if (!pend_page) {
                        pr_err("Failed to allocate PENDBASE for CPU%d\n",
                               smp_processor_id());
                        return;
                }
 
-               /* Make sure the GIC will observe the zero-ed page */
-               gic_flush_dcache_to_poc(page_address(pend_page), LPI_PENDBASE_SZ);
-
                paddr = page_to_phys(pend_page);
                pr_info("CPU%d: using LPI pending table @%pa\n",
                        smp_processor_id(), &paddr);
@@ -1259,26 +1903,19 @@ static struct its_baser *its_get_baser(struct its_node *its, u32 type)
        return NULL;
 }
 
-static bool its_alloc_device_table(struct its_node *its, u32 dev_id)
+static bool its_alloc_table_entry(struct its_baser *baser, u32 id)
 {
-       struct its_baser *baser;
        struct page *page;
        u32 esz, idx;
        __le64 *table;
 
-       baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE);
-
-       /* Don't allow device id that exceeds ITS hardware limit */
-       if (!baser)
-               return (ilog2(dev_id) < its->device_ids);
-
        /* Don't allow device id that exceeds single, flat table limit */
        esz = GITS_BASER_ENTRY_SIZE(baser->val);
        if (!(baser->val & GITS_BASER_INDIRECT))
-               return (dev_id < (PAGE_ORDER_TO_SIZE(baser->order) / esz));
+               return (id < (PAGE_ORDER_TO_SIZE(baser->order) / esz));
 
        /* Compute 1st level table index & check if that exceeds table limit */
-       idx = dev_id >> ilog2(baser->psz / esz);
+       idx = id >> ilog2(baser->psz / esz);
        if (idx >= (PAGE_ORDER_TO_SIZE(baser->order) / GITS_LVL1_ENTRY_SIZE))
                return false;
 
@@ -1294,24 +1931,65 @@ static bool its_alloc_device_table(struct its_node *its, u32 dev_id)
                if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
                        gic_flush_dcache_to_poc(page_address(page), baser->psz);
 
-               table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID);
+               table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID);
+
+               /* Flush Lvl1 entry to PoC if hw doesn't support coherency */
+               if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
+                       gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE);
+
+               /* Ensure updated table contents are visible to ITS hardware */
+               dsb(sy);
+       }
+
+       return true;
+}
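+
+/*
+ * Worked example (illustrative): with a 64kB page size and 8-byte
+ * entries, one L2 page covers 64k / 8 = 8192 IDs, so ID 20000 maps
+ * to L1 index 20000 >> 13 == 2.
+ */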
+
+static bool its_alloc_device_table(struct its_node *its, u32 dev_id)
+{
+       struct its_baser *baser;
+
+       baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE);
+
+       /* Don't allow device id that exceeds ITS hardware limit */
+       if (!baser)
+               return (ilog2(dev_id) < its->device_ids);
+
+       return its_alloc_table_entry(baser, dev_id);
+}
+
+static bool its_alloc_vpe_table(u32 vpe_id)
+{
+       struct its_node *its;
+
+       /*
+        * Make sure the L2 tables are allocated on *all* v4 ITSs. We
+        * could try and only do it on ITSs corresponding to devices
+        * that have interrupts targeted at this VPE, but the
+        * complexity becomes crazy (and you have tons of memory
+        * anyway, right?).
+        */
+       list_for_each_entry(its, &its_nodes, entry) {
+               struct its_baser *baser;
+
+               if (!its->is_v4)
+                       continue;
 
-               /* Flush Lvl1 entry to PoC if hw doesn't support coherency */
-               if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
-                       gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE);
+               baser = its_get_baser(its, GITS_BASER_TYPE_VCPU);
+               if (!baser)
+                       return false;
 
-               /* Ensure updated table contents are visible to ITS hardware */
-               dsb(sy);
+               if (!its_alloc_table_entry(baser, vpe_id))
+                       return false;
        }
 
        return true;
 }
 
 static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
-                                           int nvecs)
+                                           int nvecs, bool alloc_lpis)
 {
        struct its_device *dev;
-       unsigned long *lpi_map;
+       unsigned long *lpi_map = NULL;
        unsigned long flags;
        u16 *col_map = NULL;
        void *itt;
@@ -1333,11 +2011,18 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
        sz = nr_ites * its->ite_size;
        sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
        itt = kzalloc(sz, GFP_KERNEL);
-       lpi_map = its_lpi_alloc_chunks(nvecs, &lpi_base, &nr_lpis);
-       if (lpi_map)
-               col_map = kzalloc(sizeof(*col_map) * nr_lpis, GFP_KERNEL);
+       if (alloc_lpis) {
+               lpi_map = its_lpi_alloc_chunks(nvecs, &lpi_base, &nr_lpis);
+               if (lpi_map)
+                       col_map = kzalloc(sizeof(*col_map) * nr_lpis,
+                                         GFP_KERNEL);
+       } else {
+               col_map = kzalloc(sizeof(*col_map) * nr_ites, GFP_KERNEL);
+               nr_lpis = 0;
+               lpi_base = 0;
+       }
 
-       if (!dev || !itt || !lpi_map || !col_map) {
+       if (!dev || !itt || !col_map || (!lpi_map && alloc_lpis)) {
                kfree(dev);
                kfree(itt);
                kfree(lpi_map);
@@ -1354,6 +2039,7 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
        dev->event_map.col_map = col_map;
        dev->event_map.lpi_base = lpi_base;
        dev->event_map.nr_lpis = nr_lpis;
+       mutex_init(&dev->event_map.vlpi_lock);
        dev->device_id = dev_id;
        INIT_LIST_HEAD(&dev->entry);
 
@@ -1412,6 +2098,16 @@ static int its_msi_prepare(struct irq_domain *domain, struct device *dev,
        msi_info = msi_get_domain_info(domain);
        its = msi_info->data;
 
+       if (!gic_rdists->has_direct_lpi &&
+           vpe_proxy.dev &&
+           vpe_proxy.dev->its == its &&
+           dev_id == vpe_proxy.dev->device_id) {
+               /* Bad luck. Get yourself a better implementation */
+               WARN_ONCE(1, "DevId %x clashes with GICv4 VPE proxy device\n",
+                         dev_id);
+               return -EINVAL;
+       }
+
        its_dev = its_find_device(its, dev_id);
        if (its_dev) {
                /*
@@ -1423,7 +2119,7 @@ static int its_msi_prepare(struct irq_domain *domain, struct device *dev,
                goto out;
        }
 
-       its_dev = its_create_device(its, dev_id, nvec);
+       its_dev = its_create_device(its, dev_id, nvec, true);
        if (!its_dev)
                return -ENOMEM;
 
@@ -1481,6 +2177,7 @@ static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
 
                irq_domain_set_hwirq_and_chip(domain, virq + i,
                                              hwirq, &its_irq_chip, its_dev);
+               irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq + i)));
                pr_debug("ID:%d pID:%d vID:%d\n",
                         (int)(hwirq - its_dev->event_map.lpi_base),
                         (int) hwirq, virq + i);
@@ -1495,13 +2192,16 @@ static void its_irq_domain_activate(struct irq_domain *domain,
        struct its_device *its_dev = irq_data_get_irq_chip_data(d);
        u32 event = its_get_event_id(d);
        const struct cpumask *cpu_mask = cpu_online_mask;
+       int cpu;
 
        /* get the cpu_mask of local node */
        if (its_dev->its->numa_node >= 0)
                cpu_mask = cpumask_of_node(its_dev->its->numa_node);
 
        /* Bind the LPI to the first possible CPU */
-       its_dev->event_map.col_map[event] = cpumask_first(cpu_mask);
+       cpu = cpumask_first(cpu_mask);
+       its_dev->event_map.col_map[event] = cpu;
+       irq_data_update_effective_affinity(d, cpumask_of(cpu));
 
        /* Map the GIC IRQ and event to the device */
        its_send_mapti(its_dev, d->hwirq, event);
@@ -1539,7 +2239,10 @@ static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
        /* If all interrupts have been freed, start mopping the floor */
        if (bitmap_empty(its_dev->event_map.lpi_map,
                         its_dev->event_map.nr_lpis)) {
-               its_lpi_free(&its_dev->event_map);
+               its_lpi_free_chunks(its_dev->event_map.lpi_map,
+                                   its_dev->event_map.lpi_base,
+                                   its_dev->event_map.nr_lpis);
+               kfree(its_dev->event_map.col_map);
 
                /* Unmap device/itt */
                its_send_mapd(its_dev, 0);
@@ -1556,6 +2259,451 @@ static const struct irq_domain_ops its_domain_ops = {
        .deactivate             = its_irq_domain_deactivate,
 };
 
+/*
+ * This is insane.
+ *
+ * If a GICv4 doesn't implement Direct LPIs (which is extremely
+ * likely), the only way to perform an invalidate is to use a fake
+ * device to issue an INV command, implying that the LPI has first
+ * been mapped to some event on that device. Since this is not exactly
+ * cheap, we try to keep that mapping around as long as possible, and
+ * only issue an UNMAP if we're short on available slots.
+ *
+ * Broken by design(tm).
+ */
+static void its_vpe_db_proxy_unmap_locked(struct its_vpe *vpe)
+{
+       /* Already unmapped? */
+       if (vpe->vpe_proxy_event == -1)
+               return;
+
+       its_send_discard(vpe_proxy.dev, vpe->vpe_proxy_event);
+       vpe_proxy.vpes[vpe->vpe_proxy_event] = NULL;
+
+       /*
+        * We don't track empty slots at all, so let's move the
+        * next_victim pointer if we can quickly reuse that slot
+        * instead of nuking an existing entry. Not clear that this is
+        * always a win though, and this might just generate a ripple
+        * effect... Let's just hope VPEs don't migrate too often.
+        */
+       if (vpe_proxy.vpes[vpe_proxy.next_victim])
+               vpe_proxy.next_victim = vpe->vpe_proxy_event;
+
+       vpe->vpe_proxy_event = -1;
+}
+
+static void its_vpe_db_proxy_unmap(struct its_vpe *vpe)
+{
+       if (!gic_rdists->has_direct_lpi) {
+               unsigned long flags;
+
+               raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
+               its_vpe_db_proxy_unmap_locked(vpe);
+               raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
+       }
+}
+
+static void its_vpe_db_proxy_map_locked(struct its_vpe *vpe)
+{
+       /* Already mapped? */
+       if (vpe->vpe_proxy_event != -1)
+               return;
+
+       /* This slot was already allocated. Kick the other VPE out. */
+       if (vpe_proxy.vpes[vpe_proxy.next_victim])
+               its_vpe_db_proxy_unmap_locked(vpe_proxy.vpes[vpe_proxy.next_victim]);
+
+       /* Map the new VPE instead */
+       vpe_proxy.vpes[vpe_proxy.next_victim] = vpe;
+       vpe->vpe_proxy_event = vpe_proxy.next_victim;
+       vpe_proxy.next_victim = (vpe_proxy.next_victim + 1) % vpe_proxy.dev->nr_ites;
+
+       vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = vpe->col_idx;
+       its_send_mapti(vpe_proxy.dev, vpe->vpe_db_lpi, vpe->vpe_proxy_event);
+}
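+
+/*
+ * Illustrative: with nr_ites == 4 proxy slots, next_victim cycles
+ * 0 -> 1 -> 2 -> 3 -> 0, so mapping a fifth vPE evicts whichever
+ * vPE currently owns the slot next_victim points at.
+ */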
+
+static void its_vpe_db_proxy_move(struct its_vpe *vpe, int from, int to)
+{
+       unsigned long flags;
+       struct its_collection *target_col;
+
+       if (gic_rdists->has_direct_lpi) {
+               void __iomem *rdbase;
+
+               rdbase = per_cpu_ptr(gic_rdists->rdist, from)->rd_base;
+               gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
+               while (gic_read_lpir(rdbase + GICR_SYNCR) & 1)
+                       cpu_relax();
+
+               return;
+       }
+
+       raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
+
+       its_vpe_db_proxy_map_locked(vpe);
+
+       target_col = &vpe_proxy.dev->its->collections[to];
+       its_send_movi(vpe_proxy.dev, target_col, vpe->vpe_proxy_event);
+       vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = to;
+
+       raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
+}
+
+static int its_vpe_set_affinity(struct irq_data *d,
+                               const struct cpumask *mask_val,
+                               bool force)
+{
+       struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+       int cpu = cpumask_first(mask_val);
+
+       /*
+        * Changing affinity is mega expensive, so let's be as lazy as
+        * we can and only do it if we really have to. Also, if mapped
+        * into the proxy device, we need to move the doorbell
+        * interrupt to its new location.
+        */
+       if (vpe->col_idx != cpu) {
+               int from = vpe->col_idx;
+
+               vpe->col_idx = cpu;
+               its_send_vmovp(vpe);
+               its_vpe_db_proxy_move(vpe, from, cpu);
+       }
+
+       return IRQ_SET_MASK_OK_DONE;
+}
+
+static void its_vpe_schedule(struct its_vpe *vpe)
+{
+       void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
+       u64 val;
+
+       /* Schedule the VPE */
+       val  = virt_to_phys(page_address(vpe->its_vm->vprop_page)) &
+               GENMASK_ULL(51, 12);
+       val |= (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
+       val |= GICR_VPROPBASER_RaWb;
+       val |= GICR_VPROPBASER_InnerShareable;
+       gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
+
+       val  = virt_to_phys(page_address(vpe->vpt_page)) &
+               GENMASK_ULL(51, 16);
+       val |= GICR_VPENDBASER_RaWaWb;
+       val |= GICR_VPENDBASER_NonShareable;
+       /*
+        * There is no good way of finding out if the pending table is
+        * empty as we can race against the doorbell interrupt very
+        * easily. So in the end, vpe->pending_last is only an
+        * indication that the vcpu has something pending, not one
+        * that the pending table is empty. A good implementation
+        * would be able to read its coarse map pretty quickly anyway,
+        * making this a tolerable issue.
+        */
+       val |= GICR_VPENDBASER_PendingLast;
+       val |= vpe->idai ? GICR_VPENDBASER_IDAI : 0;
+       val |= GICR_VPENDBASER_Valid;
+       gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
+}
+
+static void its_vpe_deschedule(struct its_vpe *vpe)
+{
+       void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
+       u32 count = 1000000;    /* 1s! */
+       bool clean;
+       u64 val;
+
+       /* We're being scheduled out */
+       val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
+       val &= ~GICR_VPENDBASER_Valid;
+       gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
+
+       do {
+               val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
+               clean = !(val & GICR_VPENDBASER_Dirty);
+               if (!clean) {
+                       count--;
+                       cpu_relax();
+                       udelay(1);
+               }
+       } while (!clean && count);
+
+       if (unlikely(!clean && !count)) {
+               pr_err_ratelimited("ITS virtual pending table not cleaning\n");
+               vpe->idai = false;
+               vpe->pending_last = true;
+       } else {
+               vpe->idai = !!(val & GICR_VPENDBASER_IDAI);
+               vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast);
+       }
+}
+
+static int its_vpe_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
+{
+       struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+       struct its_cmd_info *info = vcpu_info;
+
+       switch (info->cmd_type) {
+       case SCHEDULE_VPE:
+               its_vpe_schedule(vpe);
+               return 0;
+
+       case DESCHEDULE_VPE:
+               its_vpe_deschedule(vpe);
+               return 0;
+
+       case INVALL_VPE:
+               its_send_vinvall(vpe);
+               return 0;
+
+       default:
+               return -EINVAL;
+       }
+}
+
+static void its_vpe_send_cmd(struct its_vpe *vpe,
+                            void (*cmd)(struct its_device *, u32))
+{
+       unsigned long flags;
+
+       raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
+
+       its_vpe_db_proxy_map_locked(vpe);
+       cmd(vpe_proxy.dev, vpe->vpe_proxy_event);
+
+       raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
+}
+
+static void its_vpe_send_inv(struct irq_data *d)
+{
+       struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+
+       if (gic_rdists->has_direct_lpi) {
+               void __iomem *rdbase;
+
+               rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
+               gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_INVLPIR);
+               while (gic_read_lpir(rdbase + GICR_SYNCR) & 1)
+                       cpu_relax();
+       } else {
+               its_vpe_send_cmd(vpe, its_send_inv);
+       }
+}
+
+static void its_vpe_mask_irq(struct irq_data *d)
+{
+       /*
+        * We need to mask the LPI, which is described by the parent
+        * irq_data. Instead of calling into the parent (which won't
+        * exactly do the right thing), let's simply use the
+        * parent_data pointer. Yes, I'm naughty.
+        */
+       lpi_write_config(d->parent_data, LPI_PROP_ENABLED, 0);
+       its_vpe_send_inv(d);
+}
+
+static void its_vpe_unmask_irq(struct irq_data *d)
+{
+       /* Same hack as above... */
+       lpi_write_config(d->parent_data, 0, LPI_PROP_ENABLED);
+       its_vpe_send_inv(d);
+}
+
+static int its_vpe_set_irqchip_state(struct irq_data *d,
+                                    enum irqchip_irq_state which,
+                                    bool state)
+{
+       struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+
+       if (which != IRQCHIP_STATE_PENDING)
+               return -EINVAL;
+
+       if (gic_rdists->has_direct_lpi) {
+               void __iomem *rdbase;
+
+               rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
+               if (state) {
+                       gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_SETLPIR);
+               } else {
+                       gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
+                       while (gic_read_lpir(rdbase + GICR_SYNCR) & 1)
+                               cpu_relax();
+               }
+       } else {
+               if (state)
+                       its_vpe_send_cmd(vpe, its_send_int);
+               else
+                       its_vpe_send_cmd(vpe, its_send_clear);
+       }
+
+       return 0;
+}
+
+static struct irq_chip its_vpe_irq_chip = {
+       .name                   = "GICv4-vpe",
+       .irq_mask               = its_vpe_mask_irq,
+       .irq_unmask             = its_vpe_unmask_irq,
+       .irq_eoi                = irq_chip_eoi_parent,
+       .irq_set_affinity       = its_vpe_set_affinity,
+       .irq_set_irqchip_state  = its_vpe_set_irqchip_state,
+       .irq_set_vcpu_affinity  = its_vpe_set_vcpu_affinity,
+};
+
+static int its_vpe_id_alloc(void)
+{
+       return ida_simple_get(&its_vpeid_ida, 0, 1 << 16, GFP_KERNEL);
+}
+
+static void its_vpe_id_free(u16 id)
+{
+       ida_simple_remove(&its_vpeid_ida, id);
+}
+
+static int its_vpe_init(struct its_vpe *vpe)
+{
+       struct page *vpt_page;
+       int vpe_id;
+
+       /* Allocate vpe_id */
+       vpe_id = its_vpe_id_alloc();
+       if (vpe_id < 0)
+               return vpe_id;
+
+       /* Allocate VPT */
+       vpt_page = its_allocate_pending_table(GFP_KERNEL);
+       if (!vpt_page) {
+               its_vpe_id_free(vpe_id);
+               return -ENOMEM;
+       }
+
+       if (!its_alloc_vpe_table(vpe_id)) {
+               its_vpe_id_free(vpe_id);
+               /* vpe->vpt_page is not set yet; free the local pointer */
+               its_free_pending_table(vpt_page);
+               return -ENOMEM;
+       }
+
+       vpe->vpe_id = vpe_id;
+       vpe->vpt_page = vpt_page;
+       vpe->vpe_proxy_event = -1;
+
+       return 0;
+}
+
+static void its_vpe_teardown(struct its_vpe *vpe)
+{
+       its_vpe_db_proxy_unmap(vpe);
+       its_vpe_id_free(vpe->vpe_id);
+       its_free_pending_table(vpe->vpt_page);
+}
+
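+/*
+ * Tearing down the last VPE of a VM also releases the per-VM
+ * resources: the doorbell LPI range and the virtual property table.
+ */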
+static void its_vpe_irq_domain_free(struct irq_domain *domain,
+                                   unsigned int virq,
+                                   unsigned int nr_irqs)
+{
+       struct its_vm *vm = domain->host_data;
+       int i;
+
+       irq_domain_free_irqs_parent(domain, virq, nr_irqs);
+
+       for (i = 0; i < nr_irqs; i++) {
+               struct irq_data *data = irq_domain_get_irq_data(domain,
+                                                               virq + i);
+               struct its_vpe *vpe = irq_data_get_irq_chip_data(data);
+
+               BUG_ON(vm != vpe->its_vm);
+
+               clear_bit(data->hwirq, vm->db_bitmap);
+               its_vpe_teardown(vpe);
+               irq_domain_reset_irq_data(data);
+       }
+
+       if (bitmap_empty(vm->db_bitmap, vm->nr_db_lpis)) {
+               its_lpi_free_chunks(vm->db_bitmap, vm->db_lpi_base, vm->nr_db_lpis);
+               its_free_prop_table(vm->vprop_page);
+       }
+}
+
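+/*
+ * Allocate one doorbell LPI per VPE, all sharing a single property
+ * table for the whole VM. Each VPE that is fully set up gets its bit
+ * in db_bitmap, so that a mid-loop failure can be unwound precisely
+ * (see the error path below).
+ */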
+static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+                                   unsigned int nr_irqs, void *args)
+{
+       struct its_vm *vm = args;
+       unsigned long *bitmap;
+       struct page *vprop_page;
+       int base, nr_ids, i, err = 0;
+
+       BUG_ON(!vm);
+
+       bitmap = its_lpi_alloc_chunks(nr_irqs, &base, &nr_ids);
+       if (!bitmap)
+               return -ENOMEM;
+
+       if (nr_ids < nr_irqs) {
+               its_lpi_free_chunks(bitmap, base, nr_ids);
+               return -ENOMEM;
+       }
+
+       vprop_page = its_allocate_prop_table(GFP_KERNEL);
+       if (!vprop_page) {
+               its_lpi_free_chunks(bitmap, base, nr_ids);
+               return -ENOMEM;
+       }
+
+       vm->db_bitmap = bitmap;
+       vm->db_lpi_base = base;
+       vm->nr_db_lpis = nr_ids;
+       vm->vprop_page = vprop_page;
+
+       for (i = 0; i < nr_irqs; i++) {
+               vm->vpes[i]->vpe_db_lpi = base + i;
+               err = its_vpe_init(vm->vpes[i]);
+               if (err)
+                       break;
+               err = its_irq_gic_domain_alloc(domain, virq + i,
+                                              vm->vpes[i]->vpe_db_lpi);
+               if (err)
+                       break;
+               irq_domain_set_hwirq_and_chip(domain, virq + i, i,
+                                             &its_vpe_irq_chip, vm->vpes[i]);
+               set_bit(i, bitmap);
+       }
+
+       /*
+        * On error, free the i irqs that were fully set up. Freeing
+        * the last in-use doorbell also releases the shared bitmap
+        * and property table, so don't free those again here.
+        */
+       if (err)
+               its_vpe_irq_domain_free(domain, virq, i);
+
+       return err;
+}
+
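+/*
+ * Activation maps the VPE onto the first online CPU's redistributor
+ * (VMAPP) and flushes any stale cached state (VINVALL);
+ * deactivation simply unmaps it.
+ */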
+static void its_vpe_irq_domain_activate(struct irq_domain *domain,
+                                       struct irq_data *d)
+{
+       struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+
+       /* Map the VPE to the first online CPU */
+       vpe->col_idx = cpumask_first(cpu_online_mask);
+       its_send_vmapp(vpe, true);
+       its_send_vinvall(vpe);
+}
+
+static void its_vpe_irq_domain_deactivate(struct irq_domain *domain,
+                                         struct irq_data *d)
+{
+       struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+
+       its_send_vmapp(vpe, false);
+}
+
+static const struct irq_domain_ops its_vpe_domain_ops = {
+       .alloc                  = its_vpe_irq_domain_alloc,
+       .free                   = its_vpe_irq_domain_free,
+       .activate               = its_vpe_irq_domain_activate,
+       .deactivate             = its_vpe_irq_domain_deactivate,
+};
+
 static int its_force_quiescent(void __iomem *base)
 {
        u32 count = 1000000;    /* 1s */
@@ -1571,7 +2719,7 @@ static int its_force_quiescent(void __iomem *base)
                return 0;
 
        /* Disable the generation of all interrupts to this ITS */
-       val &= ~GITS_CTLR_ENABLE;
+       val &= ~(GITS_CTLR_ENABLE | GITS_CTLR_ImDe);
        writel_relaxed(val, base + GITS_CTLR);
 
        /* Poll GITS_CTLR and wait until ITS becomes quiescent */
@@ -1672,13 +2820,92 @@ static int its_init_domain(struct fwnode_handle *handle, struct its_node *its)
        return 0;
 }
 
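+/*
+ * Set up the doorbell proxy used when DirectLPI is absent: a single
+ * ITS device with one event per possible CPU. The count is rounded
+ * up to a power of two so that it matches the ITT sizing performed
+ * by its_create_device, which the BUG_ON below relies on.
+ */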
+static int its_init_vpe_domain(void)
+{
+       struct its_node *its;
+       u32 devid;
+       int entries;
+
+       if (gic_rdists->has_direct_lpi) {
+               pr_info("ITS: Using DirectLPI for VPE invalidation\n");
+               return 0;
+       }
+
+       /* Any ITS will do, even if not v4 */
+       its = list_first_entry(&its_nodes, struct its_node, entry);
+
+       entries = roundup_pow_of_two(nr_cpu_ids);
+       vpe_proxy.vpes = kcalloc(entries, sizeof(*vpe_proxy.vpes),
+                                GFP_KERNEL);
+       if (!vpe_proxy.vpes) {
+               pr_err("ITS: Can't allocate GICv4 proxy device array\n");
+               return -ENOMEM;
+       }
+
+       /* Use the last possible DevID */
+       devid = GENMASK(its->device_ids - 1, 0);
+       vpe_proxy.dev = its_create_device(its, devid, entries, false);
+       if (!vpe_proxy.dev) {
+               kfree(vpe_proxy.vpes);
+               pr_err("ITS: Can't allocate GICv4 proxy device\n");
+               return -ENOMEM;
+       }
+
+       BUG_ON(entries != vpe_proxy.dev->nr_ites);
+
+       raw_spin_lock_init(&vpe_proxy.lock);
+       vpe_proxy.next_victim = 0;
+       pr_info("ITS: Allocated DevID %x as GICv4 proxy device (%d slots)\n",
+               devid, vpe_proxy.dev->nr_ites);
+
+       return 0;
+}
+
+static int __init its_compute_its_list_map(struct resource *res,
+                                          void __iomem *its_base)
+{
+       int its_number;
+       u32 ctlr;
+
+       /*
+        * This is assumed to be done early enough that we're
+        * guaranteed to be single-threaded, hence no
+        * locking. Should this change, we should address
+        * this.
+        */
+       its_number = find_first_zero_bit(&its_list_map, ITS_LIST_MAX);
+       if (its_number >= ITS_LIST_MAX) {
+               pr_err("ITS@%pa: No ITSList entry available!\n",
+                      &res->start);
+               return -EINVAL;
+       }
+
+       ctlr = readl_relaxed(its_base + GITS_CTLR);
+       ctlr &= ~GITS_CTLR_ITS_NUMBER;
+       ctlr |= its_number << GITS_CTLR_ITS_NUMBER_SHIFT;
+       writel_relaxed(ctlr, its_base + GITS_CTLR);
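+       /*
+        * Read the number back: the ITS_Number field may be
+        * read-only on some implementations, in which case we adopt
+        * whatever value the hardware actually reports.
+        */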
+       ctlr = readl_relaxed(its_base + GITS_CTLR);
+       if ((ctlr & GITS_CTLR_ITS_NUMBER) != (its_number << GITS_CTLR_ITS_NUMBER_SHIFT)) {
+               its_number = ctlr & GITS_CTLR_ITS_NUMBER;
+               its_number >>= GITS_CTLR_ITS_NUMBER_SHIFT;
+       }
+
+       if (test_and_set_bit(its_number, &its_list_map)) {
+               pr_err("ITS@%pa: Duplicate ITSList entry %d\n",
+                      &res->start, its_number);
+               return -EINVAL;
+       }
+
+       return its_number;
+}
+
 static int __init its_probe_one(struct resource *res,
                                struct fwnode_handle *handle, int numa_node)
 {
        struct its_node *its;
        void __iomem *its_base;
-       u32 val;
-       u64 baser, tmp;
+       u32 val, ctlr;
+       u64 baser, tmp, typer;
        int err;
 
        its_base = ioremap(res->start, resource_size(res));
@@ -1711,9 +2938,24 @@ static int __init its_probe_one(struct resource *res,
        raw_spin_lock_init(&its->lock);
        INIT_LIST_HEAD(&its->entry);
        INIT_LIST_HEAD(&its->its_device_list);
+       typer = gic_read_typer(its_base + GITS_TYPER);
        its->base = its_base;
        its->phys_base = res->start;
-       its->ite_size = ((gic_read_typer(its_base + GITS_TYPER) >> 4) & 0xf) + 1;
+       its->ite_size = GITS_TYPER_ITT_ENTRY_SIZE(typer);
+       its->is_v4 = !!(typer & GITS_TYPER_VLPIS);
+       if (its->is_v4) {
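+               /*
+                * Without single-command VMOVP (GITS_TYPER.VMOVP
+                * clear), VPE moves must be signalled to every ITS
+                * using the ITSList mechanism, so this ITS needs its
+                * own slot in the 16-entry list.
+                */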
+               if (!(typer & GITS_TYPER_VMOVP)) {
+                       err = its_compute_its_list_map(res, its_base);
+                       if (err < 0)
+                               goto out_free_its;
+
+                       pr_info("ITS@%pa: Using ITS number %d\n",
+                               &res->start, err);
+               } else {
+                       pr_info("ITS@%pa: Single VMOVP capable\n", &res->start);
+               }
+       }
+
        its->numa_node = numa_node;
 
        its->cmd_base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
@@ -1760,7 +3002,11 @@ static int __init its_probe_one(struct resource *res,
        }
 
        gits_write_cwriter(0, its->base + GITS_CWRITER);
-       writel_relaxed(GITS_CTLR_ENABLE, its->base + GITS_CTLR);
+       ctlr = readl_relaxed(its->base + GITS_CTLR);
+       ctlr |= GITS_CTLR_ENABLE;
+       if (its->is_v4)
+               ctlr |= GITS_CTLR_ImDe;
+       writel_relaxed(ctlr, its->base + GITS_CTLR);
 
        err = its_init_domain(handle, its);
        if (err)
@@ -1816,13 +3062,13 @@ static int __init its_of_probe(struct device_node *node)
        for (np = of_find_matching_node(node, its_device_id); np;
             np = of_find_matching_node(np, its_device_id)) {
                if (!of_property_read_bool(np, "msi-controller")) {
-                       pr_warn("%s: no msi-controller property, ITS ignored\n",
-                               np->full_name);
+                       pr_warn("%pOF: no msi-controller property, ITS ignored\n",
+                               np);
                        continue;
                }
 
                if (of_address_to_resource(np, 0, &res)) {
-                       pr_warn("%s: no regs?\n", np->full_name);
+                       pr_warn("%pOF: no regs?\n", np);
                        continue;
                }
 
@@ -1984,6 +3230,9 @@ int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
                    struct irq_domain *parent_domain)
 {
        struct device_node *of_node;
+       struct its_node *its;
+       bool has_v4 = false;
+       int err;
 
        its_parent = parent_domain;
        of_node = to_of_node(handle);
@@ -1998,5 +3247,20 @@ int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
        }
 
        gic_rdists = rdists;
-       return its_alloc_lpi_tables();
+       err = its_alloc_lpi_tables();
+       if (err)
+               return err;
+
+       list_for_each_entry(its, &its_nodes, entry)
+               has_v4 |= its->is_v4;
+
+       if (has_v4 && rdists->has_vlpis) {
+               if (its_init_vpe_domain() ||
+                   its_init_v4(parent_domain, &its_vpe_domain_ops)) {
+                       rdists->has_vlpis = false;
+                       pr_err("ITS: Disabling GICv4 support\n");
+               }
+       }
+
+       return 0;
 }