drivers/irqchip/irq-gic-v3-its.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2013-2017 ARM Limited, All Rights Reserved.
4  * Author: Marc Zyngier <marc.zyngier@arm.com>
5  */
6
7 #include <linux/acpi.h>
8 #include <linux/acpi_iort.h>
9 #include <linux/bitfield.h>
10 #include <linux/bitmap.h>
11 #include <linux/cpu.h>
12 #include <linux/crash_dump.h>
13 #include <linux/delay.h>
14 #include <linux/efi.h>
15 #include <linux/genalloc.h>
16 #include <linux/interrupt.h>
17 #include <linux/iommu.h>
18 #include <linux/iopoll.h>
19 #include <linux/irqdomain.h>
20 #include <linux/list.h>
21 #include <linux/log2.h>
22 #include <linux/mem_encrypt.h>
23 #include <linux/memblock.h>
24 #include <linux/mm.h>
25 #include <linux/msi.h>
26 #include <linux/of.h>
27 #include <linux/of_address.h>
28 #include <linux/of_irq.h>
29 #include <linux/of_pci.h>
30 #include <linux/of_platform.h>
31 #include <linux/percpu.h>
32 #include <linux/set_memory.h>
33 #include <linux/slab.h>
34 #include <linux/syscore_ops.h>
35
36 #include <linux/irqchip.h>
37 #include <linux/irqchip/arm-gic-v3.h>
38 #include <linux/irqchip/arm-gic-v4.h>
39
40 #include <asm/cputype.h>
41 #include <asm/exception.h>
42
43 #include "irq-gic-common.h"
44 #include <linux/irqchip/irq-msi-lib.h>
45
46 #define ITS_FLAGS_CMDQ_NEEDS_FLUSHING           (1ULL << 0)
47 #define ITS_FLAGS_WORKAROUND_CAVIUM_22375       (1ULL << 1)
48 #define ITS_FLAGS_WORKAROUND_CAVIUM_23144       (1ULL << 2)
49 #define ITS_FLAGS_FORCE_NON_SHAREABLE           (1ULL << 3)
50 #define ITS_FLAGS_WORKAROUND_HISILICON_162100801        (1ULL << 4)
51
52 #define RD_LOCAL_LPI_ENABLED                    BIT(0)
53 #define RD_LOCAL_PENDTABLE_PREALLOCATED         BIT(1)
54 #define RD_LOCAL_MEMRESERVE_DONE                BIT(2)
55
56 static u32 lpi_id_bits;
57
58 /*
59  * We allocate memory for PROPBASE to cover 2 ^ lpi_id_bits LPIs
60  * (one configuration byte per interrupt). PENDBASE has to
61  * be 64kB aligned (one bit per LPI, plus 8192 bits for SPI/PPI/SGI).
62  */
63 #define LPI_NRBITS              lpi_id_bits
64 #define LPI_PROPBASE_SZ         ALIGN(BIT(LPI_NRBITS), SZ_64K)
65 #define LPI_PENDBASE_SZ         ALIGN(BIT(LPI_NRBITS) / 8, SZ_64K)
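/*
 * For example, with lpi_id_bits = 16 (purely illustrative) the
 * property table covers 65536 one-byte entries, i.e. exactly 64kB,
 * while the pending table needs 65536 bits = 8kB of payload,
 * rounded up to 64kB by the alignment above.
 */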
66
67 static u8 __ro_after_init lpi_prop_prio;
68 static struct its_node *find_4_1_its(void);
69
70 /*
71  * Collection structure - just an ID, and a redistributor address to
72  * ping. We use one per CPU as a bag of interrupts assigned to this
73  * CPU.
74  */
75 struct its_collection {
76         u64                     target_address;
77         u16                     col_id;
78 };
79
80 /*
81  * The ITS_BASER structure - contains memory information, the cached
82  * BASER register value, and the ITS page size.
83  */
84 struct its_baser {
85         void            *base;
86         u64             val;
87         u32             order;
88         u32             psz;
89 };
90
91 struct its_device;
92
93 /*
94  * The ITS structure - contains most of the infrastructure, with the
95  * top-level MSI domain, the command queue, the collections, and the
96  * list of devices writing to it.
97  *
98  * dev_alloc_lock has to be taken for device allocations, while the
99  * spinlock must be taken to parse data structures such as the device
100  * list.
101  */
102 struct its_node {
103         raw_spinlock_t          lock;
104         struct mutex            dev_alloc_lock;
105         struct list_head        entry;
106         void __iomem            *base;
107         void __iomem            *sgir_base;
108         phys_addr_t             phys_base;
109         struct its_cmd_block    *cmd_base;
110         struct its_cmd_block    *cmd_write;
111         struct its_baser        tables[GITS_BASER_NR_REGS];
112         struct its_collection   *collections;
113         struct fwnode_handle    *fwnode_handle;
114         u64                     (*get_msi_base)(struct its_device *its_dev);
115         u64                     typer;
116         u64                     cbaser_save;
117         u32                     ctlr_save;
118         u32                     mpidr;
119         struct list_head        its_device_list;
120         u64                     flags;
121         unsigned long           list_nr;
122         int                     numa_node;
123         unsigned int            msi_domain_flags;
124         u32                     pre_its_base; /* for Socionext Synquacer */
125         int                     vlpi_redist_offset;
126 };
127
128 static DEFINE_PER_CPU(struct its_node *, local_4_1_its);
129
130 #define is_v4(its)              (!!((its)->typer & GITS_TYPER_VLPIS))
131 #define is_v4_1(its)            (!!((its)->typer & GITS_TYPER_VMAPP))
132 #define device_ids(its)         (FIELD_GET(GITS_TYPER_DEVBITS, (its)->typer) + 1)
133
134 #define ITS_ITT_ALIGN           SZ_256
135
136 /* The maximum number of VPEID bits supported by VLPI commands */
137 #define ITS_MAX_VPEID_BITS                                              \
138         ({                                                              \
139                 int nvpeid = 16;                                        \
140                 if (gic_rdists->has_rvpeid &&                           \
141                     gic_rdists->gicd_typer2 & GICD_TYPER2_VIL)          \
142                         nvpeid = 1 + (gic_rdists->gicd_typer2 &         \
143                                       GICD_TYPER2_VID);                 \
144                                                                         \
145                 nvpeid;                                                 \
146         })
147 #define ITS_MAX_VPEID           (1 << (ITS_MAX_VPEID_BITS))
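/*
 * The default of 16 bits matches GICv4.0. On GICv4.1, when
 * GICD_TYPER2.VIL is set, the width becomes GICD_TYPER2.VID + 1;
 * e.g. VID = 15 still yields 16 bits, i.e. ITS_MAX_VPEID = 65536.
 */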
148
149 /* Convert page order to size in bytes */
150 #define PAGE_ORDER_TO_SIZE(o)   (PAGE_SIZE << (o))
151
152 struct event_lpi_map {
153         unsigned long           *lpi_map;
154         u16                     *col_map;
155         irq_hw_number_t         lpi_base;
156         int                     nr_lpis;
157         raw_spinlock_t          vlpi_lock;
158         struct its_vm           *vm;
159         struct its_vlpi_map     *vlpi_maps;
160         int                     nr_vlpis;
161 };
162
163 /*
164  * The ITS view of a device - belongs to an ITS, owns an interrupt
165  * translation table, and a list of interrupts. If some of its
166  * LPIs are injected into a guest (GICv4), the event_map.vm field
167  * indicates which one.
168  */
169 struct its_device {
170         struct list_head        entry;
171         struct its_node         *its;
172         struct event_lpi_map    event_map;
173         void                    *itt;
174         u32                     itt_sz;
175         u32                     nr_ites;
176         u32                     device_id;
177         bool                    shared;
178 };
179
180 static struct {
181         raw_spinlock_t          lock;
182         struct its_device       *dev;
183         struct its_vpe          **vpes;
184         int                     next_victim;
185 } vpe_proxy;
186
187 struct cpu_lpi_count {
188         atomic_t        managed;
189         atomic_t        unmanaged;
190 };
191
192 static DEFINE_PER_CPU(struct cpu_lpi_count, cpu_lpi_count);
193
194 static LIST_HEAD(its_nodes);
195 static DEFINE_RAW_SPINLOCK(its_lock);
196 static struct rdists *gic_rdists;
197 static struct irq_domain *its_parent;
198
199 static unsigned long its_list_map;
200 static u16 vmovp_seq_num;
201 static DEFINE_RAW_SPINLOCK(vmovp_lock);
202
203 static DEFINE_IDA(its_vpeid_ida);
204
205 #define gic_data_rdist()                (raw_cpu_ptr(gic_rdists->rdist))
206 #define gic_data_rdist_cpu(cpu)         (per_cpu_ptr(gic_rdists->rdist, cpu))
207 #define gic_data_rdist_rd_base()        (gic_data_rdist()->rd_base)
208 #define gic_data_rdist_vlpi_base()      (gic_data_rdist_rd_base() + SZ_128K)
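/*
 * On GICv4 each redistributor carries two extra 64kB frames beyond
 * RD_base and SGI_base; the first of those (RD_base + 128kB) holds
 * the VLPI registers (GICR_VPROPBASER, GICR_VPENDBASER, ...), which
 * is what the SZ_128K offset above points at.
 */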
209
210 static gfp_t gfp_flags_quirk;
211
212 static struct page *its_alloc_pages_node(int node, gfp_t gfp,
213                                          unsigned int order)
214 {
215         struct page *page;
216         int ret = 0;
217
218         page = alloc_pages_node(node, gfp | gfp_flags_quirk, order);
219
220         if (!page)
221                 return NULL;
222
223         ret = set_memory_decrypted((unsigned long)page_address(page),
224                                    1 << order);
225         /*
226          * If set_memory_decrypted() fails then we don't know what state the
227          * page is in, so we can't free it. Instead we leak it.
228          * set_memory_decrypted() will already have WARNed.
229          */
230         if (ret)
231                 return NULL;
232
233         return page;
234 }
235
236 static struct page *its_alloc_pages(gfp_t gfp, unsigned int order)
237 {
238         return its_alloc_pages_node(NUMA_NO_NODE, gfp, order);
239 }
240
241 static void its_free_pages(void *addr, unsigned int order)
242 {
243         /*
244          * If the memory cannot be encrypted again then we must leak the pages.
245          * set_memory_encrypted() will already have WARNed.
246          */
247         if (set_memory_encrypted((unsigned long)addr, 1 << order))
248                 return;
249         free_pages((unsigned long)addr, order);
250 }
251
252 static struct gen_pool *itt_pool;
253
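/*
 * Sub-page ITT allocations are carved out of a shared genpool so
 * that several small translation tables can share a page; when the
 * pool runs dry, one more page is added and the allocation retried.
 * Requests of a page or more bypass the pool entirely.
 */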
254 static void *itt_alloc_pool(int node, int size)
255 {
256         unsigned long addr;
257         struct page *page;
258
259         if (size >= PAGE_SIZE) {
260                 page = its_alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, get_order(size));
261
262                 return page ? page_address(page) : NULL;
263         }
264
265         do {
266                 addr = gen_pool_alloc(itt_pool, size);
267                 if (addr)
268                         break;
269
270                 page = its_alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
271                 if (!page)
272                         break;
273
274                 gen_pool_add(itt_pool, (unsigned long)page_address(page), PAGE_SIZE, node);
275         } while (!addr);
276
277         return (void *)addr;
278 }
279
280 static void itt_free_pool(void *addr, int size)
281 {
282         if (!addr)
283                 return;
284
285         if (size >= PAGE_SIZE) {
286                 its_free_pages(addr, get_order(size));
287                 return;
288         }
289
290         gen_pool_free(itt_pool, (unsigned long)addr, size);
291 }
292
293 /*
294  * Skip ITSs that have no vLPIs mapped, unless we're on GICv4.1, as we
295  * always have vSGIs mapped.
296  */
297 static bool require_its_list_vmovp(struct its_vm *vm, struct its_node *its)
298 {
299         return (gic_rdists->has_rvpeid || vm->vlpi_count[its->list_nr]);
300 }
301
302 static bool rdists_support_shareable(void)
303 {
304         return !(gic_rdists->flags & RDIST_FLAGS_FORCE_NON_SHAREABLE);
305 }
306
307 static u16 get_its_list(struct its_vm *vm)
308 {
309         struct its_node *its;
310         unsigned long its_list = 0;
311
312         list_for_each_entry(its, &its_nodes, entry) {
313                 if (!is_v4(its))
314                         continue;
315
316                 if (require_its_list_vmovp(vm, its))
317                         __set_bit(its->list_nr, &its_list);
318         }
319
320         return (u16)its_list;
321 }
322
323 static inline u32 its_get_event_id(struct irq_data *d)
324 {
325         struct its_device *its_dev = irq_data_get_irq_chip_data(d);
326         return d->hwirq - its_dev->event_map.lpi_base;
327 }
328
329 static struct its_collection *dev_event_to_col(struct its_device *its_dev,
330                                                u32 event)
331 {
332         struct its_node *its = its_dev->its;
333
334         return its->collections + its_dev->event_map.col_map[event];
335 }
336
337 static struct its_vlpi_map *dev_event_to_vlpi_map(struct its_device *its_dev,
338                                                u32 event)
339 {
340         if (WARN_ON_ONCE(event >= its_dev->event_map.nr_lpis))
341                 return NULL;
342
343         return &its_dev->event_map.vlpi_maps[event];
344 }
345
346 static struct its_vlpi_map *get_vlpi_map(struct irq_data *d)
347 {
348         if (irqd_is_forwarded_to_vcpu(d)) {
349                 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
350                 u32 event = its_get_event_id(d);
351
352                 return dev_event_to_vlpi_map(its_dev, event);
353         }
354
355         return NULL;
356 }
357
358 static int vpe_to_cpuid_lock(struct its_vpe *vpe, unsigned long *flags)
359 {
360         raw_spin_lock_irqsave(&vpe->vpe_lock, *flags);
361         return vpe->col_idx;
362 }
363
364 static void vpe_to_cpuid_unlock(struct its_vpe *vpe, unsigned long flags)
365 {
366         raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags);
367 }
368
369 static struct irq_chip its_vpe_irq_chip;
370
371 static int irq_to_cpuid_lock(struct irq_data *d, unsigned long *flags)
372 {
373         struct its_vpe *vpe = NULL;
374         int cpu;
375
376         if (d->chip == &its_vpe_irq_chip) {
377                 vpe = irq_data_get_irq_chip_data(d);
378         } else {
379                 struct its_vlpi_map *map = get_vlpi_map(d);
380                 if (map)
381                         vpe = map->vpe;
382         }
383
384         if (vpe) {
385                 cpu = vpe_to_cpuid_lock(vpe, flags);
386         } else {
387                 /* Physical LPIs are already locked via the irq_desc lock */
388                 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
389                 cpu = its_dev->event_map.col_map[its_get_event_id(d)];
390                 /* Keep GCC quiet... */
391                 *flags = 0;
392         }
393
394         return cpu;
395 }
396
397 static void irq_to_cpuid_unlock(struct irq_data *d, unsigned long flags)
398 {
399         struct its_vpe *vpe = NULL;
400
401         if (d->chip == &its_vpe_irq_chip) {
402                 vpe = irq_data_get_irq_chip_data(d);
403         } else {
404                 struct its_vlpi_map *map = get_vlpi_map(d);
405                 if (map)
406                         vpe = map->vpe;
407         }
408
409         if (vpe)
410                 vpe_to_cpuid_unlock(vpe, flags);
411 }
412
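/*
 * Collection targets are redistributor addresses and are therefore
 * 64kB aligned (commands only encode bits [51:16]); a target with
 * any of the low 16 bits set cannot possibly be valid.
 */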
413 static struct its_collection *valid_col(struct its_collection *col)
414 {
415         if (WARN_ON_ONCE(col->target_address & GENMASK_ULL(15, 0)))
416                 return NULL;
417
418         return col;
419 }
420
421 static struct its_vpe *valid_vpe(struct its_node *its, struct its_vpe *vpe)
422 {
423         if (valid_col(its->collections + vpe->col_idx))
424                 return vpe;
425
426         return NULL;
427 }
428
429 /*
430  * ITS command descriptors - parameters to be encoded in a command
431  * block.
432  */
433 struct its_cmd_desc {
434         union {
435                 struct {
436                         struct its_device *dev;
437                         u32 event_id;
438                 } its_inv_cmd;
439
440                 struct {
441                         struct its_device *dev;
442                         u32 event_id;
443                 } its_clear_cmd;
444
445                 struct {
446                         struct its_device *dev;
447                         u32 event_id;
448                 } its_int_cmd;
449
450                 struct {
451                         struct its_device *dev;
452                         int valid;
453                 } its_mapd_cmd;
454
455                 struct {
456                         struct its_collection *col;
457                         int valid;
458                 } its_mapc_cmd;
459
460                 struct {
461                         struct its_device *dev;
462                         u32 phys_id;
463                         u32 event_id;
464                 } its_mapti_cmd;
465
466                 struct {
467                         struct its_device *dev;
468                         struct its_collection *col;
469                         u32 event_id;
470                 } its_movi_cmd;
471
472                 struct {
473                         struct its_device *dev;
474                         u32 event_id;
475                 } its_discard_cmd;
476
477                 struct {
478                         struct its_collection *col;
479                 } its_invall_cmd;
480
481                 struct {
482                         struct its_vpe *vpe;
483                 } its_vinvall_cmd;
484
485                 struct {
486                         struct its_vpe *vpe;
487                         struct its_collection *col;
488                         bool valid;
489                 } its_vmapp_cmd;
490
491                 struct {
492                         struct its_vpe *vpe;
493                         struct its_device *dev;
494                         u32 virt_id;
495                         u32 event_id;
496                         bool db_enabled;
497                 } its_vmapti_cmd;
498
499                 struct {
500                         struct its_vpe *vpe;
501                         struct its_device *dev;
502                         u32 event_id;
503                         bool db_enabled;
504                 } its_vmovi_cmd;
505
506                 struct {
507                         struct its_vpe *vpe;
508                         struct its_collection *col;
509                         u16 seq_num;
510                         u16 its_list;
511                 } its_vmovp_cmd;
512
513                 struct {
514                         struct its_vpe *vpe;
515                 } its_invdb_cmd;
516
517                 struct {
518                         struct its_vpe *vpe;
519                         u8 sgi;
520                         u8 priority;
521                         bool enable;
522                         bool group;
523                         bool clear;
524                 } its_vsgi_cmd;
525         };
526 };
527
528 /*
529  * The ITS command block, which is what the ITS actually parses.
530  */
531 struct its_cmd_block {
532         union {
533                 u64     raw_cmd[4];
534                 __le64  raw_cmd_le[4];
535         };
536 };
537
538 #define ITS_CMD_QUEUE_SZ                SZ_64K
539 #define ITS_CMD_QUEUE_NR_ENTRIES        (ITS_CMD_QUEUE_SZ / sizeof(struct its_cmd_block))
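/* Each command block is 4 x 64bit = 32 bytes, so a 64kB queue holds 2048 entries. */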
540
541 typedef struct its_collection *(*its_cmd_builder_t)(struct its_node *,
542                                                     struct its_cmd_block *,
543                                                     struct its_cmd_desc *);
544
545 typedef struct its_vpe *(*its_cmd_vbuilder_t)(struct its_node *,
546                                               struct its_cmd_block *,
547                                               struct its_cmd_desc *);
548
549 static void its_mask_encode(u64 *raw_cmd, u64 val, int h, int l)
550 {
551         u64 mask = GENMASK_ULL(h, l);
552         *raw_cmd &= ~mask;
553         *raw_cmd |= (val << l) & mask;
554 }
555
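/*
 * Illustration: its_encode_devid() below packs the DeviceID into
 * bits [63:32] of DW0. With devid = 0x12, the mask is
 * GENMASK_ULL(63, 32) and the doubleword ends up as
 * 0x0000001200000000 ORed with whatever already sits in the low
 * bits (typically the command opcode).
 */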
556 static void its_encode_cmd(struct its_cmd_block *cmd, u8 cmd_nr)
557 {
558         its_mask_encode(&cmd->raw_cmd[0], cmd_nr, 7, 0);
559 }
560
561 static void its_encode_devid(struct its_cmd_block *cmd, u32 devid)
562 {
563         its_mask_encode(&cmd->raw_cmd[0], devid, 63, 32);
564 }
565
566 static void its_encode_event_id(struct its_cmd_block *cmd, u32 id)
567 {
568         its_mask_encode(&cmd->raw_cmd[1], id, 31, 0);
569 }
570
571 static void its_encode_phys_id(struct its_cmd_block *cmd, u32 phys_id)
572 {
573         its_mask_encode(&cmd->raw_cmd[1], phys_id, 63, 32);
574 }
575
576 static void its_encode_size(struct its_cmd_block *cmd, u8 size)
577 {
578         its_mask_encode(&cmd->raw_cmd[1], size, 4, 0);
579 }
580
581 static void its_encode_itt(struct its_cmd_block *cmd, u64 itt_addr)
582 {
583         its_mask_encode(&cmd->raw_cmd[2], itt_addr >> 8, 51, 8);
584 }
585
586 static void its_encode_valid(struct its_cmd_block *cmd, int valid)
587 {
588         its_mask_encode(&cmd->raw_cmd[2], !!valid, 63, 63);
589 }
590
591 static void its_encode_target(struct its_cmd_block *cmd, u64 target_addr)
592 {
593         its_mask_encode(&cmd->raw_cmd[2], target_addr >> 16, 51, 16);
594 }
595
596 static void its_encode_collection(struct its_cmd_block *cmd, u16 col)
597 {
598         its_mask_encode(&cmd->raw_cmd[2], col, 15, 0);
599 }
600
601 static void its_encode_vpeid(struct its_cmd_block *cmd, u16 vpeid)
602 {
603         its_mask_encode(&cmd->raw_cmd[1], vpeid, 47, 32);
604 }
605
606 static void its_encode_virt_id(struct its_cmd_block *cmd, u32 virt_id)
607 {
608         its_mask_encode(&cmd->raw_cmd[2], virt_id, 31, 0);
609 }
610
611 static void its_encode_db_phys_id(struct its_cmd_block *cmd, u32 db_phys_id)
612 {
613         its_mask_encode(&cmd->raw_cmd[2], db_phys_id, 63, 32);
614 }
615
616 static void its_encode_db_valid(struct its_cmd_block *cmd, bool db_valid)
617 {
618         its_mask_encode(&cmd->raw_cmd[2], db_valid, 0, 0);
619 }
620
621 static void its_encode_seq_num(struct its_cmd_block *cmd, u16 seq_num)
622 {
623         its_mask_encode(&cmd->raw_cmd[0], seq_num, 47, 32);
624 }
625
626 static void its_encode_its_list(struct its_cmd_block *cmd, u16 its_list)
627 {
628         its_mask_encode(&cmd->raw_cmd[1], its_list, 15, 0);
629 }
630
631 static void its_encode_vpt_addr(struct its_cmd_block *cmd, u64 vpt_pa)
632 {
633         its_mask_encode(&cmd->raw_cmd[3], vpt_pa >> 16, 51, 16);
634 }
635
636 static void its_encode_vpt_size(struct its_cmd_block *cmd, u8 vpt_size)
637 {
638         its_mask_encode(&cmd->raw_cmd[3], vpt_size, 4, 0);
639 }
640
641 static void its_encode_vconf_addr(struct its_cmd_block *cmd, u64 vconf_pa)
642 {
643         its_mask_encode(&cmd->raw_cmd[0], vconf_pa >> 16, 51, 16);
644 }
645
646 static void its_encode_alloc(struct its_cmd_block *cmd, bool alloc)
647 {
648         its_mask_encode(&cmd->raw_cmd[0], alloc, 8, 8);
649 }
650
651 static void its_encode_ptz(struct its_cmd_block *cmd, bool ptz)
652 {
653         its_mask_encode(&cmd->raw_cmd[0], ptz, 9, 9);
654 }
655
656 static void its_encode_vmapp_default_db(struct its_cmd_block *cmd,
657                                         u32 vpe_db_lpi)
658 {
659         its_mask_encode(&cmd->raw_cmd[1], vpe_db_lpi, 31, 0);
660 }
661
662 static void its_encode_vmovp_default_db(struct its_cmd_block *cmd,
663                                         u32 vpe_db_lpi)
664 {
665         its_mask_encode(&cmd->raw_cmd[3], vpe_db_lpi, 31, 0);
666 }
667
668 static void its_encode_db(struct its_cmd_block *cmd, bool db)
669 {
670         its_mask_encode(&cmd->raw_cmd[2], db, 63, 63);
671 }
672
673 static void its_encode_sgi_intid(struct its_cmd_block *cmd, u8 sgi)
674 {
675         its_mask_encode(&cmd->raw_cmd[0], sgi, 35, 32);
676 }
677
678 static void its_encode_sgi_priority(struct its_cmd_block *cmd, u8 prio)
679 {
680         its_mask_encode(&cmd->raw_cmd[0], prio >> 4, 23, 20);
681 }
682
683 static void its_encode_sgi_group(struct its_cmd_block *cmd, bool grp)
684 {
685         its_mask_encode(&cmd->raw_cmd[0], grp, 10, 10);
686 }
687
688 static void its_encode_sgi_clear(struct its_cmd_block *cmd, bool clr)
689 {
690         its_mask_encode(&cmd->raw_cmd[0], clr, 9, 9);
691 }
692
693 static void its_encode_sgi_enable(struct its_cmd_block *cmd, bool en)
694 {
695         its_mask_encode(&cmd->raw_cmd[0], en, 8, 8);
696 }
697
698 static inline void its_fixup_cmd(struct its_cmd_block *cmd)
699 {
700         /* Let's fixup BE commands */
701         cmd->raw_cmd_le[0] = cpu_to_le64(cmd->raw_cmd[0]);
702         cmd->raw_cmd_le[1] = cpu_to_le64(cmd->raw_cmd[1]);
703         cmd->raw_cmd_le[2] = cpu_to_le64(cmd->raw_cmd[2]);
704         cmd->raw_cmd_le[3] = cpu_to_le64(cmd->raw_cmd[3]);
705 }
706
707 static struct its_collection *its_build_mapd_cmd(struct its_node *its,
708                                                  struct its_cmd_block *cmd,
709                                                  struct its_cmd_desc *desc)
710 {
711         unsigned long itt_addr;
712         u8 size = ilog2(desc->its_mapd_cmd.dev->nr_ites);
713
714         itt_addr = virt_to_phys(desc->its_mapd_cmd.dev->itt);
715
716         its_encode_cmd(cmd, GITS_CMD_MAPD);
717         its_encode_devid(cmd, desc->its_mapd_cmd.dev->device_id);
718         its_encode_size(cmd, size - 1);
719         its_encode_itt(cmd, itt_addr);
720         its_encode_valid(cmd, desc->its_mapd_cmd.valid);
721
722         its_fixup_cmd(cmd);
723
724         return NULL;
725 }
726
727 static struct its_collection *its_build_mapc_cmd(struct its_node *its,
728                                                  struct its_cmd_block *cmd,
729                                                  struct its_cmd_desc *desc)
730 {
731         its_encode_cmd(cmd, GITS_CMD_MAPC);
732         its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id);
733         its_encode_target(cmd, desc->its_mapc_cmd.col->target_address);
734         its_encode_valid(cmd, desc->its_mapc_cmd.valid);
735
736         its_fixup_cmd(cmd);
737
738         return desc->its_mapc_cmd.col;
739 }
740
741 static struct its_collection *its_build_mapti_cmd(struct its_node *its,
742                                                   struct its_cmd_block *cmd,
743                                                   struct its_cmd_desc *desc)
744 {
745         struct its_collection *col;
746
747         col = dev_event_to_col(desc->its_mapti_cmd.dev,
748                                desc->its_mapti_cmd.event_id);
749
750         its_encode_cmd(cmd, GITS_CMD_MAPTI);
751         its_encode_devid(cmd, desc->its_mapti_cmd.dev->device_id);
752         its_encode_event_id(cmd, desc->its_mapti_cmd.event_id);
753         its_encode_phys_id(cmd, desc->its_mapti_cmd.phys_id);
754         its_encode_collection(cmd, col->col_id);
755
756         its_fixup_cmd(cmd);
757
758         return valid_col(col);
759 }
760
761 static struct its_collection *its_build_movi_cmd(struct its_node *its,
762                                                  struct its_cmd_block *cmd,
763                                                  struct its_cmd_desc *desc)
764 {
765         struct its_collection *col;
766
767         col = dev_event_to_col(desc->its_movi_cmd.dev,
768                                desc->its_movi_cmd.event_id);
769
770         its_encode_cmd(cmd, GITS_CMD_MOVI);
771         its_encode_devid(cmd, desc->its_movi_cmd.dev->device_id);
772         its_encode_event_id(cmd, desc->its_movi_cmd.event_id);
773         its_encode_collection(cmd, desc->its_movi_cmd.col->col_id);
774
775         its_fixup_cmd(cmd);
776
777         return valid_col(col);
778 }
779
780 static struct its_collection *its_build_discard_cmd(struct its_node *its,
781                                                     struct its_cmd_block *cmd,
782                                                     struct its_cmd_desc *desc)
783 {
784         struct its_collection *col;
785
786         col = dev_event_to_col(desc->its_discard_cmd.dev,
787                                desc->its_discard_cmd.event_id);
788
789         its_encode_cmd(cmd, GITS_CMD_DISCARD);
790         its_encode_devid(cmd, desc->its_discard_cmd.dev->device_id);
791         its_encode_event_id(cmd, desc->its_discard_cmd.event_id);
792
793         its_fixup_cmd(cmd);
794
795         return valid_col(col);
796 }
797
798 static struct its_collection *its_build_inv_cmd(struct its_node *its,
799                                                 struct its_cmd_block *cmd,
800                                                 struct its_cmd_desc *desc)
801 {
802         struct its_collection *col;
803
804         col = dev_event_to_col(desc->its_inv_cmd.dev,
805                                desc->its_inv_cmd.event_id);
806
807         its_encode_cmd(cmd, GITS_CMD_INV);
808         its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id);
809         its_encode_event_id(cmd, desc->its_inv_cmd.event_id);
810
811         its_fixup_cmd(cmd);
812
813         return valid_col(col);
814 }
815
816 static struct its_collection *its_build_int_cmd(struct its_node *its,
817                                                 struct its_cmd_block *cmd,
818                                                 struct its_cmd_desc *desc)
819 {
820         struct its_collection *col;
821
822         col = dev_event_to_col(desc->its_int_cmd.dev,
823                                desc->its_int_cmd.event_id);
824
825         its_encode_cmd(cmd, GITS_CMD_INT);
826         its_encode_devid(cmd, desc->its_int_cmd.dev->device_id);
827         its_encode_event_id(cmd, desc->its_int_cmd.event_id);
828
829         its_fixup_cmd(cmd);
830
831         return valid_col(col);
832 }
833
834 static struct its_collection *its_build_clear_cmd(struct its_node *its,
835                                                   struct its_cmd_block *cmd,
836                                                   struct its_cmd_desc *desc)
837 {
838         struct its_collection *col;
839
840         col = dev_event_to_col(desc->its_clear_cmd.dev,
841                                desc->its_clear_cmd.event_id);
842
843         its_encode_cmd(cmd, GITS_CMD_CLEAR);
844         its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id);
845         its_encode_event_id(cmd, desc->its_clear_cmd.event_id);
846
847         its_fixup_cmd(cmd);
848
849         return valid_col(col);
850 }
851
852 static struct its_collection *its_build_invall_cmd(struct its_node *its,
853                                                    struct its_cmd_block *cmd,
854                                                    struct its_cmd_desc *desc)
855 {
856         its_encode_cmd(cmd, GITS_CMD_INVALL);
857         its_encode_collection(cmd, desc->its_invall_cmd.col->col_id);
858
859         its_fixup_cmd(cmd);
860
861         return desc->its_invall_cmd.col;
862 }
863
864 static struct its_vpe *its_build_vinvall_cmd(struct its_node *its,
865                                              struct its_cmd_block *cmd,
866                                              struct its_cmd_desc *desc)
867 {
868         its_encode_cmd(cmd, GITS_CMD_VINVALL);
869         its_encode_vpeid(cmd, desc->its_vinvall_cmd.vpe->vpe_id);
870
871         its_fixup_cmd(cmd);
872
873         return valid_vpe(its, desc->its_vinvall_cmd.vpe);
874 }
875
876 static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
877                                            struct its_cmd_block *cmd,
878                                            struct its_cmd_desc *desc)
879 {
880         struct its_vpe *vpe = valid_vpe(its, desc->its_vmapp_cmd.vpe);
881         unsigned long vpt_addr, vconf_addr;
882         u64 target;
883         bool alloc;
884
885         its_encode_cmd(cmd, GITS_CMD_VMAPP);
886         its_encode_vpeid(cmd, desc->its_vmapp_cmd.vpe->vpe_id);
887         its_encode_valid(cmd, desc->its_vmapp_cmd.valid);
888
889         if (!desc->its_vmapp_cmd.valid) {
890                 alloc = !atomic_dec_return(&desc->its_vmapp_cmd.vpe->vmapp_count);
891                 if (is_v4_1(its)) {
892                         its_encode_alloc(cmd, alloc);
893                         /*
894                          * Unmapping a VPE is self-synchronizing on GICv4.1,
895                          * no need to issue a VSYNC.
896                          */
897                         vpe = NULL;
898                 }
899
900                 goto out;
901         }
902
903         vpt_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page));
904         target = desc->its_vmapp_cmd.col->target_address + its->vlpi_redist_offset;
905
906         its_encode_target(cmd, target);
907         its_encode_vpt_addr(cmd, vpt_addr);
908         its_encode_vpt_size(cmd, LPI_NRBITS - 1);
909
910         alloc = !atomic_fetch_inc(&desc->its_vmapp_cmd.vpe->vmapp_count);
911
912         if (!is_v4_1(its))
913                 goto out;
914
915         vconf_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->its_vm->vprop_page));
916
917         its_encode_alloc(cmd, alloc);
918
919         /*
920          * GICv4.1 provides a way to get the VLPI state, which needs the vPE
921          * to be unmapped first, and in this case, we may remap the vPE
922          * back while the VPT is not empty. So we can't assume that the
923          * VPT is empty on map. This is why we never advertise PTZ.
924          */
925         its_encode_ptz(cmd, false);
926         its_encode_vconf_addr(cmd, vconf_addr);
927         its_encode_vmapp_default_db(cmd, desc->its_vmapp_cmd.vpe->vpe_db_lpi);
928
929 out:
930         its_fixup_cmd(cmd);
931
932         return vpe;
933 }
934
935 static struct its_vpe *its_build_vmapti_cmd(struct its_node *its,
936                                             struct its_cmd_block *cmd,
937                                             struct its_cmd_desc *desc)
938 {
939         u32 db;
940
941         if (!is_v4_1(its) && desc->its_vmapti_cmd.db_enabled)
942                 db = desc->its_vmapti_cmd.vpe->vpe_db_lpi;
943         else
944                 db = 1023;
945
946         its_encode_cmd(cmd, GITS_CMD_VMAPTI);
947         its_encode_devid(cmd, desc->its_vmapti_cmd.dev->device_id);
948         its_encode_vpeid(cmd, desc->its_vmapti_cmd.vpe->vpe_id);
949         its_encode_event_id(cmd, desc->its_vmapti_cmd.event_id);
950         its_encode_db_phys_id(cmd, db);
951         its_encode_virt_id(cmd, desc->its_vmapti_cmd.virt_id);
952
953         its_fixup_cmd(cmd);
954
955         return valid_vpe(its, desc->its_vmapti_cmd.vpe);
956 }
957
958 static struct its_vpe *its_build_vmovi_cmd(struct its_node *its,
959                                            struct its_cmd_block *cmd,
960                                            struct its_cmd_desc *desc)
961 {
962         u32 db;
963
964         if (!is_v4_1(its) && desc->its_vmovi_cmd.db_enabled)
965                 db = desc->its_vmovi_cmd.vpe->vpe_db_lpi;
966         else
967                 db = 1023;
968
969         its_encode_cmd(cmd, GITS_CMD_VMOVI);
970         its_encode_devid(cmd, desc->its_vmovi_cmd.dev->device_id);
971         its_encode_vpeid(cmd, desc->its_vmovi_cmd.vpe->vpe_id);
972         its_encode_event_id(cmd, desc->its_vmovi_cmd.event_id);
973         its_encode_db_phys_id(cmd, db);
974         its_encode_db_valid(cmd, true);
975
976         its_fixup_cmd(cmd);
977
978         return valid_vpe(its, desc->its_vmovi_cmd.vpe);
979 }
980
981 static struct its_vpe *its_build_vmovp_cmd(struct its_node *its,
982                                            struct its_cmd_block *cmd,
983                                            struct its_cmd_desc *desc)
984 {
985         u64 target;
986
987         target = desc->its_vmovp_cmd.col->target_address + its->vlpi_redist_offset;
988         its_encode_cmd(cmd, GITS_CMD_VMOVP);
989         its_encode_seq_num(cmd, desc->its_vmovp_cmd.seq_num);
990         its_encode_its_list(cmd, desc->its_vmovp_cmd.its_list);
991         its_encode_vpeid(cmd, desc->its_vmovp_cmd.vpe->vpe_id);
992         its_encode_target(cmd, target);
993
994         if (is_v4_1(its)) {
995                 its_encode_db(cmd, true);
996                 its_encode_vmovp_default_db(cmd, desc->its_vmovp_cmd.vpe->vpe_db_lpi);
997         }
998
999         its_fixup_cmd(cmd);
1000
1001         return valid_vpe(its, desc->its_vmovp_cmd.vpe);
1002 }
1003
1004 static struct its_vpe *its_build_vinv_cmd(struct its_node *its,
1005                                           struct its_cmd_block *cmd,
1006                                           struct its_cmd_desc *desc)
1007 {
1008         struct its_vlpi_map *map;
1009
1010         map = dev_event_to_vlpi_map(desc->its_inv_cmd.dev,
1011                                     desc->its_inv_cmd.event_id);
1012
1013         its_encode_cmd(cmd, GITS_CMD_INV);
1014         its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id);
1015         its_encode_event_id(cmd, desc->its_inv_cmd.event_id);
1016
1017         its_fixup_cmd(cmd);
1018
1019         return valid_vpe(its, map->vpe);
1020 }
1021
1022 static struct its_vpe *its_build_vint_cmd(struct its_node *its,
1023                                           struct its_cmd_block *cmd,
1024                                           struct its_cmd_desc *desc)
1025 {
1026         struct its_vlpi_map *map;
1027
1028         map = dev_event_to_vlpi_map(desc->its_int_cmd.dev,
1029                                     desc->its_int_cmd.event_id);
1030
1031         its_encode_cmd(cmd, GITS_CMD_INT);
1032         its_encode_devid(cmd, desc->its_int_cmd.dev->device_id);
1033         its_encode_event_id(cmd, desc->its_int_cmd.event_id);
1034
1035         its_fixup_cmd(cmd);
1036
1037         return valid_vpe(its, map->vpe);
1038 }
1039
1040 static struct its_vpe *its_build_vclear_cmd(struct its_node *its,
1041                                             struct its_cmd_block *cmd,
1042                                             struct its_cmd_desc *desc)
1043 {
1044         struct its_vlpi_map *map;
1045
1046         map = dev_event_to_vlpi_map(desc->its_clear_cmd.dev,
1047                                     desc->its_clear_cmd.event_id);
1048
1049         its_encode_cmd(cmd, GITS_CMD_CLEAR);
1050         its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id);
1051         its_encode_event_id(cmd, desc->its_clear_cmd.event_id);
1052
1053         its_fixup_cmd(cmd);
1054
1055         return valid_vpe(its, map->vpe);
1056 }
1057
1058 static struct its_vpe *its_build_invdb_cmd(struct its_node *its,
1059                                            struct its_cmd_block *cmd,
1060                                            struct its_cmd_desc *desc)
1061 {
1062         if (WARN_ON(!is_v4_1(its)))
1063                 return NULL;
1064
1065         its_encode_cmd(cmd, GITS_CMD_INVDB);
1066         its_encode_vpeid(cmd, desc->its_invdb_cmd.vpe->vpe_id);
1067
1068         its_fixup_cmd(cmd);
1069
1070         return valid_vpe(its, desc->its_invdb_cmd.vpe);
1071 }
1072
1073 static struct its_vpe *its_build_vsgi_cmd(struct its_node *its,
1074                                           struct its_cmd_block *cmd,
1075                                           struct its_cmd_desc *desc)
1076 {
1077         if (WARN_ON(!is_v4_1(its)))
1078                 return NULL;
1079
1080         its_encode_cmd(cmd, GITS_CMD_VSGI);
1081         its_encode_vpeid(cmd, desc->its_vsgi_cmd.vpe->vpe_id);
1082         its_encode_sgi_intid(cmd, desc->its_vsgi_cmd.sgi);
1083         its_encode_sgi_priority(cmd, desc->its_vsgi_cmd.priority);
1084         its_encode_sgi_group(cmd, desc->its_vsgi_cmd.group);
1085         its_encode_sgi_clear(cmd, desc->its_vsgi_cmd.clear);
1086         its_encode_sgi_enable(cmd, desc->its_vsgi_cmd.enable);
1087
1088         its_fixup_cmd(cmd);
1089
1090         return valid_vpe(its, desc->its_vsgi_cmd.vpe);
1091 }
1092
1093 static u64 its_cmd_ptr_to_offset(struct its_node *its,
1094                                  struct its_cmd_block *ptr)
1095 {
1096         return (ptr - its->cmd_base) * sizeof(*ptr);
1097 }
1098
1099 static int its_queue_full(struct its_node *its)
1100 {
1101         int widx;
1102         int ridx;
1103
1104         widx = its->cmd_write - its->cmd_base;
1105         ridx = readl_relaxed(its->base + GITS_CREADR) / sizeof(struct its_cmd_block);
1106
1107         /* This is incredibly unlikely to happen, unless the ITS locks up. */
1108         if (((widx + 1) % ITS_CMD_QUEUE_NR_ENTRIES) == ridx)
1109                 return 1;
1110
1111         return 0;
1112 }
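/*
 * Note that one slot is intentionally left unused: the queue is
 * declared full when advancing the write pointer would make it
 * collide with the read pointer, the usual way of telling a full
 * ring from an empty one.
 */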
1113
1114 static struct its_cmd_block *its_allocate_entry(struct its_node *its)
1115 {
1116         struct its_cmd_block *cmd;
1117         u32 count = 1000000;    /* 1s! */
1118
1119         while (its_queue_full(its)) {
1120                 count--;
1121                 if (!count) {
1122                         pr_err_ratelimited("ITS queue not draining\n");
1123                         return NULL;
1124                 }
1125                 cpu_relax();
1126                 udelay(1);
1127         }
1128
1129         cmd = its->cmd_write++;
1130
1131         /* Handle queue wrapping */
1132         if (its->cmd_write == (its->cmd_base + ITS_CMD_QUEUE_NR_ENTRIES))
1133                 its->cmd_write = its->cmd_base;
1134
1135         /* Clear command */
1136         cmd->raw_cmd[0] = 0;
1137         cmd->raw_cmd[1] = 0;
1138         cmd->raw_cmd[2] = 0;
1139         cmd->raw_cmd[3] = 0;
1140
1141         return cmd;
1142 }
1143
1144 static struct its_cmd_block *its_post_commands(struct its_node *its)
1145 {
1146         u64 wr = its_cmd_ptr_to_offset(its, its->cmd_write);
1147
1148         writel_relaxed(wr, its->base + GITS_CWRITER);
1149
1150         return its->cmd_write;
1151 }
1152
1153 static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd)
1154 {
1155         /*
1156          * Make sure the commands written to memory are observable by
1157          * the ITS.
1158          */
1159         if (its->flags & ITS_FLAGS_CMDQ_NEEDS_FLUSHING)
1160                 gic_flush_dcache_to_poc(cmd, sizeof(*cmd));
1161         else
1162                 dsb(ishst);
1163 }
1164
1165 static int its_wait_for_range_completion(struct its_node *its,
1166                                          u64    prev_idx,
1167                                          struct its_cmd_block *to)
1168 {
1169         u64 rd_idx, to_idx, linear_idx;
1170         u32 count = 1000000;    /* 1s! */
1171
1172         /* Linearize to_idx if the command queue has wrapped around */
1173         to_idx = its_cmd_ptr_to_offset(its, to);
1174         if (to_idx < prev_idx)
1175                 to_idx += ITS_CMD_QUEUE_SZ;
1176
1177         linear_idx = prev_idx;
1178
1179         while (1) {
1180                 s64 delta;
1181
1182                 rd_idx = readl_relaxed(its->base + GITS_CREADR);
1183
1184                 /*
1185                  * Compute the read pointer progress, taking the
1186                  * potential wrap-around into account.
1187                  */
1188                 delta = rd_idx - prev_idx;
1189                 if (rd_idx < prev_idx)
1190                         delta += ITS_CMD_QUEUE_SZ;
1191
1192                 linear_idx += delta;
1193                 if (linear_idx >= to_idx)
1194                         break;
1195
1196                 count--;
1197                 if (!count) {
1198                         pr_err_ratelimited("ITS queue timeout (%llu %llu)\n",
1199                                            to_idx, linear_idx);
1200                         return -1;
1201                 }
1202                 prev_idx = rd_idx;
1203                 cpu_relax();
1204                 udelay(1);
1205         }
1206
1207         return 0;
1208 }
1209
1210 /* Warning, macro hell follows */
1211 #define BUILD_SINGLE_CMD_FUNC(name, buildtype, synctype, buildfn)       \
1212 void name(struct its_node *its,                                         \
1213           buildtype builder,                                            \
1214           struct its_cmd_desc *desc)                                    \
1215 {                                                                       \
1216         struct its_cmd_block *cmd, *sync_cmd, *next_cmd;                \
1217         synctype *sync_obj;                                             \
1218         unsigned long flags;                                            \
1219         u64 rd_idx;                                                     \
1220                                                                         \
1221         raw_spin_lock_irqsave(&its->lock, flags);                       \
1222                                                                         \
1223         cmd = its_allocate_entry(its);                                  \
1224         if (!cmd) {             /* We're soooooo screwed... */          \
1225                 raw_spin_unlock_irqrestore(&its->lock, flags);          \
1226                 return;                                                 \
1227         }                                                               \
1228         sync_obj = builder(its, cmd, desc);                             \
1229         its_flush_cmd(its, cmd);                                        \
1230                                                                         \
1231         if (sync_obj) {                                                 \
1232                 sync_cmd = its_allocate_entry(its);                     \
1233                 if (!sync_cmd)                                          \
1234                         goto post;                                      \
1235                                                                         \
1236                 buildfn(its, sync_cmd, sync_obj);                       \
1237                 its_flush_cmd(its, sync_cmd);                           \
1238         }                                                               \
1239                                                                         \
1240 post:                                                                   \
1241         rd_idx = readl_relaxed(its->base + GITS_CREADR);                \
1242         next_cmd = its_post_commands(its);                              \
1243         raw_spin_unlock_irqrestore(&its->lock, flags);                  \
1244                                                                         \
1245         if (its_wait_for_range_completion(its, rd_idx, next_cmd))       \
1246                 pr_err_ratelimited("ITS cmd %ps failed\n", builder);    \
1247 }
1248
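/*
 * The two instantiations of the macro below expand into
 * its_send_single_command() and its_send_single_vcommand(): each
 * queues the command produced by the builder, appends a SYNC
 * (targeting a collection) or a VSYNC (targeting a vPE) when the
 * builder returned something to synchronize on, and then busy-waits
 * for GITS_CREADR to move past both entries.
 */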
1249 static void its_build_sync_cmd(struct its_node *its,
1250                                struct its_cmd_block *sync_cmd,
1251                                struct its_collection *sync_col)
1252 {
1253         its_encode_cmd(sync_cmd, GITS_CMD_SYNC);
1254         its_encode_target(sync_cmd, sync_col->target_address);
1255
1256         its_fixup_cmd(sync_cmd);
1257 }
1258
1259 static BUILD_SINGLE_CMD_FUNC(its_send_single_command, its_cmd_builder_t,
1260                              struct its_collection, its_build_sync_cmd)
1261
1262 static void its_build_vsync_cmd(struct its_node *its,
1263                                 struct its_cmd_block *sync_cmd,
1264                                 struct its_vpe *sync_vpe)
1265 {
1266         its_encode_cmd(sync_cmd, GITS_CMD_VSYNC);
1267         its_encode_vpeid(sync_cmd, sync_vpe->vpe_id);
1268
1269         its_fixup_cmd(sync_cmd);
1270 }
1271
1272 static BUILD_SINGLE_CMD_FUNC(its_send_single_vcommand, its_cmd_vbuilder_t,
1273                              struct its_vpe, its_build_vsync_cmd)
1274
1275 static void its_send_int(struct its_device *dev, u32 event_id)
1276 {
1277         struct its_cmd_desc desc;
1278
1279         desc.its_int_cmd.dev = dev;
1280         desc.its_int_cmd.event_id = event_id;
1281
1282         its_send_single_command(dev->its, its_build_int_cmd, &desc);
1283 }
1284
1285 static void its_send_clear(struct its_device *dev, u32 event_id)
1286 {
1287         struct its_cmd_desc desc;
1288
1289         desc.its_clear_cmd.dev = dev;
1290         desc.its_clear_cmd.event_id = event_id;
1291
1292         its_send_single_command(dev->its, its_build_clear_cmd, &desc);
1293 }
1294
1295 static void its_send_inv(struct its_device *dev, u32 event_id)
1296 {
1297         struct its_cmd_desc desc;
1298
1299         desc.its_inv_cmd.dev = dev;
1300         desc.its_inv_cmd.event_id = event_id;
1301
1302         its_send_single_command(dev->its, its_build_inv_cmd, &desc);
1303 }
1304
1305 static void its_send_mapd(struct its_device *dev, int valid)
1306 {
1307         struct its_cmd_desc desc;
1308
1309         desc.its_mapd_cmd.dev = dev;
1310         desc.its_mapd_cmd.valid = !!valid;
1311
1312         its_send_single_command(dev->its, its_build_mapd_cmd, &desc);
1313 }
1314
1315 static void its_send_mapc(struct its_node *its, struct its_collection *col,
1316                           int valid)
1317 {
1318         struct its_cmd_desc desc;
1319
1320         desc.its_mapc_cmd.col = col;
1321         desc.its_mapc_cmd.valid = !!valid;
1322
1323         its_send_single_command(its, its_build_mapc_cmd, &desc);
1324 }
1325
1326 static void its_send_mapti(struct its_device *dev, u32 irq_id, u32 id)
1327 {
1328         struct its_cmd_desc desc;
1329
1330         desc.its_mapti_cmd.dev = dev;
1331         desc.its_mapti_cmd.phys_id = irq_id;
1332         desc.its_mapti_cmd.event_id = id;
1333
1334         its_send_single_command(dev->its, its_build_mapti_cmd, &desc);
1335 }
1336
1337 static void its_send_movi(struct its_device *dev,
1338                           struct its_collection *col, u32 id)
1339 {
1340         struct its_cmd_desc desc;
1341
1342         desc.its_movi_cmd.dev = dev;
1343         desc.its_movi_cmd.col = col;
1344         desc.its_movi_cmd.event_id = id;
1345
1346         its_send_single_command(dev->its, its_build_movi_cmd, &desc);
1347 }
1348
1349 static void its_send_discard(struct its_device *dev, u32 id)
1350 {
1351         struct its_cmd_desc desc;
1352
1353         desc.its_discard_cmd.dev = dev;
1354         desc.its_discard_cmd.event_id = id;
1355
1356         its_send_single_command(dev->its, its_build_discard_cmd, &desc);
1357 }
1358
1359 static void its_send_invall(struct its_node *its, struct its_collection *col)
1360 {
1361         struct its_cmd_desc desc;
1362
1363         desc.its_invall_cmd.col = col;
1364
1365         its_send_single_command(its, its_build_invall_cmd, &desc);
1366 }
1367
1368 static void its_send_vmapti(struct its_device *dev, u32 id)
1369 {
1370         struct its_vlpi_map *map = dev_event_to_vlpi_map(dev, id);
1371         struct its_cmd_desc desc;
1372
1373         desc.its_vmapti_cmd.vpe = map->vpe;
1374         desc.its_vmapti_cmd.dev = dev;
1375         desc.its_vmapti_cmd.virt_id = map->vintid;
1376         desc.its_vmapti_cmd.event_id = id;
1377         desc.its_vmapti_cmd.db_enabled = map->db_enabled;
1378
1379         its_send_single_vcommand(dev->its, its_build_vmapti_cmd, &desc);
1380 }
1381
1382 static void its_send_vmovi(struct its_device *dev, u32 id)
1383 {
1384         struct its_vlpi_map *map = dev_event_to_vlpi_map(dev, id);
1385         struct its_cmd_desc desc;
1386
1387         desc.its_vmovi_cmd.vpe = map->vpe;
1388         desc.its_vmovi_cmd.dev = dev;
1389         desc.its_vmovi_cmd.event_id = id;
1390         desc.its_vmovi_cmd.db_enabled = map->db_enabled;
1391
1392         its_send_single_vcommand(dev->its, its_build_vmovi_cmd, &desc);
1393 }
1394
1395 static void its_send_vmapp(struct its_node *its,
1396                            struct its_vpe *vpe, bool valid)
1397 {
1398         struct its_cmd_desc desc;
1399
1400         desc.its_vmapp_cmd.vpe = vpe;
1401         desc.its_vmapp_cmd.valid = valid;
1402         desc.its_vmapp_cmd.col = &its->collections[vpe->col_idx];
1403
1404         its_send_single_vcommand(its, its_build_vmapp_cmd, &desc);
1405 }
1406
1407 static void its_send_vmovp(struct its_vpe *vpe)
1408 {
1409         struct its_cmd_desc desc = {};
1410         struct its_node *its;
1411         int col_id = vpe->col_idx;
1412
1413         desc.its_vmovp_cmd.vpe = vpe;
1414
1415         if (!its_list_map) {
1416                 its = list_first_entry(&its_nodes, struct its_node, entry);
1417                 desc.its_vmovp_cmd.col = &its->collections[col_id];
1418                 its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
1419                 return;
1420         }
1421
1422         /*
1423          * Yet another marvel of the architecture. If using the
1424          * its_list "feature", we need to make sure that all ITSs
1425          * receive all VMOVP commands in the same order. The only way
1426          * to guarantee this is to make vmovp a serialization point.
1427          *
1428          * Wall <-- Head.
1429          */
1430         guard(raw_spinlock)(&vmovp_lock);
1431         desc.its_vmovp_cmd.seq_num = vmovp_seq_num++;
1432         desc.its_vmovp_cmd.its_list = get_its_list(vpe->its_vm);
1433
1434         /* Emit VMOVPs */
1435         list_for_each_entry(its, &its_nodes, entry) {
1436                 if (!is_v4(its))
1437                         continue;
1438
1439                 if (!require_its_list_vmovp(vpe->its_vm, its))
1440                         continue;
1441
1442                 desc.its_vmovp_cmd.col = &its->collections[col_id];
1443                 its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
1444         }
1445 }
1446
1447 static void its_send_vinvall(struct its_node *its, struct its_vpe *vpe)
1448 {
1449         struct its_cmd_desc desc;
1450
1451         desc.its_vinvall_cmd.vpe = vpe;
1452         its_send_single_vcommand(its, its_build_vinvall_cmd, &desc);
1453 }
1454
1455 static void its_send_vinv(struct its_device *dev, u32 event_id)
1456 {
1457         struct its_cmd_desc desc;
1458
1459         /*
1460          * There is no real VINV command. This is just a normal INV,
1461          * with a VSYNC instead of a SYNC.
1462          */
1463         desc.its_inv_cmd.dev = dev;
1464         desc.its_inv_cmd.event_id = event_id;
1465
1466         its_send_single_vcommand(dev->its, its_build_vinv_cmd, &desc);
1467 }
1468
1469 static void its_send_vint(struct its_device *dev, u32 event_id)
1470 {
1471         struct its_cmd_desc desc;
1472
1473         /*
1474          * There is no real VINT command. This is just a normal INT,
1475          * with a VSYNC instead of a SYNC.
1476          */
1477         desc.its_int_cmd.dev = dev;
1478         desc.its_int_cmd.event_id = event_id;
1479
1480         its_send_single_vcommand(dev->its, its_build_vint_cmd, &desc);
1481 }
1482
1483 static void its_send_vclear(struct its_device *dev, u32 event_id)
1484 {
1485         struct its_cmd_desc desc;
1486
1487         /*
1488          * There is no real VCLEAR command. This is just a normal CLEAR,
1489          * with a VSYNC instead of a SYNC.
1490          */
1491         desc.its_clear_cmd.dev = dev;
1492         desc.its_clear_cmd.event_id = event_id;
1493
1494         its_send_single_vcommand(dev->its, its_build_vclear_cmd, &desc);
1495 }
1496
1497 static void its_send_invdb(struct its_node *its, struct its_vpe *vpe)
1498 {
1499         struct its_cmd_desc desc;
1500
1501         desc.its_invdb_cmd.vpe = vpe;
1502         its_send_single_vcommand(its, its_build_invdb_cmd, &desc);
1503 }
1504
1505 /*
1506  * irqchip functions - assumes MSI, mostly.
1507  */
1508 static void lpi_write_config(struct irq_data *d, u8 clr, u8 set)
1509 {
1510         struct its_vlpi_map *map = get_vlpi_map(d);
1511         irq_hw_number_t hwirq;
1512         void *va;
1513         u8 *cfg;
1514
1515         if (map) {
1516                 va = page_address(map->vm->vprop_page);
1517                 hwirq = map->vintid;
1518
1519                 /* Remember the updated property */
1520                 map->properties &= ~clr;
1521                 map->properties |= set | LPI_PROP_GROUP1;
1522         } else {
1523                 va = gic_rdists->prop_table_va;
1524                 hwirq = d->hwirq;
1525         }
1526
1527         cfg = va + hwirq - 8192;
1528         *cfg &= ~clr;
1529         *cfg |= set | LPI_PROP_GROUP1;
1530
1531         /*
1532          * Make the above write visible to the redistributors.
1533          * And yes, we're flushing exactly: One. Single. Byte.
1534          * Humpf...
1535          */
1536         if (gic_rdists->flags & RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING)
1537                 gic_flush_dcache_to_poc(cfg, sizeof(*cfg));
1538         else
1539                 dsb(ishst);
1540 }
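/*
 * The "va + hwirq - 8192" above works because LPI INTIDs start at
 * 8192 and the property table holds one byte per LPI: hwirq 8192
 * lands at offset 0, 8193 at offset 1, and so on.
 */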
1541
1542 static void wait_for_syncr(void __iomem *rdbase)
1543 {
1544         while (readl_relaxed(rdbase + GICR_SYNCR) & 1)
1545                 cpu_relax();
1546 }
1547
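/*
 * Direct invalidation via GICR_INVLPIR is handled by the
 * redistributor the LPI (or VLPI) is currently routed to, sparing a
 * round trip through the ITS command queue (INV/VINV + SYNC/VSYNC).
 */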
1548 static void __direct_lpi_inv(struct irq_data *d, u64 val)
1549 {
1550         void __iomem *rdbase;
1551         unsigned long flags;
1552         int cpu;
1553
1554         /* Target the redistributor this LPI is currently routed to */
1555         cpu = irq_to_cpuid_lock(d, &flags);
1556         raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock);
1557
1558         rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base;
1559         gic_write_lpir(val, rdbase + GICR_INVLPIR);
1560         wait_for_syncr(rdbase);
1561
1562         raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock);
1563         irq_to_cpuid_unlock(d, flags);
1564 }
1565
1566 static void direct_lpi_inv(struct irq_data *d)
1567 {
1568         struct its_vlpi_map *map = get_vlpi_map(d);
1569         u64 val;
1570
1571         if (map) {
1572                 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1573
1574                 WARN_ON(!is_v4_1(its_dev->its));
1575
1576                 val  = GICR_INVLPIR_V;
1577                 val |= FIELD_PREP(GICR_INVLPIR_VPEID, map->vpe->vpe_id);
1578                 val |= FIELD_PREP(GICR_INVLPIR_INTID, map->vintid);
1579         } else {
1580                 val = d->hwirq;
1581         }
1582
1583         __direct_lpi_inv(d, val);
1584 }
1585
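/*
 * Update the configuration byte, then make the change take effect: either
 * via a direct redistributor invalidate when available, or by sending an
 * INV (physical LPI) or VINV (LPI forwarded to a vCPU) through the ITS.
 */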
1586 static void lpi_update_config(struct irq_data *d, u8 clr, u8 set)
1587 {
1588         struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1589
1590         lpi_write_config(d, clr, set);
1591         if (gic_rdists->has_direct_lpi &&
1592             (is_v4_1(its_dev->its) || !irqd_is_forwarded_to_vcpu(d)))
1593                 direct_lpi_inv(d);
1594         else if (!irqd_is_forwarded_to_vcpu(d))
1595                 its_send_inv(its_dev, its_get_event_id(d));
1596         else
1597                 its_send_vinv(its_dev, its_get_event_id(d));
1598 }
1599
1600 static void its_vlpi_set_doorbell(struct irq_data *d, bool enable)
1601 {
1602         struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1603         u32 event = its_get_event_id(d);
1604         struct its_vlpi_map *map;
1605
1606         /*
1607          * GICv4.1 does away with the per-LPI nonsense, nothing to do
1608          * here.
1609          */
1610         if (is_v4_1(its_dev->its))
1611                 return;
1612
1613         map = dev_event_to_vlpi_map(its_dev, event);
1614
1615         if (map->db_enabled == enable)
1616                 return;
1617
1618         map->db_enabled = enable;
1619
1620         /*
1621          * More fun with the architecture:
1622          *
1623          * Ideally, we'd issue a VMAPTI to set the doorbell to its LPI
1624          * value or to 1023, depending on the enable bit. But that
1625          * would be issuing a mapping for an /existing/ DevID+EventID
1626          * pair, which is UNPREDICTABLE. Instead, let's issue a VMOVI
1627          * to the /same/ vPE, using this opportunity to adjust the
1628          * doorbell. Mouahahahaha. We loves it, Precious.
1629          */
1630         its_send_vmovi(its_dev, event);
1631 }
1632
1633 static void its_mask_irq(struct irq_data *d)
1634 {
1635         if (irqd_is_forwarded_to_vcpu(d))
1636                 its_vlpi_set_doorbell(d, false);
1637
1638         lpi_update_config(d, LPI_PROP_ENABLED, 0);
1639 }
1640
1641 static void its_unmask_irq(struct irq_data *d)
1642 {
1643         if (irqd_is_forwarded_to_vcpu(d))
1644                 its_vlpi_set_doorbell(d, true);
1645
1646         lpi_update_config(d, 0, LPI_PROP_ENABLED);
1647 }
1648
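/*
 * Per-CPU LPI accounting, tracked separately for managed and unmanaged
 * interrupts so that its_select_cpu() can pick the least loaded target.
 */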
1649 static __maybe_unused u32 its_read_lpi_count(struct irq_data *d, int cpu)
1650 {
1651         if (irqd_affinity_is_managed(d))
1652                 return atomic_read(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed);
1653
1654         return atomic_read(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged);
1655 }
1656
1657 static void its_inc_lpi_count(struct irq_data *d, int cpu)
1658 {
1659         if (irqd_affinity_is_managed(d))
1660                 atomic_inc(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed);
1661         else
1662                 atomic_inc(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged);
1663 }
1664
1665 static void its_dec_lpi_count(struct irq_data *d, int cpu)
1666 {
1667         if (irqd_affinity_is_managed(d))
1668                 atomic_dec(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed);
1669         else
1670                 atomic_dec(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged);
1671 }
1672
1673 static unsigned int cpumask_pick_least_loaded(struct irq_data *d,
1674                                               const struct cpumask *cpu_mask)
1675 {
1676         unsigned int cpu = nr_cpu_ids, tmp;
1677         int count = S32_MAX;
1678
1679         for_each_cpu(tmp, cpu_mask) {
1680                 int this_count = its_read_lpi_count(d, tmp);
1681                 if (this_count < count) {
1682                         cpu = tmp;
1683                         count = this_count;
1684                 }
1685         }
1686
1687         return cpu;
1688 }
1689
1690 /*
1691  * As suggested by Thomas Gleixner in:
1692  * https://lore.kernel.org/r/87h80q2aoc.fsf@nanos.tec.linutronix.de
1693  */
1694 static int its_select_cpu(struct irq_data *d,
1695                           const struct cpumask *aff_mask)
1696 {
1697         struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1698         static DEFINE_RAW_SPINLOCK(tmpmask_lock);
1699         static struct cpumask __tmpmask;
1700         struct cpumask *tmpmask;
1701         unsigned long flags;
1702         int cpu, node;

1703         node = its_dev->its->numa_node;
1704         tmpmask = &__tmpmask;
1705
1706         raw_spin_lock_irqsave(&tmpmask_lock, flags);
1707
1708         if (!irqd_affinity_is_managed(d)) {
1709                 /* First try the NUMA node */
1710                 if (node != NUMA_NO_NODE) {
1711                         /*
1712                          * Try the intersection of the affinity mask and the
1713                          * node mask (and the online mask, just to be safe).
1714                          */
1715                         cpumask_and(tmpmask, cpumask_of_node(node), aff_mask);
1716                         cpumask_and(tmpmask, tmpmask, cpu_online_mask);
1717
1718                         /*
1719                          * Ideally, we would check if the mask is empty, and
1720                          * try again on the full node here.
1721                          *
1722                          * But it turns out that the way ACPI describes the
1723                          * affinity for ITSs only deals with memory, and
1724                          * not target CPUs, so it cannot describe a single
1725                          * ITS placed next to two NUMA nodes.
1726                          *
1727                          * Instead, just fall back on the online mask. This
1728                          * diverges from Thomas' suggestion above.
1729                          */
1730                         cpu = cpumask_pick_least_loaded(d, tmpmask);
1731                         if (cpu < nr_cpu_ids)
1732                                 goto out;
1733
1734                         /* If we can't cross sockets, give up */
1735                         if ((its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144))
1736                                 goto out;
1737
1738                         /* If the above failed, expand the search */
1739                 }
1740
1741                 /* Try the intersection of the affinity and online masks */
1742                 cpumask_and(tmpmask, aff_mask, cpu_online_mask);
1743
1744                 /* If that doesn't fly, the online mask is the last resort */
1745                 if (cpumask_empty(tmpmask))
1746                         cpumask_copy(tmpmask, cpu_online_mask);
1747
1748                 cpu = cpumask_pick_least_loaded(d, tmpmask);
1749         } else {
1750                 cpumask_copy(tmpmask, aff_mask);
1751
1752                 /* If we cannot cross sockets, limit the search to that node */
1753                 if ((its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) &&
1754                     node != NUMA_NO_NODE)
1755                         cpumask_and(tmpmask, tmpmask, cpumask_of_node(node));
1756
1757                 cpu = cpumask_pick_least_loaded(d, tmpmask);
1758         }
1759 out:
1760         raw_spin_unlock_irqrestore(&tmpmask_lock, flags);
1761
1762         pr_debug("IRQ%d -> %*pbl CPU%d\n", d->irq, cpumask_pr_args(aff_mask), cpu);
1763         return cpu;
1764 }
1765
1766 static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
1767                             bool force)
1768 {
1769         struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1770         struct its_collection *target_col;
1771         u32 id = its_get_event_id(d);
1772         int cpu, prev_cpu;
1773
1774         /* A forwarded interrupt should use irq_set_vcpu_affinity */
1775         if (irqd_is_forwarded_to_vcpu(d))
1776                 return -EINVAL;
1777
1778         prev_cpu = its_dev->event_map.col_map[id];
1779         its_dec_lpi_count(d, prev_cpu);
1780
1781         if (!force)
1782                 cpu = its_select_cpu(d, mask_val);
1783         else
1784                 cpu = cpumask_pick_least_loaded(d, mask_val);
1785
1786         if (cpu < 0 || cpu >= nr_cpu_ids)
1787                 goto err;
1788
1789         /* don't set the affinity when the target cpu is the same as the current one */
1790         if (cpu != prev_cpu) {
1791                 target_col = &its_dev->its->collections[cpu];
1792                 its_send_movi(its_dev, target_col, id);
1793                 its_dev->event_map.col_map[id] = cpu;
1794                 irq_data_update_effective_affinity(d, cpumask_of(cpu));
1795         }
1796
1797         its_inc_lpi_count(d, cpu);
1798
1799         return IRQ_SET_MASK_OK_DONE;
1800
1801 err:
1802         its_inc_lpi_count(d, prev_cpu);
1803         return -EINVAL;
1804 }
1805
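/* The MSI doorbell is the GITS_TRANSLATER register of the owning ITS */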
1806 static u64 its_irq_get_msi_base(struct its_device *its_dev)
1807 {
1808         struct its_node *its = its_dev->its;
1809
1810         return its->phys_base + GITS_TRANSLATER;
1811 }
1812
1813 static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
1814 {
1815         struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1816
1817         msg->data = its_get_event_id(d);
1818         msi_msg_set_addr(irq_data_get_msi_desc(d), msg,
1819                          its_dev->its->get_msi_base(its_dev));
1820 }
1821
1822 static int its_irq_set_irqchip_state(struct irq_data *d,
1823                                      enum irqchip_irq_state which,
1824                                      bool state)
1825 {
1826         struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1827         u32 event = its_get_event_id(d);
1828
1829         if (which != IRQCHIP_STATE_PENDING)
1830                 return -EINVAL;
1831
1832         if (irqd_is_forwarded_to_vcpu(d)) {
1833                 if (state)
1834                         its_send_vint(its_dev, event);
1835                 else
1836                         its_send_vclear(its_dev, event);
1837         } else {
1838                 if (state)
1839                         its_send_int(its_dev, event);
1840                 else
1841                         its_send_clear(its_dev, event);
1842         }
1843
1844         return 0;
1845 }
1846
1847 static int its_irq_retrigger(struct irq_data *d)
1848 {
1849         return !its_irq_set_irqchip_state(d, IRQCHIP_STATE_PENDING, true);
1850 }
1851
1852 /*
1853  * Two favourable cases:
1854  *
1855  * (a) Either we have a GICv4.1, and all vPEs have to be mapped at all times
1856  *     for vSGI delivery
1857  *
1858  * (b) Or the ITSs do not use a list map, meaning that VMOVP is cheap enough
1859  *     and we're better off mapping all VPEs always
1860  *
1861  * If neither (a) nor (b) is true, then we map vPEs on demand.
1862  *
1863  */
1864 static bool gic_requires_eager_mapping(void)
1865 {
1866         if (!its_list_map || gic_rdists->has_rvpeid)
1867                 return true;
1868
1869         return false;
1870 }
1871
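/*
 * Lazily map all of a VM's vPEs onto this ITS the first time one of its
 * VLPIs is routed here; vlpi_count[] keeps a per-ITS refcount for the VM.
 */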
1872 static void its_map_vm(struct its_node *its, struct its_vm *vm)
1873 {
1874         if (gic_requires_eager_mapping())
1875                 return;
1876
1877         guard(raw_spinlock_irqsave)(&vm->vmapp_lock);
1878
1879         /*
1880          * If the VM wasn't mapped yet, iterate over the vpes and get
1881          * them mapped now.
1882          */
1883         vm->vlpi_count[its->list_nr]++;
1884
1885         if (vm->vlpi_count[its->list_nr] == 1) {
1886                 int i;
1887
1888                 for (i = 0; i < vm->nr_vpes; i++) {
1889                         struct its_vpe *vpe = vm->vpes[i];
1890
1891                         scoped_guard(raw_spinlock, &vpe->vpe_lock)
1892                                 its_send_vmapp(its, vpe, true);
1893
1894                         its_send_vinvall(its, vpe);
1895                 }
1896         }
1897 }
1898
1899 static void its_unmap_vm(struct its_node *its, struct its_vm *vm)
1900 {
1901         /* Not using the ITS list? Everything is always mapped. */
1902         if (gic_requires_eager_mapping())
1903                 return;
1904
1905         guard(raw_spinlock_irqsave)(&vm->vmapp_lock);
1906
1907         if (!--vm->vlpi_count[its->list_nr]) {
1908                 int i;
1909
1910                 for (i = 0; i < vm->nr_vpes; i++) {
1911                         guard(raw_spinlock)(&vm->vpes[i]->vpe_lock);
1912                         its_send_vmapp(its, vm->vpes[i], false);
1913                 }
1914         }
1915 }
1916
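/*
 * MAP_VLPI: record the guest's view of the interrupt, make sure the vPEs
 * are mapped on this ITS, then swap the physical mapping for a virtual
 * one (DISCARD + VMAPTI), or issue a VMOVI if the event is already mapped.
 */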
1917 static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info)
1918 {
1919         struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1920         u32 event = its_get_event_id(d);
1921
1922         if (!info->map)
1923                 return -EINVAL;
1924
1925         if (!its_dev->event_map.vm) {
1926                 struct its_vlpi_map *maps;
1927
1928                 maps = kcalloc(its_dev->event_map.nr_lpis, sizeof(*maps),
1929                                GFP_ATOMIC);
1930                 if (!maps)
1931                         return -ENOMEM;
1932
1933                 its_dev->event_map.vm = info->map->vm;
1934                 its_dev->event_map.vlpi_maps = maps;
1935         } else if (its_dev->event_map.vm != info->map->vm) {
1936                 return -EINVAL;
1937         }
1938
1939         /* Get our private copy of the mapping information */
1940         its_dev->event_map.vlpi_maps[event] = *info->map;
1941
1942         if (irqd_is_forwarded_to_vcpu(d)) {
1943                 /* Already mapped, move it around */
1944                 its_send_vmovi(its_dev, event);
1945         } else {
1946                 /* Ensure all the VPEs are mapped on this ITS */
1947                 its_map_vm(its_dev->its, info->map->vm);
1948
1949                 /*
1950                  * Flag the interrupt as forwarded so that we can
1951                  * start poking the virtual property table.
1952                  */
1953                 irqd_set_forwarded_to_vcpu(d);
1954
1955                 /* Write out the property to the prop table */
1956                 lpi_write_config(d, 0xff, info->map->properties);
1957
1958                 /* Drop the physical mapping */
1959                 its_send_discard(its_dev, event);
1960
1961                 /* and install the virtual one */
1962                 its_send_vmapti(its_dev, event);
1963
1964                 /* Increment the number of VLPIs */
1965                 its_dev->event_map.nr_vlpis++;
1966         }
1967
1968         return 0;
1969 }
1970
1971 static int its_vlpi_get(struct irq_data *d, struct its_cmd_info *info)
1972 {
1973         struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1974         struct its_vlpi_map *map;
1975
1976         map = get_vlpi_map(d);
1977
1978         if (!its_dev->event_map.vm || !map)
1979                 return -EINVAL;
1980
1981         /* Copy our mapping information to the incoming request */
1982         *info->map = *map;
1983
1984         return 0;
1985 }
1986
1987 static int its_vlpi_unmap(struct irq_data *d)
1988 {
1989         struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1990         u32 event = its_get_event_id(d);
1991
1992         if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d))
1993                 return -EINVAL;
1994
1995         /* Drop the virtual mapping */
1996         its_send_discard(its_dev, event);
1997
1998         /* and restore the physical one */
1999         irqd_clr_forwarded_to_vcpu(d);
2000         its_send_mapti(its_dev, d->hwirq, event);
2001         lpi_update_config(d, 0xff, (lpi_prop_prio |
2002                                     LPI_PROP_ENABLED |
2003                                     LPI_PROP_GROUP1));
2004
2005         /* Potentially unmap the VM from this ITS */
2006         its_unmap_vm(its_dev->its, its_dev->event_map.vm);
2007
2008         /*
2009          * Drop the refcount and make the device available again if
2010          * this was the last VLPI.
2011          */
2012         if (!--its_dev->event_map.nr_vlpis) {
2013                 its_dev->event_map.vm = NULL;
2014                 kfree(its_dev->event_map.vlpi_maps);
2015         }
2016
2017         return 0;
2018 }
2019
2020 static int its_vlpi_prop_update(struct irq_data *d, struct its_cmd_info *info)
2021 {
2022         struct its_device *its_dev = irq_data_get_irq_chip_data(d);
2023
2024         if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d))
2025                 return -EINVAL;
2026
2027         if (info->cmd_type == PROP_UPDATE_AND_INV_VLPI)
2028                 lpi_update_config(d, 0xff, info->config);
2029         else
2030                 lpi_write_config(d, 0xff, info->config);
2031         its_vlpi_set_doorbell(d, !!(info->config & LPI_PROP_ENABLED));
2032
2033         return 0;
2034 }
2035
2036 static int its_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
2037 {
2038         struct its_device *its_dev = irq_data_get_irq_chip_data(d);
2039         struct its_cmd_info *info = vcpu_info;
2040
2041         /* Need a v4 ITS */
2042         if (!is_v4(its_dev->its))
2043                 return -EINVAL;
2044
2045         guard(raw_spinlock)(&its_dev->event_map.vlpi_lock);
2046
2047         /* Unmap request? */
2048         if (!info)
2049                 return its_vlpi_unmap(d);
2050
2051         switch (info->cmd_type) {
2052         case MAP_VLPI:
2053                 return its_vlpi_map(d, info);
2054
2055         case GET_VLPI:
2056                 return its_vlpi_get(d, info);
2057
2058         case PROP_UPDATE_VLPI:
2059         case PROP_UPDATE_AND_INV_VLPI:
2060                 return its_vlpi_prop_update(d, info);
2061
2062         default:
2063                 return -EINVAL;
2064         }
2065 }
2066
2067 static struct irq_chip its_irq_chip = {
2068         .name                   = "ITS",
2069         .irq_mask               = its_mask_irq,
2070         .irq_unmask             = its_unmask_irq,
2071         .irq_eoi                = irq_chip_eoi_parent,
2072         .irq_set_affinity       = its_set_affinity,
2073         .irq_compose_msi_msg    = its_irq_compose_msi_msg,
2074         .irq_set_irqchip_state  = its_irq_set_irqchip_state,
2075         .irq_retrigger          = its_irq_retrigger,
2076         .irq_set_vcpu_affinity  = its_irq_set_vcpu_affinity,
2077 };
2078
2079
2080 /*
2081  * How we allocate LPIs:
2082  *
2083  * lpi_range_list contains ranges of LPIs that are available to
2084  * allocate from. To allocate LPIs, just pick the first range that
2085  * fits the required allocation, and reduce it by the required
2086  * amount. Once empty, remove the range from the list.
2087  *
2088  * To free a range of LPIs, add a free range to the list, sort it and
2089  * merge the result if the new range happens to be adjacent to an
2090  * already free block.
2091  *
2092  * The consequence of the above is that allocation cost is low, but
2093  * freeing is expensive. We assume that freeing rarely occurs.
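 *
 * For example, starting from a single free range [8192, 8192 + N):
 * allocating 32 LPIs returns base 8192 and leaves [8224, 8192 + N);
 * freeing those 32 LPIs later re-adds [8192, 8224) and merges it back
 * into the adjacent free range.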
2094  */
2095 #define ITS_MAX_LPI_NRBITS      16 /* 64K LPIs */
2096
2097 static DEFINE_MUTEX(lpi_range_lock);
2098 static LIST_HEAD(lpi_range_list);
2099
2100 struct lpi_range {
2101         struct list_head        entry;
2102         u32                     base_id;
2103         u32                     span;
2104 };
2105
2106 static struct lpi_range *mk_lpi_range(u32 base, u32 span)
2107 {
2108         struct lpi_range *range;
2109
2110         range = kmalloc(sizeof(*range), GFP_KERNEL);
2111         if (range) {
2112                 range->base_id = base;
2113                 range->span = span;
2114         }
2115
2116         return range;
2117 }
2118
2119 static int alloc_lpi_range(u32 nr_lpis, u32 *base)
2120 {
2121         struct lpi_range *range, *tmp;
2122         int err = -ENOSPC;
2123
2124         mutex_lock(&lpi_range_lock);
2125
2126         list_for_each_entry_safe(range, tmp, &lpi_range_list, entry) {
2127                 if (range->span >= nr_lpis) {
2128                         *base = range->base_id;
2129                         range->base_id += nr_lpis;
2130                         range->span -= nr_lpis;
2131
2132                         if (range->span == 0) {
2133                                 list_del(&range->entry);
2134                                 kfree(range);
2135                         }
2136
2137                         err = 0;
2138                         break;
2139                 }
2140         }
2141
2142         mutex_unlock(&lpi_range_lock);
2143
2144         pr_debug("ITS: alloc %u:%u\n", *base, nr_lpis);
2145         return err;
2146 }
2147
2148 static void merge_lpi_ranges(struct lpi_range *a, struct lpi_range *b)
2149 {
2150         if (&a->entry == &lpi_range_list || &b->entry == &lpi_range_list)
2151                 return;
2152         if (a->base_id + a->span != b->base_id)
2153                 return;
2154         b->base_id = a->base_id;
2155         b->span += a->span;
2156         list_del(&a->entry);
2157         kfree(a);
2158 }
2159
2160 static int free_lpi_range(u32 base, u32 nr_lpis)
2161 {
2162         struct lpi_range *new, *old;
2163
2164         new = mk_lpi_range(base, nr_lpis);
2165         if (!new)
2166                 return -ENOMEM;
2167
2168         mutex_lock(&lpi_range_lock);
2169
2170         list_for_each_entry_reverse(old, &lpi_range_list, entry) {
2171                 if (old->base_id < base)
2172                         break;
2173         }
2174         /*
2175          * old is the last element with ->base_id smaller than base,
2176          * so new goes right after it. If there are no elements with
2177          * ->base_id smaller than base, &old->entry ends up pointing
2178  * at the head of the list, and inserting new at the start of
2179          * the list is the right thing to do in that case as well.
2180          */
2181         list_add(&new->entry, &old->entry);
2182         /*
2183          * Now check if we can merge with the preceding and/or
2184          * following ranges.
2185          */
2186         merge_lpi_ranges(old, new);
2187         merge_lpi_ranges(new, list_next_entry(new, entry));
2188
2189         mutex_unlock(&lpi_range_lock);
2190         return 0;
2191 }
2192
2193 static int __init its_lpi_init(u32 id_bits)
2194 {
2195         u32 lpis = (1UL << id_bits) - 8192;
2196         u32 numlpis;
2197         int err;
2198
2199         numlpis = 1UL << GICD_TYPER_NUM_LPIS(gic_rdists->gicd_typer);
2200
2201         if (numlpis > 2 && !WARN_ON(numlpis > lpis)) {
2202                 lpis = numlpis;
2203                 pr_info("ITS: Using hypervisor restricted LPI range [%u]\n",
2204                         lpis);
2205         }
2206
2207         /*
2208          * Initializing the allocator is just the same as freeing the
2209          * full range of LPIs.
2210          */
2211         err = free_lpi_range(8192, lpis);
2212         pr_debug("ITS: Allocator initialized for %u LPIs\n", lpis);
2213         return err;
2214 }
2215
2216 static unsigned long *its_lpi_alloc(int nr_irqs, u32 *base, int *nr_ids)
2217 {
2218         unsigned long *bitmap = NULL;
2219         int err = 0;
2220
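        /* Keep halving the request until a contiguous range fits */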
2221         do {
2222                 err = alloc_lpi_range(nr_irqs, base);
2223                 if (!err)
2224                         break;
2225
2226                 nr_irqs /= 2;
2227         } while (nr_irqs > 0);
2228
2229         if (!nr_irqs)
2230                 err = -ENOSPC;
2231
2232         if (err)
2233                 goto out;
2234
2235         bitmap = bitmap_zalloc(nr_irqs, GFP_ATOMIC);
2236         if (!bitmap)
2237                 goto out;
2238
2239         *nr_ids = nr_irqs;
2240
2241 out:
2242         if (!bitmap)
2243                 *base = *nr_ids = 0;
2244
2245         return bitmap;
2246 }
2247
2248 static void its_lpi_free(unsigned long *bitmap, u32 base, u32 nr_ids)
2249 {
2250         WARN_ON(free_lpi_range(base, nr_ids));
2251         bitmap_free(bitmap);
2252 }
2253
2254 static void gic_reset_prop_table(void *va)
2255 {
2256         /* Regular IRQ priority, Group-1, disabled */
2257         memset(va, lpi_prop_prio | LPI_PROP_GROUP1, LPI_PROPBASE_SZ);
2258
2259         /* Make sure the GIC will observe the written configuration */
2260         gic_flush_dcache_to_poc(va, LPI_PROPBASE_SZ);
2261 }
2262
2263 static struct page *its_allocate_prop_table(gfp_t gfp_flags)
2264 {
2265         struct page *prop_page;
2266
2267         prop_page = its_alloc_pages(gfp_flags,
2268                                     get_order(LPI_PROPBASE_SZ));
2269         if (!prop_page)
2270                 return NULL;
2271
2272         gic_reset_prop_table(page_address(prop_page));
2273
2274         return prop_page;
2275 }
2276
2277 static void its_free_prop_table(struct page *prop_page)
2278 {
2279         its_free_pages(page_address(prop_page), get_order(LPI_PROPBASE_SZ));
2280 }
2281
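/*
 * LPI tables inherited from a previous boot stage must be covered by a
 * reserved memory region; warn and taint the kernel otherwise.
 */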
2282 static bool gic_check_reserved_range(phys_addr_t addr, unsigned long size)
2283 {
2284         phys_addr_t start, end, addr_end;
2285         u64 i;
2286
2287         /*
2288          * We don't bother checking for a kdump kernel as by
2289  * construction, the LPI tables are outside this kernel's
2290          * memory map.
2291          */
2292         if (is_kdump_kernel())
2293                 return true;
2294
2295         addr_end = addr + size - 1;
2296
2297         for_each_reserved_mem_range(i, &start, &end) {
2298                 if (addr >= start && addr_end <= end)
2299                         return true;
2300         }
2301
2302         /* Not found, not a good sign... */
2303         pr_warn("GICv3: Expected reserved range [%pa:%pa], not found\n",
2304                 &addr, &addr_end);
2305         add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
2306         return false;
2307 }
2308
2309 static int gic_reserve_range(phys_addr_t addr, unsigned long size)
2310 {
2311         if (efi_enabled(EFI_CONFIG_TABLES))
2312                 return efi_mem_reserve_persistent(addr, size);
2313
2314         return 0;
2315 }
2316
2317 static int __init its_setup_lpi_prop_table(void)
2318 {
2319         if (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) {
2320                 u64 val;
2321
2322                 val = gicr_read_propbaser(gic_data_rdist_rd_base() + GICR_PROPBASER);
2323                 lpi_id_bits = (val & GICR_PROPBASER_IDBITS_MASK) + 1;
2324
2325                 gic_rdists->prop_table_pa = val & GENMASK_ULL(51, 12);
2326                 gic_rdists->prop_table_va = memremap(gic_rdists->prop_table_pa,
2327                                                      LPI_PROPBASE_SZ,
2328                                                      MEMREMAP_WB);
2329                 gic_reset_prop_table(gic_rdists->prop_table_va);
2330         } else {
2331                 struct page *page;
2332
2333                 lpi_id_bits = min_t(u32,
2334                                     GICD_TYPER_ID_BITS(gic_rdists->gicd_typer),
2335                                     ITS_MAX_LPI_NRBITS);
2336                 page = its_allocate_prop_table(GFP_NOWAIT);
2337                 if (!page) {
2338                         pr_err("Failed to allocate PROPBASE\n");
2339                         return -ENOMEM;
2340                 }
2341
2342                 gic_rdists->prop_table_pa = page_to_phys(page);
2343                 gic_rdists->prop_table_va = page_address(page);
2344                 WARN_ON(gic_reserve_range(gic_rdists->prop_table_pa,
2345                                           LPI_PROPBASE_SZ));
2346         }
2347
2348         pr_info("GICv3: using LPI property table @%pa\n",
2349                 &gic_rdists->prop_table_pa);
2350
2351         return its_lpi_init(lpi_id_bits);
2352 }
2353
2354 static const char *its_base_type_string[] = {
2355         [GITS_BASER_TYPE_DEVICE]        = "Devices",
2356         [GITS_BASER_TYPE_VCPU]          = "Virtual CPUs",
2357         [GITS_BASER_TYPE_RESERVED3]     = "Reserved (3)",
2358         [GITS_BASER_TYPE_COLLECTION]    = "Interrupt Collections",
2359         [GITS_BASER_TYPE_RESERVED5]     = "Reserved (5)",
2360         [GITS_BASER_TYPE_RESERVED6]     = "Reserved (6)",
2361         [GITS_BASER_TYPE_RESERVED7]     = "Reserved (7)",
2362 };
2363
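/* The GITS_BASERn registers are 64bit wide and consecutive, hence idx << 3 */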
2364 static u64 its_read_baser(struct its_node *its, struct its_baser *baser)
2365 {
2366         u32 idx = baser - its->tables;
2367
2368         return gits_read_baser(its->base + GITS_BASER + (idx << 3));
2369 }
2370
2371 static void its_write_baser(struct its_node *its, struct its_baser *baser,
2372                             u64 val)
2373 {
2374         u32 idx = baser - its->tables;
2375
2376         gits_write_baser(val, its->base + GITS_BASER + (idx << 3));
2377         baser->val = its_read_baser(its, baser);
2378 }
2379
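/*
 * Allocate and program the memory backing one GITS_BASERn table, retrying
 * with whatever shareability/cacheability the ITS actually accepts if our
 * first choice doesn't stick.
 */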
2380 static int its_setup_baser(struct its_node *its, struct its_baser *baser,
2381                            u64 cache, u64 shr, u32 order, bool indirect)
2382 {
2383         u64 val = its_read_baser(its, baser);
2384         u64 esz = GITS_BASER_ENTRY_SIZE(val);
2385         u64 type = GITS_BASER_TYPE(val);
2386         u64 baser_phys, tmp;
2387         u32 alloc_pages, psz;
2388         struct page *page;
2389         void *base;
2390
2391         psz = baser->psz;
2392         alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz);
2393         if (alloc_pages > GITS_BASER_PAGES_MAX) {
2394                 pr_warn("ITS@%pa: %s too large, reduce ITS pages %u->%u\n",
2395                         &its->phys_base, its_base_type_string[type],
2396                         alloc_pages, GITS_BASER_PAGES_MAX);
2397                 alloc_pages = GITS_BASER_PAGES_MAX;
2398                 order = get_order(GITS_BASER_PAGES_MAX * psz);
2399         }
2400
2401         page = its_alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, order);
2402         if (!page)
2403                 return -ENOMEM;
2404
2405         base = (void *)page_address(page);
2406         baser_phys = virt_to_phys(base);
2407
2408         /* Check if the physical address of the memory is above 48bits */
2409         if (IS_ENABLED(CONFIG_ARM64_64K_PAGES) && (baser_phys >> 48)) {
2410
2411                 /* 52bit PA is supported only when PageSize=64K */
2412                 if (psz != SZ_64K) {
2413                         pr_err("ITS: no 52bit PA support when psz=%d\n", psz);
2414                         its_free_pages(base, order);
2415                         return -ENXIO;
2416                 }
2417
2418                 /* Convert 52bit PA to 48bit field */
2419                 baser_phys = GITS_BASER_PHYS_52_to_48(baser_phys);
2420         }
2421
2422 retry_baser:
2423         val = (baser_phys                                        |
2424                 (type << GITS_BASER_TYPE_SHIFT)                  |
2425                 ((esz - 1) << GITS_BASER_ENTRY_SIZE_SHIFT)       |
2426                 ((alloc_pages - 1) << GITS_BASER_PAGES_SHIFT)    |
2427                 cache                                            |
2428                 shr                                              |
2429                 GITS_BASER_VALID);
2430
2431         val |=  indirect ? GITS_BASER_INDIRECT : 0x0;
2432
2433         switch (psz) {
2434         case SZ_4K:
2435                 val |= GITS_BASER_PAGE_SIZE_4K;
2436                 break;
2437         case SZ_16K:
2438                 val |= GITS_BASER_PAGE_SIZE_16K;
2439                 break;
2440         case SZ_64K:
2441                 val |= GITS_BASER_PAGE_SIZE_64K;
2442                 break;
2443         }
2444
2445         if (!shr)
2446                 gic_flush_dcache_to_poc(base, PAGE_ORDER_TO_SIZE(order));
2447
2448         its_write_baser(its, baser, val);
2449         tmp = baser->val;
2450
2451         if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) {
2452                 /*
2453                  * Shareability didn't stick. Just use
2454                  * whatever the read reported, which is likely
2455                  * to be the only thing this redistributor
2456                  * supports. If that's zero, make it
2457                  * non-cacheable as well.
2458                  */
2459                 shr = tmp & GITS_BASER_SHAREABILITY_MASK;
2460                 if (!shr)
2461                         cache = GITS_BASER_nC;
2462
2463                 goto retry_baser;
2464         }
2465
2466         if (val != tmp) {
2467                 pr_err("ITS@%pa: %s doesn't stick: %llx %llx\n",
2468                        &its->phys_base, its_base_type_string[type],
2469                        val, tmp);
2470                 its_free_pages(base, order);
2471                 return -ENXIO;
2472         }
2473
2474         baser->order = order;
2475         baser->base = base;
2476         baser->psz = psz;
2477         tmp = indirect ? GITS_LVL1_ENTRY_SIZE : esz;
2478
2479         pr_info("ITS@%pa: allocated %d %s @%lx (%s, esz %d, psz %dK, shr %d)\n",
2480                 &its->phys_base, (int)(PAGE_ORDER_TO_SIZE(order) / (int)tmp),
2481                 its_base_type_string[type],
2482                 (unsigned long)virt_to_phys(base),
2483                 indirect ? "indirect" : "flat", (int)esz,
2484                 psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT);
2485
2486         return 0;
2487 }
2488
2489 static bool its_parse_indirect_baser(struct its_node *its,
2490                                      struct its_baser *baser,
2491                                      u32 *order, u32 ids)
2492 {
2493         u64 tmp = its_read_baser(its, baser);
2494         u64 type = GITS_BASER_TYPE(tmp);
2495         u64 esz = GITS_BASER_ENTRY_SIZE(tmp);
2496         u64 val = GITS_BASER_InnerShareable | GITS_BASER_RaWaWb;
2497         u32 new_order = *order;
2498         u32 psz = baser->psz;
2499         bool indirect = false;
2500
2501         /* No need to enable Indirection if memory requirement < (psz*2)bytes */
2502         if ((esz << ids) > (psz * 2)) {
2503                 /*
2504                  * Find out whether hw supports a single or two-level table by
2505                  * reading bit at offset '62' after writing '1' to it.
2506                  */
2507                 its_write_baser(its, baser, val | GITS_BASER_INDIRECT);
2508                 indirect = !!(baser->val & GITS_BASER_INDIRECT);
2509
2510                 if (indirect) {
2511                         /*
2512                          * Each lvl2 table is one ITS page ('psz') in size. To
2513                          * size the lvl1 table, subtract the ID bits resolved
2514                          * by a single lvl2 table from 'ids' (as reported by
2515                          * the ITS hardware), and switch to the lvl1 table
2516                          * entry size.
2517                          */
2518                         ids -= ilog2(psz / (int)esz);
2519                         esz = GITS_LVL1_ENTRY_SIZE;
2520                 }
2521         }
2522
2523         /*
2524          * Allocate as many entries as required to fit the
2525          * range of device IDs that the ITS can grok... The ID
2526          * space being incredibly sparse, this results in a
2527          * massive waste of memory if the two-level device table
2528          * feature is not supported by hardware.
2529          */
2530         new_order = max_t(u32, get_order(esz << ids), new_order);
2531         if (new_order > MAX_PAGE_ORDER) {
2532                 new_order = MAX_PAGE_ORDER;
2533                 ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / (int)esz);
2534                 pr_warn("ITS@%pa: %s Table too large, reduce ids %llu->%u\n",
2535                         &its->phys_base, its_base_type_string[type],
2536                         device_ids(its), ids);
2537         }
2538
2539         *order = new_order;
2540
2541         return indirect;
2542 }
2543
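/*
 * Keep only the affinity bytes above the CommonLPIAff level, so that
 * redistributors/ITSs which can share tables compare as equal.
 */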
2544 static u32 compute_common_aff(u64 val)
2545 {
2546         u32 aff, clpiaff;
2547
2548         aff = FIELD_GET(GICR_TYPER_AFFINITY, val);
2549         clpiaff = FIELD_GET(GICR_TYPER_COMMON_LPI_AFF, val);
2550
2551         return aff & ~(GENMASK(31, 0) >> (clpiaff * 8));
2552 }
2553
2554 static u32 compute_its_aff(struct its_node *its)
2555 {
2556         u64 val;
2557         u32 svpet;
2558
2559         /*
2560          * Reencode the ITS SVPET and MPIDR as a GICR_TYPER, and compute
2561          * the resulting affinity. We then use that to see if this matches
2562          * our own affinity.
2563          */
2564         svpet = FIELD_GET(GITS_TYPER_SVPET, its->typer);
2565         val  = FIELD_PREP(GICR_TYPER_COMMON_LPI_AFF, svpet);
2566         val |= FIELD_PREP(GICR_TYPER_AFFINITY, its->mpidr);
2567         return compute_common_aff(val);
2568 }
2569
2570 static struct its_node *find_sibling_its(struct its_node *cur_its)
2571 {
2572         struct its_node *its;
2573         u32 aff;
2574
2575         if (!FIELD_GET(GITS_TYPER_SVPET, cur_its->typer))
2576                 return NULL;
2577
2578         aff = compute_its_aff(cur_its);
2579
2580         list_for_each_entry(its, &its_nodes, entry) {
2581                 u64 baser;
2582
2583                 if (!is_v4_1(its) || its == cur_its)
2584                         continue;
2585
2586                 if (!FIELD_GET(GITS_TYPER_SVPET, its->typer))
2587                         continue;
2588
2589                 if (aff != compute_its_aff(its))
2590                         continue;
2591
2592                 /* GICv4.1 guarantees that the vPE table is GITS_BASER2 */
2593                 baser = its->tables[2].val;
2594                 if (!(baser & GITS_BASER_VALID))
2595                         continue;
2596
2597                 return its;
2598         }
2599
2600         return NULL;
2601 }
2602
2603 static void its_free_tables(struct its_node *its)
2604 {
2605         int i;
2606
2607         for (i = 0; i < GITS_BASER_NR_REGS; i++) {
2608                 if (its->tables[i].base) {
2609                         its_free_pages(its->tables[i].base, its->tables[i].order);
2610                         its->tables[i].base = NULL;
2611                 }
2612         }
2613 }
2614
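/* Probe the largest ITS page size this BASER accepts: 64K, then 16K, then 4K */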
2615 static int its_probe_baser_psz(struct its_node *its, struct its_baser *baser)
2616 {
2617         u64 psz = SZ_64K;
2618
2619         while (psz) {
2620                 u64 val, gpsz;
2621
2622                 val = its_read_baser(its, baser);
2623                 val &= ~GITS_BASER_PAGE_SIZE_MASK;
2624
2625                 switch (psz) {
2626                 case SZ_64K:
2627                         gpsz = GITS_BASER_PAGE_SIZE_64K;
2628                         break;
2629                 case SZ_16K:
2630                         gpsz = GITS_BASER_PAGE_SIZE_16K;
2631                         break;
2632                 case SZ_4K:
2633                 default:
2634                         gpsz = GITS_BASER_PAGE_SIZE_4K;
2635                         break;
2636                 }
2637
2638                 gpsz >>= GITS_BASER_PAGE_SIZE_SHIFT;
2639
2640                 val |= FIELD_PREP(GITS_BASER_PAGE_SIZE_MASK, gpsz);
2641                 its_write_baser(its, baser, val);
2642
2643                 if (FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser->val) == gpsz)
2644                         break;
2645
2646                 switch (psz) {
2647                 case SZ_64K:
2648                         psz = SZ_16K;
2649                         break;
2650                 case SZ_16K:
2651                         psz = SZ_4K;
2652                         break;
2653                 case SZ_4K:
2654                 default:
2655                         return -1;
2656                 }
2657         }
2658
2659         baser->psz = psz;
2660         return 0;
2661 }
2662
2663 static int its_alloc_tables(struct its_node *its)
2664 {
2665         u64 shr = GITS_BASER_InnerShareable;
2666         u64 cache = GITS_BASER_RaWaWb;
2667         int err, i;
2668
2669         if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375)
2670                 /* erratum 24313: ignore memory access type */
2671                 cache = GITS_BASER_nCnB;
2672
2673         if (its->flags & ITS_FLAGS_FORCE_NON_SHAREABLE) {
2674                 cache = GITS_BASER_nC;
2675                 shr = 0;
2676         }
2677
2678         for (i = 0; i < GITS_BASER_NR_REGS; i++) {
2679                 struct its_baser *baser = its->tables + i;
2680                 u64 val = its_read_baser(its, baser);
2681                 u64 type = GITS_BASER_TYPE(val);
2682                 bool indirect = false;
2683                 u32 order;
2684
2685                 if (type == GITS_BASER_TYPE_NONE)
2686                         continue;
2687
2688                 if (its_probe_baser_psz(its, baser)) {
2689                         its_free_tables(its);
2690                         return -ENXIO;
2691                 }
2692
2693                 order = get_order(baser->psz);
2694
2695                 switch (type) {
2696                 case GITS_BASER_TYPE_DEVICE:
2697                         indirect = its_parse_indirect_baser(its, baser, &order,
2698                                                             device_ids(its));
2699                         break;
2700
2701                 case GITS_BASER_TYPE_VCPU:
2702                         if (is_v4_1(its)) {
2703                                 struct its_node *sibling;
2704
2705                                 WARN_ON(i != 2);
2706                                 if ((sibling = find_sibling_its(its))) {
2707                                         *baser = sibling->tables[2];
2708                                         its_write_baser(its, baser, baser->val);
2709                                         continue;
2710                                 }
2711                         }
2712
2713                         indirect = its_parse_indirect_baser(its, baser, &order,
2714                                                             ITS_MAX_VPEID_BITS);
2715                         break;
2716                 }
2717
2718                 err = its_setup_baser(its, baser, cache, shr, order, indirect);
2719                 if (err < 0) {
2720                         its_free_tables(its);
2721                         return err;
2722                 }
2723
2724                 /* Update settings which will be used for next BASERn */
2725                 cache = baser->val & GITS_BASER_CACHEABILITY_MASK;
2726                 shr = baser->val & GITS_BASER_SHAREABILITY_MASK;
2727         }
2728
2729         return 0;
2730 }
2731
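/*
 * GICv4.1: if an ITS with a matching affinity already has a valid vPE
 * table (GITS_BASER2), derive our GICR_VPROPBASER from it rather than
 * allocating a new L1 table.
 */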
2732 static u64 inherit_vpe_l1_table_from_its(void)
2733 {
2734         struct its_node *its;
2735         u64 val;
2736         u32 aff;
2737
2738         val = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER);
2739         aff = compute_common_aff(val);
2740
2741         list_for_each_entry(its, &its_nodes, entry) {
2742                 u64 baser, addr;
2743
2744                 if (!is_v4_1(its))
2745                         continue;
2746
2747                 if (!FIELD_GET(GITS_TYPER_SVPET, its->typer))
2748                         continue;
2749
2750                 if (aff != compute_its_aff(its))
2751                         continue;
2752
2753                 /* GICv4.1 guarantees that the vPE table is GITS_BASER2 */
2754                 baser = its->tables[2].val;
2755                 if (!(baser & GITS_BASER_VALID))
2756                         continue;
2757
2758                 /* We have a winner! */
2759                 gic_data_rdist()->vpe_l1_base = its->tables[2].base;
2760
2761                 val  = GICR_VPROPBASER_4_1_VALID;
2762                 if (baser & GITS_BASER_INDIRECT)
2763                         val |= GICR_VPROPBASER_4_1_INDIRECT;
2764                 val |= FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE,
2765                                   FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser));
2766                 switch (FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser)) {
2767                 case GIC_PAGE_SIZE_64K:
2768                         addr = GITS_BASER_ADDR_48_to_52(baser);
2769                         break;
2770                 default:
2771                         addr = baser & GENMASK_ULL(47, 12);
2772                         break;
2773                 }
2774                 val |= FIELD_PREP(GICR_VPROPBASER_4_1_ADDR, addr >> 12);
2775                 if (rdists_support_shareable()) {
2776                         val |= FIELD_PREP(GICR_VPROPBASER_SHAREABILITY_MASK,
2777                                           FIELD_GET(GITS_BASER_SHAREABILITY_MASK, baser));
2778                         val |= FIELD_PREP(GICR_VPROPBASER_INNER_CACHEABILITY_MASK,
2779                                           FIELD_GET(GITS_BASER_INNER_CACHEABILITY_MASK, baser));
2780                 }
2781                 val |= FIELD_PREP(GICR_VPROPBASER_4_1_SIZE, GITS_BASER_NR_PAGES(baser) - 1);
2782
2783                 *this_cpu_ptr(&local_4_1_its) = its;
2784                 return val;
2785         }
2786
2787         return 0;
2788 }
2789
2790 static u64 inherit_vpe_l1_table_from_rd(cpumask_t **mask)
2791 {
2792         u32 aff;
2793         u64 val;
2794         int cpu;
2795
2796         val = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER);
2797         aff = compute_common_aff(val);
2798
2799         for_each_possible_cpu(cpu) {
2800                 void __iomem *base = gic_data_rdist_cpu(cpu)->rd_base;
2801
2802                 if (!base || cpu == smp_processor_id())
2803                         continue;
2804
2805                 val = gic_read_typer(base + GICR_TYPER);
2806                 if (aff != compute_common_aff(val))
2807                         continue;
2808
2809                 /*
2810                  * At this point, we have a victim. This particular CPU
2811                  * has already booted, and has an affinity that matches
2812                  * ours wrt CommonLPIAff. Let's use its own VPROPBASER.
2813                  * Make sure we don't write the Z bit in that case.
2814                  */
2815                 val = gicr_read_vpropbaser(base + SZ_128K + GICR_VPROPBASER);
2816                 val &= ~GICR_VPROPBASER_4_1_Z;
2817
2818                 gic_data_rdist()->vpe_l1_base = gic_data_rdist_cpu(cpu)->vpe_l1_base;
2819                 *mask = gic_data_rdist_cpu(cpu)->vpe_table_mask;
2820
2821                 *this_cpu_ptr(&local_4_1_its) = *per_cpu_ptr(&local_4_1_its, cpu);
2822                 return val;
2823         }
2824
2825         return 0;
2826 }
2827
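/*
 * Make sure the L2 vPE table page covering 'id' exists on the given CPU's
 * redistributor (allocating it if needed); with a flat table, just check
 * that the ID fits.
 */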
2828 static bool allocate_vpe_l2_table(int cpu, u32 id)
2829 {
2830         void __iomem *base = gic_data_rdist_cpu(cpu)->rd_base;
2831         unsigned int psz, esz, idx, npg, gpsz;
2832         u64 val;
2833         struct page *page;
2834         __le64 *table;
2835
2836         if (!gic_rdists->has_rvpeid)
2837                 return true;
2838
2839         /* Skip non-present CPUs */
2840         if (!base)
2841                 return true;
2842
2843         val  = gicr_read_vpropbaser(base + SZ_128K + GICR_VPROPBASER);
2844
2845         esz  = FIELD_GET(GICR_VPROPBASER_4_1_ENTRY_SIZE, val) + 1;
2846         gpsz = FIELD_GET(GICR_VPROPBASER_4_1_PAGE_SIZE, val);
2847         npg  = FIELD_GET(GICR_VPROPBASER_4_1_SIZE, val) + 1;
2848
2849         switch (gpsz) {
2850         default:
2851                 WARN_ON(1);
2852                 fallthrough;
2853         case GIC_PAGE_SIZE_4K:
2854                 psz = SZ_4K;
2855                 break;
2856         case GIC_PAGE_SIZE_16K:
2857                 psz = SZ_16K;
2858                 break;
2859         case GIC_PAGE_SIZE_64K:
2860                 psz = SZ_64K;
2861                 break;
2862         }
2863
2864         /* Don't allow vpe_id that exceeds single, flat table limit */
2865         if (!(val & GICR_VPROPBASER_4_1_INDIRECT))
2866                 return (id < (npg * psz / (esz * SZ_8)));
2867
2868         /* Compute 1st level table index & check if that exceeds table limit */
2869         idx = id >> ilog2(psz / (esz * SZ_8));
2870         if (idx >= (npg * psz / GITS_LVL1_ENTRY_SIZE))
2871                 return false;
2872
2873         table = gic_data_rdist_cpu(cpu)->vpe_l1_base;
2874
2875         /* Allocate memory for 2nd level table */
2876         if (!table[idx]) {
2877                 page = its_alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(psz));
2878                 if (!page)
2879                         return false;
2880
2881                 /* Flush Lvl2 table to PoC if hw doesn't support coherency */
2882                 if (!(val & GICR_VPROPBASER_SHAREABILITY_MASK))
2883                         gic_flush_dcache_to_poc(page_address(page), psz);
2884
2885                 table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID);
2886
2887                 /* Flush Lvl1 entry to PoC if hw doesn't support coherency */
2888                 if (!(val & GICR_VPROPBASER_SHAREABILITY_MASK))
2889                         gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE);
2890
2891                 /* Ensure updated table contents are visible to RD hardware */
2892                 dsb(sy);
2893         }
2894
2895         return true;
2896 }
2897
2898 static int allocate_vpe_l1_table(void)
2899 {
2900         void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
2901         u64 val, gpsz, npg, pa;
2902         unsigned int psz = SZ_64K;
2903         unsigned int np, epp, esz;
2904         struct page *page;
2905
2906         if (!gic_rdists->has_rvpeid)
2907                 return 0;
2908
2909         /*
2910          * if VPENDBASER.Valid is set, disable any previously programmed
2911          * VPE by setting PendingLast while clearing Valid. This has the
2912          * effect of making sure no doorbell will be generated and we can
2913          * then safely clear VPROPBASER.Valid.
2914          */
2915         if (gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER) & GICR_VPENDBASER_Valid)
2916                 gicr_write_vpendbaser(GICR_VPENDBASER_PendingLast,
2917                                       vlpi_base + GICR_VPENDBASER);
2918
2919         /*
2920          * If we can inherit the configuration from another RD, let's do
2921          * so. Otherwise, we have to go through the allocation process. We
2922          * assume that all RDs have the exact same requirements, as
2923          * nothing will work otherwise.
2924          */
2925         val = inherit_vpe_l1_table_from_rd(&gic_data_rdist()->vpe_table_mask);
2926         if (val & GICR_VPROPBASER_4_1_VALID)
2927                 goto out;
2928
2929         gic_data_rdist()->vpe_table_mask = kzalloc(sizeof(cpumask_t), GFP_ATOMIC);
2930         if (!gic_data_rdist()->vpe_table_mask)
2931                 return -ENOMEM;
2932
2933         val = inherit_vpe_l1_table_from_its();
2934         if (val & GICR_VPROPBASER_4_1_VALID)
2935                 goto out;
2936
2937         /* First probe the page size */
2938         val = FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE, GIC_PAGE_SIZE_64K);
2939         gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
2940         val = gicr_read_vpropbaser(vlpi_base + GICR_VPROPBASER);
2941         gpsz = FIELD_GET(GICR_VPROPBASER_4_1_PAGE_SIZE, val);
2942         esz = FIELD_GET(GICR_VPROPBASER_4_1_ENTRY_SIZE, val);
2943
2944         switch (gpsz) {
2945         default:
2946                 gpsz = GIC_PAGE_SIZE_4K;
2947                 fallthrough;
2948         case GIC_PAGE_SIZE_4K:
2949                 psz = SZ_4K;
2950                 break;
2951         case GIC_PAGE_SIZE_16K:
2952                 psz = SZ_16K;
2953                 break;
2954         case GIC_PAGE_SIZE_64K:
2955                 psz = SZ_64K;
2956                 break;
2957         }
2958
2959         /*
2960          * Start populating the register from scratch, including RO fields
2961          * (which we want to print in debug cases...)
2962          */
2963         val = 0;
2964         val |= FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE, gpsz);
2965         val |= FIELD_PREP(GICR_VPROPBASER_4_1_ENTRY_SIZE, esz);
2966
2967         /* How many entries per GIC page? */
2968         esz++;
2969         epp = psz / (esz * SZ_8);
2970
2971         /*
2972          * If we need more than just a single L1 page, flag the table
2973          * as indirect and compute the number of required L1 pages.
2974          */
2975         if (epp < ITS_MAX_VPEID) {
2976                 int nl2;
2977
2978                 val |= GICR_VPROPBASER_4_1_INDIRECT;
2979
2980                 /* Number of L2 pages required to cover the VPEID space */
2981                 nl2 = DIV_ROUND_UP(ITS_MAX_VPEID, epp);
2982
2983                 /* Number of L1 pages to point to the L2 pages */
2984                 npg = DIV_ROUND_UP(nl2 * SZ_8, psz);
2985         } else {
2986                 npg = 1;
2987         }
2988
2989         val |= FIELD_PREP(GICR_VPROPBASER_4_1_SIZE, npg - 1);
2990
2991         /* Right, that's the number of CPU pages we need for L1 */
2992         np = DIV_ROUND_UP(npg * psz, PAGE_SIZE);
2993
2994         pr_debug("np = %d, npg = %lld, psz = %d, epp = %d, esz = %d\n",
2995                  np, npg, psz, epp, esz);
2996         page = its_alloc_pages(GFP_ATOMIC | __GFP_ZERO, get_order(np * PAGE_SIZE));
2997         if (!page)
2998                 return -ENOMEM;
2999
3000         gic_data_rdist()->vpe_l1_base = page_address(page);
3001         pa = virt_to_phys(page_address(page));
3002         WARN_ON(!IS_ALIGNED(pa, psz));
3003
3004         val |= FIELD_PREP(GICR_VPROPBASER_4_1_ADDR, pa >> 12);
3005         if (rdists_support_shareable()) {
3006                 val |= GICR_VPROPBASER_RaWb;
3007                 val |= GICR_VPROPBASER_InnerShareable;
3008         }
3009         val |= GICR_VPROPBASER_4_1_Z;
3010         val |= GICR_VPROPBASER_4_1_VALID;
3011
3012 out:
3013         gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
3014         cpumask_set_cpu(smp_processor_id(), gic_data_rdist()->vpe_table_mask);
3015
3016         pr_debug("CPU%d: VPROPBASER = %llx %*pbl\n",
3017                  smp_processor_id(), val,
3018                  cpumask_pr_args(gic_data_rdist()->vpe_table_mask));
3019
3020         return 0;
3021 }
3022
3023 static int its_alloc_collections(struct its_node *its)
3024 {
3025         int i;
3026
3027         its->collections = kcalloc(nr_cpu_ids, sizeof(*its->collections),
3028                                    GFP_KERNEL);
3029         if (!its->collections)
3030                 return -ENOMEM;
3031
3032         for (i = 0; i < nr_cpu_ids; i++)
3033                 its->collections[i].target_address = ~0ULL;
3034
3035         return 0;
3036 }
3037
3038 static struct page *its_allocate_pending_table(gfp_t gfp_flags)
3039 {
3040         struct page *pend_page;
3041
3042         pend_page = its_alloc_pages(gfp_flags | __GFP_ZERO, get_order(LPI_PENDBASE_SZ));
3043         if (!pend_page)
3044                 return NULL;
3045
3046         /* Make sure the GIC will observe the zero-ed page */
3047         gic_flush_dcache_to_poc(page_address(pend_page), LPI_PENDBASE_SZ);
3048
3049         return pend_page;
3050 }
3051
3052 static void its_free_pending_table(struct page *pt)
3053 {
3054         its_free_pages(page_address(pt), get_order(LPI_PENDBASE_SZ));
3055 }
3056
3057 /*
3058  * Booting with kdump and LPIs enabled is generally fine. Any other
3059  * case is wrong in the absence of firmware/EFI support.
3060  */
3061 static bool enabled_lpis_allowed(void)
3062 {
3063         phys_addr_t addr;
3064         u64 val;
3065
3066         /* Check whether the property table is in a reserved region */
3067         val = gicr_read_propbaser(gic_data_rdist_rd_base() + GICR_PROPBASER);
3068         addr = val & GENMASK_ULL(51, 12);
3069
3070         return gic_check_reserved_range(addr, LPI_PROPBASE_SZ);
3071 }
3072
3073 static int __init allocate_lpi_tables(void)
3074 {
3075         u64 val;
3076         int err, cpu;
3077
3078         /*
3079          * If LPIs are enabled while we run this from the boot CPU,
3080          * flag the RD tables as pre-allocated if the stars do align.
3081          */
3082         val = readl_relaxed(gic_data_rdist_rd_base() + GICR_CTLR);
3083         if ((val & GICR_CTLR_ENABLE_LPIS) && enabled_lpis_allowed()) {
3084                 gic_rdists->flags |= (RDIST_FLAGS_RD_TABLES_PREALLOCATED |
3085                                       RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING);
3086                 pr_info("GICv3: Using preallocated redistributor tables\n");
3087         }
3088
3089         err = its_setup_lpi_prop_table();
3090         if (err)
3091                 return err;
3092
3093         /*
3094          * We allocate all the pending tables anyway, as we may have a
3095          * mix of RDs that have had LPIs enabled, and some that
3096          * haven't. We'll free the unused ones as each CPU comes online.
3097          */
3098         for_each_possible_cpu(cpu) {
3099                 struct page *pend_page;
3100
3101                 pend_page = its_allocate_pending_table(GFP_NOWAIT);
3102                 if (!pend_page) {
3103                         pr_err("Failed to allocate PENDBASE for CPU%d\n", cpu);
3104                         return -ENOMEM;
3105                 }
3106
3107                 gic_data_rdist_cpu(cpu)->pend_page = pend_page;
3108         }
3109
3110         return 0;
3111 }
3112
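/*
 * Poll GICR_VPENDBASER until the Dirty bit drops, giving the RD about a
 * second (one million polls with a 1us delay) to finish parsing the
 * virtual pending table. The (possibly still Dirty) value that was read
 * back last is returned to the caller.
 */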
3113 static u64 read_vpend_dirty_clear(void __iomem *vlpi_base)
3114 {
3115         u32 count = 1000000;    /* 1s! */
3116         bool clean;
3117         u64 val;
3118
3119         do {
3120                 val = gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
3121                 clean = !(val & GICR_VPENDBASER_Dirty);
3122                 if (!clean) {
3123                         count--;
3124                         cpu_relax();
3125                         udelay(1);
3126                 }
3127         } while (!clean && count);
3128
3129         if (unlikely(!clean))
3130                 pr_err_ratelimited("ITS virtual pending table not cleaning\n");
3131
3132         return val;
3133 }
3134
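/*
 * Clear GICR_VPENDBASER.Valid (plus whatever is in @clr, OR-ing in @set),
 * then wait for the resulting scan to complete. If Dirty never clears,
 * report PendingLast so that callers err on the side of assuming there
 * are pending interrupts.
 */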
3135 static u64 its_clear_vpend_valid(void __iomem *vlpi_base, u64 clr, u64 set)
3136 {
3137         u64 val;
3138
3139         /* Make sure we wait until the RD is done with the initial scan */
3140         val = read_vpend_dirty_clear(vlpi_base);
3141         val &= ~GICR_VPENDBASER_Valid;
3142         val &= ~clr;
3143         val |= set;
3144         gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
3145
3146         val = read_vpend_dirty_clear(vlpi_base);
3147         if (unlikely(val & GICR_VPENDBASER_Dirty))
3148                 val |= GICR_VPENDBASER_PendingLast;
3149
3150         return val;
3151 }
3152
3153 static void its_cpu_init_lpis(void)
3154 {
3155         void __iomem *rbase = gic_data_rdist_rd_base();
3156         struct page *pend_page;
3157         phys_addr_t paddr;
3158         u64 val, tmp;
3159
3160         if (gic_data_rdist()->flags & RD_LOCAL_LPI_ENABLED)
3161                 return;
3162
3163         val = readl_relaxed(rbase + GICR_CTLR);
3164         if ((gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) &&
3165             (val & GICR_CTLR_ENABLE_LPIS)) {
3166                 /*
3167                  * Check that we get the same property table on all
3168                  * RDs. If we don't, this is hopeless.
3169                  */
3170                 paddr = gicr_read_propbaser(rbase + GICR_PROPBASER);
3171                 paddr &= GENMASK_ULL(51, 12);
3172                 if (WARN_ON(gic_rdists->prop_table_pa != paddr))
3173                         add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
3174
3175                 paddr = gicr_read_pendbaser(rbase + GICR_PENDBASER);
3176                 paddr &= GENMASK_ULL(51, 16);
3177
3178                 WARN_ON(!gic_check_reserved_range(paddr, LPI_PENDBASE_SZ));
3179                 gic_data_rdist()->flags |= RD_LOCAL_PENDTABLE_PREALLOCATED;
3180
3181                 goto out;
3182         }
3183
3184         pend_page = gic_data_rdist()->pend_page;
3185         paddr = page_to_phys(pend_page);
3186
3187         /* set PROPBASE */
3188         val = (gic_rdists->prop_table_pa |
3189                GICR_PROPBASER_InnerShareable |
3190                GICR_PROPBASER_RaWaWb |
3191                ((LPI_NRBITS - 1) & GICR_PROPBASER_IDBITS_MASK));
3192
3193         gicr_write_propbaser(val, rbase + GICR_PROPBASER);
3194         tmp = gicr_read_propbaser(rbase + GICR_PROPBASER);
3195
3196         if (!rdists_support_shareable())
3197                 tmp &= ~GICR_PROPBASER_SHAREABILITY_MASK;
3198
3199         if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) {
3200                 if (!(tmp & GICR_PROPBASER_SHAREABILITY_MASK)) {
3201                         /*
3202                          * The HW reports non-shareable, so we must
3203                          * remove the cacheability attributes as
3204                          * well.
3205                          */
3206                         val &= ~(GICR_PROPBASER_SHAREABILITY_MASK |
3207                                  GICR_PROPBASER_CACHEABILITY_MASK);
3208                         val |= GICR_PROPBASER_nC;
3209                         gicr_write_propbaser(val, rbase + GICR_PROPBASER);
3210                 }
3211                 pr_info_once("GIC: using cache flushing for LPI property table\n");
3212                 gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING;
3213         }
3214
3215         /* set PENDBASE */
3216         val = (page_to_phys(pend_page) |
3217                GICR_PENDBASER_InnerShareable |
3218                GICR_PENDBASER_RaWaWb);
3219
3220         gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
3221         tmp = gicr_read_pendbaser(rbase + GICR_PENDBASER);
3222
3223         if (!rdists_support_shareable())
3224                 tmp &= ~GICR_PENDBASER_SHAREABILITY_MASK;
3225
3226         if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) {
3227                 /*
3228                  * The HW reports non-shareable, so we must remove the
3229                  * cacheability attributes as well.
3230                  */
3231                 val &= ~(GICR_PENDBASER_SHAREABILITY_MASK |
3232                          GICR_PENDBASER_CACHEABILITY_MASK);
3233                 val |= GICR_PENDBASER_nC;
3234                 gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
3235         }
3236
3237         /* Enable LPIs */
3238         val = readl_relaxed(rbase + GICR_CTLR);
3239         val |= GICR_CTLR_ENABLE_LPIS;
3240         writel_relaxed(val, rbase + GICR_CTLR);
3241
3242 out:
3243         if (gic_rdists->has_vlpis && !gic_rdists->has_rvpeid) {
3244                 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
3245
3246                 /*
3247                  * It's possible for a CPU to receive VLPIs before it is
3248                  * scheduled as a vPE, especially for the first CPU, and the
3249                  * VLPI with INTID larger than 2^(IDbits+1) will be considered
3250                  * as out of range and dropped by GIC.
3251                  * So we initialize IDbits to a known value to avoid VLPI drops.
3252                  */
3253                 val = (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
3254                 pr_debug("GICv4: CPU%d: Init IDbits to 0x%llx for GICR_VPROPBASER\n",
3255                         smp_processor_id(), val);
3256                 gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
3257
3258                 /*
3259                  * Also clear Valid bit of GICR_VPENDBASER, in case some
3260                  * ancient programming got left behind and has the potential of
3261                  * corrupting memory.
3262                  */
3263                 val = its_clear_vpend_valid(vlpi_base, 0, 0);
3264         }
3265
3266         if (allocate_vpe_l1_table()) {
3267                 /*
3268                  * If the allocation has failed, we're in massive trouble.
3269                  * Disable direct injection, and pray that no VM was
3270                  * already running...
3271                  */
3272                 gic_rdists->has_rvpeid = false;
3273                 gic_rdists->has_vlpis = false;
3274         }
3275
3276         /* Make sure the GIC has seen the above */
3277         dsb(sy);
3278         gic_data_rdist()->flags |= RD_LOCAL_LPI_ENABLED;
3279         pr_info("GICv3: CPU%d: using %s LPI pending table @%pa\n",
3280                 smp_processor_id(),
3281                 gic_data_rdist()->flags & RD_LOCAL_PENDTABLE_PREALLOCATED ?
3282                 "reserved" : "allocated",
3283                 &paddr);
3284 }
3285
3286 static void its_cpu_init_collection(struct its_node *its)
3287 {
3288         int cpu = smp_processor_id();
3289         u64 target;
3290
3291         /* avoid cross-node collections and their mappings */
3292         if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
3293                 struct device_node *cpu_node;
3294
3295                 cpu_node = of_get_cpu_node(cpu, NULL);
3296                 if (its->numa_node != NUMA_NO_NODE &&
3297                         its->numa_node != of_node_to_nid(cpu_node))
3298                         return;
3299         }
3300
3301         /*
3302          * We now have to bind each collection to its target
3303          * redistributor.
3304          */
3305         if (gic_read_typer(its->base + GITS_TYPER) & GITS_TYPER_PTA) {
3306                 /*
3307                  * This ITS wants the physical address of the
3308                  * redistributor.
3309                  */
3310                 target = gic_data_rdist()->phys_base;
3311         } else {
3312                 /* This ITS wants a linear CPU number. */
3313                 target = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER);
3314                 target = GICR_TYPER_CPU_NUMBER(target) << 16;
3315         }
3316
3317         /* Perform collection mapping */
3318         its->collections[cpu].target_address = target;
3319         its->collections[cpu].col_id = cpu;
3320
3321         its_send_mapc(its, &its->collections[cpu], 1);
3322         its_send_invall(its, &its->collections[cpu]);
3323 }
3324
3325 static void its_cpu_init_collections(void)
3326 {
3327         struct its_node *its;
3328
3329         raw_spin_lock(&its_lock);
3330
3331         list_for_each_entry(its, &its_nodes, entry)
3332                 its_cpu_init_collection(its);
3333
3334         raw_spin_unlock(&its_lock);
3335 }
3336
3337 static struct its_device *its_find_device(struct its_node *its, u32 dev_id)
3338 {
3339         struct its_device *its_dev = NULL, *tmp;
3340         unsigned long flags;
3341
3342         raw_spin_lock_irqsave(&its->lock, flags);
3343
3344         list_for_each_entry(tmp, &its->its_device_list, entry) {
3345                 if (tmp->device_id == dev_id) {
3346                         its_dev = tmp;
3347                         break;
3348                 }
3349         }
3350
3351         raw_spin_unlock_irqrestore(&its->lock, flags);
3352
3353         return its_dev;
3354 }
3355
3356 static struct its_baser *its_get_baser(struct its_node *its, u32 type)
3357 {
3358         int i;
3359
3360         for (i = 0; i < GITS_BASER_NR_REGS; i++) {
3361                 if (GITS_BASER_TYPE(its->tables[i].val) == type)
3362                         return &its->tables[i];
3363         }
3364
3365         return NULL;
3366 }
3367
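/*
 * Make sure the table entry backing @id exists. For a flat table this is
 * a simple bounds check; for a two-level table, the L1 index is
 * id / (psz / esz) and the matching L2 page is allocated on demand.
 * As an illustration only: with a 64kB table page size and 8-byte
 * entries, each L2 page covers 8192 IDs, so idx = id >> 13.
 */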
3368 static bool its_alloc_table_entry(struct its_node *its,
3369                                   struct its_baser *baser, u32 id)
3370 {
3371         struct page *page;
3372         u32 esz, idx;
3373         __le64 *table;
3374
3375         /* Don't allow a device ID that exceeds the single, flat table limit */
3376         esz = GITS_BASER_ENTRY_SIZE(baser->val);
3377         if (!(baser->val & GITS_BASER_INDIRECT))
3378                 return (id < (PAGE_ORDER_TO_SIZE(baser->order) / esz));
3379
3380         /* Compute 1st level table index & check if that exceeds table limit */
3381         idx = id >> ilog2(baser->psz / esz);
3382         if (idx >= (PAGE_ORDER_TO_SIZE(baser->order) / GITS_LVL1_ENTRY_SIZE))
3383                 return false;
3384
3385         table = baser->base;
3386
3387         /* Allocate memory for 2nd level table */
3388         if (!table[idx]) {
3389                 page = its_alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
3390                                             get_order(baser->psz));
3391                 if (!page)
3392                         return false;
3393
3394                 /* Flush Lvl2 table to PoC if hw doesn't support coherency */
3395                 if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
3396                         gic_flush_dcache_to_poc(page_address(page), baser->psz);
3397
3398                 table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID);
3399
3400                 /* Flush Lvl1 entry to PoC if hw doesn't support coherency */
3401                 if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
3402                         gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE);
3403
3404                 /* Ensure updated table contents are visible to ITS hardware */
3405                 dsb(sy);
3406         }
3407
3408         return true;
3409 }
3410
3411 static bool its_alloc_device_table(struct its_node *its, u32 dev_id)
3412 {
3413         struct its_baser *baser;
3414
3415         baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE);
3416
3417         /* Don't allow a device ID that exceeds the ITS hardware limit */
3418         if (!baser)
3419                 return (ilog2(dev_id) < device_ids(its));
3420
3421         return its_alloc_table_entry(its, baser, dev_id);
3422 }
3423
3424 static bool its_alloc_vpe_table(u32 vpe_id)
3425 {
3426         struct its_node *its;
3427         int cpu;
3428
3429         /*
3430          * Make sure the L2 tables are allocated on *all* v4 ITSs. We
3431          * could try and only do it on ITSs corresponding to devices
3432          * that have interrupts targeted at this VPE, but the
3433          * complexity becomes crazy (and you have tons of memory
3434          * anyway, right?).
3435          */
3436         list_for_each_entry(its, &its_nodes, entry) {
3437                 struct its_baser *baser;
3438
3439                 if (!is_v4(its))
3440                         continue;
3441
3442                 baser = its_get_baser(its, GITS_BASER_TYPE_VCPU);
3443                 if (!baser)
3444                         return false;
3445
3446                 if (!its_alloc_table_entry(its, baser, vpe_id))
3447                         return false;
3448         }
3449
3450         /* Not v4.1? No need to iterate over the RDs, so bail out early. */
3451         if (!gic_rdists->has_rvpeid)
3452                 return true;
3453
3454         /*
3455          * Make sure the L2 tables are allocated for all copies of
3456          * the L1 table on *all* v4.1 RDs.
3457          */
3458         for_each_possible_cpu(cpu) {
3459                 if (!allocate_vpe_l2_table(cpu, vpe_id))
3460                         return false;
3461         }
3462
3463         return true;
3464 }
3465
3466 static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
3467                                             int nvecs, bool alloc_lpis)
3468 {
3469         struct its_device *dev;
3470         unsigned long *lpi_map = NULL;
3471         unsigned long flags;
3472         u16 *col_map = NULL;
3473         void *itt;
3474         int lpi_base;
3475         int nr_lpis;
3476         int nr_ites;
3477         int sz;
3478
3479         if (!its_alloc_device_table(its, dev_id))
3480                 return NULL;
3481
3482         if (WARN_ON(!is_power_of_2(nvecs)))
3483                 nvecs = roundup_pow_of_two(nvecs);
3484
3485         /*
3486          * Even if the device wants a single LPI, the ITT must be
3487          * sized as a power of two (and you need at least one bit...).
3488          */
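        /*
         * Worked example (illustrative only): a device asking for 30
         * vectors gets nvecs rounded up to 32; with an ITS reporting
         * 8-byte ITT entries (GITS_TYPER.ITT_entry_size encodes the size
         * minus one), sz = 32 * 8 = 256 bytes, before being raised to at
         * least ITS_ITT_ALIGN below.
         */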
3489         nr_ites = max(2, nvecs);
3490         sz = nr_ites * (FIELD_GET(GITS_TYPER_ITT_ENTRY_SIZE, its->typer) + 1);
3491         sz = max(sz, ITS_ITT_ALIGN);
3492
3493         itt = itt_alloc_pool(its->numa_node, sz);
3494
3495         dev = kzalloc(sizeof(*dev), GFP_KERNEL);
3496
3497         if (alloc_lpis) {
3498                 lpi_map = its_lpi_alloc(nvecs, &lpi_base, &nr_lpis);
3499                 if (lpi_map)
3500                         col_map = kcalloc(nr_lpis, sizeof(*col_map),
3501                                           GFP_KERNEL);
3502         } else {
3503                 col_map = kcalloc(nr_ites, sizeof(*col_map), GFP_KERNEL);
3504                 nr_lpis = 0;
3505                 lpi_base = 0;
3506         }
3507
3508         if (!dev || !itt || !col_map || (!lpi_map && alloc_lpis)) {
3509                 kfree(dev);
3510                 itt_free_pool(itt, sz);
3511                 bitmap_free(lpi_map);
3512                 kfree(col_map);
3513                 return NULL;
3514         }
3515
3516         gic_flush_dcache_to_poc(itt, sz);
3517
3518         dev->its = its;
3519         dev->itt = itt;
3520         dev->itt_sz = sz;
3521         dev->nr_ites = nr_ites;
3522         dev->event_map.lpi_map = lpi_map;
3523         dev->event_map.col_map = col_map;
3524         dev->event_map.lpi_base = lpi_base;
3525         dev->event_map.nr_lpis = nr_lpis;
3526         raw_spin_lock_init(&dev->event_map.vlpi_lock);
3527         dev->device_id = dev_id;
3528         INIT_LIST_HEAD(&dev->entry);
3529
3530         raw_spin_lock_irqsave(&its->lock, flags);
3531         list_add(&dev->entry, &its->its_device_list);
3532         raw_spin_unlock_irqrestore(&its->lock, flags);
3533
3534         /* Map device to its ITT */
3535         its_send_mapd(dev, 1);
3536
3537         return dev;
3538 }
3539
3540 static void its_free_device(struct its_device *its_dev)
3541 {
3542         unsigned long flags;
3543
3544         raw_spin_lock_irqsave(&its_dev->its->lock, flags);
3545         list_del(&its_dev->entry);
3546         raw_spin_unlock_irqrestore(&its_dev->its->lock, flags);
3547         kfree(its_dev->event_map.col_map);
3548         itt_free_pool(its_dev->itt, its_dev->itt_sz);
3549         kfree(its_dev);
3550 }
3551
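/*
 * Carve a naturally aligned, power-of-two sized block of events out of
 * the device's LPI map. For example (illustrative only), a request for
 * 3 vectors uses get_count_order(3) == 2 and therefore grabs a region
 * of 4 consecutive events.
 */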
3552 static int its_alloc_device_irq(struct its_device *dev, int nvecs, irq_hw_number_t *hwirq)
3553 {
3554         int idx;
3555
3556         /* Find a free LPI region in lpi_map and allocate it. */
3557         idx = bitmap_find_free_region(dev->event_map.lpi_map,
3558                                       dev->event_map.nr_lpis,
3559                                       get_count_order(nvecs));
3560         if (idx < 0)
3561                 return -ENOSPC;
3562
3563         *hwirq = dev->event_map.lpi_base + idx;
3564
3565         return 0;
3566 }
3567
3568 static int its_msi_prepare(struct irq_domain *domain, struct device *dev,
3569                            int nvec, msi_alloc_info_t *info)
3570 {
3571         struct its_node *its;
3572         struct its_device *its_dev;
3573         struct msi_domain_info *msi_info;
3574         u32 dev_id;
3575         int err = 0;
3576
3577         /*
3578          * We ignore "dev" entirely, and rely on the dev_id that has
3579          * been passed via the scratchpad. This limits this domain's
3580          * usefulness to upper layers that definitely know that they
3581          * are built on top of the ITS.
3582          */
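        /*
         * Illustrative only: the MSI layer fronting this domain (PCI or
         * platform glue) is expected to have stashed the device ID
         * beforehand, along the lines of:
         *
         *      info->scratchpad[0].ul = my_resolved_dev_id;
         *
         * where my_resolved_dev_id is a placeholder for however that
         * layer computes the RID/DeviceID.
         */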
3583         dev_id = info->scratchpad[0].ul;
3584
3585         msi_info = msi_get_domain_info(domain);
3586         its = msi_info->data;
3587
3588         if (!gic_rdists->has_direct_lpi &&
3589             vpe_proxy.dev &&
3590             vpe_proxy.dev->its == its &&
3591             dev_id == vpe_proxy.dev->device_id) {
3592                 /* Bad luck. Get yourself a better implementation */
3593                 WARN_ONCE(1, "DevId %x clashes with GICv4 VPE proxy device\n",
3594                           dev_id);
3595                 return -EINVAL;
3596         }
3597
3598         mutex_lock(&its->dev_alloc_lock);
3599         its_dev = its_find_device(its, dev_id);
3600         if (its_dev) {
3601                 /*
3602                  * We have already seen this ID, probably through
3603                  * another alias (PCI bridge of some sort). No need to
3604                  * create the device.
3605                  */
3606                 its_dev->shared = true;
3607                 pr_debug("Reusing ITT for devID %x\n", dev_id);
3608                 goto out;
3609         }
3610
3611         its_dev = its_create_device(its, dev_id, nvec, true);
3612         if (!its_dev) {
3613                 err = -ENOMEM;
3614                 goto out;
3615         }
3616
3617         if (info->flags & MSI_ALLOC_FLAGS_PROXY_DEVICE)
3618                 its_dev->shared = true;
3619
3620         pr_debug("ITT %d entries, %d bits\n", nvec, ilog2(nvec));
3621 out:
3622         mutex_unlock(&its->dev_alloc_lock);
3623         info->scratchpad[0].ptr = its_dev;
3624         return err;
3625 }
3626
3627 static void its_msi_teardown(struct irq_domain *domain, msi_alloc_info_t *info)
3628 {
3629         struct its_device *its_dev = info->scratchpad[0].ptr;
3630
3631         guard(mutex)(&its_dev->its->dev_alloc_lock);
3632
3633         /* If the device is shared, keep everything around */
3634         if (its_dev->shared)
3635                 return;
3636
3637         /* LPIs should have been already unmapped at this stage */
3638         if (WARN_ON_ONCE(!bitmap_empty(its_dev->event_map.lpi_map,
3639                                        its_dev->event_map.nr_lpis)))
3640                 return;
3641
3642         its_lpi_free(its_dev->event_map.lpi_map,
3643                      its_dev->event_map.lpi_base,
3644                      its_dev->event_map.nr_lpis);
3645
3646         /* Unmap device/itt, and get rid of the tracking */
3647         its_send_mapd(its_dev, 0);
3648         its_free_device(its_dev);
3649 }
3650
3651 static struct msi_domain_ops its_msi_domain_ops = {
3652         .msi_prepare    = its_msi_prepare,
3653         .msi_teardown   = its_msi_teardown,
3654 };
3655
3656 static int its_irq_gic_domain_alloc(struct irq_domain *domain,
3657                                     unsigned int virq,
3658                                     irq_hw_number_t hwirq)
3659 {
3660         struct irq_fwspec fwspec;
3661
3662         if (irq_domain_get_of_node(domain->parent)) {
3663                 fwspec.fwnode = domain->parent->fwnode;
3664                 fwspec.param_count = 3;
3665                 fwspec.param[0] = GIC_IRQ_TYPE_LPI;
3666                 fwspec.param[1] = hwirq;
3667                 fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
3668         } else if (is_fwnode_irqchip(domain->parent->fwnode)) {
3669                 fwspec.fwnode = domain->parent->fwnode;
3670                 fwspec.param_count = 2;
3671                 fwspec.param[0] = hwirq;
3672                 fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
3673         } else {
3674                 return -EINVAL;
3675         }
3676
3677         return irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
3678 }
3679
3680 static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
3681                                 unsigned int nr_irqs, void *args)
3682 {
3683         msi_alloc_info_t *info = args;
3684         struct its_device *its_dev = info->scratchpad[0].ptr;
3685         struct its_node *its = its_dev->its;
3686         struct irq_data *irqd;
3687         irq_hw_number_t hwirq;
3688         int err;
3689         int i;
3690
3691         err = its_alloc_device_irq(its_dev, nr_irqs, &hwirq);
3692         if (err)
3693                 return err;
3694
3695         err = iommu_dma_prepare_msi(info->desc, its->get_msi_base(its_dev));
3696         if (err)
3697                 return err;
3698
3699         for (i = 0; i < nr_irqs; i++) {
3700                 err = its_irq_gic_domain_alloc(domain, virq + i, hwirq + i);
3701                 if (err)
3702                         return err;
3703
3704                 irq_domain_set_hwirq_and_chip(domain, virq + i,
3705                                               hwirq + i, &its_irq_chip, its_dev);
3706                 irqd = irq_get_irq_data(virq + i);
3707                 irqd_set_single_target(irqd);
3708                 irqd_set_affinity_on_activate(irqd);
3709                 irqd_set_resend_when_in_progress(irqd);
3710                 pr_debug("ID:%d pID:%d vID:%d\n",
3711                          (int)(hwirq + i - its_dev->event_map.lpi_base),
3712                          (int)(hwirq + i), virq + i);
3713         }
3714
3715         return 0;
3716 }
3717
3718 static int its_irq_domain_activate(struct irq_domain *domain,
3719                                    struct irq_data *d, bool reserve)
3720 {
3721         struct its_device *its_dev = irq_data_get_irq_chip_data(d);
3722         u32 event = its_get_event_id(d);
3723         int cpu;
3724
3725         cpu = its_select_cpu(d, cpu_online_mask);
3726         if (cpu < 0 || cpu >= nr_cpu_ids)
3727                 return -EINVAL;
3728
3729         its_inc_lpi_count(d, cpu);
3730         its_dev->event_map.col_map[event] = cpu;
3731         irq_data_update_effective_affinity(d, cpumask_of(cpu));
3732
3733         /* Map the GIC IRQ and event to the device */
3734         its_send_mapti(its_dev, d->hwirq, event);
3735         return 0;
3736 }
3737
3738 static void its_irq_domain_deactivate(struct irq_domain *domain,
3739                                       struct irq_data *d)
3740 {
3741         struct its_device *its_dev = irq_data_get_irq_chip_data(d);
3742         u32 event = its_get_event_id(d);
3743
3744         its_dec_lpi_count(d, its_dev->event_map.col_map[event]);
3745         /* Stop the delivery of interrupts */
3746         its_send_discard(its_dev, event);
3747 }
3748
3749 static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
3750                                 unsigned int nr_irqs)
3751 {
3752         struct irq_data *d = irq_domain_get_irq_data(domain, virq);
3753         struct its_device *its_dev = irq_data_get_irq_chip_data(d);
3754         int i;
3755
3756         bitmap_release_region(its_dev->event_map.lpi_map,
3757                               its_get_event_id(irq_domain_get_irq_data(domain, virq)),
3758                               get_count_order(nr_irqs));
3759
3760         for (i = 0; i < nr_irqs; i++) {
3761                 struct irq_data *data = irq_domain_get_irq_data(domain,
3762                                                                 virq + i);
3763                 /* Nuke the entry in the domain */
3764                 irq_domain_reset_irq_data(data);
3765         }
3766
3767         irq_domain_free_irqs_parent(domain, virq, nr_irqs);
3768 }
3769
3770 static const struct irq_domain_ops its_domain_ops = {
3771         .select                 = msi_lib_irq_domain_select,
3772         .alloc                  = its_irq_domain_alloc,
3773         .free                   = its_irq_domain_free,
3774         .activate               = its_irq_domain_activate,
3775         .deactivate             = its_irq_domain_deactivate,
3776 };
3777
3778 /*
3779  * This is insane.
3780  *
3781  * If a GICv4.0 doesn't implement Direct LPIs (which is extremely
3782  * likely), the only way to perform an invalidate is to use a fake
3783  * device to issue an INV command, implying that the LPI has first
3784  * been mapped to some event on that device. Since this is not exactly
3785  * cheap, we try to keep that mapping around as long as possible, and
3786  * only issue an UNMAP if we're short on available slots.
3787  *
3788  * Broken by design(tm).
3789  *
3790  * GICv4.1, on the other hand, mandates that we're able to invalidate
3791  * by writing to a MMIO register. It doesn't implement the whole of
3792  * DirectLPI, but that's good enough. And most of the time, we don't
3793  * even have to invalidate anything, as the redistributor can be told
3794  * whether to generate a doorbell or not (we thus leave it enabled,
3795  * always).
3796  */
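/*
 * For illustration only, the GICv4.0 proxy trick described above boils
 * down to the sequence implemented by its_vpe_send_cmd() further down:
 *
 *	raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
 *	its_vpe_db_proxy_map_locked(vpe);
 *	cmd(vpe_proxy.dev, vpe->vpe_proxy_event);
 *	raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
 *
 * with cmd being its_send_inv/its_send_int/its_send_clear, depending on
 * what the caller is after.
 */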
3797 static void its_vpe_db_proxy_unmap_locked(struct its_vpe *vpe)
3798 {
3799         /* GICv4.1 doesn't use a proxy, so nothing to do here */
3800         if (gic_rdists->has_rvpeid)
3801                 return;
3802
3803         /* Already unmapped? */
3804         if (vpe->vpe_proxy_event == -1)
3805                 return;
3806
3807         its_send_discard(vpe_proxy.dev, vpe->vpe_proxy_event);
3808         vpe_proxy.vpes[vpe->vpe_proxy_event] = NULL;
3809
3810         /*
3811          * We don't track empty slots at all, so let's move the
3812          * next_victim pointer if we can quickly reuse that slot
3813          * instead of nuking an existing entry. Not clear that this is
3814          * always a win though, and this might just generate a ripple
3815          * effect... Let's just hope VPEs don't migrate too often.
3816          */
3817         if (vpe_proxy.vpes[vpe_proxy.next_victim])
3818                 vpe_proxy.next_victim = vpe->vpe_proxy_event;
3819
3820         vpe->vpe_proxy_event = -1;
3821 }
3822
3823 static void its_vpe_db_proxy_unmap(struct its_vpe *vpe)
3824 {
3825         /* GICv4.1 doesn't use a proxy, so nothing to do here */
3826         if (gic_rdists->has_rvpeid)
3827                 return;
3828
3829         if (!gic_rdists->has_direct_lpi) {
3830                 unsigned long flags;
3831
3832                 raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
3833                 its_vpe_db_proxy_unmap_locked(vpe);
3834                 raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
3835         }
3836 }
3837
3838 static void its_vpe_db_proxy_map_locked(struct its_vpe *vpe)
3839 {
3840         /* GICv4.1 doesn't use a proxy, so nothing to do here */
3841         if (gic_rdists->has_rvpeid)
3842                 return;
3843
3844         /* Already mapped? */
3845         if (vpe->vpe_proxy_event != -1)
3846                 return;
3847
3848         /* This slot was already allocated. Kick the other VPE out. */
3849         if (vpe_proxy.vpes[vpe_proxy.next_victim])
3850                 its_vpe_db_proxy_unmap_locked(vpe_proxy.vpes[vpe_proxy.next_victim]);
3851
3852         /* Map the new VPE instead */
3853         vpe_proxy.vpes[vpe_proxy.next_victim] = vpe;
3854         vpe->vpe_proxy_event = vpe_proxy.next_victim;
3855         vpe_proxy.next_victim = (vpe_proxy.next_victim + 1) % vpe_proxy.dev->nr_ites;
3856
3857         vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = vpe->col_idx;
3858         its_send_mapti(vpe_proxy.dev, vpe->vpe_db_lpi, vpe->vpe_proxy_event);
3859 }
3860
3861 static void its_vpe_db_proxy_move(struct its_vpe *vpe, int from, int to)
3862 {
3863         unsigned long flags;
3864         struct its_collection *target_col;
3865
3866         /* GICv4.1 doesn't use a proxy, so nothing to do here */
3867         if (gic_rdists->has_rvpeid)
3868                 return;
3869
3870         if (gic_rdists->has_direct_lpi) {
3871                 void __iomem *rdbase;
3872
3873                 rdbase = per_cpu_ptr(gic_rdists->rdist, from)->rd_base;
3874                 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
3875                 wait_for_syncr(rdbase);
3876
3877                 return;
3878         }
3879
3880         raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
3881
3882         its_vpe_db_proxy_map_locked(vpe);
3883
3884         target_col = &vpe_proxy.dev->its->collections[to];
3885         its_send_movi(vpe_proxy.dev, target_col, vpe->vpe_proxy_event);
3886         vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = to;
3887
3888         raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
3889 }
3890
3891 static void its_vpe_4_1_invall_locked(int cpu, struct its_vpe *vpe)
3892 {
3893         void __iomem *rdbase;
3894         u64 val;
3895
3896         val  = GICR_INVALLR_V;
3897         val |= FIELD_PREP(GICR_INVALLR_VPEID, vpe->vpe_id);
3898
3899         guard(raw_spinlock)(&gic_data_rdist_cpu(cpu)->rd_lock);
3900         rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base;
3901         gic_write_lpir(val, rdbase + GICR_INVALLR);
3902         wait_for_syncr(rdbase);
3903 }
3904
3905 static int its_vpe_set_affinity(struct irq_data *d,
3906                                 const struct cpumask *mask_val,
3907                                 bool force)
3908 {
3909         struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
3910         unsigned int from, cpu = nr_cpu_ids;
3911         struct cpumask *table_mask;
3912         struct its_node *its;
3913         unsigned long flags;
3914
3915         /*
3916          * Check if we're racing against a VPE being destroyed, for
3917          * which we don't want to allow a VMOVP.
3918          */
3919         if (!atomic_read(&vpe->vmapp_count)) {
3920                 if (gic_requires_eager_mapping())
3921                         return -EINVAL;
3922
3923                 /*
3924                  * If we lazily map the VPEs, this isn't an error and
3925                  * we can exit cleanly.
3926                  */
3927                 cpu = cpumask_first(mask_val);
3928                 irq_data_update_effective_affinity(d, cpumask_of(cpu));
3929                 return IRQ_SET_MASK_OK_DONE;
3930         }
3931
3932         /*
3933          * Changing affinity is mega expensive, so let's be as lazy as
3934          * we can and only do it if we really have to. Also, if mapped
3935          * into the proxy device, we need to move the doorbell
3936          * interrupt to its new location.
3937          *
3938          * Another thing is that changing the affinity of a vPE affects
3939          * *other interrupts* such as all the vLPIs that are routed to
3940          * this vPE. This means that the irq_desc lock is not enough to
3941          * protect us, and that we must ensure nobody samples vpe->col_idx
3942          * during the update, hence the lock below which must also be
3943          * taken on any vLPI handling path that evaluates vpe->col_idx.
3944          *
3945          * Finally, we must protect ourselves against concurrent updates of
3946          * the mapping state on this VM should the ITS list be in use (see
3947          * the shortcut in its_send_vmovp() otherwise).
3948          */
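        /*
         * Locking note: when the ITS list is in use, vmapp_lock is taken
         * first, and only then is the vPE's location pinned via
         * vpe_to_cpuid_lock() below. Any new code on this path should
         * preserve that ordering.
         */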
3949         if (its_list_map)
3950                 raw_spin_lock(&vpe->its_vm->vmapp_lock);
3951
3952         from = vpe_to_cpuid_lock(vpe, &flags);
3953         table_mask = gic_data_rdist_cpu(from)->vpe_table_mask;
3954
3955         /*
3956          * If we are offered another CPU in the same GICv4.1 ITS
3957          * affinity, pick this one. Otherwise, any CPU will do.
3958          */
3959         if (table_mask)
3960                 cpu = cpumask_any_and(mask_val, table_mask);
3961         if (cpu < nr_cpu_ids) {
3962                 if (cpumask_test_cpu(from, mask_val) &&
3963                     cpumask_test_cpu(from, table_mask))
3964                         cpu = from;
3965         } else {
3966                 cpu = cpumask_first(mask_val);
3967         }
3968
3969         if (from == cpu)
3970                 goto out;
3971
3972         vpe->col_idx = cpu;
3973
3974         its_send_vmovp(vpe);
3975
3976         its = find_4_1_its();
3977         if (its && its->flags & ITS_FLAGS_WORKAROUND_HISILICON_162100801)
3978                 its_vpe_4_1_invall_locked(cpu, vpe);
3979
3980         its_vpe_db_proxy_move(vpe, from, cpu);
3981
3982 out:
3983         irq_data_update_effective_affinity(d, cpumask_of(cpu));
3984         vpe_to_cpuid_unlock(vpe, flags);
3985
3986         if (its_list_map)
3987                 raw_spin_unlock(&vpe->its_vm->vmapp_lock);
3988
3989         return IRQ_SET_MASK_OK_DONE;
3990 }
3991
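/*
 * On RDs that implement GICR_VPENDBASER.Dirty (has_vpend_valid_dirty),
 * wait for the redistributor to finish parsing the virtual pending table
 * after it has been made resident, polling roughly every microsecond for
 * up to 500us.
 */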
3992 static void its_wait_vpt_parse_complete(void)
3993 {
3994         void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
3995         u64 val;
3996
3997         if (!gic_rdists->has_vpend_valid_dirty)
3998                 return;
3999
4000         WARN_ON_ONCE(readq_relaxed_poll_timeout_atomic(vlpi_base + GICR_VPENDBASER,
4001                                                        val,
4002                                                        !(val & GICR_VPENDBASER_Dirty),
4003                                                        1, 500));
4004 }
4005
4006 static void its_vpe_schedule(struct its_vpe *vpe)
4007 {
4008         void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
4009         u64 val;
4010
4011         /* Schedule the VPE */
4012         val  = virt_to_phys(page_address(vpe->its_vm->vprop_page)) &
4013                 GENMASK_ULL(51, 12);
4014         val |= (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
4015         if (rdists_support_shareable()) {
4016                 val |= GICR_VPROPBASER_RaWb;
4017                 val |= GICR_VPROPBASER_InnerShareable;
4018         }
4019         gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
4020
4021         val  = virt_to_phys(page_address(vpe->vpt_page)) &
4022                 GENMASK_ULL(51, 16);
4023         if (rdists_support_shareable()) {
4024                 val |= GICR_VPENDBASER_RaWaWb;
4025                 val |= GICR_VPENDBASER_InnerShareable;
4026         }
4027         /*
4028          * There is no good way of finding out if the pending table is
4029          * empty as we can race against the doorbell interrupt very
4030          * easily. So in the end, vpe->pending_last is only an
4031          * indication that the vcpu has something pending, not one
4032          * that the pending table is empty. A good implementation
4033          * would be able to read its coarse map pretty quickly anyway,
4034          * making this a tolerable issue.
4035          */
4036         val |= GICR_VPENDBASER_PendingLast;
4037         val |= vpe->idai ? GICR_VPENDBASER_IDAI : 0;
4038         val |= GICR_VPENDBASER_Valid;
4039         gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
4040 }
4041
4042 static void its_vpe_deschedule(struct its_vpe *vpe)
4043 {
4044         void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
4045         u64 val;
4046
4047         val = its_clear_vpend_valid(vlpi_base, 0, 0);
4048
4049         vpe->idai = !!(val & GICR_VPENDBASER_IDAI);
4050         vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast);
4051 }
4052
4053 static void its_vpe_invall(struct its_vpe *vpe)
4054 {
4055         struct its_node *its;
4056
4057         guard(raw_spinlock_irqsave)(&vpe->its_vm->vmapp_lock);
4058
4059         list_for_each_entry(its, &its_nodes, entry) {
4060                 if (!is_v4(its))
4061                         continue;
4062
4063                 if (its_list_map && !vpe->its_vm->vlpi_count[its->list_nr])
4064                         continue;
4065
4066                 /*
4067                  * Sending a VINVALL to a single ITS is enough, as all
4068                  * we need is to reach the redistributors.
4069                  */
4070                 its_send_vinvall(its, vpe);
4071                 return;
4072         }
4073 }
4074
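/*
 * Entry point for the GICv4 layer, reached through the standard
 * irq_set_vcpu_affinity() hook on the vPE doorbell interrupt. A rough,
 * illustrative sketch of a caller (not taken from this file):
 *
 *	struct its_cmd_info info = { .cmd_type = SCHEDULE_VPE };
 *
 *	irq_set_vcpu_affinity(vpe->irq, &info);
 */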
4075 static int its_vpe_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
4076 {
4077         struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4078         struct its_cmd_info *info = vcpu_info;
4079
4080         switch (info->cmd_type) {
4081         case SCHEDULE_VPE:
4082                 its_vpe_schedule(vpe);
4083                 return 0;
4084
4085         case DESCHEDULE_VPE:
4086                 its_vpe_deschedule(vpe);
4087                 return 0;
4088
4089         case COMMIT_VPE:
4090                 its_wait_vpt_parse_complete();
4091                 return 0;
4092
4093         case INVALL_VPE:
4094                 its_vpe_invall(vpe);
4095                 return 0;
4096
4097         default:
4098                 return -EINVAL;
4099         }
4100 }
4101
4102 static void its_vpe_send_cmd(struct its_vpe *vpe,
4103                              void (*cmd)(struct its_device *, u32))
4104 {
4105         unsigned long flags;
4106
4107         raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
4108
4109         its_vpe_db_proxy_map_locked(vpe);
4110         cmd(vpe_proxy.dev, vpe->vpe_proxy_event);
4111
4112         raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
4113 }
4114
4115 static void its_vpe_send_inv(struct irq_data *d)
4116 {
4117         struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4118
4119         if (gic_rdists->has_direct_lpi)
4120                 __direct_lpi_inv(d, d->parent_data->hwirq);
4121         else
4122                 its_vpe_send_cmd(vpe, its_send_inv);
4123 }
4124
4125 static void its_vpe_mask_irq(struct irq_data *d)
4126 {
4127         /*
4128          * We need to mask the LPI, which is described by the parent
4129          * irq_data. Instead of calling into the parent (which won't
4130          * exactly do the right thing), let's simply use the
4131          * parent_data pointer. Yes, I'm naughty.
4132          */
4133         lpi_write_config(d->parent_data, LPI_PROP_ENABLED, 0);
4134         its_vpe_send_inv(d);
4135 }
4136
4137 static void its_vpe_unmask_irq(struct irq_data *d)
4138 {
4139         /* Same hack as above... */
4140         lpi_write_config(d->parent_data, 0, LPI_PROP_ENABLED);
4141         its_vpe_send_inv(d);
4142 }
4143
4144 static int its_vpe_set_irqchip_state(struct irq_data *d,
4145                                      enum irqchip_irq_state which,
4146                                      bool state)
4147 {
4148         struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4149
4150         if (which != IRQCHIP_STATE_PENDING)
4151                 return -EINVAL;
4152
4153         if (gic_rdists->has_direct_lpi) {
4154                 void __iomem *rdbase;
4155
4156                 rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
4157                 if (state) {
4158                         gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_SETLPIR);
4159                 } else {
4160                         gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
4161                         wait_for_syncr(rdbase);
4162                 }
4163         } else {
4164                 if (state)
4165                         its_vpe_send_cmd(vpe, its_send_int);
4166                 else
4167                         its_vpe_send_cmd(vpe, its_send_clear);
4168         }
4169
4170         return 0;
4171 }
4172
4173 static int its_vpe_retrigger(struct irq_data *d)
4174 {
4175         return !its_vpe_set_irqchip_state(d, IRQCHIP_STATE_PENDING, true);
4176 }
4177
4178 static struct irq_chip its_vpe_irq_chip = {
4179         .name                   = "GICv4-vpe",
4180         .irq_mask               = its_vpe_mask_irq,
4181         .irq_unmask             = its_vpe_unmask_irq,
4182         .irq_eoi                = irq_chip_eoi_parent,
4183         .irq_set_affinity       = its_vpe_set_affinity,
4184         .irq_retrigger          = its_vpe_retrigger,
4185         .irq_set_irqchip_state  = its_vpe_set_irqchip_state,
4186         .irq_set_vcpu_affinity  = its_vpe_set_vcpu_affinity,
4187 };
4188
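/*
 * Return a GICv4.1-capable ITS: preferably the one cached for this CPU
 * in local_4_1_its, otherwise the first v4.1 entry found on the global
 * its_nodes list (or NULL if there is none).
 */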
4189 static struct its_node *find_4_1_its(void)
4190 {
4191         struct its_node *its = *this_cpu_ptr(&local_4_1_its);
4192
4193         if (!its) {
4194                 list_for_each_entry(its, &its_nodes, entry) {
4195                         if (is_v4_1(its))
4196                                 return its;
4197                 }
4198
4199                 /* Oops? */
4200                 its = NULL;
4201         }
4202
4203         return its;
4204 }
4205
4206 static void its_vpe_4_1_send_inv(struct irq_data *d)
4207 {
4208         struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4209         struct its_node *its;
4210
4211         /*
4212          * GICv4.1 wants doorbells to be invalidated using the
4213          * INVDB command in order to be broadcast to all RDs. Send
4214          * it to the first valid ITS, and let the HW do its magic.
4215          */
4216         its = find_4_1_its();
4217         if (its)
4218                 its_send_invdb(its, vpe);
4219 }
4220
4221 static void its_vpe_4_1_mask_irq(struct irq_data *d)
4222 {
4223         lpi_write_config(d->parent_data, LPI_PROP_ENABLED, 0);
4224         its_vpe_4_1_send_inv(d);
4225 }
4226
4227 static void its_vpe_4_1_unmask_irq(struct irq_data *d)
4228 {
4229         lpi_write_config(d->parent_data, 0, LPI_PROP_ENABLED);
4230         its_vpe_4_1_send_inv(d);
4231 }
4232
4233 static void its_vpe_4_1_schedule(struct its_vpe *vpe,
4234                                  struct its_cmd_info *info)
4235 {
4236         void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
4237         u64 val = 0;
4238
4239         /* Schedule the VPE */
4240         val |= GICR_VPENDBASER_Valid;
4241         val |= info->g0en ? GICR_VPENDBASER_4_1_VGRP0EN : 0;
4242         val |= info->g1en ? GICR_VPENDBASER_4_1_VGRP1EN : 0;
4243         val |= FIELD_PREP(GICR_VPENDBASER_4_1_VPEID, vpe->vpe_id);
4244
4245         gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
4246 }
4247
4248 static void its_vpe_4_1_deschedule(struct its_vpe *vpe,
4249                                    struct its_cmd_info *info)
4250 {
4251         void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
4252         u64 val;
4253
4254         if (info->req_db) {
4255                 unsigned long flags;
4256
4257                 /*
4258                  * vPE is going to block: make the vPE non-resident with
4259                  * PendingLast clear and DB set. The GIC guarantees that if
4260                  * we read-back PendingLast clear, then a doorbell will be
4261                  * delivered when an interrupt comes.
4262                  *
4263                  * Note the locking to deal with the update of pending_last
4264                  * from the doorbell interrupt handler, which can run
4265                  * concurrently.
4266                  */
4267                 raw_spin_lock_irqsave(&vpe->vpe_lock, flags);
4268                 val = its_clear_vpend_valid(vlpi_base,
4269                                             GICR_VPENDBASER_PendingLast,
4270                                             GICR_VPENDBASER_4_1_DB);
4271                 vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast);
4272                 raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags);
4273         } else {
4274                 /*
4275                  * We're not blocking, so just make the vPE non-resident
4276                  * with PendingLast set, indicating that we'll be back.
4277                  */
4278                 val = its_clear_vpend_valid(vlpi_base,
4279                                             0,
4280                                             GICR_VPENDBASER_PendingLast);
4281                 vpe->pending_last = true;
4282         }
4283 }
4284
4285 static void its_vpe_4_1_invall(struct its_vpe *vpe)
4286 {
4287         unsigned long flags;
4288         int cpu;
4289
4290         /* Target the redistributor this vPE is currently known on */
4291         cpu = vpe_to_cpuid_lock(vpe, &flags);
4292         its_vpe_4_1_invall_locked(cpu, vpe);
4293         vpe_to_cpuid_unlock(vpe, flags);
4294 }
4295
4296 static int its_vpe_4_1_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
4297 {
4298         struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4299         struct its_cmd_info *info = vcpu_info;
4300
4301         switch (info->cmd_type) {
4302         case SCHEDULE_VPE:
4303                 its_vpe_4_1_schedule(vpe, info);
4304                 return 0;
4305
4306         case DESCHEDULE_VPE:
4307                 its_vpe_4_1_deschedule(vpe, info);
4308                 return 0;
4309
4310         case COMMIT_VPE:
4311                 its_wait_vpt_parse_complete();
4312                 return 0;
4313
4314         case INVALL_VPE:
4315                 its_vpe_4_1_invall(vpe);
4316                 return 0;
4317
4318         default:
4319                 return -EINVAL;
4320         }
4321 }
4322
4323 static struct irq_chip its_vpe_4_1_irq_chip = {
4324         .name                   = "GICv4.1-vpe",
4325         .irq_mask               = its_vpe_4_1_mask_irq,
4326         .irq_unmask             = its_vpe_4_1_unmask_irq,
4327         .irq_eoi                = irq_chip_eoi_parent,
4328         .irq_set_affinity       = its_vpe_set_affinity,
4329         .irq_set_vcpu_affinity  = its_vpe_4_1_set_vcpu_affinity,
4330 };
4331
4332 static void its_configure_sgi(struct irq_data *d, bool clear)
4333 {
4334         struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4335         struct its_cmd_desc desc;
4336
4337         desc.its_vsgi_cmd.vpe = vpe;
4338         desc.its_vsgi_cmd.sgi = d->hwirq;
4339         desc.its_vsgi_cmd.priority = vpe->sgi_config[d->hwirq].priority;
4340         desc.its_vsgi_cmd.enable = vpe->sgi_config[d->hwirq].enabled;
4341         desc.its_vsgi_cmd.group = vpe->sgi_config[d->hwirq].group;
4342         desc.its_vsgi_cmd.clear = clear;
4343
4344         /*
4345          * GICv4.1 allows us to send VSGI commands to any ITS as long as the
4346          * destination VPE is mapped there. Since we map them eagerly at
4347          * activation time, we're pretty sure the first GICv4.1 ITS will do.
4348          */
4349         its_send_single_vcommand(find_4_1_its(), its_build_vsgi_cmd, &desc);
4350 }
4351
4352 static void its_sgi_mask_irq(struct irq_data *d)
4353 {
4354         struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4355
4356         vpe->sgi_config[d->hwirq].enabled = false;
4357         its_configure_sgi(d, false);
4358 }
4359
4360 static void its_sgi_unmask_irq(struct irq_data *d)
4361 {
4362         struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4363
4364         vpe->sgi_config[d->hwirq].enabled = true;
4365         its_configure_sgi(d, false);
4366 }
4367
4368 static int its_sgi_set_affinity(struct irq_data *d,
4369                                 const struct cpumask *mask_val,
4370                                 bool force)
4371 {
4372         /*
4373          * There is no notion of affinity for virtual SGIs, at least
4374          * not on the host (since they can only be targeting a vPE).
4375          * Tell the kernel we've done whatever it asked for.
4376          */
4377         irq_data_update_effective_affinity(d, mask_val);
4378         return IRQ_SET_MASK_OK;
4379 }
4380
4381 static int its_sgi_set_irqchip_state(struct irq_data *d,
4382                                      enum irqchip_irq_state which,
4383                                      bool state)
4384 {
4385         if (which != IRQCHIP_STATE_PENDING)
4386                 return -EINVAL;
4387
4388         if (state) {
4389                 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4390                 struct its_node *its = find_4_1_its();
4391                 u64 val;
4392
4393                 val  = FIELD_PREP(GITS_SGIR_VPEID, vpe->vpe_id);
4394                 val |= FIELD_PREP(GITS_SGIR_VINTID, d->hwirq);
4395                 writeq_relaxed(val, its->sgir_base + GITS_SGIR - SZ_128K);
4396         } else {
4397                 its_configure_sgi(d, true);
4398         }
4399
4400         return 0;
4401 }
4402
4403 static int its_sgi_get_irqchip_state(struct irq_data *d,
4404                                      enum irqchip_irq_state which, bool *val)
4405 {
4406         struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4407         void __iomem *base;
4408         unsigned long flags;
4409         u32 count = 1000000;    /* 1s! */
4410         u32 status;
4411         int cpu;
4412
4413         if (which != IRQCHIP_STATE_PENDING)
4414                 return -EINVAL;
4415
4416         /*
4417          * Locking galore! We can race against two different events:
4418          *
4419          * - Concurrent vPE affinity change: we must make sure it cannot
4420          *   happen, or we'll talk to the wrong redistributor. This is
4421          *   identical to what happens with vLPIs.
4422          *
4423          * - Concurrent VSGIPENDR access: As it involves accessing two
4424          *   MMIO registers, this must be made atomic one way or another.
4425          */
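        /*
         * Resulting order, also used by its_vpe_4_1_invall(): pin the
         * vPE's CPU via vpe_to_cpuid_lock() first, then take that RD's
         * rd_lock for the VSGIR/VSGIPENDR register pair.
         */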
4426         cpu = vpe_to_cpuid_lock(vpe, &flags);
4427         raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock);
4428         base = gic_data_rdist_cpu(cpu)->rd_base + SZ_128K;
4429         writel_relaxed(vpe->vpe_id, base + GICR_VSGIR);
4430         do {
4431                 status = readl_relaxed(base + GICR_VSGIPENDR);
4432                 if (!(status & GICR_VSGIPENDR_BUSY))
4433                         goto out;
4434
4435                 count--;
4436                 if (!count) {
4437                         pr_err_ratelimited("Unable to get SGI status\n");
4438                         goto out;
4439                 }
4440                 cpu_relax();
4441                 udelay(1);
4442         } while (count);
4443
4444 out:
4445         raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock);
4446         vpe_to_cpuid_unlock(vpe, flags);
4447
4448         if (!count)
4449                 return -ENXIO;
4450
4451         *val = !!(status & (1 << d->hwirq));
4452
4453         return 0;
4454 }
4455
4456 static int its_sgi_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
4457 {
4458         struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4459         struct its_cmd_info *info = vcpu_info;
4460
4461         switch (info->cmd_type) {
4462         case PROP_UPDATE_VSGI:
4463                 vpe->sgi_config[d->hwirq].priority = info->priority;
4464                 vpe->sgi_config[d->hwirq].group = info->group;
4465                 its_configure_sgi(d, false);
4466                 return 0;
4467
4468         default:
4469                 return -EINVAL;
4470         }
4471 }
4472
4473 static struct irq_chip its_sgi_irq_chip = {
4474         .name                   = "GICv4.1-sgi",
4475         .irq_mask               = its_sgi_mask_irq,
4476         .irq_unmask             = its_sgi_unmask_irq,
4477         .irq_set_affinity       = its_sgi_set_affinity,
4478         .irq_set_irqchip_state  = its_sgi_set_irqchip_state,
4479         .irq_get_irqchip_state  = its_sgi_get_irqchip_state,
4480         .irq_set_vcpu_affinity  = its_sgi_set_vcpu_affinity,
4481 };
4482
4483 static int its_sgi_irq_domain_alloc(struct irq_domain *domain,
4484                                     unsigned int virq, unsigned int nr_irqs,
4485                                     void *args)
4486 {
4487         struct its_vpe *vpe = args;
4488         int i;
4489
4490         /* Yes, we do want 16 SGIs */
4491         WARN_ON(nr_irqs != 16);
4492
4493         for (i = 0; i < 16; i++) {
4494                 vpe->sgi_config[i].priority = 0;
4495                 vpe->sgi_config[i].enabled = false;
4496                 vpe->sgi_config[i].group = false;
4497
4498                 irq_domain_set_hwirq_and_chip(domain, virq + i, i,
4499                                               &its_sgi_irq_chip, vpe);
4500                 irq_set_status_flags(virq + i, IRQ_DISABLE_UNLAZY);
4501         }
4502
4503         return 0;
4504 }
4505
4506 static void its_sgi_irq_domain_free(struct irq_domain *domain,
4507                                     unsigned int virq,
4508                                     unsigned int nr_irqs)
4509 {
4510         /* Nothing to do */
4511 }
4512
4513 static int its_sgi_irq_domain_activate(struct irq_domain *domain,
4514                                        struct irq_data *d, bool reserve)
4515 {
4516         /* Write out the initial SGI configuration */
4517         its_configure_sgi(d, false);
4518         return 0;
4519 }
4520
4521 static void its_sgi_irq_domain_deactivate(struct irq_domain *domain,
4522                                           struct irq_data *d)
4523 {
4524         struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4525
4526         /*
4527          * The VSGI command is awkward:
4528          *
4529          * - To change the configuration, CLEAR must be set to false,
4530          *   leaving the pending bit unchanged.
4531          * - To clear the pending bit, CLEAR must be set to true, leaving
4532          *   the configuration unchanged.
4533          *
4534          * You just can't do both at once, hence the two commands below.
4535          */
4536         vpe->sgi_config[d->hwirq].enabled = false;
4537         its_configure_sgi(d, false);
4538         its_configure_sgi(d, true);
4539 }
4540
4541 static const struct irq_domain_ops its_sgi_domain_ops = {
4542         .alloc          = its_sgi_irq_domain_alloc,
4543         .free           = its_sgi_irq_domain_free,
4544         .activate       = its_sgi_irq_domain_activate,
4545         .deactivate     = its_sgi_irq_domain_deactivate,
4546 };
4547
4548 static int its_vpe_id_alloc(void)
4549 {
4550         return ida_alloc_max(&its_vpeid_ida, ITS_MAX_VPEID - 1, GFP_KERNEL);
4551 }
4552
4553 static void its_vpe_id_free(u16 id)
4554 {
4555         ida_free(&its_vpeid_ida, id);
4556 }
4557
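/*
 * Allocate the per-vPE resources: a vPE ID, a virtual pending table
 * (VPT) and the matching vPE table entries. Everything is rolled back
 * if any of the allocations fails.
 */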
4558 static int its_vpe_init(struct its_vpe *vpe)
4559 {
4560         struct page *vpt_page;
4561         int vpe_id;
4562
4563         /* Allocate vpe_id */
4564         vpe_id = its_vpe_id_alloc();
4565         if (vpe_id < 0)
4566                 return vpe_id;
4567
4568         /* Allocate VPT */
4569         vpt_page = its_allocate_pending_table(GFP_KERNEL);
4570         if (!vpt_page) {
4571                 its_vpe_id_free(vpe_id);
4572                 return -ENOMEM;
4573         }
4574
4575         if (!its_alloc_vpe_table(vpe_id)) {
4576                 its_vpe_id_free(vpe_id);
4577                 its_free_pending_table(vpt_page);
4578                 return -ENOMEM;
4579         }
4580
4581         raw_spin_lock_init(&vpe->vpe_lock);
4582         vpe->vpe_id = vpe_id;
4583         vpe->vpt_page = vpt_page;
4584         atomic_set(&vpe->vmapp_count, 0);
4585         if (!gic_rdists->has_rvpeid)
4586                 vpe->vpe_proxy_event = -1;
4587
4588         return 0;
4589 }
4590
4591 static void its_vpe_teardown(struct its_vpe *vpe)
4592 {
4593         its_vpe_db_proxy_unmap(vpe);
4594         its_vpe_id_free(vpe->vpe_id);
4595         its_free_pending_table(vpe->vpt_page);
4596 }
4597
4598 static void its_vpe_irq_domain_free(struct irq_domain *domain,
4599                                     unsigned int virq,
4600                                     unsigned int nr_irqs)
4601 {
4602         struct its_vm *vm = domain->host_data;
4603         int i;
4604
4605         irq_domain_free_irqs_parent(domain, virq, nr_irqs);
4606
4607         for (i = 0; i < nr_irqs; i++) {
4608                 struct irq_data *data = irq_domain_get_irq_data(domain,
4609                                                                 virq + i);
4610                 struct its_vpe *vpe = irq_data_get_irq_chip_data(data);
4611
4612                 BUG_ON(vm != vpe->its_vm);
4613
4614                 clear_bit(data->hwirq, vm->db_bitmap);
4615                 its_vpe_teardown(vpe);
4616                 irq_domain_reset_irq_data(data);
4617         }
4618
4619         if (bitmap_empty(vm->db_bitmap, vm->nr_db_lpis)) {
4620                 its_lpi_free(vm->db_bitmap, vm->db_lpi_base, vm->nr_db_lpis);
4621                 its_free_prop_table(vm->vprop_page);
4622         }
4623 }
4624
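/*
 * Allocate nr_irqs vPEs for a VM: grab a block of doorbell LPIs and a
 * virtual property table shared by the whole VM, then initialise each
 * vPE and wire it into the parent GIC domain. Partial failures are
 * unwound via its_vpe_irq_domain_free().
 */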
4625 static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
4626                                     unsigned int nr_irqs, void *args)
4627 {
4628         struct irq_chip *irqchip = &its_vpe_irq_chip;
4629         struct its_vm *vm = args;
4630         unsigned long *bitmap;
4631         struct page *vprop_page;
4632         int base, nr_ids, i, err = 0;
4633
4634         bitmap = its_lpi_alloc(roundup_pow_of_two(nr_irqs), &base, &nr_ids);
4635         if (!bitmap)
4636                 return -ENOMEM;
4637
4638         if (nr_ids < nr_irqs) {
4639                 its_lpi_free(bitmap, base, nr_ids);
4640                 return -ENOMEM;
4641         }
4642
4643         vprop_page = its_allocate_prop_table(GFP_KERNEL);
4644         if (!vprop_page) {
4645                 its_lpi_free(bitmap, base, nr_ids);
4646                 return -ENOMEM;
4647         }
4648
4649         vm->db_bitmap = bitmap;
4650         vm->db_lpi_base = base;
4651         vm->nr_db_lpis = nr_ids;
4652         vm->vprop_page = vprop_page;
4653         raw_spin_lock_init(&vm->vmapp_lock);
4654
4655         if (gic_rdists->has_rvpeid)
4656                 irqchip = &its_vpe_4_1_irq_chip;
4657
4658         for (i = 0; i < nr_irqs; i++) {
4659                 vm->vpes[i]->vpe_db_lpi = base + i;
4660                 err = its_vpe_init(vm->vpes[i]);
4661                 if (err)
4662                         break;
4663                 err = its_irq_gic_domain_alloc(domain, virq + i,
4664                                                vm->vpes[i]->vpe_db_lpi);
4665                 if (err)
4666                         break;
4667                 irq_domain_set_hwirq_and_chip(domain, virq + i, i,
4668                                               irqchip, vm->vpes[i]);
4669                 set_bit(i, bitmap);
4670                 irqd_set_resend_when_in_progress(irq_get_irq_data(virq + i));
4671         }
4672
4673         if (err)
4674                 its_vpe_irq_domain_free(domain, virq, i);
4675
4676         return err;
4677 }
4678
4679 static int its_vpe_irq_domain_activate(struct irq_domain *domain,
4680                                        struct irq_data *d, bool reserve)
4681 {
4682         struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4683         struct its_node *its;
4684
4685         /* Map the VPE to the first possible CPU */
4686         vpe->col_idx = cpumask_first(cpu_online_mask);
4687         irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
4688
4689         /*
4690          * If we use the list map, we issue VMAPP on demand... unless
4691          * we're on GICv4.1, in which case we eagerly map the VPE on
4692          * all ITSs so that VSGIs can work.
4693          */
4694         if (!gic_requires_eager_mapping())
4695                 return 0;
4696
4697         list_for_each_entry(its, &its_nodes, entry) {
4698                 if (!is_v4(its))
4699                         continue;
4700
4701                 its_send_vmapp(its, vpe, true);
4702                 its_send_vinvall(its, vpe);
4703         }
4704
4705         return 0;
4706 }
4707
4708 static void its_vpe_irq_domain_deactivate(struct irq_domain *domain,
4709                                           struct irq_data *d)
4710 {
4711         struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4712         struct its_node *its;
4713
4714         /*
4715          * If we use the list map on GICv4.0, we unmap the VPE once no
4716          * VLPIs are associated with the VM.
4717          */
4718         if (!gic_requires_eager_mapping())
4719                 return;
4720
4721         list_for_each_entry(its, &its_nodes, entry) {
4722                 if (!is_v4(its))
4723                         continue;
4724
4725                 its_send_vmapp(its, vpe, false);
4726         }
4727
4728         /*
4729          * There may be a direct read to the VPT after unmapping the
4730          * vPE; to guarantee that such a read sees valid data, make the
4731          * VPT memory coherent with the CPU caches here.
4732          */
4733         if (find_4_1_its() && !atomic_read(&vpe->vmapp_count))
4734                 gic_flush_dcache_to_poc(page_address(vpe->vpt_page),
4735                                         LPI_PENDBASE_SZ);
4736 }
4737
4738 static const struct irq_domain_ops its_vpe_domain_ops = {
4739         .alloc                  = its_vpe_irq_domain_alloc,
4740         .free                   = its_vpe_irq_domain_free,
4741         .activate               = its_vpe_irq_domain_activate,
4742         .deactivate             = its_vpe_irq_domain_deactivate,
4743 };
4744
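/*
 * Disable the ITS and wait for it to report quiescent, so that
 * GITS_BASER<n> and GITS_CBASER can be written without UNPREDICTABLE
 * behaviour. Gives up after roughly one second.
 */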
4745 static int its_force_quiescent(void __iomem *base)
4746 {
4747         u32 count = 1000000;    /* 1s */
4748         u32 val;
4749
4750         val = readl_relaxed(base + GITS_CTLR);
4751         /*
4752          * The GIC architecture specification requires the ITS to be
4753          * both disabled and quiescent for writes to GITS_BASER<n> or
4754          * GITS_CBASER to not have UNPREDICTABLE results.
4755          */
4756         if ((val & GITS_CTLR_QUIESCENT) && !(val & GITS_CTLR_ENABLE))
4757                 return 0;
4758
4759         /* Disable the generation of all interrupts to this ITS */
4760         val &= ~(GITS_CTLR_ENABLE | GITS_CTLR_ImDe);
4761         writel_relaxed(val, base + GITS_CTLR);
4762
4763         /* Poll GITS_CTLR and wait until ITS becomes quiescent */
4764         while (1) {
4765                 val = readl_relaxed(base + GITS_CTLR);
4766                 if (val & GITS_CTLR_QUIESCENT)
4767                         return 0;
4768
4769                 count--;
4770                 if (!count)
4771                         return -EBUSY;
4772
4773                 cpu_relax();
4774                 udelay(1);
4775         }
4776 }
4777
4778 static bool __maybe_unused its_enable_quirk_cavium_22375(void *data)
4779 {
4780         struct its_node *its = data;
4781
4782         /* erratum 22375: only alloc 8MB table size (20 bits) */
4783         its->typer &= ~GITS_TYPER_DEVBITS;
4784         its->typer |= FIELD_PREP(GITS_TYPER_DEVBITS, 20 - 1);
4785         its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375;
4786
4787         return true;
4788 }
4789
4790 static bool __maybe_unused its_enable_quirk_cavium_23144(void *data)
4791 {
4792         struct its_node *its = data;
4793
4794         its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144;
4795
4796         return true;
4797 }
4798
4799 static bool __maybe_unused its_enable_quirk_qdf2400_e0065(void *data)
4800 {
4801         struct its_node *its = data;
4802
4803         /* On QDF2400, the size of the ITE is 16 bytes */
4804         its->typer &= ~GITS_TYPER_ITT_ENTRY_SIZE;
4805         its->typer |= FIELD_PREP(GITS_TYPER_ITT_ENTRY_SIZE, 16 - 1);
4806
4807         return true;
4808 }
4809
4810 static u64 its_irq_get_msi_base_pre_its(struct its_device *its_dev)
4811 {
4812         struct its_node *its = its_dev->its;
4813
4814         /*
4815          * The Socionext Synquacer SoC has a so-called 'pre-ITS',
4816          * which maps 32-bit writes targeted at a separate window of
4817          * size '4 << device_id_bits' onto writes to GITS_TRANSLATER
4818          * with device ID taken from bits [device_id_bits + 1:2] of
4819          * the window offset.
4820          */
4821         return its->pre_its_base + (its_dev->device_id << 2);
4822 }
4823
4824 static bool __maybe_unused its_enable_quirk_socionext_synquacer(void *data)
4825 {
4826         struct its_node *its = data;
4827         u32 pre_its_window[2];
4828         u32 ids;
4829
4830         if (!fwnode_property_read_u32_array(its->fwnode_handle,
4831                                            "socionext,synquacer-pre-its",
4832                                            pre_its_window,
4833                                            ARRAY_SIZE(pre_its_window))) {
4834
4835                 its->pre_its_base = pre_its_window[0];
4836                 its->get_msi_base = its_irq_get_msi_base_pre_its;
4837
4838                 ids = ilog2(pre_its_window[1]) - 2;
4839                 if (device_ids(its) > ids) {
4840                         its->typer &= ~GITS_TYPER_DEVBITS;
4841                         its->typer |= FIELD_PREP(GITS_TYPER_DEVBITS, ids - 1);
4842                 }
4843
4844                 /* the pre-ITS breaks isolation, so disable MSI remapping */
4845                 its->msi_domain_flags &= ~IRQ_DOMAIN_FLAG_ISOLATED_MSI;
4846                 return true;
4847         }
4848         return false;
4849 }
4850
4851 static bool __maybe_unused its_enable_quirk_hip07_161600802(void *data)
4852 {
4853         struct its_node *its = data;
4854
4855         /*
4856          * Hip07 insists on using the wrong address for the VLPI
4857          * page. Trick it into doing the right thing...
4858          */
4859         its->vlpi_redist_offset = SZ_128K;
4860         return true;
4861 }
4862
4863 static bool __maybe_unused its_enable_rk3588001(void *data)
4864 {
4865         struct its_node *its = data;
4866
4867         if (!of_machine_is_compatible("rockchip,rk3588") &&
4868             !of_machine_is_compatible("rockchip,rk3588s"))
4869                 return false;
4870
4871         its->flags |= ITS_FLAGS_FORCE_NON_SHAREABLE;
4872         gic_rdists->flags |= RDIST_FLAGS_FORCE_NON_SHAREABLE;
4873
4874         return true;
4875 }
4876
4877 static bool its_set_non_coherent(void *data)
4878 {
4879         struct its_node *its = data;
4880
4881         its->flags |= ITS_FLAGS_FORCE_NON_SHAREABLE;
4882         return true;
4883 }
4884
4885 static bool __maybe_unused its_enable_quirk_hip09_162100801(void *data)
4886 {
4887         struct its_node *its = data;
4888
4889         its->flags |= ITS_FLAGS_WORKAROUND_HISILICON_162100801;
4890         return true;
4891 }
4892
4893 static bool __maybe_unused its_enable_rk3568002(void *data)
4894 {
4895         if (!of_machine_is_compatible("rockchip,rk3566") &&
4896             !of_machine_is_compatible("rockchip,rk3568"))
4897                 return false;
4898
4899         gfp_flags_quirk |= GFP_DMA32;
4900
4901         return true;
4902 }
4903
4904 static const struct gic_quirk its_quirks[] = {
4905 #ifdef CONFIG_CAVIUM_ERRATUM_22375
4906         {
4907                 .desc   = "ITS: Cavium errata 22375, 24313",
4908                 .iidr   = 0xa100034c,   /* ThunderX pass 1.x */
4909                 .mask   = 0xffff0fff,
4910                 .init   = its_enable_quirk_cavium_22375,
4911         },
4912 #endif
4913 #ifdef CONFIG_CAVIUM_ERRATUM_23144
4914         {
4915                 .desc   = "ITS: Cavium erratum 23144",
4916                 .iidr   = 0xa100034c,   /* ThunderX pass 1.x */
4917                 .mask   = 0xffff0fff,
4918                 .init   = its_enable_quirk_cavium_23144,
4919         },
4920 #endif
4921 #ifdef CONFIG_QCOM_QDF2400_ERRATUM_0065
4922         {
4923                 .desc   = "ITS: QDF2400 erratum 0065",
4924                 .iidr   = 0x00001070, /* QDF2400 ITS rev 1.x */
4925                 .mask   = 0xffffffff,
4926                 .init   = its_enable_quirk_qdf2400_e0065,
4927         },
4928 #endif
4929 #ifdef CONFIG_SOCIONEXT_SYNQUACER_PREITS
4930         {
4931                 /*
4932                  * The Socionext Synquacer SoC incorporates ARM's own GIC-500
4933                  * implementation, but with a 'pre-ITS' added that requires
4934                  * special handling in software.
4935                  */
4936                 .desc   = "ITS: Socionext Synquacer pre-ITS",
4937                 .iidr   = 0x0001143b,
4938                 .mask   = 0xffffffff,
4939                 .init   = its_enable_quirk_socionext_synquacer,
4940         },
4941 #endif
4942 #ifdef CONFIG_HISILICON_ERRATUM_161600802
4943         {
4944                 .desc   = "ITS: Hip07 erratum 161600802",
4945                 .iidr   = 0x00000004,
4946                 .mask   = 0xffffffff,
4947                 .init   = its_enable_quirk_hip07_161600802,
4948         },
4949 #endif
4950 #ifdef CONFIG_HISILICON_ERRATUM_162100801
4951         {
4952                 .desc   = "ITS: Hip09 erratum 162100801",
4953                 .iidr   = 0x00051736,
4954                 .mask   = 0xffffffff,
4955                 .init   = its_enable_quirk_hip09_162100801,
4956         },
4957 #endif
4958 #ifdef CONFIG_ROCKCHIP_ERRATUM_3588001
4959         {
4960                 .desc   = "ITS: Rockchip erratum RK3588001",
4961                 .iidr   = 0x0201743b,
4962                 .mask   = 0xffffffff,
4963                 .init   = its_enable_rk3588001,
4964         },
4965 #endif
4966         {
4967                 .desc   = "ITS: non-coherent attribute",
4968                 .property = "dma-noncoherent",
4969                 .init   = its_set_non_coherent,
4970         },
4971 #ifdef CONFIG_ROCKCHIP_ERRATUM_3568002
4972         {
4973                 .desc   = "ITS: Rockchip erratum RK3568002",
4974                 .iidr   = 0x0201743b,
4975                 .mask   = 0xffffffff,
4976                 .init   = its_enable_rk3568002,
4977         },
4978 #endif
4979         {
4980         }
4981 };
4982
4983 static void its_enable_quirks(struct its_node *its)
4984 {
4985         u32 iidr = readl_relaxed(its->base + GITS_IIDR);
4986
4987         gic_enable_quirks(iidr, its_quirks, its);
4988
4989         if (is_of_node(its->fwnode_handle))
4990                 gic_enable_of_quirks(to_of_node(its->fwnode_handle),
4991                                      its_quirks, its);
4992 }
4993
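/*
 * syscore suspend hook: save GITS_CTLR/GITS_CBASER and force every ITS
 * quiescent. If any ITS fails to quiesce, re-enable the ones already
 * disabled and abort the suspend.
 */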
4994 static int its_save_disable(void)
4995 {
4996         struct its_node *its;
4997         int err = 0;
4998
4999         raw_spin_lock(&its_lock);
5000         list_for_each_entry(its, &its_nodes, entry) {
5001                 void __iomem *base;
5002
5003                 base = its->base;
5004                 its->ctlr_save = readl_relaxed(base + GITS_CTLR);
5005                 err = its_force_quiescent(base);
5006                 if (err) {
5007                         pr_err("ITS@%pa: failed to quiesce: %d\n",
5008                                &its->phys_base, err);
5009                         writel_relaxed(its->ctlr_save, base + GITS_CTLR);
5010                         goto err;
5011                 }
5012
5013                 its->cbaser_save = gits_read_cbaser(base + GITS_CBASER);
5014         }
5015
5016 err:
5017         if (err) {
5018                 list_for_each_entry_continue_reverse(its, &its_nodes, entry) {
5019                         void __iomem *base;
5020
5021                         base = its->base;
5022                         writel_relaxed(its->ctlr_save, base + GITS_CTLR);
5023                 }
5024         }
5025         raw_spin_unlock(&its_lock);
5026
5027         return err;
5028 }
5029
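/*
 * syscore resume hook: re-quiesce each ITS, restore GITS_CBASER, the
 * cached GITS_BASER<n> values and GITS_CTLR, and reinit the collection
 * table if it lives in the ITS itself.
 */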
5030 static void its_restore_enable(void)
5031 {
5032         struct its_node *its;
5033         int ret;
5034
5035         raw_spin_lock(&its_lock);
5036         list_for_each_entry(its, &its_nodes, entry) {
5037                 void __iomem *base;
5038                 int i;
5039
5040                 base = its->base;
5041
5042                 /*
5043                  * Make sure that the ITS is disabled. If it fails to quiesce,
5044                  * don't restore it since writing to CBASER or BASER<n>
5045                  * registers is undefined according to the GIC v3 ITS
5046                  * Specification.
5047                  *
5048                  * Firmware resuming with the ITS enabled is terminally broken.
5049                  */
5050                 WARN_ON(readl_relaxed(base + GITS_CTLR) & GITS_CTLR_ENABLE);
5051                 ret = its_force_quiescent(base);
5052                 if (ret) {
5053                         pr_err("ITS@%pa: failed to quiesce on resume: %d\n",
5054                                &its->phys_base, ret);
5055                         continue;
5056                 }
5057
5058                 gits_write_cbaser(its->cbaser_save, base + GITS_CBASER);
5059
5060                 /*
5061                  * Writing CBASER resets CREADR to 0, so make CWRITER and
5062                  * cmd_write line up with it.
5063                  */
5064                 its->cmd_write = its->cmd_base;
5065                 gits_write_cwriter(0, base + GITS_CWRITER);
5066
5067                 /* Restore GITS_BASER from the value cache. */
5068                 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
5069                         struct its_baser *baser = &its->tables[i];
5070
5071                         if (!(baser->val & GITS_BASER_VALID))
5072                                 continue;
5073
5074                         its_write_baser(its, baser, baser->val);
5075                 }
5076                 writel_relaxed(its->ctlr_save, base + GITS_CTLR);
5077
5078                 /*
5079                  * Reinit the collection if it is stored in the ITS. This is
5080                  * indicated by the col_id being less than the HCC field
5081                  * (CID < HCC), as specified by the GICv3 architecture.
5082                  */
5083                 if (its->collections[smp_processor_id()].col_id <
5084                     GITS_TYPER_HCC(gic_read_typer(base + GITS_TYPER)))
5085                         its_cpu_init_collection(its);
5086         }
5087         raw_spin_unlock(&its_lock);
5088 }
5089
5090 static struct syscore_ops its_syscore_ops = {
5091         .suspend = its_save_disable,
5092         .resume = its_restore_enable,
5093 };
5094
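/*
 * Map a single ITS register frame, sanity-check the PIDR2 architecture
 * revision and force the ITS quiescent. Returns NULL and sets *err on
 * failure.
 */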
5095 static void __init __iomem *its_map_one(struct resource *res, int *err)
5096 {
5097         void __iomem *its_base;
5098         u32 val;
5099
5100         its_base = ioremap(res->start, SZ_64K);
5101         if (!its_base) {
5102                 pr_warn("ITS@%pa: Unable to map ITS registers\n", &res->start);
5103                 *err = -ENOMEM;
5104                 return NULL;
5105         }
5106
5107         val = readl_relaxed(its_base + GITS_PIDR2) & GIC_PIDR2_ARCH_MASK;
5108         if (val != 0x30 && val != 0x40) {
5109                 pr_warn("ITS@%pa: No ITS detected, giving up\n", &res->start);
5110                 *err = -ENODEV;
5111                 goto out_unmap;
5112         }
5113
5114         *err = its_force_quiescent(its_base);
5115         if (*err) {
5116                 pr_warn("ITS@%pa: Failed to quiesce, giving up\n", &res->start);
5117                 goto out_unmap;
5118         }
5119
5120         return its_base;
5121
5122 out_unmap:
5123         iounmap(its_base);
5124         return NULL;
5125 }
5126
5127 static int its_init_domain(struct its_node *its)
5128 {
5129         struct irq_domain_info dom_info = {
5130                 .fwnode         = its->fwnode_handle,
5131                 .ops            = &its_domain_ops,
5132                 .domain_flags   = its->msi_domain_flags,
5133                 .parent         = its_parent,
5134         };
5135         struct msi_domain_info *info;
5136
5137         info = kzalloc(sizeof(*info), GFP_KERNEL);
5138         if (!info)
5139                 return -ENOMEM;
5140
5141         info->ops = &its_msi_domain_ops;
5142         info->data = its;
5143         dom_info.host_data = info;
5144
5145         if (!msi_create_parent_irq_domain(&dom_info, &gic_v3_its_msi_parent_ops)) {
5146                 kfree(info);
5147                 return -ENOMEM;
5148         }
5149         return 0;
5150 }
5151
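/*
 * Without DirectLPI, vPE doorbell invalidation has to go through a
 * proxy ITS device. Allocate that device here, with (roughly) one
 * event slot per possible CPU.
 */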
5152 static int its_init_vpe_domain(void)
5153 {
5154         struct its_node *its;
5155         u32 devid;
5156         int entries;
5157
5158         if (gic_rdists->has_direct_lpi) {
5159                 pr_info("ITS: Using DirectLPI for VPE invalidation\n");
5160                 return 0;
5161         }
5162
5163         /* Any ITS will do, even if not v4 */
5164         its = list_first_entry(&its_nodes, struct its_node, entry);
5165
5166         entries = roundup_pow_of_two(nr_cpu_ids);
5167         vpe_proxy.vpes = kcalloc(entries, sizeof(*vpe_proxy.vpes),
5168                                  GFP_KERNEL);
5169         if (!vpe_proxy.vpes)
5170                 return -ENOMEM;
5171
5172         /* Use the last possible DevID */
5173         devid = GENMASK(device_ids(its) - 1, 0);
5174         vpe_proxy.dev = its_create_device(its, devid, entries, false);
5175         if (!vpe_proxy.dev) {
5176                 kfree(vpe_proxy.vpes);
5177                 pr_err("ITS: Can't allocate GICv4 proxy device\n");
5178                 return -ENOMEM;
5179         }
5180
5181         BUG_ON(entries > vpe_proxy.dev->nr_ites);
5182
5183         raw_spin_lock_init(&vpe_proxy.lock);
5184         vpe_proxy.next_victim = 0;
5185         pr_info("ITS: Allocated DevID %x as GICv4 proxy device (%d slots)\n",
5186                 devid, vpe_proxy.dev->nr_ites);
5187
5188         return 0;
5189 }
5190
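/*
 * A GICv4 ITS without single-VMOVP support needs a slot in the global
 * ITSList. Pick a free slot, program it into GITS_CTLR, read the value
 * back in case the ITS latched something else, and fail on duplicates.
 */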
5191 static int __init its_compute_its_list_map(struct its_node *its)
5192 {
5193         int its_number;
5194         u32 ctlr;
5195
5196         /*
5197          * This is assumed to be done early enough that we're
5198          * guaranteed to be single-threaded, hence no locking.
5199          * Should that assumption ever stop holding, locking
5200          * will have to be added here.
5201          */
5202         its_number = find_first_zero_bit(&its_list_map, GICv4_ITS_LIST_MAX);
5203         if (its_number >= GICv4_ITS_LIST_MAX) {
5204                 pr_err("ITS@%pa: No ITSList entry available!\n",
5205                        &its->phys_base);
5206                 return -EINVAL;
5207         }
5208
5209         ctlr = readl_relaxed(its->base + GITS_CTLR);
5210         ctlr &= ~GITS_CTLR_ITS_NUMBER;
5211         ctlr |= its_number << GITS_CTLR_ITS_NUMBER_SHIFT;
5212         writel_relaxed(ctlr, its->base + GITS_CTLR);
5213         ctlr = readl_relaxed(its->base + GITS_CTLR);
5214         if ((ctlr & GITS_CTLR_ITS_NUMBER) != (its_number << GITS_CTLR_ITS_NUMBER_SHIFT)) {
5215                 its_number = ctlr & GITS_CTLR_ITS_NUMBER;
5216                 its_number >>= GITS_CTLR_ITS_NUMBER_SHIFT;
5217         }
5218
5219         if (test_and_set_bit(its_number, &its_list_map)) {
5220                 pr_err("ITS@%pa: Duplicate ITSList entry %d\n",
5221                        &its->phys_base, its_number);
5222                 return -EINVAL;
5223         }
5224
5225         return its_number;
5226 }
5227
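/*
 * One-time probe of an ITS: apply quirks, sort out the GICv4/4.1
 * specifics, allocate the command queue, the ITS tables and the
 * collections, program GITS_CBASER, and finally enable the ITS and
 * register its MSI domain.
 */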
5228 static int __init its_probe_one(struct its_node *its)
5229 {
5230         u64 baser, tmp;
5231         struct page *page;
5232         u32 ctlr;
5233         int err;
5234
5235         its_enable_quirks(its);
5236
5237         if (is_v4(its)) {
5238                 if (!(its->typer & GITS_TYPER_VMOVP)) {
5239                         err = its_compute_its_list_map(its);
5240                         if (err < 0)
5241                                 goto out;
5242
5243                         its->list_nr = err;
5244
5245                         pr_info("ITS@%pa: Using ITS number %d\n",
5246                                 &its->phys_base, err);
5247                 } else {
5248                         pr_info("ITS@%pa: Single VMOVP capable\n", &its->phys_base);
5249                 }
5250
5251                 if (is_v4_1(its)) {
5252                         u32 svpet = FIELD_GET(GITS_TYPER_SVPET, its->typer);
5253
5254                         its->sgir_base = ioremap(its->phys_base + SZ_128K, SZ_64K);
5255                         if (!its->sgir_base) {
5256                                 err = -ENOMEM;
5257                                 goto out;
5258                         }
5259
5260                         its->mpidr = readl_relaxed(its->base + GITS_MPIDR);
5261
5262                         pr_info("ITS@%pa: Using GICv4.1 mode %08x %08x\n",
5263                                 &its->phys_base, its->mpidr, svpet);
5264                 }
5265         }
5266
5267         page = its_alloc_pages_node(its->numa_node,
5268                                     GFP_KERNEL | __GFP_ZERO,
5269                                     get_order(ITS_CMD_QUEUE_SZ));
5270         if (!page) {
5271                 err = -ENOMEM;
5272                 goto out_unmap_sgir;
5273         }
5274         its->cmd_base = (void *)page_address(page);
5275         its->cmd_write = its->cmd_base;
5276
5277         err = its_alloc_tables(its);
5278         if (err)
5279                 goto out_free_cmd;
5280
5281         err = its_alloc_collections(its);
5282         if (err)
5283                 goto out_free_tables;
5284
5285         baser = (virt_to_phys(its->cmd_base)    |
5286                  GITS_CBASER_RaWaWb             |
5287                  GITS_CBASER_InnerShareable     |
5288                  (ITS_CMD_QUEUE_SZ / SZ_4K - 1) |
5289                  GITS_CBASER_VALID);
5290
5291         gits_write_cbaser(baser, its->base + GITS_CBASER);
5292         tmp = gits_read_cbaser(its->base + GITS_CBASER);
5293
5294         if (its->flags & ITS_FLAGS_FORCE_NON_SHAREABLE)
5295                 tmp &= ~GITS_CBASER_SHAREABILITY_MASK;
5296
5297         if ((tmp ^ baser) & GITS_CBASER_SHAREABILITY_MASK) {
5298                 if (!(tmp & GITS_CBASER_SHAREABILITY_MASK)) {
5299                         /*
5300                          * The HW reports non-shareable, so we must
5301                          * remove the cacheability attributes as
5302                          * well.
5303                          */
5304                         baser &= ~(GITS_CBASER_SHAREABILITY_MASK |
5305                                    GITS_CBASER_CACHEABILITY_MASK);
5306                         baser |= GITS_CBASER_nC;
5307                         gits_write_cbaser(baser, its->base + GITS_CBASER);
5308                 }
5309                 pr_info("ITS: using cache flushing for cmd queue\n");
5310                 its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING;
5311         }
5312
5313         gits_write_cwriter(0, its->base + GITS_CWRITER);
5314         ctlr = readl_relaxed(its->base + GITS_CTLR);
5315         ctlr |= GITS_CTLR_ENABLE;
5316         if (is_v4(its))
5317                 ctlr |= GITS_CTLR_ImDe;
5318         writel_relaxed(ctlr, its->base + GITS_CTLR);
5319
5320         err = its_init_domain(its);
5321         if (err)
5322                 goto out_free_tables;
5323
5324         raw_spin_lock(&its_lock);
5325         list_add(&its->entry, &its_nodes);
5326         raw_spin_unlock(&its_lock);
5327
5328         return 0;
5329
5330 out_free_tables:
5331         its_free_tables(its);
5332 out_free_cmd:
5333         its_free_pages(its->cmd_base, get_order(ITS_CMD_QUEUE_SZ));
5334 out_unmap_sgir:
5335         if (its->sgir_base)
5336                 iounmap(its->sgir_base);
5337 out:
5338         pr_err("ITS@%pa: failed probing (%d)\n", &its->phys_base, err);
5339         return err;
5340 }
5341
5342 static bool gic_rdists_supports_plpis(void)
5343 {
5344         return !!(gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER) & GICR_TYPER_PLPIS);
5345 }
5346
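/*
 * If the redistributor was handed over with LPIs already enabled (and
 * we're not coming back from CPU hotplug or using preallocated
 * tables), try to turn them off again so that the pending/property
 * tables can be reprogrammed safely.
 */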
5347 static int redist_disable_lpis(void)
5348 {
5349         void __iomem *rbase = gic_data_rdist_rd_base();
5350         u64 timeout = USEC_PER_SEC;
5351         u64 val;
5352
5353         if (!gic_rdists_supports_plpis()) {
5354                 pr_info("CPU%d: LPIs not supported\n", smp_processor_id());
5355                 return -ENXIO;
5356         }
5357
5358         val = readl_relaxed(rbase + GICR_CTLR);
5359         if (!(val & GICR_CTLR_ENABLE_LPIS))
5360                 return 0;
5361
5362         /*
5363          * If coming via a CPU hotplug event, we don't need to disable
5364          * LPIs before trying to re-enable them. They are already
5365          * configured and all is well in the world.
5366          *
5367          * If running with preallocated tables, there is nothing to do.
5368          */
5369         if ((gic_data_rdist()->flags & RD_LOCAL_LPI_ENABLED) ||
5370             (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED))
5371                 return 0;
5372
5373         /*
5374          * From that point on, we only try to do some damage control.
5375          */
5376         pr_warn("GICv3: CPU%d: Booted with LPIs enabled, memory probably corrupted\n",
5377                 smp_processor_id());
5378         add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
5379
5380         /* Disable LPIs */
5381         val &= ~GICR_CTLR_ENABLE_LPIS;
5382         writel_relaxed(val, rbase + GICR_CTLR);
5383
5384         /* Make sure any change to GICR_CTLR is observable by the GIC */
5385         dsb(sy);
5386
5387         /*
5388          * Software must observe RWP==0 after clearing GICR_CTLR.EnableLPIs
5389          * from 1 to 0 before programming GICR_PEND{PROP}BASER registers.
5390          * Error out if we time out waiting for RWP to clear.
5391          */
5392         while (readl_relaxed(rbase + GICR_CTLR) & GICR_CTLR_RWP) {
5393                 if (!timeout) {
5394                         pr_err("CPU%d: Timeout while disabling LPIs\n",
5395                                smp_processor_id());
5396                         return -ETIMEDOUT;
5397                 }
5398                 udelay(1);
5399                 timeout--;
5400         }
5401
5402         /*
5403          * After it has been written to 1, it is IMPLEMENTATION
5404          * DEFINED whether GICR_CTLR.EnableLPIs becomes RES1 or can be
5405          * cleared to 0. Error out if clearing the bit fails.
5406          */
5407         if (readl_relaxed(rbase + GICR_CTLR) & GICR_CTLR_ENABLE_LPIS) {
5408                 pr_err("CPU%d: Failed to disable LPIs\n", smp_processor_id());
5409                 return -EBUSY;
5410         }
5411
5412         return 0;
5413 }
5414
5415 int its_cpu_init(void)
5416 {
5417         if (!list_empty(&its_nodes)) {
5418                 int ret;
5419
5420                 ret = redist_disable_lpis();
5421                 if (ret)
5422                         return ret;
5423
5424                 its_cpu_init_lpis();
5425                 its_cpu_init_collections();
5426         }
5427
5428         return 0;
5429 }
5430
5431 static void rdist_memreserve_cpuhp_cleanup_workfn(struct work_struct *work)
5432 {
5433         cpuhp_remove_state_nocalls(gic_rdists->cpuhp_memreserve_state);
5434         gic_rdists->cpuhp_memreserve_state = CPUHP_INVALID;
5435 }
5436
5437 static DECLARE_WORK(rdist_memreserve_cpuhp_cleanup_work,
5438                     rdist_memreserve_cpuhp_cleanup_workfn);
5439
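/*
 * CPU hotplug callback: if firmware pre-programmed the pending table,
 * free the one we allocated; otherwise reserve its memory so that a
 * later kexec'd kernel won't reuse it.
 */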
5440 static int its_cpu_memreserve_lpi(unsigned int cpu)
5441 {
5442         struct page *pend_page;
5443         int ret = 0;
5444
5445         /* This gets to run exactly once per CPU */
5446         if (gic_data_rdist()->flags & RD_LOCAL_MEMRESERVE_DONE)
5447                 return 0;
5448
5449         pend_page = gic_data_rdist()->pend_page;
5450         if (WARN_ON(!pend_page)) {
5451                 ret = -ENOMEM;
5452                 goto out;
5453         }
5454         /*
5455          * If the pending table was pre-programmed, free the memory we
5456          * preemptively allocated. Otherwise, reserve that memory for
5457          * later kexecs.
5458          */
5459         if (gic_data_rdist()->flags & RD_LOCAL_PENDTABLE_PREALLOCATED) {
5460                 its_free_pending_table(pend_page);
5461                 gic_data_rdist()->pend_page = NULL;
5462         } else {
5463                 phys_addr_t paddr = page_to_phys(pend_page);
5464                 WARN_ON(gic_reserve_range(paddr, LPI_PENDBASE_SZ));
5465         }
5466
5467 out:
5468         /* Last CPU being brought up gets to issue the cleanup */
5469         if (!IS_ENABLED(CONFIG_SMP) ||
5470             cpumask_equal(&cpus_booted_once_mask, cpu_possible_mask))
5471                 schedule_work(&rdist_memreserve_cpuhp_cleanup_work);
5472
5473         gic_data_rdist()->flags |= RD_LOCAL_MEMRESERVE_DONE;
5474         return ret;
5475 }
5476
5477 /* Mark all the BASER registers as invalid before they get reprogrammed */
5478 static int __init its_reset_one(struct resource *res)
5479 {
5480         void __iomem *its_base;
5481         int err, i;
5482
5483         its_base = its_map_one(res, &err);
5484         if (!its_base)
5485                 return err;
5486
5487         for (i = 0; i < GITS_BASER_NR_REGS; i++)
5488                 gits_write_baser(0, its_base + GITS_BASER + (i << 3));
5489
5490         iounmap(its_base);
5491         return 0;
5492 }
5493
5494 static const struct of_device_id its_device_id[] = {
5495         {       .compatible     = "arm,gic-v3-its",     },
5496         {},
5497 };
5498
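/*
 * Map an ITS and allocate a minimally initialised its_node for it. The
 * node is neither probed nor added to the global list at this point;
 * that happens in its_probe_one().
 */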
5499 static struct its_node __init *its_node_init(struct resource *res,
5500                                              struct fwnode_handle *handle, int numa_node)
5501 {
5502         void __iomem *its_base;
5503         struct its_node *its;
5504         int err;
5505
5506         its_base = its_map_one(res, &err);
5507         if (!its_base)
5508                 return NULL;
5509
5510         pr_info("ITS %pR\n", res);
5511
5512         its = kzalloc(sizeof(*its), GFP_KERNEL);
5513         if (!its)
5514                 goto out_unmap;
5515
5516         raw_spin_lock_init(&its->lock);
5517         mutex_init(&its->dev_alloc_lock);
5518         INIT_LIST_HEAD(&its->entry);
5519         INIT_LIST_HEAD(&its->its_device_list);
5520
5521         its->typer = gic_read_typer(its_base + GITS_TYPER);
5522         its->base = its_base;
5523         its->phys_base = res->start;
5524         its->get_msi_base = its_irq_get_msi_base;
5525         its->msi_domain_flags = IRQ_DOMAIN_FLAG_ISOLATED_MSI | IRQ_DOMAIN_FLAG_MSI_IMMUTABLE;
5526
5527         its->numa_node = numa_node;
5528         its->fwnode_handle = handle;
5529
5530         return its;
5531
5532 out_unmap:
5533         iounmap(its_base);
5534         return NULL;
5535 }
5536
5537 static void its_node_destroy(struct its_node *its)
5538 {
5539         iounmap(its->base);
5540         kfree(its);
5541 }
5542
5543 static int __init its_of_probe(struct device_node *node)
5544 {
5545         struct device_node *np;
5546         struct resource res;
5547         int err;
5548
5549         /*
5550          * Make sure *all* the ITS are reset before we probe any, as
5551          * they may be sharing memory. If any of the ITS fails to
5552          * reset, don't even try to go any further, as this could
5553          * result in something even worse.
5554          */
5555         for (np = of_find_matching_node(node, its_device_id); np;
5556              np = of_find_matching_node(np, its_device_id)) {
5557                 if (!of_device_is_available(np) ||
5558                     !of_property_read_bool(np, "msi-controller") ||
5559                     of_address_to_resource(np, 0, &res))
5560                         continue;
5561
5562                 err = its_reset_one(&res);
5563                 if (err)
5564                         return err;
5565         }
5566
5567         for (np = of_find_matching_node(node, its_device_id); np;
5568              np = of_find_matching_node(np, its_device_id)) {
5569                 struct its_node *its;
5570
5571                 if (!of_device_is_available(np))
5572                         continue;
5573                 if (!of_property_read_bool(np, "msi-controller")) {
5574                         pr_warn("%pOF: no msi-controller property, ITS ignored\n",
5575                                 np);
5576                         continue;
5577                 }
5578
5579                 if (of_address_to_resource(np, 0, &res)) {
5580                         pr_warn("%pOF: no regs?\n", np);
5581                         continue;
5582                 }
5583
5584
5585                 its = its_node_init(&res, &np->fwnode, of_node_to_nid(np));
5586                 if (!its)
5587                         return -ENOMEM;
5588
5589                 err = its_probe_one(its);
5590                 if (err)  {
5591                         its_node_destroy(its);
5592                         return err;
5593                 }
5594         }
5595         return 0;
5596 }
5597
5598 #ifdef CONFIG_ACPI
5599
5600 #define ACPI_GICV3_ITS_MEM_SIZE (SZ_128K)
5601
5602 #ifdef CONFIG_ACPI_NUMA
5603 struct its_srat_map {
5604         /* numa node id */
5605         u32     numa_node;
5606         /* GIC ITS ID */
5607         u32     its_id;
5608 };
5609
5610 static struct its_srat_map *its_srat_maps __initdata;
5611 static int its_in_srat __initdata;
5612
5613 static int __init acpi_get_its_numa_node(u32 its_id)
5614 {
5615         int i;
5616
5617         for (i = 0; i < its_in_srat; i++) {
5618                 if (its_id == its_srat_maps[i].its_id)
5619                         return its_srat_maps[i].numa_node;
5620         }
5621         return NUMA_NO_NODE;
5622 }
5623
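/*
 * Dummy handler, only used to count the SRAT ITS affinity entries so
 * that its_srat_maps can be sized before the real parsing pass.
 */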
5624 static int __init gic_acpi_match_srat_its(union acpi_subtable_headers *header,
5625                                           const unsigned long end)
5626 {
5627         return 0;
5628 }
5629
5630 static int __init gic_acpi_parse_srat_its(union acpi_subtable_headers *header,
5631                          const unsigned long end)
5632 {
5633         int node;
5634         struct acpi_srat_gic_its_affinity *its_affinity;
5635
5636         its_affinity = (struct acpi_srat_gic_its_affinity *)header;
5637         if (!its_affinity)
5638                 return -EINVAL;
5639
5640         if (its_affinity->header.length < sizeof(*its_affinity)) {
5641                 pr_err("SRAT: Invalid header length %d in ITS affinity\n",
5642                         its_affinity->header.length);
5643                 return -EINVAL;
5644         }
5645
5646         /*
5647          * Note that in theory a new proximity node could be created by this
5648          * entry as it is an SRAT resource allocation structure.
5649          * We do not currently support doing so.
5650          */
5651         node = pxm_to_node(its_affinity->proximity_domain);
5652
5653         if (node == NUMA_NO_NODE || node >= MAX_NUMNODES) {
5654                 pr_err("SRAT: Invalid NUMA node %d in ITS affinity\n", node);
5655                 return 0;
5656         }
5657
5658         its_srat_maps[its_in_srat].numa_node = node;
5659         its_srat_maps[its_in_srat].its_id = its_affinity->its_id;
5660         its_in_srat++;
5661         pr_info("SRAT: PXM %d -> ITS %d -> Node %d\n",
5662                 its_affinity->proximity_domain, its_affinity->its_id, node);
5663
5664         return 0;
5665 }
5666
5667 static void __init acpi_table_parse_srat_its(void)
5668 {
5669         int count;
5670
5671         count = acpi_table_parse_entries(ACPI_SIG_SRAT,
5672                         sizeof(struct acpi_table_srat),
5673                         ACPI_SRAT_TYPE_GIC_ITS_AFFINITY,
5674                         gic_acpi_match_srat_its, 0);
5675         if (count <= 0)
5676                 return;
5677
5678         its_srat_maps = kmalloc_array(count, sizeof(struct its_srat_map),
5679                                       GFP_KERNEL);
5680         if (!its_srat_maps)
5681                 return;
5682
5683         acpi_table_parse_entries(ACPI_SIG_SRAT,
5684                         sizeof(struct acpi_table_srat),
5685                         ACPI_SRAT_TYPE_GIC_ITS_AFFINITY,
5686                         gic_acpi_parse_srat_its, 0);
5687 }
5688
5689 /* free the its_srat_maps after ITS probing */
5690 static void __init acpi_its_srat_maps_free(void)
5691 {
5692         kfree(its_srat_maps);
5693 }
5694 #else
5695 static void __init acpi_table_parse_srat_its(void)      { }
5696 static int __init acpi_get_its_numa_node(u32 its_id) { return NUMA_NO_NODE; }
5697 static void __init acpi_its_srat_maps_free(void) { }
5698 #endif
5699
5700 static int __init gic_acpi_parse_madt_its(union acpi_subtable_headers *header,
5701                                           const unsigned long end)
5702 {
5703         struct acpi_madt_generic_translator *its_entry;
5704         struct fwnode_handle *dom_handle;
5705         struct its_node *its;
5706         struct resource res;
5707         int err;
5708
5709         its_entry = (struct acpi_madt_generic_translator *)header;
5710         memset(&res, 0, sizeof(res));
5711         res.start = its_entry->base_address;
5712         res.end = its_entry->base_address + ACPI_GICV3_ITS_MEM_SIZE - 1;
5713         res.flags = IORESOURCE_MEM;
5714
5715         dom_handle = irq_domain_alloc_fwnode(&res.start);
5716         if (!dom_handle) {
5717                 pr_err("ITS@%pa: Unable to allocate GICv3 ITS domain token\n",
5718                        &res.start);
5719                 return -ENOMEM;
5720         }
5721
5722         err = iort_register_domain_token(its_entry->translation_id, res.start,
5723                                          dom_handle);
5724         if (err) {
5725                 pr_err("ITS@%pa: Unable to register GICv3 ITS domain token (ITS ID %d) to IORT\n",
5726                        &res.start, its_entry->translation_id);
5727                 goto dom_err;
5728         }
5729
5730         its = its_node_init(&res, dom_handle,
5731                             acpi_get_its_numa_node(its_entry->translation_id));
5732         if (!its) {
5733                 err = -ENOMEM;
5734                 goto node_err;
5735         }
5736
5737         if (acpi_get_madt_revision() >= 7 &&
5738             (its_entry->flags & ACPI_MADT_ITS_NON_COHERENT))
5739                 its->flags |= ITS_FLAGS_FORCE_NON_SHAREABLE;
5740
5741         err = its_probe_one(its);
5742         if (!err)
5743                 return 0;
5744
5745 node_err:
5746         iort_deregister_domain_token(its_entry->translation_id);
5747 dom_err:
5748         irq_domain_free_fwnode(dom_handle);
5749         return err;
5750 }
5751
5752 static int __init its_acpi_reset(union acpi_subtable_headers *header,
5753                                  const unsigned long end)
5754 {
5755         struct acpi_madt_generic_translator *its_entry;
5756         struct resource res;
5757
5758         its_entry = (struct acpi_madt_generic_translator *)header;
5759         res = (struct resource) {
5760                 .start  = its_entry->base_address,
5761                 .end    = its_entry->base_address + ACPI_GICV3_ITS_MEM_SIZE - 1,
5762                 .flags  = IORESOURCE_MEM,
5763         };
5764
5765         return its_reset_one(&res);
5766 }
5767
5768 static void __init its_acpi_probe(void)
5769 {
5770         acpi_table_parse_srat_its();
5771         /*
5772          * Make sure *all* the ITS are reset before we probe any, as
5773          * they may be sharing memory. If any of the ITS fails to
5774          * reset, don't even try to go any further, as this could
5775          * result in something even worse.
5776          */
5777         if (acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR,
5778                                   its_acpi_reset, 0) > 0)
5779                 acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR,
5780                                       gic_acpi_parse_madt_its, 0);
5781         acpi_its_srat_maps_free();
5782 }
5783 #else
5784 static void __init its_acpi_probe(void) { }
5785 #endif
5786
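/*
 * Register the CPU hotplug callback that deals with the LPI pending
 * table reservation. Nothing to do if we're not booted with EFI config
 * tables or if no ITS has been probed.
 */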
5787 int __init its_lpi_memreserve_init(void)
5788 {
5789         int state;
5790
5791         if (!efi_enabled(EFI_CONFIG_TABLES))
5792                 return 0;
5793
5794         if (list_empty(&its_nodes))
5795                 return 0;
5796
5797         gic_rdists->cpuhp_memreserve_state = CPUHP_INVALID;
5798         state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
5799                                   "irqchip/arm/gicv3/memreserve:online",
5800                                   its_cpu_memreserve_lpi,
5801                                   NULL);
5802         if (state < 0)
5803                 return state;
5804
5805         gic_rdists->cpuhp_memreserve_state = state;
5806
5807         return 0;
5808 }
5809
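/*
 * Top-level ITS init, called from the GICv3 driver: probe all ITSs (DT
 * or ACPI), allocate the LPI tables, and set up GICv4 vPE/vSGI support
 * as well as the suspend/resume hooks.
 */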
5810 int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
5811                     struct irq_domain *parent_domain, u8 irq_prio)
5812 {
5813         struct device_node *of_node;
5814         struct its_node *its;
5815         bool has_v4 = false;
5816         bool has_v4_1 = false;
5817         int err;
5818
5819         itt_pool = gen_pool_create(get_order(ITS_ITT_ALIGN), -1);
5820         if (!itt_pool)
5821                 return -ENOMEM;
5822
5823         gic_rdists = rdists;
5824
5825         lpi_prop_prio = irq_prio;
5826         its_parent = parent_domain;
5827         of_node = to_of_node(handle);
5828         if (of_node)
5829                 its_of_probe(of_node);
5830         else
5831                 its_acpi_probe();
5832
5833         if (list_empty(&its_nodes)) {
5834                 pr_warn("ITS: No ITS available, not enabling LPIs\n");
5835                 return -ENXIO;
5836         }
5837
5838         err = allocate_lpi_tables();
5839         if (err)
5840                 return err;
5841
5842         list_for_each_entry(its, &its_nodes, entry) {
5843                 has_v4 |= is_v4(its);
5844                 has_v4_1 |= is_v4_1(its);
5845         }
5846
5847         /* Don't bother with inconsistent systems */
5848         if (WARN_ON(!has_v4_1 && rdists->has_rvpeid))
5849                 rdists->has_rvpeid = false;
5850
5851         if (has_v4 && rdists->has_vlpis) {
5852                 const struct irq_domain_ops *sgi_ops;
5853
5854                 if (has_v4_1)
5855                         sgi_ops = &its_sgi_domain_ops;
5856                 else
5857                         sgi_ops = NULL;
5858
5859                 if (its_init_vpe_domain() ||
5860                     its_init_v4(parent_domain, &its_vpe_domain_ops, sgi_ops)) {
5861                         rdists->has_vlpis = false;
5862                         pr_err("ITS: Disabling GICv4 support\n");
5863                 }
5864         }
5865
5866         register_syscore_ops(&its_syscore_ops);
5867
5868         return 0;
5869 }