// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013-2017 ARM Limited, All Rights Reserved.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/acpi.h>
#include <linux/acpi_iort.h>
#include <linux/bitfield.h>
#include <linux/bitmap.h>
#include <linux/cpu.h>
#include <linux/crash_dump.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/efi.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>

#include <linux/irqchip.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/irqchip/arm-gic-v4.h>

#include <asm/cputype.h>
#include <asm/exception.h>

#include "irq-gic-common.h"

#define ITS_FLAGS_CMDQ_NEEDS_FLUSHING		(1ULL << 0)
#define ITS_FLAGS_WORKAROUND_CAVIUM_22375	(1ULL << 1)
#define ITS_FLAGS_WORKAROUND_CAVIUM_23144	(1ULL << 2)
#define ITS_FLAGS_SAVE_SUSPEND_STATE		(1ULL << 3)

#define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING	(1 << 0)
#define RDIST_FLAGS_RD_TABLES_PREALLOCATED	(1 << 1)

static u32 lpi_id_bits;

/*
 * We allocate memory for PROPBASE to cover 2 ^ lpi_id_bits LPIs to
 * deal with (one configuration byte per interrupt). PENDBASE has to
 * be 64kB aligned (one bit per LPI, plus 8192 bits for SPI/PPI/SGI).
 */
#define LPI_NRBITS		lpi_id_bits
#define LPI_PROPBASE_SZ		ALIGN(BIT(LPI_NRBITS), SZ_64K)
#define LPI_PENDBASE_SZ		ALIGN(BIT(LPI_NRBITS) / 8, SZ_64K)
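/*
 * Worked example (illustrative): with lpi_id_bits == 16,
 * LPI_PROPBASE_SZ is ALIGN(65536, SZ_64K) == 64kB (one config byte
 * per INTID) and LPI_PENDBASE_SZ is ALIGN(65536 / 8, SZ_64K) ==
 * 64kB, i.e. 8kB of pending bits rounded up to the mandatory 64kB
 * alignment.
 */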

#define LPI_PROP_DEFAULT_PRIO	GICD_INT_DEF_PRI

/*
 * Collection structure - just an ID, and a redistributor address to
 * ping. We use one per CPU as a bag of interrupts assigned to this
 * CPU.
 */
struct its_collection {
	u64			target_address;
	u16			col_id;
};

/*
 * The ITS_BASER structure - contains memory information, cached
 * value of BASER register configuration and ITS page size.
 */
struct its_baser {
	void		*base;
	u64		val;
	u32		order;
	u32		psz;
};

struct its_device;

/*
 * The ITS structure - contains most of the infrastructure, with the
 * top-level MSI domain, the command queue, the collections, and the
 * list of devices writing to it.
 *
 * dev_alloc_lock has to be taken for device allocations, while the
 * spinlock must be taken to parse data structures such as the device
 * list.
 */
struct its_node {
	raw_spinlock_t		lock;
	struct mutex		dev_alloc_lock;
	struct list_head	entry;
	void __iomem		*base;
	void __iomem		*sgir_base;
	phys_addr_t		phys_base;
	struct its_cmd_block	*cmd_base;
	struct its_cmd_block	*cmd_write;
	struct its_baser	tables[GITS_BASER_NR_REGS];
	struct its_collection	*collections;
	struct fwnode_handle	*fwnode_handle;
	u64			(*get_msi_base)(struct its_device *its_dev);
	u64			typer;
	u64			cbaser_save;
	u32			ctlr_save;
	u32			mpidr;
	struct list_head	its_device_list;
	u64			flags;
	unsigned long		list_nr;
	int			numa_node;
	unsigned int		msi_domain_flags;
	u32			pre_its_base; /* for Socionext Synquacer */
	int			vlpi_redist_offset;
};

#define is_v4(its)		(!!((its)->typer & GITS_TYPER_VLPIS))
#define is_v4_1(its)		(!!((its)->typer & GITS_TYPER_VMAPP))
#define device_ids(its)		(FIELD_GET(GITS_TYPER_DEVBITS, (its)->typer) + 1)
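/*
 * Note: GITS_TYPER.DEVBITS encodes the number of DeviceID bits
 * minus one, so e.g. DEVBITS == 15 means device_ids() == 16, for
 * up to 65536 addressable devices.
 */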

#define ITS_ITT_ALIGN		SZ_256

/* The maximum number of VPEID bits supported by VLPI commands */
#define ITS_MAX_VPEID_BITS						\
	({								\
		int nvpeid = 16;					\
		if (gic_rdists->has_rvpeid &&				\
		    gic_rdists->gicd_typer2 & GICD_TYPER2_VIL)		\
			nvpeid = 1 + (gic_rdists->gicd_typer2 &		\
				      GICD_TYPER2_VID);			\
									\
		nvpeid;							\
	})
#define ITS_MAX_VPEID		(1 << (ITS_MAX_VPEID_BITS))
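/*
 * Example: a GICv4.0 ITS is therefore capped at 16 VPEID bits
 * (65536 vPEs); a GICv4.1 implementation with GICD_TYPER2.VIL set
 * derives the width from GICD_TYPER2.VID + 1 instead.
 */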

/* Convert page order to size in bytes */
#define PAGE_ORDER_TO_SIZE(o)	(PAGE_SIZE << (o))

struct event_lpi_map {
	unsigned long		*lpi_map;
	u16			*col_map;
	irq_hw_number_t		lpi_base;
	int			nr_lpis;
	raw_spinlock_t		vlpi_lock;
	struct its_vm		*vm;
	struct its_vlpi_map	*vlpi_maps;
	int			nr_vlpis;
};

/*
 * The ITS view of a device - belongs to an ITS, owns an interrupt
 * translation table, and a list of interrupts. If some of its
 * LPIs are injected into a guest (GICv4), the event_map.vm field
 * indicates which one.
 */
struct its_device {
	struct list_head	entry;
	struct its_node		*its;
	struct event_lpi_map	event_map;
	void			*itt;
	u32			nr_ites;
	u32			device_id;
	bool			shared;
};

static struct {
	raw_spinlock_t		lock;
	struct its_device	*dev;
	struct its_vpe		**vpes;
	int			next_victim;
} vpe_proxy;

static LIST_HEAD(its_nodes);
static DEFINE_RAW_SPINLOCK(its_lock);
static struct rdists *gic_rdists;
static struct irq_domain *its_parent;

static unsigned long its_list_map;
static u16 vmovp_seq_num;
static DEFINE_RAW_SPINLOCK(vmovp_lock);

static DEFINE_IDA(its_vpeid_ida);

#define gic_data_rdist()		(raw_cpu_ptr(gic_rdists->rdist))
#define gic_data_rdist_cpu(cpu)		(per_cpu_ptr(gic_rdists->rdist, cpu))
#define gic_data_rdist_rd_base()	(gic_data_rdist()->rd_base)
#define gic_data_rdist_vlpi_base()	(gic_data_rdist_rd_base() + SZ_128K)

static u16 get_its_list(struct its_vm *vm)
{
	struct its_node *its;
	unsigned long its_list = 0;

	list_for_each_entry(its, &its_nodes, entry) {
		if (!is_v4(its))
			continue;

		if (vm->vlpi_count[its->list_nr])
			__set_bit(its->list_nr, &its_list);
	}

	return (u16)its_list;
}

static inline u32 its_get_event_id(struct irq_data *d)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	return d->hwirq - its_dev->event_map.lpi_base;
}

static struct its_collection *dev_event_to_col(struct its_device *its_dev,
					       u32 event)
{
	struct its_node *its = its_dev->its;

	return its->collections + its_dev->event_map.col_map[event];
}

static struct its_vlpi_map *dev_event_to_vlpi_map(struct its_device *its_dev,
						  u32 event)
{
	if (WARN_ON_ONCE(event >= its_dev->event_map.nr_lpis))
		return NULL;

	return &its_dev->event_map.vlpi_maps[event];
}

static struct its_vlpi_map *get_vlpi_map(struct irq_data *d)
{
	if (irqd_is_forwarded_to_vcpu(d)) {
		struct its_device *its_dev = irq_data_get_irq_chip_data(d);
		u32 event = its_get_event_id(d);

		return dev_event_to_vlpi_map(its_dev, event);
	}

	return NULL;
}

static int vpe_to_cpuid_lock(struct its_vpe *vpe, unsigned long *flags)
{
	raw_spin_lock_irqsave(&vpe->vpe_lock, *flags);
	return vpe->col_idx;
}

static void vpe_to_cpuid_unlock(struct its_vpe *vpe, unsigned long flags)
{
	raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags);
}

static int irq_to_cpuid_lock(struct irq_data *d, unsigned long *flags)
{
	struct its_vlpi_map *map = get_vlpi_map(d);
	int cpu;

	if (map) {
		cpu = vpe_to_cpuid_lock(map->vpe, flags);
	} else {
		/* Physical LPIs are already locked via the irq_desc lock */
		struct its_device *its_dev = irq_data_get_irq_chip_data(d);
		cpu = its_dev->event_map.col_map[its_get_event_id(d)];
		/* Keep GCC quiet... */
		*flags = 0;
	}

	return cpu;
}

static void irq_to_cpuid_unlock(struct irq_data *d, unsigned long flags)
{
	struct its_vlpi_map *map = get_vlpi_map(d);

	if (map)
		vpe_to_cpuid_unlock(map->vpe, flags);
}

static struct its_collection *valid_col(struct its_collection *col)
{
	if (WARN_ON_ONCE(col->target_address & GENMASK_ULL(15, 0)))
		return NULL;

	return col;
}

static struct its_vpe *valid_vpe(struct its_node *its, struct its_vpe *vpe)
{
	if (valid_col(its->collections + vpe->col_idx))
		return vpe;

	return NULL;
}

/*
 * ITS command descriptors - parameters to be encoded in a command
 * block.
 */
struct its_cmd_desc {
	union {
		struct {
			struct its_device *dev;
			u32 event_id;
		} its_inv_cmd;

		struct {
			struct its_device *dev;
			u32 event_id;
		} its_clear_cmd;

		struct {
			struct its_device *dev;
			u32 event_id;
		} its_int_cmd;

		struct {
			struct its_device *dev;
			int valid;
		} its_mapd_cmd;

		struct {
			struct its_collection *col;
			int valid;
		} its_mapc_cmd;

		struct {
			struct its_device *dev;
			u32 phys_id;
			u32 event_id;
		} its_mapti_cmd;

		struct {
			struct its_device *dev;
			struct its_collection *col;
			u32 event_id;
		} its_movi_cmd;

		struct {
			struct its_device *dev;
			u32 event_id;
		} its_discard_cmd;

		struct {
			struct its_collection *col;
		} its_invall_cmd;

		struct {
			struct its_vpe *vpe;
		} its_vinvall_cmd;

		struct {
			struct its_vpe *vpe;
			struct its_collection *col;
			bool valid;
		} its_vmapp_cmd;

		struct {
			struct its_vpe *vpe;
			struct its_device *dev;
			u32 virt_id;
			u32 event_id;
			bool db_enabled;
		} its_vmapti_cmd;

		struct {
			struct its_vpe *vpe;
			struct its_device *dev;
			u32 event_id;
			bool db_enabled;
		} its_vmovi_cmd;

		struct {
			struct its_vpe *vpe;
			struct its_collection *col;
			u16 seq_num;
			u16 its_list;
		} its_vmovp_cmd;

		struct {
			struct its_vpe *vpe;
		} its_invdb_cmd;
	};
};

/*
 * The ITS command block, which is what the ITS actually parses.
 */
struct its_cmd_block {
	union {
		u64	raw_cmd[4];
		__le64	raw_cmd_le[4];
	};
};

#define ITS_CMD_QUEUE_SZ		SZ_64K
#define ITS_CMD_QUEUE_NR_ENTRIES	(ITS_CMD_QUEUE_SZ / sizeof(struct its_cmd_block))
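/* A command block is 32 bytes, so the 64kB queue holds 2048 entries */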

typedef struct its_collection *(*its_cmd_builder_t)(struct its_node *,
						    struct its_cmd_block *,
						    struct its_cmd_desc *);

typedef struct its_vpe *(*its_cmd_vbuilder_t)(struct its_node *,
					      struct its_cmd_block *,
					      struct its_cmd_desc *);

static void its_mask_encode(u64 *raw_cmd, u64 val, int h, int l)
{
	u64 mask = GENMASK_ULL(h, l);
	*raw_cmd &= ~mask;
	*raw_cmd |= (val << l) & mask;
}
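/*
 * Example: its_mask_encode(&raw, 0x2a, 15, 8) clears bits [15:8]
 * of *raw and ORs in (0x2a << 8), leaving every other bit intact.
 */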

static void its_encode_cmd(struct its_cmd_block *cmd, u8 cmd_nr)
{
	its_mask_encode(&cmd->raw_cmd[0], cmd_nr, 7, 0);
}

static void its_encode_devid(struct its_cmd_block *cmd, u32 devid)
{
	its_mask_encode(&cmd->raw_cmd[0], devid, 63, 32);
}

static void its_encode_event_id(struct its_cmd_block *cmd, u32 id)
{
	its_mask_encode(&cmd->raw_cmd[1], id, 31, 0);
}

static void its_encode_phys_id(struct its_cmd_block *cmd, u32 phys_id)
{
	its_mask_encode(&cmd->raw_cmd[1], phys_id, 63, 32);
}

static void its_encode_size(struct its_cmd_block *cmd, u8 size)
{
	its_mask_encode(&cmd->raw_cmd[1], size, 4, 0);
}

static void its_encode_itt(struct its_cmd_block *cmd, u64 itt_addr)
{
	its_mask_encode(&cmd->raw_cmd[2], itt_addr >> 8, 51, 8);
}

static void its_encode_valid(struct its_cmd_block *cmd, int valid)
{
	its_mask_encode(&cmd->raw_cmd[2], !!valid, 63, 63);
}

static void its_encode_target(struct its_cmd_block *cmd, u64 target_addr)
{
	its_mask_encode(&cmd->raw_cmd[2], target_addr >> 16, 51, 16);
}

static void its_encode_collection(struct its_cmd_block *cmd, u16 col)
{
	its_mask_encode(&cmd->raw_cmd[2], col, 15, 0);
}

static void its_encode_vpeid(struct its_cmd_block *cmd, u16 vpeid)
{
	its_mask_encode(&cmd->raw_cmd[1], vpeid, 47, 32);
}

static void its_encode_virt_id(struct its_cmd_block *cmd, u32 virt_id)
{
	its_mask_encode(&cmd->raw_cmd[2], virt_id, 31, 0);
}

static void its_encode_db_phys_id(struct its_cmd_block *cmd, u32 db_phys_id)
{
	its_mask_encode(&cmd->raw_cmd[2], db_phys_id, 63, 32);
}

static void its_encode_db_valid(struct its_cmd_block *cmd, bool db_valid)
{
	its_mask_encode(&cmd->raw_cmd[2], db_valid, 0, 0);
}

static void its_encode_seq_num(struct its_cmd_block *cmd, u16 seq_num)
{
	its_mask_encode(&cmd->raw_cmd[0], seq_num, 47, 32);
}

static void its_encode_its_list(struct its_cmd_block *cmd, u16 its_list)
{
	its_mask_encode(&cmd->raw_cmd[1], its_list, 15, 0);
}

static void its_encode_vpt_addr(struct its_cmd_block *cmd, u64 vpt_pa)
{
	its_mask_encode(&cmd->raw_cmd[3], vpt_pa >> 16, 51, 16);
}

static void its_encode_vpt_size(struct its_cmd_block *cmd, u8 vpt_size)
{
	its_mask_encode(&cmd->raw_cmd[3], vpt_size, 4, 0);
}

static void its_encode_vconf_addr(struct its_cmd_block *cmd, u64 vconf_pa)
{
	its_mask_encode(&cmd->raw_cmd[0], vconf_pa >> 16, 51, 16);
}

static void its_encode_alloc(struct its_cmd_block *cmd, bool alloc)
{
	its_mask_encode(&cmd->raw_cmd[0], alloc, 8, 8);
}

static void its_encode_ptz(struct its_cmd_block *cmd, bool ptz)
{
	its_mask_encode(&cmd->raw_cmd[0], ptz, 9, 9);
}

static void its_encode_vmapp_default_db(struct its_cmd_block *cmd,
					u32 vpe_db_lpi)
{
	its_mask_encode(&cmd->raw_cmd[1], vpe_db_lpi, 31, 0);
}

static void its_encode_vmovp_default_db(struct its_cmd_block *cmd,
					u32 vpe_db_lpi)
{
	its_mask_encode(&cmd->raw_cmd[3], vpe_db_lpi, 31, 0);
}

static void its_encode_db(struct its_cmd_block *cmd, bool db)
{
	its_mask_encode(&cmd->raw_cmd[2], db, 63, 63);
}

static inline void its_fixup_cmd(struct its_cmd_block *cmd)
{
	/* Let's fixup BE commands */
	cmd->raw_cmd_le[0] = cpu_to_le64(cmd->raw_cmd[0]);
	cmd->raw_cmd_le[1] = cpu_to_le64(cmd->raw_cmd[1]);
	cmd->raw_cmd_le[2] = cpu_to_le64(cmd->raw_cmd[2]);
	cmd->raw_cmd_le[3] = cpu_to_le64(cmd->raw_cmd[3]);
}

static struct its_collection *its_build_mapd_cmd(struct its_node *its,
						 struct its_cmd_block *cmd,
						 struct its_cmd_desc *desc)
{
	unsigned long itt_addr;
	u8 size = ilog2(desc->its_mapd_cmd.dev->nr_ites);

	itt_addr = virt_to_phys(desc->its_mapd_cmd.dev->itt);
	itt_addr = ALIGN(itt_addr, ITS_ITT_ALIGN);

	its_encode_cmd(cmd, GITS_CMD_MAPD);
	its_encode_devid(cmd, desc->its_mapd_cmd.dev->device_id);
	its_encode_size(cmd, size - 1);
	its_encode_itt(cmd, itt_addr);
	its_encode_valid(cmd, desc->its_mapd_cmd.valid);

	its_fixup_cmd(cmd);

	return NULL;
}

static struct its_collection *its_build_mapc_cmd(struct its_node *its,
						 struct its_cmd_block *cmd,
						 struct its_cmd_desc *desc)
{
	its_encode_cmd(cmd, GITS_CMD_MAPC);
	its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id);
	its_encode_target(cmd, desc->its_mapc_cmd.col->target_address);
	its_encode_valid(cmd, desc->its_mapc_cmd.valid);

	its_fixup_cmd(cmd);

	return desc->its_mapc_cmd.col;
}

static struct its_collection *its_build_mapti_cmd(struct its_node *its,
						  struct its_cmd_block *cmd,
						  struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_mapti_cmd.dev,
			       desc->its_mapti_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_MAPTI);
	its_encode_devid(cmd, desc->its_mapti_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_mapti_cmd.event_id);
	its_encode_phys_id(cmd, desc->its_mapti_cmd.phys_id);
	its_encode_collection(cmd, col->col_id);

	its_fixup_cmd(cmd);

	return valid_col(col);
}

static struct its_collection *its_build_movi_cmd(struct its_node *its,
						 struct its_cmd_block *cmd,
						 struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_movi_cmd.dev,
			       desc->its_movi_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_MOVI);
	its_encode_devid(cmd, desc->its_movi_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_movi_cmd.event_id);
	its_encode_collection(cmd, desc->its_movi_cmd.col->col_id);

	its_fixup_cmd(cmd);

	return valid_col(col);
}

static struct its_collection *its_build_discard_cmd(struct its_node *its,
						    struct its_cmd_block *cmd,
						    struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_discard_cmd.dev,
			       desc->its_discard_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_DISCARD);
	its_encode_devid(cmd, desc->its_discard_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_discard_cmd.event_id);

	its_fixup_cmd(cmd);

	return valid_col(col);
}

static struct its_collection *its_build_inv_cmd(struct its_node *its,
						struct its_cmd_block *cmd,
						struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_inv_cmd.dev,
			       desc->its_inv_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_INV);
	its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_inv_cmd.event_id);

	its_fixup_cmd(cmd);

	return valid_col(col);
}

static struct its_collection *its_build_int_cmd(struct its_node *its,
						struct its_cmd_block *cmd,
						struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_int_cmd.dev,
			       desc->its_int_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_INT);
	its_encode_devid(cmd, desc->its_int_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_int_cmd.event_id);

	its_fixup_cmd(cmd);

	return valid_col(col);
}

static struct its_collection *its_build_clear_cmd(struct its_node *its,
						  struct its_cmd_block *cmd,
						  struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_clear_cmd.dev,
			       desc->its_clear_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_CLEAR);
	its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_clear_cmd.event_id);

	its_fixup_cmd(cmd);

	return valid_col(col);
}

static struct its_collection *its_build_invall_cmd(struct its_node *its,
						   struct its_cmd_block *cmd,
						   struct its_cmd_desc *desc)
{
	its_encode_cmd(cmd, GITS_CMD_INVALL);
	its_encode_collection(cmd, desc->its_invall_cmd.col->col_id);

	its_fixup_cmd(cmd);

	return NULL;
}

static struct its_vpe *its_build_vinvall_cmd(struct its_node *its,
					     struct its_cmd_block *cmd,
					     struct its_cmd_desc *desc)
{
	its_encode_cmd(cmd, GITS_CMD_VINVALL);
	its_encode_vpeid(cmd, desc->its_vinvall_cmd.vpe->vpe_id);

	its_fixup_cmd(cmd);

	return valid_vpe(its, desc->its_vinvall_cmd.vpe);
}

static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
					   struct its_cmd_block *cmd,
					   struct its_cmd_desc *desc)
{
	unsigned long vpt_addr, vconf_addr;
	u64 target;
	bool alloc;

	its_encode_cmd(cmd, GITS_CMD_VMAPP);
	its_encode_vpeid(cmd, desc->its_vmapp_cmd.vpe->vpe_id);
	its_encode_valid(cmd, desc->its_vmapp_cmd.valid);

	if (!desc->its_vmapp_cmd.valid) {
		if (is_v4_1(its)) {
			alloc = !atomic_dec_return(&desc->its_vmapp_cmd.vpe->vmapp_count);
			its_encode_alloc(cmd, alloc);
		}

		goto out;
	}

	vpt_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page));
	target = desc->its_vmapp_cmd.col->target_address + its->vlpi_redist_offset;

	its_encode_target(cmd, target);
	its_encode_vpt_addr(cmd, vpt_addr);
	its_encode_vpt_size(cmd, LPI_NRBITS - 1);

	if (!is_v4_1(its))
		goto out;

	vconf_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->its_vm->vprop_page));

	alloc = !atomic_fetch_inc(&desc->its_vmapp_cmd.vpe->vmapp_count);

	its_encode_alloc(cmd, alloc);

	/* We can only signal PTZ when alloc==1. Why do we have two bits? */
	its_encode_ptz(cmd, alloc);
	its_encode_vconf_addr(cmd, vconf_addr);
	its_encode_vmapp_default_db(cmd, desc->its_vmapp_cmd.vpe->vpe_db_lpi);

out:
	its_fixup_cmd(cmd);

	return valid_vpe(its, desc->its_vmapp_cmd.vpe);
}

static struct its_vpe *its_build_vmapti_cmd(struct its_node *its,
					    struct its_cmd_block *cmd,
					    struct its_cmd_desc *desc)
{
	u32 db;

	if (!is_v4_1(its) && desc->its_vmapti_cmd.db_enabled)
		db = desc->its_vmapti_cmd.vpe->vpe_db_lpi;
	else
		db = 1023;

	its_encode_cmd(cmd, GITS_CMD_VMAPTI);
	its_encode_devid(cmd, desc->its_vmapti_cmd.dev->device_id);
	its_encode_vpeid(cmd, desc->its_vmapti_cmd.vpe->vpe_id);
	its_encode_event_id(cmd, desc->its_vmapti_cmd.event_id);
	its_encode_db_phys_id(cmd, db);
	its_encode_virt_id(cmd, desc->its_vmapti_cmd.virt_id);

	its_fixup_cmd(cmd);

	return valid_vpe(its, desc->its_vmapti_cmd.vpe);
}

static struct its_vpe *its_build_vmovi_cmd(struct its_node *its,
					   struct its_cmd_block *cmd,
					   struct its_cmd_desc *desc)
{
	u32 db;

	if (!is_v4_1(its) && desc->its_vmovi_cmd.db_enabled)
		db = desc->its_vmovi_cmd.vpe->vpe_db_lpi;
	else
		db = 1023;

	its_encode_cmd(cmd, GITS_CMD_VMOVI);
	its_encode_devid(cmd, desc->its_vmovi_cmd.dev->device_id);
	its_encode_vpeid(cmd, desc->its_vmovi_cmd.vpe->vpe_id);
	its_encode_event_id(cmd, desc->its_vmovi_cmd.event_id);
	its_encode_db_phys_id(cmd, db);
	its_encode_db_valid(cmd, true);

	its_fixup_cmd(cmd);

	return valid_vpe(its, desc->its_vmovi_cmd.vpe);
}

static struct its_vpe *its_build_vmovp_cmd(struct its_node *its,
					   struct its_cmd_block *cmd,
					   struct its_cmd_desc *desc)
{
	u64 target;

	target = desc->its_vmovp_cmd.col->target_address + its->vlpi_redist_offset;
	its_encode_cmd(cmd, GITS_CMD_VMOVP);
	its_encode_seq_num(cmd, desc->its_vmovp_cmd.seq_num);
	its_encode_its_list(cmd, desc->its_vmovp_cmd.its_list);
	its_encode_vpeid(cmd, desc->its_vmovp_cmd.vpe->vpe_id);
	its_encode_target(cmd, target);

	if (is_v4_1(its)) {
		its_encode_db(cmd, true);
		its_encode_vmovp_default_db(cmd, desc->its_vmovp_cmd.vpe->vpe_db_lpi);
	}

	its_fixup_cmd(cmd);

	return valid_vpe(its, desc->its_vmovp_cmd.vpe);
}

static struct its_vpe *its_build_vinv_cmd(struct its_node *its,
					  struct its_cmd_block *cmd,
					  struct its_cmd_desc *desc)
{
	struct its_vlpi_map *map;

	map = dev_event_to_vlpi_map(desc->its_inv_cmd.dev,
				    desc->its_inv_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_INV);
	its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_inv_cmd.event_id);

	its_fixup_cmd(cmd);

	return valid_vpe(its, map->vpe);
}

static struct its_vpe *its_build_vint_cmd(struct its_node *its,
					  struct its_cmd_block *cmd,
					  struct its_cmd_desc *desc)
{
	struct its_vlpi_map *map;

	map = dev_event_to_vlpi_map(desc->its_int_cmd.dev,
				    desc->its_int_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_INT);
	its_encode_devid(cmd, desc->its_int_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_int_cmd.event_id);

	its_fixup_cmd(cmd);

	return valid_vpe(its, map->vpe);
}

static struct its_vpe *its_build_vclear_cmd(struct its_node *its,
					    struct its_cmd_block *cmd,
					    struct its_cmd_desc *desc)
{
	struct its_vlpi_map *map;

	map = dev_event_to_vlpi_map(desc->its_clear_cmd.dev,
				    desc->its_clear_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_CLEAR);
	its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_clear_cmd.event_id);

	its_fixup_cmd(cmd);

	return valid_vpe(its, map->vpe);
}

static struct its_vpe *its_build_invdb_cmd(struct its_node *its,
					   struct its_cmd_block *cmd,
					   struct its_cmd_desc *desc)
{
	if (WARN_ON(!is_v4_1(its)))
		return NULL;

	its_encode_cmd(cmd, GITS_CMD_INVDB);
	its_encode_vpeid(cmd, desc->its_invdb_cmd.vpe->vpe_id);

	its_fixup_cmd(cmd);

	return valid_vpe(its, desc->its_invdb_cmd.vpe);
}

static u64 its_cmd_ptr_to_offset(struct its_node *its,
				 struct its_cmd_block *ptr)
{
	return (ptr - its->cmd_base) * sizeof(*ptr);
}

static int its_queue_full(struct its_node *its)
{
	int widx;
	int ridx;

	widx = its->cmd_write - its->cmd_base;
	ridx = readl_relaxed(its->base + GITS_CREADR) / sizeof(struct its_cmd_block);

	/* This is incredibly unlikely to happen, unless the ITS locks up. */
	if (((widx + 1) % ITS_CMD_QUEUE_NR_ENTRIES) == ridx)
		return 1;

	return 0;
}
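/*
 * As with any ring buffer using free-running indices, one slot is
 * sacrificed: the queue reports full when writing one more entry
 * would make the write index collide with the read index, so with
 * 2048 entries at most 2047 commands can be outstanding.
 */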

static struct its_cmd_block *its_allocate_entry(struct its_node *its)
{
	struct its_cmd_block *cmd;
	u32 count = 1000000;	/* 1s! */

	while (its_queue_full(its)) {
		count--;
		if (!count) {
			pr_err_ratelimited("ITS queue not draining\n");
			return NULL;
		}
		cpu_relax();
		udelay(1);
	}

	cmd = its->cmd_write++;

	/* Handle queue wrapping */
	if (its->cmd_write == (its->cmd_base + ITS_CMD_QUEUE_NR_ENTRIES))
		its->cmd_write = its->cmd_base;

	/* Clear command */
	cmd->raw_cmd[0] = 0;
	cmd->raw_cmd[1] = 0;
	cmd->raw_cmd[2] = 0;
	cmd->raw_cmd[3] = 0;

	return cmd;
}

static struct its_cmd_block *its_post_commands(struct its_node *its)
{
	u64 wr = its_cmd_ptr_to_offset(its, its->cmd_write);

	writel_relaxed(wr, its->base + GITS_CWRITER);

	return its->cmd_write;
}

static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd)
{
	/*
	 * Make sure the commands written to memory are observable by
	 * the ITS.
	 */
	if (its->flags & ITS_FLAGS_CMDQ_NEEDS_FLUSHING)
		gic_flush_dcache_to_poc(cmd, sizeof(*cmd));
	else
		dsb(ishst);
}

static int its_wait_for_range_completion(struct its_node *its,
					 u64 prev_idx,
					 struct its_cmd_block *to)
{
	u64 rd_idx, to_idx, linear_idx;
	u32 count = 1000000;	/* 1s! */

	/* Linearize to_idx if the command set has wrapped around */
	to_idx = its_cmd_ptr_to_offset(its, to);
	if (to_idx < prev_idx)
		to_idx += ITS_CMD_QUEUE_SZ;

	linear_idx = prev_idx;

	while (1) {
		s64 delta;

		rd_idx = readl_relaxed(its->base + GITS_CREADR);

		/*
		 * Compute the read pointer progress, taking the
		 * potential wrap-around into account.
		 */
		delta = rd_idx - prev_idx;
		if (rd_idx < prev_idx)
			delta += ITS_CMD_QUEUE_SZ;

		linear_idx += delta;
		if (linear_idx >= to_idx)
			break;

		count--;
		if (!count) {
			pr_err_ratelimited("ITS queue timeout (%llu %llu)\n",
					   to_idx, linear_idx);
			return -1;
		}
		prev_idx = rd_idx;
		cpu_relax();
		udelay(1);
	}

	return 0;
}
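/*
 * Illustration of the linearization above: with prev_idx == 0xff80
 * and a wrapped to_idx reading back as 0x40, to_idx is rebased to
 * 0x10040 (0x40 + ITS_CMD_QUEUE_SZ); rd_idx deltas are accumulated
 * the same way, so the comparison stays monotonic across the wrap.
 */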

/* Warning, macro hell follows */
#define BUILD_SINGLE_CMD_FUNC(name, buildtype, synctype, buildfn)	\
void name(struct its_node *its,						\
	  buildtype builder,						\
	  struct its_cmd_desc *desc)					\
{									\
	struct its_cmd_block *cmd, *sync_cmd, *next_cmd;		\
	synctype *sync_obj;						\
	unsigned long flags;						\
	u64 rd_idx;							\
									\
	raw_spin_lock_irqsave(&its->lock, flags);			\
									\
	cmd = its_allocate_entry(its);					\
	if (!cmd) {		/* We're soooooo screwed... */		\
		raw_spin_unlock_irqrestore(&its->lock, flags);		\
		return;							\
	}								\
	sync_obj = builder(its, cmd, desc);				\
	its_flush_cmd(its, cmd);					\
									\
	if (sync_obj) {							\
		sync_cmd = its_allocate_entry(its);			\
		if (!sync_cmd)						\
			goto post;					\
									\
		buildfn(its, sync_cmd, sync_obj);			\
		its_flush_cmd(its, sync_cmd);				\
	}								\
									\
post:									\
	rd_idx = readl_relaxed(its->base + GITS_CREADR);		\
	next_cmd = its_post_commands(its);				\
	raw_spin_unlock_irqrestore(&its->lock, flags);			\
									\
	if (its_wait_for_range_completion(its, rd_idx, next_cmd))	\
		pr_err_ratelimited("ITS cmd %ps failed\n", builder);	\
}

static void its_build_sync_cmd(struct its_node *its,
			       struct its_cmd_block *sync_cmd,
			       struct its_collection *sync_col)
{
	its_encode_cmd(sync_cmd, GITS_CMD_SYNC);
	its_encode_target(sync_cmd, sync_col->target_address);

	its_fixup_cmd(sync_cmd);
}

static BUILD_SINGLE_CMD_FUNC(its_send_single_command, its_cmd_builder_t,
			     struct its_collection, its_build_sync_cmd)
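/*
 * A rough sketch of what the instantiation above expands to:
 *
 * static void its_send_single_command(struct its_node *its,
 *				       its_cmd_builder_t builder,
 *				       struct its_cmd_desc *desc)
 *
 * i.e. a function that queues the command built by @builder, chases
 * it with a SYNC targeting the returned collection (if any), and
 * waits for the read pointer to move past both.
 */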

static void its_build_vsync_cmd(struct its_node *its,
				struct its_cmd_block *sync_cmd,
				struct its_vpe *sync_vpe)
{
	its_encode_cmd(sync_cmd, GITS_CMD_VSYNC);
	its_encode_vpeid(sync_cmd, sync_vpe->vpe_id);

	its_fixup_cmd(sync_cmd);
}

static BUILD_SINGLE_CMD_FUNC(its_send_single_vcommand, its_cmd_vbuilder_t,
			     struct its_vpe, its_build_vsync_cmd)

static void its_send_int(struct its_device *dev, u32 event_id)
{
	struct its_cmd_desc desc;

	desc.its_int_cmd.dev = dev;
	desc.its_int_cmd.event_id = event_id;

	its_send_single_command(dev->its, its_build_int_cmd, &desc);
}

static void its_send_clear(struct its_device *dev, u32 event_id)
{
	struct its_cmd_desc desc;

	desc.its_clear_cmd.dev = dev;
	desc.its_clear_cmd.event_id = event_id;

	its_send_single_command(dev->its, its_build_clear_cmd, &desc);
}

static void its_send_inv(struct its_device *dev, u32 event_id)
{
	struct its_cmd_desc desc;

	desc.its_inv_cmd.dev = dev;
	desc.its_inv_cmd.event_id = event_id;

	its_send_single_command(dev->its, its_build_inv_cmd, &desc);
}

static void its_send_mapd(struct its_device *dev, int valid)
{
	struct its_cmd_desc desc;

	desc.its_mapd_cmd.dev = dev;
	desc.its_mapd_cmd.valid = !!valid;

	its_send_single_command(dev->its, its_build_mapd_cmd, &desc);
}

static void its_send_mapc(struct its_node *its, struct its_collection *col,
			  int valid)
{
	struct its_cmd_desc desc;

	desc.its_mapc_cmd.col = col;
	desc.its_mapc_cmd.valid = !!valid;

	its_send_single_command(its, its_build_mapc_cmd, &desc);
}

static void its_send_mapti(struct its_device *dev, u32 irq_id, u32 id)
{
	struct its_cmd_desc desc;

	desc.its_mapti_cmd.dev = dev;
	desc.its_mapti_cmd.phys_id = irq_id;
	desc.its_mapti_cmd.event_id = id;

	its_send_single_command(dev->its, its_build_mapti_cmd, &desc);
}

static void its_send_movi(struct its_device *dev,
			  struct its_collection *col, u32 id)
{
	struct its_cmd_desc desc;

	desc.its_movi_cmd.dev = dev;
	desc.its_movi_cmd.col = col;
	desc.its_movi_cmd.event_id = id;

	its_send_single_command(dev->its, its_build_movi_cmd, &desc);
}

static void its_send_discard(struct its_device *dev, u32 id)
{
	struct its_cmd_desc desc;

	desc.its_discard_cmd.dev = dev;
	desc.its_discard_cmd.event_id = id;

	its_send_single_command(dev->its, its_build_discard_cmd, &desc);
}

static void its_send_invall(struct its_node *its, struct its_collection *col)
{
	struct its_cmd_desc desc;

	desc.its_invall_cmd.col = col;

	its_send_single_command(its, its_build_invall_cmd, &desc);
}

static void its_send_vmapti(struct its_device *dev, u32 id)
{
	struct its_vlpi_map *map = dev_event_to_vlpi_map(dev, id);
	struct its_cmd_desc desc;

	desc.its_vmapti_cmd.vpe = map->vpe;
	desc.its_vmapti_cmd.dev = dev;
	desc.its_vmapti_cmd.virt_id = map->vintid;
	desc.its_vmapti_cmd.event_id = id;
	desc.its_vmapti_cmd.db_enabled = map->db_enabled;

	its_send_single_vcommand(dev->its, its_build_vmapti_cmd, &desc);
}

static void its_send_vmovi(struct its_device *dev, u32 id)
{
	struct its_vlpi_map *map = dev_event_to_vlpi_map(dev, id);
	struct its_cmd_desc desc;

	desc.its_vmovi_cmd.vpe = map->vpe;
	desc.its_vmovi_cmd.dev = dev;
	desc.its_vmovi_cmd.event_id = id;
	desc.its_vmovi_cmd.db_enabled = map->db_enabled;

	its_send_single_vcommand(dev->its, its_build_vmovi_cmd, &desc);
}

static void its_send_vmapp(struct its_node *its,
			   struct its_vpe *vpe, bool valid)
{
	struct its_cmd_desc desc;

	desc.its_vmapp_cmd.vpe = vpe;
	desc.its_vmapp_cmd.valid = valid;
	desc.its_vmapp_cmd.col = &its->collections[vpe->col_idx];

	its_send_single_vcommand(its, its_build_vmapp_cmd, &desc);
}

static void its_send_vmovp(struct its_vpe *vpe)
{
	struct its_cmd_desc desc = {};
	struct its_node *its;
	unsigned long flags;
	int col_id = vpe->col_idx;

	desc.its_vmovp_cmd.vpe = vpe;

	if (!its_list_map) {
		its = list_first_entry(&its_nodes, struct its_node, entry);
		desc.its_vmovp_cmd.col = &its->collections[col_id];
		its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
		return;
	}

	/*
	 * Yet another marvel of the architecture. If using the
	 * its_list "feature", we need to make sure that all ITSs
	 * receive all VMOVP commands in the same order. The only way
	 * to guarantee this is to make vmovp a serialization point.
	 *
	 * Wall <-- Head.
	 */
	raw_spin_lock_irqsave(&vmovp_lock, flags);

	desc.its_vmovp_cmd.seq_num = vmovp_seq_num++;
	desc.its_vmovp_cmd.its_list = get_its_list(vpe->its_vm);

	/* Emit VMOVPs */
	list_for_each_entry(its, &its_nodes, entry) {
		if (!is_v4(its))
			continue;

		if (!vpe->its_vm->vlpi_count[its->list_nr])
			continue;

		desc.its_vmovp_cmd.col = &its->collections[col_id];
		its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
	}

	raw_spin_unlock_irqrestore(&vmovp_lock, flags);
}

static void its_send_vinvall(struct its_node *its, struct its_vpe *vpe)
{
	struct its_cmd_desc desc;

	desc.its_vinvall_cmd.vpe = vpe;
	its_send_single_vcommand(its, its_build_vinvall_cmd, &desc);
}

static void its_send_vinv(struct its_device *dev, u32 event_id)
{
	struct its_cmd_desc desc;

	/*
	 * There is no real VINV command. This is just a normal INV,
	 * with a VSYNC instead of a SYNC.
	 */
	desc.its_inv_cmd.dev = dev;
	desc.its_inv_cmd.event_id = event_id;

	its_send_single_vcommand(dev->its, its_build_vinv_cmd, &desc);
}

static void its_send_vint(struct its_device *dev, u32 event_id)
{
	struct its_cmd_desc desc;

	/*
	 * There is no real VINT command. This is just a normal INT,
	 * with a VSYNC instead of a SYNC.
	 */
	desc.its_int_cmd.dev = dev;
	desc.its_int_cmd.event_id = event_id;

	its_send_single_vcommand(dev->its, its_build_vint_cmd, &desc);
}

static void its_send_vclear(struct its_device *dev, u32 event_id)
{
	struct its_cmd_desc desc;

	/*
	 * There is no real VCLEAR command. This is just a normal CLEAR,
	 * with a VSYNC instead of a SYNC.
	 */
	desc.its_clear_cmd.dev = dev;
	desc.its_clear_cmd.event_id = event_id;

	its_send_single_vcommand(dev->its, its_build_vclear_cmd, &desc);
}

static void its_send_invdb(struct its_node *its, struct its_vpe *vpe)
{
	struct its_cmd_desc desc;

	desc.its_invdb_cmd.vpe = vpe;
	its_send_single_vcommand(its, its_build_invdb_cmd, &desc);
}

/*
 * irqchip functions - assumes MSI, mostly.
 */
static void lpi_write_config(struct irq_data *d, u8 clr, u8 set)
{
	struct its_vlpi_map *map = get_vlpi_map(d);
	irq_hw_number_t hwirq;
	void *va;
	u8 *cfg;

	if (map) {
		va = page_address(map->vm->vprop_page);
		hwirq = map->vintid;

		/* Remember the updated property */
		map->properties &= ~clr;
		map->properties |= set | LPI_PROP_GROUP1;
	} else {
		va = gic_rdists->prop_table_va;
		hwirq = d->hwirq;
	}

	cfg = va + hwirq - 8192;
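	/* LPI INTIDs start at 8192, hence the offset into the config table */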
	*cfg &= ~clr;
	*cfg |= set | LPI_PROP_GROUP1;

	/*
	 * Make the above write visible to the redistributors.
	 * And yes, we're flushing exactly: One. Single. Byte.
	 * Humpf...
	 */
	if (gic_rdists->flags & RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING)
		gic_flush_dcache_to_poc(cfg, sizeof(*cfg));
	else
		dsb(ishst);
}

static void wait_for_syncr(void __iomem *rdbase)
{
	while (gic_read_lpir(rdbase + GICR_SYNCR) & 1)
		cpu_relax();
}

static void direct_lpi_inv(struct irq_data *d)
{
	struct its_vlpi_map *map = get_vlpi_map(d);
	void __iomem *rdbase;
	unsigned long flags;
	u64 val;
	int cpu;

	if (map) {
		struct its_device *its_dev = irq_data_get_irq_chip_data(d);

		WARN_ON(!is_v4_1(its_dev->its));

		val = GICR_INVLPIR_V;
		val |= FIELD_PREP(GICR_INVLPIR_VPEID, map->vpe->vpe_id);
		val |= FIELD_PREP(GICR_INVLPIR_INTID, map->vintid);
	} else {
		val = d->hwirq;
	}

	/* Target the redistributor this LPI is currently routed to */
	cpu = irq_to_cpuid_lock(d, &flags);
	raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock);
	rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base;
	gic_write_lpir(val, rdbase + GICR_INVLPIR);

	wait_for_syncr(rdbase);
	raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock);
	irq_to_cpuid_unlock(d, flags);
}

static void lpi_update_config(struct irq_data *d, u8 clr, u8 set)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);

	lpi_write_config(d, clr, set);
	if (gic_rdists->has_direct_lpi &&
	    (is_v4_1(its_dev->its) || !irqd_is_forwarded_to_vcpu(d)))
		direct_lpi_inv(d);
	else if (!irqd_is_forwarded_to_vcpu(d))
		its_send_inv(its_dev, its_get_event_id(d));
	else
		its_send_vinv(its_dev, its_get_event_id(d));
}

static void its_vlpi_set_doorbell(struct irq_data *d, bool enable)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	u32 event = its_get_event_id(d);
	struct its_vlpi_map *map;

	/*
	 * GICv4.1 does away with the per-LPI nonsense, nothing to do
	 * here.
	 */
	if (is_v4_1(its_dev->its))
		return;

	map = dev_event_to_vlpi_map(its_dev, event);

	if (map->db_enabled == enable)
		return;

	map->db_enabled = enable;

	/*
	 * More fun with the architecture:
	 *
	 * Ideally, we'd issue a VMAPTI to set the doorbell to its LPI
	 * value or to 1023, depending on the enable bit. But that
	 * would be issuing a mapping for an /existing/ DevID+EventID
	 * pair, which is UNPREDICTABLE. Instead, let's issue a VMOVI
	 * to the /same/ vPE, using this opportunity to adjust the
	 * doorbell. Mouahahahaha. We loves it, Precious.
	 */
	its_send_vmovi(its_dev, event);
}

static void its_mask_irq(struct irq_data *d)
{
	if (irqd_is_forwarded_to_vcpu(d))
		its_vlpi_set_doorbell(d, false);

	lpi_update_config(d, LPI_PROP_ENABLED, 0);
}

static void its_unmask_irq(struct irq_data *d)
{
	if (irqd_is_forwarded_to_vcpu(d))
		its_vlpi_set_doorbell(d, true);

	lpi_update_config(d, 0, LPI_PROP_ENABLED);
}

static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
			    bool force)
{
	unsigned int cpu;
	const struct cpumask *cpu_mask = cpu_online_mask;
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	struct its_collection *target_col;
	u32 id = its_get_event_id(d);

	/* A forwarded interrupt should use irq_set_vcpu_affinity */
	if (irqd_is_forwarded_to_vcpu(d))
		return -EINVAL;

	/* an LPI cannot be routed to a redistributor on a foreign node */
	if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
		if (its_dev->its->numa_node >= 0) {
			cpu_mask = cpumask_of_node(its_dev->its->numa_node);
			if (!cpumask_intersects(mask_val, cpu_mask))
				return -EINVAL;
		}
	}

	cpu = cpumask_any_and(mask_val, cpu_mask);

	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	/* don't set the affinity when the target cpu is the same as the current one */
	if (cpu != its_dev->event_map.col_map[id]) {
		target_col = &its_dev->its->collections[cpu];
		its_send_movi(its_dev, target_col, id);
		its_dev->event_map.col_map[id] = cpu;
		irq_data_update_effective_affinity(d, cpumask_of(cpu));
	}

	return IRQ_SET_MASK_OK_DONE;
}

static u64 its_irq_get_msi_base(struct its_device *its_dev)
{
	struct its_node *its = its_dev->its;

	return its->phys_base + GITS_TRANSLATER;
}

static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	struct its_node *its;
	u64 addr;

	its = its_dev->its;
	addr = its->get_msi_base(its_dev);

	msg->address_lo		= lower_32_bits(addr);
	msg->address_hi		= upper_32_bits(addr);
	msg->data		= its_get_event_id(d);

	iommu_dma_compose_msi_msg(irq_data_get_msi_desc(d), msg);
}

static int its_irq_set_irqchip_state(struct irq_data *d,
				     enum irqchip_irq_state which,
				     bool state)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	u32 event = its_get_event_id(d);

	if (which != IRQCHIP_STATE_PENDING)
		return -EINVAL;

	if (irqd_is_forwarded_to_vcpu(d)) {
		if (state)
			its_send_vint(its_dev, event);
		else
			its_send_vclear(its_dev, event);
	} else {
		if (state)
			its_send_int(its_dev, event);
		else
			its_send_clear(its_dev, event);
	}

	return 0;
}

static void its_map_vm(struct its_node *its, struct its_vm *vm)
{
	unsigned long flags;

	/* Not using the ITS list? Everything is always mapped. */
	if (!its_list_map)
		return;

	raw_spin_lock_irqsave(&vmovp_lock, flags);

	/*
	 * If the VM wasn't mapped yet, iterate over the vpes and get
	 * them mapped now.
	 */
	vm->vlpi_count[its->list_nr]++;

	if (vm->vlpi_count[its->list_nr] == 1) {
		int i;

		for (i = 0; i < vm->nr_vpes; i++) {
			struct its_vpe *vpe = vm->vpes[i];
			struct irq_data *d = irq_get_irq_data(vpe->irq);

			/* Map the VPE to the first possible CPU */
			vpe->col_idx = cpumask_first(cpu_online_mask);
			its_send_vmapp(its, vpe, true);
			its_send_vinvall(its, vpe);
			irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
		}
	}

	raw_spin_unlock_irqrestore(&vmovp_lock, flags);
}

static void its_unmap_vm(struct its_node *its, struct its_vm *vm)
{
	unsigned long flags;

	/* Not using the ITS list? Everything is always mapped. */
	if (!its_list_map)
		return;

	raw_spin_lock_irqsave(&vmovp_lock, flags);

	if (!--vm->vlpi_count[its->list_nr]) {
		int i;

		for (i = 0; i < vm->nr_vpes; i++)
			its_send_vmapp(its, vm->vpes[i], false);
	}

	raw_spin_unlock_irqrestore(&vmovp_lock, flags);
}

d011e4e6 MZ |
1589 | static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info) |
1590 | { | |
1591 | struct its_device *its_dev = irq_data_get_irq_chip_data(d); | |
1592 | u32 event = its_get_event_id(d); | |
1593 | int ret = 0; | |
1594 | ||
1595 | if (!info->map) | |
1596 | return -EINVAL; | |
1597 | ||
11635fa2 | 1598 | raw_spin_lock(&its_dev->event_map.vlpi_lock); |
d011e4e6 MZ |
1599 | |
1600 | if (!its_dev->event_map.vm) { | |
1601 | struct its_vlpi_map *maps; | |
1602 | ||
6396bb22 | 1603 | maps = kcalloc(its_dev->event_map.nr_lpis, sizeof(*maps), |
11635fa2 | 1604 | GFP_ATOMIC); |
d011e4e6 MZ |
1605 | if (!maps) { |
1606 | ret = -ENOMEM; | |
1607 | goto out; | |
1608 | } | |
1609 | ||
1610 | its_dev->event_map.vm = info->map->vm; | |
1611 | its_dev->event_map.vlpi_maps = maps; | |
1612 | } else if (its_dev->event_map.vm != info->map->vm) { | |
1613 | ret = -EINVAL; | |
1614 | goto out; | |
1615 | } | |
1616 | ||
1617 | /* Get our private copy of the mapping information */ | |
1618 | its_dev->event_map.vlpi_maps[event] = *info->map; | |
1619 | ||
1620 | if (irqd_is_forwarded_to_vcpu(d)) { | |
1621 | /* Already mapped, move it around */ | |
1622 | its_send_vmovi(its_dev, event); | |
1623 | } else { | |
2247e1bf MZ |
1624 | /* Ensure all the VPEs are mapped on this ITS */ |
1625 | its_map_vm(its_dev->its, info->map->vm); | |
1626 | ||
d4d7b4ad MZ |
1627 | /* |
1628 | * Flag the interrupt as forwarded so that we can | |
1629 | * start poking the virtual property table. | |
1630 | */ | |
1631 | irqd_set_forwarded_to_vcpu(d); | |
1632 | ||
1633 | /* Write out the property to the prop table */ | |
1634 | lpi_write_config(d, 0xff, info->map->properties); | |
1635 | ||
d011e4e6 MZ |
1636 | /* Drop the physical mapping */ |
1637 | its_send_discard(its_dev, event); | |
1638 | ||
1639 | /* and install the virtual one */ | |
1640 | its_send_vmapti(its_dev, event); | |
d011e4e6 MZ |
1641 | |
1642 | /* Increment the number of VLPIs */ | |
1643 | its_dev->event_map.nr_vlpis++; | |
1644 | } | |
1645 | ||
1646 | out: | |
11635fa2 | 1647 | raw_spin_unlock(&its_dev->event_map.vlpi_lock); |
d011e4e6 MZ |
1648 | return ret; |
1649 | } | |
1650 | ||
1651 | static int its_vlpi_get(struct irq_data *d, struct its_cmd_info *info) | |
1652 | { | |
1653 | struct its_device *its_dev = irq_data_get_irq_chip_data(d); | |
046b5054 | 1654 | struct its_vlpi_map *map; |
d011e4e6 MZ |
1655 | int ret = 0; |
1656 | ||
11635fa2 | 1657 | raw_spin_lock(&its_dev->event_map.vlpi_lock); |
d011e4e6 | 1658 | |
046b5054 MZ |
1659 | map = get_vlpi_map(d); |
1660 | ||
1661 | if (!its_dev->event_map.vm || !map) { | |
d011e4e6 MZ |
1662 | ret = -EINVAL; |
1663 | goto out; | |
1664 | } | |
1665 | ||
1666 | /* Copy our mapping information to the incoming request */ | |
c1d4d5cd | 1667 | *info->map = *map; |
d011e4e6 MZ |
1668 | |
1669 | out: | |
11635fa2 | 1670 | raw_spin_unlock(&its_dev->event_map.vlpi_lock); |
d011e4e6 MZ |
1671 | return ret; |
1672 | } | |
1673 | ||
1674 | static int its_vlpi_unmap(struct irq_data *d) | |
1675 | { | |
1676 | struct its_device *its_dev = irq_data_get_irq_chip_data(d); | |
1677 | u32 event = its_get_event_id(d); | |
1678 | int ret = 0; | |
1679 | ||
11635fa2 | 1680 | raw_spin_lock(&its_dev->event_map.vlpi_lock); |
d011e4e6 MZ |
1681 | |
1682 | if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) { | |
1683 | ret = -EINVAL; | |
1684 | goto out; | |
1685 | } | |
1686 | ||
1687 | /* Drop the virtual mapping */ | |
1688 | its_send_discard(its_dev, event); | |
1689 | ||
1690 | /* and restore the physical one */ | |
1691 | irqd_clr_forwarded_to_vcpu(d); | |
1692 | its_send_mapti(its_dev, d->hwirq, event); | |
1693 | lpi_update_config(d, 0xff, (LPI_PROP_DEFAULT_PRIO | | |
1694 | LPI_PROP_ENABLED | | |
1695 | LPI_PROP_GROUP1)); | |
1696 | ||
2247e1bf MZ |
1697 | /* Potentially unmap the VM from this ITS */ |
1698 | its_unmap_vm(its_dev->its, its_dev->event_map.vm); | |
1699 | ||
d011e4e6 MZ |
1700 | /* |
1701 | * Drop the refcount and make the device available again if | |
1702 | * this was the last VLPI. | |
1703 | */ | |
1704 | if (!--its_dev->event_map.nr_vlpis) { | |
1705 | its_dev->event_map.vm = NULL; | |
1706 | kfree(its_dev->event_map.vlpi_maps); | |
1707 | } | |
1708 | ||
1709 | out: | |
11635fa2 | 1710 | raw_spin_unlock(&its_dev->event_map.vlpi_lock); |
d011e4e6 MZ |
1711 | return ret; |
1712 | } | |
1713 | ||
015ec038 MZ |
1714 | static int its_vlpi_prop_update(struct irq_data *d, struct its_cmd_info *info) |
1715 | { | |
1716 | struct its_device *its_dev = irq_data_get_irq_chip_data(d); | |
1717 | ||
1718 | if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) | |
1719 | return -EINVAL; | |
1720 | ||
1721 | if (info->cmd_type == PROP_UPDATE_AND_INV_VLPI) | |
1722 | lpi_update_config(d, 0xff, info->config); | |
1723 | else | |
1724 | lpi_write_config(d, 0xff, info->config); | |
1725 | its_vlpi_set_doorbell(d, !!(info->config & LPI_PROP_ENABLED)); | |
1726 | ||
1727 | return 0; | |
1728 | } | |
1729 | ||
c808eea8 MZ |
1730 | static int its_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info) |
1731 | { | |
1732 | struct its_device *its_dev = irq_data_get_irq_chip_data(d); | |
1733 | struct its_cmd_info *info = vcpu_info; | |
1734 | ||
1735 | /* Need a v4 ITS */ | |
0dd57fed | 1736 | if (!is_v4(its_dev->its)) |
c808eea8 MZ |
1737 | return -EINVAL; |
1738 | ||
d011e4e6 MZ |
1739 | /* Unmap request? */ |
1740 | if (!info) | |
1741 | return its_vlpi_unmap(d); | |
1742 | ||
c808eea8 MZ |
1743 | switch (info->cmd_type) { |
1744 | case MAP_VLPI: | |
d011e4e6 | 1745 | return its_vlpi_map(d, info); |
c808eea8 MZ |
1746 | |
1747 | case GET_VLPI: | |
d011e4e6 | 1748 | return its_vlpi_get(d, info); |
c808eea8 MZ |
1749 | |
1750 | case PROP_UPDATE_VLPI: | |
1751 | case PROP_UPDATE_AND_INV_VLPI: | |
015ec038 | 1752 | return its_vlpi_prop_update(d, info); |
c808eea8 MZ |
1753 | |
1754 | default: | |
1755 | return -EINVAL; | |
1756 | } | |
1757 | } | |
1758 | ||
c48ed51c MZ |
1759 | static struct irq_chip its_irq_chip = { |
1760 | .name = "ITS", | |
1761 | .irq_mask = its_mask_irq, | |
1762 | .irq_unmask = its_unmask_irq, | |
004fa08d | 1763 | .irq_eoi = irq_chip_eoi_parent, |
c48ed51c | 1764 | .irq_set_affinity = its_set_affinity, |
b48ac83d | 1765 | .irq_compose_msi_msg = its_irq_compose_msi_msg, |
8d85dced | 1766 | .irq_set_irqchip_state = its_irq_set_irqchip_state, |
c808eea8 | 1767 | .irq_set_vcpu_affinity = its_irq_set_vcpu_affinity, |
b48ac83d MZ |
1768 | }; |
1769 | ||
880cb3cd | 1770 | |
bf9529f8 MZ |
1771 | /* |
1772 | * How we allocate LPIs: | |
1773 | * | |
880cb3cd MZ |
1774 | * lpi_range_list contains ranges of LPIs that are available to |
1775 | * allocate from. To allocate LPIs, just pick the first range that | |
1776 | * fits the required allocation, and reduce it by the required | |
1777 | * amount. Once empty, remove the range from the list. | |
1778 | * | |
1779 | * To free a range of LPIs, add a free range to the list, sort it and | |
1780 | * merge the result if the new range happens to be adjacent to an | |
1781 | * already free block. | |
bf9529f8 | 1782 | * |
880cb3cd MZ |
1783 | * The consequence of the above is that allocation cost is low, but |
1784 | * freeing is expensive. We assume that freeing rarely occurs. |
1785 | */ | |
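/*
 * A worked example with illustrative numbers: starting from a single
 * free range {base_id = 8192, span = 57344}, alloc_lpi_range(32, &base)
 * returns base = 8192 and shrinks the range to {base_id = 8224,
 * span = 57312}. Freeing those 32 LPIs later inserts a new range in
 * front of the remaining one and merges the two, restoring the
 * original single-range list.
 */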
4cb205c0 | 1786 | #define ITS_MAX_LPI_NRBITS 16 /* 64K LPIs */ |
880cb3cd | 1787 | |
880cb3cd MZ |
1788 | static DEFINE_MUTEX(lpi_range_lock); |
1789 | static LIST_HEAD(lpi_range_list); | |
1790 | ||
1791 | struct lpi_range { | |
1792 | struct list_head entry; | |
1793 | u32 base_id; | |
1794 | u32 span; | |
1795 | }; | |
bf9529f8 | 1796 | |
880cb3cd | 1797 | static struct lpi_range *mk_lpi_range(u32 base, u32 span) |
bf9529f8 | 1798 | { |
880cb3cd MZ |
1799 | struct lpi_range *range; |
1800 | ||
1c73fac5 | 1801 | range = kmalloc(sizeof(*range), GFP_KERNEL); |
880cb3cd | 1802 | if (range) { |
880cb3cd MZ |
1803 | range->base_id = base; |
1804 | range->span = span; | |
1805 | } | |
1806 | ||
1807 | return range; | |
bf9529f8 MZ |
1808 | } |
1809 | ||
880cb3cd MZ |
1810 | static int alloc_lpi_range(u32 nr_lpis, u32 *base) |
1811 | { | |
1812 | struct lpi_range *range, *tmp; | |
1813 | int err = -ENOSPC; | |
1814 | ||
1815 | mutex_lock(&lpi_range_lock); | |
1816 | ||
1817 | list_for_each_entry_safe(range, tmp, &lpi_range_list, entry) { | |
1818 | if (range->span >= nr_lpis) { | |
1819 | *base = range->base_id; | |
1820 | range->base_id += nr_lpis; | |
1821 | range->span -= nr_lpis; | |
1822 | ||
1823 | if (range->span == 0) { | |
1824 | list_del(&range->entry); | |
1825 | kfree(range); | |
1826 | } | |
1827 | ||
1828 | err = 0; | |
1829 | break; | |
1830 | } | |
1831 | } | |
1832 | ||
1833 | mutex_unlock(&lpi_range_lock); | |
1834 | ||
1835 | pr_debug("ITS: alloc %u:%u\n", *base, nr_lpis); | |
1836 | return err; | |
bf9529f8 MZ |
1837 | } |
1838 | ||
12eade12 RV |
1839 | static void merge_lpi_ranges(struct lpi_range *a, struct lpi_range *b) |
1840 | { | |
1841 | if (&a->entry == &lpi_range_list || &b->entry == &lpi_range_list) | |
1842 | return; | |
1843 | if (a->base_id + a->span != b->base_id) | |
1844 | return; | |
1845 | b->base_id = a->base_id; | |
1846 | b->span += a->span; | |
1847 | list_del(&a->entry); | |
1848 | kfree(a); | |
1849 | } | |
1850 | ||
880cb3cd | 1851 | static int free_lpi_range(u32 base, u32 nr_lpis) |
bf9529f8 | 1852 | { |
12eade12 | 1853 | struct lpi_range *new, *old; |
880cb3cd MZ |
1854 | |
1855 | new = mk_lpi_range(base, nr_lpis); | |
b31a3838 RV |
1856 | if (!new) |
1857 | return -ENOMEM; | |
880cb3cd MZ |
1858 | |
1859 | mutex_lock(&lpi_range_lock); | |
1860 | ||
12eade12 RV |
1861 | list_for_each_entry_reverse(old, &lpi_range_list, entry) { |
1862 | if (old->base_id < base) | |
1863 | break; | |
880cb3cd | 1864 | } |
12eade12 RV |
1865 | /* |
1866 | * old is the last element with ->base_id smaller than base, | |
1867 | * so new goes right after it. If there are no elements with | |
1868 | * ->base_id smaller than base, &old->entry ends up pointing | |
1869 | * at the head of the list, and inserting new it the start of | |
1870 | * the list is the right thing to do in that case as well. | |
1871 | */ | |
1872 | list_add(&new->entry, &old->entry); | |
1873 | /* | |
1874 | * Now check if we can merge with the preceding and/or | |
1875 | * following ranges. | |
1876 | */ | |
1877 | merge_lpi_ranges(old, new); | |
1878 | merge_lpi_ranges(new, list_next_entry(new, entry)); | |
880cb3cd | 1879 | |
880cb3cd | 1880 | mutex_unlock(&lpi_range_lock); |
b31a3838 | 1881 | return 0; |
880cb3cd MZ |
1882 | } |
1883 | ||
1884 | static int __init its_lpi_init(u32 id_bits) | |
1885 | { | |
1886 | u32 lpis = (1UL << id_bits) - 8192; | |
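/*
 * e.g. id_bits == 16 leaves 65536 - 8192 = 57344 LPIs, since the
 * first 8192 INTIDs are not LPIs (SGI/PPI/SPI space).
 */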
12b2905a | 1887 | u32 numlpis; |
880cb3cd MZ |
1888 | int err; |
1889 | ||
12b2905a MZ |
1890 | numlpis = 1UL << GICD_TYPER_NUM_LPIS(gic_rdists->gicd_typer); |
1891 | ||
1892 | if (numlpis > 2 && !WARN_ON(numlpis > lpis)) { | |
1893 | lpis = numlpis; | |
1894 | pr_info("ITS: Using hypervisor restricted LPI range [%u]\n", | |
1895 | lpis); | |
1896 | } | |
1897 | ||
880cb3cd MZ |
1898 | /* |
1899 | * Initializing the allocator is just the same as freeing the | |
1900 | * full range of LPIs. | |
1901 | */ | |
1902 | err = free_lpi_range(8192, lpis); | |
1903 | pr_debug("ITS: Allocator initialized for %u LPIs\n", lpis); | |
1904 | return err; | |
1905 | } | |
bf9529f8 | 1906 | |
38dd7c49 | 1907 | static unsigned long *its_lpi_alloc(int nr_irqs, u32 *base, int *nr_ids) |
880cb3cd MZ |
1908 | { |
1909 | unsigned long *bitmap = NULL; | |
1910 | int err = 0; | |
bf9529f8 MZ |
1911 | |
1912 | do { | |
38dd7c49 | 1913 | err = alloc_lpi_range(nr_irqs, base); |
880cb3cd | 1914 | if (!err) |
bf9529f8 MZ |
1915 | break; |
1916 | ||
38dd7c49 MZ |
1917 | nr_irqs /= 2; |
1918 | } while (nr_irqs > 0); | |
bf9529f8 | 1919 | |
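/*
 * Each failed attempt halves the request, so asking for 32 LPIs
 * when the largest free range only holds 20 ends up with a 16-LPI
 * allocation (illustrative numbers).
 */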
45725e0f MZ |
1920 | if (!nr_irqs) |
1921 | err = -ENOSPC; | |
1922 | ||
880cb3cd | 1923 | if (err) |
bf9529f8 MZ |
1924 | goto out; |
1925 | ||
38dd7c49 | 1926 | bitmap = kcalloc(BITS_TO_LONGS(nr_irqs), sizeof (long), GFP_ATOMIC); |
bf9529f8 MZ |
1927 | if (!bitmap) |
1928 | goto out; | |
1929 | ||
38dd7c49 | 1930 | *nr_ids = nr_irqs; |
bf9529f8 MZ |
1931 | |
1932 | out: | |
c8415b94 MZ |
1933 | if (!bitmap) |
1934 | *base = *nr_ids = 0; | |
1935 | ||
bf9529f8 MZ |
1936 | return bitmap; |
1937 | } | |
1938 | ||
38dd7c49 | 1939 | static void its_lpi_free(unsigned long *bitmap, u32 base, u32 nr_ids) |
bf9529f8 | 1940 | { |
880cb3cd | 1941 | WARN_ON(free_lpi_range(base, nr_ids)); |
cf2be8ba | 1942 | kfree(bitmap); |
bf9529f8 | 1943 | } |
1ac19ca6 | 1944 | |
053be485 MZ |
1945 | static void gic_reset_prop_table(void *va) |
1946 | { | |
1947 | /* Priority 0xa0, Group-1, disabled */ | |
1948 | memset(va, LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1, LPI_PROPBASE_SZ); | |
1949 | ||
1950 | /* Make sure the GIC will observe the written configuration */ | |
1951 | gic_flush_dcache_to_poc(va, LPI_PROPBASE_SZ); | |
1952 | } | |
1953 | ||
0e5ccf91 MZ |
1954 | static struct page *its_allocate_prop_table(gfp_t gfp_flags) |
1955 | { | |
1956 | struct page *prop_page; | |
1ac19ca6 | 1957 | |
0e5ccf91 MZ |
1958 | prop_page = alloc_pages(gfp_flags, get_order(LPI_PROPBASE_SZ)); |
1959 | if (!prop_page) | |
1960 | return NULL; | |
1961 | ||
053be485 | 1962 | gic_reset_prop_table(page_address(prop_page)); |
0e5ccf91 MZ |
1963 | |
1964 | return prop_page; | |
1965 | } | |
1966 | ||
7d75bbb4 MZ |
1967 | static void its_free_prop_table(struct page *prop_page) |
1968 | { | |
1969 | free_pages((unsigned long)page_address(prop_page), | |
1970 | get_order(LPI_PROPBASE_SZ)); | |
1971 | } | |
1ac19ca6 | 1972 | |
5e2c9f9a MZ |
1973 | static bool gic_check_reserved_range(phys_addr_t addr, unsigned long size) |
1974 | { | |
1975 | phys_addr_t start, end, addr_end; | |
1976 | u64 i; | |
1977 | ||
1978 | /* | |
1979 | * We don't bother checking for a kdump kernel as, by |
1980 | * construction, the LPI tables are out of this kernel's | |
1981 | * memory map. | |
1982 | */ | |
1983 | if (is_kdump_kernel()) | |
1984 | return true; | |
1985 | ||
1986 | addr_end = addr + size - 1; | |
1987 | ||
1988 | for_each_reserved_mem_region(i, &start, &end) { | |
1989 | if (addr >= start && addr_end <= end) | |
1990 | return true; | |
1991 | } | |
1992 | ||
1993 | /* Not found, not a good sign... */ | |
1994 | pr_warn("GICv3: Expected reserved range [%pa:%pa], not found\n", | |
1995 | &addr, &addr_end); | |
1996 | add_taint(TAINT_CRAP, LOCKDEP_STILL_OK); | |
1997 | return false; | |
1998 | } | |
1999 | ||
3fb68fae MZ |
2000 | static int gic_reserve_range(phys_addr_t addr, unsigned long size) |
2001 | { | |
2002 | if (efi_enabled(EFI_CONFIG_TABLES)) | |
2003 | return efi_mem_reserve_persistent(addr, size); | |
2004 | ||
2005 | return 0; | |
2006 | } | |
2007 | ||
11e37d35 | 2008 | static int __init its_setup_lpi_prop_table(void) |
1ac19ca6 | 2009 | { |
c440a9d9 MZ |
2010 | if (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) { |
2011 | u64 val; | |
1ac19ca6 | 2012 | |
c440a9d9 MZ |
2013 | val = gicr_read_propbaser(gic_data_rdist_rd_base() + GICR_PROPBASER); |
2014 | lpi_id_bits = (val & GICR_PROPBASER_IDBITS_MASK) + 1; | |
1ac19ca6 | 2015 | |
c440a9d9 MZ |
2016 | gic_rdists->prop_table_pa = val & GENMASK_ULL(51, 12); |
2017 | gic_rdists->prop_table_va = memremap(gic_rdists->prop_table_pa, | |
2018 | LPI_PROPBASE_SZ, | |
2019 | MEMREMAP_WB); | |
2020 | gic_reset_prop_table(gic_rdists->prop_table_va); | |
2021 | } else { | |
2022 | struct page *page; | |
2023 | ||
2024 | lpi_id_bits = min_t(u32, | |
2025 | GICD_TYPER_ID_BITS(gic_rdists->gicd_typer), | |
2026 | ITS_MAX_LPI_NRBITS); | |
2027 | page = its_allocate_prop_table(GFP_NOWAIT); | |
2028 | if (!page) { | |
2029 | pr_err("Failed to allocate PROPBASE\n"); | |
2030 | return -ENOMEM; | |
2031 | } | |
2032 | ||
2033 | gic_rdists->prop_table_pa = page_to_phys(page); | |
2034 | gic_rdists->prop_table_va = page_address(page); | |
3fb68fae MZ |
2035 | WARN_ON(gic_reserve_range(gic_rdists->prop_table_pa, |
2036 | LPI_PROPBASE_SZ)); | |
c440a9d9 | 2037 | } |
e1a2e201 MZ |
2038 | |
2039 | pr_info("GICv3: using LPI property table @%pa\n", | |
2040 | &gic_rdists->prop_table_pa); | |
1ac19ca6 | 2041 | |
6c31e123 | 2042 | return its_lpi_init(lpi_id_bits); |
1ac19ca6 MZ |
2043 | } |
2044 | ||
2045 | static const char *its_base_type_string[] = { | |
2046 | [GITS_BASER_TYPE_DEVICE] = "Devices", | |
2047 | [GITS_BASER_TYPE_VCPU] = "Virtual CPUs", | |
4f46de9d | 2048 | [GITS_BASER_TYPE_RESERVED3] = "Reserved (3)", |
1ac19ca6 MZ |
2049 | [GITS_BASER_TYPE_COLLECTION] = "Interrupt Collections", |
2050 | [GITS_BASER_TYPE_RESERVED5] = "Reserved (5)", | |
2051 | [GITS_BASER_TYPE_RESERVED6] = "Reserved (6)", | |
2052 | [GITS_BASER_TYPE_RESERVED7] = "Reserved (7)", | |
2053 | }; | |
2054 | ||
2d81d425 SD |
2055 | static u64 its_read_baser(struct its_node *its, struct its_baser *baser) |
2056 | { | |
2057 | u32 idx = baser - its->tables; | |
2058 | ||
0968a619 | 2059 | return gits_read_baser(its->base + GITS_BASER + (idx << 3)); |
2d81d425 SD |
2060 | } |
2061 | ||
2062 | static void its_write_baser(struct its_node *its, struct its_baser *baser, | |
2063 | u64 val) | |
2064 | { | |
2065 | u32 idx = baser - its->tables; | |
2066 | ||
0968a619 | 2067 | gits_write_baser(val, its->base + GITS_BASER + (idx << 3)); |
2d81d425 SD |
2068 | baser->val = its_read_baser(its, baser); |
2069 | } | |
2070 | ||
9347359a | 2071 | static int its_setup_baser(struct its_node *its, struct its_baser *baser, |
3faf24ea SD |
2072 | u64 cache, u64 shr, u32 psz, u32 order, |
2073 | bool indirect) | |
9347359a SD |
2074 | { |
2075 | u64 val = its_read_baser(its, baser); | |
2076 | u64 esz = GITS_BASER_ENTRY_SIZE(val); | |
2077 | u64 type = GITS_BASER_TYPE(val); | |
30ae9610 | 2078 | u64 baser_phys, tmp; |
9347359a | 2079 | u32 alloc_pages; |
539d3782 | 2080 | struct page *page; |
9347359a | 2081 | void *base; |
9347359a SD |
2082 | |
2083 | retry_alloc_baser: | |
2084 | alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz); | |
2085 | if (alloc_pages > GITS_BASER_PAGES_MAX) { | |
2086 | pr_warn("ITS@%pa: %s too large, reduce ITS pages %u->%u\n", | |
2087 | &its->phys_base, its_base_type_string[type], | |
2088 | alloc_pages, GITS_BASER_PAGES_MAX); | |
2089 | alloc_pages = GITS_BASER_PAGES_MAX; | |
2090 | order = get_order(GITS_BASER_PAGES_MAX * psz); | |
2091 | } | |
2092 | ||
539d3782 SD |
2093 | page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, order); |
2094 | if (!page) | |
9347359a SD |
2095 | return -ENOMEM; |
2096 | ||
539d3782 | 2097 | base = (void *)page_address(page); |
30ae9610 SD |
2098 | baser_phys = virt_to_phys(base); |
2099 | ||
2100 | /* Check if the physical address of the memory is above 48bits */ | |
2101 | if (IS_ENABLED(CONFIG_ARM64_64K_PAGES) && (baser_phys >> 48)) { | |
2102 | ||
2103 | /* 52bit PA is supported only when PageSize=64K */ | |
2104 | if (psz != SZ_64K) { | |
2105 | pr_err("ITS: no 52bit PA support when psz=%d\n", psz); | |
2106 | free_pages((unsigned long)base, order); | |
2107 | return -ENXIO; | |
2108 | } | |
2109 | ||
2110 | /* Convert 52bit PA to 48bit field */ | |
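/*
 * With 64K pages, BASER bits [15:12] are otherwise unused, so the
 * encoding folds PA bits [51:48] into that field.
 */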
2111 | baser_phys = GITS_BASER_PHYS_52_to_48(baser_phys); | |
2112 | } | |
2113 | ||
9347359a | 2114 | retry_baser: |
30ae9610 | 2115 | val = (baser_phys | |
9347359a SD |
2116 | (type << GITS_BASER_TYPE_SHIFT) | |
2117 | ((esz - 1) << GITS_BASER_ENTRY_SIZE_SHIFT) | | |
2118 | ((alloc_pages - 1) << GITS_BASER_PAGES_SHIFT) | | |
2119 | cache | | |
2120 | shr | | |
2121 | GITS_BASER_VALID); | |
2122 | ||
3faf24ea SD |
2123 | val |= indirect ? GITS_BASER_INDIRECT : 0x0; |
2124 | ||
9347359a SD |
2125 | switch (psz) { |
2126 | case SZ_4K: | |
2127 | val |= GITS_BASER_PAGE_SIZE_4K; | |
2128 | break; | |
2129 | case SZ_16K: | |
2130 | val |= GITS_BASER_PAGE_SIZE_16K; | |
2131 | break; | |
2132 | case SZ_64K: | |
2133 | val |= GITS_BASER_PAGE_SIZE_64K; | |
2134 | break; | |
2135 | } | |
2136 | ||
2137 | its_write_baser(its, baser, val); | |
2138 | tmp = baser->val; | |
2139 | ||
2140 | if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) { | |
2141 | /* | |
2142 | * Shareability didn't stick. Just use | |
2143 | * whatever the read reported, which is likely | |
2144 | * to be the only thing this ITS |
2145 | * supports. If that's zero, make it | |
2146 | * non-cacheable as well. | |
2147 | */ | |
2148 | shr = tmp & GITS_BASER_SHAREABILITY_MASK; | |
2149 | if (!shr) { | |
2150 | cache = GITS_BASER_nC; | |
328191c0 | 2151 | gic_flush_dcache_to_poc(base, PAGE_ORDER_TO_SIZE(order)); |
9347359a SD |
2152 | } |
2153 | goto retry_baser; | |
2154 | } | |
2155 | ||
2156 | if ((val ^ tmp) & GITS_BASER_PAGE_SIZE_MASK) { | |
2157 | /* | |
2158 | * Page size didn't stick. Let's try a smaller | |
2159 | * size and retry. If we reach 4K, then | |
2160 | * something is horribly wrong... | |
2161 | */ | |
2162 | free_pages((unsigned long)base, order); | |
2163 | baser->base = NULL; | |
2164 | ||
2165 | switch (psz) { | |
2166 | case SZ_16K: | |
2167 | psz = SZ_4K; | |
2168 | goto retry_alloc_baser; | |
2169 | case SZ_64K: | |
2170 | psz = SZ_16K; | |
2171 | goto retry_alloc_baser; | |
2172 | } | |
2173 | } | |
2174 | ||
2175 | if (val != tmp) { | |
b11283eb | 2176 | pr_err("ITS@%pa: %s doesn't stick: %llx %llx\n", |
9347359a | 2177 | &its->phys_base, its_base_type_string[type], |
b11283eb | 2178 | val, tmp); |
9347359a SD |
2179 | free_pages((unsigned long)base, order); |
2180 | return -ENXIO; | |
2181 | } | |
2182 | ||
2183 | baser->order = order; | |
2184 | baser->base = base; | |
2185 | baser->psz = psz; | |
3faf24ea | 2186 | tmp = indirect ? GITS_LVL1_ENTRY_SIZE : esz; |
9347359a | 2187 | |
3faf24ea | 2188 | pr_info("ITS@%pa: allocated %d %s @%lx (%s, esz %d, psz %dK, shr %d)\n", |
d524eaa2 | 2189 | &its->phys_base, (int)(PAGE_ORDER_TO_SIZE(order) / (int)tmp), |
9347359a SD |
2190 | its_base_type_string[type], |
2191 | (unsigned long)virt_to_phys(base), | |
3faf24ea | 2192 | indirect ? "indirect" : "flat", (int)esz, |
9347359a SD |
2193 | psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT); |
2194 | ||
2195 | return 0; | |
2196 | } | |
2197 | ||
4cacac57 MZ |
2198 | static bool its_parse_indirect_baser(struct its_node *its, |
2199 | struct its_baser *baser, | |
32bd44dc | 2200 | u32 psz, u32 *order, u32 ids) |
4b75c459 | 2201 | { |
4cacac57 MZ |
2202 | u64 tmp = its_read_baser(its, baser); |
2203 | u64 type = GITS_BASER_TYPE(tmp); | |
2204 | u64 esz = GITS_BASER_ENTRY_SIZE(tmp); | |
2fd632a0 | 2205 | u64 val = GITS_BASER_InnerShareable | GITS_BASER_RaWaWb; |
4b75c459 | 2206 | u32 new_order = *order; |
3faf24ea SD |
2207 | bool indirect = false; |
2208 | ||
2209 | /* No need to enable Indirection if memory requirement < (psz*2)bytes */ | |
2210 | if ((esz << ids) > (psz * 2)) { | |
2211 | /* | |
2212 | * Find out whether the hw supports a single or two-level table |
2213 | * by reading the bit at offset '62' after writing '1' to it. |
2214 | */ | |
2215 | its_write_baser(its, baser, val | GITS_BASER_INDIRECT); | |
2216 | indirect = !!(baser->val & GITS_BASER_INDIRECT); | |
2217 | ||
2218 | if (indirect) { | |
2219 | /* | |
2220 | * The size of a lvl2 table is equal to the ITS page size, |
2221 | * which is 'psz'. To compute the lvl1 table size, subtract |
2222 | * from 'ids' (reported by the ITS hardware) the ID bits |
2223 | * covered by a single lvl2 table; the lvl1 table then needs |
2224 | * 2^ids entries of lvl1 entry size each. |
2225 | */ | |
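/*
 * A worked example with illustrative numbers: psz = 64K and
 * esz = 8 bytes mean one lvl2 page resolves 8192 IDs, i.e. 13 ID
 * bits, so a 20-bit ID space ends up with a lvl1 table of
 * 2^7 = 128 entries of GITS_LVL1_ENTRY_SIZE bytes.
 */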
d524eaa2 | 2226 | ids -= ilog2(psz / (int)esz); |
3faf24ea SD |
2227 | esz = GITS_LVL1_ENTRY_SIZE; |
2228 | } | |
2229 | } | |
4b75c459 SD |
2230 | |
2231 | /* | |
2232 | * Allocate as many entries as required to fit the | |
2233 | * range of device IDs that the ITS can grok... The ID | |
2234 | * space being incredibly sparse, this results in a | |
3faf24ea SD |
2235 | * massive waste of memory if the two-level device table |
2236 | * feature is not supported by the hardware. |
4b75c459 SD |
2237 | */ |
2238 | new_order = max_t(u32, get_order(esz << ids), new_order); | |
2239 | if (new_order >= MAX_ORDER) { | |
2240 | new_order = MAX_ORDER - 1; | |
d524eaa2 | 2241 | ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / (int)esz); |
576a8342 | 2242 | pr_warn("ITS@%pa: %s Table too large, reduce ids %llu->%u\n", |
4cacac57 | 2243 | &its->phys_base, its_base_type_string[type], |
576a8342 | 2244 | device_ids(its), ids); |
4b75c459 SD |
2245 | } |
2246 | ||
2247 | *order = new_order; | |
3faf24ea SD |
2248 | |
2249 | return indirect; | |
4b75c459 SD |
2250 | } |
2251 | ||
5e516846 MZ |
2252 | static u32 compute_common_aff(u64 val) |
2253 | { | |
2254 | u32 aff, clpiaff; | |
2255 | ||
2256 | aff = FIELD_GET(GICR_TYPER_AFFINITY, val); | |
2257 | clpiaff = FIELD_GET(GICR_TYPER_COMMON_LPI_AFF, val); | |
2258 | ||
2259 | return aff & ~(GENMASK(31, 0) >> (clpiaff * 8)); | |
2260 | } | |
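/*
 * For illustration: clpiaff == 0 clears all affinity bits (every RD
 * shares the tables), clpiaff == 1 keeps Aff3, clpiaff == 2 keeps
 * Aff3.Aff2, and so on. e.g. aff == 0x01020304 with clpiaff == 2
 * yields 0x01020000.
 */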
2261 | ||
2262 | static u32 compute_its_aff(struct its_node *its) | |
2263 | { | |
2264 | u64 val; | |
2265 | u32 svpet; | |
2266 | ||
2267 | /* | |
2268 | * Reencode the ITS SVPET and MPIDR as a GICR_TYPER, and compute | |
2269 | * the resulting affinity. We then use that to see if this match | |
2270 | * our own affinity. | |
2271 | */ | |
2272 | svpet = FIELD_GET(GITS_TYPER_SVPET, its->typer); | |
2273 | val = FIELD_PREP(GICR_TYPER_COMMON_LPI_AFF, svpet); | |
2274 | val |= FIELD_PREP(GICR_TYPER_AFFINITY, its->mpidr); | |
2275 | return compute_common_aff(val); | |
2276 | } | |
2277 | ||
2278 | static struct its_node *find_sibling_its(struct its_node *cur_its) | |
2279 | { | |
2280 | struct its_node *its; | |
2281 | u32 aff; | |
2282 | ||
2283 | if (!FIELD_GET(GITS_TYPER_SVPET, cur_its->typer)) | |
2284 | return NULL; | |
2285 | ||
2286 | aff = compute_its_aff(cur_its); | |
2287 | ||
2288 | list_for_each_entry(its, &its_nodes, entry) { | |
2289 | u64 baser; | |
2290 | ||
2291 | if (!is_v4_1(its) || its == cur_its) | |
2292 | continue; | |
2293 | ||
2294 | if (!FIELD_GET(GITS_TYPER_SVPET, its->typer)) | |
2295 | continue; | |
2296 | ||
2297 | if (aff != compute_its_aff(its)) | |
2298 | continue; | |
2299 | ||
2300 | /* GICv4.1 guarantees that the vPE table is GITS_BASER2 */ | |
2301 | baser = its->tables[2].val; | |
2302 | if (!(baser & GITS_BASER_VALID)) | |
2303 | continue; | |
2304 | ||
2305 | return its; | |
2306 | } | |
2307 | ||
2308 | return NULL; | |
2309 | } | |
2310 | ||
1ac19ca6 MZ |
2311 | static void its_free_tables(struct its_node *its) |
2312 | { | |
2313 | int i; | |
2314 | ||
2315 | for (i = 0; i < GITS_BASER_NR_REGS; i++) { | |
1a485f4d SD |
2316 | if (its->tables[i].base) { |
2317 | free_pages((unsigned long)its->tables[i].base, | |
2318 | its->tables[i].order); | |
2319 | its->tables[i].base = NULL; | |
1ac19ca6 MZ |
2320 | } |
2321 | } | |
2322 | } | |
2323 | ||
0e0b0f69 | 2324 | static int its_alloc_tables(struct its_node *its) |
1ac19ca6 | 2325 | { |
1ac19ca6 | 2326 | u64 shr = GITS_BASER_InnerShareable; |
2fd632a0 | 2327 | u64 cache = GITS_BASER_RaWaWb; |
9347359a SD |
2328 | u32 psz = SZ_64K; |
2329 | int err, i; | |
94100970 | 2330 | |
fa150019 AB |
2331 | if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375) |
2332 | /* erratum 24313: ignore memory access type */ | |
2333 | cache = GITS_BASER_nCnB; | |
466b7d16 | 2334 | |
1ac19ca6 | 2335 | for (i = 0; i < GITS_BASER_NR_REGS; i++) { |
2d81d425 SD |
2336 | struct its_baser *baser = its->tables + i; |
2337 | u64 val = its_read_baser(its, baser); | |
1ac19ca6 | 2338 | u64 type = GITS_BASER_TYPE(val); |
9347359a | 2339 | u32 order = get_order(psz); |
3faf24ea | 2340 | bool indirect = false; |
1ac19ca6 | 2341 | |
4cacac57 MZ |
2342 | switch (type) { |
2343 | case GITS_BASER_TYPE_NONE: | |
1ac19ca6 MZ |
2344 | continue; |
2345 | ||
4cacac57 | 2346 | case GITS_BASER_TYPE_DEVICE: |
32bd44dc SD |
2347 | indirect = its_parse_indirect_baser(its, baser, |
2348 | psz, &order, | |
576a8342 | 2349 | device_ids(its)); |
8d565748 ZY |
2350 | break; |
2351 | ||
4cacac57 | 2352 | case GITS_BASER_TYPE_VCPU: |
5e516846 MZ |
2353 | if (is_v4_1(its)) { |
2354 | struct its_node *sibling; | |
2355 | ||
2356 | WARN_ON(i != 2); | |
2357 | if ((sibling = find_sibling_its(its))) { | |
2358 | *baser = sibling->tables[2]; | |
2359 | its_write_baser(its, baser, baser->val); | |
2360 | continue; | |
2361 | } | |
2362 | } | |
2363 | ||
4cacac57 | 2364 | indirect = its_parse_indirect_baser(its, baser, |
32bd44dc SD |
2365 | psz, &order, |
2366 | ITS_MAX_VPEID_BITS); | |
4cacac57 MZ |
2367 | break; |
2368 | } | |
f54b97ed | 2369 | |
3faf24ea | 2370 | err = its_setup_baser(its, baser, cache, shr, psz, order, indirect); |
9347359a SD |
2371 | if (err < 0) { |
2372 | its_free_tables(its); | |
2373 | return err; | |
1ac19ca6 MZ |
2374 | } |
2375 | ||
9347359a SD |
2376 | /* Update settings which will be used for next BASERn */ |
2377 | psz = baser->psz; | |
2378 | cache = baser->val & GITS_BASER_CACHEABILITY_MASK; | |
2379 | shr = baser->val & GITS_BASER_SHAREABILITY_MASK; | |
1ac19ca6 MZ |
2380 | } |
2381 | ||
2382 | return 0; | |
1ac19ca6 MZ |
2383 | } |
2384 | ||
5e516846 MZ |
2385 | static u64 inherit_vpe_l1_table_from_its(void) |
2386 | { | |
2387 | struct its_node *its; | |
2388 | u64 val; | |
2389 | u32 aff; | |
2390 | ||
2391 | val = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER); | |
2392 | aff = compute_common_aff(val); | |
2393 | ||
2394 | list_for_each_entry(its, &its_nodes, entry) { | |
2395 | u64 baser, addr; | |
2396 | ||
2397 | if (!is_v4_1(its)) | |
2398 | continue; | |
2399 | ||
2400 | if (!FIELD_GET(GITS_TYPER_SVPET, its->typer)) | |
2401 | continue; | |
2402 | ||
2403 | if (aff != compute_its_aff(its)) | |
2404 | continue; | |
2405 | ||
2406 | /* GICv4.1 guarantees that the vPE table is GITS_BASER2 */ | |
2407 | baser = its->tables[2].val; | |
2408 | if (!(baser & GITS_BASER_VALID)) | |
2409 | continue; | |
2410 | ||
2411 | /* We have a winner! */ | |
8b718d40 ZY |
2412 | gic_data_rdist()->vpe_l1_base = its->tables[2].base; |
2413 | ||
5e516846 MZ |
2414 | val = GICR_VPROPBASER_4_1_VALID; |
2415 | if (baser & GITS_BASER_INDIRECT) | |
2416 | val |= GICR_VPROPBASER_4_1_INDIRECT; | |
2417 | val |= FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE, | |
2418 | FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser)); | |
2419 | switch (FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser)) { | |
2420 | case GIC_PAGE_SIZE_64K: | |
2421 | addr = GITS_BASER_ADDR_48_to_52(baser); | |
2422 | break; | |
2423 | default: | |
2424 | addr = baser & GENMASK_ULL(47, 12); | |
2425 | break; | |
2426 | } | |
2427 | val |= FIELD_PREP(GICR_VPROPBASER_4_1_ADDR, addr >> 12); | |
2428 | val |= FIELD_PREP(GICR_VPROPBASER_SHAREABILITY_MASK, | |
2429 | FIELD_GET(GITS_BASER_SHAREABILITY_MASK, baser)); | |
2430 | val |= FIELD_PREP(GICR_VPROPBASER_INNER_CACHEABILITY_MASK, | |
2431 | FIELD_GET(GITS_BASER_INNER_CACHEABILITY_MASK, baser)); | |
2432 | val |= FIELD_PREP(GICR_VPROPBASER_4_1_SIZE, GITS_BASER_NR_PAGES(baser) - 1); | |
2433 | ||
2434 | return val; | |
2435 | } | |
2436 | ||
2437 | return 0; | |
2438 | } | |
2439 | ||
2440 | static u64 inherit_vpe_l1_table_from_rd(cpumask_t **mask) | |
2441 | { | |
2442 | u32 aff; | |
2443 | u64 val; | |
2444 | int cpu; | |
2445 | ||
2446 | val = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER); | |
2447 | aff = compute_common_aff(val); | |
2448 | ||
2449 | for_each_possible_cpu(cpu) { | |
2450 | void __iomem *base = gic_data_rdist_cpu(cpu)->rd_base; | |
5e516846 MZ |
2451 | |
2452 | if (!base || cpu == smp_processor_id()) | |
2453 | continue; | |
2454 | ||
2455 | val = gic_read_typer(base + GICR_TYPER); | |
4bccf1d7 | 2456 | if (aff != compute_common_aff(val)) |
5e516846 MZ |
2457 | continue; |
2458 | ||
2459 | /* | |
2460 | * At this point, we have a victim. This particular CPU | |
2461 | * has already booted, and has an affinity that matches | |
2462 | * ours wrt CommonLPIAff. Let's use its own VPROPBASER. | |
2463 | * Make sure we don't write the Z bit in that case. | |
2464 | */ | |
5186a6cc | 2465 | val = gicr_read_vpropbaser(base + SZ_128K + GICR_VPROPBASER); |
5e516846 MZ |
2466 | val &= ~GICR_VPROPBASER_4_1_Z; |
2467 | ||
8b718d40 | 2468 | gic_data_rdist()->vpe_l1_base = gic_data_rdist_cpu(cpu)->vpe_l1_base; |
5e516846 MZ |
2469 | *mask = gic_data_rdist_cpu(cpu)->vpe_table_mask; |
2470 | ||
2471 | return val; | |
2472 | } | |
2473 | ||
2474 | return 0; | |
2475 | } | |
2476 | ||
4e6437f1 ZY |
2477 | static bool allocate_vpe_l2_table(int cpu, u32 id) |
2478 | { | |
2479 | void __iomem *base = gic_data_rdist_cpu(cpu)->rd_base; | |
490d332e MZ |
2480 | unsigned int psz, esz, idx, npg, gpsz; |
2481 | u64 val; | |
4e6437f1 ZY |
2482 | struct page *page; |
2483 | __le64 *table; | |
2484 | ||
2485 | if (!gic_rdists->has_rvpeid) | |
2486 | return true; | |
2487 | ||
28d160de MZ |
2488 | /* Skip non-present CPUs */ |
2489 | if (!base) | |
2490 | return true; | |
2491 | ||
5186a6cc | 2492 | val = gicr_read_vpropbaser(base + SZ_128K + GICR_VPROPBASER); |
4e6437f1 ZY |
2493 | |
2494 | esz = FIELD_GET(GICR_VPROPBASER_4_1_ENTRY_SIZE, val) + 1; | |
2495 | gpsz = FIELD_GET(GICR_VPROPBASER_4_1_PAGE_SIZE, val); | |
2496 | npg = FIELD_GET(GICR_VPROPBASER_4_1_SIZE, val) + 1; | |
2497 | ||
2498 | switch (gpsz) { | |
2499 | default: | |
2500 | WARN_ON(1); | |
2501 | /* fall through */ | |
2502 | case GIC_PAGE_SIZE_4K: | |
2503 | psz = SZ_4K; | |
2504 | break; | |
2505 | case GIC_PAGE_SIZE_16K: | |
2506 | psz = SZ_16K; | |
2507 | break; | |
2508 | case GIC_PAGE_SIZE_64K: | |
2509 | psz = SZ_64K; | |
2510 | break; | |
2511 | } | |
2512 | ||
2513 | /* Don't allow vpe_id that exceeds single, flat table limit */ | |
2514 | if (!(val & GICR_VPROPBASER_4_1_INDIRECT)) | |
2515 | return (id < (npg * psz / (esz * SZ_8))); | |
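/*
 * e.g. (illustrative numbers) npg == 1, psz == SZ_64K and 8-byte
 * entries (esz == 1) cap a flat table at 8192 vPE IDs.
 */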
2516 | ||
2517 | /* Compute 1st level table index & check if that exceeds table limit */ | |
2518 | idx = id >> ilog2(psz / (esz * SZ_8)); | |
2519 | if (idx >= (npg * psz / GITS_LVL1_ENTRY_SIZE)) | |
2520 | return false; | |
2521 | ||
2522 | table = gic_data_rdist_cpu(cpu)->vpe_l1_base; | |
2523 | ||
2524 | /* Allocate memory for 2nd level table */ | |
2525 | if (!table[idx]) { | |
2526 | page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(psz)); | |
2527 | if (!page) | |
2528 | return false; | |
2529 | ||
2530 | /* Flush Lvl2 table to PoC if hw doesn't support coherency */ | |
2531 | if (!(val & GICR_VPROPBASER_SHAREABILITY_MASK)) | |
2532 | gic_flush_dcache_to_poc(page_address(page), psz); | |
2533 | ||
2534 | table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID); | |
2535 | ||
2536 | /* Flush Lvl1 entry to PoC if hw doesn't support coherency */ | |
2537 | if (!(val & GICR_VPROPBASER_SHAREABILITY_MASK)) | |
2538 | gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE); | |
2539 | ||
2540 | /* Ensure updated table contents are visible to RD hardware */ | |
2541 | dsb(sy); | |
2542 | } | |
2543 | ||
2544 | return true; | |
2545 | } | |
2546 | ||
5e516846 MZ |
2547 | static int allocate_vpe_l1_table(void) |
2548 | { | |
2549 | void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); | |
2550 | u64 val, gpsz, npg, pa; | |
2551 | unsigned int psz = SZ_64K; | |
2552 | unsigned int np, epp, esz; | |
2553 | struct page *page; | |
2554 | ||
2555 | if (!gic_rdists->has_rvpeid) | |
2556 | return 0; | |
2557 | ||
2558 | /* | |
2559 | * if VPENDBASER.Valid is set, disable any previously programmed | |
2560 | * VPE by setting PendingLast while clearing Valid. This has the | |
2561 | * effect of making sure no doorbell will be generated and we can | |
2562 | * then safely clear VPROPBASER.Valid. | |
2563 | */ | |
5186a6cc ZY |
2564 | if (gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER) & GICR_VPENDBASER_Valid) |
2565 | gicr_write_vpendbaser(GICR_VPENDBASER_PendingLast, | |
5e516846 MZ |
2566 | vlpi_base + GICR_VPENDBASER); |
2567 | ||
2568 | /* | |
2569 | * If we can inherit the configuration from another RD, let's do | |
2570 | * so. Otherwise, we have to go through the allocation process. We | |
2571 | * assume that all RDs have the exact same requirements, as | |
2572 | * nothing will work otherwise. | |
2573 | */ | |
2574 | val = inherit_vpe_l1_table_from_rd(&gic_data_rdist()->vpe_table_mask); | |
2575 | if (val & GICR_VPROPBASER_4_1_VALID) | |
2576 | goto out; | |
2577 | ||
2578 | gic_data_rdist()->vpe_table_mask = kzalloc(sizeof(cpumask_t), GFP_KERNEL); | |
2579 | if (!gic_data_rdist()->vpe_table_mask) | |
2580 | return -ENOMEM; | |
2581 | ||
2582 | val = inherit_vpe_l1_table_from_its(); | |
2583 | if (val & GICR_VPROPBASER_4_1_VALID) | |
2584 | goto out; | |
2585 | ||
2586 | /* First probe the page size */ | |
2587 | val = FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE, GIC_PAGE_SIZE_64K); | |
5186a6cc ZY |
2588 | gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER); |
2589 | val = gicr_read_vpropbaser(vlpi_base + GICR_VPROPBASER); | |
5e516846 MZ |
2590 | gpsz = FIELD_GET(GICR_VPROPBASER_4_1_PAGE_SIZE, val); |
2591 | esz = FIELD_GET(GICR_VPROPBASER_4_1_ENTRY_SIZE, val); | |
2592 | ||
2593 | switch (gpsz) { | |
2594 | default: | |
2595 | gpsz = GIC_PAGE_SIZE_4K; | |
2596 | /* fall through */ | |
2597 | case GIC_PAGE_SIZE_4K: | |
2598 | psz = SZ_4K; | |
2599 | break; | |
2600 | case GIC_PAGE_SIZE_16K: | |
2601 | psz = SZ_16K; | |
2602 | break; | |
2603 | case GIC_PAGE_SIZE_64K: | |
2604 | psz = SZ_64K; | |
2605 | break; | |
2606 | } | |
2607 | ||
2608 | /* | |
2609 | * Start populating the register from scratch, including RO fields | |
2610 | * (which we want to print in debug cases...) | |
2611 | */ | |
2612 | val = 0; | |
2613 | val |= FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE, gpsz); | |
2614 | val |= FIELD_PREP(GICR_VPROPBASER_4_1_ENTRY_SIZE, esz); | |
2615 | ||
2616 | /* How many entries per GIC page? */ | |
2617 | esz++; | |
2618 | epp = psz / (esz * SZ_8); | |
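/* e.g. 8-byte entries (esz == 1 after the increment) and psz == SZ_4K give epp == 512 */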
2619 | ||
2620 | /* | |
2621 | * If we need more than just a single L1 page, flag the table | |
2622 | * as indirect and compute the number of required L1 pages. | |
2623 | */ | |
2624 | if (epp < ITS_MAX_VPEID) { | |
2625 | int nl2; | |
2626 | ||
2627 | val |= GICR_VPROPBASER_4_1_INDIRECT; | |
2628 | ||
2629 | /* Number of L2 pages required to cover the VPEID space */ | |
2630 | nl2 = DIV_ROUND_UP(ITS_MAX_VPEID, epp); | |
2631 | ||
2632 | /* Number of L1 pages to point to the L2 pages */ | |
2633 | npg = DIV_ROUND_UP(nl2 * SZ_8, psz); | |
2634 | } else { | |
2635 | npg = 1; | |
2636 | } | |
2637 | ||
e88bd316 | 2638 | val |= FIELD_PREP(GICR_VPROPBASER_4_1_SIZE, npg - 1); |
5e516846 MZ |
2639 | |
2640 | /* Right, that's the number of CPU pages we need for L1 */ | |
2641 | np = DIV_ROUND_UP(npg * psz, PAGE_SIZE); | |
2642 | ||
2643 | pr_debug("np = %d, npg = %lld, psz = %d, epp = %d, esz = %d\n", | |
2644 | np, npg, psz, epp, esz); | |
2645 | page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(np * PAGE_SIZE)); | |
2646 | if (!page) | |
2647 | return -ENOMEM; | |
2648 | ||
8b718d40 | 2649 | gic_data_rdist()->vpe_l1_base = page_address(page); |
5e516846 MZ |
2650 | pa = virt_to_phys(page_address(page)); |
2651 | WARN_ON(!IS_ALIGNED(pa, psz)); | |
2652 | ||
2653 | val |= FIELD_PREP(GICR_VPROPBASER_4_1_ADDR, pa >> 12); | |
2654 | val |= GICR_VPROPBASER_RaWb; | |
2655 | val |= GICR_VPROPBASER_InnerShareable; | |
2656 | val |= GICR_VPROPBASER_4_1_Z; | |
2657 | val |= GICR_VPROPBASER_4_1_VALID; | |
2658 | ||
2659 | out: | |
5186a6cc | 2660 | gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER); |
5e516846 MZ |
2661 | cpumask_set_cpu(smp_processor_id(), gic_data_rdist()->vpe_table_mask); |
2662 | ||
2663 | pr_debug("CPU%d: VPROPBASER = %llx %*pbl\n", | |
2664 | smp_processor_id(), val, | |
2665 | cpumask_pr_args(gic_data_rdist()->vpe_table_mask)); | |
2666 | ||
2667 | return 0; | |
2668 | } | |
2669 | ||
1ac19ca6 MZ |
2670 | static int its_alloc_collections(struct its_node *its) |
2671 | { | |
83559b47 MZ |
2672 | int i; |
2673 | ||
6396bb22 | 2674 | its->collections = kcalloc(nr_cpu_ids, sizeof(*its->collections), |
1ac19ca6 MZ |
2675 | GFP_KERNEL); |
2676 | if (!its->collections) | |
2677 | return -ENOMEM; | |
2678 | ||
83559b47 MZ |
2679 | for (i = 0; i < nr_cpu_ids; i++) |
2680 | its->collections[i].target_address = ~0ULL; | |
2681 | ||
1ac19ca6 MZ |
2682 | return 0; |
2683 | } | |
2684 | ||
7c297a2d MZ |
2685 | static struct page *its_allocate_pending_table(gfp_t gfp_flags) |
2686 | { | |
2687 | struct page *pend_page; | |
adaab500 | 2688 | |
7c297a2d | 2689 | pend_page = alloc_pages(gfp_flags | __GFP_ZERO, |
adaab500 | 2690 | get_order(LPI_PENDBASE_SZ)); |
7c297a2d MZ |
2691 | if (!pend_page) |
2692 | return NULL; | |
2693 | ||
2694 | /* Make sure the GIC will observe the zero-ed page */ | |
2695 | gic_flush_dcache_to_poc(page_address(pend_page), LPI_PENDBASE_SZ); | |
2696 | ||
2697 | return pend_page; | |
2698 | } | |
2699 | ||
7d75bbb4 MZ |
2700 | static void its_free_pending_table(struct page *pt) |
2701 | { | |
adaab500 | 2702 | free_pages((unsigned long)page_address(pt), get_order(LPI_PENDBASE_SZ)); |
7d75bbb4 MZ |
2703 | } |
2704 | ||
c6e2ccb6 | 2705 | /* |
5e2c9f9a MZ |
2706 | * Booting with kdump and LPIs enabled is generally fine. Any other |
2707 | * case is wrong in the absence of firmware/EFI support. | |
c6e2ccb6 | 2708 | */ |
c440a9d9 MZ |
2709 | static bool enabled_lpis_allowed(void) |
2710 | { | |
5e2c9f9a MZ |
2711 | phys_addr_t addr; |
2712 | u64 val; | |
c6e2ccb6 | 2713 | |
5e2c9f9a MZ |
2714 | /* Check whether the property table is in a reserved region */ |
2715 | val = gicr_read_propbaser(gic_data_rdist_rd_base() + GICR_PROPBASER); | |
2716 | addr = val & GENMASK_ULL(51, 12); | |
2717 | ||
2718 | return gic_check_reserved_range(addr, LPI_PROPBASE_SZ); | |
c440a9d9 MZ |
2719 | } |
2720 | ||
11e37d35 | 2721 | static int __init allocate_lpi_tables(void) |
1ac19ca6 | 2722 | { |
c440a9d9 | 2723 | u64 val; |
11e37d35 | 2724 | int err, cpu; |
1ac19ca6 | 2725 | |
c440a9d9 MZ |
2726 | /* |
2727 | * If LPIs are enabled while we run this from the boot CPU, | |
2728 | * flag the RD tables as pre-allocated if the stars do align. | |
2729 | */ | |
2730 | val = readl_relaxed(gic_data_rdist_rd_base() + GICR_CTLR); | |
2731 | if ((val & GICR_CTLR_ENABLE_LPIS) && enabled_lpis_allowed()) { | |
2732 | gic_rdists->flags |= (RDIST_FLAGS_RD_TABLES_PREALLOCATED | | |
2733 | RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING); | |
2734 | pr_info("GICv3: Using preallocated redistributor tables\n"); | |
2735 | } | |
2736 | ||
11e37d35 MZ |
2737 | err = its_setup_lpi_prop_table(); |
2738 | if (err) | |
2739 | return err; | |
2740 | ||
2741 | /* | |
2742 | * We allocate all the pending tables anyway, as we may have a | |
2743 | * mix of RDs that have had LPIs enabled, and some that | |
2744 | * don't. We'll free the unused ones as each CPU comes online. | |
2745 | */ | |
2746 | for_each_possible_cpu(cpu) { | |
2747 | struct page *pend_page; | |
7c297a2d MZ |
2748 | |
2749 | pend_page = its_allocate_pending_table(GFP_NOWAIT); | |
1ac19ca6 | 2750 | if (!pend_page) { |
11e37d35 MZ |
2751 | pr_err("Failed to allocate PENDBASE for CPU%d\n", cpu); |
2752 | return -ENOMEM; | |
1ac19ca6 MZ |
2753 | } |
2754 | ||
11e37d35 | 2755 | gic_data_rdist_cpu(cpu)->pend_page = pend_page; |
1ac19ca6 MZ |
2756 | } |
2757 | ||
11e37d35 MZ |
2758 | return 0; |
2759 | } | |
2760 | ||
e64fab1a | 2761 | static u64 its_clear_vpend_valid(void __iomem *vlpi_base, u64 clr, u64 set) |
6479450f HG |
2762 | { |
2763 | u32 count = 1000000; /* 1s! */ | |
2764 | bool clean; | |
2765 | u64 val; | |
2766 | ||
5186a6cc | 2767 | val = gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER); |
6479450f | 2768 | val &= ~GICR_VPENDBASER_Valid; |
e64fab1a MZ |
2769 | val &= ~clr; |
2770 | val |= set; | |
5186a6cc | 2771 | gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER); |
6479450f HG |
2772 | |
2773 | do { | |
5186a6cc | 2774 | val = gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER); |
6479450f HG |
2775 | clean = !(val & GICR_VPENDBASER_Dirty); |
2776 | if (!clean) { | |
2777 | count--; | |
2778 | cpu_relax(); | |
2779 | udelay(1); | |
2780 | } | |
2781 | } while (!clean && count); | |
2782 | ||
e64fab1a MZ |
2783 | if (unlikely(val & GICR_VPENDBASER_Dirty)) { |
2784 | pr_err_ratelimited("ITS virtual pending table not cleaning\n"); | |
2785 | val |= GICR_VPENDBASER_PendingLast; | |
2786 | } | |
2787 | ||
6479450f HG |
2788 | return val; |
2789 | } | |
2790 | ||
11e37d35 MZ |
2791 | static void its_cpu_init_lpis(void) |
2792 | { | |
2793 | void __iomem *rbase = gic_data_rdist_rd_base(); | |
2794 | struct page *pend_page; | |
2795 | phys_addr_t paddr; | |
2796 | u64 val, tmp; | |
2797 | ||
2798 | if (gic_data_rdist()->lpi_enabled) | |
2799 | return; | |
2800 | ||
c440a9d9 MZ |
2801 | val = readl_relaxed(rbase + GICR_CTLR); |
2802 | if ((gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) && | |
2803 | (val & GICR_CTLR_ENABLE_LPIS)) { | |
f842ca8e MZ |
2804 | /* |
2805 | * Check that we get the same property table on all | |
2806 | * RDs. If we don't, this is hopeless. | |
2807 | */ | |
2808 | paddr = gicr_read_propbaser(rbase + GICR_PROPBASER); | |
2809 | paddr &= GENMASK_ULL(51, 12); | |
2810 | if (WARN_ON(gic_rdists->prop_table_pa != paddr)) | |
2811 | add_taint(TAINT_CRAP, LOCKDEP_STILL_OK); | |
2812 | ||
c440a9d9 MZ |
2813 | paddr = gicr_read_pendbaser(rbase + GICR_PENDBASER); |
2814 | paddr &= GENMASK_ULL(51, 16); | |
2815 | ||
5e2c9f9a | 2816 | WARN_ON(!gic_check_reserved_range(paddr, LPI_PENDBASE_SZ)); |
c440a9d9 MZ |
2817 | its_free_pending_table(gic_data_rdist()->pend_page); |
2818 | gic_data_rdist()->pend_page = NULL; | |
2819 | ||
2820 | goto out; | |
2821 | } | |
2822 | ||
11e37d35 MZ |
2823 | pend_page = gic_data_rdist()->pend_page; |
2824 | paddr = page_to_phys(pend_page); | |
3fb68fae | 2825 | WARN_ON(gic_reserve_range(paddr, LPI_PENDBASE_SZ)); |
11e37d35 | 2826 | |
1ac19ca6 | 2827 | /* set PROPBASE */ |
e1a2e201 | 2828 | val = (gic_rdists->prop_table_pa | |
1ac19ca6 | 2829 | GICR_PROPBASER_InnerShareable | |
2fd632a0 | 2830 | GICR_PROPBASER_RaWaWb | |
1ac19ca6 MZ |
2831 | ((LPI_NRBITS - 1) & GICR_PROPBASER_IDBITS_MASK)); |
2832 | ||
0968a619 VM |
2833 | gicr_write_propbaser(val, rbase + GICR_PROPBASER); |
2834 | tmp = gicr_read_propbaser(rbase + GICR_PROPBASER); | |
1ac19ca6 MZ |
2835 | |
2836 | if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) { | |
241a386c MZ |
2837 | if (!(tmp & GICR_PROPBASER_SHAREABILITY_MASK)) { |
2838 | /* | |
2839 | * The HW reports non-shareable, we must | |
2840 | * remove the cacheability attributes as | |
2841 | * well. | |
2842 | */ | |
2843 | val &= ~(GICR_PROPBASER_SHAREABILITY_MASK | | |
2844 | GICR_PROPBASER_CACHEABILITY_MASK); | |
2845 | val |= GICR_PROPBASER_nC; | |
0968a619 | 2846 | gicr_write_propbaser(val, rbase + GICR_PROPBASER); |
241a386c | 2847 | } |
1ac19ca6 MZ |
2848 | pr_info_once("GIC: using cache flushing for LPI property table\n"); |
2849 | gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING; | |
2850 | } | |
2851 | ||
2852 | /* set PENDBASE */ | |
2853 | val = (page_to_phys(pend_page) | | |
4ad3e363 | 2854 | GICR_PENDBASER_InnerShareable | |
2fd632a0 | 2855 | GICR_PENDBASER_RaWaWb); |
1ac19ca6 | 2856 | |
0968a619 VM |
2857 | gicr_write_pendbaser(val, rbase + GICR_PENDBASER); |
2858 | tmp = gicr_read_pendbaser(rbase + GICR_PENDBASER); | |
241a386c MZ |
2859 | |
2860 | if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) { | |
2861 | /* | |
2862 | * The HW reports non-shareable, we must remove the | |
2863 | * cacheability attributes as well. | |
2864 | */ | |
2865 | val &= ~(GICR_PENDBASER_SHAREABILITY_MASK | | |
2866 | GICR_PENDBASER_CACHEABILITY_MASK); | |
2867 | val |= GICR_PENDBASER_nC; | |
0968a619 | 2868 | gicr_write_pendbaser(val, rbase + GICR_PENDBASER); |
241a386c | 2869 | } |
1ac19ca6 MZ |
2870 | |
2871 | /* Enable LPIs */ | |
2872 | val = readl_relaxed(rbase + GICR_CTLR); | |
2873 | val |= GICR_CTLR_ENABLE_LPIS; | |
2874 | writel_relaxed(val, rbase + GICR_CTLR); | |
2875 | ||
5e516846 | 2876 | if (gic_rdists->has_vlpis && !gic_rdists->has_rvpeid) { |
6479450f HG |
2877 | void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); |
2878 | ||
2879 | /* | |
2880 | * It's possible for the CPU to receive VLPIs before it is |
2881 | * scheduled as a vPE, especially for the first CPU, and a |
2882 | * VLPI with an INTID larger than 2^(IDbits+1) will be considered |
2883 | * out of range and dropped by the GIC. |
2884 | * So we initialize IDbits to a known value to avoid dropping VLPIs. |
2885 | */ | |
2886 | val = (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK; | |
2887 | pr_debug("GICv4: CPU%d: Init IDbits to 0x%llx for GICR_VPROPBASER\n", | |
2888 | smp_processor_id(), val); | |
5186a6cc | 2889 | gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER); |
6479450f HG |
2890 | |
2891 | /* | |
2892 | * Also clear Valid bit of GICR_VPENDBASER, in case some | |
2893 | * ancient programming gets left in and has possibility of | |
2894 | * corrupting memory. | |
2895 | */ | |
e64fab1a | 2896 | val = its_clear_vpend_valid(vlpi_base, 0, 0); |
6479450f HG |
2897 | } |
2898 | ||
5e516846 MZ |
2899 | if (allocate_vpe_l1_table()) { |
2900 | /* | |
2901 | * If the allocation has failed, we're in massive trouble. | |
2902 | * Disable direct injection, and pray that no VM was | |
2903 | * already running... | |
2904 | */ | |
2905 | gic_rdists->has_rvpeid = false; | |
2906 | gic_rdists->has_vlpis = false; | |
2907 | } | |
2908 | ||
1ac19ca6 MZ |
2909 | /* Make sure the GIC has seen the above */ |
2910 | dsb(sy); | |
c440a9d9 | 2911 | out: |
11e37d35 | 2912 | gic_data_rdist()->lpi_enabled = true; |
c440a9d9 | 2913 | pr_info("GICv3: CPU%d: using %s LPI pending table @%pa\n", |
11e37d35 | 2914 | smp_processor_id(), |
c440a9d9 | 2915 | gic_data_rdist()->pend_page ? "allocated" : "reserved", |
11e37d35 | 2916 | &paddr); |
1ac19ca6 MZ |
2917 | } |
2918 | ||
920181ce | 2919 | static void its_cpu_init_collection(struct its_node *its) |
1ac19ca6 | 2920 | { |
920181ce DB |
2921 | int cpu = smp_processor_id(); |
2922 | u64 target; | |
1ac19ca6 | 2923 | |
920181ce DB |
2924 | /* avoid cross-node collections and their mapping */ |
2925 | if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) { | |
2926 | struct device_node *cpu_node; | |
fbf8f40e | 2927 | |
920181ce DB |
2928 | cpu_node = of_get_cpu_node(cpu, NULL); |
2929 | if (its->numa_node != NUMA_NO_NODE && | |
2930 | its->numa_node != of_node_to_nid(cpu_node)) | |
2931 | return; | |
2932 | } | |
fbf8f40e | 2933 | |
920181ce DB |
2934 | /* |
2935 | * We now have to bind each collection to its target | |
2936 | * redistributor. | |
2937 | */ | |
2938 | if (gic_read_typer(its->base + GITS_TYPER) & GITS_TYPER_PTA) { | |
1ac19ca6 | 2939 | /* |
920181ce | 2940 | * This ITS wants the physical address of the |
1ac19ca6 MZ |
2941 | * redistributor. |
2942 | */ | |
920181ce DB |
2943 | target = gic_data_rdist()->phys_base; |
2944 | } else { | |
2945 | /* This ITS wants a linear CPU number. */ | |
2946 | target = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER); | |
2947 | target = GICR_TYPER_CPU_NUMBER(target) << 16; | |
2948 | } | |
1ac19ca6 | 2949 | |
920181ce DB |
2950 | /* Perform collection mapping */ |
2951 | its->collections[cpu].target_address = target; | |
2952 | its->collections[cpu].col_id = cpu; | |
1ac19ca6 | 2953 | |
920181ce DB |
2954 | its_send_mapc(its, &its->collections[cpu], 1); |
2955 | its_send_invall(its, &its->collections[cpu]); | |
2956 | } | |
2957 | ||
2958 | static void its_cpu_init_collections(void) | |
2959 | { | |
2960 | struct its_node *its; | |
2961 | ||
a8db7456 | 2962 | raw_spin_lock(&its_lock); |
920181ce DB |
2963 | |
2964 | list_for_each_entry(its, &its_nodes, entry) | |
2965 | its_cpu_init_collection(its); | |
1ac19ca6 | 2966 | |
a8db7456 | 2967 | raw_spin_unlock(&its_lock); |
1ac19ca6 | 2968 | } |
84a6a2e7 MZ |
2969 | |
2970 | static struct its_device *its_find_device(struct its_node *its, u32 dev_id) | |
2971 | { | |
2972 | struct its_device *its_dev = NULL, *tmp; | |
3e39e8f5 | 2973 | unsigned long flags; |
84a6a2e7 | 2974 | |
3e39e8f5 | 2975 | raw_spin_lock_irqsave(&its->lock, flags); |
84a6a2e7 MZ |
2976 | |
2977 | list_for_each_entry(tmp, &its->its_device_list, entry) { | |
2978 | if (tmp->device_id == dev_id) { | |
2979 | its_dev = tmp; | |
2980 | break; | |
2981 | } | |
2982 | } | |
2983 | ||
3e39e8f5 | 2984 | raw_spin_unlock_irqrestore(&its->lock, flags); |
84a6a2e7 MZ |
2985 | |
2986 | return its_dev; | |
2987 | } | |
2988 | ||
466b7d16 SD |
2989 | static struct its_baser *its_get_baser(struct its_node *its, u32 type) |
2990 | { | |
2991 | int i; | |
2992 | ||
2993 | for (i = 0; i < GITS_BASER_NR_REGS; i++) { | |
2994 | if (GITS_BASER_TYPE(its->tables[i].val) == type) | |
2995 | return &its->tables[i]; | |
2996 | } | |
2997 | ||
2998 | return NULL; | |
2999 | } | |
3000 | ||
539d3782 SD |
3001 | static bool its_alloc_table_entry(struct its_node *its, |
3002 | struct its_baser *baser, u32 id) | |
3faf24ea | 3003 | { |
3faf24ea SD |
3004 | struct page *page; |
3005 | u32 esz, idx; | |
3006 | __le64 *table; | |
3007 | ||
3faf24ea SD |
3008 | /* Don't allow device id that exceeds single, flat table limit */ |
3009 | esz = GITS_BASER_ENTRY_SIZE(baser->val); | |
3010 | if (!(baser->val & GITS_BASER_INDIRECT)) | |
70cc81ed | 3011 | return (id < (PAGE_ORDER_TO_SIZE(baser->order) / esz)); |
3faf24ea SD |
3012 | |
3013 | /* Compute 1st level table index & check if that exceeds table limit */ | |
70cc81ed | 3014 | idx = id >> ilog2(baser->psz / esz); |
3faf24ea SD |
3015 | if (idx >= (PAGE_ORDER_TO_SIZE(baser->order) / GITS_LVL1_ENTRY_SIZE)) |
3016 | return false; | |
3017 | ||
3018 | table = baser->base; | |
3019 | ||
3020 | /* Allocate memory for 2nd level table */ | |
3021 | if (!table[idx]) { | |
539d3782 SD |
3022 | page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, |
3023 | get_order(baser->psz)); | |
3faf24ea SD |
3024 | if (!page) |
3025 | return false; | |
3026 | ||
3027 | /* Flush Lvl2 table to PoC if hw doesn't support coherency */ | |
3028 | if (!(baser->val & GITS_BASER_SHAREABILITY_MASK)) | |
328191c0 | 3029 | gic_flush_dcache_to_poc(page_address(page), baser->psz); |
3faf24ea SD |
3030 | |
3031 | table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID); | |
3032 | ||
3033 | /* Flush Lvl1 entry to PoC if hw doesn't support coherency */ | |
3034 | if (!(baser->val & GITS_BASER_SHAREABILITY_MASK)) | |
328191c0 | 3035 | gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE); |
3faf24ea SD |
3036 | |
3037 | /* Ensure updated table contents are visible to ITS hardware */ | |
3038 | dsb(sy); | |
3039 | } | |
3040 | ||
3041 | return true; | |
3042 | } | |
3043 | ||
70cc81ed MZ |
3044 | static bool its_alloc_device_table(struct its_node *its, u32 dev_id) |
3045 | { | |
3046 | struct its_baser *baser; | |
3047 | ||
3048 | baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE); | |
3049 | ||
3050 | /* Don't allow device id that exceeds ITS hardware limit */ | |
3051 | if (!baser) | |
576a8342 | 3052 | return (ilog2(dev_id) < device_ids(its)); |
70cc81ed | 3053 | |
539d3782 | 3054 | return its_alloc_table_entry(its, baser, dev_id); |
70cc81ed MZ |
3055 | } |
3056 | ||
7d75bbb4 MZ |
3057 | static bool its_alloc_vpe_table(u32 vpe_id) |
3058 | { | |
3059 | struct its_node *its; | |
4e6437f1 | 3060 | int cpu; |
7d75bbb4 MZ |
3061 | |
3062 | /* | |
3063 | * Make sure the L2 tables are allocated on *all* v4 ITSs. We | |
3064 | * could try and only do it on ITSs corresponding to devices | |
3065 | * that have interrupts targeted at this VPE, but the | |
3066 | * complexity becomes crazy (and you have tons of memory | |
3067 | * anyway, right?). | |
3068 | */ | |
3069 | list_for_each_entry(its, &its_nodes, entry) { | |
3070 | struct its_baser *baser; | |
3071 | ||
0dd57fed | 3072 | if (!is_v4(its)) |
7d75bbb4 | 3073 | continue; |
3faf24ea | 3074 | |
7d75bbb4 MZ |
3075 | baser = its_get_baser(its, GITS_BASER_TYPE_VCPU); |
3076 | if (!baser) | |
3077 | return false; | |
3faf24ea | 3078 | |
539d3782 | 3079 | if (!its_alloc_table_entry(its, baser, vpe_id)) |
7d75bbb4 | 3080 | return false; |
3faf24ea SD |
3081 | } |
3082 | ||
4e6437f1 ZY |
3083 | /* Not v4.1? No need to iterate the RDs; bail out early. */ | |
3084 | if (!gic_rdists->has_rvpeid) | |
3085 | return true; | |
3086 | ||
3087 | /* | |
3088 | * Make sure the L2 tables are allocated for all copies of | |
3089 | * the L1 table on *all* v4.1 RDs. | |
3090 | */ | |
3091 | for_each_possible_cpu(cpu) { | |
3092 | if (!allocate_vpe_l2_table(cpu, vpe_id)) | |
3093 | return false; | |
3094 | } | |
3095 | ||
3faf24ea SD |
3096 | return true; |
3097 | } | |
3098 | ||
84a6a2e7 | 3099 | static struct its_device *its_create_device(struct its_node *its, u32 dev_id, |
93f94ea0 | 3100 | int nvecs, bool alloc_lpis) |
84a6a2e7 MZ |
3101 | { |
3102 | struct its_device *dev; | |
93f94ea0 | 3103 | unsigned long *lpi_map = NULL; |
3e39e8f5 | 3104 | unsigned long flags; |
591e5bec | 3105 | u16 *col_map = NULL; |
84a6a2e7 MZ |
3106 | void *itt; |
3107 | int lpi_base; | |
3108 | int nr_lpis; | |
c8481267 | 3109 | int nr_ites; |
84a6a2e7 MZ |
3110 | int sz; |
3111 | ||
3faf24ea | 3112 | if (!its_alloc_device_table(its, dev_id)) |
466b7d16 SD |
3113 | return NULL; |
3114 | ||
147c8f37 MZ |
3115 | if (WARN_ON(!is_power_of_2(nvecs))) |
3116 | nvecs = roundup_pow_of_two(nvecs); | |
3117 | ||
84a6a2e7 | 3118 | dev = kzalloc(sizeof(*dev), GFP_KERNEL); |
c8481267 | 3119 | /* |
147c8f37 MZ |
3120 | * Even if the device wants a single LPI, the ITT must be |
3121 | * sized as a power of two (and you need at least one bit...). | |
c8481267 | 3122 | */ |
147c8f37 | 3123 | nr_ites = max(2, nvecs); |
ffedbf0c | 3124 | sz = nr_ites * (FIELD_GET(GITS_TYPER_ITT_ENTRY_SIZE, its->typer) + 1); |
84a6a2e7 | 3125 | sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1; |
539d3782 | 3126 | itt = kzalloc_node(sz, GFP_KERNEL, its->numa_node); |
93f94ea0 | 3127 | if (alloc_lpis) { |
38dd7c49 | 3128 | lpi_map = its_lpi_alloc(nvecs, &lpi_base, &nr_lpis); |
93f94ea0 | 3129 | if (lpi_map) |
6396bb22 | 3130 | col_map = kcalloc(nr_lpis, sizeof(*col_map), |
93f94ea0 MZ |
3131 | GFP_KERNEL); |
3132 | } else { | |
6396bb22 | 3133 | col_map = kcalloc(nr_ites, sizeof(*col_map), GFP_KERNEL); |
93f94ea0 MZ |
3134 | nr_lpis = 0; |
3135 | lpi_base = 0; | |
3136 | } | |
84a6a2e7 | 3137 | |
93f94ea0 | 3138 | if (!dev || !itt || !col_map || (!lpi_map && alloc_lpis)) { |
84a6a2e7 MZ |
3139 | kfree(dev); |
3140 | kfree(itt); | |
3141 | kfree(lpi_map); | |
591e5bec | 3142 | kfree(col_map); |
84a6a2e7 MZ |
3143 | return NULL; |
3144 | } | |
3145 | ||
328191c0 | 3146 | gic_flush_dcache_to_poc(itt, sz); |
5a9a8915 | 3147 | |
84a6a2e7 MZ |
3148 | dev->its = its; |
3149 | dev->itt = itt; | |
c8481267 | 3150 | dev->nr_ites = nr_ites; |
591e5bec MZ |
3151 | dev->event_map.lpi_map = lpi_map; |
3152 | dev->event_map.col_map = col_map; | |
3153 | dev->event_map.lpi_base = lpi_base; | |
3154 | dev->event_map.nr_lpis = nr_lpis; | |
11635fa2 | 3155 | raw_spin_lock_init(&dev->event_map.vlpi_lock); |
84a6a2e7 MZ |
3156 | dev->device_id = dev_id; |
3157 | INIT_LIST_HEAD(&dev->entry); | |
3158 | ||
3e39e8f5 | 3159 | raw_spin_lock_irqsave(&its->lock, flags); |
84a6a2e7 | 3160 | list_add(&dev->entry, &its->its_device_list); |
3e39e8f5 | 3161 | raw_spin_unlock_irqrestore(&its->lock, flags); |
84a6a2e7 | 3162 | |
84a6a2e7 MZ |
3163 | /* Map device to its ITT */ |
3164 | its_send_mapd(dev, 1); | |
3165 | ||
3166 | return dev; | |
3167 | } | |
3168 | ||
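The ITT sizing in its_create_device() above rounds the vector count up to a power of two (with a minimum of two entries), multiplies by the per-ITE size from GITS_TYPER, and over-allocates by the alignment minus one so the table can be aligned. A standalone sketch of that arithmetic; the 8-byte ITE and 256-byte alignment are assumptions for illustration:

#include <stdio.h>

#define ITT_ALIGN 256			/* stand-in for ITS_ITT_ALIGN, assumed */

static int roundup_pow2(int x)		/* userspace stand-in for roundup_pow_of_two() */
{
	int p = 1;

	while (p < x)
		p <<= 1;
	return p;
}

int main(void)
{
	int nvecs = 5;			/* MSIs requested by the device */
	int nr_ites = roundup_pow2(nvecs);
	int sz;

	if (nr_ites < 2)		/* at least one bit, hence two entries */
		nr_ites = 2;

	sz = nr_ites * 8;		/* 8-byte ITE, an assumed entry size */
	sz = (sz > ITT_ALIGN ? sz : ITT_ALIGN) + ITT_ALIGN - 1;

	printf("nvecs=%d -> nr_ites=%d, ITT allocation=%d bytes\n",
	       nvecs, nr_ites, sz);
	return 0;
}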
3169 | static void its_free_device(struct its_device *its_dev) | |
3170 | { | |
3e39e8f5 MZ |
3171 | unsigned long flags; |
3172 | ||
3173 | raw_spin_lock_irqsave(&its_dev->its->lock, flags); | |
84a6a2e7 | 3174 | list_del(&its_dev->entry); |
3e39e8f5 | 3175 | raw_spin_unlock_irqrestore(&its_dev->its->lock, flags); |
898aa5ce | 3176 | kfree(its_dev->event_map.col_map); |
84a6a2e7 MZ |
3177 | kfree(its_dev->itt); |
3178 | kfree(its_dev); | |
3179 | } | |
b48ac83d | 3180 | |
8208d170 | 3181 | static int its_alloc_device_irq(struct its_device *dev, int nvecs, irq_hw_number_t *hwirq) |
b48ac83d MZ |
3182 | { |
3183 | int idx; | |
3184 | ||
342be106 | 3185 | /* Find a free LPI region in lpi_map and allocate them. */ |
8208d170 MZ |
3186 | idx = bitmap_find_free_region(dev->event_map.lpi_map, |
3187 | dev->event_map.nr_lpis, | |
3188 | get_count_order(nvecs)); | |
3189 | if (idx < 0) | |
b48ac83d MZ |
3190 | return -ENOSPC; |
3191 | ||
591e5bec | 3192 | *hwirq = dev->event_map.lpi_base + idx; |
b48ac83d | 3193 | |
b48ac83d MZ |
3194 | return 0; |
3195 | } | |
3196 | ||
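bitmap_find_free_region() above hands out a naturally aligned block of 2^order events, with order = get_count_order(nvecs), so a request for three vectors consumes a four-event region. A small demo of that rounding (count_order() is a userspace stand-in for the kernel helper):

#include <stdio.h>

static int count_order(unsigned int n)	/* stand-in for get_count_order() */
{
	int order = 0;

	while ((1u << order) < n)
		order++;
	return order;
}

int main(void)
{
	unsigned int n;

	for (n = 1; n <= 8; n++)
		printf("nvecs=%u -> order %d (a %u-event region)\n",
		       n, count_order(n), 1u << count_order(n));
	return 0;
}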
54456db9 MZ |
3197 | static int its_msi_prepare(struct irq_domain *domain, struct device *dev, |
3198 | int nvec, msi_alloc_info_t *info) | |
e8137f4f | 3199 | { |
b48ac83d | 3200 | struct its_node *its; |
b48ac83d | 3201 | struct its_device *its_dev; |
54456db9 MZ |
3202 | struct msi_domain_info *msi_info; |
3203 | u32 dev_id; | |
9791ec7d | 3204 | int err = 0; |
54456db9 MZ |
3205 | |
3206 | /* | |
a7c90f51 | 3207 | * We ignore "dev" entirely, and rely on the dev_id that has |
54456db9 MZ |
3208 | * been passed via the scratchpad. This limits this domain's |
3209 | * usefulness to upper layers that definitely know that they | |
3210 | * are built on top of the ITS. | |
3211 | */ | |
3212 | dev_id = info->scratchpad[0].ul; | |
3213 | ||
3214 | msi_info = msi_get_domain_info(domain); | |
3215 | its = msi_info->data; | |
e8137f4f | 3216 | |
20b3d54e MZ |
3217 | if (!gic_rdists->has_direct_lpi && |
3218 | vpe_proxy.dev && | |
3219 | vpe_proxy.dev->its == its && | |
3220 | dev_id == vpe_proxy.dev->device_id) { | |
3221 | /* Bad luck. Get yourself a better implementation */ | |
3222 | WARN_ONCE(1, "DevId %x clashes with GICv4 VPE proxy device\n", | |
3223 | dev_id); | |
3224 | return -EINVAL; | |
3225 | } | |
3226 | ||
9791ec7d | 3227 | mutex_lock(&its->dev_alloc_lock); |
f130420e | 3228 | its_dev = its_find_device(its, dev_id); |
e8137f4f MZ |
3229 | if (its_dev) { |
3230 | /* | |
3231 | * We have already seen this ID, probably through | |
3232 | * another alias (PCI bridge of some sort). No need to | |
3233 | * create the device. | |
3234 | */ | |
9791ec7d | 3235 | its_dev->shared = true; |
f130420e | 3236 | pr_debug("Reusing ITT for devID %x\n", dev_id); |
e8137f4f MZ |
3237 | goto out; |
3238 | } | |
b48ac83d | 3239 | |
93f94ea0 | 3240 | its_dev = its_create_device(its, dev_id, nvec, true); |
9791ec7d MZ |
3241 | if (!its_dev) { |
3242 | err = -ENOMEM; | |
3243 | goto out; | |
3244 | } | |
b48ac83d | 3245 | |
f130420e | 3246 | pr_debug("ITT %d entries, %d bits\n", nvec, ilog2(nvec)); |
e8137f4f | 3247 | out: |
9791ec7d | 3248 | mutex_unlock(&its->dev_alloc_lock); |
b48ac83d | 3249 | info->scratchpad[0].ptr = its_dev; |
9791ec7d | 3250 | return err; |
b48ac83d MZ |
3251 | } |
3252 | ||
54456db9 MZ |
3253 | static struct msi_domain_ops its_msi_domain_ops = { |
3254 | .msi_prepare = its_msi_prepare, | |
3255 | }; | |
3256 | ||
b48ac83d MZ |
3257 | static int its_irq_gic_domain_alloc(struct irq_domain *domain, |
3258 | unsigned int virq, | |
3259 | irq_hw_number_t hwirq) | |
3260 | { | |
f833f57f MZ |
3261 | struct irq_fwspec fwspec; |
3262 | ||
3263 | if (irq_domain_get_of_node(domain->parent)) { | |
3264 | fwspec.fwnode = domain->parent->fwnode; | |
3265 | fwspec.param_count = 3; | |
3266 | fwspec.param[0] = GIC_IRQ_TYPE_LPI; | |
3267 | fwspec.param[1] = hwirq; | |
3268 | fwspec.param[2] = IRQ_TYPE_EDGE_RISING; | |
3f010cf1 TN |
3269 | } else if (is_fwnode_irqchip(domain->parent->fwnode)) { |
3270 | fwspec.fwnode = domain->parent->fwnode; | |
3271 | fwspec.param_count = 2; | |
3272 | fwspec.param[0] = hwirq; | |
3273 | fwspec.param[1] = IRQ_TYPE_EDGE_RISING; | |
f833f57f MZ |
3274 | } else { |
3275 | return -EINVAL; | |
3276 | } | |
b48ac83d | 3277 | |
f833f57f | 3278 | return irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec); |
b48ac83d MZ |
3279 | } |
3280 | ||
3281 | static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, | |
3282 | unsigned int nr_irqs, void *args) | |
3283 | { | |
3284 | msi_alloc_info_t *info = args; | |
3285 | struct its_device *its_dev = info->scratchpad[0].ptr; | |
35ae7df2 | 3286 | struct its_node *its = its_dev->its; |
b48ac83d MZ |
3287 | irq_hw_number_t hwirq; |
3288 | int err; | |
3289 | int i; | |
3290 | ||
8208d170 MZ |
3291 | err = its_alloc_device_irq(its_dev, nr_irqs, &hwirq); |
3292 | if (err) | |
3293 | return err; | |
b48ac83d | 3294 | |
35ae7df2 JG |
3295 | err = iommu_dma_prepare_msi(info->desc, its->get_msi_base(its_dev)); |
3296 | if (err) | |
3297 | return err; | |
3298 | ||
8208d170 MZ |
3299 | for (i = 0; i < nr_irqs; i++) { |
3300 | err = its_irq_gic_domain_alloc(domain, virq + i, hwirq + i); | |
b48ac83d MZ |
3301 | if (err) |
3302 | return err; | |
3303 | ||
3304 | irq_domain_set_hwirq_and_chip(domain, virq + i, | |
8208d170 | 3305 | hwirq + i, &its_irq_chip, its_dev); |
0d224d35 | 3306 | irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq + i))); |
f130420e | 3307 | pr_debug("ID:%d pID:%d vID:%d\n", |
8208d170 MZ |
3308 | (int)(hwirq + i - its_dev->event_map.lpi_base), |
3309 | (int)(hwirq + i), virq + i); | |
b48ac83d MZ |
3310 | } |
3311 | ||
3312 | return 0; | |
3313 | } | |
3314 | ||
72491643 | 3315 | static int its_irq_domain_activate(struct irq_domain *domain, |
702cb0a0 | 3316 | struct irq_data *d, bool reserve) |
aca268df MZ |
3317 | { |
3318 | struct its_device *its_dev = irq_data_get_irq_chip_data(d); | |
3319 | u32 event = its_get_event_id(d); | |
fbf8f40e | 3320 | const struct cpumask *cpu_mask = cpu_online_mask; |
0d224d35 | 3321 | int cpu; |
fbf8f40e GK |
3322 | |
3323 | /* get the cpu_mask of local node */ | |
3324 | if (its_dev->its->numa_node >= 0) | |
3325 | cpu_mask = cpumask_of_node(its_dev->its->numa_node); | |
aca268df | 3326 | |
591e5bec | 3327 | /* Bind the LPI to the first possible CPU */ |
c1797b11 YY |
3328 | cpu = cpumask_first_and(cpu_mask, cpu_online_mask); |
3329 | if (cpu >= nr_cpu_ids) { | |
3330 | if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) | |
3331 | return -EINVAL; | |
3332 | ||
3333 | cpu = cpumask_first(cpu_online_mask); | |
3334 | } | |
3335 | ||
0d224d35 MZ |
3336 | its_dev->event_map.col_map[event] = cpu; |
3337 | irq_data_update_effective_affinity(d, cpumask_of(cpu)); | |
591e5bec | 3338 | |
aca268df | 3339 | /* Map the GIC IRQ and event to the device */ |
6a25ad3a | 3340 | its_send_mapti(its_dev, d->hwirq, event); |
72491643 | 3341 | return 0; |
aca268df MZ |
3342 | } |
3343 | ||
3344 | static void its_irq_domain_deactivate(struct irq_domain *domain, | |
3345 | struct irq_data *d) | |
3346 | { | |
3347 | struct its_device *its_dev = irq_data_get_irq_chip_data(d); | |
3348 | u32 event = its_get_event_id(d); | |
3349 | ||
3350 | /* Stop the delivery of interrupts */ | |
3351 | its_send_discard(its_dev, event); | |
3352 | } | |
3353 | ||
b48ac83d MZ |
3354 | static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq, |
3355 | unsigned int nr_irqs) | |
3356 | { | |
3357 | struct irq_data *d = irq_domain_get_irq_data(domain, virq); | |
3358 | struct its_device *its_dev = irq_data_get_irq_chip_data(d); | |
9791ec7d | 3359 | struct its_node *its = its_dev->its; |
b48ac83d MZ |
3360 | int i; |
3361 | ||
c9c96e30 MZ |
3362 | bitmap_release_region(its_dev->event_map.lpi_map, |
3363 | its_get_event_id(irq_domain_get_irq_data(domain, virq)), | |
3364 | get_count_order(nr_irqs)); | |
3365 | ||
b48ac83d MZ |
3366 | for (i = 0; i < nr_irqs; i++) { |
3367 | struct irq_data *data = irq_domain_get_irq_data(domain, | |
3368 | virq + i); | |
b48ac83d | 3369 | /* Nuke the entry in the domain */ |
2da39949 | 3370 | irq_domain_reset_irq_data(data); |
b48ac83d MZ |
3371 | } |
3372 | ||
9791ec7d MZ |
3373 | mutex_lock(&its->dev_alloc_lock); |
3374 | ||
3375 | /* | |
3376 | * If all interrupts have been freed, start mopping the | |
3377 | * floor. This is conditioned on the device not being shared. | |
3378 | */ | |
3379 | if (!its_dev->shared && | |
3380 | bitmap_empty(its_dev->event_map.lpi_map, | |
591e5bec | 3381 | its_dev->event_map.nr_lpis)) { |
38dd7c49 MZ |
3382 | its_lpi_free(its_dev->event_map.lpi_map, |
3383 | its_dev->event_map.lpi_base, | |
3384 | its_dev->event_map.nr_lpis); | |
b48ac83d MZ |
3385 | |
3386 | /* Unmap device/itt */ | |
3387 | its_send_mapd(its_dev, 0); | |
3388 | its_free_device(its_dev); | |
3389 | } | |
3390 | ||
9791ec7d MZ |
3391 | mutex_unlock(&its->dev_alloc_lock); |
3392 | ||
b48ac83d MZ |
3393 | irq_domain_free_irqs_parent(domain, virq, nr_irqs); |
3394 | } | |
3395 | ||
3396 | static const struct irq_domain_ops its_domain_ops = { | |
3397 | .alloc = its_irq_domain_alloc, | |
3398 | .free = its_irq_domain_free, | |
aca268df MZ |
3399 | .activate = its_irq_domain_activate, |
3400 | .deactivate = its_irq_domain_deactivate, | |
b48ac83d | 3401 | }; |
4c21f3c2 | 3402 | |
20b3d54e MZ |
3403 | /* |
3404 | * This is insane. | |
3405 | * | |
0684c704 | 3406 | * If a GICv4.0 doesn't implement Direct LPIs (which is extremely |
20b3d54e MZ |
3407 | * likely), the only way to perform an invalidate is to use a fake |
3408 | * device to issue an INV command, implying that the LPI has first | |
3409 | * been mapped to some event on that device. Since this is not exactly | |
3410 | * cheap, we try to keep that mapping around as long as possible, and | |
3411 | * only issue an UNMAP if we're short on available slots. | |
3412 | * | |
3413 | * Broken by design(tm). | |
0684c704 MZ |
3414 | * |
3415 | * GICv4.1, on the other hand, mandates that we're able to invalidate | |
3416 | * by writing to a MMIO register. It doesn't implement the whole of | |
3417 | * DirectLPI, but that's good enough. And most of the time, we don't | |
3418 | * even have to invalidate anything, as the redistributor can be told | |
3419 | * whether to generate a doorbell or not (we thus leave it enabled, | |
3420 | * always). | |
20b3d54e MZ |
3421 | */ |
3422 | static void its_vpe_db_proxy_unmap_locked(struct its_vpe *vpe) | |
3423 | { | |
0684c704 MZ |
3424 | /* GICv4.1 doesn't use a proxy, so nothing to do here */ |
3425 | if (gic_rdists->has_rvpeid) | |
3426 | return; | |
3427 | ||
20b3d54e MZ |
3428 | /* Already unmapped? */ |
3429 | if (vpe->vpe_proxy_event == -1) | |
3430 | return; | |
3431 | ||
3432 | its_send_discard(vpe_proxy.dev, vpe->vpe_proxy_event); | |
3433 | vpe_proxy.vpes[vpe->vpe_proxy_event] = NULL; | |
3434 | ||
3435 | /* | |
3436 | * We don't track empty slots at all, so let's move the | |
3437 | * next_victim pointer if we can quickly reuse that slot | |
3438 | * instead of nuking an existing entry. Not clear that this is | |
3439 | * always a win though, and this might just generate a ripple | |
3440 | * effect... Let's just hope VPEs don't migrate too often. | |
3441 | */ | |
3442 | if (vpe_proxy.vpes[vpe_proxy.next_victim]) | |
3443 | vpe_proxy.next_victim = vpe->vpe_proxy_event; | |
3444 | ||
3445 | vpe->vpe_proxy_event = -1; | |
3446 | } | |
3447 | ||
3448 | static void its_vpe_db_proxy_unmap(struct its_vpe *vpe) | |
3449 | { | |
0684c704 MZ |
3450 | /* GICv4.1 doesn't use a proxy, so nothing to do here */ |
3451 | if (gic_rdists->has_rvpeid) | |
3452 | return; | |
3453 | ||
20b3d54e MZ |
3454 | if (!gic_rdists->has_direct_lpi) { |
3455 | unsigned long flags; | |
3456 | ||
3457 | raw_spin_lock_irqsave(&vpe_proxy.lock, flags); | |
3458 | its_vpe_db_proxy_unmap_locked(vpe); | |
3459 | raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags); | |
3460 | } | |
3461 | } | |
3462 | ||
3463 | static void its_vpe_db_proxy_map_locked(struct its_vpe *vpe) | |
3464 | { | |
0684c704 MZ |
3465 | /* GICv4.1 doesn't use a proxy, so nothing to do here */ |
3466 | if (gic_rdists->has_rvpeid) | |
3467 | return; | |
3468 | ||
20b3d54e MZ |
3469 | /* Already mapped? */ |
3470 | if (vpe->vpe_proxy_event != -1) | |
3471 | return; | |
3472 | ||
3473 | /* This slot was already allocated. Kick the other VPE out. */ | |
3474 | if (vpe_proxy.vpes[vpe_proxy.next_victim]) | |
3475 | its_vpe_db_proxy_unmap_locked(vpe_proxy.vpes[vpe_proxy.next_victim]); | |
3476 | ||
3477 | /* Map the new VPE instead */ | |
3478 | vpe_proxy.vpes[vpe_proxy.next_victim] = vpe; | |
3479 | vpe->vpe_proxy_event = vpe_proxy.next_victim; | |
3480 | vpe_proxy.next_victim = (vpe_proxy.next_victim + 1) % vpe_proxy.dev->nr_ites; | |
3481 | ||
3482 | vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = vpe->col_idx; | |
3483 | its_send_mapti(vpe_proxy.dev, vpe->vpe_db_lpi, vpe->vpe_proxy_event); | |
3484 | } | |
3485 | ||
958b90d1 MZ |
3486 | static void its_vpe_db_proxy_move(struct its_vpe *vpe, int from, int to) |
3487 | { | |
3488 | unsigned long flags; | |
3489 | struct its_collection *target_col; | |
3490 | ||
0684c704 MZ |
3491 | /* GICv4.1 doesn't use a proxy, so nothing to do here */ |
3492 | if (gic_rdists->has_rvpeid) | |
3493 | return; | |
3494 | ||
958b90d1 MZ |
3495 | if (gic_rdists->has_direct_lpi) { |
3496 | void __iomem *rdbase; | |
3497 | ||
3498 | rdbase = per_cpu_ptr(gic_rdists->rdist, from)->rd_base; | |
3499 | gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR); | |
2f4f064b | 3500 | wait_for_syncr(rdbase); |
958b90d1 MZ |
3501 | |
3502 | return; | |
3503 | } | |
3504 | ||
3505 | raw_spin_lock_irqsave(&vpe_proxy.lock, flags); | |
3506 | ||
3507 | its_vpe_db_proxy_map_locked(vpe); | |
3508 | ||
3509 | target_col = &vpe_proxy.dev->its->collections[to]; | |
3510 | its_send_movi(vpe_proxy.dev, target_col, vpe->vpe_proxy_event); | |
3511 | vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = to; | |
3512 | ||
3513 | raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags); | |
3514 | } | |
3515 | ||
3171a47a MZ |
3516 | static int its_vpe_set_affinity(struct irq_data *d, |
3517 | const struct cpumask *mask_val, | |
3518 | bool force) | |
3519 | { | |
3520 | struct its_vpe *vpe = irq_data_get_irq_chip_data(d); | |
dd3f050a | 3521 | int from, cpu = cpumask_first(mask_val); |
f3a05921 | 3522 | unsigned long flags; |
3171a47a MZ |
3523 | |
3524 | /* | |
3525 | * Changing affinity is mega expensive, so let's be as lazy as | |
20b3d54e | 3526 | * we can and only do it if we really have to. Also, if mapped |
958b90d1 MZ |
3527 | * into the proxy device, we need to move the doorbell |
3528 | * interrupt to its new location. | |
f3a05921 MZ |
3529 | * |
3530 | * Another thing is that changing the affinity of a vPE affects | |
3531 | * *other interrupts* such as all the vLPIs that are routed to | |
3532 | * this vPE. This means that the irq_desc lock is not enough to | |
3533 | * protect us, and that we must ensure nobody samples vpe->col_idx | |
3534 | * during the update, hence the lock below which must also be | |
3535 | * taken on any vLPI handling path that evaluates vpe->col_idx. | |
3171a47a | 3536 | */ |
f3a05921 MZ |
3537 | from = vpe_to_cpuid_lock(vpe, &flags); |
3538 | if (from == cpu) | |
dd3f050a | 3539 | goto out; |
958b90d1 | 3540 | |
dd3f050a MZ |
3541 | vpe->col_idx = cpu; |
3542 | ||
3543 | /* | |
3544 | * GICv4.1 allows us to skip VMOVP if moving to a cpu whose RD | |
3545 | * is sharing its VPE table with the current one. | |
3546 | */ | |
3547 | if (gic_data_rdist_cpu(cpu)->vpe_table_mask && | |
3548 | cpumask_test_cpu(from, gic_data_rdist_cpu(cpu)->vpe_table_mask)) | |
3549 | goto out; | |
3171a47a | 3550 | |
dd3f050a MZ |
3551 | its_send_vmovp(vpe); |
3552 | its_vpe_db_proxy_move(vpe, from, cpu); | |
3553 | ||
3554 | out: | |
44c4c25e | 3555 | irq_data_update_effective_affinity(d, cpumask_of(cpu)); |
f3a05921 | 3556 | vpe_to_cpuid_unlock(vpe, flags); |
44c4c25e | 3557 | |
3171a47a MZ |
3558 | return IRQ_SET_MASK_OK_DONE; |
3559 | } | |
3560 | ||
e643d803 MZ |
3561 | static void its_vpe_schedule(struct its_vpe *vpe) |
3562 | { | |
50c33097 | 3563 | void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); |
e643d803 MZ |
3564 | u64 val; |
3565 | ||
3566 | /* Schedule the VPE */ | |
3567 | val = virt_to_phys(page_address(vpe->its_vm->vprop_page)) & | |
3568 | GENMASK_ULL(51, 12); | |
3569 | val |= (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK; | |
3570 | val |= GICR_VPROPBASER_RaWb; | |
3571 | val |= GICR_VPROPBASER_InnerShareable; | |
5186a6cc | 3572 | gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER); |
e643d803 MZ |
3573 | |
3574 | val = virt_to_phys(page_address(vpe->vpt_page)) & | |
3575 | GENMASK_ULL(51, 16); | |
3576 | val |= GICR_VPENDBASER_RaWaWb; | |
3577 | val |= GICR_VPENDBASER_NonShareable; | |
3578 | /* | |
3579 | * There is no good way of finding out if the pending table is | |
3580 | * empty as we can race against the doorbell interrupt very | |
3581 | * easily. So in the end, vpe->pending_last is only an | |
3582 | * indication that the vcpu has something pending, not one | |
3583 | * that the pending table is empty. A good implementation | |
3584 | * would be able to read its coarse map pretty quickly anyway, | |
3585 | * making this a tolerable issue. | |
3586 | */ | |
3587 | val |= GICR_VPENDBASER_PendingLast; | |
3588 | val |= vpe->idai ? GICR_VPENDBASER_IDAI : 0; | |
3589 | val |= GICR_VPENDBASER_Valid; | |
5186a6cc | 3590 | gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER); |
e643d803 MZ |
3591 | } |
3592 | ||
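For reference, the GICR_VPROPBASER value built above packs a 4kB-aligned physical address into bits [51:12] plus an ID-bits field in the low bits. A hedged standalone sketch of the packing; the address, ID width and 5-bit IDBITS mask are illustrative assumptions:

#include <stdint.h>
#include <stdio.h>

#define GENMASK_ULL(h, l) \
	((~0ULL << (l)) & (~0ULL >> (63 - (h))))

int main(void)
{
	uint64_t pa = 0x8012345000ULL;	/* vprop page PA, assumed */
	unsigned int id_bits = 16;	/* LPI ID width, assumed */
	uint64_t val;

	val  = pa & GENMASK_ULL(51, 12);	/* PA field, 4kB aligned */
	val |= (id_bits - 1) & 0x1f;		/* IDBITS field, assumed 5 bits */

	printf("GICR_VPROPBASER = 0x%016llx\n", (unsigned long long)val);
	return 0;
}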
3593 | static void its_vpe_deschedule(struct its_vpe *vpe) | |
3594 | { | |
50c33097 | 3595 | void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); |
e643d803 MZ |
3596 | u64 val; |
3597 | ||
e64fab1a | 3598 | val = its_clear_vpend_valid(vlpi_base, 0, 0); |
e643d803 | 3599 | |
e64fab1a MZ |
3600 | vpe->idai = !!(val & GICR_VPENDBASER_IDAI); |
3601 | vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast); | |
e643d803 MZ |
3602 | } |
3603 | ||
40619a2e MZ |
3604 | static void its_vpe_invall(struct its_vpe *vpe) |
3605 | { | |
3606 | struct its_node *its; | |
3607 | ||
3608 | list_for_each_entry(its, &its_nodes, entry) { | |
0dd57fed | 3609 | if (!is_v4(its)) |
40619a2e MZ |
3610 | continue; |
3611 | ||
2247e1bf MZ |
3612 | if (its_list_map && !vpe->its_vm->vlpi_count[its->list_nr]) |
3613 | continue; | |
3614 | ||
3c1cceeb MZ |
3615 | /* |
3616 | * Sending a VINVALL to a single ITS is enough, as all | |
3617 | * we need is to reach the redistributors. | |
3618 | */ | |
40619a2e | 3619 | its_send_vinvall(its, vpe); |
3c1cceeb | 3620 | return; |
40619a2e MZ |
3621 | } |
3622 | } | |
3623 | ||
e643d803 MZ |
3624 | static int its_vpe_set_vcpu_affinity(struct irq_data *d, void *vcpu_info) |
3625 | { | |
3626 | struct its_vpe *vpe = irq_data_get_irq_chip_data(d); | |
3627 | struct its_cmd_info *info = vcpu_info; | |
3628 | ||
3629 | switch (info->cmd_type) { | |
3630 | case SCHEDULE_VPE: | |
3631 | its_vpe_schedule(vpe); | |
3632 | return 0; | |
3633 | ||
3634 | case DESCHEDULE_VPE: | |
3635 | its_vpe_deschedule(vpe); | |
3636 | return 0; | |
3637 | ||
5e2f7642 | 3638 | case INVALL_VPE: |
40619a2e | 3639 | its_vpe_invall(vpe); |
5e2f7642 MZ |
3640 | return 0; |
3641 | ||
e643d803 MZ |
3642 | default: |
3643 | return -EINVAL; | |
3644 | } | |
3645 | } | |
3646 | ||
20b3d54e MZ |
3647 | static void its_vpe_send_cmd(struct its_vpe *vpe, |
3648 | void (*cmd)(struct its_device *, u32)) | |
3649 | { | |
3650 | unsigned long flags; | |
3651 | ||
3652 | raw_spin_lock_irqsave(&vpe_proxy.lock, flags); | |
3653 | ||
3654 | its_vpe_db_proxy_map_locked(vpe); | |
3655 | cmd(vpe_proxy.dev, vpe->vpe_proxy_event); | |
3656 | ||
3657 | raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags); | |
3658 | } | |
3659 | ||
f6a91da7 MZ |
3660 | static void its_vpe_send_inv(struct irq_data *d) |
3661 | { | |
3662 | struct its_vpe *vpe = irq_data_get_irq_chip_data(d); | |
f6a91da7 | 3663 | |
20b3d54e MZ |
3664 | if (gic_rdists->has_direct_lpi) { |
3665 | void __iomem *rdbase; | |
3666 | ||
425c09be | 3667 | /* Target the redistributor this VPE is currently known on */ |
9058a4e9 | 3668 | raw_spin_lock(&gic_data_rdist_cpu(vpe->col_idx)->rd_lock); |
20b3d54e | 3669 | rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base; |
425c09be | 3670 | gic_write_lpir(d->parent_data->hwirq, rdbase + GICR_INVLPIR); |
2f4f064b | 3671 | wait_for_syncr(rdbase); |
9058a4e9 | 3672 | raw_spin_unlock(&gic_data_rdist_cpu(vpe->col_idx)->rd_lock); |
20b3d54e MZ |
3673 | } else { |
3674 | its_vpe_send_cmd(vpe, its_send_inv); | |
3675 | } | |
f6a91da7 MZ |
3676 | } |
3677 | ||
3678 | static void its_vpe_mask_irq(struct irq_data *d) | |
3679 | { | |
3680 | /* | |
3681 | * We need to mask the LPI, which is described by the parent | |
3682 | * irq_data. Instead of calling into the parent (which won't | |
3683 | * exactly do the right thing), let's simply use the | |
3684 | * parent_data pointer. Yes, I'm naughty. | |
3685 | */ | |
3686 | lpi_write_config(d->parent_data, LPI_PROP_ENABLED, 0); | |
3687 | its_vpe_send_inv(d); | |
3688 | } | |
3689 | ||
3690 | static void its_vpe_unmask_irq(struct irq_data *d) | |
3691 | { | |
3692 | /* Same hack as above... */ | |
3693 | lpi_write_config(d->parent_data, 0, LPI_PROP_ENABLED); | |
3694 | its_vpe_send_inv(d); | |
3695 | } | |
3696 | ||
e57a3e28 MZ |
3697 | static int its_vpe_set_irqchip_state(struct irq_data *d, |
3698 | enum irqchip_irq_state which, | |
3699 | bool state) | |
3700 | { | |
3701 | struct its_vpe *vpe = irq_data_get_irq_chip_data(d); | |
3702 | ||
3703 | if (which != IRQCHIP_STATE_PENDING) | |
3704 | return -EINVAL; | |
3705 | ||
3706 | if (gic_rdists->has_direct_lpi) { | |
3707 | void __iomem *rdbase; | |
3708 | ||
3709 | rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base; | |
3710 | if (state) { | |
3711 | gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_SETLPIR); | |
3712 | } else { | |
3713 | gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR); | |
2f4f064b | 3714 | wait_for_syncr(rdbase); |
e57a3e28 MZ |
3715 | } |
3716 | } else { | |
3717 | if (state) | |
3718 | its_vpe_send_cmd(vpe, its_send_int); | |
3719 | else | |
3720 | its_vpe_send_cmd(vpe, its_send_clear); | |
3721 | } | |
3722 | ||
3723 | return 0; | |
3724 | } | |
3725 | ||
8fff27ae MZ |
3726 | static struct irq_chip its_vpe_irq_chip = { |
3727 | .name = "GICv4-vpe", | |
f6a91da7 MZ |
3728 | .irq_mask = its_vpe_mask_irq, |
3729 | .irq_unmask = its_vpe_unmask_irq, | |
3730 | .irq_eoi = irq_chip_eoi_parent, | |
3171a47a | 3731 | .irq_set_affinity = its_vpe_set_affinity, |
e57a3e28 | 3732 | .irq_set_irqchip_state = its_vpe_set_irqchip_state, |
e643d803 | 3733 | .irq_set_vcpu_affinity = its_vpe_set_vcpu_affinity, |
8fff27ae MZ |
3734 | }; |
3735 | ||
d97c97ba MZ |
3736 | static struct its_node *find_4_1_its(void) |
3737 | { | |
3738 | static struct its_node *its = NULL; | |
3739 | ||
3740 | if (!its) { | |
3741 | list_for_each_entry(its, &its_nodes, entry) { | |
3742 | if (is_v4_1(its)) | |
3743 | return its; | |
3744 | } | |
3745 | ||
3746 | /* Oops? */ | |
3747 | its = NULL; | |
3748 | } | |
3749 | ||
3750 | return its; | |
3751 | } | |
3752 | ||
3753 | static void its_vpe_4_1_send_inv(struct irq_data *d) | |
3754 | { | |
3755 | struct its_vpe *vpe = irq_data_get_irq_chip_data(d); | |
3756 | struct its_node *its; | |
3757 | ||
3758 | /* | |
3759 | * GICv4.1 wants doorbells to be invalidated using the | |
3760 | * INVDB command in order to be broadcast to all RDs. Send | |
3761 | * it to the first valid ITS, and let the HW do its magic. | |
3762 | */ | |
3763 | its = find_4_1_its(); | |
3764 | if (its) | |
3765 | its_send_invdb(its, vpe); | |
3766 | } | |
3767 | ||
3768 | static void its_vpe_4_1_mask_irq(struct irq_data *d) | |
3769 | { | |
3770 | lpi_write_config(d->parent_data, LPI_PROP_ENABLED, 0); | |
3771 | its_vpe_4_1_send_inv(d); | |
3772 | } | |
3773 | ||
3774 | static void its_vpe_4_1_unmask_irq(struct irq_data *d) | |
3775 | { | |
3776 | lpi_write_config(d->parent_data, 0, LPI_PROP_ENABLED); | |
3777 | its_vpe_4_1_send_inv(d); | |
3778 | } | |
3779 | ||
91bf6395 MZ |
3780 | static void its_vpe_4_1_schedule(struct its_vpe *vpe, |
3781 | struct its_cmd_info *info) | |
3782 | { | |
3783 | void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); | |
3784 | u64 val = 0; | |
3785 | ||
3786 | /* Schedule the VPE */ | |
3787 | val |= GICR_VPENDBASER_Valid; | |
3788 | val |= info->g0en ? GICR_VPENDBASER_4_1_VGRP0EN : 0; | |
3789 | val |= info->g1en ? GICR_VPENDBASER_4_1_VGRP1EN : 0; | |
3790 | val |= FIELD_PREP(GICR_VPENDBASER_4_1_VPEID, vpe->vpe_id); | |
3791 | ||
5186a6cc | 3792 | gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER); |
91bf6395 MZ |
3793 | } |
3794 | ||
e64fab1a MZ |
3795 | static void its_vpe_4_1_deschedule(struct its_vpe *vpe, |
3796 | struct its_cmd_info *info) | |
3797 | { | |
3798 | void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); | |
3799 | u64 val; | |
3800 | ||
3801 | if (info->req_db) { | |
3802 | /* | |
3803 | * vPE is going to block: make the vPE non-resident with | |
3804 | * PendingLast clear and DB set. The GIC guarantees that if | |
3805 | * we read-back PendingLast clear, then a doorbell will be | |
3806 | * delivered when an interrupt comes. | |
3807 | */ | |
3808 | val = its_clear_vpend_valid(vlpi_base, | |
3809 | GICR_VPENDBASER_PendingLast, | |
3810 | GICR_VPENDBASER_4_1_DB); | |
3811 | vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast); | |
3812 | } else { | |
3813 | /* | |
3814 | * We're not blocking, so just make the vPE non-resident | |
3815 | * with PendingLast set, indicating that we'll be back. | |
3816 | */ | |
3817 | val = its_clear_vpend_valid(vlpi_base, | |
3818 | 0, | |
3819 | GICR_VPENDBASER_PendingLast); | |
3820 | vpe->pending_last = true; | |
3821 | } | |
3822 | } | |
3823 | ||
b4a4bd0f MZ |
3824 | static void its_vpe_4_1_invall(struct its_vpe *vpe) |
3825 | { | |
3826 | void __iomem *rdbase; | |
3827 | u64 val; | |
3828 | ||
3829 | val = GICR_INVALLR_V; | |
3830 | val |= FIELD_PREP(GICR_INVALLR_VPEID, vpe->vpe_id); | |
3831 | ||
3832 | /* Target the redistributor this vPE is currently known on */ | |
9058a4e9 | 3833 | raw_spin_lock(&gic_data_rdist_cpu(vpe->col_idx)->rd_lock); |
b4a4bd0f MZ |
3834 | rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base; |
3835 | gic_write_lpir(val, rdbase + GICR_INVALLR); | |
b978c25f ZY |
3836 | |
3837 | wait_for_syncr(rdbase); | |
9058a4e9 | 3838 | raw_spin_unlock(&gic_data_rdist_cpu(vpe->col_idx)->rd_lock); |
b4a4bd0f MZ |
3839 | } |
3840 | ||
29c647f3 MZ |
3841 | static int its_vpe_4_1_set_vcpu_affinity(struct irq_data *d, void *vcpu_info) |
3842 | { | |
91bf6395 | 3843 | struct its_vpe *vpe = irq_data_get_irq_chip_data(d); |
29c647f3 MZ |
3844 | struct its_cmd_info *info = vcpu_info; |
3845 | ||
3846 | switch (info->cmd_type) { | |
3847 | case SCHEDULE_VPE: | |
91bf6395 | 3848 | its_vpe_4_1_schedule(vpe, info); |
29c647f3 MZ |
3849 | return 0; |
3850 | ||
3851 | case DESCHEDULE_VPE: | |
e64fab1a | 3852 | its_vpe_4_1_deschedule(vpe, info); |
29c647f3 MZ |
3853 | return 0; |
3854 | ||
3855 | case INVALL_VPE: | |
b4a4bd0f | 3856 | its_vpe_4_1_invall(vpe); |
29c647f3 MZ |
3857 | return 0; |
3858 | ||
3859 | default: | |
3860 | return -EINVAL; | |
3861 | } | |
3862 | } | |
3863 | ||
3864 | static struct irq_chip its_vpe_4_1_irq_chip = { | |
3865 | .name = "GICv4.1-vpe", | |
d97c97ba MZ |
3866 | .irq_mask = its_vpe_4_1_mask_irq, |
3867 | .irq_unmask = its_vpe_4_1_unmask_irq, | |
29c647f3 MZ |
3868 | .irq_eoi = irq_chip_eoi_parent, |
3869 | .irq_set_affinity = its_vpe_set_affinity, | |
3870 | .irq_set_vcpu_affinity = its_vpe_4_1_set_vcpu_affinity, | |
3871 | }; | |
3872 | ||
7d75bbb4 MZ |
3873 | static int its_vpe_id_alloc(void) |
3874 | { | |
32bd44dc | 3875 | return ida_simple_get(&its_vpeid_ida, 0, ITS_MAX_VPEID, GFP_KERNEL); |
7d75bbb4 MZ |
3876 | } |
3877 | ||
3878 | static void its_vpe_id_free(u16 id) | |
3879 | { | |
3880 | ida_simple_remove(&its_vpeid_ida, id); | |
3881 | } | |
3882 | ||
3883 | static int its_vpe_init(struct its_vpe *vpe) | |
3884 | { | |
3885 | struct page *vpt_page; | |
3886 | int vpe_id; | |
3887 | ||
3888 | /* Allocate vpe_id */ | |
3889 | vpe_id = its_vpe_id_alloc(); | |
3890 | if (vpe_id < 0) | |
3891 | return vpe_id; | |
3892 | ||
3893 | /* Allocate VPT */ | |
3894 | vpt_page = its_allocate_pending_table(GFP_KERNEL); | |
3895 | if (!vpt_page) { | |
3896 | its_vpe_id_free(vpe_id); | |
3897 | return -ENOMEM; | |
3898 | } | |
3899 | ||
3900 | if (!its_alloc_vpe_table(vpe_id)) { | |
3901 | its_vpe_id_free(vpe_id); | |
34f8eb92 | 3902 | its_free_pending_table(vpt_page); |
7d75bbb4 MZ |
3903 | return -ENOMEM; |
3904 | } | |
3905 | ||
f3a05921 | 3906 | raw_spin_lock_init(&vpe->vpe_lock); |
7d75bbb4 MZ |
3907 | vpe->vpe_id = vpe_id; |
3908 | vpe->vpt_page = vpt_page; | |
64edfaa9 MZ |
3909 | if (gic_rdists->has_rvpeid) |
3910 | atomic_set(&vpe->vmapp_count, 0); | |
3911 | else | |
3912 | vpe->vpe_proxy_event = -1; | |
7d75bbb4 MZ |
3913 | |
3914 | return 0; | |
3915 | } | |
3916 | ||
3917 | static void its_vpe_teardown(struct its_vpe *vpe) | |
3918 | { | |
20b3d54e | 3919 | its_vpe_db_proxy_unmap(vpe); |
7d75bbb4 MZ |
3920 | its_vpe_id_free(vpe->vpe_id); |
3921 | its_free_pending_table(vpe->vpt_page); | |
3922 | } | |
3923 | ||
3924 | static void its_vpe_irq_domain_free(struct irq_domain *domain, | |
3925 | unsigned int virq, | |
3926 | unsigned int nr_irqs) | |
3927 | { | |
3928 | struct its_vm *vm = domain->host_data; | |
3929 | int i; | |
3930 | ||
3931 | irq_domain_free_irqs_parent(domain, virq, nr_irqs); | |
3932 | ||
3933 | for (i = 0; i < nr_irqs; i++) { | |
3934 | struct irq_data *data = irq_domain_get_irq_data(domain, | |
3935 | virq + i); | |
3936 | struct its_vpe *vpe = irq_data_get_irq_chip_data(data); | |
3937 | ||
3938 | BUG_ON(vm != vpe->its_vm); | |
3939 | ||
3940 | clear_bit(data->hwirq, vm->db_bitmap); | |
3941 | its_vpe_teardown(vpe); | |
3942 | irq_domain_reset_irq_data(data); | |
3943 | } | |
3944 | ||
3945 | if (bitmap_empty(vm->db_bitmap, vm->nr_db_lpis)) { | |
38dd7c49 | 3946 | its_lpi_free(vm->db_bitmap, vm->db_lpi_base, vm->nr_db_lpis); |
7d75bbb4 MZ |
3947 | its_free_prop_table(vm->vprop_page); |
3948 | } | |
3949 | } | |
3950 | ||
3951 | static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, | |
3952 | unsigned int nr_irqs, void *args) | |
3953 | { | |
29c647f3 | 3954 | struct irq_chip *irqchip = &its_vpe_irq_chip; |
7d75bbb4 MZ |
3955 | struct its_vm *vm = args; |
3956 | unsigned long *bitmap; | |
3957 | struct page *vprop_page; | |
3958 | int base, nr_ids, i, err = 0; | |
3959 | ||
3960 | BUG_ON(!vm); | |
3961 | ||
38dd7c49 | 3962 | bitmap = its_lpi_alloc(roundup_pow_of_two(nr_irqs), &base, &nr_ids); |
7d75bbb4 MZ |
3963 | if (!bitmap) |
3964 | return -ENOMEM; | |
3965 | ||
3966 | if (nr_ids < nr_irqs) { | |
38dd7c49 | 3967 | its_lpi_free(bitmap, base, nr_ids); |
7d75bbb4 MZ |
3968 | return -ENOMEM; |
3969 | } | |
3970 | ||
3971 | vprop_page = its_allocate_prop_table(GFP_KERNEL); | |
3972 | if (!vprop_page) { | |
38dd7c49 | 3973 | its_lpi_free(bitmap, base, nr_ids); |
7d75bbb4 MZ |
3974 | return -ENOMEM; |
3975 | } | |
3976 | ||
3977 | vm->db_bitmap = bitmap; | |
3978 | vm->db_lpi_base = base; | |
3979 | vm->nr_db_lpis = nr_ids; | |
3980 | vm->vprop_page = vprop_page; | |
3981 | ||
29c647f3 MZ |
3982 | if (gic_rdists->has_rvpeid) |
3983 | irqchip = &its_vpe_4_1_irq_chip; | |
3984 | ||
7d75bbb4 MZ |
3985 | for (i = 0; i < nr_irqs; i++) { |
3986 | vm->vpes[i]->vpe_db_lpi = base + i; | |
3987 | err = its_vpe_init(vm->vpes[i]); | |
3988 | if (err) | |
3989 | break; | |
3990 | err = its_irq_gic_domain_alloc(domain, virq + i, | |
3991 | vm->vpes[i]->vpe_db_lpi); | |
3992 | if (err) | |
3993 | break; | |
3994 | irq_domain_set_hwirq_and_chip(domain, virq + i, i, | |
29c647f3 | 3995 | irqchip, vm->vpes[i]); |
7d75bbb4 MZ |
3996 | set_bit(i, bitmap); |
3997 | } | |
3998 | ||
3999 | if (err) { | |
4000 | if (i > 0) | |
4001 | its_vpe_irq_domain_free(domain, virq, i); /* i VPEs were fully set up */ | |
4002 | ||
38dd7c49 | 4003 | its_lpi_free(bitmap, base, nr_ids); |
7d75bbb4 MZ |
4004 | its_free_prop_table(vprop_page); |
4005 | } | |
4006 | ||
4007 | return err; | |
4008 | } | |
4009 | ||
72491643 | 4010 | static int its_vpe_irq_domain_activate(struct irq_domain *domain, |
702cb0a0 | 4011 | struct irq_data *d, bool reserve) |
eb78192b MZ |
4012 | { |
4013 | struct its_vpe *vpe = irq_data_get_irq_chip_data(d); | |
40619a2e | 4014 | struct its_node *its; |
eb78192b | 4015 | |
2247e1bf MZ |
4016 | /* If we use the list map, we issue VMAPP on demand... */ |
4017 | if (its_list_map) | |
6ef930f2 | 4018 | return 0; |
eb78192b MZ |
4019 | |
4020 | /* Map the VPE to the first possible CPU */ | |
4021 | vpe->col_idx = cpumask_first(cpu_online_mask); | |
40619a2e MZ |
4022 | |
4023 | list_for_each_entry(its, &its_nodes, entry) { | |
0dd57fed | 4024 | if (!is_v4(its)) |
40619a2e MZ |
4025 | continue; |
4026 | ||
75fd951b | 4027 | its_send_vmapp(its, vpe, true); |
40619a2e MZ |
4028 | its_send_vinvall(its, vpe); |
4029 | } | |
4030 | ||
44c4c25e MZ |
4031 | irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx)); |
4032 | ||
72491643 | 4033 | return 0; |
eb78192b MZ |
4034 | } |
4035 | ||
4036 | static void its_vpe_irq_domain_deactivate(struct irq_domain *domain, | |
4037 | struct irq_data *d) | |
4038 | { | |
4039 | struct its_vpe *vpe = irq_data_get_irq_chip_data(d); | |
75fd951b MZ |
4040 | struct its_node *its; |
4041 | ||
2247e1bf MZ |
4042 | /* |
4043 | * If we use the list map, we unmap the VPE once no VLPIs are | |
4044 | * associated with the VM. | |
4045 | */ | |
4046 | if (its_list_map) | |
4047 | return; | |
eb78192b | 4048 | |
75fd951b | 4049 | list_for_each_entry(its, &its_nodes, entry) { |
0dd57fed | 4050 | if (!is_v4(its)) |
75fd951b | 4051 | continue; |
eb78192b | 4052 | |
75fd951b MZ |
4053 | its_send_vmapp(its, vpe, false); |
4054 | } | |
eb78192b MZ |
4055 | } |
4056 | ||
8fff27ae | 4057 | static const struct irq_domain_ops its_vpe_domain_ops = { |
7d75bbb4 MZ |
4058 | .alloc = its_vpe_irq_domain_alloc, |
4059 | .free = its_vpe_irq_domain_free, | |
eb78192b MZ |
4060 | .activate = its_vpe_irq_domain_activate, |
4061 | .deactivate = its_vpe_irq_domain_deactivate, | |
8fff27ae MZ |
4062 | }; |
4063 | ||
4559fbb3 YW |
4064 | static int its_force_quiescent(void __iomem *base) |
4065 | { | |
4066 | u32 count = 1000000; /* 1s */ | |
4067 | u32 val; | |
4068 | ||
4069 | val = readl_relaxed(base + GITS_CTLR); | |
7611da86 DD |
4070 | /* |
4071 | * GIC architecture specification requires the ITS to be both | |
4072 | * disabled and quiescent for writes to GITS_BASER<n> or | |
4073 | * GITS_CBASER to not have UNPREDICTABLE results. | |
4074 | */ | |
4075 | if ((val & GITS_CTLR_QUIESCENT) && !(val & GITS_CTLR_ENABLE)) | |
4559fbb3 YW |
4076 | return 0; |
4077 | ||
4078 | /* Disable the generation of all interrupts to this ITS */ | |
d51c4b4d | 4079 | val &= ~(GITS_CTLR_ENABLE | GITS_CTLR_ImDe); |
4559fbb3 YW |
4080 | writel_relaxed(val, base + GITS_CTLR); |
4081 | ||
4082 | /* Poll GITS_CTLR and wait until ITS becomes quiescent */ | |
4083 | while (1) { | |
4084 | val = readl_relaxed(base + GITS_CTLR); | |
4085 | if (val & GITS_CTLR_QUIESCENT) | |
4086 | return 0; | |
4087 | ||
4088 | count--; | |
4089 | if (!count) | |
4090 | return -EBUSY; | |
4091 | ||
4092 | cpu_relax(); | |
4093 | udelay(1); | |
4094 | } | |
4095 | } | |
4096 | ||
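As a side note, the busy-wait above could also be expressed with the kernel's iopoll helper. A hedged sketch follows; its_wait_quiescent() is a hypothetical name, and it returns -ETIMEDOUT where the loop above returns -EBUSY:

#include <linux/iopoll.h>
#include <linux/time64.h>
#include <linux/irqchip/arm-gic-v3.h>

/* hypothetical helper, equivalent to the polling loop above */
static int its_wait_quiescent(void __iomem *base)
{
	u32 val;

	/* sample GITS_CTLR every 1us, give up after 1s */
	return readl_relaxed_poll_timeout(base + GITS_CTLR, val,
					  val & GITS_CTLR_QUIESCENT,
					  1, USEC_PER_SEC);
}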
9d111d49 | 4097 | static bool __maybe_unused its_enable_quirk_cavium_22375(void *data) |
94100970 RR |
4098 | { |
4099 | struct its_node *its = data; | |
4100 | ||
576a8342 MZ |
4101 | /* erratum 22375: only alloc 8MB table size (20 bits) */ |
4102 | its->typer &= ~GITS_TYPER_DEVBITS; | |
4103 | its->typer |= FIELD_PREP(GITS_TYPER_DEVBITS, 20 - 1); | |
94100970 | 4104 | its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375; |
9d111d49 AB |
4105 | |
4106 | return true; | |
94100970 RR |
4107 | } |
4108 | ||
9d111d49 | 4109 | static bool __maybe_unused its_enable_quirk_cavium_23144(void *data) |
fbf8f40e GK |
4110 | { |
4111 | struct its_node *its = data; | |
4112 | ||
4113 | its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144; | |
9d111d49 AB |
4114 | |
4115 | return true; | |
fbf8f40e GK |
4116 | } |
4117 | ||
9d111d49 | 4118 | static bool __maybe_unused its_enable_quirk_qdf2400_e0065(void *data) |
90922a2d SD |
4119 | { |
4120 | struct its_node *its = data; | |
4121 | ||
4122 | /* On QDF2400, the size of the ITE is 16 bytes */ | |
ffedbf0c MZ |
4123 | its->typer &= ~GITS_TYPER_ITT_ENTRY_SIZE; |
4124 | its->typer |= FIELD_PREP(GITS_TYPER_ITT_ENTRY_SIZE, 16 - 1); | |
9d111d49 AB |
4125 | |
4126 | return true; | |
90922a2d SD |
4127 | } |
4128 | ||
558b0165 AB |
4129 | static u64 its_irq_get_msi_base_pre_its(struct its_device *its_dev) |
4130 | { | |
4131 | struct its_node *its = its_dev->its; | |
4132 | ||
4133 | /* | |
4134 | * The Socionext Synquacer SoC has a so-called 'pre-ITS', | |
4135 | * which maps 32-bit writes targeted at a separate window of | |
4136 | * size '4 << device_id_bits' onto writes to GITS_TRANSLATER | |
4137 | * with device ID taken from bits [device_id_bits + 1:2] of | |
4138 | * the window offset. | |
4139 | */ | |
4140 | return its->pre_its_base + (its_dev->device_id << 2); | |
4141 | } | |
4142 | ||
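In other words, each device gets its own 4-byte doorbell in the pre-ITS window. A worked example with an arbitrary, assumed window base:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t pre_its_base = 0x30000000ULL;	/* window base, assumed */
	uint32_t devid = 0x42;

	/* devid is recovered from bits [device_id_bits + 1:2] of the offset */
	printf("devid 0x%x writes its MSIs at 0x%llx\n", devid,
	       (unsigned long long)(pre_its_base + ((uint64_t)devid << 2)));
	return 0;
}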
4143 | static bool __maybe_unused its_enable_quirk_socionext_synquacer(void *data) | |
4144 | { | |
4145 | struct its_node *its = data; | |
4146 | u32 pre_its_window[2]; | |
4147 | u32 ids; | |
4148 | ||
4149 | if (!fwnode_property_read_u32_array(its->fwnode_handle, | |
4150 | "socionext,synquacer-pre-its", | |
4151 | pre_its_window, | |
4152 | ARRAY_SIZE(pre_its_window))) { | |
4153 | ||
4154 | its->pre_its_base = pre_its_window[0]; | |
4155 | its->get_msi_base = its_irq_get_msi_base_pre_its; | |
4156 | ||
4157 | ids = ilog2(pre_its_window[1]) - 2; | |
576a8342 MZ |
4158 | if (device_ids(its) > ids) { |
4159 | its->typer &= ~GITS_TYPER_DEVBITS; | |
4160 | its->typer |= FIELD_PREP(GITS_TYPER_DEVBITS, ids - 1); | |
4161 | } | |
558b0165 AB |
4162 | |
4163 | /* the pre-ITS breaks isolation, so disable MSI remapping */ | |
4164 | its->msi_domain_flags &= ~IRQ_DOMAIN_FLAG_MSI_REMAP; | |
4165 | return true; | |
4166 | } | |
4167 | return false; | |
4168 | } | |
4169 | ||
5c9a882e MZ |
4170 | static bool __maybe_unused its_enable_quirk_hip07_161600802(void *data) |
4171 | { | |
4172 | struct its_node *its = data; | |
4173 | ||
4174 | /* | |
4175 | * Hip07 insists on using the wrong address for the VLPI | |
4176 | * page. Trick it into doing the right thing... | |
4177 | */ | |
4178 | its->vlpi_redist_offset = SZ_128K; | |
4179 | return true; | |
90922a2d SD |
4180 | } |
4181 | ||
67510cca | 4182 | static const struct gic_quirk its_quirks[] = { |
94100970 RR |
4183 | #ifdef CONFIG_CAVIUM_ERRATUM_22375 |
4184 | { | |
4185 | .desc = "ITS: Cavium errata 22375, 24313", | |
4186 | .iidr = 0xa100034c, /* ThunderX pass 1.x */ | |
4187 | .mask = 0xffff0fff, | |
4188 | .init = its_enable_quirk_cavium_22375, | |
4189 | }, | |
fbf8f40e GK |
4190 | #endif |
4191 | #ifdef CONFIG_CAVIUM_ERRATUM_23144 | |
4192 | { | |
4193 | .desc = "ITS: Cavium erratum 23144", | |
4194 | .iidr = 0xa100034c, /* ThunderX pass 1.x */ | |
4195 | .mask = 0xffff0fff, | |
4196 | .init = its_enable_quirk_cavium_23144, | |
4197 | }, | |
90922a2d SD |
4198 | #endif |
4199 | #ifdef CONFIG_QCOM_QDF2400_ERRATUM_0065 | |
4200 | { | |
4201 | .desc = "ITS: QDF2400 erratum 0065", | |
4202 | .iidr = 0x00001070, /* QDF2400 ITS rev 1.x */ | |
4203 | .mask = 0xffffffff, | |
4204 | .init = its_enable_quirk_qdf2400_e0065, | |
4205 | }, | |
558b0165 AB |
4206 | #endif |
4207 | #ifdef CONFIG_SOCIONEXT_SYNQUACER_PREITS | |
4208 | { | |
4209 | /* | |
4210 | * The Socionext Synquacer SoC incorporates ARM's own GIC-500 | |
4211 | * implementation, but with a 'pre-ITS' added that requires | |
4212 | * special handling in software. | |
4213 | */ | |
4214 | .desc = "ITS: Socionext Synquacer pre-ITS", | |
4215 | .iidr = 0x0001143b, | |
4216 | .mask = 0xffffffff, | |
4217 | .init = its_enable_quirk_socionext_synquacer, | |
4218 | }, | |
5c9a882e MZ |
4219 | #endif |
4220 | #ifdef CONFIG_HISILICON_ERRATUM_161600802 | |
4221 | { | |
4222 | .desc = "ITS: Hip07 erratum 161600802", | |
4223 | .iidr = 0x00000004, | |
4224 | .mask = 0xffffffff, | |
4225 | .init = its_enable_quirk_hip07_161600802, | |
4226 | }, | |
94100970 | 4227 | #endif |
67510cca RR |
4228 | { |
4229 | } | |
4230 | }; | |
4231 | ||
4232 | static void its_enable_quirks(struct its_node *its) | |
4233 | { | |
4234 | u32 iidr = readl_relaxed(its->base + GITS_IIDR); | |
4235 | ||
4236 | gic_enable_quirks(iidr, its_quirks, its); | |
4237 | } | |
4238 | ||
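The quirk table above is matched by masking the IIDR before comparing, which is how a single entry covers every ThunderX pass 1.x revision. A standalone sketch of that match; the actual comparison lives in gic_enable_quirks() in irq-gic-common.c, and the semantics here are inferred from the table rather than copied from that helper:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t iidr = 0xa100134c;		/* e.g. a later ThunderX pass 1.x */
	uint32_t quirk_iidr = 0xa100034c;	/* table entry */
	uint32_t mask = 0xffff0fff;		/* ignores the revision nibble */

	if ((iidr & mask) == quirk_iidr)
		printf("applying ITS quirk for IIDR 0x%08x\n", iidr);
	return 0;
}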
dba0bc7b DB |
4239 | static int its_save_disable(void) |
4240 | { | |
4241 | struct its_node *its; | |
4242 | int err = 0; | |
4243 | ||
a8db7456 | 4244 | raw_spin_lock(&its_lock); |
dba0bc7b DB |
4245 | list_for_each_entry(its, &its_nodes, entry) { |
4246 | void __iomem *base; | |
4247 | ||
4248 | if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE)) | |
4249 | continue; | |
4250 | ||
4251 | base = its->base; | |
4252 | its->ctlr_save = readl_relaxed(base + GITS_CTLR); | |
4253 | err = its_force_quiescent(base); | |
4254 | if (err) { | |
4255 | pr_err("ITS@%pa: failed to quiesce: %d\n", | |
4256 | &its->phys_base, err); | |
4257 | writel_relaxed(its->ctlr_save, base + GITS_CTLR); | |
4258 | goto err; | |
4259 | } | |
4260 | ||
4261 | its->cbaser_save = gits_read_cbaser(base + GITS_CBASER); | |
4262 | } | |
4263 | ||
4264 | err: | |
4265 | if (err) { | |
4266 | list_for_each_entry_continue_reverse(its, &its_nodes, entry) { | |
4267 | void __iomem *base; | |
4268 | ||
4269 | if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE)) | |
4270 | continue; | |
4271 | ||
4272 | base = its->base; | |
4273 | writel_relaxed(its->ctlr_save, base + GITS_CTLR); | |
4274 | } | |
4275 | } | |
a8db7456 | 4276 | raw_spin_unlock(&its_lock); |
dba0bc7b DB |
4277 | |
4278 | return err; | |
4279 | } | |
4280 | ||
4281 | static void its_restore_enable(void) | |
4282 | { | |
4283 | struct its_node *its; | |
4284 | int ret; | |
4285 | ||
a8db7456 | 4286 | raw_spin_lock(&its_lock); |
dba0bc7b DB |
4287 | list_for_each_entry(its, &its_nodes, entry) { |
4288 | void __iomem *base; | |
4289 | int i; | |
4290 | ||
4291 | if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE)) | |
4292 | continue; | |
4293 | ||
4294 | base = its->base; | |
4295 | ||
4296 | /* | |
4297 | * Make sure that the ITS is disabled. If it fails to quiesce, | |
4298 | * don't restore it since writing to CBASER or BASER<n> | |
4299 | * registers is undefined according to the GIC v3 ITS | |
4300 | * Specification. | |
4301 | */ | |
4302 | ret = its_force_quiescent(base); | |
4303 | if (ret) { | |
4304 | pr_err("ITS@%pa: failed to quiesce on resume: %d\n", | |
4305 | &its->phys_base, ret); | |
4306 | continue; | |
4307 | } | |
4308 | ||
4309 | gits_write_cbaser(its->cbaser_save, base + GITS_CBASER); | |
4310 | ||
4311 | /* | |
4312 | * Writing CBASER resets CREADR to 0, so make CWRITER and | |
4313 | * cmd_write line up with it. | |
4314 | */ | |
4315 | its->cmd_write = its->cmd_base; | |
4316 | gits_write_cwriter(0, base + GITS_CWRITER); | |
4317 | ||
4318 | /* Restore GITS_BASER from the value cache. */ | |
4319 | for (i = 0; i < GITS_BASER_NR_REGS; i++) { | |
4320 | struct its_baser *baser = &its->tables[i]; | |
4321 | ||
4322 | if (!(baser->val & GITS_BASER_VALID)) | |
4323 | continue; | |
4324 | ||
4325 | its_write_baser(its, baser, baser->val); | |
4326 | } | |
4327 | writel_relaxed(its->ctlr_save, base + GITS_CTLR); | |
920181ce DB |
4328 | |
4329 | /* | |
4330 | * Re-init the collection if it is stored in the ITS. This is | |
4331 | * indicated by col_id being less than the HCC field | |
4332 | * (CID < HCC), as specified in the GICv3 documentation. | |
4333 | */ | |
4334 | if (its->collections[smp_processor_id()].col_id < | |
4335 | GITS_TYPER_HCC(gic_read_typer(base + GITS_TYPER))) | |
4336 | its_cpu_init_collection(its); | |
dba0bc7b | 4337 | } |
a8db7456 | 4338 | raw_spin_unlock(&its_lock); |
dba0bc7b DB |
4339 | } |
4340 | ||
4341 | static struct syscore_ops its_syscore_ops = { | |
4342 | .suspend = its_save_disable, | |
4343 | .resume = its_restore_enable, | |
4344 | }; | |
4345 | ||
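These hooks only take effect once registered with the syscore machinery, which the driver presumably does at init time, outside this excerpt. A minimal sketch with a hypothetical helper name:

#include <linux/init.h>
#include <linux/syscore_ops.h>

static void __init its_register_pm_ops(void)	/* hypothetical helper */
{
	register_syscore_ops(&its_syscore_ops);
}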
db40f0a7 | 4346 | static int its_init_domain(struct fwnode_handle *handle, struct its_node *its) |
d14ae5e6 TN |
4347 | { |
4348 | struct irq_domain *inner_domain; | |
4349 | struct msi_domain_info *info; | |
4350 | ||
4351 | info = kzalloc(sizeof(*info), GFP_KERNEL); | |
4352 | if (!info) | |
4353 | return -ENOMEM; | |
4354 | ||
db40f0a7 | 4355 | inner_domain = irq_domain_create_tree(handle, &its_domain_ops, its); |
d14ae5e6 TN |
4356 | if (!inner_domain) { |
4357 | kfree(info); | |
4358 | return -ENOMEM; | |
4359 | } | |
4360 | ||
db40f0a7 | 4361 | inner_domain->parent = its_parent; |
96f0d93a | 4362 | irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS); |
558b0165 | 4363 | inner_domain->flags |= its->msi_domain_flags; |
d14ae5e6 TN |
4364 | info->ops = &its_msi_domain_ops; |
4365 | info->data = its; | |
4366 | inner_domain->host_data = info; | |
4367 | ||
4368 | return 0; | |
4369 | } | |
4370 | ||
8fff27ae MZ |
4371 | static int its_init_vpe_domain(void) |
4372 | { | |
20b3d54e MZ |
4373 | struct its_node *its; |
4374 | u32 devid; | |
4375 | int entries; | |
4376 | ||
4377 | if (gic_rdists->has_direct_lpi) { | |
4378 | pr_info("ITS: Using DirectLPI for VPE invalidation\n"); | |
4379 | return 0; | |
4380 | } | |
4381 | ||
4382 | /* Any ITS will do, even if not v4 */ | |
4383 | its = list_first_entry(&its_nodes, struct its_node, entry); | |
4384 | ||
4385 | entries = roundup_pow_of_two(nr_cpu_ids); | |
6396bb22 | 4386 | vpe_proxy.vpes = kcalloc(entries, sizeof(*vpe_proxy.vpes), |
20b3d54e MZ |
4387 | GFP_KERNEL); |
4388 | if (!vpe_proxy.vpes) { | |
4389 | pr_err("ITS: Can't allocate GICv4 proxy device array\n"); | |
4390 | return -ENOMEM; | |
4391 | } | |
4392 | ||
4393 | /* Use the last possible DevID */ | |
576a8342 | 4394 | devid = GENMASK(device_ids(its) - 1, 0); |
20b3d54e MZ |
4395 | vpe_proxy.dev = its_create_device(its, devid, entries, false); |
4396 | if (!vpe_proxy.dev) { | |
4397 | kfree(vpe_proxy.vpes); | |
4398 | pr_err("ITS: Can't allocate GICv4 proxy device\n"); | |
4399 | return -ENOMEM; | |
4400 | } | |
4401 | ||
c427a475 | 4402 | BUG_ON(entries > vpe_proxy.dev->nr_ites); |
20b3d54e MZ |
4403 | |
4404 | raw_spin_lock_init(&vpe_proxy.lock); | |
4405 | vpe_proxy.next_victim = 0; | |
4406 | pr_info("ITS: Allocated DevID %x as GICv4 proxy device (%d slots)\n", | |
4407 | devid, vpe_proxy.dev->nr_ites); | |
4408 | ||
8fff27ae MZ |
4409 | return 0; |
4410 | } | |
4411 | ||
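The "last possible DevID" used above is simply the all-ones ID for the ITS's device-ID width: with 20 DevID bits (an assumed width), GENMASK(19, 0) yields 0xfffff. A tiny demo:

#include <stdio.h>

#define GENMASK(h, l) ((~0u << (l)) & (~0u >> (31 - (h))))

int main(void)
{
	int devid_bits = 20;		/* device_ids(its), assumed */

	printf("proxy devid = 0x%x\n", GENMASK(devid_bits - 1, 0));
	return 0;
}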
3dfa576b MZ |
4412 | static int __init its_compute_its_list_map(struct resource *res, |
4413 | void __iomem *its_base) | |
4414 | { | |
4415 | int its_number; | |
4416 | u32 ctlr; | |
4417 | ||
4418 | /* | |
4419 | * This is assumed to be done early enough that we're | |
4420 | * guaranteed to be single-threaded, hence no | |
4421 | * locking. Should this change, we should address | |
4422 | * this. | |
4423 | */ | |
ab60491e MZ |
4424 | its_number = find_first_zero_bit(&its_list_map, GICv4_ITS_LIST_MAX); |
4425 | if (its_number >= GICv4_ITS_LIST_MAX) { | |
3dfa576b MZ |
4426 | pr_err("ITS@%pa: No ITSList entry available!\n", |
4427 | &res->start); | |
4428 | return -EINVAL; | |
4429 | } | |
4430 | ||
4431 | ctlr = readl_relaxed(its_base + GITS_CTLR); | |
4432 | ctlr &= ~GITS_CTLR_ITS_NUMBER; | |
4433 | ctlr |= its_number << GITS_CTLR_ITS_NUMBER_SHIFT; | |
4434 | writel_relaxed(ctlr, its_base + GITS_CTLR); | |
4435 | ctlr = readl_relaxed(its_base + GITS_CTLR); | |
4436 | if ((ctlr & GITS_CTLR_ITS_NUMBER) != (its_number << GITS_CTLR_ITS_NUMBER_SHIFT)) { | |
4437 | its_number = ctlr & GITS_CTLR_ITS_NUMBER; | |
4438 | its_number >>= GITS_CTLR_ITS_NUMBER_SHIFT; | |
4439 | } | |
4440 | ||
4441 | if (test_and_set_bit(its_number, &its_list_map)) { | |
4442 | pr_err("ITS@%pa: Duplicate ITSList entry %d\n", | |
4443 | &res->start, its_number); | |
4444 | return -EINVAL; | |
4445 | } | |
4446 | ||
4447 | return its_number; | |
4448 | } | |
4449 | ||
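The write/read-back dance above copes with implementations where the ITSNumber field is read-only: if the value did not stick, the driver adopts whatever the hardware reports, and only then checks it for duplicates. A standalone sketch; the field position is an assumption here, not taken from the headers:

#include <stdint.h>
#include <stdio.h>

#define ITS_NUMBER_SHIFT 4			/* assumed field position */
#define ITS_NUMBER_MASK  (0xfU << ITS_NUMBER_SHIFT)

int main(void)
{
	uint32_t hw_ctlr = 0x2U << ITS_NUMBER_SHIFT;	/* value the RO field reports */
	uint32_t wanted  = 0;				/* number we tried to program */
	uint32_t written = (hw_ctlr & ~ITS_NUMBER_MASK) |
			   (wanted << ITS_NUMBER_SHIFT);
	uint32_t readback = hw_ctlr;			/* the field ignored the write */

	if ((readback & ITS_NUMBER_MASK) != (written & ITS_NUMBER_MASK))
		printf("ITSNumber is read-only, adopting %u\n",
		       (readback & ITS_NUMBER_MASK) >> ITS_NUMBER_SHIFT);
	return 0;
}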
db40f0a7 TN |
4450 | static int __init its_probe_one(struct resource *res, |
4451 | struct fwnode_handle *handle, int numa_node) | |
4c21f3c2 | 4452 | { |
4c21f3c2 MZ |
4453 | struct its_node *its; |
4454 | void __iomem *its_base; | |
3dfa576b MZ |
4455 | u32 val, ctlr; |
4456 | u64 baser, tmp, typer; | |
539d3782 | 4457 | struct page *page; |
4c21f3c2 MZ |
4458 | int err; |
4459 | ||
5e46a484 | 4460 | its_base = ioremap(res->start, SZ_64K); |
4c21f3c2 | 4461 | if (!its_base) { |
db40f0a7 | 4462 | pr_warn("ITS@%pa: Unable to map ITS registers\n", &res->start); |
4c21f3c2 MZ |
4463 | return -ENOMEM; |
4464 | } | |
4465 | ||
4466 | val = readl_relaxed(its_base + GITS_PIDR2) & GIC_PIDR2_ARCH_MASK; | |
4467 | if (val != 0x30 && val != 0x40) { | |
db40f0a7 | 4468 | pr_warn("ITS@%pa: No ITS detected, giving up\n", &res->start); |
4c21f3c2 MZ |
4469 | err = -ENODEV; |
4470 | goto out_unmap; | |
4471 | } | |
4472 | ||
4559fbb3 YW |
4473 | err = its_force_quiescent(its_base); |
4474 | if (err) { | |
db40f0a7 | 4475 | pr_warn("ITS@%pa: Failed to quiesce, giving up\n", &res->start); |
4559fbb3 YW |
4476 | goto out_unmap; |
4477 | } | |
4478 | ||
db40f0a7 | 4479 | pr_info("ITS %pR\n", res); |
4c21f3c2 MZ |
4480 | |
4481 | its = kzalloc(sizeof(*its), GFP_KERNEL); | |
4482 | if (!its) { | |
4483 | err = -ENOMEM; | |
4484 | goto out_unmap; | |
4485 | } | |
4486 | ||
4487 | raw_spin_lock_init(&its->lock); | |
9791ec7d | 4488 | mutex_init(&its->dev_alloc_lock); |
4c21f3c2 MZ |
4489 | INIT_LIST_HEAD(&its->entry); |
4490 | INIT_LIST_HEAD(&its->its_device_list); | |
3dfa576b | 4491 | typer = gic_read_typer(its_base + GITS_TYPER); |
0dd57fed | 4492 | its->typer = typer; |
4c21f3c2 | 4493 | its->base = its_base; |
db40f0a7 | 4494 | its->phys_base = res->start; |
0dd57fed | 4495 | if (is_v4(its)) { |
3dfa576b MZ |
4496 | if (!(typer & GITS_TYPER_VMOVP)) { |
4497 | err = its_compute_its_list_map(res, its_base); | |
4498 | if (err < 0) | |
4499 | goto out_free_its; | |
4500 | ||
debf6d02 MZ |
4501 | its->list_nr = err; |
4502 | ||
3dfa576b MZ |
4503 | pr_info("ITS@%pa: Using ITS number %d\n", |
4504 | &res->start, err); | |
4505 | } else { | |
4506 | pr_info("ITS@%pa: Single VMOVP capable\n", &res->start); | |
4507 | } | |
5e516846 MZ |
4508 | |
4509 | if (is_v4_1(its)) { | |
4510 | u32 svpet = FIELD_GET(GITS_TYPER_SVPET, typer); | |
5e46a484 MZ |
4511 | |
4512 | its->sgir_base = ioremap(res->start + SZ_128K, SZ_64K); | |
4513 | if (!its->sgir_base) { | |
4514 | err = -ENOMEM; | |
4515 | goto out_free_its; | |
4516 | } | |
4517 | ||
5e516846 MZ |
4518 | its->mpidr = readl_relaxed(its_base + GITS_MPIDR); |
4519 | ||
4520 | pr_info("ITS@%pa: Using GICv4.1 mode %08x %08x\n", | |
4521 | &res->start, its->mpidr, svpet); | |
4522 | } | |
3dfa576b MZ |
4523 | } |
4524 | ||
db40f0a7 | 4525 | its->numa_node = numa_node; |
4c21f3c2 | 4526 | |
539d3782 SD |
4527 | page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, |
4528 | get_order(ITS_CMD_QUEUE_SZ)); | |
4529 | if (!page) { | |
4c21f3c2 | 4530 | err = -ENOMEM; |
5e46a484 | 4531 | goto out_unmap_sgir; |
4c21f3c2 | 4532 | } |
539d3782 | 4533 | its->cmd_base = (void *)page_address(page); |
4c21f3c2 | 4534 | its->cmd_write = its->cmd_base; |
558b0165 AB |
4535 | its->fwnode_handle = handle; |
4536 | its->get_msi_base = its_irq_get_msi_base; | |
4537 | its->msi_domain_flags = IRQ_DOMAIN_FLAG_MSI_REMAP; | |
4c21f3c2 | 4538 | |
67510cca RR |
4539 | its_enable_quirks(its); |
4540 | ||
0e0b0f69 | 4541 | err = its_alloc_tables(its); |
4c21f3c2 MZ |
4542 | if (err) |
4543 | goto out_free_cmd; | |
4544 | ||
4545 | err = its_alloc_collections(its); | |
4546 | if (err) | |
4547 | goto out_free_tables; | |
4548 | ||
4549 | baser = (virt_to_phys(its->cmd_base) | | |
2fd632a0 | 4550 | GITS_CBASER_RaWaWb | |
4c21f3c2 MZ |
4551 | GITS_CBASER_InnerShareable | |
4552 | (ITS_CMD_QUEUE_SZ / SZ_4K - 1) | | |
4553 | GITS_CBASER_VALID); | |
4554 | ||
0968a619 VM |
4555 | gits_write_cbaser(baser, its->base + GITS_CBASER); |
4556 | tmp = gits_read_cbaser(its->base + GITS_CBASER); | |
4c21f3c2 | 4557 | |
4ad3e363 | 4558 | if ((tmp ^ baser) & GITS_CBASER_SHAREABILITY_MASK) { |
241a386c MZ |
4559 | if (!(tmp & GITS_CBASER_SHAREABILITY_MASK)) { |
4560 | /* | |
4561 | * The HW reports non-shareable, we must | |
4562 | * remove the cacheability attributes as | |
4563 | * well. | |
4564 | */ | |
4565 | baser &= ~(GITS_CBASER_SHAREABILITY_MASK | | |
4566 | GITS_CBASER_CACHEABILITY_MASK); | |
4567 | baser |= GITS_CBASER_nC; | |
0968a619 | 4568 | gits_write_cbaser(baser, its->base + GITS_CBASER); |
241a386c | 4569 | } |
4c21f3c2 MZ |
4570 | pr_info("ITS: using cache flushing for cmd queue\n"); |
4571 | its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING; | |
4572 | } | |
4573 | ||
	gits_write_cwriter(0, its->base + GITS_CWRITER);
	ctlr = readl_relaxed(its->base + GITS_CTLR);
	ctlr |= GITS_CTLR_ENABLE;
	if (is_v4(its))
		ctlr |= GITS_CTLR_ImDe;
	writel_relaxed(ctlr, its->base + GITS_CTLR);

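	/*
	 * A non-zero GITS_TYPER.HCC means the collections are held in
	 * hardware registers rather than in external memory, which makes
	 * it safe to save and restore the ITS state across suspend.
	 */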
	if (GITS_TYPER_HCC(typer))
		its->flags |= ITS_FLAGS_SAVE_SUSPEND_STATE;

	err = its_init_domain(handle, its);
	if (err)
		goto out_free_tables;

	raw_spin_lock(&its_lock);
	list_add(&its->entry, &its_nodes);
	raw_spin_unlock(&its_lock);

	return 0;

out_free_tables:
	its_free_tables(its);
out_free_cmd:
	free_pages((unsigned long)its->cmd_base, get_order(ITS_CMD_QUEUE_SZ));
out_unmap_sgir:
	if (its->sgir_base)
		iounmap(its->sgir_base);
out_free_its:
	kfree(its);
out_unmap:
	iounmap(its_base);
	pr_err("ITS@%pa: failed probing (%d)\n", &res->start, err);
	return err;
}

static bool gic_rdists_supports_plpis(void)
{
	return !!(gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER) & GICR_TYPER_PLPIS);
}

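/*
 * Called on each CPU before its redistributor is programmed: if the CPU
 * arrives with LPIs already enabled (other than via hotplug or
 * preallocated tables), all we can do is turn them off and taint the
 * kernel, as the tables they point at are not under our control.
 */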
static int redist_disable_lpis(void)
{
	void __iomem *rbase = gic_data_rdist_rd_base();
	u64 timeout = USEC_PER_SEC;
	u64 val;

	if (!gic_rdists_supports_plpis()) {
		pr_info("CPU%d: LPIs not supported\n", smp_processor_id());
		return -ENXIO;
	}

	val = readl_relaxed(rbase + GICR_CTLR);
	if (!(val & GICR_CTLR_ENABLE_LPIS))
		return 0;

	/*
	 * If coming via a CPU hotplug event, we don't need to disable
	 * LPIs before trying to re-enable them. They are already
	 * configured and all is well in the world.
	 *
	 * If running with preallocated tables, there is nothing to do.
	 */
	if (gic_data_rdist()->lpi_enabled ||
	    (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED))
		return 0;

	/*
	 * From that point on, we only try to do some damage control.
	 */
	pr_warn("GICv3: CPU%d: Booted with LPIs enabled, memory probably corrupted\n",
		smp_processor_id());
	add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);

	/* Disable LPIs */
	val &= ~GICR_CTLR_ENABLE_LPIS;
	writel_relaxed(val, rbase + GICR_CTLR);

	/* Make sure any change to GICR_CTLR is observable by the GIC */
	dsb(sy);

	/*
	 * Software must observe RWP==0 after clearing GICR_CTLR.EnableLPIs
	 * from 1 to 0 before programming GICR_PEND{PROP}BASER registers.
	 * Error out if we time out waiting for RWP to clear.
	 */
	while (readl_relaxed(rbase + GICR_CTLR) & GICR_CTLR_RWP) {
		if (!timeout) {
			pr_err("CPU%d: Timeout while disabling LPIs\n",
			       smp_processor_id());
			return -ETIMEDOUT;
		}
		udelay(1);
		timeout--;
	}

	/*
	 * After it has been written to 1, it is IMPLEMENTATION
	 * DEFINED whether GICR_CTLR.EnableLPIs becomes RES1 or can be
	 * cleared to 0. Error out if clearing the bit failed.
	 */
	if (readl_relaxed(rbase + GICR_CTLR) & GICR_CTLR_ENABLE_LPIS) {
		pr_err("CPU%d: Failed to disable LPIs\n", smp_processor_id());
		return -EBUSY;
	}

	return 0;
}

int its_cpu_init(void)
{
	if (!list_empty(&its_nodes)) {
		int ret;

		ret = redist_disable_lpis();
		if (ret)
			return ret;

		its_cpu_init_lpis();
		its_cpu_init_collections();
	}

	return 0;
}

static const struct of_device_id its_device_id[] = {
	{ .compatible = "arm,gic-v3-its", },
	{},
};

static int __init its_of_probe(struct device_node *node)
{
	struct device_node *np;
	struct resource res;

	for (np = of_find_matching_node(node, its_device_id); np;
	     np = of_find_matching_node(np, its_device_id)) {
		if (!of_device_is_available(np))
			continue;
		if (!of_property_read_bool(np, "msi-controller")) {
			pr_warn("%pOF: no msi-controller property, ITS ignored\n",
				np);
			continue;
		}

		if (of_address_to_resource(np, 0, &res)) {
			pr_warn("%pOF: no regs?\n", np);
			continue;
		}

		its_probe_one(&res, &np->fwnode, of_node_to_nid(np));
	}
	return 0;
}
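
/*
 * For reference, a minimal sketch (addresses illustrative) of the kind
 * of device-tree node the loop above matches: an "arm,gic-v3-its" child
 * of the GIC node carrying the mandatory msi-controller property and a
 * single reg entry covering the ITS register frames:
 *
 *	its: msi-controller@2f020000 {
 *		compatible = "arm,gic-v3-its";
 *		msi-controller;
 *		reg = <0x0 0x2f020000 0x0 0x20000>;
 *	};
 */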

#ifdef CONFIG_ACPI

#define ACPI_GICV3_ITS_MEM_SIZE		(SZ_128K)

#ifdef CONFIG_ACPI_NUMA
struct its_srat_map {
	/* numa node id */
	u32	numa_node;
	/* GIC ITS ID */
	u32	its_id;
};

static struct its_srat_map *its_srat_maps __initdata;
static int its_in_srat __initdata;

static int __init acpi_get_its_numa_node(u32 its_id)
{
	int i;

	for (i = 0; i < its_in_srat; i++) {
		if (its_id == its_srat_maps[i].its_id)
			return its_srat_maps[i].numa_node;
	}
	return NUMA_NO_NODE;
}

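/*
 * Dummy callback: acpi_table_parse_entries() invokes it once per
 * matching SRAT entry, so passing it first is a cheap way to count the
 * ITS affinity records before sizing the its_srat_maps array.
 */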
static int __init gic_acpi_match_srat_its(union acpi_subtable_headers *header,
					  const unsigned long end)
{
	return 0;
}

static int __init gic_acpi_parse_srat_its(union acpi_subtable_headers *header,
					  const unsigned long end)
{
	int node;
	struct acpi_srat_gic_its_affinity *its_affinity;

	its_affinity = (struct acpi_srat_gic_its_affinity *)header;
	if (!its_affinity)
		return -EINVAL;

	if (its_affinity->header.length < sizeof(*its_affinity)) {
		pr_err("SRAT: Invalid header length %d in ITS affinity\n",
			its_affinity->header.length);
		return -EINVAL;
	}

	node = acpi_map_pxm_to_node(its_affinity->proximity_domain);

	if (node == NUMA_NO_NODE || node >= MAX_NUMNODES) {
		pr_err("SRAT: Invalid NUMA node %d in ITS affinity\n", node);
		return 0;
	}

	its_srat_maps[its_in_srat].numa_node = node;
	its_srat_maps[its_in_srat].its_id = its_affinity->its_id;
	its_in_srat++;
	pr_info("SRAT: PXM %d -> ITS %d -> Node %d\n",
		its_affinity->proximity_domain, its_affinity->its_id, node);

	return 0;
}

static void __init acpi_table_parse_srat_its(void)
{
	int count;

	count = acpi_table_parse_entries(ACPI_SIG_SRAT,
			sizeof(struct acpi_table_srat),
			ACPI_SRAT_TYPE_GIC_ITS_AFFINITY,
			gic_acpi_match_srat_its, 0);
	if (count <= 0)
		return;

	its_srat_maps = kmalloc_array(count, sizeof(struct its_srat_map),
				      GFP_KERNEL);
	if (!its_srat_maps) {
		pr_warn("SRAT: Failed to allocate memory for its_srat_maps!\n");
		return;
	}

	acpi_table_parse_entries(ACPI_SIG_SRAT,
			sizeof(struct acpi_table_srat),
			ACPI_SRAT_TYPE_GIC_ITS_AFFINITY,
			gic_acpi_parse_srat_its, 0);
}

/* free the its_srat_maps after ITS probing */
static void __init acpi_its_srat_maps_free(void)
{
	kfree(its_srat_maps);
}
#else
static void __init acpi_table_parse_srat_its(void)	{ }
static int __init acpi_get_its_numa_node(u32 its_id) { return NUMA_NO_NODE; }
static void __init acpi_its_srat_maps_free(void) { }
#endif

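/*
 * Under ACPI, each ITS is described by a generic translator entry in
 * the MADT. Probing one means carving out a 128kB MMIO resource at its
 * base address, allocating a fwnode to act as the domain token, and
 * registering that token with the IORT so devices can be routed to the
 * right MSI domain.
 */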
static int __init gic_acpi_parse_madt_its(union acpi_subtable_headers *header,
					  const unsigned long end)
{
	struct acpi_madt_generic_translator *its_entry;
	struct fwnode_handle *dom_handle;
	struct resource res;
	int err;

	its_entry = (struct acpi_madt_generic_translator *)header;
	memset(&res, 0, sizeof(res));
	res.start = its_entry->base_address;
	res.end = its_entry->base_address + ACPI_GICV3_ITS_MEM_SIZE - 1;
	res.flags = IORESOURCE_MEM;

	dom_handle = irq_domain_alloc_fwnode(&res.start);
	if (!dom_handle) {
		pr_err("ITS@%pa: Unable to allocate GICv3 ITS domain token\n",
		       &res.start);
		return -ENOMEM;
	}

	err = iort_register_domain_token(its_entry->translation_id, res.start,
					 dom_handle);
	if (err) {
		pr_err("ITS@%pa: Unable to register GICv3 ITS domain token (ITS ID %d) to IORT\n",
		       &res.start, its_entry->translation_id);
		goto dom_err;
	}

	err = its_probe_one(&res, dom_handle,
			    acpi_get_its_numa_node(its_entry->translation_id));
	if (!err)
		return 0;

	iort_deregister_domain_token(its_entry->translation_id);
dom_err:
	irq_domain_free_fwnode(dom_handle);
	return err;
}

static void __init its_acpi_probe(void)
{
	acpi_table_parse_srat_its();
	acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR,
			      gic_acpi_parse_madt_its, 0);
	acpi_its_srat_maps_free();
}
#else
static void __init its_acpi_probe(void) { }
#endif

int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
		    struct irq_domain *parent_domain)
{
	struct device_node *of_node;
	struct its_node *its;
	bool has_v4 = false;
	bool has_v4_1 = false;
	int err;

	gic_rdists = rdists;

	its_parent = parent_domain;
	of_node = to_of_node(handle);
	if (of_node)
		its_of_probe(of_node);
	else
		its_acpi_probe();

	if (list_empty(&its_nodes)) {
		pr_warn("ITS: No ITS available, not enabling LPIs\n");
		return -ENXIO;
	}

	err = allocate_lpi_tables();
	if (err)
		return err;

	list_for_each_entry(its, &its_nodes, entry) {
		has_v4 |= is_v4(its);
		has_v4_1 |= is_v4_1(its);
	}

	/* Don't bother with inconsistent systems */
	if (WARN_ON(!has_v4_1 && rdists->has_rvpeid))
		rdists->has_rvpeid = false;

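	/*
	 * GICv4 direct injection requires support on both sides: a v4
	 * ITS and redistributors implementing vLPIs. If either of the
	 * VPE irqdomains fails to initialise, degrade gracefully to
	 * plain GICv3 LPI handling.
	 */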
	if (has_v4 && rdists->has_vlpis) {
		if (its_init_vpe_domain() ||
		    its_init_v4(parent_domain, &its_vpe_domain_ops)) {
			rdists->has_vlpis = false;
			pr_err("ITS: Disabling GICv4 support\n");
		}
	}

	register_syscore_ops(&its_syscore_ops);

	return 0;
}