/*
 * Copyright (C) 2013-2017 ARM Limited, All Rights Reserved.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/acpi.h>
#include <linux/acpi_iort.h>
#include <linux/bitmap.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/list.h>
#include <linux/list_sort.h>
#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>

#include <linux/irqchip.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/irqchip/arm-gic-v4.h>

#include <asm/cputype.h>
#include <asm/exception.h>

#include "irq-gic-common.h"

#define ITS_FLAGS_CMDQ_NEEDS_FLUSHING		(1ULL << 0)
#define ITS_FLAGS_WORKAROUND_CAVIUM_22375	(1ULL << 1)
#define ITS_FLAGS_WORKAROUND_CAVIUM_23144	(1ULL << 2)
#define ITS_FLAGS_SAVE_SUSPEND_STATE		(1ULL << 3)

#define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING	(1 << 0)

static u32 lpi_id_bits;

/*
 * We allocate memory for PROPBASE to cover 2 ^ lpi_id_bits LPIs to
 * deal with (one configuration byte per interrupt). PENDBASE has to
 * be 64kB aligned (one bit per LPI, plus 8192 bits for SPI/PPI/SGI).
 */
#define LPI_NRBITS		lpi_id_bits
#define LPI_PROPBASE_SZ		ALIGN(BIT(LPI_NRBITS), SZ_64K)
#define LPI_PENDBASE_SZ		ALIGN(BIT(LPI_NRBITS) / 8, SZ_64K)
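/*
 * Worked sizing example, assuming lpi_id_bits == 16:
 *   LPI_PROPBASE_SZ = ALIGN(1 << 16, SZ_64K)       = 64kB (one config byte per LPI)
 *   LPI_PENDBASE_SZ = ALIGN((1 << 16) / 8, SZ_64K) = 64kB (one pending bit per LPI)
 */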

#define LPI_PROP_DEFAULT_PRIO	0xa0

/*
 * Collection structure - just an ID, and a redistributor address to
 * ping. We use one per CPU as a bag of interrupts assigned to this
 * CPU.
 */
struct its_collection {
	u64			target_address;
	u16			col_id;
};

/*
 * The ITS_BASER structure - contains memory information, cached
 * value of BASER register configuration and ITS page size.
 */
struct its_baser {
	void		*base;
	u64		val;
	u32		order;
	u32		psz;
};

struct its_device;

/*
 * The ITS structure - contains most of the infrastructure, with the
 * top-level MSI domain, the command queue, the collections, and the
 * list of devices writing to it.
 */
struct its_node {
	raw_spinlock_t		lock;
	struct list_head	entry;
	void __iomem		*base;
	phys_addr_t		phys_base;
	struct its_cmd_block	*cmd_base;
	struct its_cmd_block	*cmd_write;
	struct its_baser	tables[GITS_BASER_NR_REGS];
	struct its_collection	*collections;
	struct fwnode_handle	*fwnode_handle;
	u64			(*get_msi_base)(struct its_device *its_dev);
	u64			cbaser_save;
	u32			ctlr_save;
	struct list_head	its_device_list;
	u64			flags;
	unsigned long		list_nr;
	u32			ite_size;
	u32			device_ids;
	int			numa_node;
	unsigned int		msi_domain_flags;
	u32			pre_its_base;	/* for Socionext Synquacer */
	bool			is_v4;
	int			vlpi_redist_offset;
};

#define ITS_ITT_ALIGN		SZ_256

/* The maximum number of VPEID bits supported by VLPI commands */
#define ITS_MAX_VPEID_BITS	(16)
#define ITS_MAX_VPEID		(1 << (ITS_MAX_VPEID_BITS))

/* Convert page order to size in bytes */
#define PAGE_ORDER_TO_SIZE(o)	(PAGE_SIZE << (o))

struct event_lpi_map {
	unsigned long		*lpi_map;
	u16			*col_map;
	irq_hw_number_t		lpi_base;
	int			nr_lpis;
	struct mutex		vlpi_lock;
	struct its_vm		*vm;
	struct its_vlpi_map	*vlpi_maps;
	int			nr_vlpis;
};

/*
 * The ITS view of a device - belongs to an ITS, owns an interrupt
 * translation table, and a list of interrupts. If some of its
 * LPIs are injected into a guest (GICv4), the event_map.vm field
 * indicates which one.
 */
struct its_device {
	struct list_head	entry;
	struct its_node		*its;
	struct event_lpi_map	event_map;
	void			*itt;
	u32			nr_ites;
	u32			device_id;
};

static struct {
	raw_spinlock_t		lock;
	struct its_device	*dev;
	struct its_vpe		**vpes;
	int			next_victim;
} vpe_proxy;

static LIST_HEAD(its_nodes);
static DEFINE_SPINLOCK(its_lock);
static struct rdists *gic_rdists;
static struct irq_domain *its_parent;

static unsigned long its_list_map;
static u16 vmovp_seq_num;
static DEFINE_RAW_SPINLOCK(vmovp_lock);

static DEFINE_IDA(its_vpeid_ida);

#define gic_data_rdist()		(raw_cpu_ptr(gic_rdists->rdist))
#define gic_data_rdist_rd_base()	(gic_data_rdist()->rd_base)
#define gic_data_rdist_vlpi_base()	(gic_data_rdist_rd_base() + SZ_128K)

static struct its_collection *dev_event_to_col(struct its_device *its_dev,
					       u32 event)
{
	struct its_node *its = its_dev->its;

	return its->collections + its_dev->event_map.col_map[event];
}

187 | static struct its_collection *valid_col(struct its_collection *col) |
188 | { | |
189 | if (WARN_ON_ONCE(col->target_address & GENMASK_ULL(0, 15))) | |
190 | return NULL; | |
191 | ||
192 | return col; | |
193 | } | |
194 | ||
205e065d MZ |
195 | static struct its_vpe *valid_vpe(struct its_node *its, struct its_vpe *vpe) |
196 | { | |
197 | if (valid_col(its->collections + vpe->col_idx)) | |
198 | return vpe; | |
199 | ||
200 | return NULL; | |
201 | } | |
202 | ||
/*
 * ITS command descriptors - parameters to be encoded in a command
 * block.
 */
struct its_cmd_desc {
	union {
		struct {
			struct its_device *dev;
			u32 event_id;
		} its_inv_cmd;

		struct {
			struct its_device *dev;
			u32 event_id;
		} its_clear_cmd;

		struct {
			struct its_device *dev;
			u32 event_id;
		} its_int_cmd;

		struct {
			struct its_device *dev;
			int valid;
		} its_mapd_cmd;

		struct {
			struct its_collection *col;
			int valid;
		} its_mapc_cmd;

		struct {
			struct its_device *dev;
			u32 phys_id;
			u32 event_id;
		} its_mapti_cmd;

		struct {
			struct its_device *dev;
			struct its_collection *col;
			u32 event_id;
		} its_movi_cmd;

		struct {
			struct its_device *dev;
			u32 event_id;
		} its_discard_cmd;

		struct {
			struct its_collection *col;
		} its_invall_cmd;

		struct {
			struct its_vpe *vpe;
		} its_vinvall_cmd;

		struct {
			struct its_vpe *vpe;
			struct its_collection *col;
			bool valid;
		} its_vmapp_cmd;

		struct {
			struct its_vpe *vpe;
			struct its_device *dev;
			u32 virt_id;
			u32 event_id;
			bool db_enabled;
		} its_vmapti_cmd;

		struct {
			struct its_vpe *vpe;
			struct its_device *dev;
			u32 event_id;
			bool db_enabled;
		} its_vmovi_cmd;

		struct {
			struct its_vpe *vpe;
			struct its_collection *col;
			u16 seq_num;
			u16 its_list;
		} its_vmovp_cmd;
	};
};

/*
 * The ITS command block, which is what the ITS actually parses.
 */
struct its_cmd_block {
	u64	raw_cmd[4];
};

#define ITS_CMD_QUEUE_SZ		SZ_64K
#define ITS_CMD_QUEUE_NR_ENTRIES	(ITS_CMD_QUEUE_SZ / sizeof(struct its_cmd_block))
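/*
 * Note: each command block is four 64-bit words (32 bytes), so the
 * 64kB queue above holds 2048 command slots.
 */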

typedef struct its_collection *(*its_cmd_builder_t)(struct its_node *,
						    struct its_cmd_block *,
						    struct its_cmd_desc *);

typedef struct its_vpe *(*its_cmd_vbuilder_t)(struct its_node *,
					      struct its_cmd_block *,
					      struct its_cmd_desc *);

static void its_mask_encode(u64 *raw_cmd, u64 val, int h, int l)
{
	u64 mask = GENMASK_ULL(h, l);
	*raw_cmd &= ~mask;
	*raw_cmd |= (val << l) & mask;
}
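/*
 * The helpers below each place one command field into bits [h:l] of a
 * given command word; e.g. its_encode_devid() puts the 32-bit device ID
 * in bits [63:32] of command word 0.
 */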

static void its_encode_cmd(struct its_cmd_block *cmd, u8 cmd_nr)
{
	its_mask_encode(&cmd->raw_cmd[0], cmd_nr, 7, 0);
}

static void its_encode_devid(struct its_cmd_block *cmd, u32 devid)
{
	its_mask_encode(&cmd->raw_cmd[0], devid, 63, 32);
}

static void its_encode_event_id(struct its_cmd_block *cmd, u32 id)
{
	its_mask_encode(&cmd->raw_cmd[1], id, 31, 0);
}

static void its_encode_phys_id(struct its_cmd_block *cmd, u32 phys_id)
{
	its_mask_encode(&cmd->raw_cmd[1], phys_id, 63, 32);
}

static void its_encode_size(struct its_cmd_block *cmd, u8 size)
{
	its_mask_encode(&cmd->raw_cmd[1], size, 4, 0);
}

static void its_encode_itt(struct its_cmd_block *cmd, u64 itt_addr)
{
	its_mask_encode(&cmd->raw_cmd[2], itt_addr >> 8, 51, 8);
}

static void its_encode_valid(struct its_cmd_block *cmd, int valid)
{
	its_mask_encode(&cmd->raw_cmd[2], !!valid, 63, 63);
}

static void its_encode_target(struct its_cmd_block *cmd, u64 target_addr)
{
	its_mask_encode(&cmd->raw_cmd[2], target_addr >> 16, 51, 16);
}

static void its_encode_collection(struct its_cmd_block *cmd, u16 col)
{
	its_mask_encode(&cmd->raw_cmd[2], col, 15, 0);
}

static void its_encode_vpeid(struct its_cmd_block *cmd, u16 vpeid)
{
	its_mask_encode(&cmd->raw_cmd[1], vpeid, 47, 32);
}

static void its_encode_virt_id(struct its_cmd_block *cmd, u32 virt_id)
{
	its_mask_encode(&cmd->raw_cmd[2], virt_id, 31, 0);
}

static void its_encode_db_phys_id(struct its_cmd_block *cmd, u32 db_phys_id)
{
	its_mask_encode(&cmd->raw_cmd[2], db_phys_id, 63, 32);
}

static void its_encode_db_valid(struct its_cmd_block *cmd, bool db_valid)
{
	its_mask_encode(&cmd->raw_cmd[2], db_valid, 0, 0);
}

static void its_encode_seq_num(struct its_cmd_block *cmd, u16 seq_num)
{
	its_mask_encode(&cmd->raw_cmd[0], seq_num, 47, 32);
}

static void its_encode_its_list(struct its_cmd_block *cmd, u16 its_list)
{
	its_mask_encode(&cmd->raw_cmd[1], its_list, 15, 0);
}

static void its_encode_vpt_addr(struct its_cmd_block *cmd, u64 vpt_pa)
{
	its_mask_encode(&cmd->raw_cmd[3], vpt_pa >> 16, 51, 16);
}

static void its_encode_vpt_size(struct its_cmd_block *cmd, u8 vpt_size)
{
	its_mask_encode(&cmd->raw_cmd[3], vpt_size, 4, 0);
}

static inline void its_fixup_cmd(struct its_cmd_block *cmd)
{
	/* Let's fixup BE commands */
	cmd->raw_cmd[0] = cpu_to_le64(cmd->raw_cmd[0]);
	cmd->raw_cmd[1] = cpu_to_le64(cmd->raw_cmd[1]);
	cmd->raw_cmd[2] = cpu_to_le64(cmd->raw_cmd[2]);
	cmd->raw_cmd[3] = cpu_to_le64(cmd->raw_cmd[3]);
}

static struct its_collection *its_build_mapd_cmd(struct its_node *its,
						 struct its_cmd_block *cmd,
						 struct its_cmd_desc *desc)
{
	unsigned long itt_addr;
	u8 size = ilog2(desc->its_mapd_cmd.dev->nr_ites);

	itt_addr = virt_to_phys(desc->its_mapd_cmd.dev->itt);
	itt_addr = ALIGN(itt_addr, ITS_ITT_ALIGN);

	its_encode_cmd(cmd, GITS_CMD_MAPD);
	its_encode_devid(cmd, desc->its_mapd_cmd.dev->device_id);
	its_encode_size(cmd, size - 1);
	its_encode_itt(cmd, itt_addr);
	its_encode_valid(cmd, desc->its_mapd_cmd.valid);

	its_fixup_cmd(cmd);

	return NULL;
}
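/*
 * MAPD encodes the ITT span as ilog2(nr_ites) - 1 and has no target
 * collection; the NULL return above tells the command issuer (see
 * BUILD_SINGLE_CMD_FUNC further down) that no SYNC needs to follow it.
 */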

static struct its_collection *its_build_mapc_cmd(struct its_node *its,
						 struct its_cmd_block *cmd,
						 struct its_cmd_desc *desc)
{
	its_encode_cmd(cmd, GITS_CMD_MAPC);
	its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id);
	its_encode_target(cmd, desc->its_mapc_cmd.col->target_address);
	its_encode_valid(cmd, desc->its_mapc_cmd.valid);

	its_fixup_cmd(cmd);

	return desc->its_mapc_cmd.col;
}

static struct its_collection *its_build_mapti_cmd(struct its_node *its,
						  struct its_cmd_block *cmd,
						  struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_mapti_cmd.dev,
			       desc->its_mapti_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_MAPTI);
	its_encode_devid(cmd, desc->its_mapti_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_mapti_cmd.event_id);
	its_encode_phys_id(cmd, desc->its_mapti_cmd.phys_id);
	its_encode_collection(cmd, col->col_id);

	its_fixup_cmd(cmd);

	return valid_col(col);
}

static struct its_collection *its_build_movi_cmd(struct its_node *its,
						 struct its_cmd_block *cmd,
						 struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_movi_cmd.dev,
			       desc->its_movi_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_MOVI);
	its_encode_devid(cmd, desc->its_movi_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_movi_cmd.event_id);
	its_encode_collection(cmd, desc->its_movi_cmd.col->col_id);

	its_fixup_cmd(cmd);

	return valid_col(col);
}

static struct its_collection *its_build_discard_cmd(struct its_node *its,
						    struct its_cmd_block *cmd,
						    struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_discard_cmd.dev,
			       desc->its_discard_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_DISCARD);
	its_encode_devid(cmd, desc->its_discard_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_discard_cmd.event_id);

	its_fixup_cmd(cmd);

	return valid_col(col);
}

static struct its_collection *its_build_inv_cmd(struct its_node *its,
						struct its_cmd_block *cmd,
						struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_inv_cmd.dev,
			       desc->its_inv_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_INV);
	its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_inv_cmd.event_id);

	its_fixup_cmd(cmd);

	return valid_col(col);
}

static struct its_collection *its_build_int_cmd(struct its_node *its,
						struct its_cmd_block *cmd,
						struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_int_cmd.dev,
			       desc->its_int_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_INT);
	its_encode_devid(cmd, desc->its_int_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_int_cmd.event_id);

	its_fixup_cmd(cmd);

	return valid_col(col);
}

static struct its_collection *its_build_clear_cmd(struct its_node *its,
						  struct its_cmd_block *cmd,
						  struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_clear_cmd.dev,
			       desc->its_clear_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_CLEAR);
	its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_clear_cmd.event_id);

	its_fixup_cmd(cmd);

	return valid_col(col);
}

static struct its_collection *its_build_invall_cmd(struct its_node *its,
						   struct its_cmd_block *cmd,
						   struct its_cmd_desc *desc)
{
	its_encode_cmd(cmd, GITS_CMD_INVALL);
	its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id);

	its_fixup_cmd(cmd);

	return NULL;
}

static struct its_vpe *its_build_vinvall_cmd(struct its_node *its,
					     struct its_cmd_block *cmd,
					     struct its_cmd_desc *desc)
{
	its_encode_cmd(cmd, GITS_CMD_VINVALL);
	its_encode_vpeid(cmd, desc->its_vinvall_cmd.vpe->vpe_id);

	its_fixup_cmd(cmd);

	return valid_vpe(its, desc->its_vinvall_cmd.vpe);
}

static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
					   struct its_cmd_block *cmd,
					   struct its_cmd_desc *desc)
{
	unsigned long vpt_addr;
	u64 target;

	vpt_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page));
	target = desc->its_vmapp_cmd.col->target_address + its->vlpi_redist_offset;

	its_encode_cmd(cmd, GITS_CMD_VMAPP);
	its_encode_vpeid(cmd, desc->its_vmapp_cmd.vpe->vpe_id);
	its_encode_valid(cmd, desc->its_vmapp_cmd.valid);
	its_encode_target(cmd, target);
	its_encode_vpt_addr(cmd, vpt_addr);
	its_encode_vpt_size(cmd, LPI_NRBITS - 1);

	its_fixup_cmd(cmd);

	return valid_vpe(its, desc->its_vmapp_cmd.vpe);
}

static struct its_vpe *its_build_vmapti_cmd(struct its_node *its,
					    struct its_cmd_block *cmd,
					    struct its_cmd_desc *desc)
{
	u32 db;

	if (desc->its_vmapti_cmd.db_enabled)
		db = desc->its_vmapti_cmd.vpe->vpe_db_lpi;
	else
		db = 1023;

	its_encode_cmd(cmd, GITS_CMD_VMAPTI);
	its_encode_devid(cmd, desc->its_vmapti_cmd.dev->device_id);
	its_encode_vpeid(cmd, desc->its_vmapti_cmd.vpe->vpe_id);
	its_encode_event_id(cmd, desc->its_vmapti_cmd.event_id);
	its_encode_db_phys_id(cmd, db);
	its_encode_virt_id(cmd, desc->its_vmapti_cmd.virt_id);

	its_fixup_cmd(cmd);

	return valid_vpe(its, desc->its_vmapti_cmd.vpe);
}

static struct its_vpe *its_build_vmovi_cmd(struct its_node *its,
					   struct its_cmd_block *cmd,
					   struct its_cmd_desc *desc)
{
	u32 db;

	if (desc->its_vmovi_cmd.db_enabled)
		db = desc->its_vmovi_cmd.vpe->vpe_db_lpi;
	else
		db = 1023;

	its_encode_cmd(cmd, GITS_CMD_VMOVI);
	its_encode_devid(cmd, desc->its_vmovi_cmd.dev->device_id);
	its_encode_vpeid(cmd, desc->its_vmovi_cmd.vpe->vpe_id);
	its_encode_event_id(cmd, desc->its_vmovi_cmd.event_id);
	its_encode_db_phys_id(cmd, db);
	its_encode_db_valid(cmd, true);

	its_fixup_cmd(cmd);

	return valid_vpe(its, desc->its_vmovi_cmd.vpe);
}

static struct its_vpe *its_build_vmovp_cmd(struct its_node *its,
					   struct its_cmd_block *cmd,
					   struct its_cmd_desc *desc)
{
	u64 target;

	target = desc->its_vmovp_cmd.col->target_address + its->vlpi_redist_offset;
	its_encode_cmd(cmd, GITS_CMD_VMOVP);
	its_encode_seq_num(cmd, desc->its_vmovp_cmd.seq_num);
	its_encode_its_list(cmd, desc->its_vmovp_cmd.its_list);
	its_encode_vpeid(cmd, desc->its_vmovp_cmd.vpe->vpe_id);
	its_encode_target(cmd, target);

	its_fixup_cmd(cmd);

	return valid_vpe(its, desc->its_vmovp_cmd.vpe);
}

static u64 its_cmd_ptr_to_offset(struct its_node *its,
				 struct its_cmd_block *ptr)
{
	return (ptr - its->cmd_base) * sizeof(*ptr);
}

static int its_queue_full(struct its_node *its)
{
	int widx;
	int ridx;

	widx = its->cmd_write - its->cmd_base;
	ridx = readl_relaxed(its->base + GITS_CREADR) / sizeof(struct its_cmd_block);

	/* This is incredibly unlikely to happen, unless the ITS locks up. */
	if (((widx + 1) % ITS_CMD_QUEUE_NR_ENTRIES) == ridx)
		return 1;

	return 0;
}
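/*
 * Note that one slot is deliberately kept free: the queue is reported
 * full as soon as advancing the write pointer would make it catch up
 * with GITS_CREADR, so at most ITS_CMD_QUEUE_NR_ENTRIES - 1 commands
 * are ever outstanding.
 */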

static struct its_cmd_block *its_allocate_entry(struct its_node *its)
{
	struct its_cmd_block *cmd;
	u32 count = 1000000;	/* 1s! */

	while (its_queue_full(its)) {
		count--;
		if (!count) {
			pr_err_ratelimited("ITS queue not draining\n");
			return NULL;
		}
		cpu_relax();
		udelay(1);
	}

	cmd = its->cmd_write++;

	/* Handle queue wrapping */
	if (its->cmd_write == (its->cmd_base + ITS_CMD_QUEUE_NR_ENTRIES))
		its->cmd_write = its->cmd_base;

	/* Clear command */
	cmd->raw_cmd[0] = 0;
	cmd->raw_cmd[1] = 0;
	cmd->raw_cmd[2] = 0;
	cmd->raw_cmd[3] = 0;

	return cmd;
}

static struct its_cmd_block *its_post_commands(struct its_node *its)
{
	u64 wr = its_cmd_ptr_to_offset(its, its->cmd_write);

	writel_relaxed(wr, its->base + GITS_CWRITER);

	return its->cmd_write;
}

static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd)
{
	/*
	 * Make sure the commands written to memory are observable by
	 * the ITS.
	 */
	if (its->flags & ITS_FLAGS_CMDQ_NEEDS_FLUSHING)
		gic_flush_dcache_to_poc(cmd, sizeof(*cmd));
	else
		dsb(ishst);
}

static int its_wait_for_range_completion(struct its_node *its,
					 struct its_cmd_block *from,
					 struct its_cmd_block *to)
{
	u64 rd_idx, from_idx, to_idx;
	u32 count = 1000000;	/* 1s! */

	from_idx = its_cmd_ptr_to_offset(its, from);
	to_idx = its_cmd_ptr_to_offset(its, to);

	while (1) {
		rd_idx = readl_relaxed(its->base + GITS_CREADR);

		/* Direct case */
		if (from_idx < to_idx && rd_idx >= to_idx)
			break;

		/* Wrapped case */
		if (from_idx >= to_idx && rd_idx >= to_idx && rd_idx < from_idx)
			break;

		count--;
		if (!count) {
			pr_err_ratelimited("ITS queue timeout (%llu %llu %llu)\n",
					   from_idx, to_idx, rd_idx);
			return -1;
		}
		cpu_relax();
		udelay(1);
	}

	return 0;
}

/* Warning, macro hell follows */
#define BUILD_SINGLE_CMD_FUNC(name, buildtype, synctype, buildfn)	\
void name(struct its_node *its,						\
	  buildtype builder,						\
	  struct its_cmd_desc *desc)					\
{									\
	struct its_cmd_block *cmd, *sync_cmd, *next_cmd;		\
	synctype *sync_obj;						\
	unsigned long flags;						\
									\
	raw_spin_lock_irqsave(&its->lock, flags);			\
									\
	cmd = its_allocate_entry(its);					\
	if (!cmd) {		/* We're soooooo screwed... */		\
		raw_spin_unlock_irqrestore(&its->lock, flags);		\
		return;							\
	}								\
	sync_obj = builder(its, cmd, desc);				\
	its_flush_cmd(its, cmd);					\
									\
	if (sync_obj) {							\
		sync_cmd = its_allocate_entry(its);			\
		if (!sync_cmd)						\
			goto post;					\
									\
		buildfn(its, sync_cmd, sync_obj);			\
		its_flush_cmd(its, sync_cmd);				\
	}								\
									\
post:									\
	next_cmd = its_post_commands(its);				\
	raw_spin_unlock_irqrestore(&its->lock, flags);			\
									\
	if (its_wait_for_range_completion(its, cmd, next_cmd))		\
		pr_err_ratelimited("ITS cmd %ps failed\n", builder);	\
}

static void its_build_sync_cmd(struct its_node *its,
			       struct its_cmd_block *sync_cmd,
			       struct its_collection *sync_col)
{
	its_encode_cmd(sync_cmd, GITS_CMD_SYNC);
	its_encode_target(sync_cmd, sync_col->target_address);

	its_fixup_cmd(sync_cmd);
}

static BUILD_SINGLE_CMD_FUNC(its_send_single_command, its_cmd_builder_t,
			     struct its_collection, its_build_sync_cmd)

static void its_build_vsync_cmd(struct its_node *its,
				struct its_cmd_block *sync_cmd,
				struct its_vpe *sync_vpe)
{
	its_encode_cmd(sync_cmd, GITS_CMD_VSYNC);
	its_encode_vpeid(sync_cmd, sync_vpe->vpe_id);

	its_fixup_cmd(sync_cmd);
}

static BUILD_SINGLE_CMD_FUNC(its_send_single_vcommand, its_cmd_vbuilder_t,
			     struct its_vpe, its_build_vsync_cmd)

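/*
 * The two expansions above generate its_send_single_command() and
 * its_send_single_vcommand(): queue the requested command, follow it
 * with a SYNC (or VSYNC) when the builder returned an object to
 * synchronize on, publish the new write pointer via GITS_CWRITER, and
 * spin until GITS_CREADR shows the ITS has consumed the range.
 */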
static void its_send_int(struct its_device *dev, u32 event_id)
{
	struct its_cmd_desc desc;

	desc.its_int_cmd.dev = dev;
	desc.its_int_cmd.event_id = event_id;

	its_send_single_command(dev->its, its_build_int_cmd, &desc);
}

static void its_send_clear(struct its_device *dev, u32 event_id)
{
	struct its_cmd_desc desc;

	desc.its_clear_cmd.dev = dev;
	desc.its_clear_cmd.event_id = event_id;

	its_send_single_command(dev->its, its_build_clear_cmd, &desc);
}

static void its_send_inv(struct its_device *dev, u32 event_id)
{
	struct its_cmd_desc desc;

	desc.its_inv_cmd.dev = dev;
	desc.its_inv_cmd.event_id = event_id;

	its_send_single_command(dev->its, its_build_inv_cmd, &desc);
}

static void its_send_mapd(struct its_device *dev, int valid)
{
	struct its_cmd_desc desc;

	desc.its_mapd_cmd.dev = dev;
	desc.its_mapd_cmd.valid = !!valid;

	its_send_single_command(dev->its, its_build_mapd_cmd, &desc);
}

static void its_send_mapc(struct its_node *its, struct its_collection *col,
			  int valid)
{
	struct its_cmd_desc desc;

	desc.its_mapc_cmd.col = col;
	desc.its_mapc_cmd.valid = !!valid;

	its_send_single_command(its, its_build_mapc_cmd, &desc);
}

static void its_send_mapti(struct its_device *dev, u32 irq_id, u32 id)
{
	struct its_cmd_desc desc;

	desc.its_mapti_cmd.dev = dev;
	desc.its_mapti_cmd.phys_id = irq_id;
	desc.its_mapti_cmd.event_id = id;

	its_send_single_command(dev->its, its_build_mapti_cmd, &desc);
}

static void its_send_movi(struct its_device *dev,
			  struct its_collection *col, u32 id)
{
	struct its_cmd_desc desc;

	desc.its_movi_cmd.dev = dev;
	desc.its_movi_cmd.col = col;
	desc.its_movi_cmd.event_id = id;

	its_send_single_command(dev->its, its_build_movi_cmd, &desc);
}

static void its_send_discard(struct its_device *dev, u32 id)
{
	struct its_cmd_desc desc;

	desc.its_discard_cmd.dev = dev;
	desc.its_discard_cmd.event_id = id;

	its_send_single_command(dev->its, its_build_discard_cmd, &desc);
}

static void its_send_invall(struct its_node *its, struct its_collection *col)
{
	struct its_cmd_desc desc;

	desc.its_invall_cmd.col = col;

	its_send_single_command(its, its_build_invall_cmd, &desc);
}

static void its_send_vmapti(struct its_device *dev, u32 id)
{
	struct its_vlpi_map *map = &dev->event_map.vlpi_maps[id];
	struct its_cmd_desc desc;

	desc.its_vmapti_cmd.vpe = map->vpe;
	desc.its_vmapti_cmd.dev = dev;
	desc.its_vmapti_cmd.virt_id = map->vintid;
	desc.its_vmapti_cmd.event_id = id;
	desc.its_vmapti_cmd.db_enabled = map->db_enabled;

	its_send_single_vcommand(dev->its, its_build_vmapti_cmd, &desc);
}

static void its_send_vmovi(struct its_device *dev, u32 id)
{
	struct its_vlpi_map *map = &dev->event_map.vlpi_maps[id];
	struct its_cmd_desc desc;

	desc.its_vmovi_cmd.vpe = map->vpe;
	desc.its_vmovi_cmd.dev = dev;
	desc.its_vmovi_cmd.event_id = id;
	desc.its_vmovi_cmd.db_enabled = map->db_enabled;

	its_send_single_vcommand(dev->its, its_build_vmovi_cmd, &desc);
}

static void its_send_vmapp(struct its_node *its,
			   struct its_vpe *vpe, bool valid)
{
	struct its_cmd_desc desc;

	desc.its_vmapp_cmd.vpe = vpe;
	desc.its_vmapp_cmd.valid = valid;
	desc.its_vmapp_cmd.col = &its->collections[vpe->col_idx];

	its_send_single_vcommand(its, its_build_vmapp_cmd, &desc);
}

static void its_send_vmovp(struct its_vpe *vpe)
{
	struct its_cmd_desc desc;
	struct its_node *its;
	unsigned long flags;
	int col_id = vpe->col_idx;

	desc.its_vmovp_cmd.vpe = vpe;
	desc.its_vmovp_cmd.its_list = (u16)its_list_map;

	if (!its_list_map) {
		its = list_first_entry(&its_nodes, struct its_node, entry);
		desc.its_vmovp_cmd.seq_num = 0;
		desc.its_vmovp_cmd.col = &its->collections[col_id];
		its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
		return;
	}

	/*
	 * Yet another marvel of the architecture. If using the
	 * its_list "feature", we need to make sure that all ITSs
	 * receive all VMOVP commands in the same order. The only way
	 * to guarantee this is to make vmovp a serialization point.
	 *
	 * Wall <-- Head.
	 */
	raw_spin_lock_irqsave(&vmovp_lock, flags);

	desc.its_vmovp_cmd.seq_num = vmovp_seq_num++;

	/* Emit VMOVPs */
	list_for_each_entry(its, &its_nodes, entry) {
		if (!its->is_v4)
			continue;

		if (!vpe->its_vm->vlpi_count[its->list_nr])
			continue;

		desc.its_vmovp_cmd.col = &its->collections[col_id];
		its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
	}

	raw_spin_unlock_irqrestore(&vmovp_lock, flags);
}

static void its_send_vinvall(struct its_node *its, struct its_vpe *vpe)
{
	struct its_cmd_desc desc;

	desc.its_vinvall_cmd.vpe = vpe;
	its_send_single_vcommand(its, its_build_vinvall_cmd, &desc);
}

/*
 * irqchip functions - assumes MSI, mostly.
 */

static inline u32 its_get_event_id(struct irq_data *d)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	return d->hwirq - its_dev->event_map.lpi_base;
}

static void lpi_write_config(struct irq_data *d, u8 clr, u8 set)
{
	irq_hw_number_t hwirq;
	struct page *prop_page;
	u8 *cfg;

	if (irqd_is_forwarded_to_vcpu(d)) {
		struct its_device *its_dev = irq_data_get_irq_chip_data(d);
		u32 event = its_get_event_id(d);
		struct its_vlpi_map *map;

		prop_page = its_dev->event_map.vm->vprop_page;
		map = &its_dev->event_map.vlpi_maps[event];
		hwirq = map->vintid;

		/* Remember the updated property */
		map->properties &= ~clr;
		map->properties |= set | LPI_PROP_GROUP1;
	} else {
		prop_page = gic_rdists->prop_page;
		hwirq = d->hwirq;
	}

	cfg = page_address(prop_page) + hwirq - 8192;
	*cfg &= ~clr;
	*cfg |= set | LPI_PROP_GROUP1;

	/*
	 * Make the above write visible to the redistributors.
	 * And yes, we're flushing exactly: One. Single. Byte.
	 * Humpf...
	 */
	if (gic_rdists->flags & RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING)
		gic_flush_dcache_to_poc(cfg, sizeof(*cfg));
	else
		dsb(ishst);
}
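/*
 * The property table is indexed by LPI number: LPIs start at INTID
 * 8192, hence the "hwirq - 8192" offset into the (v)prop page, and
 * each entry is a single byte holding priority, group and enable bits.
 */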

static void lpi_update_config(struct irq_data *d, u8 clr, u8 set)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);

	lpi_write_config(d, clr, set);
	its_send_inv(its_dev, its_get_event_id(d));
}

static void its_vlpi_set_doorbell(struct irq_data *d, bool enable)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	u32 event = its_get_event_id(d);

	if (its_dev->event_map.vlpi_maps[event].db_enabled == enable)
		return;

	its_dev->event_map.vlpi_maps[event].db_enabled = enable;

	/*
	 * More fun with the architecture:
	 *
	 * Ideally, we'd issue a VMAPTI to set the doorbell to its LPI
	 * value or to 1023, depending on the enable bit. But that
	 * would be issuing a mapping for an /existing/ DevID+EventID
	 * pair, which is UNPREDICTABLE. Instead, let's issue a VMOVI
	 * to the /same/ vPE, using this opportunity to adjust the
	 * doorbell. Mouahahahaha. We loves it, Precious.
	 */
	its_send_vmovi(its_dev, event);
}

static void its_mask_irq(struct irq_data *d)
{
	if (irqd_is_forwarded_to_vcpu(d))
		its_vlpi_set_doorbell(d, false);

	lpi_update_config(d, LPI_PROP_ENABLED, 0);
}

static void its_unmask_irq(struct irq_data *d)
{
	if (irqd_is_forwarded_to_vcpu(d))
		its_vlpi_set_doorbell(d, true);

	lpi_update_config(d, 0, LPI_PROP_ENABLED);
}

static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
			    bool force)
{
	unsigned int cpu;
	const struct cpumask *cpu_mask = cpu_online_mask;
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	struct its_collection *target_col;
	u32 id = its_get_event_id(d);

	/* A forwarded interrupt should use irq_set_vcpu_affinity */
	if (irqd_is_forwarded_to_vcpu(d))
		return -EINVAL;

	/* lpi cannot be routed to a redistributor that is on a foreign node */
	if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
		if (its_dev->its->numa_node >= 0) {
			cpu_mask = cpumask_of_node(its_dev->its->numa_node);
			if (!cpumask_intersects(mask_val, cpu_mask))
				return -EINVAL;
		}
	}

	cpu = cpumask_any_and(mask_val, cpu_mask);

	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	/* don't set the affinity when the target cpu is the same as the current one */
	if (cpu != its_dev->event_map.col_map[id]) {
		target_col = &its_dev->its->collections[cpu];
		its_send_movi(its_dev, target_col, id);
		its_dev->event_map.col_map[id] = cpu;
		irq_data_update_effective_affinity(d, cpumask_of(cpu));
	}

	return IRQ_SET_MASK_OK_DONE;
}

static u64 its_irq_get_msi_base(struct its_device *its_dev)
{
	struct its_node *its = its_dev->its;

	return its->phys_base + GITS_TRANSLATER;
}

static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	struct its_node *its;
	u64 addr;

	its = its_dev->its;
	addr = its->get_msi_base(its_dev);

	msg->address_lo		= lower_32_bits(addr);
	msg->address_hi		= upper_32_bits(addr);
	msg->data		= its_get_event_id(d);

	iommu_dma_map_msi_msg(d->irq, msg);
}
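/*
 * In other words: the MSI doorbell a device writes to is the ITS
 * GITS_TRANSLATER register (possibly adjusted by a quirk-specific
 * get_msi_base hook), and the MSI payload is simply the event ID.
 */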

static int its_irq_set_irqchip_state(struct irq_data *d,
				     enum irqchip_irq_state which,
				     bool state)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	u32 event = its_get_event_id(d);

	if (which != IRQCHIP_STATE_PENDING)
		return -EINVAL;

	if (state)
		its_send_int(its_dev, event);
	else
		its_send_clear(its_dev, event);

	return 0;
}

static void its_map_vm(struct its_node *its, struct its_vm *vm)
{
	unsigned long flags;

	/* Not using the ITS list? Everything is always mapped. */
	if (!its_list_map)
		return;

	raw_spin_lock_irqsave(&vmovp_lock, flags);

	/*
	 * If the VM wasn't mapped yet, iterate over the vpes and get
	 * them mapped now.
	 */
	vm->vlpi_count[its->list_nr]++;

	if (vm->vlpi_count[its->list_nr] == 1) {
		int i;

		for (i = 0; i < vm->nr_vpes; i++) {
			struct its_vpe *vpe = vm->vpes[i];
			struct irq_data *d = irq_get_irq_data(vpe->irq);

			/* Map the VPE to the first possible CPU */
			vpe->col_idx = cpumask_first(cpu_online_mask);
			its_send_vmapp(its, vpe, true);
			its_send_vinvall(its, vpe);
			irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
		}
	}

	raw_spin_unlock_irqrestore(&vmovp_lock, flags);
}

static void its_unmap_vm(struct its_node *its, struct its_vm *vm)
{
	unsigned long flags;

	/* Not using the ITS list? Everything is always mapped. */
	if (!its_list_map)
		return;

	raw_spin_lock_irqsave(&vmovp_lock, flags);

	if (!--vm->vlpi_count[its->list_nr]) {
		int i;

		for (i = 0; i < vm->nr_vpes; i++)
			its_send_vmapp(its, vm->vpes[i], false);
	}

	raw_spin_unlock_irqrestore(&vmovp_lock, flags);
}
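/*
 * its_map_vm()/its_unmap_vm() keep a per-ITS count of VLPIs in
 * vm->vlpi_count[]: the VPEs of a VM are VMAPPed on an ITS when the
 * first VLPI is mapped through it, and torn down again when the last
 * one goes away.
 */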

static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	u32 event = its_get_event_id(d);
	int ret = 0;

	if (!info->map)
		return -EINVAL;

	mutex_lock(&its_dev->event_map.vlpi_lock);

	if (!its_dev->event_map.vm) {
		struct its_vlpi_map *maps;

		maps = kcalloc(its_dev->event_map.nr_lpis, sizeof(*maps),
			       GFP_KERNEL);
		if (!maps) {
			ret = -ENOMEM;
			goto out;
		}

		its_dev->event_map.vm = info->map->vm;
		its_dev->event_map.vlpi_maps = maps;
	} else if (its_dev->event_map.vm != info->map->vm) {
		ret = -EINVAL;
		goto out;
	}

	/* Get our private copy of the mapping information */
	its_dev->event_map.vlpi_maps[event] = *info->map;

	if (irqd_is_forwarded_to_vcpu(d)) {
		/* Already mapped, move it around */
		its_send_vmovi(its_dev, event);
	} else {
		/* Ensure all the VPEs are mapped on this ITS */
		its_map_vm(its_dev->its, info->map->vm);

		/*
		 * Flag the interrupt as forwarded so that we can
		 * start poking the virtual property table.
		 */
		irqd_set_forwarded_to_vcpu(d);

		/* Write out the property to the prop table */
		lpi_write_config(d, 0xff, info->map->properties);

		/* Drop the physical mapping */
		its_send_discard(its_dev, event);

		/* and install the virtual one */
		its_send_vmapti(its_dev, event);

		/* Increment the number of VLPIs */
		its_dev->event_map.nr_vlpis++;
	}

out:
	mutex_unlock(&its_dev->event_map.vlpi_lock);
	return ret;
}

static int its_vlpi_get(struct irq_data *d, struct its_cmd_info *info)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	u32 event = its_get_event_id(d);
	int ret = 0;

	mutex_lock(&its_dev->event_map.vlpi_lock);

	if (!its_dev->event_map.vm ||
	    !its_dev->event_map.vlpi_maps[event].vm) {
		ret = -EINVAL;
		goto out;
	}

	/* Copy our mapping information to the incoming request */
	*info->map = its_dev->event_map.vlpi_maps[event];

out:
	mutex_unlock(&its_dev->event_map.vlpi_lock);
	return ret;
}

static int its_vlpi_unmap(struct irq_data *d)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	u32 event = its_get_event_id(d);
	int ret = 0;

	mutex_lock(&its_dev->event_map.vlpi_lock);

	if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) {
		ret = -EINVAL;
		goto out;
	}

	/* Drop the virtual mapping */
	its_send_discard(its_dev, event);

	/* and restore the physical one */
	irqd_clr_forwarded_to_vcpu(d);
	its_send_mapti(its_dev, d->hwirq, event);
	lpi_update_config(d, 0xff, (LPI_PROP_DEFAULT_PRIO |
				    LPI_PROP_ENABLED |
				    LPI_PROP_GROUP1));

	/* Potentially unmap the VM from this ITS */
	its_unmap_vm(its_dev->its, its_dev->event_map.vm);

	/*
	 * Drop the refcount and make the device available again if
	 * this was the last VLPI.
	 */
	if (!--its_dev->event_map.nr_vlpis) {
		its_dev->event_map.vm = NULL;
		kfree(its_dev->event_map.vlpi_maps);
	}

out:
	mutex_unlock(&its_dev->event_map.vlpi_lock);
	return ret;
}

static int its_vlpi_prop_update(struct irq_data *d, struct its_cmd_info *info)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);

	if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d))
		return -EINVAL;

	if (info->cmd_type == PROP_UPDATE_AND_INV_VLPI)
		lpi_update_config(d, 0xff, info->config);
	else
		lpi_write_config(d, 0xff, info->config);
	its_vlpi_set_doorbell(d, !!(info->config & LPI_PROP_ENABLED));

	return 0;
}

static int its_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	struct its_cmd_info *info = vcpu_info;

	/* Need a v4 ITS */
	if (!its_dev->its->is_v4)
		return -EINVAL;

	/* Unmap request? */
	if (!info)
		return its_vlpi_unmap(d);

	switch (info->cmd_type) {
	case MAP_VLPI:
		return its_vlpi_map(d, info);

	case GET_VLPI:
		return its_vlpi_get(d, info);

	case PROP_UPDATE_VLPI:
	case PROP_UPDATE_AND_INV_VLPI:
		return its_vlpi_prop_update(d, info);

	default:
		return -EINVAL;
	}
}

static struct irq_chip its_irq_chip = {
	.name			= "ITS",
	.irq_mask		= its_mask_irq,
	.irq_unmask		= its_unmask_irq,
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_set_affinity	= its_set_affinity,
	.irq_compose_msi_msg	= its_irq_compose_msi_msg,
	.irq_set_irqchip_state	= its_irq_set_irqchip_state,
	.irq_set_vcpu_affinity	= its_irq_set_vcpu_affinity,
};

/*
 * How we allocate LPIs:
 *
 * lpi_range_list contains ranges of LPIs that are available to
 * allocate from. To allocate LPIs, just pick the first range that
 * fits the required allocation, and reduce it by the required
 * amount. Once empty, remove the range from the list.
 *
 * To free a range of LPIs, add a free range to the list, sort it and
 * merge the result if the new range happens to be adjacent to an
 * already free block.
 *
 * The consequence of the above is that allocation cost is low, but
 * freeing is expensive. We assume that freeing rarely occurs.
 */

/*
 * Compatibility defines until we fully refactor the allocator
 */
#define IRQS_PER_CHUNK_SHIFT	5
#define IRQS_PER_CHUNK		(1UL << IRQS_PER_CHUNK_SHIFT)

static DEFINE_MUTEX(lpi_range_lock);
static LIST_HEAD(lpi_range_list);

struct lpi_range {
	struct list_head	entry;
	u32			base_id;
	u32			span;
};

static struct lpi_range *mk_lpi_range(u32 base, u32 span)
{
	struct lpi_range *range;

	range = kzalloc(sizeof(*range), GFP_KERNEL);
	if (range) {
		INIT_LIST_HEAD(&range->entry);
		range->base_id = base;
		range->span = span;
	}

	return range;
}

static int lpi_range_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct lpi_range *ra, *rb;

	ra = container_of(a, struct lpi_range, entry);
	rb = container_of(b, struct lpi_range, entry);

	return rb->base_id - ra->base_id;
}

static void merge_lpi_ranges(void)
{
	struct lpi_range *range, *tmp;

	list_for_each_entry_safe(range, tmp, &lpi_range_list, entry) {
		if (!list_is_last(&range->entry, &lpi_range_list) &&
		    (tmp->base_id == (range->base_id + range->span))) {
			tmp->base_id = range->base_id;
			tmp->span += range->span;
			list_del(&range->entry);
			kfree(range);
		}
	}
}

static int alloc_lpi_range(u32 nr_lpis, u32 *base)
{
	struct lpi_range *range, *tmp;
	int err = -ENOSPC;

	mutex_lock(&lpi_range_lock);

	list_for_each_entry_safe(range, tmp, &lpi_range_list, entry) {
		if (range->span >= nr_lpis) {
			*base = range->base_id;
			range->base_id += nr_lpis;
			range->span -= nr_lpis;

			if (range->span == 0) {
				list_del(&range->entry);
				kfree(range);
			}

			err = 0;
			break;
		}
	}

	mutex_unlock(&lpi_range_lock);

	pr_debug("ITS: alloc %u:%u\n", *base, nr_lpis);
	return err;
}

static int free_lpi_range(u32 base, u32 nr_lpis)
{
	struct lpi_range *new;
	int err = 0;

	mutex_lock(&lpi_range_lock);

	new = mk_lpi_range(base, nr_lpis);
	if (!new) {
		err = -ENOMEM;
		goto out;
	}

	list_add(&new->entry, &lpi_range_list);
	list_sort(NULL, &lpi_range_list, lpi_range_cmp);
	merge_lpi_ranges();
out:
	mutex_unlock(&lpi_range_lock);
	return err;
}

static int __init its_lpi_init(u32 id_bits)
{
	u32 lpis = (1UL << id_bits) - 8192;
	int err;

	/*
	 * Initializing the allocator is just the same as freeing the
	 * full range of LPIs.
	 */
	err = free_lpi_range(8192, lpis);
	pr_debug("ITS: Allocator initialized for %u LPIs\n", lpis);
	return err;
}
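/*
 * For example, with id_bits == 16 the call above seeds lpi_range_list
 * with a single free range covering INTIDs [8192, 65535]; subsequent
 * allocations carve chunks off the front of that range.
 */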

static unsigned long *its_lpi_alloc_chunks(int nr_irqs, u32 *base, int *nr_ids)
{
	unsigned long *bitmap = NULL;
	int err = 0;
	int nr_lpis;

	nr_lpis = round_up(nr_irqs, IRQS_PER_CHUNK);

	do {
		err = alloc_lpi_range(nr_lpis, base);
		if (!err)
			break;

		nr_lpis -= IRQS_PER_CHUNK;
	} while (nr_lpis > 0);

	if (err)
		goto out;

	bitmap = kcalloc(BITS_TO_LONGS(nr_lpis), sizeof(long), GFP_ATOMIC);
	if (!bitmap)
		goto out;

	*nr_ids = nr_lpis;

out:
	if (!bitmap)
		*base = *nr_ids = 0;

	return bitmap;
}

static void its_lpi_free_chunks(unsigned long *bitmap, u32 base, u32 nr_ids)
{
	WARN_ON(free_lpi_range(base, nr_ids));
	kfree(bitmap);
}

static struct page *its_allocate_prop_table(gfp_t gfp_flags)
{
	struct page *prop_page;

	prop_page = alloc_pages(gfp_flags, get_order(LPI_PROPBASE_SZ));
	if (!prop_page)
		return NULL;

	/* Priority 0xa0, Group-1, disabled */
	memset(page_address(prop_page),
	       LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1,
	       LPI_PROPBASE_SZ);

	/* Make sure the GIC will observe the written configuration */
	gic_flush_dcache_to_poc(page_address(prop_page), LPI_PROPBASE_SZ);

	return prop_page;
}

static void its_free_prop_table(struct page *prop_page)
{
	free_pages((unsigned long)page_address(prop_page),
		   get_order(LPI_PROPBASE_SZ));
}

static int __init its_alloc_lpi_tables(void)
{
	phys_addr_t paddr;

	lpi_id_bits = gic_rdists->id_bits;
	gic_rdists->prop_page = its_allocate_prop_table(GFP_NOWAIT);
	if (!gic_rdists->prop_page) {
		pr_err("Failed to allocate PROPBASE\n");
		return -ENOMEM;
	}

	paddr = page_to_phys(gic_rdists->prop_page);
	pr_info("GIC: using LPI property table @%pa\n", &paddr);

	return its_lpi_init(lpi_id_bits);
}

static const char *its_base_type_string[] = {
	[GITS_BASER_TYPE_DEVICE]	= "Devices",
	[GITS_BASER_TYPE_VCPU]		= "Virtual CPUs",
	[GITS_BASER_TYPE_RESERVED3]	= "Reserved (3)",
	[GITS_BASER_TYPE_COLLECTION]	= "Interrupt Collections",
	[GITS_BASER_TYPE_RESERVED5]	= "Reserved (5)",
	[GITS_BASER_TYPE_RESERVED6]	= "Reserved (6)",
	[GITS_BASER_TYPE_RESERVED7]	= "Reserved (7)",
};

static u64 its_read_baser(struct its_node *its, struct its_baser *baser)
{
	u32 idx = baser - its->tables;

	return gits_read_baser(its->base + GITS_BASER + (idx << 3));
}

static void its_write_baser(struct its_node *its, struct its_baser *baser,
			    u64 val)
{
	u32 idx = baser - its->tables;

	gits_write_baser(val, its->base + GITS_BASER + (idx << 3));
	baser->val = its_read_baser(its, baser);
}

static int its_setup_baser(struct its_node *its, struct its_baser *baser,
			   u64 cache, u64 shr, u32 psz, u32 order,
			   bool indirect)
{
	u64 val = its_read_baser(its, baser);
1672 | u64 esz = GITS_BASER_ENTRY_SIZE(val); | |
1673 | u64 type = GITS_BASER_TYPE(val); | |
30ae9610 | 1674 | u64 baser_phys, tmp; |
9347359a SD |
1675 | u32 alloc_pages; |
1676 | void *base; | |
9347359a SD |
1677 | |
1678 | retry_alloc_baser: | |
1679 | alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz); | |
1680 | if (alloc_pages > GITS_BASER_PAGES_MAX) { | |
1681 | pr_warn("ITS@%pa: %s too large, reduce ITS pages %u->%u\n", | |
1682 | &its->phys_base, its_base_type_string[type], | |
1683 | alloc_pages, GITS_BASER_PAGES_MAX); | |
1684 | alloc_pages = GITS_BASER_PAGES_MAX; | |
1685 | order = get_order(GITS_BASER_PAGES_MAX * psz); | |
1686 | } | |
1687 | ||
1688 | base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order); | |
1689 | if (!base) | |
1690 | return -ENOMEM; | |
1691 | ||
30ae9610 SD |
1692 | baser_phys = virt_to_phys(base); |
1693 | ||
1694 | /* Check if the physical address of the memory is above 48bits */ | |
1695 | if (IS_ENABLED(CONFIG_ARM64_64K_PAGES) && (baser_phys >> 48)) { | |
1696 | ||
1697 | /* 52bit PA is supported only when PageSize=64K */ | |
1698 | if (psz != SZ_64K) { | |
1699 | pr_err("ITS: no 52bit PA support when psz=%d\n", psz); | |
1700 | free_pages((unsigned long)base, order); | |
1701 | return -ENXIO; | |
1702 | } | |
1703 | ||
1704 | /* Convert 52bit PA to 48bit field */ | |
1705 | baser_phys = GITS_BASER_PHYS_52_to_48(baser_phys); | |
1706 | } | |
1707 | ||
9347359a | 1708 | retry_baser: |
30ae9610 | 1709 | val = (baser_phys | |
9347359a SD |
1710 | (type << GITS_BASER_TYPE_SHIFT) | |
1711 | ((esz - 1) << GITS_BASER_ENTRY_SIZE_SHIFT) | | |
1712 | ((alloc_pages - 1) << GITS_BASER_PAGES_SHIFT) | | |
1713 | cache | | |
1714 | shr | | |
1715 | GITS_BASER_VALID); | |
1716 | ||
3faf24ea SD |
1717 | val |= indirect ? GITS_BASER_INDIRECT : 0x0; |
1718 | ||
9347359a SD |
1719 | switch (psz) { |
1720 | case SZ_4K: | |
1721 | val |= GITS_BASER_PAGE_SIZE_4K; | |
1722 | break; | |
1723 | case SZ_16K: | |
1724 | val |= GITS_BASER_PAGE_SIZE_16K; | |
1725 | break; | |
1726 | case SZ_64K: | |
1727 | val |= GITS_BASER_PAGE_SIZE_64K; | |
1728 | break; | |
1729 | } | |
1730 | ||
1731 | its_write_baser(its, baser, val); | |
1732 | tmp = baser->val; | |
1733 | ||
1734 | if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) { | |
1735 | /* | |
1736 | * Shareability didn't stick. Just use | |
1737 | * whatever the read reported, which is likely | |
1738 | * to be the only thing this ITS | |
1739 | * supports. If that's zero, make it | |
1740 | * non-cacheable as well. | |
1741 | */ | |
1742 | shr = tmp & GITS_BASER_SHAREABILITY_MASK; | |
1743 | if (!shr) { | |
1744 | cache = GITS_BASER_nC; | |
328191c0 | 1745 | gic_flush_dcache_to_poc(base, PAGE_ORDER_TO_SIZE(order)); |
9347359a SD |
1746 | } |
1747 | goto retry_baser; | |
1748 | } | |
1749 | ||
1750 | if ((val ^ tmp) & GITS_BASER_PAGE_SIZE_MASK) { | |
1751 | /* | |
1752 | * Page size didn't stick. Let's try a smaller | |
1753 | * size and retry. If we reach 4K, then | |
1754 | * something is horribly wrong... | |
1755 | */ | |
1756 | free_pages((unsigned long)base, order); | |
1757 | baser->base = NULL; | |
1758 | ||
1759 | switch (psz) { | |
1760 | case SZ_16K: | |
1761 | psz = SZ_4K; | |
1762 | goto retry_alloc_baser; | |
1763 | case SZ_64K: | |
1764 | psz = SZ_16K; | |
1765 | goto retry_alloc_baser; | |
1766 | } | |
1767 | } | |
1768 | ||
1769 | if (val != tmp) { | |
b11283eb | 1770 | pr_err("ITS@%pa: %s doesn't stick: %llx %llx\n", |
9347359a | 1771 | &its->phys_base, its_base_type_string[type], |
b11283eb | 1772 | val, tmp); |
9347359a SD |
1773 | free_pages((unsigned long)base, order); |
1774 | return -ENXIO; | |
1775 | } | |
1776 | ||
1777 | baser->order = order; | |
1778 | baser->base = base; | |
1779 | baser->psz = psz; | |
3faf24ea | 1780 | tmp = indirect ? GITS_LVL1_ENTRY_SIZE : esz; |
9347359a | 1781 | |
3faf24ea | 1782 | pr_info("ITS@%pa: allocated %d %s @%lx (%s, esz %d, psz %dK, shr %d)\n", |
d524eaa2 | 1783 | &its->phys_base, (int)(PAGE_ORDER_TO_SIZE(order) / (int)tmp), |
9347359a SD |
1784 | its_base_type_string[type], |
1785 | (unsigned long)virt_to_phys(base), | |
3faf24ea | 1786 | indirect ? "indirect" : "flat", (int)esz, |
9347359a SD |
1787 | psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT); |
1788 | ||
1789 | return 0; | |
1790 | } | |
1791 | ||
4cacac57 MZ |
1792 | static bool its_parse_indirect_baser(struct its_node *its, |
1793 | struct its_baser *baser, | |
32bd44dc | 1794 | u32 psz, u32 *order, u32 ids) |
4b75c459 | 1795 | { |
4cacac57 MZ |
1796 | u64 tmp = its_read_baser(its, baser); |
1797 | u64 type = GITS_BASER_TYPE(tmp); | |
1798 | u64 esz = GITS_BASER_ENTRY_SIZE(tmp); | |
2fd632a0 | 1799 | u64 val = GITS_BASER_InnerShareable | GITS_BASER_RaWaWb; |
4b75c459 | 1800 | u32 new_order = *order; |
3faf24ea SD |
1801 | bool indirect = false; |
1802 | ||
1803 | /* No need to enable Indirection if memory requirement < (psz*2)bytes */ | |
1804 | if ((esz << ids) > (psz * 2)) { | |
1805 | /* | |
1806 | * Find out whether hw supports a single or two-level table by | |
1807 | * reading bit at offset '62' after writing '1' to it. | |
1808 | */ | |
1809 | its_write_baser(its, baser, val | GITS_BASER_INDIRECT); | |
1810 | indirect = !!(baser->val & GITS_BASER_INDIRECT); | |
1811 | ||
1812 | if (indirect) { | |
1813 | /* | |
1814 | * The size of the lvl2 table is equal to the ITS page size, | |
1815 | * which is 'psz'. To compute the lvl1 table size, subtract | |
1816 | * from 'ids' (as reported by the ITS hardware) the ID bits | |
1817 | * covered by a single lvl2 table; the lvl1 table then needs | |
1818 | * one GITS_LVL1_ENTRY_SIZE entry for each remaining ID value. | |
1819 | */ | |
d524eaa2 | 1820 | ids -= ilog2(psz / (int)esz); |
3faf24ea SD |
1821 | esz = GITS_LVL1_ENTRY_SIZE; |
1822 | } | |
1823 | } | |
4b75c459 SD |
1824 | |
1825 | /* | |
1826 | * Allocate as many entries as required to fit the | |
1827 | * range of device IDs that the ITS can grok... The ID | |
1828 | * space being incredibly sparse, this results in a | |
3faf24ea SD |
1829 | * massive waste of memory if the two-level device table | |
1830 | * feature is not supported by the hardware. | |
4b75c459 SD |
1831 | */ |
1832 | new_order = max_t(u32, get_order(esz << ids), new_order); | |
1833 | if (new_order >= MAX_ORDER) { | |
1834 | new_order = MAX_ORDER - 1; | |
d524eaa2 | 1835 | ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / (int)esz); |
4cacac57 MZ |
1836 | pr_warn("ITS@%pa: %s Table too large, reduce ids %u->%u\n", |
1837 | &its->phys_base, its_base_type_string[type], | |
1838 | its->device_ids, ids); | |
4b75c459 SD |
1839 | } |
1840 | ||
1841 | *order = new_order; | |
3faf24ea SD |
1842 | |
1843 | return indirect; | |
4b75c459 SD |
1844 | } |
1845 | ||
1ac19ca6 MZ |
1846 | static void its_free_tables(struct its_node *its) |
1847 | { | |
1848 | int i; | |
1849 | ||
1850 | for (i = 0; i < GITS_BASER_NR_REGS; i++) { | |
1a485f4d SD |
1851 | if (its->tables[i].base) { |
1852 | free_pages((unsigned long)its->tables[i].base, | |
1853 | its->tables[i].order); | |
1854 | its->tables[i].base = NULL; | |
1ac19ca6 MZ |
1855 | } |
1856 | } | |
1857 | } | |
1858 | ||
0e0b0f69 | 1859 | static int its_alloc_tables(struct its_node *its) |
1ac19ca6 | 1860 | { |
1ac19ca6 | 1861 | u64 shr = GITS_BASER_InnerShareable; |
2fd632a0 | 1862 | u64 cache = GITS_BASER_RaWaWb; |
9347359a SD |
1863 | u32 psz = SZ_64K; |
1864 | int err, i; | |
94100970 | 1865 | |
fa150019 AB |
1866 | if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375) |
1867 | /* erratum 24313: ignore memory access type */ | |
1868 | cache = GITS_BASER_nCnB; | |
466b7d16 | 1869 | |
1ac19ca6 | 1870 | for (i = 0; i < GITS_BASER_NR_REGS; i++) { |
2d81d425 SD |
1871 | struct its_baser *baser = its->tables + i; |
1872 | u64 val = its_read_baser(its, baser); | |
1ac19ca6 | 1873 | u64 type = GITS_BASER_TYPE(val); |
9347359a | 1874 | u32 order = get_order(psz); |
3faf24ea | 1875 | bool indirect = false; |
1ac19ca6 | 1876 | |
4cacac57 MZ |
1877 | switch (type) { |
1878 | case GITS_BASER_TYPE_NONE: | |
1ac19ca6 MZ |
1879 | continue; |
1880 | ||
4cacac57 | 1881 | case GITS_BASER_TYPE_DEVICE: |
32bd44dc SD |
1882 | indirect = its_parse_indirect_baser(its, baser, |
1883 | psz, &order, | |
1884 | its->device_ids); | |
 | break; | |
4cacac57 MZ |
1885 | case GITS_BASER_TYPE_VCPU: |
1886 | indirect = its_parse_indirect_baser(its, baser, | |
32bd44dc SD |
1887 | psz, &order, |
1888 | ITS_MAX_VPEID_BITS); | |
4cacac57 MZ |
1889 | break; |
1890 | } | |
f54b97ed | 1891 | |
3faf24ea | 1892 | err = its_setup_baser(its, baser, cache, shr, psz, order, indirect); |
9347359a SD |
1893 | if (err < 0) { |
1894 | its_free_tables(its); | |
1895 | return err; | |
1ac19ca6 MZ |
1896 | } |
1897 | ||
9347359a SD |
1898 | /* Update settings which will be used for next BASERn */ |
1899 | psz = baser->psz; | |
1900 | cache = baser->val & GITS_BASER_CACHEABILITY_MASK; | |
1901 | shr = baser->val & GITS_BASER_SHAREABILITY_MASK; | |
1ac19ca6 MZ |
1902 | } |
1903 | ||
1904 | return 0; | |
1ac19ca6 MZ |
1905 | } |
1906 | ||
1907 | static int its_alloc_collections(struct its_node *its) | |
1908 | { | |
83559b47 MZ |
1909 | int i; |
1910 | ||
6396bb22 | 1911 | its->collections = kcalloc(nr_cpu_ids, sizeof(*its->collections), |
1ac19ca6 MZ |
1912 | GFP_KERNEL); |
1913 | if (!its->collections) | |
1914 | return -ENOMEM; | |
1915 | ||
83559b47 MZ |
1916 | for (i = 0; i < nr_cpu_ids; i++) |
1917 | its->collections[i].target_address = ~0ULL; | |
1918 | ||
1ac19ca6 MZ |
1919 | return 0; |
1920 | } | |
1921 | ||
7c297a2d MZ |
1922 | static struct page *its_allocate_pending_table(gfp_t gfp_flags) |
1923 | { | |
1924 | struct page *pend_page; | |
1925 | /* | |
1926 | * The pending pages have to be at least 64kB aligned, | |
1927 | * hence the 'max(LPI_PENDBASE_SZ, SZ_64K)' below. | |
1928 | */ | |
1929 | pend_page = alloc_pages(gfp_flags | __GFP_ZERO, | |
1930 | get_order(max_t(u32, LPI_PENDBASE_SZ, SZ_64K))); | |
1931 | if (!pend_page) | |
1932 | return NULL; | |
1933 | ||
1934 | /* Make sure the GIC will observe the zero-ed page */ | |
1935 | gic_flush_dcache_to_poc(page_address(pend_page), LPI_PENDBASE_SZ); | |
1936 | ||
1937 | return pend_page; | |
1938 | } | |
1939 | ||
7d75bbb4 MZ |
1940 | static void its_free_pending_table(struct page *pt) |
1941 | { | |
1942 | free_pages((unsigned long)page_address(pt), | |
1943 | get_order(max_t(u32, LPI_PENDBASE_SZ, SZ_64K))); | |
1944 | } | |
1945 | ||
1ac19ca6 MZ |
1946 | static void its_cpu_init_lpis(void) |
1947 | { | |
1948 | void __iomem *rbase = gic_data_rdist_rd_base(); | |
1949 | struct page *pend_page; | |
1950 | u64 val, tmp; | |
1951 | ||
1952 | /* If we didn't allocate the pending table yet, do it now */ | |
1953 | pend_page = gic_data_rdist()->pend_page; | |
1954 | if (!pend_page) { | |
1955 | phys_addr_t paddr; | |
7c297a2d MZ |
1956 | |
1957 | pend_page = its_allocate_pending_table(GFP_NOWAIT); | |
1ac19ca6 MZ |
1958 | if (!pend_page) { |
1959 | pr_err("Failed to allocate PENDBASE for CPU%d\n", | |
1960 | smp_processor_id()); | |
1961 | return; | |
1962 | } | |
1963 | ||
1ac19ca6 MZ |
1964 | paddr = page_to_phys(pend_page); |
1965 | pr_info("CPU%d: using LPI pending table @%pa\n", | |
1966 | smp_processor_id(), &paddr); | |
1967 | gic_data_rdist()->pend_page = pend_page; | |
1968 | } | |
1969 | ||
1ac19ca6 MZ |
1970 | /* set PROPBASE */ |
1971 | val = (page_to_phys(gic_rdists->prop_page) | | |
1972 | GICR_PROPBASER_InnerShareable | | |
2fd632a0 | 1973 | GICR_PROPBASER_RaWaWb | |
1ac19ca6 MZ |
1974 | ((LPI_NRBITS - 1) & GICR_PROPBASER_IDBITS_MASK)); |
1975 | ||
0968a619 VM |
1976 | gicr_write_propbaser(val, rbase + GICR_PROPBASER); |
1977 | tmp = gicr_read_propbaser(rbase + GICR_PROPBASER); | |
1ac19ca6 MZ |
1978 | |
1979 | if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) { | |
241a386c MZ |
1980 | if (!(tmp & GICR_PROPBASER_SHAREABILITY_MASK)) { |
1981 | /* | |
1982 | * The HW reports non-shareable, we must | |
1983 | * remove the cacheability attributes as | |
1984 | * well. | |
1985 | */ | |
1986 | val &= ~(GICR_PROPBASER_SHAREABILITY_MASK | | |
1987 | GICR_PROPBASER_CACHEABILITY_MASK); | |
1988 | val |= GICR_PROPBASER_nC; | |
0968a619 | 1989 | gicr_write_propbaser(val, rbase + GICR_PROPBASER); |
241a386c | 1990 | } |
1ac19ca6 MZ |
1991 | pr_info_once("GIC: using cache flushing for LPI property table\n"); |
1992 | gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING; | |
1993 | } | |
1994 | ||
1995 | /* set PENDBASE */ | |
1996 | val = (page_to_phys(pend_page) | | |
4ad3e363 | 1997 | GICR_PENDBASER_InnerShareable | |
2fd632a0 | 1998 | GICR_PENDBASER_RaWaWb); |
1ac19ca6 | 1999 | |
0968a619 VM |
2000 | gicr_write_pendbaser(val, rbase + GICR_PENDBASER); |
2001 | tmp = gicr_read_pendbaser(rbase + GICR_PENDBASER); | |
241a386c MZ |
2002 | |
2003 | if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) { | |
2004 | /* | |
2005 | * The HW reports non-shareable, we must remove the | |
2006 | * cacheability attributes as well. | |
2007 | */ | |
2008 | val &= ~(GICR_PENDBASER_SHAREABILITY_MASK | | |
2009 | GICR_PENDBASER_CACHEABILITY_MASK); | |
2010 | val |= GICR_PENDBASER_nC; | |
0968a619 | 2011 | gicr_write_pendbaser(val, rbase + GICR_PENDBASER); |
241a386c | 2012 | } |
1ac19ca6 MZ |
2013 | |
2014 | /* Enable LPIs */ | |
2015 | val = readl_relaxed(rbase + GICR_CTLR); | |
2016 | val |= GICR_CTLR_ENABLE_LPIS; | |
2017 | writel_relaxed(val, rbase + GICR_CTLR); | |
2018 | ||
2019 | /* Make sure the GIC has seen the above */ | |
2020 | dsb(sy); | |
2021 | } | |
2022 | ||
920181ce | 2023 | static void its_cpu_init_collection(struct its_node *its) |
1ac19ca6 | 2024 | { |
920181ce DB |
2025 | int cpu = smp_processor_id(); |
2026 | u64 target; | |
1ac19ca6 | 2027 | |
920181ce DB |
2028 | /* Avoid cross-node collections and their mapping */ | |
2029 | if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) { | |
2030 | struct device_node *cpu_node; | |
fbf8f40e | 2031 | |
920181ce DB |
2032 | cpu_node = of_get_cpu_node(cpu, NULL); |
2033 | if (its->numa_node != NUMA_NO_NODE && | |
2034 | its->numa_node != of_node_to_nid(cpu_node)) | |
2035 | return; | |
2036 | } | |
fbf8f40e | 2037 | |
920181ce DB |
2038 | /* |
2039 | * We now have to bind each collection to its target | |
2040 | * redistributor. | |
2041 | */ | |
2042 | if (gic_read_typer(its->base + GITS_TYPER) & GITS_TYPER_PTA) { | |
1ac19ca6 | 2043 | /* |
920181ce | 2044 | * This ITS wants the physical address of the |
1ac19ca6 MZ |
2045 | * redistributor. |
2046 | */ | |
920181ce DB |
2047 | target = gic_data_rdist()->phys_base; |
2048 | } else { | |
2049 | /* This ITS wants a linear CPU number. */ | |
2050 | target = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER); | |
2051 | target = GICR_TYPER_CPU_NUMBER(target) << 16; | |
2052 | } | |
1ac19ca6 | 2053 | |
920181ce DB |
2054 | /* Perform collection mapping */ |
2055 | its->collections[cpu].target_address = target; | |
2056 | its->collections[cpu].col_id = cpu; | |
1ac19ca6 | 2057 | |
920181ce DB |
2058 | its_send_mapc(its, &its->collections[cpu], 1); |
2059 | its_send_invall(its, &its->collections[cpu]); | |
2060 | } | |
2061 | ||
2062 | static void its_cpu_init_collections(void) | |
2063 | { | |
2064 | struct its_node *its; | |
2065 | ||
2066 | spin_lock(&its_lock); | |
2067 | ||
2068 | list_for_each_entry(its, &its_nodes, entry) | |
2069 | its_cpu_init_collection(its); | |
1ac19ca6 MZ |
2070 | |
2071 | spin_unlock(&its_lock); | |
2072 | } | |
84a6a2e7 MZ |
2073 | |
2074 | static struct its_device *its_find_device(struct its_node *its, u32 dev_id) | |
2075 | { | |
2076 | struct its_device *its_dev = NULL, *tmp; | |
3e39e8f5 | 2077 | unsigned long flags; |
84a6a2e7 | 2078 | |
3e39e8f5 | 2079 | raw_spin_lock_irqsave(&its->lock, flags); |
84a6a2e7 MZ |
2080 | |
2081 | list_for_each_entry(tmp, &its->its_device_list, entry) { | |
2082 | if (tmp->device_id == dev_id) { | |
2083 | its_dev = tmp; | |
2084 | break; | |
2085 | } | |
2086 | } | |
2087 | ||
3e39e8f5 | 2088 | raw_spin_unlock_irqrestore(&its->lock, flags); |
84a6a2e7 MZ |
2089 | |
2090 | return its_dev; | |
2091 | } | |
2092 | ||
466b7d16 SD |
2093 | static struct its_baser *its_get_baser(struct its_node *its, u32 type) |
2094 | { | |
2095 | int i; | |
2096 | ||
2097 | for (i = 0; i < GITS_BASER_NR_REGS; i++) { | |
2098 | if (GITS_BASER_TYPE(its->tables[i].val) == type) | |
2099 | return &its->tables[i]; | |
2100 | } | |
2101 | ||
2102 | return NULL; | |
2103 | } | |
2104 | ||
70cc81ed | 2105 | static bool its_alloc_table_entry(struct its_baser *baser, u32 id) |
3faf24ea | 2106 | { |
3faf24ea SD |
2107 | struct page *page; |
2108 | u32 esz, idx; | |
2109 | __le64 *table; | |
2110 | ||
3faf24ea SD |
2111 | /* Don't allow device id that exceeds single, flat table limit */ |
2112 | esz = GITS_BASER_ENTRY_SIZE(baser->val); | |
2113 | if (!(baser->val & GITS_BASER_INDIRECT)) | |
70cc81ed | 2114 | return (id < (PAGE_ORDER_TO_SIZE(baser->order) / esz)); |
3faf24ea SD |
2115 | |
2116 | /* Compute 1st level table index & check if that exceeds table limit */ | |
70cc81ed | 2117 | idx = id >> ilog2(baser->psz / esz); |
3faf24ea SD |
2118 | if (idx >= (PAGE_ORDER_TO_SIZE(baser->order) / GITS_LVL1_ENTRY_SIZE)) |
2119 | return false; | |
2120 | ||
2121 | table = baser->base; | |
2122 | ||
2123 | /* Allocate memory for 2nd level table */ | |
2124 | if (!table[idx]) { | |
2125 | page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(baser->psz)); | |
2126 | if (!page) | |
2127 | return false; | |
2128 | ||
2129 | /* Flush Lvl2 table to PoC if hw doesn't support coherency */ | |
2130 | if (!(baser->val & GITS_BASER_SHAREABILITY_MASK)) | |
328191c0 | 2131 | gic_flush_dcache_to_poc(page_address(page), baser->psz); |
3faf24ea SD |
2132 | |
2133 | table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID); | |
2134 | ||
2135 | /* Flush Lvl1 entry to PoC if hw doesn't support coherency */ | |
2136 | if (!(baser->val & GITS_BASER_SHAREABILITY_MASK)) | |
328191c0 | 2137 | gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE); |
3faf24ea SD |
2138 | |
2139 | /* Ensure updated table contents are visible to ITS hardware */ | |
2140 | dsb(sy); | |
2141 | } | |
2142 | ||
2143 | return true; | |
2144 | } | |
2145 | ||
70cc81ed MZ |
2146 | static bool its_alloc_device_table(struct its_node *its, u32 dev_id) |
2147 | { | |
2148 | struct its_baser *baser; | |
2149 | ||
2150 | baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE); | |
2151 | ||
2152 | /* Don't allow device id that exceeds ITS hardware limit */ | |
2153 | if (!baser) | |
2154 | return (ilog2(dev_id) < its->device_ids); | |
2155 | ||
2156 | return its_alloc_table_entry(baser, dev_id); | |
2157 | } | |
2158 | ||
7d75bbb4 MZ |
2159 | static bool its_alloc_vpe_table(u32 vpe_id) |
2160 | { | |
2161 | struct its_node *its; | |
2162 | ||
2163 | /* | |
2164 | * Make sure the L2 tables are allocated on *all* v4 ITSs. We | |
2165 | * could try and only do it on ITSs corresponding to devices | |
2166 | * that have interrupts targeted at this VPE, but the | |
2167 | * complexity becomes crazy (and you have tons of memory | |
2168 | * anyway, right?). | |
2169 | */ | |
2170 | list_for_each_entry(its, &its_nodes, entry) { | |
2171 | struct its_baser *baser; | |
2172 | ||
2173 | if (!its->is_v4) | |
2174 | continue; | |
3faf24ea | 2175 | |
7d75bbb4 MZ |
2176 | baser = its_get_baser(its, GITS_BASER_TYPE_VCPU); |
2177 | if (!baser) | |
2178 | return false; | |
3faf24ea | 2179 | |
7d75bbb4 MZ |
2180 | if (!its_alloc_table_entry(baser, vpe_id)) |
2181 | return false; | |
3faf24ea SD |
2182 | } |
2183 | ||
2184 | return true; | |
2185 | } | |
2186 | ||
84a6a2e7 | 2187 | static struct its_device *its_create_device(struct its_node *its, u32 dev_id, |
93f94ea0 | 2188 | int nvecs, bool alloc_lpis) |
84a6a2e7 MZ |
2189 | { |
2190 | struct its_device *dev; | |
93f94ea0 | 2191 | unsigned long *lpi_map = NULL; |
3e39e8f5 | 2192 | unsigned long flags; |
591e5bec | 2193 | u16 *col_map = NULL; |
84a6a2e7 MZ |
2194 | void *itt; |
2195 | int lpi_base; | |
2196 | int nr_lpis; | |
c8481267 | 2197 | int nr_ites; |
84a6a2e7 MZ |
2198 | int sz; |
2199 | ||
3faf24ea | 2200 | if (!its_alloc_device_table(its, dev_id)) |
466b7d16 SD |
2201 | return NULL; |
2202 | ||
84a6a2e7 | 2203 | dev = kzalloc(sizeof(*dev), GFP_KERNEL); |
c8481267 | 2204 | /* |
4f2c7583 AB |
2205 | * We allocate at least one chunk's worth of LPIs per device, | |
2206 | * and thus that many ITEs. The device may require less though. | |
c8481267 | 2207 | */ |
4f2c7583 | 2208 | nr_ites = max(IRQS_PER_CHUNK, roundup_pow_of_two(nvecs)); |
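 | /* The ITT base must be ITS_ITT_ALIGN aligned; pad the allocation so an aligned base fits inside it. */ | |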
c8481267 | 2209 | sz = nr_ites * its->ite_size; |
84a6a2e7 | 2210 | sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1; |
6c834125 | 2211 | itt = kzalloc(sz, GFP_KERNEL); |
93f94ea0 MZ |
2212 | if (alloc_lpis) { |
2213 | lpi_map = its_lpi_alloc_chunks(nvecs, &lpi_base, &nr_lpis); | |
2214 | if (lpi_map) | |
6396bb22 | 2215 | col_map = kcalloc(nr_lpis, sizeof(*col_map), |
93f94ea0 MZ |
2216 | GFP_KERNEL); |
2217 | } else { | |
6396bb22 | 2218 | col_map = kcalloc(nr_ites, sizeof(*col_map), GFP_KERNEL); |
93f94ea0 MZ |
2219 | nr_lpis = 0; |
2220 | lpi_base = 0; | |
2221 | } | |
84a6a2e7 | 2222 | |
93f94ea0 | 2223 | if (!dev || !itt || !col_map || (!lpi_map && alloc_lpis)) { |
84a6a2e7 MZ |
2224 | kfree(dev); |
2225 | kfree(itt); | |
2226 | kfree(lpi_map); | |
591e5bec | 2227 | kfree(col_map); |
84a6a2e7 MZ |
2228 | return NULL; |
2229 | } | |
2230 | ||
328191c0 | 2231 | gic_flush_dcache_to_poc(itt, sz); |
5a9a8915 | 2232 | |
84a6a2e7 MZ |
2233 | dev->its = its; |
2234 | dev->itt = itt; | |
c8481267 | 2235 | dev->nr_ites = nr_ites; |
591e5bec MZ |
2236 | dev->event_map.lpi_map = lpi_map; |
2237 | dev->event_map.col_map = col_map; | |
2238 | dev->event_map.lpi_base = lpi_base; | |
2239 | dev->event_map.nr_lpis = nr_lpis; | |
d011e4e6 | 2240 | mutex_init(&dev->event_map.vlpi_lock); |
84a6a2e7 MZ |
2241 | dev->device_id = dev_id; |
2242 | INIT_LIST_HEAD(&dev->entry); | |
2243 | ||
3e39e8f5 | 2244 | raw_spin_lock_irqsave(&its->lock, flags); |
84a6a2e7 | 2245 | list_add(&dev->entry, &its->its_device_list); |
3e39e8f5 | 2246 | raw_spin_unlock_irqrestore(&its->lock, flags); |
84a6a2e7 | 2247 | |
84a6a2e7 MZ |
2248 | /* Map device to its ITT */ |
2249 | its_send_mapd(dev, 1); | |
2250 | ||
2251 | return dev; | |
2252 | } | |
2253 | ||
2254 | static void its_free_device(struct its_device *its_dev) | |
2255 | { | |
3e39e8f5 MZ |
2256 | unsigned long flags; |
2257 | ||
2258 | raw_spin_lock_irqsave(&its_dev->its->lock, flags); | |
84a6a2e7 | 2259 | list_del(&its_dev->entry); |
3e39e8f5 | 2260 | raw_spin_unlock_irqrestore(&its_dev->its->lock, flags); |
84a6a2e7 MZ |
2261 | kfree(its_dev->itt); |
2262 | kfree(its_dev); | |
2263 | } | |
b48ac83d MZ |
2264 | |
2265 | static int its_alloc_device_irq(struct its_device *dev, irq_hw_number_t *hwirq) | |
2266 | { | |
2267 | int idx; | |
2268 | ||
591e5bec MZ |
2269 | idx = find_first_zero_bit(dev->event_map.lpi_map, |
2270 | dev->event_map.nr_lpis); | |
2271 | if (idx == dev->event_map.nr_lpis) | |
b48ac83d MZ |
2272 | return -ENOSPC; |
2273 | ||
591e5bec MZ |
2274 | *hwirq = dev->event_map.lpi_base + idx; |
2275 | set_bit(idx, dev->event_map.lpi_map); | |
b48ac83d | 2276 | |
b48ac83d MZ |
2277 | return 0; |
2278 | } | |
2279 | ||
54456db9 MZ |
2280 | static int its_msi_prepare(struct irq_domain *domain, struct device *dev, |
2281 | int nvec, msi_alloc_info_t *info) | |
e8137f4f | 2282 | { |
b48ac83d | 2283 | struct its_node *its; |
b48ac83d | 2284 | struct its_device *its_dev; |
54456db9 MZ |
2285 | struct msi_domain_info *msi_info; |
2286 | u32 dev_id; | |
2287 | ||
2288 | /* | |
2289 | * We ignore "dev" entirely, and rely on the dev_id that has | |
2290 | * been passed via the scratchpad. This limits this domain's | |
2291 | * usefulness to upper layers that definitely know that they | |
2292 | * are built on top of the ITS. | |
2293 | */ | |
2294 | dev_id = info->scratchpad[0].ul; | |
2295 | ||
2296 | msi_info = msi_get_domain_info(domain); | |
2297 | its = msi_info->data; | |
e8137f4f | 2298 | |
20b3d54e MZ |
2299 | if (!gic_rdists->has_direct_lpi && |
2300 | vpe_proxy.dev && | |
2301 | vpe_proxy.dev->its == its && | |
2302 | dev_id == vpe_proxy.dev->device_id) { | |
2303 | /* Bad luck. Get yourself a better implementation */ | |
2304 | WARN_ONCE(1, "DevId %x clashes with GICv4 VPE proxy device\n", | |
2305 | dev_id); | |
2306 | return -EINVAL; | |
2307 | } | |
2308 | ||
f130420e | 2309 | its_dev = its_find_device(its, dev_id); |
e8137f4f MZ |
2310 | if (its_dev) { |
2311 | /* | |
2312 | * We already have seen this ID, probably through | |
2313 | * another alias (PCI bridge of some sort). No need to | |
2314 | * create the device. | |
2315 | */ | |
f130420e | 2316 | pr_debug("Reusing ITT for devID %x\n", dev_id); |
e8137f4f MZ |
2317 | goto out; |
2318 | } | |
b48ac83d | 2319 | |
93f94ea0 | 2320 | its_dev = its_create_device(its, dev_id, nvec, true); |
b48ac83d MZ |
2321 | if (!its_dev) |
2322 | return -ENOMEM; | |
2323 | ||
f130420e | 2324 | pr_debug("ITT %d entries, %d bits\n", nvec, ilog2(nvec)); |
e8137f4f | 2325 | out: |
b48ac83d | 2326 | info->scratchpad[0].ptr = its_dev; |
b48ac83d MZ |
2327 | return 0; |
2328 | } | |
2329 | ||
54456db9 MZ |
2330 | static struct msi_domain_ops its_msi_domain_ops = { |
2331 | .msi_prepare = its_msi_prepare, | |
2332 | }; | |
2333 | ||
b48ac83d MZ |
2334 | static int its_irq_gic_domain_alloc(struct irq_domain *domain, |
2335 | unsigned int virq, | |
2336 | irq_hw_number_t hwirq) | |
2337 | { | |
f833f57f MZ |
2338 | struct irq_fwspec fwspec; |
2339 | ||
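 | /* | |
 | * A DT parent takes the three-cell GIC specifier (LPI type, hwirq, | |
 | * trigger); an ACPI/fwnode parent only takes (hwirq, trigger). | |
 | */ | |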
2340 | if (irq_domain_get_of_node(domain->parent)) { | |
2341 | fwspec.fwnode = domain->parent->fwnode; | |
2342 | fwspec.param_count = 3; | |
2343 | fwspec.param[0] = GIC_IRQ_TYPE_LPI; | |
2344 | fwspec.param[1] = hwirq; | |
2345 | fwspec.param[2] = IRQ_TYPE_EDGE_RISING; | |
3f010cf1 TN |
2346 | } else if (is_fwnode_irqchip(domain->parent->fwnode)) { |
2347 | fwspec.fwnode = domain->parent->fwnode; | |
2348 | fwspec.param_count = 2; | |
2349 | fwspec.param[0] = hwirq; | |
2350 | fwspec.param[1] = IRQ_TYPE_EDGE_RISING; | |
f833f57f MZ |
2351 | } else { |
2352 | return -EINVAL; | |
2353 | } | |
b48ac83d | 2354 | |
f833f57f | 2355 | return irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec); |
b48ac83d MZ |
2356 | } |
2357 | ||
2358 | static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, | |
2359 | unsigned int nr_irqs, void *args) | |
2360 | { | |
2361 | msi_alloc_info_t *info = args; | |
2362 | struct its_device *its_dev = info->scratchpad[0].ptr; | |
2363 | irq_hw_number_t hwirq; | |
2364 | int err; | |
2365 | int i; | |
2366 | ||
2367 | for (i = 0; i < nr_irqs; i++) { | |
2368 | err = its_alloc_device_irq(its_dev, &hwirq); | |
2369 | if (err) | |
2370 | return err; | |
2371 | ||
2372 | err = its_irq_gic_domain_alloc(domain, virq + i, hwirq); | |
2373 | if (err) | |
2374 | return err; | |
2375 | ||
2376 | irq_domain_set_hwirq_and_chip(domain, virq + i, | |
2377 | hwirq, &its_irq_chip, its_dev); | |
0d224d35 | 2378 | irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq + i))); |
f130420e MZ |
2379 | pr_debug("ID:%d pID:%d vID:%d\n", |
2380 | (int)(hwirq - its_dev->event_map.lpi_base), | |
2381 | (int) hwirq, virq + i); | |
b48ac83d MZ |
2382 | } |
2383 | ||
2384 | return 0; | |
2385 | } | |
2386 | ||
72491643 | 2387 | static int its_irq_domain_activate(struct irq_domain *domain, |
702cb0a0 | 2388 | struct irq_data *d, bool reserve) |
aca268df MZ |
2389 | { |
2390 | struct its_device *its_dev = irq_data_get_irq_chip_data(d); | |
2391 | u32 event = its_get_event_id(d); | |
fbf8f40e | 2392 | const struct cpumask *cpu_mask = cpu_online_mask; |
0d224d35 | 2393 | int cpu; |
fbf8f40e GK |
2394 | |
2395 | /* get the cpu_mask of local node */ | |
2396 | if (its_dev->its->numa_node >= 0) | |
2397 | cpu_mask = cpumask_of_node(its_dev->its->numa_node); | |
aca268df | 2398 | |
591e5bec | 2399 | /* Bind the LPI to the first possible CPU */ |
c1797b11 YY |
2400 | cpu = cpumask_first_and(cpu_mask, cpu_online_mask); |
2401 | if (cpu >= nr_cpu_ids) { | |
2402 | if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) | |
2403 | return -EINVAL; | |
2404 | ||
2405 | cpu = cpumask_first(cpu_online_mask); | |
2406 | } | |
2407 | ||
0d224d35 MZ |
2408 | its_dev->event_map.col_map[event] = cpu; |
2409 | irq_data_update_effective_affinity(d, cpumask_of(cpu)); | |
591e5bec | 2410 | |
aca268df | 2411 | /* Map the GIC IRQ and event to the device */ |
6a25ad3a | 2412 | its_send_mapti(its_dev, d->hwirq, event); |
72491643 | 2413 | return 0; |
aca268df MZ |
2414 | } |
2415 | ||
2416 | static void its_irq_domain_deactivate(struct irq_domain *domain, | |
2417 | struct irq_data *d) | |
2418 | { | |
2419 | struct its_device *its_dev = irq_data_get_irq_chip_data(d); | |
2420 | u32 event = its_get_event_id(d); | |
2421 | ||
2422 | /* Stop the delivery of interrupts */ | |
2423 | its_send_discard(its_dev, event); | |
2424 | } | |
2425 | ||
b48ac83d MZ |
2426 | static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq, |
2427 | unsigned int nr_irqs) | |
2428 | { | |
2429 | struct irq_data *d = irq_domain_get_irq_data(domain, virq); | |
2430 | struct its_device *its_dev = irq_data_get_irq_chip_data(d); | |
2431 | int i; | |
2432 | ||
2433 | for (i = 0; i < nr_irqs; i++) { | |
2434 | struct irq_data *data = irq_domain_get_irq_data(domain, | |
2435 | virq + i); | |
aca268df | 2436 | u32 event = its_get_event_id(data); |
b48ac83d MZ |
2437 | |
2438 | /* Mark interrupt index as unused */ | |
591e5bec | 2439 | clear_bit(event, its_dev->event_map.lpi_map); |
b48ac83d MZ |
2440 | |
2441 | /* Nuke the entry in the domain */ | |
2da39949 | 2442 | irq_domain_reset_irq_data(data); |
b48ac83d MZ |
2443 | } |
2444 | ||
2445 | /* If all interrupts have been freed, start mopping the floor */ | |
591e5bec MZ |
2446 | if (bitmap_empty(its_dev->event_map.lpi_map, |
2447 | its_dev->event_map.nr_lpis)) { | |
cf2be8ba MZ |
2448 | its_lpi_free_chunks(its_dev->event_map.lpi_map, |
2449 | its_dev->event_map.lpi_base, | |
2450 | its_dev->event_map.nr_lpis); | |
2451 | kfree(its_dev->event_map.col_map); | |
b48ac83d MZ |
2452 | |
2453 | /* Unmap device/itt */ | |
2454 | its_send_mapd(its_dev, 0); | |
2455 | its_free_device(its_dev); | |
2456 | } | |
2457 | ||
2458 | irq_domain_free_irqs_parent(domain, virq, nr_irqs); | |
2459 | } | |
2460 | ||
2461 | static const struct irq_domain_ops its_domain_ops = { | |
2462 | .alloc = its_irq_domain_alloc, | |
2463 | .free = its_irq_domain_free, | |
aca268df MZ |
2464 | .activate = its_irq_domain_activate, |
2465 | .deactivate = its_irq_domain_deactivate, | |
b48ac83d | 2466 | }; |
4c21f3c2 | 2467 | |
20b3d54e MZ |
2468 | /* |
2469 | * This is insane. | |
2470 | * | |
2471 | * If a GICv4 doesn't implement Direct LPIs (which is extremely | |
2472 | * likely), the only way to perform an invalidate is to use a fake | |
2473 | * device to issue an INV command, implying that the LPI has first | |
2474 | * been mapped to some event on that device. Since this is not exactly | |
2475 | * cheap, we try to keep that mapping around as long as possible, and | |
2476 | * only issue an UNMAP if we're short on available slots. | |
2477 | * | |
2478 | * Broken by design(tm). | |
2479 | */ | |
2480 | static void its_vpe_db_proxy_unmap_locked(struct its_vpe *vpe) | |
2481 | { | |
2482 | /* Already unmapped? */ | |
2483 | if (vpe->vpe_proxy_event == -1) | |
2484 | return; | |
2485 | ||
2486 | its_send_discard(vpe_proxy.dev, vpe->vpe_proxy_event); | |
2487 | vpe_proxy.vpes[vpe->vpe_proxy_event] = NULL; | |
2488 | ||
2489 | /* | |
2490 | * We don't track empty slots at all, so let's move the | |
2491 | * next_victim pointer if we can quickly reuse that slot | |
2492 | * instead of nuking an existing entry. Not clear that this is | |
2493 | * always a win though, and this might just generate a ripple | |
2494 | * effect... Let's just hope VPEs don't migrate too often. | |
2495 | */ | |
2496 | if (vpe_proxy.vpes[vpe_proxy.next_victim]) | |
2497 | vpe_proxy.next_victim = vpe->vpe_proxy_event; | |
2498 | ||
2499 | vpe->vpe_proxy_event = -1; | |
2500 | } | |
2501 | ||
2502 | static void its_vpe_db_proxy_unmap(struct its_vpe *vpe) | |
2503 | { | |
2504 | if (!gic_rdists->has_direct_lpi) { | |
2505 | unsigned long flags; | |
2506 | ||
2507 | raw_spin_lock_irqsave(&vpe_proxy.lock, flags); | |
2508 | its_vpe_db_proxy_unmap_locked(vpe); | |
2509 | raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags); | |
2510 | } | |
2511 | } | |
2512 | ||
2513 | static void its_vpe_db_proxy_map_locked(struct its_vpe *vpe) | |
2514 | { | |
2515 | /* Already mapped? */ | |
2516 | if (vpe->vpe_proxy_event != -1) | |
2517 | return; | |
2518 | ||
2519 | /* This slot was already allocated. Kick the other VPE out. */ | |
2520 | if (vpe_proxy.vpes[vpe_proxy.next_victim]) | |
2521 | its_vpe_db_proxy_unmap_locked(vpe_proxy.vpes[vpe_proxy.next_victim]); | |
2522 | ||
2523 | /* Map the new VPE instead */ | |
2524 | vpe_proxy.vpes[vpe_proxy.next_victim] = vpe; | |
2525 | vpe->vpe_proxy_event = vpe_proxy.next_victim; | |
2526 | vpe_proxy.next_victim = (vpe_proxy.next_victim + 1) % vpe_proxy.dev->nr_ites; | |
2527 | ||
2528 | vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = vpe->col_idx; | |
2529 | its_send_mapti(vpe_proxy.dev, vpe->vpe_db_lpi, vpe->vpe_proxy_event); | |
2530 | } | |
2531 | ||
958b90d1 MZ |
2532 | static void its_vpe_db_proxy_move(struct its_vpe *vpe, int from, int to) |
2533 | { | |
2534 | unsigned long flags; | |
2535 | struct its_collection *target_col; | |
2536 | ||
2537 | if (gic_rdists->has_direct_lpi) { | |
2538 | void __iomem *rdbase; | |
2539 | ||
2540 | rdbase = per_cpu_ptr(gic_rdists->rdist, from)->rd_base; | |
2541 | gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR); | |
2542 | while (gic_read_lpir(rdbase + GICR_SYNCR) & 1) | |
2543 | cpu_relax(); | |
2544 | ||
2545 | return; | |
2546 | } | |
2547 | ||
2548 | raw_spin_lock_irqsave(&vpe_proxy.lock, flags); | |
2549 | ||
2550 | its_vpe_db_proxy_map_locked(vpe); | |
2551 | ||
2552 | target_col = &vpe_proxy.dev->its->collections[to]; | |
2553 | its_send_movi(vpe_proxy.dev, target_col, vpe->vpe_proxy_event); | |
2554 | vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = to; | |
2555 | ||
2556 | raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags); | |
2557 | } | |
2558 | ||
3171a47a MZ |
2559 | static int its_vpe_set_affinity(struct irq_data *d, |
2560 | const struct cpumask *mask_val, | |
2561 | bool force) | |
2562 | { | |
2563 | struct its_vpe *vpe = irq_data_get_irq_chip_data(d); | |
2564 | int cpu = cpumask_first(mask_val); | |
2565 | ||
2566 | /* | |
2567 | * Changing affinity is mega expensive, so let's be as lazy as | |
20b3d54e | 2568 | * we can and only do it if we really have to. Also, if mapped |
958b90d1 MZ |
2569 | * into the proxy device, we need to move the doorbell |
2570 | * interrupt to its new location. | |
3171a47a MZ |
2571 | */ |
2572 | if (vpe->col_idx != cpu) { | |
958b90d1 MZ |
2573 | int from = vpe->col_idx; |
2574 | ||
3171a47a MZ |
2575 | vpe->col_idx = cpu; |
2576 | its_send_vmovp(vpe); | |
958b90d1 | 2577 | its_vpe_db_proxy_move(vpe, from, cpu); |
3171a47a MZ |
2578 | } |
2579 | ||
44c4c25e MZ |
2580 | irq_data_update_effective_affinity(d, cpumask_of(cpu)); |
2581 | ||
3171a47a MZ |
2582 | return IRQ_SET_MASK_OK_DONE; |
2583 | } | |
2584 | ||
e643d803 MZ |
2585 | static void its_vpe_schedule(struct its_vpe *vpe) |
2586 | { | |
50c33097 | 2587 | void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); |
e643d803 MZ |
2588 | u64 val; |
2589 | ||
2590 | /* Schedule the VPE */ | |
2591 | val = virt_to_phys(page_address(vpe->its_vm->vprop_page)) & | |
2592 | GENMASK_ULL(51, 12); | |
2593 | val |= (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK; | |
2594 | val |= GICR_VPROPBASER_RaWb; | |
2595 | val |= GICR_VPROPBASER_InnerShareable; | |
2596 | gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER); | |
2597 | ||
2598 | val = virt_to_phys(page_address(vpe->vpt_page)) & | |
2599 | GENMASK_ULL(51, 16); | |
2600 | val |= GICR_VPENDBASER_RaWaWb; | |
2601 | val |= GICR_VPENDBASER_NonShareable; | |
2602 | /* | |
2603 | * There is no good way of finding out if the pending table is | |
2604 | * empty as we can race against the doorbell interrupt very | |
2605 | * easily. So in the end, vpe->pending_last is only an | |
2606 | * indication that the vcpu has something pending, not one | |
2607 | * that the pending table is empty. A good implementation | |
2608 | * would be able to read its coarse map pretty quickly anyway, | |
2609 | * making this a tolerable issue. | |
2610 | */ | |
2611 | val |= GICR_VPENDBASER_PendingLast; | |
2612 | val |= vpe->idai ? GICR_VPENDBASER_IDAI : 0; | |
2613 | val |= GICR_VPENDBASER_Valid; | |
2614 | gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER); | |
2615 | } | |
2616 | ||
2617 | static void its_vpe_deschedule(struct its_vpe *vpe) | |
2618 | { | |
50c33097 | 2619 | void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); |
e643d803 MZ |
2620 | u32 count = 1000000; /* 1s! */ |
2621 | bool clean; | |
2622 | u64 val; | |
2623 | ||
2624 | /* We're being scheduled out */ | |
2625 | val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER); | |
2626 | val &= ~GICR_VPENDBASER_Valid; | |
2627 | gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER); | |
2628 | ||
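 | /* Poll (up to ~1s) until the redistributor clears GICR_VPENDBASER.Dirty. */ | |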
2629 | do { | |
2630 | val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER); | |
2631 | clean = !(val & GICR_VPENDBASER_Dirty); | |
2632 | if (!clean) { | |
2633 | count--; | |
2634 | cpu_relax(); | |
2635 | udelay(1); | |
2636 | } | |
2637 | } while (!clean && count); | |
2638 | ||
2639 | if (unlikely(!clean && !count)) { | |
2640 | pr_err_ratelimited("ITS virtual pending table not cleaning\n"); | |
2641 | vpe->idai = false; | |
2642 | vpe->pending_last = true; | |
2643 | } else { | |
2644 | vpe->idai = !!(val & GICR_VPENDBASER_IDAI); | |
2645 | vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast); | |
2646 | } | |
2647 | } | |
2648 | ||
40619a2e MZ |
2649 | static void its_vpe_invall(struct its_vpe *vpe) |
2650 | { | |
2651 | struct its_node *its; | |
2652 | ||
2653 | list_for_each_entry(its, &its_nodes, entry) { | |
2654 | if (!its->is_v4) | |
2655 | continue; | |
2656 | ||
2247e1bf MZ |
2657 | if (its_list_map && !vpe->its_vm->vlpi_count[its->list_nr]) |
2658 | continue; | |
2659 | ||
3c1cceeb MZ |
2660 | /* |
2661 | * Sending a VINVALL to a single ITS is enough, as all | |
2662 | * we need is to reach the redistributors. | |
2663 | */ | |
40619a2e | 2664 | its_send_vinvall(its, vpe); |
3c1cceeb | 2665 | return; |
40619a2e MZ |
2666 | } |
2667 | } | |
2668 | ||
e643d803 MZ |
2669 | static int its_vpe_set_vcpu_affinity(struct irq_data *d, void *vcpu_info) |
2670 | { | |
2671 | struct its_vpe *vpe = irq_data_get_irq_chip_data(d); | |
2672 | struct its_cmd_info *info = vcpu_info; | |
2673 | ||
2674 | switch (info->cmd_type) { | |
2675 | case SCHEDULE_VPE: | |
2676 | its_vpe_schedule(vpe); | |
2677 | return 0; | |
2678 | ||
2679 | case DESCHEDULE_VPE: | |
2680 | its_vpe_deschedule(vpe); | |
2681 | return 0; | |
2682 | ||
5e2f7642 | 2683 | case INVALL_VPE: |
40619a2e | 2684 | its_vpe_invall(vpe); |
5e2f7642 MZ |
2685 | return 0; |
2686 | ||
e643d803 MZ |
2687 | default: |
2688 | return -EINVAL; | |
2689 | } | |
2690 | } | |
2691 | ||
20b3d54e MZ |
2692 | static void its_vpe_send_cmd(struct its_vpe *vpe, |
2693 | void (*cmd)(struct its_device *, u32)) | |
2694 | { | |
2695 | unsigned long flags; | |
2696 | ||
2697 | raw_spin_lock_irqsave(&vpe_proxy.lock, flags); | |
2698 | ||
2699 | its_vpe_db_proxy_map_locked(vpe); | |
2700 | cmd(vpe_proxy.dev, vpe->vpe_proxy_event); | |
2701 | ||
2702 | raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags); | |
2703 | } | |
2704 | ||
f6a91da7 MZ |
2705 | static void its_vpe_send_inv(struct irq_data *d) |
2706 | { | |
2707 | struct its_vpe *vpe = irq_data_get_irq_chip_data(d); | |
f6a91da7 | 2708 | |
20b3d54e MZ |
2709 | if (gic_rdists->has_direct_lpi) { |
2710 | void __iomem *rdbase; | |
2711 | ||
2712 | rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base; | |
2713 | gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_INVLPIR); | |
2714 | while (gic_read_lpir(rdbase + GICR_SYNCR) & 1) | |
2715 | cpu_relax(); | |
2716 | } else { | |
2717 | its_vpe_send_cmd(vpe, its_send_inv); | |
2718 | } | |
f6a91da7 MZ |
2719 | } |
2720 | ||
2721 | static void its_vpe_mask_irq(struct irq_data *d) | |
2722 | { | |
2723 | /* | |
2724 | * We need to mask the LPI, which is described by the parent | |
2725 | * irq_data. Instead of calling into the parent (which wouldn't | |
2726 | * do exactly the right thing), let's simply use the | |
2727 | * parent_data pointer. Yes, I'm naughty. | |
2728 | */ | |
2729 | lpi_write_config(d->parent_data, LPI_PROP_ENABLED, 0); | |
2730 | its_vpe_send_inv(d); | |
2731 | } | |
2732 | ||
2733 | static void its_vpe_unmask_irq(struct irq_data *d) | |
2734 | { | |
2735 | /* Same hack as above... */ | |
2736 | lpi_write_config(d->parent_data, 0, LPI_PROP_ENABLED); | |
2737 | its_vpe_send_inv(d); | |
2738 | } | |
2739 | ||
e57a3e28 MZ |
2740 | static int its_vpe_set_irqchip_state(struct irq_data *d, |
2741 | enum irqchip_irq_state which, | |
2742 | bool state) | |
2743 | { | |
2744 | struct its_vpe *vpe = irq_data_get_irq_chip_data(d); | |
2745 | ||
2746 | if (which != IRQCHIP_STATE_PENDING) | |
2747 | return -EINVAL; | |
2748 | ||
2749 | if (gic_rdists->has_direct_lpi) { | |
2750 | void __iomem *rdbase; | |
2751 | ||
2752 | rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base; | |
2753 | if (state) { | |
2754 | gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_SETLPIR); | |
2755 | } else { | |
2756 | gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR); | |
2757 | while (gic_read_lpir(rdbase + GICR_SYNCR) & 1) | |
2758 | cpu_relax(); | |
2759 | } | |
2760 | } else { | |
2761 | if (state) | |
2762 | its_vpe_send_cmd(vpe, its_send_int); | |
2763 | else | |
2764 | its_vpe_send_cmd(vpe, its_send_clear); | |
2765 | } | |
2766 | ||
2767 | return 0; | |
2768 | } | |
2769 | ||
8fff27ae MZ |
2770 | static struct irq_chip its_vpe_irq_chip = { |
2771 | .name = "GICv4-vpe", | |
f6a91da7 MZ |
2772 | .irq_mask = its_vpe_mask_irq, |
2773 | .irq_unmask = its_vpe_unmask_irq, | |
2774 | .irq_eoi = irq_chip_eoi_parent, | |
3171a47a | 2775 | .irq_set_affinity = its_vpe_set_affinity, |
e57a3e28 | 2776 | .irq_set_irqchip_state = its_vpe_set_irqchip_state, |
e643d803 | 2777 | .irq_set_vcpu_affinity = its_vpe_set_vcpu_affinity, |
8fff27ae MZ |
2778 | }; |
2779 | ||
7d75bbb4 MZ |
2780 | static int its_vpe_id_alloc(void) |
2781 | { | |
32bd44dc | 2782 | return ida_simple_get(&its_vpeid_ida, 0, ITS_MAX_VPEID, GFP_KERNEL); |
7d75bbb4 MZ |
2783 | } |
2784 | ||
2785 | static void its_vpe_id_free(u16 id) | |
2786 | { | |
2787 | ida_simple_remove(&its_vpeid_ida, id); | |
2788 | } | |
2789 | ||
2790 | static int its_vpe_init(struct its_vpe *vpe) | |
2791 | { | |
2792 | struct page *vpt_page; | |
2793 | int vpe_id; | |
2794 | ||
2795 | /* Allocate vpe_id */ | |
2796 | vpe_id = its_vpe_id_alloc(); | |
2797 | if (vpe_id < 0) | |
2798 | return vpe_id; | |
2799 | ||
2800 | /* Allocate VPT */ | |
2801 | vpt_page = its_allocate_pending_table(GFP_KERNEL); | |
2802 | if (!vpt_page) { | |
2803 | its_vpe_id_free(vpe_id); | |
2804 | return -ENOMEM; | |
2805 | } | |
2806 | ||
2807 | if (!its_alloc_vpe_table(vpe_id)) { | |
2808 | its_vpe_id_free(vpe_id); | |
2809 | its_free_pending_table(vpt_page); | |
2810 | return -ENOMEM; | |
2811 | } | |
2812 | ||
2813 | vpe->vpe_id = vpe_id; | |
2814 | vpe->vpt_page = vpt_page; | |
20b3d54e | 2815 | vpe->vpe_proxy_event = -1; |
7d75bbb4 MZ |
2816 | |
2817 | return 0; | |
2818 | } | |
2819 | ||
2820 | static void its_vpe_teardown(struct its_vpe *vpe) | |
2821 | { | |
20b3d54e | 2822 | its_vpe_db_proxy_unmap(vpe); |
7d75bbb4 MZ |
2823 | its_vpe_id_free(vpe->vpe_id); |
2824 | its_free_pending_table(vpe->vpt_page); | |
2825 | } | |
2826 | ||
2827 | static void its_vpe_irq_domain_free(struct irq_domain *domain, | |
2828 | unsigned int virq, | |
2829 | unsigned int nr_irqs) | |
2830 | { | |
2831 | struct its_vm *vm = domain->host_data; | |
2832 | int i; | |
2833 | ||
2834 | irq_domain_free_irqs_parent(domain, virq, nr_irqs); | |
2835 | ||
2836 | for (i = 0; i < nr_irqs; i++) { | |
2837 | struct irq_data *data = irq_domain_get_irq_data(domain, | |
2838 | virq + i); | |
2839 | struct its_vpe *vpe = irq_data_get_irq_chip_data(data); | |
2840 | ||
2841 | BUG_ON(vm != vpe->its_vm); | |
2842 | ||
2843 | clear_bit(data->hwirq, vm->db_bitmap); | |
2844 | its_vpe_teardown(vpe); | |
2845 | irq_domain_reset_irq_data(data); | |
2846 | } | |
2847 | ||
2848 | if (bitmap_empty(vm->db_bitmap, vm->nr_db_lpis)) { | |
2849 | its_lpi_free_chunks(vm->db_bitmap, vm->db_lpi_base, vm->nr_db_lpis); | |
2850 | its_free_prop_table(vm->vprop_page); | |
2851 | } | |
2852 | } | |
2853 | ||
2854 | static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, | |
2855 | unsigned int nr_irqs, void *args) | |
2856 | { | |
2857 | struct its_vm *vm = args; | |
2858 | unsigned long *bitmap; | |
2859 | struct page *vprop_page; | |
2860 | int base, nr_ids, i, err = 0; | |
2861 | ||
2862 | BUG_ON(!vm); | |
2863 | ||
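 | /* One doorbell LPI per vPE, plus a shared vLPI property table for the VM. */ | |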
2864 | bitmap = its_lpi_alloc_chunks(nr_irqs, &base, &nr_ids); | |
2865 | if (!bitmap) | |
2866 | return -ENOMEM; | |
2867 | ||
2868 | if (nr_ids < nr_irqs) { | |
2869 | its_lpi_free_chunks(bitmap, base, nr_ids); | |
2870 | return -ENOMEM; | |
2871 | } | |
2872 | ||
2873 | vprop_page = its_allocate_prop_table(GFP_KERNEL); | |
2874 | if (!vprop_page) { | |
2875 | its_lpi_free_chunks(bitmap, base, nr_ids); | |
2876 | return -ENOMEM; | |
2877 | } | |
2878 | ||
2879 | vm->db_bitmap = bitmap; | |
2880 | vm->db_lpi_base = base; | |
2881 | vm->nr_db_lpis = nr_ids; | |
2882 | vm->vprop_page = vprop_page; | |
2883 | ||
2884 | for (i = 0; i < nr_irqs; i++) { | |
2885 | vm->vpes[i]->vpe_db_lpi = base + i; | |
2886 | err = its_vpe_init(vm->vpes[i]); | |
2887 | if (err) | |
2888 | break; | |
2889 | err = its_irq_gic_domain_alloc(domain, virq + i, | |
2890 | vm->vpes[i]->vpe_db_lpi); | |
2891 | if (err) | |
2892 | break; | |
2893 | irq_domain_set_hwirq_and_chip(domain, virq + i, i, | |
2894 | &its_vpe_irq_chip, vm->vpes[i]); | |
2895 | set_bit(i, bitmap); | |
2896 | } | |
2897 | ||
2898 | if (err) { | |
2899 | if (i > 0) | |
2900 | its_vpe_irq_domain_free(domain, virq, i); | |
2901 | ||
2902 | its_lpi_free_chunks(bitmap, base, nr_ids); | |
2903 | its_free_prop_table(vprop_page); | |
2904 | } | |
2905 | ||
2906 | return err; | |
2907 | } | |
2908 | ||
72491643 | 2909 | static int its_vpe_irq_domain_activate(struct irq_domain *domain, |
702cb0a0 | 2910 | struct irq_data *d, bool reserve) |
eb78192b MZ |
2911 | { |
2912 | struct its_vpe *vpe = irq_data_get_irq_chip_data(d); | |
40619a2e | 2913 | struct its_node *its; |
eb78192b | 2914 | |
2247e1bf MZ |
2915 | /* If we use the list map, we issue VMAPP on demand... */ |
2916 | if (its_list_map) | |
6ef930f2 | 2917 | return 0; |
eb78192b MZ |
2918 | |
2919 | /* Map the VPE to the first possible CPU */ | |
2920 | vpe->col_idx = cpumask_first(cpu_online_mask); | |
40619a2e MZ |
2921 | |
2922 | list_for_each_entry(its, &its_nodes, entry) { | |
2923 | if (!its->is_v4) | |
2924 | continue; | |
2925 | ||
75fd951b | 2926 | its_send_vmapp(its, vpe, true); |
40619a2e MZ |
2927 | its_send_vinvall(its, vpe); |
2928 | } | |
2929 | ||
44c4c25e MZ |
2930 | irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx)); |
2931 | ||
72491643 | 2932 | return 0; |
eb78192b MZ |
2933 | } |
2934 | ||
2935 | static void its_vpe_irq_domain_deactivate(struct irq_domain *domain, | |
2936 | struct irq_data *d) | |
2937 | { | |
2938 | struct its_vpe *vpe = irq_data_get_irq_chip_data(d); | |
75fd951b MZ |
2939 | struct its_node *its; |
2940 | ||
2247e1bf MZ |
2941 | /* |
2942 | * If we use the list map, we unmap the VPE once no VLPIs are | |
2943 | * associated with the VM. | |
2944 | */ | |
2945 | if (its_list_map) | |
2946 | return; | |
eb78192b | 2947 | |
75fd951b MZ |
2948 | list_for_each_entry(its, &its_nodes, entry) { |
2949 | if (!its->is_v4) | |
2950 | continue; | |
eb78192b | 2951 | |
75fd951b MZ |
2952 | its_send_vmapp(its, vpe, false); |
2953 | } | |
eb78192b MZ |
2954 | } |
2955 | ||
8fff27ae | 2956 | static const struct irq_domain_ops its_vpe_domain_ops = { |
7d75bbb4 MZ |
2957 | .alloc = its_vpe_irq_domain_alloc, |
2958 | .free = its_vpe_irq_domain_free, | |
eb78192b MZ |
2959 | .activate = its_vpe_irq_domain_activate, |
2960 | .deactivate = its_vpe_irq_domain_deactivate, | |
8fff27ae MZ |
2961 | }; |
2962 | ||
4559fbb3 YW |
2963 | static int its_force_quiescent(void __iomem *base) |
2964 | { | |
2965 | u32 count = 1000000; /* 1s */ | |
2966 | u32 val; | |
2967 | ||
2968 | val = readl_relaxed(base + GITS_CTLR); | |
7611da86 DD |
2969 | /* |
2970 | * GIC architecture specification requires the ITS to be both | |
2971 | * disabled and quiescent for writes to GITS_BASER<n> or | |
2972 | * GITS_CBASER to not have UNPREDICTABLE results. | |
2973 | */ | |
2974 | if ((val & GITS_CTLR_QUIESCENT) && !(val & GITS_CTLR_ENABLE)) | |
4559fbb3 YW |
2975 | return 0; |
2976 | ||
2977 | /* Disable the generation of all interrupts to this ITS */ | |
d51c4b4d | 2978 | val &= ~(GITS_CTLR_ENABLE | GITS_CTLR_ImDe); |
4559fbb3 YW |
2979 | writel_relaxed(val, base + GITS_CTLR); |
2980 | ||
2981 | /* Poll GITS_CTLR and wait until ITS becomes quiescent */ | |
2982 | while (1) { | |
2983 | val = readl_relaxed(base + GITS_CTLR); | |
2984 | if (val & GITS_CTLR_QUIESCENT) | |
2985 | return 0; | |
2986 | ||
2987 | count--; | |
2988 | if (!count) | |
2989 | return -EBUSY; | |
2990 | ||
2991 | cpu_relax(); | |
2992 | udelay(1); | |
2993 | } | |
2994 | } | |
2995 | ||
9d111d49 | 2996 | static bool __maybe_unused its_enable_quirk_cavium_22375(void *data) |
94100970 RR |
2997 | { |
2998 | struct its_node *its = data; | |
2999 | ||
fa150019 AB |
3000 | /* erratum 22375: only alloc 8MB table size */ |
3001 | its->device_ids = 0x14; /* 20 bits, 8MB */ | |
94100970 | 3002 | its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375; |
9d111d49 AB |
3003 | |
3004 | return true; | |
94100970 RR |
3005 | } |
3006 | ||
9d111d49 | 3007 | static bool __maybe_unused its_enable_quirk_cavium_23144(void *data) |
fbf8f40e GK |
3008 | { |
3009 | struct its_node *its = data; | |
3010 | ||
3011 | its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144; | |
9d111d49 AB |
3012 | |
3013 | return true; | |
fbf8f40e GK |
3014 | } |
3015 | ||
9d111d49 | 3016 | static bool __maybe_unused its_enable_quirk_qdf2400_e0065(void *data) |
90922a2d SD |
3017 | { |
3018 | struct its_node *its = data; | |
3019 | ||
3020 | /* On QDF2400, the size of the ITE is 16Bytes */ | |
3021 | its->ite_size = 16; | |
9d111d49 AB |
3022 | |
3023 | return true; | |
90922a2d SD |
3024 | } |
3025 | ||
558b0165 AB |
3026 | static u64 its_irq_get_msi_base_pre_its(struct its_device *its_dev) |
3027 | { | |
3028 | struct its_node *its = its_dev->its; | |
3029 | ||
3030 | /* | |
3031 | * The Socionext Synquacer SoC has a so-called 'pre-ITS', | |
3032 | * which maps 32-bit writes targeted at a separate window of | |
3033 | * size '4 << device_id_bits' onto writes to GITS_TRANSLATER | |
3034 | * with device ID taken from bits [device_id_bits + 1:2] of | |
3035 | * the window offset. | |
3036 | */ | |
3037 | return its->pre_its_base + (its_dev->device_id << 2); | |
3038 | } | |
3039 | ||
3040 | static bool __maybe_unused its_enable_quirk_socionext_synquacer(void *data) | |
3041 | { | |
3042 | struct its_node *its = data; | |
3043 | u32 pre_its_window[2]; | |
3044 | u32 ids; | |
3045 | ||
3046 | if (!fwnode_property_read_u32_array(its->fwnode_handle, | |
3047 | "socionext,synquacer-pre-its", | |
3048 | pre_its_window, | |
3049 | ARRAY_SIZE(pre_its_window))) { | |
3050 | ||
3051 | its->pre_its_base = pre_its_window[0]; | |
3052 | its->get_msi_base = its_irq_get_msi_base_pre_its; | |
3053 | ||
3054 | ids = ilog2(pre_its_window[1]) - 2; | |
3055 | if (its->device_ids > ids) | |
3056 | its->device_ids = ids; | |
3057 | ||
3058 | /* the pre-ITS breaks isolation, so disable MSI remapping */ | |
3059 | its->msi_domain_flags &= ~IRQ_DOMAIN_FLAG_MSI_REMAP; | |
3060 | return true; | |
3061 | } | |
3062 | return false; | |
3063 | } | |
3064 | ||
5c9a882e MZ |
3065 | static bool __maybe_unused its_enable_quirk_hip07_161600802(void *data) |
3066 | { | |
3067 | struct its_node *its = data; | |
3068 | ||
3069 | /* | |
3070 | * Hip07 insists on using the wrong address for the VLPI | |
3071 | * page. Trick it into doing the right thing... | |
3072 | */ | |
3073 | its->vlpi_redist_offset = SZ_128K; | |
3074 | return true; | |
90922a2d SD |
3075 | } |
3076 | ||
67510cca | 3077 | static const struct gic_quirk its_quirks[] = { |
94100970 RR |
3078 | #ifdef CONFIG_CAVIUM_ERRATUM_22375 |
3079 | { | |
3080 | .desc = "ITS: Cavium errata 22375, 24313", | |
3081 | .iidr = 0xa100034c, /* ThunderX pass 1.x */ | |
3082 | .mask = 0xffff0fff, | |
3083 | .init = its_enable_quirk_cavium_22375, | |
3084 | }, | |
fbf8f40e GK |
3085 | #endif |
3086 | #ifdef CONFIG_CAVIUM_ERRATUM_23144 | |
3087 | { | |
3088 | .desc = "ITS: Cavium erratum 23144", | |
3089 | .iidr = 0xa100034c, /* ThunderX pass 1.x */ | |
3090 | .mask = 0xffff0fff, | |
3091 | .init = its_enable_quirk_cavium_23144, | |
3092 | }, | |
90922a2d SD |
3093 | #endif |
3094 | #ifdef CONFIG_QCOM_QDF2400_ERRATUM_0065 | |
3095 | { | |
3096 | .desc = "ITS: QDF2400 erratum 0065", | |
3097 | .iidr = 0x00001070, /* QDF2400 ITS rev 1.x */ | |
3098 | .mask = 0xffffffff, | |
3099 | .init = its_enable_quirk_qdf2400_e0065, | |
3100 | }, | |
558b0165 AB |
3101 | #endif |
3102 | #ifdef CONFIG_SOCIONEXT_SYNQUACER_PREITS | |
3103 | { | |
3104 | /* | |
3105 | * The Socionext Synquacer SoC incorporates ARM's own GIC-500 | |
3106 | * implementation, but with a 'pre-ITS' added that requires | |
3107 | * special handling in software. | |
3108 | */ | |
3109 | .desc = "ITS: Socionext Synquacer pre-ITS", | |
3110 | .iidr = 0x0001143b, | |
3111 | .mask = 0xffffffff, | |
3112 | .init = its_enable_quirk_socionext_synquacer, | |
3113 | }, | |
5c9a882e MZ |
3114 | #endif |
3115 | #ifdef CONFIG_HISILICON_ERRATUM_161600802 | |
3116 | { | |
3117 | .desc = "ITS: Hip07 erratum 161600802", | |
3118 | .iidr = 0x00000004, | |
3119 | .mask = 0xffffffff, | |
3120 | .init = its_enable_quirk_hip07_161600802, | |
3121 | }, | |
94100970 | 3122 | #endif |
67510cca RR |
3123 | { |
3124 | } | |
3125 | }; | |
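/*
 * Quirk matching sketch: its_enable_quirks() reads GITS_IIDR and an
 * entry fires when (iidr & mask) equals the entry's iidr value. For
 * instance, a ThunderX pass 1.x part might report an IIDR such as
 * 0xa100134c (example value); masking with 0xffff0fff drops the
 * revision field and yields 0xa100034c, matching the Cavium entries
 * above regardless of minor revision.
 */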
3126 | ||
3127 | static void its_enable_quirks(struct its_node *its) | |
3128 | { | |
3129 | u32 iidr = readl_relaxed(its->base + GITS_IIDR); | |
3130 | ||
3131 | gic_enable_quirks(iidr, its_quirks, its); | |
3132 | } | |
3133 | ||
dba0bc7b DB |
3134 | static int its_save_disable(void) |
3135 | { | |
3136 | struct its_node *its; | |
3137 | int err = 0; | |
3138 | ||
3139 | spin_lock(&its_lock); | |
3140 | list_for_each_entry(its, &its_nodes, entry) { | |
3141 | void __iomem *base; | |
3142 | ||
3143 | if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE)) | |
3144 | continue; | |
3145 | ||
3146 | base = its->base; | |
3147 | its->ctlr_save = readl_relaxed(base + GITS_CTLR); | |
3148 | err = its_force_quiescent(base); | |
3149 | if (err) { | |
3150 | pr_err("ITS@%pa: failed to quiesce: %d\n", | |
3151 | &its->phys_base, err); | |
3152 | writel_relaxed(its->ctlr_save, base + GITS_CTLR); | |
3153 | goto err; | |
3154 | } | |
3155 | ||
3156 | its->cbaser_save = gits_read_cbaser(base + GITS_CBASER); | |
3157 | } | |
3158 | ||
3159 | err: | |
3160 | if (err) { | |
3161 | list_for_each_entry_continue_reverse(its, &its_nodes, entry) { | |
3162 | void __iomem *base; | |
3163 | ||
3164 | if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE)) | |
3165 | continue; | |
3166 | ||
3167 | base = its->base; | |
3168 | writel_relaxed(its->ctlr_save, base + GITS_CTLR); | |
3169 | } | |
3170 | } | |
3171 | spin_unlock(&its_lock); | |
3172 | ||
3173 | return err; | |
3174 | } | |
3175 | ||
3176 | static void its_restore_enable(void) | |
3177 | { | |
3178 | struct its_node *its; | |
3179 | int ret; | |
3180 | ||
3181 | spin_lock(&its_lock); | |
3182 | list_for_each_entry(its, &its_nodes, entry) { | |
3183 | void __iomem *base; | |
3184 | int i; | |
3185 | ||
3186 | if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE)) | |
3187 | continue; | |
3188 | ||
3189 | base = its->base; | |
3190 | ||
3191 | /* | |
3192 | * Make sure that the ITS is disabled. If it fails to quiesce, | |
3193 | * don't restore it since writing to CBASER or BASER<n> | |
3194 | * registers is undefined according to the GIC v3 ITS | |
3195 | * Specification. | |
3196 | */ | |
3197 | ret = its_force_quiescent(base); | |
3198 | if (ret) { | |
3199 | pr_err("ITS@%pa: failed to quiesce on resume: %d\n", | |
3200 | &its->phys_base, ret); | |
3201 | continue; | |
3202 | } | |
3203 | ||
3204 | gits_write_cbaser(its->cbaser_save, base + GITS_CBASER); | |
3205 | ||
3206 | /* | |
3207 | * Writing CBASER resets CREADR to 0, so make CWRITER and | |
3208 | * cmd_write line up with it. | |
3209 | */ | |
3210 | its->cmd_write = its->cmd_base; | |
3211 | gits_write_cwriter(0, base + GITS_CWRITER); | |
3212 | ||
3213 | /* Restore GITS_BASER from the value cache. */ | |
3214 | for (i = 0; i < GITS_BASER_NR_REGS; i++) { | |
3215 | struct its_baser *baser = &its->tables[i]; | |
3216 | ||
3217 | if (!(baser->val & GITS_BASER_VALID)) | |
3218 | continue; | |
3219 | ||
3220 | its_write_baser(its, baser, baser->val); | |
3221 | } | |
3222 | writel_relaxed(its->ctlr_save, base + GITS_CTLR); | |
920181ce DB |
3223 | |
3224 | /* | |
3225 | * Reinit the collection if it's stored in the ITS. This is | |
3226 | * indicated by the col_id being less than the HCC field. | |
3227 | * CID < HCC as specified in the GIC v3 Documentation. | |
3228 | */ | |
3229 | if (its->collections[smp_processor_id()].col_id < | |
3230 | GITS_TYPER_HCC(gic_read_typer(base + GITS_TYPER))) | |
3231 | its_cpu_init_collection(its); | |
dba0bc7b DB |
3232 | } |
3233 | spin_unlock(&its_lock); | |
3234 | } | |
3235 | ||
3236 | static struct syscore_ops its_syscore_ops = { | |
3237 | .suspend = its_save_disable, | |
3238 | .resume = its_restore_enable, | |
3239 | }; | |
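/*
 * Suspend/resume ordering recap, derived from the two helpers above:
 * on suspend the ITS is quiesced and GITS_CTLR/GITS_CBASER are saved;
 * on resume the ITS is quiesced again, CBASER is restored (which
 * resets CREADR), CWRITER and cmd_write are rewound to the start of
 * the command queue, the cached GITS_BASERn values are replayed, and
 * only then is the saved GITS_CTLR written back to re-enable the ITS.
 */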
3240 | ||
db40f0a7 | 3241 | static int its_init_domain(struct fwnode_handle *handle, struct its_node *its) |
d14ae5e6 TN |
3242 | { |
3243 | struct irq_domain *inner_domain; | |
3244 | struct msi_domain_info *info; | |
3245 | ||
3246 | info = kzalloc(sizeof(*info), GFP_KERNEL); | |
3247 | if (!info) | |
3248 | return -ENOMEM; | |
3249 | ||
db40f0a7 | 3250 | inner_domain = irq_domain_create_tree(handle, &its_domain_ops, its); |
d14ae5e6 TN |
3251 | if (!inner_domain) { |
3252 | kfree(info); | |
3253 | return -ENOMEM; | |
3254 | } | |
3255 | ||
db40f0a7 | 3256 | inner_domain->parent = its_parent; |
96f0d93a | 3257 | irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS); |
558b0165 | 3258 | inner_domain->flags |= its->msi_domain_flags; |
d14ae5e6 TN |
3259 | info->ops = &its_msi_domain_ops; |
3260 | info->data = its; | |
3261 | inner_domain->host_data = info; | |
3262 | ||
3263 | return 0; | |
3264 | } | |
3265 | ||
8fff27ae MZ |
3266 | static int its_init_vpe_domain(void) |
3267 | { | |
20b3d54e MZ |
3268 | struct its_node *its; |
3269 | u32 devid; | |
3270 | int entries; | |
3271 | ||
3272 | if (gic_rdists->has_direct_lpi) { | |
3273 | pr_info("ITS: Using DirectLPI for VPE invalidation\n"); | |
3274 | return 0; | |
3275 | } | |
3276 | ||
3277 | /* Any ITS will do, even if not v4 */ | |
3278 | its = list_first_entry(&its_nodes, struct its_node, entry); | |
3279 | ||
3280 | entries = roundup_pow_of_two(nr_cpu_ids); | |
6396bb22 | 3281 | vpe_proxy.vpes = kcalloc(entries, sizeof(*vpe_proxy.vpes), |
20b3d54e MZ |
3282 | GFP_KERNEL); |
3283 | if (!vpe_proxy.vpes) { | |
3284 | pr_err("ITS: Can't allocate GICv4 proxy device array\n"); | |
3285 | return -ENOMEM; | |
3286 | } | |
3287 | ||
3288 | /* Use the last possible DevID */ | |
3289 | devid = GENMASK(its->device_ids - 1, 0); | |
3290 | vpe_proxy.dev = its_create_device(its, devid, entries, false); | |
3291 | if (!vpe_proxy.dev) { | |
3292 | kfree(vpe_proxy.vpes); | |
3293 | pr_err("ITS: Can't allocate GICv4 proxy device\n"); | |
3294 | return -ENOMEM; | |
3295 | } | |
3296 | ||
c427a475 | 3297 | BUG_ON(entries > vpe_proxy.dev->nr_ites); |
20b3d54e MZ |
3298 | |
3299 | raw_spin_lock_init(&vpe_proxy.lock); | |
3300 | vpe_proxy.next_victim = 0; | |
3301 | pr_info("ITS: Allocated DevID %x as GICv4 proxy device (%d slots)\n", | |
3302 | devid, vpe_proxy.dev->nr_ites); | |
3303 | ||
8fff27ae MZ |
3304 | return 0; |
3305 | } | |
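/*
 * Example of the proxy DevID choice above (illustrative): with
 * device_ids = 20, GENMASK(19, 0) = 0xfffff, i.e. the last DeviceID
 * the ITS can accept, as noted in the "Use the last possible DevID"
 * comment.
 */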
3306 | ||
3dfa576b MZ |
3307 | static int __init its_compute_its_list_map(struct resource *res, |
3308 | void __iomem *its_base) | |
3309 | { | |
3310 | int its_number; | |
3311 | u32 ctlr; | |
3312 | ||
3313 | /* | |
3314 | * This is assumed to be done early enough that we're | |
3315 | * guaranteed to be single-threaded, hence no | |
3316 | * locking. Should this change, we should address | |
3317 | * this. | |
3318 | */ | |
ab60491e MZ |
3319 | its_number = find_first_zero_bit(&its_list_map, GICv4_ITS_LIST_MAX); |
3320 | if (its_number >= GICv4_ITS_LIST_MAX) { | |
3dfa576b MZ |
3321 | pr_err("ITS@%pa: No ITSList entry available!\n", |
3322 | &res->start); | |
3323 | return -EINVAL; | |
3324 | } | |
3325 | ||
3326 | ctlr = readl_relaxed(its_base + GITS_CTLR); | |
3327 | ctlr &= ~GITS_CTLR_ITS_NUMBER; | |
3328 | ctlr |= its_number << GITS_CTLR_ITS_NUMBER_SHIFT; | |
3329 | writel_relaxed(ctlr, its_base + GITS_CTLR); | |
3330 | ctlr = readl_relaxed(its_base + GITS_CTLR); | |
3331 | if ((ctlr & GITS_CTLR_ITS_NUMBER) != (its_number << GITS_CTLR_ITS_NUMBER_SHIFT)) { | |
3332 | its_number = ctlr & GITS_CTLR_ITS_NUMBER; | |
3333 | its_number >>= GITS_CTLR_ITS_NUMBER_SHIFT; | |
3334 | } | |
3335 | ||
3336 | if (test_and_set_bit(its_number, &its_list_map)) { | |
3337 | pr_err("ITS@%pa: Duplicate ITSList entry %d\n", | |
3338 | &res->start, its_number); | |
3339 | return -EINVAL; | |
3340 | } | |
3341 | ||
3342 | return its_number; | |
3343 | } | |
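/*
 * ITSList sketch: the first free slot is programmed into the
 * ITS_Number field of GITS_CTLR and read back; if the field turns out
 * to be read-only, whatever number the hardware reports is adopted
 * instead, and claiming a slot that is already taken is treated as an
 * error.
 */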
3344 | ||
db40f0a7 TN |
3345 | static int __init its_probe_one(struct resource *res, |
3346 | struct fwnode_handle *handle, int numa_node) | |
4c21f3c2 | 3347 | { |
4c21f3c2 MZ |
3348 | struct its_node *its; |
3349 | void __iomem *its_base; | |
3dfa576b MZ |
3350 | u32 val, ctlr; |
3351 | u64 baser, tmp, typer; | |
4c21f3c2 MZ |
3352 | int err; |
3353 | ||
db40f0a7 | 3354 | its_base = ioremap(res->start, resource_size(res)); |
4c21f3c2 | 3355 | if (!its_base) { |
db40f0a7 | 3356 | pr_warn("ITS@%pa: Unable to map ITS registers\n", &res->start); |
4c21f3c2 MZ |
3357 | return -ENOMEM; |
3358 | } | |
3359 | ||
3360 | val = readl_relaxed(its_base + GITS_PIDR2) & GIC_PIDR2_ARCH_MASK; | |
3361 | if (val != 0x30 && val != 0x40) { | |
db40f0a7 | 3362 | pr_warn("ITS@%pa: No ITS detected, giving up\n", &res->start); |
4c21f3c2 MZ |
3363 | err = -ENODEV; |
3364 | goto out_unmap; | |
3365 | } | |
3366 | ||
4559fbb3 YW |
3367 | err = its_force_quiescent(its_base); |
3368 | if (err) { | |
db40f0a7 | 3369 | pr_warn("ITS@%pa: Failed to quiesce, giving up\n", &res->start); |
4559fbb3 YW |
3370 | goto out_unmap; |
3371 | } | |
3372 | ||
db40f0a7 | 3373 | pr_info("ITS %pR\n", res); |
4c21f3c2 MZ |
3374 | |
3375 | its = kzalloc(sizeof(*its), GFP_KERNEL); | |
3376 | if (!its) { | |
3377 | err = -ENOMEM; | |
3378 | goto out_unmap; | |
3379 | } | |
3380 | ||
3381 | raw_spin_lock_init(&its->lock); | |
3382 | INIT_LIST_HEAD(&its->entry); | |
3383 | INIT_LIST_HEAD(&its->its_device_list); | |
3dfa576b | 3384 | typer = gic_read_typer(its_base + GITS_TYPER); |
4c21f3c2 | 3385 | its->base = its_base; |
db40f0a7 | 3386 | its->phys_base = res->start; |
3dfa576b | 3387 | its->ite_size = GITS_TYPER_ITT_ENTRY_SIZE(typer); |
fa150019 | 3388 | its->device_ids = GITS_TYPER_DEVBITS(typer); |
3dfa576b MZ |
3389 | its->is_v4 = !!(typer & GITS_TYPER_VLPIS); |
3390 | if (its->is_v4) { | |
3391 | if (!(typer & GITS_TYPER_VMOVP)) { | |
3392 | err = its_compute_its_list_map(res, its_base); | |
3393 | if (err < 0) | |
3394 | goto out_free_its; | |
3395 | ||
debf6d02 MZ |
3396 | its->list_nr = err; |
3397 | ||
3dfa576b MZ |
3398 | pr_info("ITS@%pa: Using ITS number %d\n", |
3399 | &res->start, err); | |
3400 | } else { | |
3401 | pr_info("ITS@%pa: Single VMOVP capable\n", &res->start); | |
3402 | } | |
3403 | } | |
3404 | ||
db40f0a7 | 3405 | its->numa_node = numa_node; |
4c21f3c2 | 3406 | |
5bc13c2c RR |
3407 | its->cmd_base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, |
3408 | get_order(ITS_CMD_QUEUE_SZ)); | |
4c21f3c2 MZ |
3409 | if (!its->cmd_base) { |
3410 | err = -ENOMEM; | |
3411 | goto out_free_its; | |
3412 | } | |
3413 | its->cmd_write = its->cmd_base; | |
558b0165 AB |
3414 | its->fwnode_handle = handle; |
3415 | its->get_msi_base = its_irq_get_msi_base; | |
3416 | its->msi_domain_flags = IRQ_DOMAIN_FLAG_MSI_REMAP; | |
4c21f3c2 | 3417 | |
67510cca RR |
3418 | its_enable_quirks(its); |
3419 | ||
0e0b0f69 | 3420 | err = its_alloc_tables(its); |
4c21f3c2 MZ |
3421 | if (err) |
3422 | goto out_free_cmd; | |
3423 | ||
3424 | err = its_alloc_collections(its); | |
3425 | if (err) | |
3426 | goto out_free_tables; | |
3427 | ||
3428 | baser = (virt_to_phys(its->cmd_base) | | |
2fd632a0 | 3429 | GITS_CBASER_RaWaWb | |
4c21f3c2 MZ |
3430 | GITS_CBASER_InnerShareable | |
3431 | (ITS_CMD_QUEUE_SZ / SZ_4K - 1) | | |
3432 | GITS_CBASER_VALID); | |
3433 | ||
0968a619 VM |
3434 | gits_write_cbaser(baser, its->base + GITS_CBASER); |
3435 | tmp = gits_read_cbaser(its->base + GITS_CBASER); | |
4c21f3c2 | 3436 | |
4ad3e363 | 3437 | if ((tmp ^ baser) & GITS_CBASER_SHAREABILITY_MASK) { |
241a386c MZ |
3438 | if (!(tmp & GITS_CBASER_SHAREABILITY_MASK)) { |
3439 | /* | |
3440 | * The HW reports non-shareable, we must | |
3441 | * remove the cacheability attributes as | |
3442 | * well. | |
3443 | */ | |
3444 | baser &= ~(GITS_CBASER_SHAREABILITY_MASK | | |
3445 | GITS_CBASER_CACHEABILITY_MASK); | |
3446 | baser |= GITS_CBASER_nC; | |
0968a619 | 3447 | gits_write_cbaser(baser, its->base + GITS_CBASER); |
241a386c | 3448 | } |
4c21f3c2 MZ |
3449 | pr_info("ITS: using cache flushing for cmd queue\n"); |
3450 | its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING; | |
3451 | } | |
3452 | ||
0968a619 | 3453 | gits_write_cwriter(0, its->base + GITS_CWRITER); |
3dfa576b | 3454 | ctlr = readl_relaxed(its->base + GITS_CTLR); |
d51c4b4d MZ |
3455 | ctlr |= GITS_CTLR_ENABLE; |
3456 | if (its->is_v4) | |
3457 | ctlr |= GITS_CTLR_ImDe; | |
3458 | writel_relaxed(ctlr, its->base + GITS_CTLR); | |
241a386c | 3459 | |
dba0bc7b DB |
3460 | if (GITS_TYPER_HCC(typer)) |
3461 | its->flags |= ITS_FLAGS_SAVE_SUSPEND_STATE; | |
3462 | ||
db40f0a7 | 3463 | err = its_init_domain(handle, its); |
d14ae5e6 TN |
3464 | if (err) |
3465 | goto out_free_tables; | |
4c21f3c2 MZ |
3466 | |
3467 | spin_lock(&its_lock); | |
3468 | list_add(&its->entry, &its_nodes); | |
3469 | spin_unlock(&its_lock); | |
3470 | ||
3471 | return 0; | |
3472 | ||
4c21f3c2 MZ |
3473 | out_free_tables: |
3474 | its_free_tables(its); | |
3475 | out_free_cmd: | |
5bc13c2c | 3476 | free_pages((unsigned long)its->cmd_base, get_order(ITS_CMD_QUEUE_SZ)); |
4c21f3c2 MZ |
3477 | out_free_its: |
3478 | kfree(its); | |
3479 | out_unmap: | |
3480 | iounmap(its_base); | |
db40f0a7 | 3481 | pr_err("ITS@%pa: failed probing (%d)\n", &res->start, err); |
4c21f3c2 MZ |
3482 | return err; |
3483 | } | |
3484 | ||
3485 | static bool gic_rdists_supports_plpis(void) | |
3486 | { | |
589ce5f4 | 3487 | return !!(gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER) & GICR_TYPER_PLPIS); |
4c21f3c2 MZ |
3488 | } |
3489 | ||
6eb486b6 SD |
3490 | static int redist_disable_lpis(void) |
3491 | { | |
3492 | void __iomem *rbase = gic_data_rdist_rd_base(); | |
3493 | u64 timeout = USEC_PER_SEC; | |
3494 | u64 val; | |
3495 | ||
82f499c8 MZ |
3496 | /* |
3497 | * If coming via a CPU hotplug event, we don't need to disable | |
3498 | * LPIs before trying to re-enable them. They are already | |
3499 | * configured and all is well in the world. Detect this case | |
3500 | * by checking the allocation of the pending table for the | |
3501 | * current CPU. | |
3502 | */ | |
3503 | if (gic_data_rdist()->pend_page) | |
3504 | return 0; | |
3505 | ||
6eb486b6 SD |
3506 | if (!gic_rdists_supports_plpis()) { |
3507 | pr_info("CPU%d: LPIs not supported\n", smp_processor_id()); | |
3508 | return -ENXIO; | |
3509 | } | |
3510 | ||
3511 | val = readl_relaxed(rbase + GICR_CTLR); | |
3512 | if (!(val & GICR_CTLR_ENABLE_LPIS)) | |
3513 | return 0; | |
3514 | ||
3515 | pr_warn("CPU%d: Booted with LPIs enabled, memory probably corrupted\n", | |
3516 | smp_processor_id()); | |
3517 | add_taint(TAINT_CRAP, LOCKDEP_STILL_OK); | |
3518 | ||
3519 | /* Disable LPIs */ | |
3520 | val &= ~GICR_CTLR_ENABLE_LPIS; | |
3521 | writel_relaxed(val, rbase + GICR_CTLR); | |
3522 | ||
3523 | /* Make sure any change to GICR_CTLR is observable by the GIC */ | |
3524 | dsb(sy); | |
3525 | ||
3526 | /* | |
3527 | * Software must observe RWP==0 after clearing GICR_CTLR.EnableLPIs | |
3528 | * from 1 to 0 before programming GICR_PEND{PROP}BASER registers. | |
3529 | * Error out if we time out waiting for RWP to clear. | |
3530 | */ | |
3531 | while (readl_relaxed(rbase + GICR_CTLR) & GICR_CTLR_RWP) { | |
3532 | if (!timeout) { | |
3533 | pr_err("CPU%d: Timeout while disabling LPIs\n", | |
3534 | smp_processor_id()); | |
3535 | return -ETIMEDOUT; | |
3536 | } | |
3537 | udelay(1); | |
3538 | timeout--; | |
3539 | } | |
3540 | ||
3541 | /* | |
3542 | * After it has been written to 1, it is IMPLEMENTATION | |
3543 | * DEFINED whether GICR_CTLR.EnableLPIs becomes RES1 or can be |
3544 | * cleared to 0. Error out if clearing the bit failed. | |
3545 | */ | |
3546 | if (readl_relaxed(rbase + GICR_CTLR) & GICR_CTLR_ENABLE_LPIS) { | |
3547 | pr_err("CPU%d: Failed to disable LPIs\n", smp_processor_id()); | |
3548 | return -EBUSY; | |
3549 | } | |
3550 | ||
3551 | return 0; | |
3552 | } | |
3553 | ||
4c21f3c2 MZ |
3554 | int its_cpu_init(void) |
3555 | { | |
4c21f3c2 | 3556 | if (!list_empty(&its_nodes)) { |
6eb486b6 SD |
3557 | int ret; |
3558 | ||
3559 | ret = redist_disable_lpis(); | |
3560 | if (ret) | |
3561 | return ret; | |
3562 | ||
4c21f3c2 | 3563 | its_cpu_init_lpis(); |
920181ce | 3564 | its_cpu_init_collections(); |
4c21f3c2 MZ |
3565 | } |
3566 | ||
3567 | return 0; | |
3568 | } | |
3569 | ||
935bba7c | 3570 | static const struct of_device_id its_device_id[] = { |
4c21f3c2 MZ |
3571 | { .compatible = "arm,gic-v3-its", }, |
3572 | {}, | |
3573 | }; | |
3574 | ||
db40f0a7 | 3575 | static int __init its_of_probe(struct device_node *node) |
4c21f3c2 MZ |
3576 | { |
3577 | struct device_node *np; | |
db40f0a7 | 3578 | struct resource res; |
4c21f3c2 MZ |
3579 | |
3580 | for (np = of_find_matching_node(node, its_device_id); np; | |
3581 | np = of_find_matching_node(np, its_device_id)) { | |
95a25625 SB |
3582 | if (!of_device_is_available(np)) |
3583 | continue; | |
d14ae5e6 | 3584 | if (!of_property_read_bool(np, "msi-controller")) { |
e81f54c6 RH |
3585 | pr_warn("%pOF: no msi-controller property, ITS ignored\n", |
3586 | np); | |
d14ae5e6 TN |
3587 | continue; |
3588 | } | |
3589 | ||
db40f0a7 | 3590 | if (of_address_to_resource(np, 0, &res)) { |
e81f54c6 | 3591 | pr_warn("%pOF: no regs?\n", np); |
db40f0a7 TN |
3592 | continue; |
3593 | } | |
3594 | ||
3595 | its_probe_one(&res, &np->fwnode, of_node_to_nid(np)); | |
4c21f3c2 | 3596 | } |
db40f0a7 TN |
3597 | return 0; |
3598 | } | |
3599 | ||
3f010cf1 TN |
3600 | #ifdef CONFIG_ACPI |
3601 | ||
3602 | #define ACPI_GICV3_ITS_MEM_SIZE (SZ_128K) | |
3603 | ||
d1ce263f | 3604 | #ifdef CONFIG_ACPI_NUMA |
dbd2b826 GK |
3605 | struct its_srat_map { |
3606 | /* numa node id */ | |
3607 | u32 numa_node; | |
3608 | /* GIC ITS ID */ | |
3609 | u32 its_id; | |
3610 | }; | |
3611 | ||
fdf6e7a8 | 3612 | static struct its_srat_map *its_srat_maps __initdata; |
dbd2b826 GK |
3613 | static int its_in_srat __initdata; |
3614 | ||
3615 | static int __init acpi_get_its_numa_node(u32 its_id) | |
3616 | { | |
3617 | int i; | |
3618 | ||
3619 | for (i = 0; i < its_in_srat; i++) { | |
3620 | if (its_id == its_srat_maps[i].its_id) | |
3621 | return its_srat_maps[i].numa_node; | |
3622 | } | |
3623 | return NUMA_NO_NODE; | |
3624 | } | |
3625 | ||
fdf6e7a8 HG |
3626 | static int __init gic_acpi_match_srat_its(struct acpi_subtable_header *header, |
3627 | const unsigned long end) | |
3628 | { | |
3629 | return 0; | |
3630 | } | |
3631 | ||
dbd2b826 GK |
3632 | static int __init gic_acpi_parse_srat_its(struct acpi_subtable_header *header, |
3633 | const unsigned long end) | |
3634 | { | |
3635 | int node; | |
3636 | struct acpi_srat_gic_its_affinity *its_affinity; | |
3637 | ||
3638 | its_affinity = (struct acpi_srat_gic_its_affinity *)header; | |
3639 | if (!its_affinity) | |
3640 | return -EINVAL; | |
3641 | ||
3642 | if (its_affinity->header.length < sizeof(*its_affinity)) { | |
3643 | pr_err("SRAT: Invalid header length %d in ITS affinity\n", | |
3644 | its_affinity->header.length); | |
3645 | return -EINVAL; | |
3646 | } | |
3647 | ||
dbd2b826 GK |
3648 | node = acpi_map_pxm_to_node(its_affinity->proximity_domain); |
3649 | ||
3650 | if (node == NUMA_NO_NODE || node >= MAX_NUMNODES) { | |
3651 | pr_err("SRAT: Invalid NUMA node %d in ITS affinity\n", node); | |
3652 | return 0; | |
3653 | } | |
3654 | ||
3655 | its_srat_maps[its_in_srat].numa_node = node; | |
3656 | its_srat_maps[its_in_srat].its_id = its_affinity->its_id; | |
3657 | its_in_srat++; | |
3658 | pr_info("SRAT: PXM %d -> ITS %d -> Node %d\n", | |
3659 | its_affinity->proximity_domain, its_affinity->its_id, node); | |
3660 | ||
3661 | return 0; | |
3662 | } | |
3663 | ||
3664 | static void __init acpi_table_parse_srat_its(void) | |
3665 | { | |
fdf6e7a8 HG |
3666 | int count; |
3667 | ||
3668 | count = acpi_table_parse_entries(ACPI_SIG_SRAT, | |
3669 | sizeof(struct acpi_table_srat), | |
3670 | ACPI_SRAT_TYPE_GIC_ITS_AFFINITY, | |
3671 | gic_acpi_match_srat_its, 0); | |
3672 | if (count <= 0) | |
3673 | return; | |
3674 | ||
6da2ec56 KC |
3675 | its_srat_maps = kmalloc_array(count, sizeof(struct its_srat_map), |
3676 | GFP_KERNEL); | |
fdf6e7a8 HG |
3677 | if (!its_srat_maps) { |
3678 | pr_warn("SRAT: Failed to allocate memory for its_srat_maps!\n"); | |
3679 | return; | |
3680 | } | |
3681 | ||
dbd2b826 GK |
3682 | acpi_table_parse_entries(ACPI_SIG_SRAT, |
3683 | sizeof(struct acpi_table_srat), | |
3684 | ACPI_SRAT_TYPE_GIC_ITS_AFFINITY, | |
3685 | gic_acpi_parse_srat_its, 0); | |
3686 | } | |
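/*
 * Note on the two-pass SRAT walk above: the first
 * acpi_table_parse_entries() call uses gic_acpi_match_srat_its() only
 * to count ITS affinity entries so its_srat_maps can be sized; the
 * second pass then fills the table via gic_acpi_parse_srat_its().
 */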
fdf6e7a8 HG |
3687 | |
3688 | /* free the its_srat_maps after ITS probing */ | |
3689 | static void __init acpi_its_srat_maps_free(void) | |
3690 | { | |
3691 | kfree(its_srat_maps); | |
3692 | } | |
dbd2b826 GK |
3693 | #else |
3694 | static void __init acpi_table_parse_srat_its(void) { } | |
3695 | static int __init acpi_get_its_numa_node(u32 its_id) { return NUMA_NO_NODE; } | |
fdf6e7a8 | 3696 | static void __init acpi_its_srat_maps_free(void) { } |
dbd2b826 GK |
3697 | #endif |
3698 | ||
3f010cf1 TN |
3699 | static int __init gic_acpi_parse_madt_its(struct acpi_subtable_header *header, |
3700 | const unsigned long end) | |
3701 | { | |
3702 | struct acpi_madt_generic_translator *its_entry; | |
3703 | struct fwnode_handle *dom_handle; | |
3704 | struct resource res; | |
3705 | int err; | |
3706 | ||
3707 | its_entry = (struct acpi_madt_generic_translator *)header; | |
3708 | memset(&res, 0, sizeof(res)); | |
3709 | res.start = its_entry->base_address; | |
3710 | res.end = its_entry->base_address + ACPI_GICV3_ITS_MEM_SIZE - 1; | |
3711 | res.flags = IORESOURCE_MEM; | |
3712 | ||
3713 | dom_handle = irq_domain_alloc_fwnode((void *)its_entry->base_address); | |
3714 | if (!dom_handle) { | |
3715 | pr_err("ITS@%pa: Unable to allocate GICv3 ITS domain token\n", | |
3716 | &res.start); | |
3717 | return -ENOMEM; | |
3718 | } | |
3719 | ||
8b4282e6 SK |
3720 | err = iort_register_domain_token(its_entry->translation_id, res.start, |
3721 | dom_handle); | |
3f010cf1 TN |
3722 | if (err) { |
3723 | pr_err("ITS@%pa: Unable to register GICv3 ITS domain token (ITS ID %d) to IORT\n", | |
3724 | &res.start, its_entry->translation_id); | |
3725 | goto dom_err; | |
3726 | } | |
3727 | ||
dbd2b826 GK |
3728 | err = its_probe_one(&res, dom_handle, |
3729 | acpi_get_its_numa_node(its_entry->translation_id)); | |
3f010cf1 TN |
3730 | if (!err) |
3731 | return 0; | |
3732 | ||
3733 | iort_deregister_domain_token(its_entry->translation_id); | |
3734 | dom_err: | |
3735 | irq_domain_free_fwnode(dom_handle); | |
3736 | return err; | |
3737 | } | |
3738 | ||
3739 | static void __init its_acpi_probe(void) | |
3740 | { | |
dbd2b826 | 3741 | acpi_table_parse_srat_its(); |
3f010cf1 TN |
3742 | acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR, |
3743 | gic_acpi_parse_madt_its, 0); | |
fdf6e7a8 | 3744 | acpi_its_srat_maps_free(); |
3f010cf1 TN |
3745 | } |
3746 | #else | |
3747 | static void __init its_acpi_probe(void) { } | |
3748 | #endif | |
3749 | ||
db40f0a7 TN |
3750 | int __init its_init(struct fwnode_handle *handle, struct rdists *rdists, |
3751 | struct irq_domain *parent_domain) | |
3752 | { | |
3753 | struct device_node *of_node; | |
8fff27ae MZ |
3754 | struct its_node *its; |
3755 | bool has_v4 = false; | |
3756 | int err; | |
db40f0a7 TN |
3757 | |
3758 | its_parent = parent_domain; | |
3759 | of_node = to_of_node(handle); | |
3760 | if (of_node) | |
3761 | its_of_probe(of_node); | |
3762 | else | |
3f010cf1 | 3763 | its_acpi_probe(); |
4c21f3c2 MZ |
3764 | |
3765 | if (list_empty(&its_nodes)) { | |
3766 | pr_warn("ITS: No ITS available, not enabling LPIs\n"); | |
3767 | return -ENXIO; | |
3768 | } | |
3769 | ||
3770 | gic_rdists = rdists; | |
8fff27ae MZ |
3771 | err = its_alloc_lpi_tables(); |
3772 | if (err) | |
3773 | return err; | |
3774 | ||
3775 | list_for_each_entry(its, &its_nodes, entry) | |
3776 | has_v4 |= its->is_v4; | |
3777 | ||
3778 | if (has_v4 && rdists->has_vlpis) { |
3d63cb53 MZ |
3779 | if (its_init_vpe_domain() || |
3780 | its_init_v4(parent_domain, &its_vpe_domain_ops)) { | |
8fff27ae MZ |
3781 | rdists->has_vlpis = false; |
3782 | pr_err("ITS: Disabling GICv4 support\n"); | |
3783 | } | |
3784 | } | |
3785 | ||
dba0bc7b DB |
3786 | register_syscore_ops(&its_syscore_ops); |
3787 | ||
8fff27ae | 3788 | return 0; |
4c21f3c2 | 3789 | } |