Commit | Line | Data |
---|---|---|
cc2d3216 | 1 | /* |
d7276b80 | 2 | * Copyright (C) 2013-2017 ARM Limited, All Rights Reserved. |
cc2d3216 | 3 | * Author: Marc Zyngier <marc.zyngier@arm.com> |
4 | * | |
5 | * This program is free software; you can redistribute it and/or modify | |
6 | * it under the terms of the GNU General Public License version 2 as | |
7 | * published by the Free Software Foundation. | |
8 | * | |
9 | * This program is distributed in the hope that it will be useful, | |
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
12 | * GNU General Public License for more details. | |
13 | * | |
14 | * You should have received a copy of the GNU General Public License | |
15 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | |
16 | */ | |
17 | ||
3f010cf1 | 18 | #include <linux/acpi.h> |
8d3554b8 | 19 | #include <linux/acpi_iort.h> |
cc2d3216 MZ |
20 | #include <linux/bitmap.h> |
21 | #include <linux/cpu.h> | |
22 | #include <linux/delay.h> | |
44bb7e24 | 23 | #include <linux/dma-iommu.h> |
cc2d3216 | 24 | #include <linux/interrupt.h> |
3f010cf1 | 25 | #include <linux/irqdomain.h> |
cc2d3216 MZ |
26 | #include <linux/log2.h> |
27 | #include <linux/mm.h> | |
28 | #include <linux/msi.h> | |
29 | #include <linux/of.h> | |
30 | #include <linux/of_address.h> | |
31 | #include <linux/of_irq.h> | |
32 | #include <linux/of_pci.h> | |
33 | #include <linux/of_platform.h> | |
34 | #include <linux/percpu.h> | |
35 | #include <linux/slab.h> | |
dba0bc7b | 36 | #include <linux/syscore_ops.h> |
cc2d3216 | 37 | |
41a83e06 | 38 | #include <linux/irqchip.h> |
cc2d3216 | 39 | #include <linux/irqchip/arm-gic-v3.h> |
c808eea8 | 40 | #include <linux/irqchip/arm-gic-v4.h> |
cc2d3216 | 41 | |
cc2d3216 MZ |
42 | #include <asm/cputype.h> |
43 | #include <asm/exception.h> | |
44 | ||
67510cca RR |
45 | #include "irq-gic-common.h" |
46 | ||
94100970 RR |
47 | #define ITS_FLAGS_CMDQ_NEEDS_FLUSHING (1ULL << 0) |
48 | #define ITS_FLAGS_WORKAROUND_CAVIUM_22375 (1ULL << 1) | |
fbf8f40e | 49 | #define ITS_FLAGS_WORKAROUND_CAVIUM_23144 (1ULL << 2) |
dba0bc7b | 50 | #define ITS_FLAGS_SAVE_SUSPEND_STATE (1ULL << 3) |
cc2d3216 | 51 | |
c48ed51c MZ |
52 | #define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING (1 << 0) |
53 | ||
a13b0404 MZ |
54 | static u32 lpi_id_bits; |
55 | ||
56 | /* | |
57 | * We allocate memory for PROPBASE to cover 2 ^ lpi_id_bits LPIs | |
58 | * (one configuration byte per interrupt). PENDBASE has to | |
59 | * be 64kB aligned (one bit per LPI, plus 8192 bits for SPI/PPI/SGI). | |
60 | */ | |
61 | #define LPI_NRBITS lpi_id_bits | |
62 | #define LPI_PROPBASE_SZ ALIGN(BIT(LPI_NRBITS), SZ_64K) | |
63 | #define LPI_PENDBASE_SZ ALIGN(BIT(LPI_NRBITS) / 8, SZ_64K) | |
64 | ||
65 | #define LPI_PROP_DEFAULT_PRIO 0xa0 | |
66 | ||
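A worked example of the sizing above may help; the numbers assume lpi_id_bits = 16 (the ITS_MAX_LPI_NRBITS cap used later in this file) and are illustrative only, not driver code.

/*
 * Sizing sketch, assuming lpi_id_bits == 16:
 *
 *   2 ^ 16 configuration bytes                  -> LPI_PROPBASE_SZ = 64KB
 *   2 ^ 16 pending bits = 8KB, 64KB aligned     -> LPI_PENDBASE_SZ = 64KB
 */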
cc2d3216 MZ |
67 | /* |
68 | * Collection structure - just an ID, and a redistributor address to | |
69 | * ping. We use one per CPU as a bag of interrupts assigned to this | |
70 | * CPU. | |
71 | */ | |
72 | struct its_collection { | |
73 | u64 target_address; | |
74 | u16 col_id; | |
75 | }; | |
76 | ||
466b7d16 | 77 | /* |
9347359a SD |
78 | * The ITS_BASER structure - contains memory information, cached |
79 | * value of BASER register configuration and ITS page size. | |
466b7d16 SD |
80 | */ |
81 | struct its_baser { | |
82 | void *base; | |
83 | u64 val; | |
84 | u32 order; | |
9347359a | 85 | u32 psz; |
466b7d16 SD |
86 | }; |
87 | ||
558b0165 AB |
88 | struct its_device; |
89 | ||
cc2d3216 MZ |
90 | /* |
91 | * The ITS structure - contains most of the infrastructure, with the | |
841514ab MZ |
92 | * top-level MSI domain, the command queue, the collections, and the |
93 | * list of devices writing to it. | |
cc2d3216 MZ |
94 | */ |
95 | struct its_node { | |
96 | raw_spinlock_t lock; | |
97 | struct list_head entry; | |
cc2d3216 | 98 | void __iomem *base; |
db40f0a7 | 99 | phys_addr_t phys_base; |
cc2d3216 MZ |
100 | struct its_cmd_block *cmd_base; |
101 | struct its_cmd_block *cmd_write; | |
466b7d16 | 102 | struct its_baser tables[GITS_BASER_NR_REGS]; |
cc2d3216 | 103 | struct its_collection *collections; |
558b0165 AB |
104 | struct fwnode_handle *fwnode_handle; |
105 | u64 (*get_msi_base)(struct its_device *its_dev); | |
dba0bc7b DB |
106 | u64 cbaser_save; |
107 | u32 ctlr_save; | |
cc2d3216 MZ |
108 | struct list_head its_device_list; |
109 | u64 flags; | |
debf6d02 | 110 | unsigned long list_nr; |
cc2d3216 | 111 | u32 ite_size; |
466b7d16 | 112 | u32 device_ids; |
fbf8f40e | 113 | int numa_node; |
558b0165 AB |
114 | unsigned int msi_domain_flags; |
115 | u32 pre_its_base; /* for Socionext Synquacer */ | |
3dfa576b | 116 | bool is_v4; |
5c9a882e | 117 | int vlpi_redist_offset; |
cc2d3216 MZ |
118 | }; |
119 | ||
120 | #define ITS_ITT_ALIGN SZ_256 | |
121 | ||
32bd44dc SD |
122 | /* The maximum number of VPEID bits supported by VLPI commands */ |
123 | #define ITS_MAX_VPEID_BITS (16) | |
124 | #define ITS_MAX_VPEID (1 << (ITS_MAX_VPEID_BITS)) | |
125 | ||
2eca0d6c SD |
126 | /* Convert page order to size in bytes */ |
127 | #define PAGE_ORDER_TO_SIZE(o) (PAGE_SIZE << (o)) | |
128 | ||
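For instance (an illustrative figure, not part of the driver), with 4KB kernel pages an order-2 allocation spans four pages:

/* Example, assuming PAGE_SIZE == 4K: PAGE_ORDER_TO_SIZE(0) = 4K, PAGE_ORDER_TO_SIZE(2) = 16K. */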
591e5bec MZ |
129 | struct event_lpi_map { |
130 | unsigned long *lpi_map; | |
131 | u16 *col_map; | |
132 | irq_hw_number_t lpi_base; | |
133 | int nr_lpis; | |
d011e4e6 MZ |
134 | struct mutex vlpi_lock; |
135 | struct its_vm *vm; | |
136 | struct its_vlpi_map *vlpi_maps; | |
137 | int nr_vlpis; | |
591e5bec MZ |
138 | }; |
139 | ||
cc2d3216 | 140 | /* |
d011e4e6 MZ |
141 | * The ITS view of a device - belongs to an ITS, owns an interrupt |
142 | * translation table, and a list of interrupts. If some of its | |
143 | * LPIs are injected into a guest (GICv4), the event_map.vm field | |
144 | * indicates which one. | |
cc2d3216 MZ |
145 | */ |
146 | struct its_device { | |
147 | struct list_head entry; | |
148 | struct its_node *its; | |
591e5bec | 149 | struct event_lpi_map event_map; |
cc2d3216 | 150 | void *itt; |
cc2d3216 MZ |
151 | u32 nr_ites; |
152 | u32 device_id; | |
153 | }; | |
154 | ||
20b3d54e MZ |
155 | static struct { |
156 | raw_spinlock_t lock; | |
157 | struct its_device *dev; | |
158 | struct its_vpe **vpes; | |
159 | int next_victim; | |
160 | } vpe_proxy; | |
161 | ||
1ac19ca6 MZ |
162 | static LIST_HEAD(its_nodes); |
163 | static DEFINE_SPINLOCK(its_lock); | |
1ac19ca6 | 164 | static struct rdists *gic_rdists; |
db40f0a7 | 165 | static struct irq_domain *its_parent; |
1ac19ca6 | 166 | |
3dfa576b | 167 | static unsigned long its_list_map; |
3171a47a MZ |
168 | static u16 vmovp_seq_num; |
169 | static DEFINE_RAW_SPINLOCK(vmovp_lock); | |
170 | ||
7d75bbb4 | 171 | static DEFINE_IDA(its_vpeid_ida); |
3dfa576b | 172 | |
1ac19ca6 MZ |
173 | #define gic_data_rdist() (raw_cpu_ptr(gic_rdists->rdist)) |
174 | #define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base) | |
e643d803 | 175 | #define gic_data_rdist_vlpi_base() (gic_data_rdist_rd_base() + SZ_128K) |
1ac19ca6 | 176 | |
591e5bec MZ |
177 | static struct its_collection *dev_event_to_col(struct its_device *its_dev, |
178 | u32 event) | |
179 | { | |
180 | struct its_node *its = its_dev->its; | |
181 | ||
182 | return its->collections + its_dev->event_map.col_map[event]; | |
183 | } | |
184 | ||
83559b47 MZ |
185 | static struct its_collection *valid_col(struct its_collection *col) |
186 | { | |
187 | if (WARN_ON_ONCE(col->target_address & GENMASK_ULL(0, 15))) | |
188 | return NULL; | |
189 | ||
190 | return col; | |
191 | } | |
192 | ||
205e065d MZ |
193 | static struct its_vpe *valid_vpe(struct its_node *its, struct its_vpe *vpe) |
194 | { | |
195 | if (valid_col(its->collections + vpe->col_idx)) | |
196 | return vpe; | |
197 | ||
198 | return NULL; | |
199 | } | |
200 | ||
cc2d3216 MZ |
201 | /* |
202 | * ITS command descriptors - parameters to be encoded in a command | |
203 | * block. | |
204 | */ | |
205 | struct its_cmd_desc { | |
206 | union { | |
207 | struct { | |
208 | struct its_device *dev; | |
209 | u32 event_id; | |
210 | } its_inv_cmd; | |
211 | ||
8d85dced MZ |
212 | struct { |
213 | struct its_device *dev; | |
214 | u32 event_id; | |
215 | } its_clear_cmd; | |
216 | ||
cc2d3216 MZ |
217 | struct { |
218 | struct its_device *dev; | |
219 | u32 event_id; | |
220 | } its_int_cmd; | |
221 | ||
222 | struct { | |
223 | struct its_device *dev; | |
224 | int valid; | |
225 | } its_mapd_cmd; | |
226 | ||
227 | struct { | |
228 | struct its_collection *col; | |
229 | int valid; | |
230 | } its_mapc_cmd; | |
231 | ||
232 | struct { | |
233 | struct its_device *dev; | |
234 | u32 phys_id; | |
235 | u32 event_id; | |
6a25ad3a | 236 | } its_mapti_cmd; |
cc2d3216 MZ |
237 | |
238 | struct { | |
239 | struct its_device *dev; | |
240 | struct its_collection *col; | |
591e5bec | 241 | u32 event_id; |
cc2d3216 MZ |
242 | } its_movi_cmd; |
243 | ||
244 | struct { | |
245 | struct its_device *dev; | |
246 | u32 event_id; | |
247 | } its_discard_cmd; | |
248 | ||
249 | struct { | |
250 | struct its_collection *col; | |
251 | } its_invall_cmd; | |
d011e4e6 | 252 | |
eb78192b MZ |
253 | struct { |
254 | struct its_vpe *vpe; | |
255 | } its_vinvall_cmd; | |
256 | ||
257 | struct { | |
258 | struct its_vpe *vpe; | |
259 | struct its_collection *col; | |
260 | bool valid; | |
261 | } its_vmapp_cmd; | |
262 | ||
d011e4e6 MZ |
263 | struct { |
264 | struct its_vpe *vpe; | |
265 | struct its_device *dev; | |
266 | u32 virt_id; | |
267 | u32 event_id; | |
268 | bool db_enabled; | |
269 | } its_vmapti_cmd; | |
270 | ||
271 | struct { | |
272 | struct its_vpe *vpe; | |
273 | struct its_device *dev; | |
274 | u32 event_id; | |
275 | bool db_enabled; | |
276 | } its_vmovi_cmd; | |
3171a47a MZ |
277 | |
278 | struct { | |
279 | struct its_vpe *vpe; | |
280 | struct its_collection *col; | |
281 | u16 seq_num; | |
282 | u16 its_list; | |
283 | } its_vmovp_cmd; | |
cc2d3216 MZ |
284 | }; |
285 | }; | |
286 | ||
287 | /* | |
288 | * The ITS command block, which is what the ITS actually parses. | |
289 | */ | |
290 | struct its_cmd_block { | |
291 | u64 raw_cmd[4]; | |
292 | }; | |
293 | ||
294 | #define ITS_CMD_QUEUE_SZ SZ_64K | |
295 | #define ITS_CMD_QUEUE_NR_ENTRIES (ITS_CMD_QUEUE_SZ / sizeof(struct its_cmd_block)) | |
296 | ||
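To make the queue geometry concrete (an illustrative check, not part of the driver): each command block is four 64-bit words, i.e. 32 bytes, so the 64KB queue holds 2048 commands before it wraps.

/* Illustrative only: 64KB / 32-byte commands = 2048 slots in the ring. */
_Static_assert(sizeof(struct its_cmd_block) == 32, "one ITS command is 32 bytes");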
67047f90 MZ |
297 | typedef struct its_collection *(*its_cmd_builder_t)(struct its_node *, |
298 | struct its_cmd_block *, | |
cc2d3216 MZ |
299 | struct its_cmd_desc *); |
300 | ||
67047f90 MZ |
301 | typedef struct its_vpe *(*its_cmd_vbuilder_t)(struct its_node *, |
302 | struct its_cmd_block *, | |
d011e4e6 MZ |
303 | struct its_cmd_desc *); |
304 | ||
4d36f136 MZ |
305 | static void its_mask_encode(u64 *raw_cmd, u64 val, int h, int l) |
306 | { | |
307 | u64 mask = GENMASK_ULL(h, l); | |
308 | *raw_cmd &= ~mask; | |
309 | *raw_cmd |= (val << l) & mask; | |
310 | } | |
311 | ||
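As an illustration of how the encoders below use this helper (example values and a hypothetical function name, not driver code), placing a device ID in bits [63:32] of the first command word looks like this:

/* Illustrative only: place device ID 0x12 into bits [63:32] of a command word. */
static inline u64 example_encode_devid(u64 raw)
{
	u64 mask = GENMASK_ULL(63, 32);

	raw &= ~mask;
	raw |= (0x12ULL << 32) & mask;	/* upper word becomes 0x00000012 */
	return raw;
}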
cc2d3216 MZ |
312 | static void its_encode_cmd(struct its_cmd_block *cmd, u8 cmd_nr) |
313 | { | |
4d36f136 | 314 | its_mask_encode(&cmd->raw_cmd[0], cmd_nr, 7, 0); |
cc2d3216 MZ |
315 | } |
316 | ||
317 | static void its_encode_devid(struct its_cmd_block *cmd, u32 devid) | |
318 | { | |
4d36f136 | 319 | its_mask_encode(&cmd->raw_cmd[0], devid, 63, 32); |
cc2d3216 MZ |
320 | } |
321 | ||
322 | static void its_encode_event_id(struct its_cmd_block *cmd, u32 id) | |
323 | { | |
4d36f136 | 324 | its_mask_encode(&cmd->raw_cmd[1], id, 31, 0); |
cc2d3216 MZ |
325 | } |
326 | ||
327 | static void its_encode_phys_id(struct its_cmd_block *cmd, u32 phys_id) | |
328 | { | |
4d36f136 | 329 | its_mask_encode(&cmd->raw_cmd[1], phys_id, 63, 32); |
cc2d3216 MZ |
330 | } |
331 | ||
332 | static void its_encode_size(struct its_cmd_block *cmd, u8 size) | |
333 | { | |
4d36f136 | 334 | its_mask_encode(&cmd->raw_cmd[1], size, 4, 0); |
cc2d3216 MZ |
335 | } |
336 | ||
337 | static void its_encode_itt(struct its_cmd_block *cmd, u64 itt_addr) | |
338 | { | |
30ae9610 | 339 | its_mask_encode(&cmd->raw_cmd[2], itt_addr >> 8, 51, 8); |
cc2d3216 MZ |
340 | } |
341 | ||
342 | static void its_encode_valid(struct its_cmd_block *cmd, int valid) | |
343 | { | |
4d36f136 | 344 | its_mask_encode(&cmd->raw_cmd[2], !!valid, 63, 63); |
cc2d3216 MZ |
345 | } |
346 | ||
347 | static void its_encode_target(struct its_cmd_block *cmd, u64 target_addr) | |
348 | { | |
30ae9610 | 349 | its_mask_encode(&cmd->raw_cmd[2], target_addr >> 16, 51, 16); |
cc2d3216 MZ |
350 | } |
351 | ||
352 | static void its_encode_collection(struct its_cmd_block *cmd, u16 col) | |
353 | { | |
4d36f136 | 354 | its_mask_encode(&cmd->raw_cmd[2], col, 15, 0); |
cc2d3216 MZ |
355 | } |
356 | ||
d011e4e6 MZ |
357 | static void its_encode_vpeid(struct its_cmd_block *cmd, u16 vpeid) |
358 | { | |
359 | its_mask_encode(&cmd->raw_cmd[1], vpeid, 47, 32); | |
360 | } | |
361 | ||
362 | static void its_encode_virt_id(struct its_cmd_block *cmd, u32 virt_id) | |
363 | { | |
364 | its_mask_encode(&cmd->raw_cmd[2], virt_id, 31, 0); | |
365 | } | |
366 | ||
367 | static void its_encode_db_phys_id(struct its_cmd_block *cmd, u32 db_phys_id) | |
368 | { | |
369 | its_mask_encode(&cmd->raw_cmd[2], db_phys_id, 63, 32); | |
370 | } | |
371 | ||
372 | static void its_encode_db_valid(struct its_cmd_block *cmd, bool db_valid) | |
373 | { | |
374 | its_mask_encode(&cmd->raw_cmd[2], db_valid, 0, 0); | |
375 | } | |
376 | ||
3171a47a MZ |
377 | static void its_encode_seq_num(struct its_cmd_block *cmd, u16 seq_num) |
378 | { | |
379 | its_mask_encode(&cmd->raw_cmd[0], seq_num, 47, 32); | |
380 | } | |
381 | ||
382 | static void its_encode_its_list(struct its_cmd_block *cmd, u16 its_list) | |
383 | { | |
384 | its_mask_encode(&cmd->raw_cmd[1], its_list, 15, 0); | |
385 | } | |
386 | ||
eb78192b MZ |
387 | static void its_encode_vpt_addr(struct its_cmd_block *cmd, u64 vpt_pa) |
388 | { | |
30ae9610 | 389 | its_mask_encode(&cmd->raw_cmd[3], vpt_pa >> 16, 51, 16); |
eb78192b MZ |
390 | } |
391 | ||
392 | static void its_encode_vpt_size(struct its_cmd_block *cmd, u8 vpt_size) | |
393 | { | |
394 | its_mask_encode(&cmd->raw_cmd[3], vpt_size, 4, 0); | |
395 | } | |
396 | ||
cc2d3216 MZ |
397 | static inline void its_fixup_cmd(struct its_cmd_block *cmd) |
398 | { | |
399 | /* Let's fixup BE commands */ | |
400 | cmd->raw_cmd[0] = cpu_to_le64(cmd->raw_cmd[0]); | |
401 | cmd->raw_cmd[1] = cpu_to_le64(cmd->raw_cmd[1]); | |
402 | cmd->raw_cmd[2] = cpu_to_le64(cmd->raw_cmd[2]); | |
403 | cmd->raw_cmd[3] = cpu_to_le64(cmd->raw_cmd[3]); | |
404 | } | |
405 | ||
67047f90 MZ |
406 | static struct its_collection *its_build_mapd_cmd(struct its_node *its, |
407 | struct its_cmd_block *cmd, | |
cc2d3216 MZ |
408 | struct its_cmd_desc *desc) |
409 | { | |
410 | unsigned long itt_addr; | |
c8481267 | 411 | u8 size = ilog2(desc->its_mapd_cmd.dev->nr_ites); |
cc2d3216 MZ |
412 | |
413 | itt_addr = virt_to_phys(desc->its_mapd_cmd.dev->itt); | |
414 | itt_addr = ALIGN(itt_addr, ITS_ITT_ALIGN); | |
415 | ||
416 | its_encode_cmd(cmd, GITS_CMD_MAPD); | |
417 | its_encode_devid(cmd, desc->its_mapd_cmd.dev->device_id); | |
418 | its_encode_size(cmd, size - 1); | |
419 | its_encode_itt(cmd, itt_addr); | |
420 | its_encode_valid(cmd, desc->its_mapd_cmd.valid); | |
421 | ||
422 | its_fixup_cmd(cmd); | |
423 | ||
591e5bec | 424 | return NULL; |
cc2d3216 MZ |
425 | } |
426 | ||
67047f90 MZ |
427 | static struct its_collection *its_build_mapc_cmd(struct its_node *its, |
428 | struct its_cmd_block *cmd, | |
cc2d3216 MZ |
429 | struct its_cmd_desc *desc) |
430 | { | |
431 | its_encode_cmd(cmd, GITS_CMD_MAPC); | |
432 | its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id); | |
433 | its_encode_target(cmd, desc->its_mapc_cmd.col->target_address); | |
434 | its_encode_valid(cmd, desc->its_mapc_cmd.valid); | |
435 | ||
436 | its_fixup_cmd(cmd); | |
437 | ||
438 | return desc->its_mapc_cmd.col; | |
439 | } | |
440 | ||
67047f90 MZ |
441 | static struct its_collection *its_build_mapti_cmd(struct its_node *its, |
442 | struct its_cmd_block *cmd, | |
cc2d3216 MZ |
443 | struct its_cmd_desc *desc) |
444 | { | |
591e5bec MZ |
445 | struct its_collection *col; |
446 | ||
6a25ad3a MZ |
447 | col = dev_event_to_col(desc->its_mapti_cmd.dev, |
448 | desc->its_mapti_cmd.event_id); | |
591e5bec | 449 | |
6a25ad3a MZ |
450 | its_encode_cmd(cmd, GITS_CMD_MAPTI); |
451 | its_encode_devid(cmd, desc->its_mapti_cmd.dev->device_id); | |
452 | its_encode_event_id(cmd, desc->its_mapti_cmd.event_id); | |
453 | its_encode_phys_id(cmd, desc->its_mapti_cmd.phys_id); | |
591e5bec | 454 | its_encode_collection(cmd, col->col_id); |
cc2d3216 MZ |
455 | |
456 | its_fixup_cmd(cmd); | |
457 | ||
83559b47 | 458 | return valid_col(col); |
cc2d3216 MZ |
459 | } |
460 | ||
67047f90 MZ |
461 | static struct its_collection *its_build_movi_cmd(struct its_node *its, |
462 | struct its_cmd_block *cmd, | |
cc2d3216 MZ |
463 | struct its_cmd_desc *desc) |
464 | { | |
591e5bec MZ |
465 | struct its_collection *col; |
466 | ||
467 | col = dev_event_to_col(desc->its_movi_cmd.dev, | |
468 | desc->its_movi_cmd.event_id); | |
469 | ||
cc2d3216 MZ |
470 | its_encode_cmd(cmd, GITS_CMD_MOVI); |
471 | its_encode_devid(cmd, desc->its_movi_cmd.dev->device_id); | |
591e5bec | 472 | its_encode_event_id(cmd, desc->its_movi_cmd.event_id); |
cc2d3216 MZ |
473 | its_encode_collection(cmd, desc->its_movi_cmd.col->col_id); |
474 | ||
475 | its_fixup_cmd(cmd); | |
476 | ||
83559b47 | 477 | return valid_col(col); |
cc2d3216 MZ |
478 | } |
479 | ||
67047f90 MZ |
480 | static struct its_collection *its_build_discard_cmd(struct its_node *its, |
481 | struct its_cmd_block *cmd, | |
cc2d3216 MZ |
482 | struct its_cmd_desc *desc) |
483 | { | |
591e5bec MZ |
484 | struct its_collection *col; |
485 | ||
486 | col = dev_event_to_col(desc->its_discard_cmd.dev, | |
487 | desc->its_discard_cmd.event_id); | |
488 | ||
cc2d3216 MZ |
489 | its_encode_cmd(cmd, GITS_CMD_DISCARD); |
490 | its_encode_devid(cmd, desc->its_discard_cmd.dev->device_id); | |
491 | its_encode_event_id(cmd, desc->its_discard_cmd.event_id); | |
492 | ||
493 | its_fixup_cmd(cmd); | |
494 | ||
83559b47 | 495 | return valid_col(col); |
cc2d3216 MZ |
496 | } |
497 | ||
67047f90 MZ |
498 | static struct its_collection *its_build_inv_cmd(struct its_node *its, |
499 | struct its_cmd_block *cmd, | |
cc2d3216 MZ |
500 | struct its_cmd_desc *desc) |
501 | { | |
591e5bec MZ |
502 | struct its_collection *col; |
503 | ||
504 | col = dev_event_to_col(desc->its_inv_cmd.dev, | |
505 | desc->its_inv_cmd.event_id); | |
506 | ||
cc2d3216 MZ |
507 | its_encode_cmd(cmd, GITS_CMD_INV); |
508 | its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id); | |
509 | its_encode_event_id(cmd, desc->its_inv_cmd.event_id); | |
510 | ||
511 | its_fixup_cmd(cmd); | |
512 | ||
83559b47 | 513 | return valid_col(col); |
cc2d3216 MZ |
514 | } |
515 | ||
67047f90 MZ |
516 | static struct its_collection *its_build_int_cmd(struct its_node *its, |
517 | struct its_cmd_block *cmd, | |
8d85dced MZ |
518 | struct its_cmd_desc *desc) |
519 | { | |
520 | struct its_collection *col; | |
521 | ||
522 | col = dev_event_to_col(desc->its_int_cmd.dev, | |
523 | desc->its_int_cmd.event_id); | |
524 | ||
525 | its_encode_cmd(cmd, GITS_CMD_INT); | |
526 | its_encode_devid(cmd, desc->its_int_cmd.dev->device_id); | |
527 | its_encode_event_id(cmd, desc->its_int_cmd.event_id); | |
528 | ||
529 | its_fixup_cmd(cmd); | |
530 | ||
83559b47 | 531 | return valid_col(col); |
8d85dced MZ |
532 | } |
533 | ||
67047f90 MZ |
534 | static struct its_collection *its_build_clear_cmd(struct its_node *its, |
535 | struct its_cmd_block *cmd, | |
8d85dced MZ |
536 | struct its_cmd_desc *desc) |
537 | { | |
538 | struct its_collection *col; | |
539 | ||
540 | col = dev_event_to_col(desc->its_clear_cmd.dev, | |
541 | desc->its_clear_cmd.event_id); | |
542 | ||
543 | its_encode_cmd(cmd, GITS_CMD_CLEAR); | |
544 | its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id); | |
545 | its_encode_event_id(cmd, desc->its_clear_cmd.event_id); | |
546 | ||
547 | its_fixup_cmd(cmd); | |
548 | ||
83559b47 | 549 | return valid_col(col); |
8d85dced MZ |
550 | } |
551 | ||
67047f90 MZ |
552 | static struct its_collection *its_build_invall_cmd(struct its_node *its, |
553 | struct its_cmd_block *cmd, | |
cc2d3216 MZ |
554 | struct its_cmd_desc *desc) |
555 | { | |
556 | its_encode_cmd(cmd, GITS_CMD_INVALL); | |
557 | its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id); | |
558 | ||
559 | its_fixup_cmd(cmd); | |
560 | ||
561 | return NULL; | |
562 | } | |
563 | ||
67047f90 MZ |
564 | static struct its_vpe *its_build_vinvall_cmd(struct its_node *its, |
565 | struct its_cmd_block *cmd, | |
eb78192b MZ |
566 | struct its_cmd_desc *desc) |
567 | { | |
568 | its_encode_cmd(cmd, GITS_CMD_VINVALL); | |
569 | its_encode_vpeid(cmd, desc->its_vinvall_cmd.vpe->vpe_id); | |
570 | ||
571 | its_fixup_cmd(cmd); | |
572 | ||
205e065d | 573 | return valid_vpe(its, desc->its_vinvall_cmd.vpe); |
eb78192b MZ |
574 | } |
575 | ||
67047f90 MZ |
576 | static struct its_vpe *its_build_vmapp_cmd(struct its_node *its, |
577 | struct its_cmd_block *cmd, | |
eb78192b MZ |
578 | struct its_cmd_desc *desc) |
579 | { | |
580 | unsigned long vpt_addr; | |
5c9a882e | 581 | u64 target; |
eb78192b MZ |
582 | |
583 | vpt_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page)); | |
5c9a882e | 584 | target = desc->its_vmapp_cmd.col->target_address + its->vlpi_redist_offset; |
eb78192b MZ |
585 | |
586 | its_encode_cmd(cmd, GITS_CMD_VMAPP); | |
587 | its_encode_vpeid(cmd, desc->its_vmapp_cmd.vpe->vpe_id); | |
588 | its_encode_valid(cmd, desc->its_vmapp_cmd.valid); | |
5c9a882e | 589 | its_encode_target(cmd, target); |
eb78192b MZ |
590 | its_encode_vpt_addr(cmd, vpt_addr); |
591 | its_encode_vpt_size(cmd, LPI_NRBITS - 1); | |
592 | ||
593 | its_fixup_cmd(cmd); | |
594 | ||
205e065d | 595 | return valid_vpe(its, desc->its_vmapp_cmd.vpe); |
eb78192b MZ |
596 | } |
597 | ||
67047f90 MZ |
598 | static struct its_vpe *its_build_vmapti_cmd(struct its_node *its, |
599 | struct its_cmd_block *cmd, | |
d011e4e6 MZ |
600 | struct its_cmd_desc *desc) |
601 | { | |
602 | u32 db; | |
603 | ||
604 | if (desc->its_vmapti_cmd.db_enabled) | |
605 | db = desc->its_vmapti_cmd.vpe->vpe_db_lpi; | |
606 | else | |
607 | db = 1023; | |
608 | ||
609 | its_encode_cmd(cmd, GITS_CMD_VMAPTI); | |
610 | its_encode_devid(cmd, desc->its_vmapti_cmd.dev->device_id); | |
611 | its_encode_vpeid(cmd, desc->its_vmapti_cmd.vpe->vpe_id); | |
612 | its_encode_event_id(cmd, desc->its_vmapti_cmd.event_id); | |
613 | its_encode_db_phys_id(cmd, db); | |
614 | its_encode_virt_id(cmd, desc->its_vmapti_cmd.virt_id); | |
615 | ||
616 | its_fixup_cmd(cmd); | |
617 | ||
205e065d | 618 | return valid_vpe(its, desc->its_vmapti_cmd.vpe); |
d011e4e6 MZ |
619 | } |
620 | ||
67047f90 MZ |
621 | static struct its_vpe *its_build_vmovi_cmd(struct its_node *its, |
622 | struct its_cmd_block *cmd, | |
d011e4e6 MZ |
623 | struct its_cmd_desc *desc) |
624 | { | |
625 | u32 db; | |
626 | ||
627 | if (desc->its_vmovi_cmd.db_enabled) | |
628 | db = desc->its_vmovi_cmd.vpe->vpe_db_lpi; | |
629 | else | |
630 | db = 1023; | |
631 | ||
632 | its_encode_cmd(cmd, GITS_CMD_VMOVI); | |
633 | its_encode_devid(cmd, desc->its_vmovi_cmd.dev->device_id); | |
634 | its_encode_vpeid(cmd, desc->its_vmovi_cmd.vpe->vpe_id); | |
635 | its_encode_event_id(cmd, desc->its_vmovi_cmd.event_id); | |
636 | its_encode_db_phys_id(cmd, db); | |
637 | its_encode_db_valid(cmd, true); | |
638 | ||
639 | its_fixup_cmd(cmd); | |
640 | ||
205e065d | 641 | return valid_vpe(its, desc->its_vmovi_cmd.vpe); |
d011e4e6 MZ |
642 | } |
643 | ||
67047f90 MZ |
644 | static struct its_vpe *its_build_vmovp_cmd(struct its_node *its, |
645 | struct its_cmd_block *cmd, | |
3171a47a MZ |
646 | struct its_cmd_desc *desc) |
647 | { | |
5c9a882e MZ |
648 | u64 target; |
649 | ||
650 | target = desc->its_vmovp_cmd.col->target_address + its->vlpi_redist_offset; | |
3171a47a MZ |
651 | its_encode_cmd(cmd, GITS_CMD_VMOVP); |
652 | its_encode_seq_num(cmd, desc->its_vmovp_cmd.seq_num); | |
653 | its_encode_its_list(cmd, desc->its_vmovp_cmd.its_list); | |
654 | its_encode_vpeid(cmd, desc->its_vmovp_cmd.vpe->vpe_id); | |
5c9a882e | 655 | its_encode_target(cmd, target); |
3171a47a MZ |
656 | |
657 | its_fixup_cmd(cmd); | |
658 | ||
205e065d | 659 | return valid_vpe(its, desc->its_vmovp_cmd.vpe); |
3171a47a MZ |
660 | } |
661 | ||
cc2d3216 MZ |
662 | static u64 its_cmd_ptr_to_offset(struct its_node *its, |
663 | struct its_cmd_block *ptr) | |
664 | { | |
665 | return (ptr - its->cmd_base) * sizeof(*ptr); | |
666 | } | |
667 | ||
668 | static int its_queue_full(struct its_node *its) | |
669 | { | |
670 | int widx; | |
671 | int ridx; | |
672 | ||
673 | widx = its->cmd_write - its->cmd_base; | |
674 | ridx = readl_relaxed(its->base + GITS_CREADR) / sizeof(struct its_cmd_block); | |
675 | ||
676 | /* This is incredibly unlikely to happen, unless the ITS locks up. */ | |
677 | if (((widx + 1) % ITS_CMD_QUEUE_NR_ENTRIES) == ridx) | |
678 | return 1; | |
679 | ||
680 | return 0; | |
681 | } | |
682 | ||
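A short worked example of the full-queue test (illustrative numbers only):

/*
 * Example: with 2048 slots, widx == 2047 and ridx == 0 gives
 * (2047 + 1) % 2048 == 0 == ridx, so the queue is reported full and the
 * caller must wait for the ITS to advance GITS_CREADR before writing.
 */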
683 | static struct its_cmd_block *its_allocate_entry(struct its_node *its) | |
684 | { | |
685 | struct its_cmd_block *cmd; | |
686 | u32 count = 1000000; /* 1s! */ | |
687 | ||
688 | while (its_queue_full(its)) { | |
689 | count--; | |
690 | if (!count) { | |
691 | pr_err_ratelimited("ITS queue not draining\n"); | |
692 | return NULL; | |
693 | } | |
694 | cpu_relax(); | |
695 | udelay(1); | |
696 | } | |
697 | ||
698 | cmd = its->cmd_write++; | |
699 | ||
700 | /* Handle queue wrapping */ | |
701 | if (its->cmd_write == (its->cmd_base + ITS_CMD_QUEUE_NR_ENTRIES)) | |
702 | its->cmd_write = its->cmd_base; | |
703 | ||
34d677a9 MZ |
704 | /* Clear command */ |
705 | cmd->raw_cmd[0] = 0; | |
706 | cmd->raw_cmd[1] = 0; | |
707 | cmd->raw_cmd[2] = 0; | |
708 | cmd->raw_cmd[3] = 0; | |
709 | ||
cc2d3216 MZ |
710 | return cmd; |
711 | } | |
712 | ||
713 | static struct its_cmd_block *its_post_commands(struct its_node *its) | |
714 | { | |
715 | u64 wr = its_cmd_ptr_to_offset(its, its->cmd_write); | |
716 | ||
717 | writel_relaxed(wr, its->base + GITS_CWRITER); | |
718 | ||
719 | return its->cmd_write; | |
720 | } | |
721 | ||
722 | static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd) | |
723 | { | |
724 | /* | |
725 | * Make sure the commands written to memory are observable by | |
726 | * the ITS. | |
727 | */ | |
728 | if (its->flags & ITS_FLAGS_CMDQ_NEEDS_FLUSHING) | |
328191c0 | 729 | gic_flush_dcache_to_poc(cmd, sizeof(*cmd)); |
cc2d3216 MZ |
730 | else |
731 | dsb(ishst); | |
732 | } | |
733 | ||
a19b462f MZ |
734 | static int its_wait_for_range_completion(struct its_node *its, |
735 | struct its_cmd_block *from, | |
736 | struct its_cmd_block *to) | |
cc2d3216 MZ |
737 | { |
738 | u64 rd_idx, from_idx, to_idx; | |
739 | u32 count = 1000000; /* 1s! */ | |
740 | ||
741 | from_idx = its_cmd_ptr_to_offset(its, from); | |
742 | to_idx = its_cmd_ptr_to_offset(its, to); | |
743 | ||
744 | while (1) { | |
745 | rd_idx = readl_relaxed(its->base + GITS_CREADR); | |
9bdd8b1c MZ |
746 | |
747 | /* Direct case */ | |
748 | if (from_idx < to_idx && rd_idx >= to_idx) | |
749 | break; | |
750 | ||
751 | /* Wrapped case */ | |
752 | if (from_idx >= to_idx && rd_idx >= to_idx && rd_idx < from_idx) | |
cc2d3216 MZ |
753 | break; |
754 | ||
755 | count--; | |
756 | if (!count) { | |
a19b462f MZ |
757 | pr_err_ratelimited("ITS queue timeout (%llu %llu %llu)\n", |
758 | from_idx, to_idx, rd_idx); | |
759 | return -1; | |
cc2d3216 MZ |
760 | } |
761 | cpu_relax(); | |
762 | udelay(1); | |
763 | } | |
a19b462f MZ |
764 | |
765 | return 0; | |
cc2d3216 MZ |
766 | } |
767 | ||
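The two exit conditions above can be illustrated with byte offsets into the 64KB queue (illustrative values):

/*
 * Illustrative offsets: direct case, from = 0x40 and to = 0x80 -> done once
 * GITS_CREADR reaches 0x80 or beyond. Wrapped case, from = 0xffe0 and
 * to = 0x20 (the write pointer wrapped past the end) -> done once the read
 * pointer has also wrapped, i.e. rd_idx >= 0x20 while rd_idx < 0xffe0.
 */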
e4f9094b MZ |
768 | /* Warning, macro hell follows */ |
769 | #define BUILD_SINGLE_CMD_FUNC(name, buildtype, synctype, buildfn) \ | |
770 | void name(struct its_node *its, \ | |
771 | buildtype builder, \ | |
772 | struct its_cmd_desc *desc) \ | |
773 | { \ | |
774 | struct its_cmd_block *cmd, *sync_cmd, *next_cmd; \ | |
775 | synctype *sync_obj; \ | |
776 | unsigned long flags; \ | |
777 | \ | |
778 | raw_spin_lock_irqsave(&its->lock, flags); \ | |
779 | \ | |
780 | cmd = its_allocate_entry(its); \ | |
781 | if (!cmd) { /* We're soooooo screwed... */ \
782 | raw_spin_unlock_irqrestore(&its->lock, flags); \ | |
783 | return; \ | |
784 | } \ | |
67047f90 | 785 | sync_obj = builder(its, cmd, desc); \ |
e4f9094b MZ |
786 | its_flush_cmd(its, cmd); \ |
787 | \ | |
788 | if (sync_obj) { \ | |
789 | sync_cmd = its_allocate_entry(its); \ | |
790 | if (!sync_cmd) \ | |
791 | goto post; \ | |
792 | \ | |
67047f90 | 793 | buildfn(its, sync_cmd, sync_obj); \ |
e4f9094b MZ |
794 | its_flush_cmd(its, sync_cmd); \ |
795 | } \ | |
796 | \ | |
797 | post: \ | |
798 | next_cmd = its_post_commands(its); \ | |
799 | raw_spin_unlock_irqrestore(&its->lock, flags); \ | |
800 | \ | |
a19b462f MZ |
801 | if (its_wait_for_range_completion(its, cmd, next_cmd)) \ |
802 | pr_err_ratelimited("ITS cmd %ps failed\n", builder); \ | |
e4f9094b | 803 | } |
cc2d3216 | 804 | |
67047f90 MZ |
805 | static void its_build_sync_cmd(struct its_node *its, |
806 | struct its_cmd_block *sync_cmd, | |
e4f9094b MZ |
807 | struct its_collection *sync_col) |
808 | { | |
809 | its_encode_cmd(sync_cmd, GITS_CMD_SYNC); | |
810 | its_encode_target(sync_cmd, sync_col->target_address); | |
cc2d3216 | 811 | |
e4f9094b | 812 | its_fixup_cmd(sync_cmd); |
cc2d3216 MZ |
813 | } |
814 | ||
e4f9094b MZ |
815 | static BUILD_SINGLE_CMD_FUNC(its_send_single_command, its_cmd_builder_t, |
816 | struct its_collection, its_build_sync_cmd) | |
817 | ||
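Roughly, the instantiation above expands to the following shape (a readability sketch, not the literal preprocessor output):

/*
 * void its_send_single_command(struct its_node *its,
 *                              its_cmd_builder_t builder,
 *                              struct its_cmd_desc *desc)
 * {
 *	take its->lock, allocate a slot, run builder(its, cmd, desc);
 *	if the builder returned a collection, queue a SYNC built by
 *	its_build_sync_cmd() for that collection; then update GITS_CWRITER
 *	and poll GITS_CREADR until both commands have been consumed.
 * }
 */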
67047f90 MZ |
818 | static void its_build_vsync_cmd(struct its_node *its, |
819 | struct its_cmd_block *sync_cmd, | |
d011e4e6 MZ |
820 | struct its_vpe *sync_vpe) |
821 | { | |
822 | its_encode_cmd(sync_cmd, GITS_CMD_VSYNC); | |
823 | its_encode_vpeid(sync_cmd, sync_vpe->vpe_id); | |
824 | ||
825 | its_fixup_cmd(sync_cmd); | |
826 | } | |
827 | ||
828 | static BUILD_SINGLE_CMD_FUNC(its_send_single_vcommand, its_cmd_vbuilder_t, | |
829 | struct its_vpe, its_build_vsync_cmd) | |
830 | ||
8d85dced | 831 | static void its_send_int(struct its_device *dev, u32 event_id) |
cc2d3216 | 832 | { |
8d85dced | 833 | struct its_cmd_desc desc; |
cc2d3216 | 834 | |
8d85dced MZ |
835 | desc.its_int_cmd.dev = dev; |
836 | desc.its_int_cmd.event_id = event_id; | |
cc2d3216 | 837 | |
8d85dced MZ |
838 | its_send_single_command(dev->its, its_build_int_cmd, &desc); |
839 | } | |
cc2d3216 | 840 | |
8d85dced MZ |
841 | static void its_send_clear(struct its_device *dev, u32 event_id) |
842 | { | |
843 | struct its_cmd_desc desc; | |
cc2d3216 | 844 | |
8d85dced MZ |
845 | desc.its_clear_cmd.dev = dev; |
846 | desc.its_clear_cmd.event_id = event_id; | |
cc2d3216 | 847 | |
8d85dced | 848 | its_send_single_command(dev->its, its_build_clear_cmd, &desc); |
cc2d3216 MZ |
849 | } |
850 | ||
851 | static void its_send_inv(struct its_device *dev, u32 event_id) | |
852 | { | |
853 | struct its_cmd_desc desc; | |
854 | ||
855 | desc.its_inv_cmd.dev = dev; | |
856 | desc.its_inv_cmd.event_id = event_id; | |
857 | ||
858 | its_send_single_command(dev->its, its_build_inv_cmd, &desc); | |
859 | } | |
860 | ||
861 | static void its_send_mapd(struct its_device *dev, int valid) | |
862 | { | |
863 | struct its_cmd_desc desc; | |
864 | ||
865 | desc.its_mapd_cmd.dev = dev; | |
866 | desc.its_mapd_cmd.valid = !!valid; | |
867 | ||
868 | its_send_single_command(dev->its, its_build_mapd_cmd, &desc); | |
869 | } | |
870 | ||
871 | static void its_send_mapc(struct its_node *its, struct its_collection *col, | |
872 | int valid) | |
873 | { | |
874 | struct its_cmd_desc desc; | |
875 | ||
876 | desc.its_mapc_cmd.col = col; | |
877 | desc.its_mapc_cmd.valid = !!valid; | |
878 | ||
879 | its_send_single_command(its, its_build_mapc_cmd, &desc); | |
880 | } | |
881 | ||
6a25ad3a | 882 | static void its_send_mapti(struct its_device *dev, u32 irq_id, u32 id) |
cc2d3216 MZ |
883 | { |
884 | struct its_cmd_desc desc; | |
885 | ||
6a25ad3a MZ |
886 | desc.its_mapti_cmd.dev = dev; |
887 | desc.its_mapti_cmd.phys_id = irq_id; | |
888 | desc.its_mapti_cmd.event_id = id; | |
cc2d3216 | 889 | |
6a25ad3a | 890 | its_send_single_command(dev->its, its_build_mapti_cmd, &desc); |
cc2d3216 MZ |
891 | } |
892 | ||
893 | static void its_send_movi(struct its_device *dev, | |
894 | struct its_collection *col, u32 id) | |
895 | { | |
896 | struct its_cmd_desc desc; | |
897 | ||
898 | desc.its_movi_cmd.dev = dev; | |
899 | desc.its_movi_cmd.col = col; | |
591e5bec | 900 | desc.its_movi_cmd.event_id = id; |
cc2d3216 MZ |
901 | |
902 | its_send_single_command(dev->its, its_build_movi_cmd, &desc); | |
903 | } | |
904 | ||
905 | static void its_send_discard(struct its_device *dev, u32 id) | |
906 | { | |
907 | struct its_cmd_desc desc; | |
908 | ||
909 | desc.its_discard_cmd.dev = dev; | |
910 | desc.its_discard_cmd.event_id = id; | |
911 | ||
912 | its_send_single_command(dev->its, its_build_discard_cmd, &desc); | |
913 | } | |
914 | ||
915 | static void its_send_invall(struct its_node *its, struct its_collection *col) | |
916 | { | |
917 | struct its_cmd_desc desc; | |
918 | ||
919 | desc.its_invall_cmd.col = col; | |
920 | ||
921 | its_send_single_command(its, its_build_invall_cmd, &desc); | |
922 | } | |
c48ed51c | 923 | |
d011e4e6 MZ |
924 | static void its_send_vmapti(struct its_device *dev, u32 id) |
925 | { | |
926 | struct its_vlpi_map *map = &dev->event_map.vlpi_maps[id]; | |
927 | struct its_cmd_desc desc; | |
928 | ||
929 | desc.its_vmapti_cmd.vpe = map->vpe; | |
930 | desc.its_vmapti_cmd.dev = dev; | |
931 | desc.its_vmapti_cmd.virt_id = map->vintid; | |
932 | desc.its_vmapti_cmd.event_id = id; | |
933 | desc.its_vmapti_cmd.db_enabled = map->db_enabled; | |
934 | ||
935 | its_send_single_vcommand(dev->its, its_build_vmapti_cmd, &desc); | |
936 | } | |
937 | ||
938 | static void its_send_vmovi(struct its_device *dev, u32 id) | |
939 | { | |
940 | struct its_vlpi_map *map = &dev->event_map.vlpi_maps[id]; | |
941 | struct its_cmd_desc desc; | |
942 | ||
943 | desc.its_vmovi_cmd.vpe = map->vpe; | |
944 | desc.its_vmovi_cmd.dev = dev; | |
945 | desc.its_vmovi_cmd.event_id = id; | |
946 | desc.its_vmovi_cmd.db_enabled = map->db_enabled; | |
947 | ||
948 | its_send_single_vcommand(dev->its, its_build_vmovi_cmd, &desc); | |
949 | } | |
950 | ||
75fd951b MZ |
951 | static void its_send_vmapp(struct its_node *its, |
952 | struct its_vpe *vpe, bool valid) | |
eb78192b MZ |
953 | { |
954 | struct its_cmd_desc desc; | |
eb78192b MZ |
955 | |
956 | desc.its_vmapp_cmd.vpe = vpe; | |
957 | desc.its_vmapp_cmd.valid = valid; | |
75fd951b | 958 | desc.its_vmapp_cmd.col = &its->collections[vpe->col_idx]; |
eb78192b | 959 | |
75fd951b | 960 | its_send_single_vcommand(its, its_build_vmapp_cmd, &desc); |
eb78192b MZ |
961 | } |
962 | ||
3171a47a MZ |
963 | static void its_send_vmovp(struct its_vpe *vpe) |
964 | { | |
965 | struct its_cmd_desc desc; | |
966 | struct its_node *its; | |
967 | unsigned long flags; | |
968 | int col_id = vpe->col_idx; | |
969 | ||
970 | desc.its_vmovp_cmd.vpe = vpe; | |
971 | desc.its_vmovp_cmd.its_list = (u16)its_list_map; | |
972 | ||
973 | if (!its_list_map) { | |
974 | its = list_first_entry(&its_nodes, struct its_node, entry); | |
975 | desc.its_vmovp_cmd.seq_num = 0; | |
976 | desc.its_vmovp_cmd.col = &its->collections[col_id]; | |
977 | its_send_single_vcommand(its, its_build_vmovp_cmd, &desc); | |
978 | return; | |
979 | } | |
980 | ||
981 | /* | |
982 | * Yet another marvel of the architecture. If using the | |
983 | * its_list "feature", we need to make sure that all ITSs | |
984 | * receive all VMOVP commands in the same order. The only way | |
985 | * to guarantee this is to make vmovp a serialization point. | |
986 | * | |
987 | * Wall <-- Head. | |
988 | */ | |
989 | raw_spin_lock_irqsave(&vmovp_lock, flags); | |
990 | ||
991 | desc.its_vmovp_cmd.seq_num = vmovp_seq_num++; | |
992 | ||
993 | /* Emit VMOVPs */ | |
994 | list_for_each_entry(its, &its_nodes, entry) { | |
995 | if (!its->is_v4) | |
996 | continue; | |
997 | ||
2247e1bf MZ |
998 | if (!vpe->its_vm->vlpi_count[its->list_nr]) |
999 | continue; | |
1000 | ||
3171a47a MZ |
1001 | desc.its_vmovp_cmd.col = &its->collections[col_id]; |
1002 | its_send_single_vcommand(its, its_build_vmovp_cmd, &desc); | |
1003 | } | |
1004 | ||
1005 | raw_spin_unlock_irqrestore(&vmovp_lock, flags); | |
1006 | } | |
1007 | ||
40619a2e | 1008 | static void its_send_vinvall(struct its_node *its, struct its_vpe *vpe) |
eb78192b MZ |
1009 | { |
1010 | struct its_cmd_desc desc; | |
eb78192b MZ |
1011 | |
1012 | desc.its_vinvall_cmd.vpe = vpe; | |
40619a2e | 1013 | its_send_single_vcommand(its, its_build_vinvall_cmd, &desc); |
eb78192b MZ |
1014 | } |
1015 | ||
c48ed51c MZ |
1016 | /* |
1017 | * irqchip functions - assumes MSI, mostly. | |
1018 | */ | |
1019 | ||
1020 | static inline u32 its_get_event_id(struct irq_data *d) | |
1021 | { | |
1022 | struct its_device *its_dev = irq_data_get_irq_chip_data(d); | |
591e5bec | 1023 | return d->hwirq - its_dev->event_map.lpi_base; |
c48ed51c MZ |
1024 | } |
1025 | ||
015ec038 | 1026 | static void lpi_write_config(struct irq_data *d, u8 clr, u8 set) |
c48ed51c | 1027 | { |
015ec038 | 1028 | irq_hw_number_t hwirq; |
adcdb94e MZ |
1029 | struct page *prop_page; |
1030 | u8 *cfg; | |
c48ed51c | 1031 | |
015ec038 MZ |
1032 | if (irqd_is_forwarded_to_vcpu(d)) { |
1033 | struct its_device *its_dev = irq_data_get_irq_chip_data(d); | |
1034 | u32 event = its_get_event_id(d); | |
d4d7b4ad | 1035 | struct its_vlpi_map *map; |
015ec038 MZ |
1036 | |
1037 | prop_page = its_dev->event_map.vm->vprop_page; | |
d4d7b4ad MZ |
1038 | map = &its_dev->event_map.vlpi_maps[event]; |
1039 | hwirq = map->vintid; | |
1040 | ||
1041 | /* Remember the updated property */ | |
1042 | map->properties &= ~clr; | |
1043 | map->properties |= set | LPI_PROP_GROUP1; | |
015ec038 MZ |
1044 | } else { |
1045 | prop_page = gic_rdists->prop_page; | |
1046 | hwirq = d->hwirq; | |
1047 | } | |
adcdb94e MZ |
1048 | |
1049 | cfg = page_address(prop_page) + hwirq - 8192; | |
1050 | *cfg &= ~clr; | |
015ec038 | 1051 | *cfg |= set | LPI_PROP_GROUP1; |
c48ed51c MZ |
1052 | |
1053 | /* | |
1054 | * Make the above write visible to the redistributors. | |
1055 | * And yes, we're flushing exactly: One. Single. Byte. | |
1056 | * Humpf... | |
1057 | */ | |
1058 | if (gic_rdists->flags & RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING) | |
328191c0 | 1059 | gic_flush_dcache_to_poc(cfg, sizeof(*cfg)); |
c48ed51c MZ |
1060 | else |
1061 | dsb(ishst); | |
015ec038 MZ |
1062 | } |
1063 | ||
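For reference, a sketch of the per-LPI configuration byte as this driver uses it (bit positions taken from the LPI_PROP_* definitions, shown here for illustration): bits [7:2] carry the priority, bit 1 is LPI_PROP_GROUP1 and bit 0 is LPI_PROP_ENABLED.

/*
 * Illustrative: the default property byte written at table-init time is
 * LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1 = 0xa2 (disabled), and an
 * enabled LPI at the same priority reads back as 0xa3.
 */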
1064 | static void lpi_update_config(struct irq_data *d, u8 clr, u8 set) | |
1065 | { | |
1066 | struct its_device *its_dev = irq_data_get_irq_chip_data(d); | |
1067 | ||
1068 | lpi_write_config(d, clr, set); | |
adcdb94e | 1069 | its_send_inv(its_dev, its_get_event_id(d)); |
c48ed51c MZ |
1070 | } |
1071 | ||
015ec038 MZ |
1072 | static void its_vlpi_set_doorbell(struct irq_data *d, bool enable) |
1073 | { | |
1074 | struct its_device *its_dev = irq_data_get_irq_chip_data(d); | |
1075 | u32 event = its_get_event_id(d); | |
1076 | ||
1077 | if (its_dev->event_map.vlpi_maps[event].db_enabled == enable) | |
1078 | return; | |
1079 | ||
1080 | its_dev->event_map.vlpi_maps[event].db_enabled = enable; | |
1081 | ||
1082 | /* | |
1083 | * More fun with the architecture: | |
1084 | * | |
1085 | * Ideally, we'd issue a VMAPTI to set the doorbell to its LPI | |
1086 | * value or to 1023, depending on the enable bit. But that | |
1087 | * would be issuing a mapping for an /existing/ DevID+EventID | |
1088 | * pair, which is UNPREDICTABLE. Instead, let's issue a VMOVI | |
1089 | * to the /same/ vPE, using this opportunity to adjust the | |
1090 | * doorbell. Mouahahahaha. We loves it, Precious. | |
1091 | */ | |
1092 | its_send_vmovi(its_dev, event); | |
c48ed51c MZ |
1093 | } |
1094 | ||
1095 | static void its_mask_irq(struct irq_data *d) | |
1096 | { | |
015ec038 MZ |
1097 | if (irqd_is_forwarded_to_vcpu(d)) |
1098 | its_vlpi_set_doorbell(d, false); | |
1099 | ||
adcdb94e | 1100 | lpi_update_config(d, LPI_PROP_ENABLED, 0); |
c48ed51c MZ |
1101 | } |
1102 | ||
1103 | static void its_unmask_irq(struct irq_data *d) | |
1104 | { | |
015ec038 MZ |
1105 | if (irqd_is_forwarded_to_vcpu(d)) |
1106 | its_vlpi_set_doorbell(d, true); | |
1107 | ||
adcdb94e | 1108 | lpi_update_config(d, 0, LPI_PROP_ENABLED); |
c48ed51c MZ |
1109 | } |
1110 | ||
c48ed51c MZ |
1111 | static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val, |
1112 | bool force) | |
1113 | { | |
fbf8f40e GK |
1114 | unsigned int cpu; |
1115 | const struct cpumask *cpu_mask = cpu_online_mask; | |
c48ed51c MZ |
1116 | struct its_device *its_dev = irq_data_get_irq_chip_data(d); |
1117 | struct its_collection *target_col; | |
1118 | u32 id = its_get_event_id(d); | |
1119 | ||
015ec038 MZ |
1120 | /* A forwarded interrupt should use irq_set_vcpu_affinity */ |
1121 | if (irqd_is_forwarded_to_vcpu(d)) | |
1122 | return -EINVAL; | |
1123 | ||
fbf8f40e GK |
1124 | /* lpi cannot be routed to a redistributor that is on a foreign node */ |
1125 | if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) { | |
1126 | if (its_dev->its->numa_node >= 0) { | |
1127 | cpu_mask = cpumask_of_node(its_dev->its->numa_node); | |
1128 | if (!cpumask_intersects(mask_val, cpu_mask)) | |
1129 | return -EINVAL; | |
1130 | } | |
1131 | } | |
1132 | ||
1133 | cpu = cpumask_any_and(mask_val, cpu_mask); | |
1134 | ||
c48ed51c MZ |
1135 | if (cpu >= nr_cpu_ids) |
1136 | return -EINVAL; | |
1137 | ||
8b8d94a7 M |
1138 | /* don't set the affinity when the target CPU is the same as the current one */ |
1139 | if (cpu != its_dev->event_map.col_map[id]) { | |
1140 | target_col = &its_dev->its->collections[cpu]; | |
1141 | its_send_movi(its_dev, target_col, id); | |
1142 | its_dev->event_map.col_map[id] = cpu; | |
0d224d35 | 1143 | irq_data_update_effective_affinity(d, cpumask_of(cpu)); |
8b8d94a7 | 1144 | } |
c48ed51c MZ |
1145 | |
1146 | return IRQ_SET_MASK_OK_DONE; | |
1147 | } | |
1148 | ||
558b0165 AB |
1149 | static u64 its_irq_get_msi_base(struct its_device *its_dev) |
1150 | { | |
1151 | struct its_node *its = its_dev->its; | |
1152 | ||
1153 | return its->phys_base + GITS_TRANSLATER; | |
1154 | } | |
1155 | ||
b48ac83d MZ |
1156 | static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg) |
1157 | { | |
1158 | struct its_device *its_dev = irq_data_get_irq_chip_data(d); | |
1159 | struct its_node *its; | |
1160 | u64 addr; | |
1161 | ||
1162 | its = its_dev->its; | |
558b0165 | 1163 | addr = its->get_msi_base(its_dev); |
b48ac83d | 1164 | |
b11283eb VM |
1165 | msg->address_lo = lower_32_bits(addr); |
1166 | msg->address_hi = upper_32_bits(addr); | |
b48ac83d | 1167 | msg->data = its_get_event_id(d); |
44bb7e24 RM |
1168 | |
1169 | iommu_dma_map_msi_msg(d->irq, msg); | |
b48ac83d MZ |
1170 | } |
1171 | ||
8d85dced MZ |
1172 | static int its_irq_set_irqchip_state(struct irq_data *d, |
1173 | enum irqchip_irq_state which, | |
1174 | bool state) | |
1175 | { | |
1176 | struct its_device *its_dev = irq_data_get_irq_chip_data(d); | |
1177 | u32 event = its_get_event_id(d); | |
1178 | ||
1179 | if (which != IRQCHIP_STATE_PENDING) | |
1180 | return -EINVAL; | |
1181 | ||
1182 | if (state) | |
1183 | its_send_int(its_dev, event); | |
1184 | else | |
1185 | its_send_clear(its_dev, event); | |
1186 | ||
1187 | return 0; | |
1188 | } | |
1189 | ||
2247e1bf MZ |
1190 | static void its_map_vm(struct its_node *its, struct its_vm *vm) |
1191 | { | |
1192 | unsigned long flags; | |
1193 | ||
1194 | /* Not using the ITS list? Everything is always mapped. */ | |
1195 | if (!its_list_map) | |
1196 | return; | |
1197 | ||
1198 | raw_spin_lock_irqsave(&vmovp_lock, flags); | |
1199 | ||
1200 | /* | |
1201 | * If the VM wasn't mapped yet, iterate over the vpes and get | |
1202 | * them mapped now. | |
1203 | */ | |
1204 | vm->vlpi_count[its->list_nr]++; | |
1205 | ||
1206 | if (vm->vlpi_count[its->list_nr] == 1) { | |
1207 | int i; | |
1208 | ||
1209 | for (i = 0; i < vm->nr_vpes; i++) { | |
1210 | struct its_vpe *vpe = vm->vpes[i]; | |
44c4c25e | 1211 | struct irq_data *d = irq_get_irq_data(vpe->irq); |
2247e1bf MZ |
1212 | |
1213 | /* Map the VPE to the first possible CPU */ | |
1214 | vpe->col_idx = cpumask_first(cpu_online_mask); | |
1215 | its_send_vmapp(its, vpe, true); | |
1216 | its_send_vinvall(its, vpe); | |
44c4c25e | 1217 | irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx)); |
2247e1bf MZ |
1218 | } |
1219 | } | |
1220 | ||
1221 | raw_spin_unlock_irqrestore(&vmovp_lock, flags); | |
1222 | } | |
1223 | ||
1224 | static void its_unmap_vm(struct its_node *its, struct its_vm *vm) | |
1225 | { | |
1226 | unsigned long flags; | |
1227 | ||
1228 | /* Not using the ITS list? Everything is always mapped. */ | |
1229 | if (!its_list_map) | |
1230 | return; | |
1231 | ||
1232 | raw_spin_lock_irqsave(&vmovp_lock, flags); | |
1233 | ||
1234 | if (!--vm->vlpi_count[its->list_nr]) { | |
1235 | int i; | |
1236 | ||
1237 | for (i = 0; i < vm->nr_vpes; i++) | |
1238 | its_send_vmapp(its, vm->vpes[i], false); | |
1239 | } | |
1240 | ||
1241 | raw_spin_unlock_irqrestore(&vmovp_lock, flags); | |
1242 | } | |
1243 | ||
d011e4e6 MZ |
1244 | static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info) |
1245 | { | |
1246 | struct its_device *its_dev = irq_data_get_irq_chip_data(d); | |
1247 | u32 event = its_get_event_id(d); | |
1248 | int ret = 0; | |
1249 | ||
1250 | if (!info->map) | |
1251 | return -EINVAL; | |
1252 | ||
1253 | mutex_lock(&its_dev->event_map.vlpi_lock); | |
1254 | ||
1255 | if (!its_dev->event_map.vm) { | |
1256 | struct its_vlpi_map *maps; | |
1257 | ||
6396bb22 | 1258 | maps = kcalloc(its_dev->event_map.nr_lpis, sizeof(*maps), |
d011e4e6 MZ |
1259 | GFP_KERNEL); |
1260 | if (!maps) { | |
1261 | ret = -ENOMEM; | |
1262 | goto out; | |
1263 | } | |
1264 | ||
1265 | its_dev->event_map.vm = info->map->vm; | |
1266 | its_dev->event_map.vlpi_maps = maps; | |
1267 | } else if (its_dev->event_map.vm != info->map->vm) { | |
1268 | ret = -EINVAL; | |
1269 | goto out; | |
1270 | } | |
1271 | ||
1272 | /* Get our private copy of the mapping information */ | |
1273 | its_dev->event_map.vlpi_maps[event] = *info->map; | |
1274 | ||
1275 | if (irqd_is_forwarded_to_vcpu(d)) { | |
1276 | /* Already mapped, move it around */ | |
1277 | its_send_vmovi(its_dev, event); | |
1278 | } else { | |
2247e1bf MZ |
1279 | /* Ensure all the VPEs are mapped on this ITS */ |
1280 | its_map_vm(its_dev->its, info->map->vm); | |
1281 | ||
d4d7b4ad MZ |
1282 | /* |
1283 | * Flag the interrupt as forwarded so that we can | |
1284 | * start poking the virtual property table. | |
1285 | */ | |
1286 | irqd_set_forwarded_to_vcpu(d); | |
1287 | ||
1288 | /* Write out the property to the prop table */ | |
1289 | lpi_write_config(d, 0xff, info->map->properties); | |
1290 | ||
d011e4e6 MZ |
1291 | /* Drop the physical mapping */ |
1292 | its_send_discard(its_dev, event); | |
1293 | ||
1294 | /* and install the virtual one */ | |
1295 | its_send_vmapti(its_dev, event); | |
d011e4e6 MZ |
1296 | |
1297 | /* Increment the number of VLPIs */ | |
1298 | its_dev->event_map.nr_vlpis++; | |
1299 | } | |
1300 | ||
1301 | out: | |
1302 | mutex_unlock(&its_dev->event_map.vlpi_lock); | |
1303 | return ret; | |
1304 | } | |
1305 | ||
1306 | static int its_vlpi_get(struct irq_data *d, struct its_cmd_info *info) | |
1307 | { | |
1308 | struct its_device *its_dev = irq_data_get_irq_chip_data(d); | |
1309 | u32 event = its_get_event_id(d); | |
1310 | int ret = 0; | |
1311 | ||
1312 | mutex_lock(&its_dev->event_map.vlpi_lock); | |
1313 | ||
1314 | if (!its_dev->event_map.vm || | |
1315 | !its_dev->event_map.vlpi_maps[event].vm) { | |
1316 | ret = -EINVAL; | |
1317 | goto out; | |
1318 | } | |
1319 | ||
1320 | /* Copy our mapping information to the incoming request */ | |
1321 | *info->map = its_dev->event_map.vlpi_maps[event]; | |
1322 | ||
1323 | out: | |
1324 | mutex_unlock(&its_dev->event_map.vlpi_lock); | |
1325 | return ret; | |
1326 | } | |
1327 | ||
1328 | static int its_vlpi_unmap(struct irq_data *d) | |
1329 | { | |
1330 | struct its_device *its_dev = irq_data_get_irq_chip_data(d); | |
1331 | u32 event = its_get_event_id(d); | |
1332 | int ret = 0; | |
1333 | ||
1334 | mutex_lock(&its_dev->event_map.vlpi_lock); | |
1335 | ||
1336 | if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) { | |
1337 | ret = -EINVAL; | |
1338 | goto out; | |
1339 | } | |
1340 | ||
1341 | /* Drop the virtual mapping */ | |
1342 | its_send_discard(its_dev, event); | |
1343 | ||
1344 | /* and restore the physical one */ | |
1345 | irqd_clr_forwarded_to_vcpu(d); | |
1346 | its_send_mapti(its_dev, d->hwirq, event); | |
1347 | lpi_update_config(d, 0xff, (LPI_PROP_DEFAULT_PRIO | | |
1348 | LPI_PROP_ENABLED | | |
1349 | LPI_PROP_GROUP1)); | |
1350 | ||
2247e1bf MZ |
1351 | /* Potentially unmap the VM from this ITS */ |
1352 | its_unmap_vm(its_dev->its, its_dev->event_map.vm); | |
1353 | ||
d011e4e6 MZ |
1354 | /* |
1355 | * Drop the refcount and make the device available again if | |
1356 | * this was the last VLPI. | |
1357 | */ | |
1358 | if (!--its_dev->event_map.nr_vlpis) { | |
1359 | its_dev->event_map.vm = NULL; | |
1360 | kfree(its_dev->event_map.vlpi_maps); | |
1361 | } | |
1362 | ||
1363 | out: | |
1364 | mutex_unlock(&its_dev->event_map.vlpi_lock); | |
1365 | return ret; | |
1366 | } | |
1367 | ||
015ec038 MZ |
1368 | static int its_vlpi_prop_update(struct irq_data *d, struct its_cmd_info *info) |
1369 | { | |
1370 | struct its_device *its_dev = irq_data_get_irq_chip_data(d); | |
1371 | ||
1372 | if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) | |
1373 | return -EINVAL; | |
1374 | ||
1375 | if (info->cmd_type == PROP_UPDATE_AND_INV_VLPI) | |
1376 | lpi_update_config(d, 0xff, info->config); | |
1377 | else | |
1378 | lpi_write_config(d, 0xff, info->config); | |
1379 | its_vlpi_set_doorbell(d, !!(info->config & LPI_PROP_ENABLED)); | |
1380 | ||
1381 | return 0; | |
1382 | } | |
1383 | ||
c808eea8 MZ |
1384 | static int its_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info) |
1385 | { | |
1386 | struct its_device *its_dev = irq_data_get_irq_chip_data(d); | |
1387 | struct its_cmd_info *info = vcpu_info; | |
1388 | ||
1389 | /* Need a v4 ITS */ | |
d011e4e6 | 1390 | if (!its_dev->its->is_v4) |
c808eea8 MZ |
1391 | return -EINVAL; |
1392 | ||
d011e4e6 MZ |
1393 | /* Unmap request? */ |
1394 | if (!info) | |
1395 | return its_vlpi_unmap(d); | |
1396 | ||
c808eea8 MZ |
1397 | switch (info->cmd_type) { |
1398 | case MAP_VLPI: | |
d011e4e6 | 1399 | return its_vlpi_map(d, info); |
c808eea8 MZ |
1400 | |
1401 | case GET_VLPI: | |
d011e4e6 | 1402 | return its_vlpi_get(d, info); |
c808eea8 MZ |
1403 | |
1404 | case PROP_UPDATE_VLPI: | |
1405 | case PROP_UPDATE_AND_INV_VLPI: | |
015ec038 | 1406 | return its_vlpi_prop_update(d, info); |
c808eea8 MZ |
1407 | |
1408 | default: | |
1409 | return -EINVAL; | |
1410 | } | |
1411 | } | |
1412 | ||
c48ed51c MZ |
1413 | static struct irq_chip its_irq_chip = { |
1414 | .name = "ITS", | |
1415 | .irq_mask = its_mask_irq, | |
1416 | .irq_unmask = its_unmask_irq, | |
004fa08d | 1417 | .irq_eoi = irq_chip_eoi_parent, |
c48ed51c | 1418 | .irq_set_affinity = its_set_affinity, |
b48ac83d | 1419 | .irq_compose_msi_msg = its_irq_compose_msi_msg, |
8d85dced | 1420 | .irq_set_irqchip_state = its_irq_set_irqchip_state, |
c808eea8 | 1421 | .irq_set_vcpu_affinity = its_irq_set_vcpu_affinity, |
b48ac83d MZ |
1422 | }; |
1423 | ||
bf9529f8 MZ |
1424 | /* |
1425 | * How we allocate LPIs: | |
1426 | * | |
1427 | * The GIC has id_bits bits for interrupt identifiers. From there, we | |
1428 | * must subtract the 8192 IDs reserved for SGIs/PPIs/SPIs. Then, as |
1429 | * we allocate LPIs by chunks of 32, we can shift the whole thing by 5 | |
1430 | * bits to the right. | |
1431 | * | |
1432 | * This gives us (((1UL << id_bits) - 8192) >> 5) possible allocations. | |
1433 | */ | |
1434 | #define IRQS_PER_CHUNK_SHIFT 5 | |
4f2c7583 | 1435 | #define IRQS_PER_CHUNK (1UL << IRQS_PER_CHUNK_SHIFT) |
6c31e123 | 1436 | #define ITS_MAX_LPI_NRBITS 16 /* 64K LPIs */ |
bf9529f8 MZ |
1437 | |
1438 | static unsigned long *lpi_bitmap; | |
1439 | static u32 lpi_chunks; | |
1440 | static DEFINE_SPINLOCK(lpi_lock); | |
1441 | ||
1442 | static int its_lpi_to_chunk(int lpi) | |
1443 | { | |
1444 | return (lpi - 8192) >> IRQS_PER_CHUNK_SHIFT; | |
1445 | } | |
1446 | ||
1447 | static int its_chunk_to_lpi(int chunk) | |
1448 | { | |
1449 | return (chunk << IRQS_PER_CHUNK_SHIFT) + 8192; | |
1450 | } | |
1451 | ||
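A worked example of the chunk arithmetic above (illustrative, assuming id_bits = 16):

/*
 * Illustrative: with id_bits == 16 there are 65536 interrupt IDs; removing
 * the 8192 reserved for SGIs/PPIs/SPIs leaves 57344 LPIs, i.e.
 * (65536 - 8192) >> 5 == 1792 chunks of 32. Chunk 0 maps back to INTID
 * 8192, chunk 1 to 8224, and so on.
 */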
04a0e4de | 1452 | static int __init its_lpi_init(u32 id_bits) |
bf9529f8 MZ |
1453 | { |
1454 | lpi_chunks = its_lpi_to_chunk(1UL << id_bits); | |
1455 | ||
6396bb22 | 1456 | lpi_bitmap = kcalloc(BITS_TO_LONGS(lpi_chunks), sizeof(long), |
bf9529f8 MZ |
1457 | GFP_KERNEL); |
1458 | if (!lpi_bitmap) { | |
1459 | lpi_chunks = 0; | |
1460 | return -ENOMEM; | |
1461 | } | |
1462 | ||
1463 | pr_info("ITS: Allocated %d chunks for LPIs\n", (int)lpi_chunks); | |
1464 | return 0; | |
1465 | } | |
1466 | ||
1467 | static unsigned long *its_lpi_alloc_chunks(int nr_irqs, int *base, int *nr_ids) | |
1468 | { | |
1469 | unsigned long *bitmap = NULL; | |
1470 | int chunk_id; | |
1471 | int nr_chunks; | |
1472 | int i; | |
1473 | ||
1474 | nr_chunks = DIV_ROUND_UP(nr_irqs, IRQS_PER_CHUNK); | |
1475 | ||
1476 | spin_lock(&lpi_lock); | |
1477 | ||
1478 | do { | |
1479 | chunk_id = bitmap_find_next_zero_area(lpi_bitmap, lpi_chunks, | |
1480 | 0, nr_chunks, 0); | |
1481 | if (chunk_id < lpi_chunks) | |
1482 | break; | |
1483 | ||
1484 | nr_chunks--; | |
1485 | } while (nr_chunks > 0); | |
1486 | ||
1487 | if (!nr_chunks) | |
1488 | goto out; | |
1489 | ||
6396bb22 KC |
1490 | bitmap = kcalloc(BITS_TO_LONGS(nr_chunks * IRQS_PER_CHUNK), |
1491 | sizeof(long), | |
bf9529f8 MZ |
1492 | GFP_ATOMIC); |
1493 | if (!bitmap) | |
1494 | goto out; | |
1495 | ||
1496 | for (i = 0; i < nr_chunks; i++) | |
1497 | set_bit(chunk_id + i, lpi_bitmap); | |
1498 | ||
1499 | *base = its_chunk_to_lpi(chunk_id); | |
1500 | *nr_ids = nr_chunks * IRQS_PER_CHUNK; | |
1501 | ||
1502 | out: | |
1503 | spin_unlock(&lpi_lock); | |
1504 | ||
c8415b94 MZ |
1505 | if (!bitmap) |
1506 | *base = *nr_ids = 0; | |
1507 | ||
bf9529f8 MZ |
1508 | return bitmap; |
1509 | } | |
1510 | ||
cf2be8ba | 1511 | static void its_lpi_free_chunks(unsigned long *bitmap, int base, int nr_ids) |
bf9529f8 MZ |
1512 | { |
1513 | int lpi; | |
1514 | ||
1515 | spin_lock(&lpi_lock); | |
1516 | ||
1517 | for (lpi = base; lpi < (base + nr_ids); lpi += IRQS_PER_CHUNK) { | |
1518 | int chunk = its_lpi_to_chunk(lpi); | |
cf2be8ba | 1519 | |
bf9529f8 MZ |
1520 | BUG_ON(chunk > lpi_chunks); |
1521 | if (test_bit(chunk, lpi_bitmap)) { | |
1522 | clear_bit(chunk, lpi_bitmap); | |
1523 | } else { | |
1524 | pr_err("Bad LPI chunk %d\n", chunk); | |
1525 | } | |
1526 | } | |
1527 | ||
1528 | spin_unlock(&lpi_lock); | |
1529 | ||
cf2be8ba | 1530 | kfree(bitmap); |
bf9529f8 | 1531 | } |
1ac19ca6 | 1532 | |
0e5ccf91 MZ |
1533 | static struct page *its_allocate_prop_table(gfp_t gfp_flags) |
1534 | { | |
1535 | struct page *prop_page; | |
1ac19ca6 | 1536 | |
0e5ccf91 MZ |
1537 | prop_page = alloc_pages(gfp_flags, get_order(LPI_PROPBASE_SZ)); |
1538 | if (!prop_page) | |
1539 | return NULL; | |
1540 | ||
1541 | /* Priority 0xa0, Group-1, disabled */ | |
1542 | memset(page_address(prop_page), | |
1543 | LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1, | |
1544 | LPI_PROPBASE_SZ); | |
1545 | ||
1546 | /* Make sure the GIC will observe the written configuration */ | |
1547 | gic_flush_dcache_to_poc(page_address(prop_page), LPI_PROPBASE_SZ); | |
1548 | ||
1549 | return prop_page; | |
1550 | } | |
1551 | ||
7d75bbb4 MZ |
1552 | static void its_free_prop_table(struct page *prop_page) |
1553 | { | |
1554 | free_pages((unsigned long)page_address(prop_page), | |
1555 | get_order(LPI_PROPBASE_SZ)); | |
1556 | } | |
1ac19ca6 MZ |
1557 | |
1558 | static int __init its_alloc_lpi_tables(void) | |
1559 | { | |
1560 | phys_addr_t paddr; | |
1561 | ||
6c31e123 | 1562 | lpi_id_bits = min_t(u32, gic_rdists->id_bits, ITS_MAX_LPI_NRBITS); |
0e5ccf91 | 1563 | gic_rdists->prop_page = its_allocate_prop_table(GFP_NOWAIT); |
1ac19ca6 MZ |
1564 | if (!gic_rdists->prop_page) { |
1565 | pr_err("Failed to allocate PROPBASE\n"); | |
1566 | return -ENOMEM; | |
1567 | } | |
1568 | ||
1569 | paddr = page_to_phys(gic_rdists->prop_page); | |
1570 | pr_info("GIC: using LPI property table @%pa\n", &paddr); | |
1571 | ||
6c31e123 | 1572 | return its_lpi_init(lpi_id_bits); |
1ac19ca6 MZ |
1573 | } |
1574 | ||
1575 | static const char *its_base_type_string[] = { | |
1576 | [GITS_BASER_TYPE_DEVICE] = "Devices", | |
1577 | [GITS_BASER_TYPE_VCPU] = "Virtual CPUs", | |
4f46de9d | 1578 | [GITS_BASER_TYPE_RESERVED3] = "Reserved (3)", |
1ac19ca6 MZ |
1579 | [GITS_BASER_TYPE_COLLECTION] = "Interrupt Collections", |
1580 | [GITS_BASER_TYPE_RESERVED5] = "Reserved (5)", | |
1581 | [GITS_BASER_TYPE_RESERVED6] = "Reserved (6)", | |
1582 | [GITS_BASER_TYPE_RESERVED7] = "Reserved (7)", | |
1583 | }; | |
1584 | ||
2d81d425 SD |
1585 | static u64 its_read_baser(struct its_node *its, struct its_baser *baser) |
1586 | { | |
1587 | u32 idx = baser - its->tables; | |
1588 | ||
0968a619 | 1589 | return gits_read_baser(its->base + GITS_BASER + (idx << 3)); |
2d81d425 SD |
1590 | } |
1591 | ||
1592 | static void its_write_baser(struct its_node *its, struct its_baser *baser, | |
1593 | u64 val) | |
1594 | { | |
1595 | u32 idx = baser - its->tables; | |
1596 | ||
0968a619 | 1597 | gits_write_baser(val, its->base + GITS_BASER + (idx << 3)); |
2d81d425 SD |
1598 | baser->val = its_read_baser(its, baser); |
1599 | } | |
1600 | ||
9347359a | 1601 | static int its_setup_baser(struct its_node *its, struct its_baser *baser, |
3faf24ea SD |
1602 | u64 cache, u64 shr, u32 psz, u32 order, |
1603 | bool indirect) | |
9347359a SD |
1604 | { |
1605 | u64 val = its_read_baser(its, baser); | |
1606 | u64 esz = GITS_BASER_ENTRY_SIZE(val); | |
1607 | u64 type = GITS_BASER_TYPE(val); | |
30ae9610 | 1608 | u64 baser_phys, tmp; |
9347359a SD |
1609 | u32 alloc_pages; |
1610 | void *base; | |
9347359a SD |
1611 | |
1612 | retry_alloc_baser: | |
1613 | alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz); | |
1614 | if (alloc_pages > GITS_BASER_PAGES_MAX) { | |
1615 | pr_warn("ITS@%pa: %s too large, reduce ITS pages %u->%u\n", | |
1616 | &its->phys_base, its_base_type_string[type], | |
1617 | alloc_pages, GITS_BASER_PAGES_MAX); | |
1618 | alloc_pages = GITS_BASER_PAGES_MAX; | |
1619 | order = get_order(GITS_BASER_PAGES_MAX * psz); | |
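| /* |
| * Illustrative figure (assuming GITS_BASER_PAGES_MAX is 256, as the |
| * 8-bit Size field suggests): with 64kB ITS pages the table ends up |
| * capped at 256 * 64kB = 16MB. |
| */ |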
1620 | } | |
1621 | ||
1622 | base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order); | |
1623 | if (!base) | |
1624 | return -ENOMEM; | |
1625 | ||
30ae9610 SD |
1626 | baser_phys = virt_to_phys(base); |
1627 | ||
1628 | /* Check if the physical address of the memory is above 48bits */ | |
1629 | if (IS_ENABLED(CONFIG_ARM64_64K_PAGES) && (baser_phys >> 48)) { | |
1630 | ||
1631 | /* 52bit PA is supported only when PageSize=64K */ | |
1632 | if (psz != SZ_64K) { | |
1633 | pr_err("ITS: no 52bit PA support when psz=%d\n", psz); | |
1634 | free_pages((unsigned long)base, order); | |
1635 | return -ENXIO; | |
1636 | } | |
1637 | ||
1638 | /* Convert 52bit PA to 48bit field */ | |
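| /* |
| * Sketch of the encoding as we understand it: a 64kB-aligned address |
| * has bits [15:0] clear, so GITS_BASER_PHYS_52_to_48() can fold PA |
| * bits [51:48] into bits [15:12] of the register field while keeping |
| * bits [47:16] in place. |
| */ |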
1639 | baser_phys = GITS_BASER_PHYS_52_to_48(baser_phys); | |
1640 | } | |
1641 | ||
9347359a | 1642 | retry_baser: |
30ae9610 | 1643 | val = (baser_phys | |
9347359a SD |
1644 | (type << GITS_BASER_TYPE_SHIFT) | |
1645 | ((esz - 1) << GITS_BASER_ENTRY_SIZE_SHIFT) | | |
1646 | ((alloc_pages - 1) << GITS_BASER_PAGES_SHIFT) | | |
1647 | cache | | |
1648 | shr | | |
1649 | GITS_BASER_VALID); | |
1650 | ||
3faf24ea SD |
1651 | val |= indirect ? GITS_BASER_INDIRECT : 0x0; |
1652 | ||
9347359a SD |
1653 | switch (psz) { |
1654 | case SZ_4K: | |
1655 | val |= GITS_BASER_PAGE_SIZE_4K; | |
1656 | break; | |
1657 | case SZ_16K: | |
1658 | val |= GITS_BASER_PAGE_SIZE_16K; | |
1659 | break; | |
1660 | case SZ_64K: | |
1661 | val |= GITS_BASER_PAGE_SIZE_64K; | |
1662 | break; | |
1663 | } | |
1664 | ||
1665 | its_write_baser(its, baser, val); | |
1666 | tmp = baser->val; | |
1667 | ||
1668 | if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) { | |
1669 | /* | |
1670 | * Shareability didn't stick. Just use |
1671 | * whatever the read reported, which is likely |
1672 | * to be the only thing this ITS |
1673 | * supports. If that's zero, make it |
1674 | * non-cacheable as well. |
1675 | */ | |
1676 | shr = tmp & GITS_BASER_SHAREABILITY_MASK; | |
1677 | if (!shr) { | |
1678 | cache = GITS_BASER_nC; | |
328191c0 | 1679 | gic_flush_dcache_to_poc(base, PAGE_ORDER_TO_SIZE(order)); |
9347359a SD |
1680 | } |
1681 | goto retry_baser; | |
1682 | } | |
1683 | ||
1684 | if ((val ^ tmp) & GITS_BASER_PAGE_SIZE_MASK) { | |
1685 | /* | |
1686 | * Page size didn't stick. Let's try a smaller | |
1687 | * size and retry. If we reach 4K, then | |
1688 | * something is horribly wrong... | |
1689 | */ | |
1690 | free_pages((unsigned long)base, order); | |
1691 | baser->base = NULL; | |
1692 | ||
1693 | switch (psz) { | |
1694 | case SZ_16K: | |
1695 | psz = SZ_4K; | |
1696 | goto retry_alloc_baser; | |
1697 | case SZ_64K: | |
1698 | psz = SZ_16K; | |
1699 | goto retry_alloc_baser; | |
1700 | } | |
1701 | } | |
1702 | ||
1703 | if (val != tmp) { | |
b11283eb | 1704 | pr_err("ITS@%pa: %s doesn't stick: %llx %llx\n", |
9347359a | 1705 | &its->phys_base, its_base_type_string[type], |
b11283eb | 1706 | val, tmp); |
9347359a SD |
1707 | free_pages((unsigned long)base, order); |
1708 | return -ENXIO; | |
1709 | } | |
1710 | ||
1711 | baser->order = order; | |
1712 | baser->base = base; | |
1713 | baser->psz = psz; | |
3faf24ea | 1714 | tmp = indirect ? GITS_LVL1_ENTRY_SIZE : esz; |
9347359a | 1715 | |
3faf24ea | 1716 | pr_info("ITS@%pa: allocated %d %s @%lx (%s, esz %d, psz %dK, shr %d)\n", |
d524eaa2 | 1717 | &its->phys_base, (int)(PAGE_ORDER_TO_SIZE(order) / (int)tmp), |
9347359a SD |
1718 | its_base_type_string[type], |
1719 | (unsigned long)virt_to_phys(base), | |
3faf24ea | 1720 | indirect ? "indirect" : "flat", (int)esz, |
9347359a SD |
1721 | psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT); |
1722 | ||
1723 | return 0; | |
1724 | } | |
1725 | ||
4cacac57 MZ |
1726 | static bool its_parse_indirect_baser(struct its_node *its, |
1727 | struct its_baser *baser, | |
32bd44dc | 1728 | u32 psz, u32 *order, u32 ids) |
4b75c459 | 1729 | { |
4cacac57 MZ |
1730 | u64 tmp = its_read_baser(its, baser); |
1731 | u64 type = GITS_BASER_TYPE(tmp); | |
1732 | u64 esz = GITS_BASER_ENTRY_SIZE(tmp); | |
2fd632a0 | 1733 | u64 val = GITS_BASER_InnerShareable | GITS_BASER_RaWaWb; |
4b75c459 | 1734 | u32 new_order = *order; |
3faf24ea SD |
1735 | bool indirect = false; |
1736 | ||
1737 | /* No need to enable Indirection if memory requirement < (psz * 2) bytes */ |
1738 | if ((esz << ids) > (psz * 2)) { | |
1739 | /* | |
1740 | * Find out whether hw supports a single or two-level table by |
1741 | * reading bit at offset '62' after writing '1' to it. |
1742 | */ | |
1743 | its_write_baser(its, baser, val | GITS_BASER_INDIRECT); | |
1744 | indirect = !!(baser->val & GITS_BASER_INDIRECT); | |
1745 | ||
1746 | if (indirect) { | |
1747 | /* | |
1748 | * Each lvl2 table is one ITS page ('psz') in size. To size the |
1749 | * lvl1 table, subtract from 'ids' (the value reported by the |
1750 | * ITS hardware) the ID bits covered by a single lvl2 table; |
1751 | * the lvl1 table then needs one GITS_LVL1_ENTRY_SIZE entry per |
1752 | * remaining ID. |
1753 | */ | |
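| /* |
| * Illustrative figures (not from this source): with psz = 64kB and |
| * an 8-byte entry, one lvl2 page holds 8192 entries, i.e. covers |
| * 13 ID bits. With ids = 20 the lvl1 table then needs 2^7 = 128 |
| * entries of GITS_LVL1_ENTRY_SIZE bytes (about 1kB), instead of |
| * the roughly 8MB a flat table would require. |
| */ |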
d524eaa2 | 1754 | ids -= ilog2(psz / (int)esz); |
3faf24ea SD |
1755 | esz = GITS_LVL1_ENTRY_SIZE; |
1756 | } | |
1757 | } | |
4b75c459 SD |
1758 | |
1759 | /* | |
1760 | * Allocate as many entries as required to fit the | |
1761 | * range of device IDs that the ITS can grok... The ID | |
1762 | * space being incredibly sparse, this results in a | |
3faf24ea SD |
1763 | * massive waste of memory if two-level device table |
1764 | * feature is not supported by hardware. | |
4b75c459 SD |
1765 | */ |
1766 | new_order = max_t(u32, get_order(esz << ids), new_order); | |
1767 | if (new_order >= MAX_ORDER) { | |
1768 | new_order = MAX_ORDER - 1; | |
d524eaa2 | 1769 | ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / (int)esz); |
4cacac57 MZ |
1770 | pr_warn("ITS@%pa: %s Table too large, reduce ids %u->%u\n", |
1771 | &its->phys_base, its_base_type_string[type], | |
1772 | its->device_ids, ids); | |
4b75c459 SD |
1773 | } |
1774 | ||
1775 | *order = new_order; | |
3faf24ea SD |
1776 | |
1777 | return indirect; | |
4b75c459 SD |
1778 | } |
1779 | ||
1ac19ca6 MZ |
1780 | static void its_free_tables(struct its_node *its) |
1781 | { | |
1782 | int i; | |
1783 | ||
1784 | for (i = 0; i < GITS_BASER_NR_REGS; i++) { | |
1a485f4d SD |
1785 | if (its->tables[i].base) { |
1786 | free_pages((unsigned long)its->tables[i].base, | |
1787 | its->tables[i].order); | |
1788 | its->tables[i].base = NULL; | |
1ac19ca6 MZ |
1789 | } |
1790 | } | |
1791 | } | |
1792 | ||
0e0b0f69 | 1793 | static int its_alloc_tables(struct its_node *its) |
1ac19ca6 | 1794 | { |
1ac19ca6 | 1795 | u64 shr = GITS_BASER_InnerShareable; |
2fd632a0 | 1796 | u64 cache = GITS_BASER_RaWaWb; |
9347359a SD |
1797 | u32 psz = SZ_64K; |
1798 | int err, i; | |
94100970 | 1799 | |
fa150019 AB |
1800 | if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375) |
1801 | /* erratum 24313: ignore memory access type */ | |
1802 | cache = GITS_BASER_nCnB; | |
466b7d16 | 1803 | |
1ac19ca6 | 1804 | for (i = 0; i < GITS_BASER_NR_REGS; i++) { |
2d81d425 SD |
1805 | struct its_baser *baser = its->tables + i; |
1806 | u64 val = its_read_baser(its, baser); | |
1ac19ca6 | 1807 | u64 type = GITS_BASER_TYPE(val); |
9347359a | 1808 | u32 order = get_order(psz); |
3faf24ea | 1809 | bool indirect = false; |
1ac19ca6 | 1810 | |
4cacac57 MZ |
1811 | switch (type) { |
1812 | case GITS_BASER_TYPE_NONE: | |
1ac19ca6 MZ |
1813 | continue; |
1814 | ||
4cacac57 | 1815 | case GITS_BASER_TYPE_DEVICE: |
32bd44dc SD |
1816 | indirect = its_parse_indirect_baser(its, baser, |
1817 | psz, &order, | |
1818 | its->device_ids); |
| break; |
4cacac57 MZ |
1819 | case GITS_BASER_TYPE_VCPU: |
1820 | indirect = its_parse_indirect_baser(its, baser, | |
32bd44dc SD |
1821 | psz, &order, |
1822 | ITS_MAX_VPEID_BITS); | |
4cacac57 MZ |
1823 | break; |
1824 | } | |
f54b97ed | 1825 | |
3faf24ea | 1826 | err = its_setup_baser(its, baser, cache, shr, psz, order, indirect); |
9347359a SD |
1827 | if (err < 0) { |
1828 | its_free_tables(its); | |
1829 | return err; | |
1ac19ca6 MZ |
1830 | } |
1831 | ||
9347359a SD |
1832 | /* Update settings which will be used for next BASERn */ |
1833 | psz = baser->psz; | |
1834 | cache = baser->val & GITS_BASER_CACHEABILITY_MASK; | |
1835 | shr = baser->val & GITS_BASER_SHAREABILITY_MASK; | |
1ac19ca6 MZ |
1836 | } |
1837 | ||
1838 | return 0; | |
1ac19ca6 MZ |
1839 | } |
1840 | ||
1841 | static int its_alloc_collections(struct its_node *its) | |
1842 | { | |
83559b47 MZ |
1843 | int i; |
1844 | ||
6396bb22 | 1845 | its->collections = kcalloc(nr_cpu_ids, sizeof(*its->collections), |
1ac19ca6 MZ |
1846 | GFP_KERNEL); |
1847 | if (!its->collections) | |
1848 | return -ENOMEM; | |
1849 | ||
83559b47 MZ |
1850 | for (i = 0; i < nr_cpu_ids; i++) |
1851 | its->collections[i].target_address = ~0ULL; | |
1852 | ||
1ac19ca6 MZ |
1853 | return 0; |
1854 | } | |
1855 | ||
7c297a2d MZ |
1856 | static struct page *its_allocate_pending_table(gfp_t gfp_flags) |
1857 | { | |
1858 | struct page *pend_page; | |
1859 | /* | |
1860 | * The pending pages have to be at least 64kB aligned, | |
1861 | * hence the 'max(LPI_PENDBASE_SZ, SZ_64K)' below. | |
1862 | */ | |
1863 | pend_page = alloc_pages(gfp_flags | __GFP_ZERO, | |
1864 | get_order(max_t(u32, LPI_PENDBASE_SZ, SZ_64K))); | |
1865 | if (!pend_page) | |
1866 | return NULL; | |
1867 | ||
1868 | /* Make sure the GIC will observe the zero-ed page */ | |
1869 | gic_flush_dcache_to_poc(page_address(pend_page), LPI_PENDBASE_SZ); | |
1870 | ||
1871 | return pend_page; | |
1872 | } | |
1873 | ||
7d75bbb4 MZ |
1874 | static void its_free_pending_table(struct page *pt) |
1875 | { | |
1876 | free_pages((unsigned long)page_address(pt), | |
1877 | get_order(max_t(u32, LPI_PENDBASE_SZ, SZ_64K))); | |
1878 | } | |
1879 | ||
1ac19ca6 MZ |
1880 | static void its_cpu_init_lpis(void) |
1881 | { | |
1882 | void __iomem *rbase = gic_data_rdist_rd_base(); | |
1883 | struct page *pend_page; | |
1884 | u64 val, tmp; | |
1885 | ||
1886 | /* If we didn't allocate the pending table yet, do it now */ | |
1887 | pend_page = gic_data_rdist()->pend_page; | |
1888 | if (!pend_page) { | |
1889 | phys_addr_t paddr; | |
7c297a2d MZ |
1890 | |
1891 | pend_page = its_allocate_pending_table(GFP_NOWAIT); | |
1ac19ca6 MZ |
1892 | if (!pend_page) { |
1893 | pr_err("Failed to allocate PENDBASE for CPU%d\n", | |
1894 | smp_processor_id()); | |
1895 | return; | |
1896 | } | |
1897 | ||
1ac19ca6 MZ |
1898 | paddr = page_to_phys(pend_page); |
1899 | pr_info("CPU%d: using LPI pending table @%pa\n", | |
1900 | smp_processor_id(), &paddr); | |
1901 | gic_data_rdist()->pend_page = pend_page; | |
1902 | } | |
1903 | ||
1ac19ca6 MZ |
1904 | /* set PROPBASE */ |
1905 | val = (page_to_phys(gic_rdists->prop_page) | | |
1906 | GICR_PROPBASER_InnerShareable | | |
2fd632a0 | 1907 | GICR_PROPBASER_RaWaWb | |
1ac19ca6 MZ |
1908 | ((LPI_NRBITS - 1) & GICR_PROPBASER_IDBITS_MASK)); |
1909 | ||
0968a619 VM |
1910 | gicr_write_propbaser(val, rbase + GICR_PROPBASER); |
1911 | tmp = gicr_read_propbaser(rbase + GICR_PROPBASER); | |
1ac19ca6 MZ |
1912 | |
1913 | if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) { | |
241a386c MZ |
1914 | if (!(tmp & GICR_PROPBASER_SHAREABILITY_MASK)) { |
1915 | /* | |
1916 | * The HW reports non-shareable, we must | |
1917 | * remove the cacheability attributes as | |
1918 | * well. | |
1919 | */ | |
1920 | val &= ~(GICR_PROPBASER_SHAREABILITY_MASK | | |
1921 | GICR_PROPBASER_CACHEABILITY_MASK); | |
1922 | val |= GICR_PROPBASER_nC; | |
0968a619 | 1923 | gicr_write_propbaser(val, rbase + GICR_PROPBASER); |
241a386c | 1924 | } |
1ac19ca6 MZ |
1925 | pr_info_once("GIC: using cache flushing for LPI property table\n"); |
1926 | gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING; | |
1927 | } | |
1928 | ||
1929 | /* set PENDBASE */ | |
1930 | val = (page_to_phys(pend_page) | | |
4ad3e363 | 1931 | GICR_PENDBASER_InnerShareable | |
2fd632a0 | 1932 | GICR_PENDBASER_RaWaWb); |
1ac19ca6 | 1933 | |
0968a619 VM |
1934 | gicr_write_pendbaser(val, rbase + GICR_PENDBASER); |
1935 | tmp = gicr_read_pendbaser(rbase + GICR_PENDBASER); | |
241a386c MZ |
1936 | |
1937 | if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) { | |
1938 | /* | |
1939 | * The HW reports non-shareable, we must remove the | |
1940 | * cacheability attributes as well. | |
1941 | */ | |
1942 | val &= ~(GICR_PENDBASER_SHAREABILITY_MASK | | |
1943 | GICR_PENDBASER_CACHEABILITY_MASK); | |
1944 | val |= GICR_PENDBASER_nC; | |
0968a619 | 1945 | gicr_write_pendbaser(val, rbase + GICR_PENDBASER); |
241a386c | 1946 | } |
1ac19ca6 MZ |
1947 | |
1948 | /* Enable LPIs */ | |
1949 | val = readl_relaxed(rbase + GICR_CTLR); | |
1950 | val |= GICR_CTLR_ENABLE_LPIS; | |
1951 | writel_relaxed(val, rbase + GICR_CTLR); | |
1952 | ||
1953 | /* Make sure the GIC has seen the above */ | |
1954 | dsb(sy); | |
1955 | } | |
1956 | ||
920181ce | 1957 | static void its_cpu_init_collection(struct its_node *its) |
1ac19ca6 | 1958 | { |
920181ce DB |
1959 | int cpu = smp_processor_id(); |
1960 | u64 target; | |
1ac19ca6 | 1961 | |
920181ce DB |
1962 | /* avoid cross-node collections and their mapping */ |
1963 | if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) { | |
1964 | struct device_node *cpu_node; | |
fbf8f40e | 1965 | |
920181ce DB |
1966 | cpu_node = of_get_cpu_node(cpu, NULL); |
1967 | if (its->numa_node != NUMA_NO_NODE && | |
1968 | its->numa_node != of_node_to_nid(cpu_node)) | |
1969 | return; | |
1970 | } | |
fbf8f40e | 1971 | |
920181ce DB |
1972 | /* |
1973 | * We now have to bind each collection to its target | |
1974 | * redistributor. | |
1975 | */ | |
1976 | if (gic_read_typer(its->base + GITS_TYPER) & GITS_TYPER_PTA) { | |
1ac19ca6 | 1977 | /* |
920181ce | 1978 | * This ITS wants the physical address of the |
1ac19ca6 MZ |
1979 | * redistributor. |
1980 | */ | |
920181ce DB |
1981 | target = gic_data_rdist()->phys_base; |
1982 | } else { | |
1983 | /* This ITS wants a linear CPU number. */ | |
1984 | target = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER); | |
1985 | target = GICR_TYPER_CPU_NUMBER(target) << 16; | |
1986 | } | |
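| /* |
| * Illustration (made-up value): with GITS_TYPER.PTA clear and |
| * GICR_TYPER.Processor_Number = 3, the collection's RDbase is |
| * encoded as 3 << 16 in the MAPC command sent below. |
| */ |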
1ac19ca6 | 1987 | |
920181ce DB |
1988 | /* Perform collection mapping */ |
1989 | its->collections[cpu].target_address = target; | |
1990 | its->collections[cpu].col_id = cpu; | |
1ac19ca6 | 1991 | |
920181ce DB |
1992 | its_send_mapc(its, &its->collections[cpu], 1); |
1993 | its_send_invall(its, &its->collections[cpu]); | |
1994 | } | |
1995 | ||
1996 | static void its_cpu_init_collections(void) | |
1997 | { | |
1998 | struct its_node *its; | |
1999 | ||
2000 | spin_lock(&its_lock); | |
2001 | ||
2002 | list_for_each_entry(its, &its_nodes, entry) | |
2003 | its_cpu_init_collection(its); | |
1ac19ca6 MZ |
2004 | |
2005 | spin_unlock(&its_lock); | |
2006 | } | |
84a6a2e7 MZ |
2007 | |
2008 | static struct its_device *its_find_device(struct its_node *its, u32 dev_id) | |
2009 | { | |
2010 | struct its_device *its_dev = NULL, *tmp; | |
3e39e8f5 | 2011 | unsigned long flags; |
84a6a2e7 | 2012 | |
3e39e8f5 | 2013 | raw_spin_lock_irqsave(&its->lock, flags); |
84a6a2e7 MZ |
2014 | |
2015 | list_for_each_entry(tmp, &its->its_device_list, entry) { | |
2016 | if (tmp->device_id == dev_id) { | |
2017 | its_dev = tmp; | |
2018 | break; | |
2019 | } | |
2020 | } | |
2021 | ||
3e39e8f5 | 2022 | raw_spin_unlock_irqrestore(&its->lock, flags); |
84a6a2e7 MZ |
2023 | |
2024 | return its_dev; | |
2025 | } | |
2026 | ||
466b7d16 SD |
2027 | static struct its_baser *its_get_baser(struct its_node *its, u32 type) |
2028 | { | |
2029 | int i; | |
2030 | ||
2031 | for (i = 0; i < GITS_BASER_NR_REGS; i++) { | |
2032 | if (GITS_BASER_TYPE(its->tables[i].val) == type) | |
2033 | return &its->tables[i]; | |
2034 | } | |
2035 | ||
2036 | return NULL; | |
2037 | } | |
2038 | ||
70cc81ed | 2039 | static bool its_alloc_table_entry(struct its_baser *baser, u32 id) |
3faf24ea | 2040 | { |
3faf24ea SD |
2041 | struct page *page; |
2042 | u32 esz, idx; | |
2043 | __le64 *table; | |
2044 | ||
3faf24ea SD |
2045 | /* Don't allow device id that exceeds single, flat table limit */ |
2046 | esz = GITS_BASER_ENTRY_SIZE(baser->val); | |
2047 | if (!(baser->val & GITS_BASER_INDIRECT)) | |
70cc81ed | 2048 | return (id < (PAGE_ORDER_TO_SIZE(baser->order) / esz)); |
3faf24ea SD |
2049 | |
2050 | /* Compute 1st level table index & check if that exceeds table limit */ | |
70cc81ed | 2051 | idx = id >> ilog2(baser->psz / esz); |
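| /* |
| * Illustrative figures (not from this source): with a 64kB ITS page |
| * and 8-byte entries, one lvl2 page covers 8192 IDs, so id 20000 |
| * lands in lvl1 slot 20000 >> 13 = 2. |
| */ |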
3faf24ea SD |
2052 | if (idx >= (PAGE_ORDER_TO_SIZE(baser->order) / GITS_LVL1_ENTRY_SIZE)) |
2053 | return false; | |
2054 | ||
2055 | table = baser->base; | |
2056 | ||
2057 | /* Allocate memory for 2nd level table */ | |
2058 | if (!table[idx]) { | |
2059 | page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(baser->psz)); | |
2060 | if (!page) | |
2061 | return false; | |
2062 | ||
2063 | /* Flush Lvl2 table to PoC if hw doesn't support coherency */ | |
2064 | if (!(baser->val & GITS_BASER_SHAREABILITY_MASK)) | |
328191c0 | 2065 | gic_flush_dcache_to_poc(page_address(page), baser->psz); |
3faf24ea SD |
2066 | |
2067 | table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID); | |
2068 | ||
2069 | /* Flush Lvl1 entry to PoC if hw doesn't support coherency */ | |
2070 | if (!(baser->val & GITS_BASER_SHAREABILITY_MASK)) | |
328191c0 | 2071 | gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE); |
3faf24ea SD |
2072 | |
2073 | /* Ensure updated table contents are visible to ITS hardware */ | |
2074 | dsb(sy); | |
2075 | } | |
2076 | ||
2077 | return true; | |
2078 | } | |
2079 | ||
70cc81ed MZ |
2080 | static bool its_alloc_device_table(struct its_node *its, u32 dev_id) |
2081 | { | |
2082 | struct its_baser *baser; | |
2083 | ||
2084 | baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE); | |
2085 | ||
2086 | /* Don't allow device id that exceeds ITS hardware limit */ | |
2087 | if (!baser) | |
2088 | return (ilog2(dev_id) < its->device_ids); | |
2089 | ||
2090 | return its_alloc_table_entry(baser, dev_id); | |
2091 | } | |
2092 | ||
7d75bbb4 MZ |
2093 | static bool its_alloc_vpe_table(u32 vpe_id) |
2094 | { | |
2095 | struct its_node *its; | |
2096 | ||
2097 | /* | |
2098 | * Make sure the L2 tables are allocated on *all* v4 ITSs. We | |
2099 | * could try and only do it on ITSs corresponding to devices | |
2100 | * that have interrupts targeted at this VPE, but the | |
2101 | * complexity becomes crazy (and you have tons of memory | |
2102 | * anyway, right?). | |
2103 | */ | |
2104 | list_for_each_entry(its, &its_nodes, entry) { | |
2105 | struct its_baser *baser; | |
2106 | ||
2107 | if (!its->is_v4) | |
2108 | continue; | |
3faf24ea | 2109 | |
7d75bbb4 MZ |
2110 | baser = its_get_baser(its, GITS_BASER_TYPE_VCPU); |
2111 | if (!baser) | |
2112 | return false; | |
3faf24ea | 2113 | |
7d75bbb4 MZ |
2114 | if (!its_alloc_table_entry(baser, vpe_id)) |
2115 | return false; | |
3faf24ea SD |
2116 | } |
2117 | ||
2118 | return true; | |
2119 | } | |
2120 | ||
84a6a2e7 | 2121 | static struct its_device *its_create_device(struct its_node *its, u32 dev_id, |
93f94ea0 | 2122 | int nvecs, bool alloc_lpis) |
84a6a2e7 MZ |
2123 | { |
2124 | struct its_device *dev; | |
93f94ea0 | 2125 | unsigned long *lpi_map = NULL; |
3e39e8f5 | 2126 | unsigned long flags; |
591e5bec | 2127 | u16 *col_map = NULL; |
84a6a2e7 MZ |
2128 | void *itt; |
2129 | int lpi_base; | |
2130 | int nr_lpis; | |
c8481267 | 2131 | int nr_ites; |
84a6a2e7 MZ |
2132 | int sz; |
2133 | ||
3faf24ea | 2134 | if (!its_alloc_device_table(its, dev_id)) |
466b7d16 SD |
2135 | return NULL; |
2136 | ||
84a6a2e7 | 2137 | dev = kzalloc(sizeof(*dev), GFP_KERNEL); |
c8481267 | 2138 | /* |
4f2c7583 AB |
2139 | * We allocate at least one chunk worth of LPIs per device, |
2140 | * and thus that many ITEs. The device may require less though. | |
c8481267 | 2141 | */ |
4f2c7583 | 2142 | nr_ites = max(IRQS_PER_CHUNK, roundup_pow_of_two(nvecs)); |
c8481267 | 2143 | sz = nr_ites * its->ite_size; |
84a6a2e7 | 2144 | sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1; |
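| /* |
| * Illustrative figures (assuming IRQS_PER_CHUNK is 32, an 8-byte ITE |
| * and ITS_ITT_ALIGN of 256): nvecs = 30 gives nr_ites = 32 and a |
| * 256-byte ITT; the extra ITS_ITT_ALIGN - 1 bytes guarantee that a |
| * properly aligned 256-byte region exists inside the allocation. |
| */ |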
6c834125 | 2145 | itt = kzalloc(sz, GFP_KERNEL); |
93f94ea0 MZ |
2146 | if (alloc_lpis) { |
2147 | lpi_map = its_lpi_alloc_chunks(nvecs, &lpi_base, &nr_lpis); | |
2148 | if (lpi_map) | |
6396bb22 | 2149 | col_map = kcalloc(nr_lpis, sizeof(*col_map), |
93f94ea0 MZ |
2150 | GFP_KERNEL); |
2151 | } else { | |
6396bb22 | 2152 | col_map = kcalloc(nr_ites, sizeof(*col_map), GFP_KERNEL); |
93f94ea0 MZ |
2153 | nr_lpis = 0; |
2154 | lpi_base = 0; | |
2155 | } | |
84a6a2e7 | 2156 | |
93f94ea0 | 2157 | if (!dev || !itt || !col_map || (!lpi_map && alloc_lpis)) { |
84a6a2e7 MZ |
2158 | kfree(dev); |
2159 | kfree(itt); | |
2160 | kfree(lpi_map); | |
591e5bec | 2161 | kfree(col_map); |
84a6a2e7 MZ |
2162 | return NULL; |
2163 | } | |
2164 | ||
328191c0 | 2165 | gic_flush_dcache_to_poc(itt, sz); |
5a9a8915 | 2166 | |
84a6a2e7 MZ |
2167 | dev->its = its; |
2168 | dev->itt = itt; | |
c8481267 | 2169 | dev->nr_ites = nr_ites; |
591e5bec MZ |
2170 | dev->event_map.lpi_map = lpi_map; |
2171 | dev->event_map.col_map = col_map; | |
2172 | dev->event_map.lpi_base = lpi_base; | |
2173 | dev->event_map.nr_lpis = nr_lpis; | |
d011e4e6 | 2174 | mutex_init(&dev->event_map.vlpi_lock); |
84a6a2e7 MZ |
2175 | dev->device_id = dev_id; |
2176 | INIT_LIST_HEAD(&dev->entry); | |
2177 | ||
3e39e8f5 | 2178 | raw_spin_lock_irqsave(&its->lock, flags); |
84a6a2e7 | 2179 | list_add(&dev->entry, &its->its_device_list); |
3e39e8f5 | 2180 | raw_spin_unlock_irqrestore(&its->lock, flags); |
84a6a2e7 | 2181 | |
84a6a2e7 MZ |
2182 | /* Map device to its ITT */ |
2183 | its_send_mapd(dev, 1); | |
2184 | ||
2185 | return dev; | |
2186 | } | |
2187 | ||
2188 | static void its_free_device(struct its_device *its_dev) | |
2189 | { | |
3e39e8f5 MZ |
2190 | unsigned long flags; |
2191 | ||
2192 | raw_spin_lock_irqsave(&its_dev->its->lock, flags); | |
84a6a2e7 | 2193 | list_del(&its_dev->entry); |
3e39e8f5 | 2194 | raw_spin_unlock_irqrestore(&its_dev->its->lock, flags); |
84a6a2e7 MZ |
2195 | kfree(its_dev->itt); |
2196 | kfree(its_dev); | |
2197 | } | |
b48ac83d MZ |
2198 | |
2199 | static int its_alloc_device_irq(struct its_device *dev, irq_hw_number_t *hwirq) | |
2200 | { | |
2201 | int idx; | |
2202 | ||
591e5bec MZ |
2203 | idx = find_first_zero_bit(dev->event_map.lpi_map, |
2204 | dev->event_map.nr_lpis); | |
2205 | if (idx == dev->event_map.nr_lpis) | |
b48ac83d MZ |
2206 | return -ENOSPC; |
2207 | ||
591e5bec MZ |
2208 | *hwirq = dev->event_map.lpi_base + idx; |
2209 | set_bit(idx, dev->event_map.lpi_map); | |
b48ac83d | 2210 | |
b48ac83d MZ |
2211 | return 0; |
2212 | } | |
2213 | ||
54456db9 MZ |
2214 | static int its_msi_prepare(struct irq_domain *domain, struct device *dev, |
2215 | int nvec, msi_alloc_info_t *info) | |
e8137f4f | 2216 | { |
b48ac83d | 2217 | struct its_node *its; |
b48ac83d | 2218 | struct its_device *its_dev; |
54456db9 MZ |
2219 | struct msi_domain_info *msi_info; |
2220 | u32 dev_id; | |
2221 | ||
2222 | /* | |
2223 | * We ignore "dev" entirely, and rely on the dev_id that has |
2224 | * been passed via the scratchpad. This limits this domain's | |
2225 | * usefulness to upper layers that definitely know that they | |
2226 | * are built on top of the ITS. | |
2227 | */ | |
2228 | dev_id = info->scratchpad[0].ul; | |
2229 | ||
2230 | msi_info = msi_get_domain_info(domain); | |
2231 | its = msi_info->data; | |
e8137f4f | 2232 | |
20b3d54e MZ |
2233 | if (!gic_rdists->has_direct_lpi && |
2234 | vpe_proxy.dev && | |
2235 | vpe_proxy.dev->its == its && | |
2236 | dev_id == vpe_proxy.dev->device_id) { | |
2237 | /* Bad luck. Get yourself a better implementation */ | |
2238 | WARN_ONCE(1, "DevId %x clashes with GICv4 VPE proxy device\n", | |
2239 | dev_id); | |
2240 | return -EINVAL; | |
2241 | } | |
2242 | ||
f130420e | 2243 | its_dev = its_find_device(its, dev_id); |
e8137f4f MZ |
2244 | if (its_dev) { |
2245 | /* | |
2246 | * We already have seen this ID, probably through | |
2247 | * another alias (PCI bridge of some sort). No need to | |
2248 | * create the device. | |
2249 | */ | |
f130420e | 2250 | pr_debug("Reusing ITT for devID %x\n", dev_id); |
e8137f4f MZ |
2251 | goto out; |
2252 | } | |
b48ac83d | 2253 | |
93f94ea0 | 2254 | its_dev = its_create_device(its, dev_id, nvec, true); |
b48ac83d MZ |
2255 | if (!its_dev) |
2256 | return -ENOMEM; | |
2257 | ||
f130420e | 2258 | pr_debug("ITT %d entries, %d bits\n", nvec, ilog2(nvec)); |
e8137f4f | 2259 | out: |
b48ac83d | 2260 | info->scratchpad[0].ptr = its_dev; |
b48ac83d MZ |
2261 | return 0; |
2262 | } | |
2263 | ||
54456db9 MZ |
2264 | static struct msi_domain_ops its_msi_domain_ops = { |
2265 | .msi_prepare = its_msi_prepare, | |
2266 | }; | |
2267 | ||
b48ac83d MZ |
2268 | static int its_irq_gic_domain_alloc(struct irq_domain *domain, |
2269 | unsigned int virq, | |
2270 | irq_hw_number_t hwirq) | |
2271 | { | |
f833f57f MZ |
2272 | struct irq_fwspec fwspec; |
2273 | ||
2274 | if (irq_domain_get_of_node(domain->parent)) { | |
2275 | fwspec.fwnode = domain->parent->fwnode; | |
2276 | fwspec.param_count = 3; | |
2277 | fwspec.param[0] = GIC_IRQ_TYPE_LPI; | |
2278 | fwspec.param[1] = hwirq; | |
2279 | fwspec.param[2] = IRQ_TYPE_EDGE_RISING; | |
3f010cf1 TN |
2280 | } else if (is_fwnode_irqchip(domain->parent->fwnode)) { |
2281 | fwspec.fwnode = domain->parent->fwnode; | |
2282 | fwspec.param_count = 2; | |
2283 | fwspec.param[0] = hwirq; | |
2284 | fwspec.param[1] = IRQ_TYPE_EDGE_RISING; | |
f833f57f MZ |
2285 | } else { |
2286 | return -EINVAL; | |
2287 | } | |
b48ac83d | 2288 | |
f833f57f | 2289 | return irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec); |
b48ac83d MZ |
2290 | } |
2291 | ||
2292 | static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, | |
2293 | unsigned int nr_irqs, void *args) | |
2294 | { | |
2295 | msi_alloc_info_t *info = args; | |
2296 | struct its_device *its_dev = info->scratchpad[0].ptr; | |
2297 | irq_hw_number_t hwirq; | |
2298 | int err; | |
2299 | int i; | |
2300 | ||
2301 | for (i = 0; i < nr_irqs; i++) { | |
2302 | err = its_alloc_device_irq(its_dev, &hwirq); | |
2303 | if (err) | |
2304 | return err; | |
2305 | ||
2306 | err = its_irq_gic_domain_alloc(domain, virq + i, hwirq); | |
2307 | if (err) | |
2308 | return err; | |
2309 | ||
2310 | irq_domain_set_hwirq_and_chip(domain, virq + i, | |
2311 | hwirq, &its_irq_chip, its_dev); | |
0d224d35 | 2312 | irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq + i))); |
f130420e MZ |
2313 | pr_debug("ID:%d pID:%d vID:%d\n", |
2314 | (int)(hwirq - its_dev->event_map.lpi_base), | |
2315 | (int) hwirq, virq + i); | |
b48ac83d MZ |
2316 | } |
2317 | ||
2318 | return 0; | |
2319 | } | |
2320 | ||
72491643 | 2321 | static int its_irq_domain_activate(struct irq_domain *domain, |
702cb0a0 | 2322 | struct irq_data *d, bool reserve) |
aca268df MZ |
2323 | { |
2324 | struct its_device *its_dev = irq_data_get_irq_chip_data(d); | |
2325 | u32 event = its_get_event_id(d); | |
fbf8f40e | 2326 | const struct cpumask *cpu_mask = cpu_online_mask; |
0d224d35 | 2327 | int cpu; |
fbf8f40e GK |
2328 | |
2329 | /* get the cpu_mask of local node */ | |
2330 | if (its_dev->its->numa_node >= 0) | |
2331 | cpu_mask = cpumask_of_node(its_dev->its->numa_node); | |
aca268df | 2332 | |
591e5bec | 2333 | /* Bind the LPI to the first possible CPU */ |
c1797b11 YY |
2334 | cpu = cpumask_first_and(cpu_mask, cpu_online_mask); |
2335 | if (cpu >= nr_cpu_ids) { | |
2336 | if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) | |
2337 | return -EINVAL; | |
2338 | ||
2339 | cpu = cpumask_first(cpu_online_mask); | |
2340 | } | |
2341 | ||
0d224d35 MZ |
2342 | its_dev->event_map.col_map[event] = cpu; |
2343 | irq_data_update_effective_affinity(d, cpumask_of(cpu)); | |
591e5bec | 2344 | |
aca268df | 2345 | /* Map the GIC IRQ and event to the device */ |
6a25ad3a | 2346 | its_send_mapti(its_dev, d->hwirq, event); |
72491643 | 2347 | return 0; |
aca268df MZ |
2348 | } |
2349 | ||
2350 | static void its_irq_domain_deactivate(struct irq_domain *domain, | |
2351 | struct irq_data *d) | |
2352 | { | |
2353 | struct its_device *its_dev = irq_data_get_irq_chip_data(d); | |
2354 | u32 event = its_get_event_id(d); | |
2355 | ||
2356 | /* Stop the delivery of interrupts */ | |
2357 | its_send_discard(its_dev, event); | |
2358 | } | |
2359 | ||
b48ac83d MZ |
2360 | static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq, |
2361 | unsigned int nr_irqs) | |
2362 | { | |
2363 | struct irq_data *d = irq_domain_get_irq_data(domain, virq); | |
2364 | struct its_device *its_dev = irq_data_get_irq_chip_data(d); | |
2365 | int i; | |
2366 | ||
2367 | for (i = 0; i < nr_irqs; i++) { | |
2368 | struct irq_data *data = irq_domain_get_irq_data(domain, | |
2369 | virq + i); | |
aca268df | 2370 | u32 event = its_get_event_id(data); |
b48ac83d MZ |
2371 | |
2372 | /* Mark interrupt index as unused */ | |
591e5bec | 2373 | clear_bit(event, its_dev->event_map.lpi_map); |
b48ac83d MZ |
2374 | |
2375 | /* Nuke the entry in the domain */ | |
2da39949 | 2376 | irq_domain_reset_irq_data(data); |
b48ac83d MZ |
2377 | } |
2378 | ||
2379 | /* If all interrupts have been freed, start mopping the floor */ | |
591e5bec MZ |
2380 | if (bitmap_empty(its_dev->event_map.lpi_map, |
2381 | its_dev->event_map.nr_lpis)) { | |
cf2be8ba MZ |
2382 | its_lpi_free_chunks(its_dev->event_map.lpi_map, |
2383 | its_dev->event_map.lpi_base, | |
2384 | its_dev->event_map.nr_lpis); | |
2385 | kfree(its_dev->event_map.col_map); | |
b48ac83d MZ |
2386 | |
2387 | /* Unmap device/itt */ | |
2388 | its_send_mapd(its_dev, 0); | |
2389 | its_free_device(its_dev); | |
2390 | } | |
2391 | ||
2392 | irq_domain_free_irqs_parent(domain, virq, nr_irqs); | |
2393 | } | |
2394 | ||
2395 | static const struct irq_domain_ops its_domain_ops = { | |
2396 | .alloc = its_irq_domain_alloc, | |
2397 | .free = its_irq_domain_free, | |
aca268df MZ |
2398 | .activate = its_irq_domain_activate, |
2399 | .deactivate = its_irq_domain_deactivate, | |
b48ac83d | 2400 | }; |
4c21f3c2 | 2401 | |
20b3d54e MZ |
2402 | /* |
2403 | * This is insane. | |
2404 | * | |
2405 | * If a GICv4 doesn't implement Direct LPIs (which is extremely | |
2406 | * likely), the only way to perform an invalidate is to use a fake | |
2407 | * device to issue an INV command, implying that the LPI has first | |
2408 | * been mapped to some event on that device. Since this is not exactly | |
2409 | * cheap, we try to keep that mapping around as long as possible, and | |
2410 | * only issue an UNMAP if we're short on available slots. | |
2411 | * | |
2412 | * Broken by design(tm). | |
2413 | */ | |
2414 | static void its_vpe_db_proxy_unmap_locked(struct its_vpe *vpe) | |
2415 | { | |
2416 | /* Already unmapped? */ | |
2417 | if (vpe->vpe_proxy_event == -1) | |
2418 | return; | |
2419 | ||
2420 | its_send_discard(vpe_proxy.dev, vpe->vpe_proxy_event); | |
2421 | vpe_proxy.vpes[vpe->vpe_proxy_event] = NULL; | |
2422 | ||
2423 | /* | |
2424 | * We don't track empty slots at all, so let's move the | |
2425 | * next_victim pointer if we can quickly reuse that slot | |
2426 | * instead of nuking an existing entry. Not clear that this is | |
2427 | * always a win though, and this might just generate a ripple | |
2428 | * effect... Let's just hope VPEs don't migrate too often. | |
2429 | */ | |
2430 | if (vpe_proxy.vpes[vpe_proxy.next_victim]) | |
2431 | vpe_proxy.next_victim = vpe->vpe_proxy_event; | |
2432 | ||
2433 | vpe->vpe_proxy_event = -1; | |
2434 | } | |
2435 | ||
2436 | static void its_vpe_db_proxy_unmap(struct its_vpe *vpe) | |
2437 | { | |
2438 | if (!gic_rdists->has_direct_lpi) { | |
2439 | unsigned long flags; | |
2440 | ||
2441 | raw_spin_lock_irqsave(&vpe_proxy.lock, flags); | |
2442 | its_vpe_db_proxy_unmap_locked(vpe); | |
2443 | raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags); | |
2444 | } | |
2445 | } | |
2446 | ||
2447 | static void its_vpe_db_proxy_map_locked(struct its_vpe *vpe) | |
2448 | { | |
2449 | /* Already mapped? */ | |
2450 | if (vpe->vpe_proxy_event != -1) | |
2451 | return; | |
2452 | ||
2453 | /* This slot was already allocated. Kick the other VPE out. */ | |
2454 | if (vpe_proxy.vpes[vpe_proxy.next_victim]) | |
2455 | its_vpe_db_proxy_unmap_locked(vpe_proxy.vpes[vpe_proxy.next_victim]); | |
2456 | ||
2457 | /* Map the new VPE instead */ | |
2458 | vpe_proxy.vpes[vpe_proxy.next_victim] = vpe; | |
2459 | vpe->vpe_proxy_event = vpe_proxy.next_victim; | |
2460 | vpe_proxy.next_victim = (vpe_proxy.next_victim + 1) % vpe_proxy.dev->nr_ites; | |
2461 | ||
2462 | vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = vpe->col_idx; | |
2463 | its_send_mapti(vpe_proxy.dev, vpe->vpe_db_lpi, vpe->vpe_proxy_event); | |
2464 | } | |
2465 | ||
958b90d1 MZ |
2466 | static void its_vpe_db_proxy_move(struct its_vpe *vpe, int from, int to) |
2467 | { | |
2468 | unsigned long flags; | |
2469 | struct its_collection *target_col; | |
2470 | ||
2471 | if (gic_rdists->has_direct_lpi) { | |
2472 | void __iomem *rdbase; | |
2473 | ||
2474 | rdbase = per_cpu_ptr(gic_rdists->rdist, from)->rd_base; | |
2475 | gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR); | |
2476 | while (gic_read_lpir(rdbase + GICR_SYNCR) & 1) | |
2477 | cpu_relax(); | |
2478 | ||
2479 | return; | |
2480 | } | |
2481 | ||
2482 | raw_spin_lock_irqsave(&vpe_proxy.lock, flags); | |
2483 | ||
2484 | its_vpe_db_proxy_map_locked(vpe); | |
2485 | ||
2486 | target_col = &vpe_proxy.dev->its->collections[to]; | |
2487 | its_send_movi(vpe_proxy.dev, target_col, vpe->vpe_proxy_event); | |
2488 | vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = to; | |
2489 | ||
2490 | raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags); | |
2491 | } | |
2492 | ||
3171a47a MZ |
2493 | static int its_vpe_set_affinity(struct irq_data *d, |
2494 | const struct cpumask *mask_val, | |
2495 | bool force) | |
2496 | { | |
2497 | struct its_vpe *vpe = irq_data_get_irq_chip_data(d); | |
2498 | int cpu = cpumask_first(mask_val); | |
2499 | ||
2500 | /* | |
2501 | * Changing affinity is mega expensive, so let's be as lazy as | |
20b3d54e | 2502 | * we can and only do it if we really have to. Also, if mapped |
958b90d1 MZ |
2503 | * into the proxy device, we need to move the doorbell |
2504 | * interrupt to its new location. | |
3171a47a MZ |
2505 | */ |
2506 | if (vpe->col_idx != cpu) { | |
958b90d1 MZ |
2507 | int from = vpe->col_idx; |
2508 | ||
3171a47a MZ |
2509 | vpe->col_idx = cpu; |
2510 | its_send_vmovp(vpe); | |
958b90d1 | 2511 | its_vpe_db_proxy_move(vpe, from, cpu); |
3171a47a MZ |
2512 | } |
2513 | ||
44c4c25e MZ |
2514 | irq_data_update_effective_affinity(d, cpumask_of(cpu)); |
2515 | ||
3171a47a MZ |
2516 | return IRQ_SET_MASK_OK_DONE; |
2517 | } | |
2518 | ||
e643d803 MZ |
2519 | static void its_vpe_schedule(struct its_vpe *vpe) |
2520 | { | |
50c33097 | 2521 | void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); |
e643d803 MZ |
2522 | u64 val; |
2523 | ||
2524 | /* Schedule the VPE */ | |
2525 | val = virt_to_phys(page_address(vpe->its_vm->vprop_page)) & | |
2526 | GENMASK_ULL(51, 12); | |
2527 | val |= (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK; | |
2528 | val |= GICR_VPROPBASER_RaWb; | |
2529 | val |= GICR_VPROPBASER_InnerShareable; | |
2530 | gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER); | |
2531 | ||
2532 | val = virt_to_phys(page_address(vpe->vpt_page)) & | |
2533 | GENMASK_ULL(51, 16); | |
2534 | val |= GICR_VPENDBASER_RaWaWb; | |
2535 | val |= GICR_VPENDBASER_NonShareable; | |
2536 | /* | |
2537 | * There is no good way of finding out if the pending table is | |
2538 | * empty as we can race against the doorbell interrupt very | |
2539 | * easily. So in the end, vpe->pending_last is only an | |
2540 | * indication that the vcpu has something pending, not one | |
2541 | * that the pending table is empty. A good implementation | |
2542 | * would be able to read its coarse map pretty quickly anyway, | |
2543 | * making this a tolerable issue. | |
2544 | */ | |
2545 | val |= GICR_VPENDBASER_PendingLast; | |
2546 | val |= vpe->idai ? GICR_VPENDBASER_IDAI : 0; | |
2547 | val |= GICR_VPENDBASER_Valid; | |
2548 | gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER); | |
2549 | } | |
2550 | ||
2551 | static void its_vpe_deschedule(struct its_vpe *vpe) | |
2552 | { | |
50c33097 | 2553 | void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); |
e643d803 MZ |
2554 | u32 count = 1000000; /* 1s! */ |
2555 | bool clean; | |
2556 | u64 val; | |
2557 | ||
2558 | /* We're being scheduled out */ | |
2559 | val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER); | |
2560 | val &= ~GICR_VPENDBASER_Valid; | |
2561 | gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER); | |
2562 | ||
2563 | do { | |
2564 | val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER); | |
2565 | clean = !(val & GICR_VPENDBASER_Dirty); | |
2566 | if (!clean) { | |
2567 | count--; | |
2568 | cpu_relax(); | |
2569 | udelay(1); | |
2570 | } | |
2571 | } while (!clean && count); | |
2572 | ||
2573 | if (unlikely(!clean && !count)) { | |
2574 | pr_err_ratelimited("ITS virtual pending table not cleaning\n"); | |
2575 | vpe->idai = false; | |
2576 | vpe->pending_last = true; | |
2577 | } else { | |
2578 | vpe->idai = !!(val & GICR_VPENDBASER_IDAI); | |
2579 | vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast); | |
2580 | } | |
2581 | } | |
2582 | ||
40619a2e MZ |
2583 | static void its_vpe_invall(struct its_vpe *vpe) |
2584 | { | |
2585 | struct its_node *its; | |
2586 | ||
2587 | list_for_each_entry(its, &its_nodes, entry) { | |
2588 | if (!its->is_v4) | |
2589 | continue; | |
2590 | ||
2247e1bf MZ |
2591 | if (its_list_map && !vpe->its_vm->vlpi_count[its->list_nr]) |
2592 | continue; | |
2593 | ||
3c1cceeb MZ |
2594 | /* |
2595 | * Sending a VINVALL to a single ITS is enough, as all | |
2596 | * we need is to reach the redistributors. | |
2597 | */ | |
40619a2e | 2598 | its_send_vinvall(its, vpe); |
3c1cceeb | 2599 | return; |
40619a2e MZ |
2600 | } |
2601 | } | |
2602 | ||
e643d803 MZ |
2603 | static int its_vpe_set_vcpu_affinity(struct irq_data *d, void *vcpu_info) |
2604 | { | |
2605 | struct its_vpe *vpe = irq_data_get_irq_chip_data(d); | |
2606 | struct its_cmd_info *info = vcpu_info; | |
2607 | ||
2608 | switch (info->cmd_type) { | |
2609 | case SCHEDULE_VPE: | |
2610 | its_vpe_schedule(vpe); | |
2611 | return 0; | |
2612 | ||
2613 | case DESCHEDULE_VPE: | |
2614 | its_vpe_deschedule(vpe); | |
2615 | return 0; | |
2616 | ||
5e2f7642 | 2617 | case INVALL_VPE: |
40619a2e | 2618 | its_vpe_invall(vpe); |
5e2f7642 MZ |
2619 | return 0; |
2620 | ||
e643d803 MZ |
2621 | default: |
2622 | return -EINVAL; | |
2623 | } | |
2624 | } | |
2625 | ||
20b3d54e MZ |
2626 | static void its_vpe_send_cmd(struct its_vpe *vpe, |
2627 | void (*cmd)(struct its_device *, u32)) | |
2628 | { | |
2629 | unsigned long flags; | |
2630 | ||
2631 | raw_spin_lock_irqsave(&vpe_proxy.lock, flags); | |
2632 | ||
2633 | its_vpe_db_proxy_map_locked(vpe); | |
2634 | cmd(vpe_proxy.dev, vpe->vpe_proxy_event); | |
2635 | ||
2636 | raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags); | |
2637 | } | |
2638 | ||
f6a91da7 MZ |
2639 | static void its_vpe_send_inv(struct irq_data *d) |
2640 | { | |
2641 | struct its_vpe *vpe = irq_data_get_irq_chip_data(d); | |
f6a91da7 | 2642 | |
20b3d54e MZ |
2643 | if (gic_rdists->has_direct_lpi) { |
2644 | void __iomem *rdbase; | |
2645 | ||
2646 | rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base; | |
2647 | gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_INVLPIR); | |
2648 | while (gic_read_lpir(rdbase + GICR_SYNCR) & 1) | |
2649 | cpu_relax(); | |
2650 | } else { | |
2651 | its_vpe_send_cmd(vpe, its_send_inv); | |
2652 | } | |
f6a91da7 MZ |
2653 | } |
2654 | ||
2655 | static void its_vpe_mask_irq(struct irq_data *d) | |
2656 | { | |
2657 | /* | |
2658 | * We need to mask the LPI, which is described by the parent |
2659 | * irq_data. Instead of calling into the parent (which won't |
2660 | * exactly do the right thing), let's simply use the |
2661 | * parent_data pointer. Yes, I'm naughty. |
2662 | */ | |
2663 | lpi_write_config(d->parent_data, LPI_PROP_ENABLED, 0); | |
2664 | its_vpe_send_inv(d); | |
2665 | } | |
2666 | ||
2667 | static void its_vpe_unmask_irq(struct irq_data *d) | |
2668 | { | |
2669 | /* Same hack as above... */ | |
2670 | lpi_write_config(d->parent_data, 0, LPI_PROP_ENABLED); | |
2671 | its_vpe_send_inv(d); | |
2672 | } | |
2673 | ||
e57a3e28 MZ |
2674 | static int its_vpe_set_irqchip_state(struct irq_data *d, |
2675 | enum irqchip_irq_state which, | |
2676 | bool state) | |
2677 | { | |
2678 | struct its_vpe *vpe = irq_data_get_irq_chip_data(d); | |
2679 | ||
2680 | if (which != IRQCHIP_STATE_PENDING) | |
2681 | return -EINVAL; | |
2682 | ||
2683 | if (gic_rdists->has_direct_lpi) { | |
2684 | void __iomem *rdbase; | |
2685 | ||
2686 | rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base; | |
2687 | if (state) { | |
2688 | gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_SETLPIR); | |
2689 | } else { | |
2690 | gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR); | |
2691 | while (gic_read_lpir(rdbase + GICR_SYNCR) & 1) | |
2692 | cpu_relax(); | |
2693 | } | |
2694 | } else { | |
2695 | if (state) | |
2696 | its_vpe_send_cmd(vpe, its_send_int); | |
2697 | else | |
2698 | its_vpe_send_cmd(vpe, its_send_clear); | |
2699 | } | |
2700 | ||
2701 | return 0; | |
2702 | } | |
2703 | ||
8fff27ae MZ |
2704 | static struct irq_chip its_vpe_irq_chip = { |
2705 | .name = "GICv4-vpe", | |
f6a91da7 MZ |
2706 | .irq_mask = its_vpe_mask_irq, |
2707 | .irq_unmask = its_vpe_unmask_irq, | |
2708 | .irq_eoi = irq_chip_eoi_parent, | |
3171a47a | 2709 | .irq_set_affinity = its_vpe_set_affinity, |
e57a3e28 | 2710 | .irq_set_irqchip_state = its_vpe_set_irqchip_state, |
e643d803 | 2711 | .irq_set_vcpu_affinity = its_vpe_set_vcpu_affinity, |
8fff27ae MZ |
2712 | }; |
2713 | ||
7d75bbb4 MZ |
2714 | static int its_vpe_id_alloc(void) |
2715 | { | |
32bd44dc | 2716 | return ida_simple_get(&its_vpeid_ida, 0, ITS_MAX_VPEID, GFP_KERNEL); |
7d75bbb4 MZ |
2717 | } |
2718 | ||
2719 | static void its_vpe_id_free(u16 id) | |
2720 | { | |
2721 | ida_simple_remove(&its_vpeid_ida, id); | |
2722 | } | |
2723 | ||
2724 | static int its_vpe_init(struct its_vpe *vpe) | |
2725 | { | |
2726 | struct page *vpt_page; | |
2727 | int vpe_id; | |
2728 | ||
2729 | /* Allocate vpe_id */ | |
2730 | vpe_id = its_vpe_id_alloc(); | |
2731 | if (vpe_id < 0) | |
2732 | return vpe_id; | |
2733 | ||
2734 | /* Allocate VPT */ | |
2735 | vpt_page = its_allocate_pending_table(GFP_KERNEL); | |
2736 | if (!vpt_page) { | |
2737 | its_vpe_id_free(vpe_id); | |
2738 | return -ENOMEM; | |
2739 | } | |
2740 | ||
2741 | if (!its_alloc_vpe_table(vpe_id)) { | |
2742 | its_vpe_id_free(vpe_id); | |
2743 | its_free_pending_table(vpt_page); |
2744 | return -ENOMEM; | |
2745 | } | |
2746 | ||
2747 | vpe->vpe_id = vpe_id; | |
2748 | vpe->vpt_page = vpt_page; | |
20b3d54e | 2749 | vpe->vpe_proxy_event = -1; |
7d75bbb4 MZ |
2750 | |
2751 | return 0; | |
2752 | } | |
2753 | ||
2754 | static void its_vpe_teardown(struct its_vpe *vpe) | |
2755 | { | |
20b3d54e | 2756 | its_vpe_db_proxy_unmap(vpe); |
7d75bbb4 MZ |
2757 | its_vpe_id_free(vpe->vpe_id); |
2758 | its_free_pending_table(vpe->vpt_page); | |
2759 | } | |
2760 | ||
2761 | static void its_vpe_irq_domain_free(struct irq_domain *domain, | |
2762 | unsigned int virq, | |
2763 | unsigned int nr_irqs) | |
2764 | { | |
2765 | struct its_vm *vm = domain->host_data; | |
2766 | int i; | |
2767 | ||
2768 | irq_domain_free_irqs_parent(domain, virq, nr_irqs); | |
2769 | ||
2770 | for (i = 0; i < nr_irqs; i++) { | |
2771 | struct irq_data *data = irq_domain_get_irq_data(domain, | |
2772 | virq + i); | |
2773 | struct its_vpe *vpe = irq_data_get_irq_chip_data(data); | |
2774 | ||
2775 | BUG_ON(vm != vpe->its_vm); | |
2776 | ||
2777 | clear_bit(data->hwirq, vm->db_bitmap); | |
2778 | its_vpe_teardown(vpe); | |
2779 | irq_domain_reset_irq_data(data); | |
2780 | } | |
2781 | ||
2782 | if (bitmap_empty(vm->db_bitmap, vm->nr_db_lpis)) { | |
2783 | its_lpi_free_chunks(vm->db_bitmap, vm->db_lpi_base, vm->nr_db_lpis); | |
2784 | its_free_prop_table(vm->vprop_page); | |
2785 | } | |
2786 | } | |
2787 | ||
2788 | static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, | |
2789 | unsigned int nr_irqs, void *args) | |
2790 | { | |
2791 | struct its_vm *vm = args; | |
2792 | unsigned long *bitmap; | |
2793 | struct page *vprop_page; | |
2794 | int base, nr_ids, i, err = 0; | |
2795 | ||
2796 | BUG_ON(!vm); | |
2797 | ||
2798 | bitmap = its_lpi_alloc_chunks(nr_irqs, &base, &nr_ids); | |
2799 | if (!bitmap) | |
2800 | return -ENOMEM; | |
2801 | ||
2802 | if (nr_ids < nr_irqs) { | |
2803 | its_lpi_free_chunks(bitmap, base, nr_ids); | |
2804 | return -ENOMEM; | |
2805 | } | |
2806 | ||
2807 | vprop_page = its_allocate_prop_table(GFP_KERNEL); | |
2808 | if (!vprop_page) { | |
2809 | its_lpi_free_chunks(bitmap, base, nr_ids); | |
2810 | return -ENOMEM; | |
2811 | } | |
2812 | ||
2813 | vm->db_bitmap = bitmap; | |
2814 | vm->db_lpi_base = base; | |
2815 | vm->nr_db_lpis = nr_ids; | |
2816 | vm->vprop_page = vprop_page; | |
2817 | ||
2818 | for (i = 0; i < nr_irqs; i++) { | |
2819 | vm->vpes[i]->vpe_db_lpi = base + i; | |
2820 | err = its_vpe_init(vm->vpes[i]); | |
2821 | if (err) | |
2822 | break; | |
2823 | err = its_irq_gic_domain_alloc(domain, virq + i, | |
2824 | vm->vpes[i]->vpe_db_lpi); | |
2825 | if (err) | |
2826 | break; | |
2827 | irq_domain_set_hwirq_and_chip(domain, virq + i, i, | |
2828 | &its_vpe_irq_chip, vm->vpes[i]); | |
2829 | set_bit(i, bitmap); | |
2830 | } | |
2831 | ||
2832 | if (err) { | |
2833 | if (i > 0) | |
2834 | its_vpe_irq_domain_free(domain, virq, i - 1); | |
2835 | ||
2836 | its_lpi_free_chunks(bitmap, base, nr_ids); | |
2837 | its_free_prop_table(vprop_page); | |
2838 | } | |
2839 | ||
2840 | return err; | |
2841 | } | |
2842 | ||
72491643 | 2843 | static int its_vpe_irq_domain_activate(struct irq_domain *domain, |
702cb0a0 | 2844 | struct irq_data *d, bool reserve) |
eb78192b MZ |
2845 | { |
2846 | struct its_vpe *vpe = irq_data_get_irq_chip_data(d); | |
40619a2e | 2847 | struct its_node *its; |
eb78192b | 2848 | |
2247e1bf MZ |
2849 | /* If we use the list map, we issue VMAPP on demand... */ |
2850 | if (its_list_map) | |
6ef930f2 | 2851 | return 0; |
eb78192b MZ |
2852 | |
2853 | /* Map the VPE to the first possible CPU */ | |
2854 | vpe->col_idx = cpumask_first(cpu_online_mask); | |
40619a2e MZ |
2855 | |
2856 | list_for_each_entry(its, &its_nodes, entry) { | |
2857 | if (!its->is_v4) | |
2858 | continue; | |
2859 | ||
75fd951b | 2860 | its_send_vmapp(its, vpe, true); |
40619a2e MZ |
2861 | its_send_vinvall(its, vpe); |
2862 | } | |
2863 | ||
44c4c25e MZ |
2864 | irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx)); |
2865 | ||
72491643 | 2866 | return 0; |
eb78192b MZ |
2867 | } |
2868 | ||
2869 | static void its_vpe_irq_domain_deactivate(struct irq_domain *domain, | |
2870 | struct irq_data *d) | |
2871 | { | |
2872 | struct its_vpe *vpe = irq_data_get_irq_chip_data(d); | |
75fd951b MZ |
2873 | struct its_node *its; |
2874 | ||
2247e1bf MZ |
2875 | /* |
2876 | * If we use the list map, we unmap the VPE once no VLPIs are | |
2877 | * associated with the VM. | |
2878 | */ | |
2879 | if (its_list_map) | |
2880 | return; | |
eb78192b | 2881 | |
75fd951b MZ |
2882 | list_for_each_entry(its, &its_nodes, entry) { |
2883 | if (!its->is_v4) | |
2884 | continue; | |
eb78192b | 2885 | |
75fd951b MZ |
2886 | its_send_vmapp(its, vpe, false); |
2887 | } | |
eb78192b MZ |
2888 | } |
2889 | ||
8fff27ae | 2890 | static const struct irq_domain_ops its_vpe_domain_ops = { |
7d75bbb4 MZ |
2891 | .alloc = its_vpe_irq_domain_alloc, |
2892 | .free = its_vpe_irq_domain_free, | |
eb78192b MZ |
2893 | .activate = its_vpe_irq_domain_activate, |
2894 | .deactivate = its_vpe_irq_domain_deactivate, | |
8fff27ae MZ |
2895 | }; |
2896 | ||
4559fbb3 YW |
2897 | static int its_force_quiescent(void __iomem *base) |
2898 | { | |
2899 | u32 count = 1000000; /* 1s */ | |
2900 | u32 val; | |
2901 | ||
2902 | val = readl_relaxed(base + GITS_CTLR); | |
7611da86 DD |
2903 | /* |
2904 | * GIC architecture specification requires the ITS to be both | |
2905 | * disabled and quiescent for writes to GITS_BASER<n> or | |
2906 | * GITS_CBASER to not have UNPREDICTABLE results. | |
2907 | */ | |
2908 | if ((val & GITS_CTLR_QUIESCENT) && !(val & GITS_CTLR_ENABLE)) | |
4559fbb3 YW |
2909 | return 0; |
2910 | ||
2911 | /* Disable the generation of all interrupts to this ITS */ | |
d51c4b4d | 2912 | val &= ~(GITS_CTLR_ENABLE | GITS_CTLR_ImDe); |
4559fbb3 YW |
2913 | writel_relaxed(val, base + GITS_CTLR); |
2914 | ||
2915 | /* Poll GITS_CTLR and wait until ITS becomes quiescent */ | |
2916 | while (1) { | |
2917 | val = readl_relaxed(base + GITS_CTLR); | |
2918 | if (val & GITS_CTLR_QUIESCENT) | |
2919 | return 0; | |
2920 | ||
2921 | count--; | |
2922 | if (!count) | |
2923 | return -EBUSY; | |
2924 | ||
2925 | cpu_relax(); | |
2926 | udelay(1); | |
2927 | } | |
2928 | } | |
2929 | ||
9d111d49 | 2930 | static bool __maybe_unused its_enable_quirk_cavium_22375(void *data) |
94100970 RR |
2931 | { |
2932 | struct its_node *its = data; | |
2933 | ||
fa150019 AB |
2934 | /* erratum 22375: only alloc 8MB table size */ |
2935 | its->device_ids = 0x14; /* 20 bits, 8MB */ | |
94100970 | 2936 | its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375; |
9d111d49 AB |
2937 | |
2938 | return true; | |
94100970 RR |
2939 | } |
2940 | ||
9d111d49 | 2941 | static bool __maybe_unused its_enable_quirk_cavium_23144(void *data) |
fbf8f40e GK |
2942 | { |
2943 | struct its_node *its = data; | |
2944 | ||
2945 | its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144; | |
9d111d49 AB |
2946 | |
2947 | return true; | |
fbf8f40e GK |
2948 | } |
2949 | ||
9d111d49 | 2950 | static bool __maybe_unused its_enable_quirk_qdf2400_e0065(void *data) |
90922a2d SD |
2951 | { |
2952 | struct its_node *its = data; | |
2953 | ||
2954 | /* On QDF2400, the size of the ITE is 16Bytes */ | |
2955 | its->ite_size = 16; | |
9d111d49 AB |
2956 | |
2957 | return true; | |
90922a2d SD |
2958 | } |
2959 | ||
558b0165 AB |
2960 | static u64 its_irq_get_msi_base_pre_its(struct its_device *its_dev) |
2961 | { | |
2962 | struct its_node *its = its_dev->its; | |
2963 | ||
2964 | /* | |
2965 | * The Socionext Synquacer SoC has a so-called 'pre-ITS', | |
2966 | * which maps 32-bit writes targeted at a separate window of | |
2967 | * size '4 << device_id_bits' onto writes to GITS_TRANSLATER | |
2968 | * with device ID taken from bits [device_id_bits + 1:2] of | |
2969 | * the window offset. | |
2970 | */ | |
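| /* |
| * Illustration (addresses are made up): with a pre-ITS window at |
| * 0x30000000, device ID 5 raises its MSIs by writing to |
| * 0x30000000 + (5 << 2) = 0x30000014, which the pre-ITS turns into |
| * a GITS_TRANSLATER write with DeviceID 5. |
| */ |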
2971 | return its->pre_its_base + (its_dev->device_id << 2); | |
2972 | } | |
2973 | ||
2974 | static bool __maybe_unused its_enable_quirk_socionext_synquacer(void *data) | |
2975 | { | |
2976 | struct its_node *its = data; | |
2977 | u32 pre_its_window[2]; | |
2978 | u32 ids; | |
2979 | ||
2980 | if (!fwnode_property_read_u32_array(its->fwnode_handle, | |
2981 | "socionext,synquacer-pre-its", | |
2982 | pre_its_window, | |
2983 | ARRAY_SIZE(pre_its_window))) { | |
2984 | ||
2985 | its->pre_its_base = pre_its_window[0]; | |
2986 | its->get_msi_base = its_irq_get_msi_base_pre_its; | |
2987 | ||
2988 | ids = ilog2(pre_its_window[1]) - 2; | |
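| /* |
| * Illustration (made-up window size): a 1MB window gives |
| * ilog2(0x100000) - 2 = 18 usable device ID bits, matching the |
| * '4 << device_id_bits' window size described above. |
| */ |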
2989 | if (its->device_ids > ids) | |
2990 | its->device_ids = ids; | |
2991 | ||
2992 | /* the pre-ITS breaks isolation, so disable MSI remapping */ | |
2993 | its->msi_domain_flags &= ~IRQ_DOMAIN_FLAG_MSI_REMAP; | |
2994 | return true; | |
2995 | } | |
2996 | return false; | |
2997 | } | |
2998 | ||
5c9a882e MZ |
2999 | static bool __maybe_unused its_enable_quirk_hip07_161600802(void *data) |
3000 | { | |
3001 | struct its_node *its = data; | |
3002 | ||
3003 | /* | |
3004 | * Hip07 insists on using the wrong address for the VLPI | |
3005 | * page. Trick it into doing the right thing... | |
3006 | */ | |
3007 | its->vlpi_redist_offset = SZ_128K; | |
3008 | return true; | |
90922a2d SD |
3009 | } |
3010 | ||
67510cca | 3011 | static const struct gic_quirk its_quirks[] = { |
94100970 RR |
3012 | #ifdef CONFIG_CAVIUM_ERRATUM_22375 |
3013 | { | |
3014 | .desc = "ITS: Cavium errata 22375, 24313", | |
3015 | .iidr = 0xa100034c, /* ThunderX pass 1.x */ | |
3016 | .mask = 0xffff0fff, | |
3017 | .init = its_enable_quirk_cavium_22375, | |
3018 | }, | |
fbf8f40e GK |
3019 | #endif |
3020 | #ifdef CONFIG_CAVIUM_ERRATUM_23144 | |
3021 | { | |
3022 | .desc = "ITS: Cavium erratum 23144", | |
3023 | .iidr = 0xa100034c, /* ThunderX pass 1.x */ | |
3024 | .mask = 0xffff0fff, | |
3025 | .init = its_enable_quirk_cavium_23144, | |
3026 | }, | |
90922a2d SD |
3027 | #endif |
3028 | #ifdef CONFIG_QCOM_QDF2400_ERRATUM_0065 | |
3029 | { | |
3030 | .desc = "ITS: QDF2400 erratum 0065", | |
3031 | .iidr = 0x00001070, /* QDF2400 ITS rev 1.x */ | |
3032 | .mask = 0xffffffff, | |
3033 | .init = its_enable_quirk_qdf2400_e0065, | |
3034 | }, | |
558b0165 AB |
3035 | #endif |
3036 | #ifdef CONFIG_SOCIONEXT_SYNQUACER_PREITS | |
3037 | { | |
3038 | /* | |
3039 | * The Socionext Synquacer SoC incorporates ARM's own GIC-500 | |
3040 | * implementation, but with a 'pre-ITS' added that requires | |
3041 | * special handling in software. | |
3042 | */ | |
3043 | .desc = "ITS: Socionext Synquacer pre-ITS", | |
3044 | .iidr = 0x0001143b, | |
3045 | .mask = 0xffffffff, | |
3046 | .init = its_enable_quirk_socionext_synquacer, | |
3047 | }, | |
5c9a882e MZ |
3048 | #endif |
3049 | #ifdef CONFIG_HISILICON_ERRATUM_161600802 | |
3050 | { | |
3051 | .desc = "ITS: Hip07 erratum 161600802", | |
3052 | .iidr = 0x00000004, | |
3053 | .mask = 0xffffffff, | |
3054 | .init = its_enable_quirk_hip07_161600802, | |
3055 | }, | |
94100970 | 3056 | #endif |
67510cca RR |
3057 | { |
3058 | } | |
3059 | }; | |
3060 | ||
3061 | static void its_enable_quirks(struct its_node *its) | |
3062 | { | |
3063 | u32 iidr = readl_relaxed(its->base + GITS_IIDR); | |
3064 | ||
3065 | gic_enable_quirks(iidr, its_quirks, its); | |
3066 | } | |
3067 | ||
dba0bc7b DB |
3068 | static int its_save_disable(void) |
3069 | { | |
3070 | struct its_node *its; | |
3071 | int err = 0; | |
3072 | ||
3073 | spin_lock(&its_lock); | |
3074 | list_for_each_entry(its, &its_nodes, entry) { | |
3075 | void __iomem *base; | |
3076 | ||
3077 | if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE)) | |
3078 | continue; | |
3079 | ||
3080 | base = its->base; | |
3081 | its->ctlr_save = readl_relaxed(base + GITS_CTLR); | |
3082 | err = its_force_quiescent(base); | |
3083 | if (err) { | |
3084 | pr_err("ITS@%pa: failed to quiesce: %d\n", | |
3085 | &its->phys_base, err); | |
3086 | writel_relaxed(its->ctlr_save, base + GITS_CTLR); | |
3087 | goto err; | |
3088 | } | |
3089 | ||
3090 | its->cbaser_save = gits_read_cbaser(base + GITS_CBASER); | |
3091 | } | |
3092 | ||
3093 | err: | |
3094 | if (err) { | |
3095 | list_for_each_entry_continue_reverse(its, &its_nodes, entry) { | |
3096 | void __iomem *base; | |
3097 | ||
3098 | if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE)) | |
3099 | continue; | |
3100 | ||
3101 | base = its->base; | |
3102 | writel_relaxed(its->ctlr_save, base + GITS_CTLR); | |
3103 | } | |
3104 | } | |
3105 | spin_unlock(&its_lock); | |
3106 | ||
3107 | return err; | |
3108 | } | |
3109 | ||
3110 | static void its_restore_enable(void) | |
3111 | { | |
3112 | struct its_node *its; | |
3113 | int ret; | |
3114 | ||
3115 | spin_lock(&its_lock); | |
3116 | list_for_each_entry(its, &its_nodes, entry) { | |
3117 | void __iomem *base; | |
3118 | int i; | |
3119 | ||
3120 | if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE)) | |
3121 | continue; | |
3122 | ||
3123 | base = its->base; | |
3124 | ||
3125 | /* | |
3126 | * Make sure that the ITS is disabled. If it fails to quiesce, | |
3127 | * don't restore it since writing to CBASER or BASER<n> | |
3128 | * registers is undefined according to the GIC v3 ITS | |
3129 | * Specification. | |
3130 | */ | |
3131 | ret = its_force_quiescent(base); | |
3132 | if (ret) { | |
3133 | pr_err("ITS@%pa: failed to quiesce on resume: %d\n", | |
3134 | &its->phys_base, ret); | |
3135 | continue; | |
3136 | } | |
3137 | ||
3138 | gits_write_cbaser(its->cbaser_save, base + GITS_CBASER); | |
3139 | ||
3140 | /* | |
3141 | * Writing CBASER resets CREADR to 0, so make CWRITER and | |
3142 | * cmd_write line up with it. | |
3143 | */ | |
3144 | its->cmd_write = its->cmd_base; | |
3145 | gits_write_cwriter(0, base + GITS_CWRITER); | |
3146 | ||
3147 | /* Restore GITS_BASER<n> from the cached values. */ | |
3148 | for (i = 0; i < GITS_BASER_NR_REGS; i++) { | |
3149 | struct its_baser *baser = &its->tables[i]; | |
3150 | ||
3151 | if (!(baser->val & GITS_BASER_VALID)) | |
3152 | continue; | |
3153 | ||
3154 | its_write_baser(its, baser, baser->val); | |
3155 | } | |
3156 | writel_relaxed(its->ctlr_save, base + GITS_CTLR); | |
920181ce DB |
3157 | |
3158 | /* | |
3159 | * Re-register this CPU's collection if it is held in ITS-internal | |
3160 | * memory: per the GICv3 architecture, collections with an ID below | |
3161 | * GITS_TYPER.HCC live inside the ITS and are lost when it is reset. | |
3162 | */ | |
3163 | if (its->collections[smp_processor_id()].col_id < | |
3164 | GITS_TYPER_HCC(gic_read_typer(base + GITS_TYPER))) | |
3165 | its_cpu_init_collection(its); | |
dba0bc7b DB |
3166 | } |
3167 | spin_unlock(&its_lock); | |
3168 | } | |
3169 | ||
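/*
 * Suspend/resume is hooked at syscore level: these callbacks run on a
 * single CPU with interrupts disabled, after device suspend, so no new
 * ITS commands can be issued while the registers are saved or restored.
 */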
3170 | static struct syscore_ops its_syscore_ops = { | |
3171 | .suspend = its_save_disable, | |
3172 | .resume = its_restore_enable, | |
3173 | }; | |
3174 | ||
db40f0a7 | 3175 | static int its_init_domain(struct fwnode_handle *handle, struct its_node *its) |
d14ae5e6 TN |
3176 | { |
3177 | struct irq_domain *inner_domain; | |
3178 | struct msi_domain_info *info; | |
3179 | ||
3180 | info = kzalloc(sizeof(*info), GFP_KERNEL); | |
3181 | if (!info) | |
3182 | return -ENOMEM; | |
3183 | ||
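/*
 * Create the ITS interrupt domain as a radix-tree (sparse hwirq) domain
 * and chain it to the GICv3 domain via ->parent; the per-bus MSI domains
 * (PCI/platform, created elsewhere) stack on top of this
 * DOMAIN_BUS_NEXUS domain.
 */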
db40f0a7 | 3184 | inner_domain = irq_domain_create_tree(handle, &its_domain_ops, its); |
d14ae5e6 TN |
3185 | if (!inner_domain) { |
3186 | kfree(info); | |
3187 | return -ENOMEM; | |
3188 | } | |
3189 | ||
db40f0a7 | 3190 | inner_domain->parent = its_parent; |
96f0d93a | 3191 | irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS); |
558b0165 | 3192 | inner_domain->flags |= its->msi_domain_flags; |
d14ae5e6 TN |
3193 | info->ops = &its_msi_domain_ops; |
3194 | info->data = its; | |
3195 | inner_domain->host_data = info; | |
3196 | ||
3197 | return 0; | |
3198 | } | |
3199 | ||
8fff27ae MZ |
3200 | static int its_init_vpe_domain(void) |
3201 | { | |
20b3d54e MZ |
3202 | struct its_node *its; |
3203 | u32 devid; | |
3204 | int entries; | |
3205 | ||
3206 | if (gic_rdists->has_direct_lpi) { | |
3207 | pr_info("ITS: Using DirectLPI for VPE invalidation\n"); | |
3208 | return 0; | |
3209 | } | |
3210 | ||
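/*
 * No DirectLPI: VPE doorbells must be driven through a normal ITS
 * device instead. Allocate a single "proxy" device with one event slot
 * per possible CPU; doorbells get temporarily mapped onto these slots
 * so they can be masked, unmasked and invalidated with ordinary ITS
 * commands.
 */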
3211 | /* Any ITS will do, even if not v4 */ | |
3212 | its = list_first_entry(&its_nodes, struct its_node, entry); | |
3213 | ||
3214 | entries = roundup_pow_of_two(nr_cpu_ids); | |
6396bb22 | 3215 | vpe_proxy.vpes = kcalloc(entries, sizeof(*vpe_proxy.vpes), |
20b3d54e MZ |
3216 | GFP_KERNEL); |
3217 | if (!vpe_proxy.vpes) { | |
3218 | pr_err("ITS: Can't allocate GICv4 proxy device array\n"); | |
3219 | return -ENOMEM; | |
3220 | } | |
3221 | ||
3222 | /* Use the last possible DevID */ | |
3223 | devid = GENMASK(its->device_ids - 1, 0); | |
3224 | vpe_proxy.dev = its_create_device(its, devid, entries, false); | |
3225 | if (!vpe_proxy.dev) { | |
3226 | kfree(vpe_proxy.vpes); | |
3227 | pr_err("ITS: Can't allocate GICv4 proxy device\n"); | |
3228 | return -ENOMEM; | |
3229 | } | |
3230 | ||
c427a475 | 3231 | BUG_ON(entries > vpe_proxy.dev->nr_ites); |
20b3d54e MZ |
3232 | |
3233 | raw_spin_lock_init(&vpe_proxy.lock); | |
3234 | vpe_proxy.next_victim = 0; | |
3235 | pr_info("ITS: Allocated DevID %x as GICv4 proxy device (%d slots)\n", | |
3236 | devid, vpe_proxy.dev->nr_ites); | |
3237 | ||
8fff27ae MZ |
3238 | return 0; |
3239 | } | |
3240 | ||
3dfa576b MZ |
3241 | static int __init its_compute_its_list_map(struct resource *res, |
3242 | void __iomem *its_base) | |
3243 | { | |
3244 | int its_number; | |
3245 | u32 ctlr; | |
3246 | ||
3247 | /* | |
3248 | * This is assumed to be done early enough that we're | |
3249 | * guaranteed to be single-threaded, hence no | |
3250 | * locking. Revisit this if ITS probing ever becomes | |
3251 | * concurrent. | |
3252 | */ | |
ab60491e MZ |
3253 | its_number = find_first_zero_bit(&its_list_map, GICv4_ITS_LIST_MAX); |
3254 | if (its_number >= GICv4_ITS_LIST_MAX) { | |
3dfa576b MZ |
3255 | pr_err("ITS@%pa: No ITSList entry available!\n", |
3256 | &res->start); | |
3257 | return -EINVAL; | |
3258 | } | |
3259 | ||
3260 | ctlr = readl_relaxed(its_base + GITS_CTLR); | |
3261 | ctlr &= ~GITS_CTLR_ITS_NUMBER; | |
3262 | ctlr |= its_number << GITS_CTLR_ITS_NUMBER_SHIFT; | |
3263 | writel_relaxed(ctlr, its_base + GITS_CTLR); | |
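/*
 * Read the value back: on implementations where ITS_NUMBER is fixed,
 * the write may not stick, in which case we adopt whatever number the
 * ITS reports and claim that slot in its_list_map instead.
 */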
3264 | ctlr = readl_relaxed(its_base + GITS_CTLR); | |
3265 | if ((ctlr & GITS_CTLR_ITS_NUMBER) != (its_number << GITS_CTLR_ITS_NUMBER_SHIFT)) { | |
3266 | its_number = ctlr & GITS_CTLR_ITS_NUMBER; | |
3267 | its_number >>= GITS_CTLR_ITS_NUMBER_SHIFT; | |
3268 | } | |
3269 | ||
3270 | if (test_and_set_bit(its_number, &its_list_map)) { | |
3271 | pr_err("ITS@%pa: Duplicate ITSList entry %d\n", | |
3272 | &res->start, its_number); | |
3273 | return -EINVAL; | |
3274 | } | |
3275 | ||
3276 | return its_number; | |
3277 | } | |
3278 | ||
db40f0a7 TN |
3279 | static int __init its_probe_one(struct resource *res, |
3280 | struct fwnode_handle *handle, int numa_node) | |
4c21f3c2 | 3281 | { |
4c21f3c2 MZ |
3282 | struct its_node *its; |
3283 | void __iomem *its_base; | |
3dfa576b MZ |
3284 | u32 val, ctlr; |
3285 | u64 baser, tmp, typer; | |
4c21f3c2 MZ |
3286 | int err; |
3287 | ||
db40f0a7 | 3288 | its_base = ioremap(res->start, resource_size(res)); |
4c21f3c2 | 3289 | if (!its_base) { |
db40f0a7 | 3290 | pr_warn("ITS@%pa: Unable to map ITS registers\n", &res->start); |
4c21f3c2 MZ |
3291 | return -ENOMEM; |
3292 | } | |
3293 | ||
3294 | val = readl_relaxed(its_base + GITS_PIDR2) & GIC_PIDR2_ARCH_MASK; | |
3295 | if (val != 0x30 && val != 0x40) { | |
db40f0a7 | 3296 | pr_warn("ITS@%pa: No ITS detected, giving up\n", &res->start); |
4c21f3c2 MZ |
3297 | err = -ENODEV; |
3298 | goto out_unmap; | |
3299 | } | |
3300 | ||
4559fbb3 YW |
3301 | err = its_force_quiescent(its_base); |
3302 | if (err) { | |
db40f0a7 | 3303 | pr_warn("ITS@%pa: Failed to quiesce, giving up\n", &res->start); |
4559fbb3 YW |
3304 | goto out_unmap; |
3305 | } | |
3306 | ||
db40f0a7 | 3307 | pr_info("ITS %pR\n", res); |
4c21f3c2 MZ |
3308 | |
3309 | its = kzalloc(sizeof(*its), GFP_KERNEL); | |
3310 | if (!its) { | |
3311 | err = -ENOMEM; | |
3312 | goto out_unmap; | |
3313 | } | |
3314 | ||
3315 | raw_spin_lock_init(&its->lock); | |
3316 | INIT_LIST_HEAD(&its->entry); | |
3317 | INIT_LIST_HEAD(&its->its_device_list); | |
3dfa576b | 3318 | typer = gic_read_typer(its_base + GITS_TYPER); |
4c21f3c2 | 3319 | its->base = its_base; |
db40f0a7 | 3320 | its->phys_base = res->start; |
3dfa576b | 3321 | its->ite_size = GITS_TYPER_ITT_ENTRY_SIZE(typer); |
fa150019 | 3322 | its->device_ids = GITS_TYPER_DEVBITS(typer); |
3dfa576b MZ |
3323 | its->is_v4 = !!(typer & GITS_TYPER_VLPIS); |
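/*
 * A v4 ITS that cannot rely on a single broadcast VMOVP
 * (GITS_TYPER.VMOVP clear) needs a slot in the global ITSList so that
 * VMOVP commands can be directed at it explicitly.
 */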
3324 | if (its->is_v4) { | |
3325 | if (!(typer & GITS_TYPER_VMOVP)) { | |
3326 | err = its_compute_its_list_map(res, its_base); | |
3327 | if (err < 0) | |
3328 | goto out_free_its; | |
3329 | ||
debf6d02 MZ |
3330 | its->list_nr = err; |
3331 | ||
3dfa576b MZ |
3332 | pr_info("ITS@%pa: Using ITS number %d\n", |
3333 | &res->start, err); | |
3334 | } else { | |
3335 | pr_info("ITS@%pa: Single VMOVP capable\n", &res->start); | |
3336 | } | |
3337 | } | |
3338 | ||
db40f0a7 | 3339 | its->numa_node = numa_node; |
4c21f3c2 | 3340 | |
5bc13c2c RR |
3341 | its->cmd_base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, |
3342 | get_order(ITS_CMD_QUEUE_SZ)); | |
4c21f3c2 MZ |
3343 | if (!its->cmd_base) { |
3344 | err = -ENOMEM; | |
3345 | goto out_free_its; | |
3346 | } | |
3347 | its->cmd_write = its->cmd_base; | |
558b0165 AB |
3348 | its->fwnode_handle = handle; |
3349 | its->get_msi_base = its_irq_get_msi_base; | |
3350 | its->msi_domain_flags = IRQ_DOMAIN_FLAG_MSI_REMAP; | |
4c21f3c2 | 3351 | |
67510cca RR |
3352 | its_enable_quirks(its); |
3353 | ||
0e0b0f69 | 3354 | err = its_alloc_tables(its); |
4c21f3c2 MZ |
3355 | if (err) |
3356 | goto out_free_cmd; | |
3357 | ||
3358 | err = its_alloc_collections(its); | |
3359 | if (err) | |
3360 | goto out_free_tables; | |
3361 | ||
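/*
 * Program GITS_CBASER with the command queue: physical base address,
 * inner-shareable, read-allocate/write-allocate write-back
 * cacheability, and the queue size expressed as a number of 4kB pages
 * minus one.
 */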
3362 | baser = (virt_to_phys(its->cmd_base) | | |
2fd632a0 | 3363 | GITS_CBASER_RaWaWb | |
4c21f3c2 MZ |
3364 | GITS_CBASER_InnerShareable | |
3365 | (ITS_CMD_QUEUE_SZ / SZ_4K - 1) | | |
3366 | GITS_CBASER_VALID); | |
3367 | ||
0968a619 VM |
3368 | gits_write_cbaser(baser, its->base + GITS_CBASER); |
3369 | tmp = gits_read_cbaser(its->base + GITS_CBASER); | |
4c21f3c2 | 3370 | |
4ad3e363 | 3371 | if ((tmp ^ baser) & GITS_CBASER_SHAREABILITY_MASK) { |
241a386c MZ |
3372 | if (!(tmp & GITS_CBASER_SHAREABILITY_MASK)) { |
3373 | /* | |
3374 | * The HW reports non-shareable, we must | |
3375 | * remove the cacheability attributes as | |
3376 | * well. | |
3377 | */ | |
3378 | baser &= ~(GITS_CBASER_SHAREABILITY_MASK | | |
3379 | GITS_CBASER_CACHEABILITY_MASK); | |
3380 | baser |= GITS_CBASER_nC; | |
0968a619 | 3381 | gits_write_cbaser(baser, its->base + GITS_CBASER); |
241a386c | 3382 | } |
4c21f3c2 MZ |
3383 | pr_info("ITS: using cache flushing for cmd queue\n"); |
3384 | its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING; | |
3385 | } | |
3386 | ||
0968a619 | 3387 | gits_write_cwriter(0, its->base + GITS_CWRITER); |
3dfa576b | 3388 | ctlr = readl_relaxed(its->base + GITS_CTLR); |
d51c4b4d MZ |
3389 | ctlr |= GITS_CTLR_ENABLE; |
3390 | if (its->is_v4) | |
3391 | ctlr |= GITS_CTLR_ImDe; | |
3392 | writel_relaxed(ctlr, its->base + GITS_CTLR); | |
241a386c | 3393 | |
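/*
 * ITSes that keep collections in internal memory (GITS_TYPER.HCC != 0)
 * have their register state saved and restored across suspend, since
 * that state presumably does not survive the ITS being powered down.
 */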
dba0bc7b DB |
3394 | if (GITS_TYPER_HCC(typer)) |
3395 | its->flags |= ITS_FLAGS_SAVE_SUSPEND_STATE; | |
3396 | ||
db40f0a7 | 3397 | err = its_init_domain(handle, its); |
d14ae5e6 TN |
3398 | if (err) |
3399 | goto out_free_tables; | |
4c21f3c2 MZ |
3400 | |
3401 | spin_lock(&its_lock); | |
3402 | list_add(&its->entry, &its_nodes); | |
3403 | spin_unlock(&its_lock); | |
3404 | ||
3405 | return 0; | |
3406 | ||
4c21f3c2 MZ |
3407 | out_free_tables: |
3408 | its_free_tables(its); | |
3409 | out_free_cmd: | |
5bc13c2c | 3410 | free_pages((unsigned long)its->cmd_base, get_order(ITS_CMD_QUEUE_SZ)); |
4c21f3c2 MZ |
3411 | out_free_its: |
3412 | kfree(its); | |
3413 | out_unmap: | |
3414 | iounmap(its_base); | |
db40f0a7 | 3415 | pr_err("ITS@%pa: failed probing (%d)\n", &res->start, err); |
4c21f3c2 MZ |
3416 | return err; |
3417 | } | |
3418 | ||
3419 | static bool gic_rdists_supports_plpis(void) | |
3420 | { | |
589ce5f4 | 3421 | return !!(gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER) & GICR_TYPER_PLPIS); |
4c21f3c2 MZ |
3422 | } |
3423 | ||
6eb486b6 SD |
3424 | static int redist_disable_lpis(void) |
3425 | { | |
3426 | void __iomem *rbase = gic_data_rdist_rd_base(); | |
3427 | u64 timeout = USEC_PER_SEC; | |
3428 | u64 val; | |
3429 | ||
82f499c8 MZ |
3430 | /* |
3431 | * If coming via a CPU hotplug event, we don't need to disable | |
3432 | * LPIs before trying to re-enable them. They are already | |
3433 | * configured and all is well in the world. Detect this case | |
3434 | * by checking the allocation of the pending table for the | |
3435 | * current CPU. | |
3436 | */ | |
3437 | if (gic_data_rdist()->pend_page) | |
3438 | return 0; | |
3439 | ||
6eb486b6 SD |
3440 | if (!gic_rdists_supports_plpis()) { |
3441 | pr_info("CPU%d: LPIs not supported\n", smp_processor_id()); | |
3442 | return -ENXIO; | |
3443 | } | |
3444 | ||
3445 | val = readl_relaxed(rbase + GICR_CTLR); | |
3446 | if (!(val & GICR_CTLR_ENABLE_LPIS)) | |
3447 | return 0; | |
3448 | ||
3449 | pr_warn("CPU%d: Booted with LPIs enabled, memory probably corrupted\n", | |
3450 | smp_processor_id()); | |
3451 | add_taint(TAINT_CRAP, LOCKDEP_STILL_OK); | |
3452 | ||
3453 | /* Disable LPIs */ | |
3454 | val &= ~GICR_CTLR_ENABLE_LPIS; | |
3455 | writel_relaxed(val, rbase + GICR_CTLR); | |
3456 | ||
3457 | /* Make sure any change to GICR_CTLR is observable by the GIC */ | |
3458 | dsb(sy); | |
3459 | ||
3460 | /* | |
3461 | * Software must observe RWP==0 after clearing GICR_CTLR.EnableLPIs | |
3462 | * from 1 to 0 before programming GICR_PEND{PROP}BASER registers. | |
3463 | * Error out if we time out waiting for RWP to clear. | |
3464 | */ | |
3465 | while (readl_relaxed(rbase + GICR_CTLR) & GICR_CTLR_RWP) { | |
3466 | if (!timeout) { | |
3467 | pr_err("CPU%d: Timeout while disabling LPIs\n", | |
3468 | smp_processor_id()); | |
3469 | return -ETIMEDOUT; | |
3470 | } | |
3471 | udelay(1); | |
3472 | timeout--; | |
3473 | } | |
3474 | ||
3475 | /* | |
3476 | * After it has been written to 1, it is IMPLEMENTATION | |
3477 | * DEFINED whether GICR_CTLR.EnableLPIs becomes RES1 or can be | |
3478 | * cleared to 0. Error out if clearing the bit failed. | |
3479 | */ | |
3480 | if (readl_relaxed(rbase + GICR_CTLR) & GICR_CTLR_ENABLE_LPIS) { | |
3481 | pr_err("CPU%d: Failed to disable LPIs\n", smp_processor_id()); | |
3482 | return -EBUSY; | |
3483 | } | |
3484 | ||
3485 | return 0; | |
3486 | } | |
3487 | ||
4c21f3c2 MZ |
3488 | int its_cpu_init(void) |
3489 | { | |
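/*
 * Per-CPU ITS setup, called by the GICv3 core as each CPU comes up:
 * make sure LPIs are disabled (unless this is a hotplug re-entry), then
 * program this CPU's redistributor LPI tables and map its collections.
 */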
4c21f3c2 | 3490 | if (!list_empty(&its_nodes)) { |
6eb486b6 SD |
3491 | int ret; |
3492 | ||
3493 | ret = redist_disable_lpis(); | |
3494 | if (ret) | |
3495 | return ret; | |
3496 | ||
4c21f3c2 | 3497 | its_cpu_init_lpis(); |
920181ce | 3498 | its_cpu_init_collections(); |
4c21f3c2 MZ |
3499 | } |
3500 | ||
3501 | return 0; | |
3502 | } | |
3503 | ||
935bba7c | 3504 | static const struct of_device_id its_device_id[] = { |
4c21f3c2 MZ |
3505 | { .compatible = "arm,gic-v3-its", }, |
3506 | {}, | |
3507 | }; | |
3508 | ||
db40f0a7 | 3509 | static int __init its_of_probe(struct device_node *node) |
4c21f3c2 MZ |
3510 | { |
3511 | struct device_node *np; | |
db40f0a7 | 3512 | struct resource res; |
4c21f3c2 MZ |
3513 | |
3514 | for (np = of_find_matching_node(node, its_device_id); np; | |
3515 | np = of_find_matching_node(np, its_device_id)) { | |
95a25625 SB |
3516 | if (!of_device_is_available(np)) |
3517 | continue; | |
d14ae5e6 | 3518 | if (!of_property_read_bool(np, "msi-controller")) { |
e81f54c6 RH |
3519 | pr_warn("%pOF: no msi-controller property, ITS ignored\n", |
3520 | np); | |
d14ae5e6 TN |
3521 | continue; |
3522 | } | |
3523 | ||
db40f0a7 | 3524 | if (of_address_to_resource(np, 0, &res)) { |
e81f54c6 | 3525 | pr_warn("%pOF: no regs?\n", np); |
db40f0a7 TN |
3526 | continue; |
3527 | } | |
3528 | ||
3529 | its_probe_one(&res, &np->fwnode, of_node_to_nid(np)); | |
4c21f3c2 | 3530 | } |
db40f0a7 TN |
3531 | return 0; |
3532 | } | |
3533 | ||
3f010cf1 TN |
3534 | #ifdef CONFIG_ACPI |
3535 | ||
3536 | #define ACPI_GICV3_ITS_MEM_SIZE (SZ_128K) | |
3537 | ||
d1ce263f | 3538 | #ifdef CONFIG_ACPI_NUMA |
dbd2b826 GK |
3539 | struct its_srat_map { |
3540 | /* numa node id */ | |
3541 | u32 numa_node; | |
3542 | /* GIC ITS ID */ | |
3543 | u32 its_id; | |
3544 | }; | |
3545 | ||
fdf6e7a8 | 3546 | static struct its_srat_map *its_srat_maps __initdata; |
dbd2b826 GK |
3547 | static int its_in_srat __initdata; |
3548 | ||
3549 | static int __init acpi_get_its_numa_node(u32 its_id) | |
3550 | { | |
3551 | int i; | |
3552 | ||
3553 | for (i = 0; i < its_in_srat; i++) { | |
3554 | if (its_id == its_srat_maps[i].its_id) | |
3555 | return its_srat_maps[i].numa_node; | |
3556 | } | |
3557 | return NUMA_NO_NODE; | |
3558 | } | |
3559 | ||
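/*
 * Deliberately empty callback: the first acpi_table_parse_entries()
 * pass in acpi_table_parse_srat_its() only needs the returned entry
 * count, so that its_srat_maps can be sized before the real parsing
 * pass runs.
 */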
fdf6e7a8 HG |
3560 | static int __init gic_acpi_match_srat_its(struct acpi_subtable_header *header, |
3561 | const unsigned long end) | |
3562 | { | |
3563 | return 0; | |
3564 | } | |
3565 | ||
dbd2b826 GK |
3566 | static int __init gic_acpi_parse_srat_its(struct acpi_subtable_header *header, |
3567 | const unsigned long end) | |
3568 | { | |
3569 | int node; | |
3570 | struct acpi_srat_gic_its_affinity *its_affinity; | |
3571 | ||
3572 | its_affinity = (struct acpi_srat_gic_its_affinity *)header; | |
3573 | if (!its_affinity) | |
3574 | return -EINVAL; | |
3575 | ||
3576 | if (its_affinity->header.length < sizeof(*its_affinity)) { | |
3577 | pr_err("SRAT: Invalid header length %d in ITS affinity\n", | |
3578 | its_affinity->header.length); | |
3579 | return -EINVAL; | |
3580 | } | |
3581 | ||
dbd2b826 GK |
3582 | node = acpi_map_pxm_to_node(its_affinity->proximity_domain); |
3583 | ||
3584 | if (node == NUMA_NO_NODE || node >= MAX_NUMNODES) { | |
3585 | pr_err("SRAT: Invalid NUMA node %d in ITS affinity\n", node); | |
3586 | return 0; | |
3587 | } | |
3588 | ||
3589 | its_srat_maps[its_in_srat].numa_node = node; | |
3590 | its_srat_maps[its_in_srat].its_id = its_affinity->its_id; | |
3591 | its_in_srat++; | |
3592 | pr_info("SRAT: PXM %d -> ITS %d -> Node %d\n", | |
3593 | its_affinity->proximity_domain, its_affinity->its_id, node); | |
3594 | ||
3595 | return 0; | |
3596 | } | |
3597 | ||
3598 | static void __init acpi_table_parse_srat_its(void) | |
3599 | { | |
fdf6e7a8 HG |
3600 | int count; |
3601 | ||
3602 | count = acpi_table_parse_entries(ACPI_SIG_SRAT, | |
3603 | sizeof(struct acpi_table_srat), | |
3604 | ACPI_SRAT_TYPE_GIC_ITS_AFFINITY, | |
3605 | gic_acpi_match_srat_its, 0); | |
3606 | if (count <= 0) | |
3607 | return; | |
3608 | ||
6da2ec56 KC |
3609 | its_srat_maps = kmalloc_array(count, sizeof(struct its_srat_map), |
3610 | GFP_KERNEL); | |
fdf6e7a8 HG |
3611 | if (!its_srat_maps) { |
3612 | pr_warn("SRAT: Failed to allocate memory for its_srat_maps!\n"); | |
3613 | return; | |
3614 | } | |
3615 | ||
dbd2b826 GK |
3616 | acpi_table_parse_entries(ACPI_SIG_SRAT, |
3617 | sizeof(struct acpi_table_srat), | |
3618 | ACPI_SRAT_TYPE_GIC_ITS_AFFINITY, | |
3619 | gic_acpi_parse_srat_its, 0); | |
3620 | } | |
fdf6e7a8 HG |
3621 | |
3622 | /* free the its_srat_maps after ITS probing */ | |
3623 | static void __init acpi_its_srat_maps_free(void) | |
3624 | { | |
3625 | kfree(its_srat_maps); | |
3626 | } | |
dbd2b826 GK |
3627 | #else |
3628 | static void __init acpi_table_parse_srat_its(void) { } | |
3629 | static int __init acpi_get_its_numa_node(u32 its_id) { return NUMA_NO_NODE; } | |
fdf6e7a8 | 3630 | static void __init acpi_its_srat_maps_free(void) { } |
dbd2b826 GK |
3631 | #endif |
3632 | ||
3f010cf1 TN |
3633 | static int __init gic_acpi_parse_madt_its(struct acpi_subtable_header *header, |
3634 | const unsigned long end) | |
3635 | { | |
3636 | struct acpi_madt_generic_translator *its_entry; | |
3637 | struct fwnode_handle *dom_handle; | |
3638 | struct resource res; | |
3639 | int err; | |
3640 | ||
3641 | its_entry = (struct acpi_madt_generic_translator *)header; | |
3642 | memset(&res, 0, sizeof(res)); | |
3643 | res.start = its_entry->base_address; | |
3644 | res.end = its_entry->base_address + ACPI_GICV3_ITS_MEM_SIZE - 1; | |
3645 | res.flags = IORESOURCE_MEM; | |
3646 | ||
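/*
 * ACPI provides no firmware node for the ITS, so fabricate one keyed on
 * the ITS base address and register it with IORT, letting devices be
 * routed to this ITS via the MADT translation_id.
 */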
3647 | dom_handle = irq_domain_alloc_fwnode((void *)its_entry->base_address); | |
3648 | if (!dom_handle) { | |
3649 | pr_err("ITS@%pa: Unable to allocate GICv3 ITS domain token\n", | |
3650 | &res.start); | |
3651 | return -ENOMEM; | |
3652 | } | |
3653 | ||
8b4282e6 SK |
3654 | err = iort_register_domain_token(its_entry->translation_id, res.start, |
3655 | dom_handle); | |
3f010cf1 TN |
3656 | if (err) { |
3657 | pr_err("ITS@%pa: Unable to register GICv3 ITS domain token (ITS ID %d) to IORT\n", | |
3658 | &res.start, its_entry->translation_id); | |
3659 | goto dom_err; | |
3660 | } | |
3661 | ||
dbd2b826 GK |
3662 | err = its_probe_one(&res, dom_handle, |
3663 | acpi_get_its_numa_node(its_entry->translation_id)); | |
3f010cf1 TN |
3664 | if (!err) |
3665 | return 0; | |
3666 | ||
3667 | iort_deregister_domain_token(its_entry->translation_id); | |
3668 | dom_err: | |
3669 | irq_domain_free_fwnode(dom_handle); | |
3670 | return err; | |
3671 | } | |
3672 | ||
3673 | static void __init its_acpi_probe(void) | |
3674 | { | |
dbd2b826 | 3675 | acpi_table_parse_srat_its(); |
3f010cf1 TN |
3676 | acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR, |
3677 | gic_acpi_parse_madt_its, 0); | |
fdf6e7a8 | 3678 | acpi_its_srat_maps_free(); |
3f010cf1 TN |
3679 | } |
3680 | #else | |
3681 | static void __init its_acpi_probe(void) { } | |
3682 | #endif | |
3683 | ||
db40f0a7 TN |
3684 | int __init its_init(struct fwnode_handle *handle, struct rdists *rdists, |
3685 | struct irq_domain *parent_domain) | |
3686 | { | |
3687 | struct device_node *of_node; | |
8fff27ae MZ |
3688 | struct its_node *its; |
3689 | bool has_v4 = false; | |
3690 | int err; | |
db40f0a7 TN |
3691 | |
3692 | its_parent = parent_domain; | |
3693 | of_node = to_of_node(handle); | |
3694 | if (of_node) | |
3695 | its_of_probe(of_node); | |
3696 | else | |
3f010cf1 | 3697 | its_acpi_probe(); |
4c21f3c2 MZ |
3698 | |
3699 | if (list_empty(&its_nodes)) { | |
3700 | pr_warn("ITS: No ITS available, not enabling LPIs\n"); | |
3701 | return -ENXIO; | |
3702 | } | |
3703 | ||
3704 | gic_rdists = rdists; | |
8fff27ae MZ |
3705 | err = its_alloc_lpi_tables(); |
3706 | if (err) | |
3707 | return err; | |
3708 | ||
3709 | list_for_each_entry(its, &its_nodes, entry) | |
3710 | has_v4 |= its->is_v4; | |
3711 | ||
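/*
 * GICv4 direct VLPI injection is only enabled when the redistributors
 * advertise VLPI support and at least one ITS is v4-capable; if the VPE
 * proxy device or the v4 core layer fails to set up, fall back to plain
 * GICv3 operation.
 */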
3712 | if (has_v4 && rdists->has_vlpis) { | |
3d63cb53 MZ |
3713 | if (its_init_vpe_domain() || |
3714 | its_init_v4(parent_domain, &its_vpe_domain_ops)) { | |
8fff27ae MZ |
3715 | rdists->has_vlpis = false; |
3716 | pr_err("ITS: Disabling GICv4 support\n"); | |
3717 | } | |
3718 | } | |
3719 | ||
dba0bc7b DB |
3720 | register_syscore_ops(&its_syscore_ops); |
3721 | ||
8fff27ae | 3722 | return 0; |
4c21f3c2 | 3723 | } |