/*
 * Copyright 2016,2017 IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) "xive: " fmt

#include <linux/types.h>
#include <linux/irq.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/cpumask.h>
#include <linux/mm.h>

#include <asm/prom.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/irq.h>
#include <asm/errno.h>
#include <asm/xive.h>
#include <asm/xive-regs.h>
#include <asm/hvcall.h>

#include "xive-internal.h"

static u32 xive_queue_shift;

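/*
 * The sPAPR backend has no global view of the interrupt hardware.
 * Logical Interrupt Source Numbers (LISNs) are handed out from the
 * ranges the hypervisor advertises in the "ibm,xive-lisn-ranges"
 * device tree property, tracked with the small bitmap allocator
 * below.
 */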
struct xive_irq_bitmap {
	unsigned long *bitmap;
	unsigned int base;
	unsigned int count;
	spinlock_t lock;
	struct list_head list;
};

static LIST_HEAD(xive_irq_bitmaps);

static int xive_irq_bitmap_add(int base, int count)
{
	struct xive_irq_bitmap *xibm;

	xibm = kzalloc(sizeof(*xibm), GFP_ATOMIC);
	if (!xibm)
		return -ENOMEM;

	spin_lock_init(&xibm->lock);
	xibm->base = base;
	xibm->count = count;
	/* The bitmap is sized in longs, one bit per LISN in the range */
	xibm->bitmap = kzalloc(BITS_TO_LONGS(count) * sizeof(long), GFP_KERNEL);
	if (!xibm->bitmap) {
		kfree(xibm);
		return -ENOMEM;
	}
	list_add(&xibm->list, &xive_irq_bitmaps);

	pr_info("Using IRQ range [%x-%x]\n", xibm->base,
		xibm->base + xibm->count - 1);
	return 0;
}

static int __xive_irq_bitmap_alloc(struct xive_irq_bitmap *xibm)
{
	int irq;

	irq = find_first_zero_bit(xibm->bitmap, xibm->count);
	if (irq != xibm->count) {
		set_bit(irq, xibm->bitmap);
		irq += xibm->base;
	} else {
		irq = -ENOMEM;
	}

	return irq;
}

static int xive_irq_bitmap_alloc(void)
{
	struct xive_irq_bitmap *xibm;
	unsigned long flags;
	int irq = -ENOENT;

	list_for_each_entry(xibm, &xive_irq_bitmaps, list) {
		spin_lock_irqsave(&xibm->lock, flags);
		irq = __xive_irq_bitmap_alloc(xibm);
		spin_unlock_irqrestore(&xibm->lock, flags);
		if (irq >= 0)
			break;
	}
	return irq;
}

static void xive_irq_bitmap_free(int irq)
{
	unsigned long flags;
	struct xive_irq_bitmap *xibm;

	list_for_each_entry(xibm, &xive_irq_bitmaps, list) {
		if ((irq >= xibm->base) && (irq < xibm->base + xibm->count)) {
			spin_lock_irqsave(&xibm->lock, flags);
			clear_bit(irq - xibm->base, xibm->bitmap);
			spin_unlock_irqrestore(&xibm->lock, flags);
			break;
		}
	}
}

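/*
 * Thin wrappers around the H_INT_* hypervisor calls of the PAPR XIVE
 * exploitation interface. Each wrapper logs failures and unpacks the
 * hcall return buffer into the caller's variables.
 */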
static long plpar_int_get_source_info(unsigned long flags,
				      unsigned long lisn,
				      unsigned long *src_flags,
				      unsigned long *eoi_page,
				      unsigned long *trig_page,
				      unsigned long *esb_shift)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	rc = plpar_hcall(H_INT_GET_SOURCE_INFO, retbuf, flags, lisn);
	if (rc) {
		pr_err("H_INT_GET_SOURCE_INFO lisn=%ld failed %ld\n", lisn, rc);
		return rc;
	}

	*src_flags = retbuf[0];
	*eoi_page = retbuf[1];
	*trig_page = retbuf[2];
	*esb_shift = retbuf[3];

	pr_devel("H_INT_GET_SOURCE_INFO flags=%lx eoi=%lx trig=%lx shift=%lx\n",
		 retbuf[0], retbuf[1], retbuf[2], retbuf[3]);

	return 0;
}

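/*
 * The PAPR documents use the IBM bit-numbering convention: bit 0 is
 * the most significant bit of the 64-bit word. The (63 - n) shifts
 * in the flag definitions below translate those architected bit
 * numbers into the LSB-0 positions the compiler works with.
 */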
#define XIVE_SRC_SET_EISN	(1ull << (63 - 62))
#define XIVE_SRC_MASK		(1ull << (63 - 63)) /* unused */

static long plpar_int_set_source_config(unsigned long flags,
					unsigned long lisn,
					unsigned long target,
					unsigned long prio,
					unsigned long sw_irq)
{
	long rc;

	pr_devel("H_INT_SET_SOURCE_CONFIG flags=%lx lisn=%lx target=%lx prio=%lx sw_irq=%lx\n",
		 flags, lisn, target, prio, sw_irq);

	rc = plpar_hcall_norets(H_INT_SET_SOURCE_CONFIG, flags, lisn,
				target, prio, sw_irq);
	if (rc) {
		pr_err("H_INT_SET_SOURCE_CONFIG lisn=%ld target=%lx prio=%lx failed %ld\n",
		       lisn, target, prio, rc);
		return rc;
	}

	return 0;
}

static long plpar_int_get_queue_info(unsigned long flags,
				     unsigned long target,
				     unsigned long priority,
				     unsigned long *esn_page,
				     unsigned long *esn_size)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	rc = plpar_hcall(H_INT_GET_QUEUE_INFO, retbuf, flags, target, priority);
	if (rc) {
		pr_err("H_INT_GET_QUEUE_INFO cpu=%ld prio=%ld failed %ld\n",
		       target, priority, rc);
		return rc;
	}

	*esn_page = retbuf[0];
	*esn_size = retbuf[1];

	pr_devel("H_INT_GET_QUEUE_INFO page=%lx size=%lx\n",
		 retbuf[0], retbuf[1]);

	return 0;
}

#define XIVE_EQ_ALWAYS_NOTIFY	(1ull << (63 - 63))

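/*
 * qsize is the event queue size as a power of two (log2 of the queue
 * size in bytes). Passing a null qpage with a zero qsize un-configures
 * the queue, which is how xive_spapr_cleanup_queue() tears an EQ down.
 */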
static long plpar_int_set_queue_config(unsigned long flags,
				       unsigned long target,
				       unsigned long priority,
				       unsigned long qpage,
				       unsigned long qsize)
{
	long rc;

	pr_devel("H_INT_SET_QUEUE_CONFIG flags=%lx target=%lx priority=%lx qpage=%lx qsize=%lx\n",
		 flags, target, priority, qpage, qsize);

	rc = plpar_hcall_norets(H_INT_SET_QUEUE_CONFIG, flags, target,
				priority, qpage, qsize);
	if (rc) {
		pr_err("H_INT_SET_QUEUE_CONFIG cpu=%ld prio=%ld qpage=%lx returned %ld\n",
		       target, priority, qpage, rc);
		return rc;
	}

	return 0;
}

static long plpar_int_sync(unsigned long flags, unsigned long lisn)
{
	long rc;

	rc = plpar_hcall_norets(H_INT_SYNC, flags, lisn);
	if (rc) {
		pr_err("H_INT_SYNC lisn=%ld returned %ld\n", lisn, rc);
		return rc;
	}

	return 0;
}

#define XIVE_ESB_FLAG_STORE	(1ull << (63 - 63))

static long plpar_int_esb(unsigned long flags,
			  unsigned long lisn,
			  unsigned long offset,
			  unsigned long in_data,
			  unsigned long *out_data)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	pr_devel("H_INT_ESB flags=%lx lisn=%lx offset=%lx in=%lx\n",
		 flags, lisn, offset, in_data);

	rc = plpar_hcall(H_INT_ESB, retbuf, flags, lisn, offset, in_data);
	if (rc) {
		pr_err("H_INT_ESB lisn=%ld offset=%ld returned %ld\n",
		       lisn, offset, rc);
		return rc;
	}

	*out_data = retbuf[0];

	return 0;
}

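/*
 * A guest may not be allowed to map the ESB pages of a source
 * directly: when firmware sets XIVE_SRC_H_INT_ESB for a source, its
 * ESB loads and stores are routed through the H_INT_ESB hcall
 * instead of MMIO.
 */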
static u64 xive_spapr_esb_rw(u32 lisn, u32 offset, u64 data, bool write)
{
	unsigned long read_data;
	long rc;

	rc = plpar_int_esb(write ? XIVE_ESB_FLAG_STORE : 0,
			   lisn, offset, data, &read_data);
	if (rc)
		return -1;

	return write ? 0 : read_data;
}

#define XIVE_SRC_H_INT_ESB	(1ull << (63 - 60))
#define XIVE_SRC_LSI		(1ull << (63 - 61))
#define XIVE_SRC_TRIGGER	(1ull << (63 - 62))
#define XIVE_SRC_STORE_EOI	(1ull << (63 - 63))

static int xive_spapr_populate_irq_data(u32 hw_irq, struct xive_irq_data *data)
{
	long rc;
	unsigned long flags;
	unsigned long eoi_page;
	unsigned long trig_page;
	unsigned long esb_shift;

	memset(data, 0, sizeof(*data));

	rc = plpar_int_get_source_info(0, hw_irq, &flags, &eoi_page, &trig_page,
				       &esb_shift);
	if (rc)
		return -EINVAL;

	if (flags & XIVE_SRC_H_INT_ESB)
		data->flags |= XIVE_IRQ_FLAG_H_INT_ESB;
	if (flags & XIVE_SRC_STORE_EOI)
		data->flags |= XIVE_IRQ_FLAG_STORE_EOI;
	if (flags & XIVE_SRC_LSI)
		data->flags |= XIVE_IRQ_FLAG_LSI;
	data->eoi_page = eoi_page;
	data->esb_shift = esb_shift;
	data->trig_page = trig_page;

	/*
	 * No chip-id for the sPAPR backend. This has an impact on how
	 * we pick a target. See xive_pick_irq_target().
	 */
	data->src_chip = XIVE_INVALID_CHIP_ID;

	data->eoi_mmio = ioremap(data->eoi_page, 1u << data->esb_shift);
	if (!data->eoi_mmio) {
		pr_err("Failed to map EOI page for irq 0x%x\n", hw_irq);
		return -ENOMEM;
	}

	data->hw_irq = hw_irq;

	/* A full-function page supports trigger */
	if (flags & XIVE_SRC_TRIGGER) {
		data->trig_mmio = data->eoi_mmio;
		return 0;
	}

	data->trig_mmio = ioremap(data->trig_page, 1u << data->esb_shift);
	if (!data->trig_mmio) {
		pr_err("Failed to map trigger page for irq 0x%x\n", hw_irq);
		return -ENOMEM;
	}
	return 0;
}

static int xive_spapr_configure_irq(u32 hw_irq, u32 target, u8 prio, u32 sw_irq)
{
	long rc;

	rc = plpar_int_set_source_config(XIVE_SRC_SET_EISN, hw_irq, target,
					 prio, sw_irq);

	return rc == 0 ? 0 : -ENXIO;
}

/* This can be called multiple times to change a queue configuration */
static int xive_spapr_configure_queue(u32 target, struct xive_q *q, u8 prio,
				      __be32 *qpage, u32 order)
{
	s64 rc = 0;
	unsigned long esn_page;
	unsigned long esn_size;
	u64 flags, qpage_phys;

	/* If there's an actual queue page, get its physical address */
	if (order) {
		if (WARN_ON(!qpage))
			return -EINVAL;
		qpage_phys = __pa(qpage);
	} else {
		qpage_phys = 0;
	}

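	/*
	 * Queue entries are 4 bytes wide, so a queue of 2^order bytes
	 * holds 2^(order - 2) entries; msk is the corresponding index
	 * mask used to wrap q->idx.
	 */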
	/* Initialize the rest of the fields */
	q->msk = order ? ((1u << (order - 2)) - 1) : 0;
	q->idx = 0;
	q->toggle = 0;

	rc = plpar_int_get_queue_info(0, target, prio, &esn_page, &esn_size);
	if (rc) {
		pr_err("Error %lld getting queue info prio %d\n", rc, prio);
		rc = -EIO;
		goto fail;
	}

	/* TODO: add support for the notification page */
	q->eoi_phys = esn_page;

	/* Default is to always notify */
	flags = XIVE_EQ_ALWAYS_NOTIFY;

	/* Configure and enable the queue in HW */
	rc = plpar_int_set_queue_config(flags, target, prio, qpage_phys, order);
	if (rc) {
		pr_err("Error %lld setting queue for prio %d\n", rc, prio);
		rc = -EIO;
	} else {
		q->qpage = qpage;
	}
fail:
	return rc;
}

static int xive_spapr_setup_queue(unsigned int cpu, struct xive_cpu *xc,
				  u8 prio)
{
	struct xive_q *q = &xc->queue[prio];
	__be32 *qpage;

	qpage = xive_queue_page_alloc(cpu, xive_queue_shift);
	if (IS_ERR(qpage))
		return PTR_ERR(qpage);

	return xive_spapr_configure_queue(cpu, q, prio, qpage,
					  xive_queue_shift);
}

static void xive_spapr_cleanup_queue(unsigned int cpu, struct xive_cpu *xc,
				     u8 prio)
{
	struct xive_q *q = &xc->queue[prio];
	unsigned int alloc_order;
	long rc;

	rc = plpar_int_set_queue_config(0, cpu, prio, 0, 0);
	if (rc)
		pr_err("Error %ld setting queue for prio %d\n", rc, prio);

	alloc_order = xive_alloc_order(xive_queue_shift);
	free_pages((unsigned long)q->qpage, alloc_order);
	q->qpage = NULL;
}

static bool xive_spapr_match(struct device_node *node)
{
	/* Ignore cascaded controllers for the moment */
	return true;
}

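/*
 * IPIs have no dedicated range on sPAPR: they are plain LISNs
 * allocated from the same bitmaps as device interrupts.
 */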
#ifdef CONFIG_SMP
static int xive_spapr_get_ipi(unsigned int cpu, struct xive_cpu *xc)
{
	int irq = xive_irq_bitmap_alloc();

	if (irq < 0) {
		pr_err("Failed to allocate IPI on CPU %d\n", cpu);
		return -ENXIO;
	}

	xc->hw_ipi = irq;
	return 0;
}

static void xive_spapr_put_ipi(unsigned int cpu, struct xive_cpu *xc)
{
	if (!xc->hw_ipi)
		return;

	xive_irq_bitmap_free(xc->hw_ipi);
	xc->hw_ipi = 0;
}
#endif /* CONFIG_SMP */

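/*
 * H_INT_RESET drops the partition's XIVE source and queue
 * configuration in the hypervisor, so that a subsequent kernel
 * (e.g. after a kexec) starts from a clean state.
 */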
static void xive_spapr_shutdown(void)
{
	long rc;

	rc = plpar_hcall_norets(H_INT_RESET, 0);
	if (rc)
		pr_err("H_INT_RESET failed %ld\n", rc);
}

/*
 * Perform an "ack" cycle on the current thread. Grab the pending
 * active priorities and update the CPPR to the most favored one.
 */
static void xive_spapr_update_pending(struct xive_cpu *xc)
{
	u8 nsr, cppr;
	u16 ack;

	/*
	 * Perform the "Acknowledge O/S to Register" cycle.
	 *
	 * Let's speed up the access to the TIMA using the raw I/O
	 * accessor as we don't need the synchronisation routine of
	 * the higher level ones
	 */
	ack = be16_to_cpu(__raw_readw(xive_tima + TM_SPC_ACK_OS_REG));

	/* Synchronize subsequent queue accesses */
	mb();

	/*
	 * Grab the CPPR and the "NSR" field which indicates the source
	 * of the interrupt (if any)
	 */
	cppr = ack & 0xff;
	nsr = ack >> 8;

	if (nsr & TM_QW1_NSR_EO) {
		if (cppr == 0xff)
			return;
		/* Mark the priority pending */
		xc->pending_prio |= 1 << cppr;

		/*
		 * A new interrupt should never have a CPPR less favored
		 * than our current one.
		 */
		if (cppr >= xc->cppr)
			pr_err("CPU %d odd ack CPPR, got %d at %d\n",
			       smp_processor_id(), cppr, xc->cppr);

		/* Update our idea of what the CPPR is */
		xc->cppr = cppr;
	}
}

static void xive_spapr_eoi(u32 hw_irq)
{
	/* Not used */;
}

static void xive_spapr_setup_cpu(unsigned int cpu, struct xive_cpu *xc)
{
	/* Only some debug on the TIMA settings */
	pr_debug("(HW value: %08x %08x %08x)\n",
		 in_be32(xive_tima + TM_QW1_OS + TM_WORD0),
		 in_be32(xive_tima + TM_QW1_OS + TM_WORD1),
		 in_be32(xive_tima + TM_QW1_OS + TM_WORD2));
}

static void xive_spapr_teardown_cpu(unsigned int cpu, struct xive_cpu *xc)
{
	/* Nothing to do */;
}

static void xive_spapr_sync_source(u32 hw_irq)
{
	/* Specs are unclear on what this is doing */
	plpar_int_sync(0, hw_irq);
}

static const struct xive_ops xive_spapr_ops = {
	.populate_irq_data	= xive_spapr_populate_irq_data,
	.configure_irq		= xive_spapr_configure_irq,
	.setup_queue		= xive_spapr_setup_queue,
	.cleanup_queue		= xive_spapr_cleanup_queue,
	.match			= xive_spapr_match,
	.shutdown		= xive_spapr_shutdown,
	.update_pending		= xive_spapr_update_pending,
	.eoi			= xive_spapr_eoi,
	.setup_cpu		= xive_spapr_setup_cpu,
	.teardown_cpu		= xive_spapr_teardown_cpu,
	.sync_source		= xive_spapr_sync_source,
	.esb_rw			= xive_spapr_esb_rw,
#ifdef CONFIG_SMP
	.get_ipi		= xive_spapr_get_ipi,
	.put_ipi		= xive_spapr_put_ipi,
#endif /* CONFIG_SMP */
	.name			= "spapr",
};

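/*
 * Priorities on XIVE are inverted: numerically higher values are
 * less favored. "ibm,plat-res-int-priorities" is a list of
 * <base, range> cell pairs naming the priorities the hypervisor
 * keeps for itself; for example (hypothetical values):
 *
 *	ibm,plat-res-int-priorities = <7 1>;
 *
 * would mark priority 7 as unavailable to the OS.
 */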
/*
 * Get the maximum priority usable by the OS from
 * "/ibm,plat-res-int-priorities"
 */
static bool xive_get_max_prio(u8 *max_prio)
{
	struct device_node *rootdn;
	const __be32 *reg;
	u32 len;
	int prio, found;

	rootdn = of_find_node_by_path("/");
	if (!rootdn) {
		pr_err("root node not found!\n");
		return false;
	}

	reg = of_get_property(rootdn, "ibm,plat-res-int-priorities", &len);
	of_node_put(rootdn);
	if (!reg) {
		pr_err("Failed to read 'ibm,plat-res-int-priorities' property\n");
		return false;
	}

	if (len % (2 * sizeof(u32)) != 0) {
		pr_err("invalid 'ibm,plat-res-int-priorities' property\n");
		return false;
	}

	/*
	 * HW supports priorities in the range [0-7] and 0xFF is a
	 * wildcard priority used to mask. We scan the ranges reserved
	 * by the hypervisor to find the lowest priority we can use.
	 */
	found = 0xFF;
	for (prio = 0; prio < 8; prio++) {
		int reserved = 0;
		int i;

		for (i = 0; i < len / (2 * sizeof(u32)); i++) {
			int base = be32_to_cpu(reg[2 * i]);
			int range = be32_to_cpu(reg[2 * i + 1]);

			if (prio >= base && prio < base + range)
				reserved++;
		}

		if (!reserved)
			found = prio;
	}

	if (found == 0xFF) {
		pr_err("no valid priority found in 'ibm,plat-res-int-priorities'\n");
		return false;
	}

	*max_prio = found;
	return true;
}

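/*
 * Probe the "ibm,power-ivpe" device tree node. Resource 1 provides
 * the OS ring of the Thread Interrupt Management Area (TIMA),
 * "ibm,xive-lisn-ranges" seeds the LISN allocator, and
 * "ibm,xive-eq-sizes" lists the event queue sizes the hypervisor
 * supports.
 */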
bool __init xive_spapr_init(void)
{
	struct device_node *np;
	struct resource r;
	void __iomem *tima;
	struct property *prop;
	u8 max_prio;
	u32 val;
	u32 len;
	const __be32 *reg;
	int i;

	if (xive_cmdline_disabled)
		return false;

	pr_devel("%s()\n", __func__);
	np = of_find_compatible_node(NULL, NULL, "ibm,power-ivpe");
	if (!np) {
		pr_devel("not found!\n");
		return false;
	}
	pr_devel("Found %s\n", np->full_name);

	/* Resource 1 is the OS ring TIMA */
	if (of_address_to_resource(np, 1, &r)) {
		pr_err("Failed to get thread mgmnt area resource\n");
		return false;
	}
	tima = ioremap(r.start, resource_size(&r));
	if (!tima) {
		pr_err("Failed to map thread mgmnt area\n");
		return false;
	}

	if (!xive_get_max_prio(&max_prio))
		return false;

	/* Feed the IRQ number allocator with the ranges given in the DT */
	reg = of_get_property(np, "ibm,xive-lisn-ranges", &len);
	if (!reg) {
		pr_err("Failed to read 'ibm,xive-lisn-ranges' property\n");
		return false;
	}

	if (len % (2 * sizeof(u32)) != 0) {
		pr_err("invalid 'ibm,xive-lisn-ranges' property\n");
		return false;
	}

	for (i = 0; i < len / (2 * sizeof(u32)); i++, reg += 2)
		xive_irq_bitmap_add(be32_to_cpu(reg[0]),
				    be32_to_cpu(reg[1]));

	/* Iterate the EQ sizes and pick one */
	of_property_for_each_u32(np, "ibm,xive-eq-sizes", prop, reg, val) {
		xive_queue_shift = val;
		if (val == PAGE_SHIFT)
			break;
	}

	/* Initialize XIVE core with our backend */
	if (!xive_core_init(&xive_spapr_ops, tima, TM_QW1_OS, max_prio))
		return false;

	pr_info("Using %dkB queues\n", 1 << (xive_queue_shift - 10));
	return true;
}