/*
 * Low-level SPU handling
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/linux_logo.h>
#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/xmon.h>
#include <asm/prom.h>
#include "spu_priv1_mmio.h"

const struct spu_management_ops *spu_management_ops;
EXPORT_SYMBOL_GPL(spu_management_ops);

const struct spu_priv1_ops *spu_priv1_ops;
EXPORT_SYMBOL_GPL(spu_priv1_ops);

struct cbe_spu_info cbe_spu_info[MAX_NUMNODES];
EXPORT_SYMBOL_GPL(cbe_spu_info);

/*
 * Protects cbe_spu_info and spu->number.
 */
static DEFINE_SPINLOCK(spu_lock);

/*
 * List of all spus in the system.
 *
 * This list is iterated by callers from irq context and callers that
 * want to sleep.  Thus modifications need to be done with both
 * spu_full_list_lock and spu_full_list_mutex held, while iterating
 * through it requires either of these locks.
 *
 * In addition spu_full_list_lock protects all assignments to
 * spu->mm.
 */
static LIST_HEAD(spu_full_list);
static DEFINE_SPINLOCK(spu_full_list_lock);
static DEFINE_MUTEX(spu_full_list_mutex);

void spu_invalidate_slbs(struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        if (spu_mfc_sr1_get(spu) & MFC_STATE1_RELOCATE_MASK)
                out_be64(&priv2->slb_invalidate_all_W, 0UL);
}
EXPORT_SYMBOL_GPL(spu_invalidate_slbs);

/* This is called by the MM core when a segment size is changed, to
 * request a flush of all the SPEs using a given mm
 */
void spu_flush_all_slbs(struct mm_struct *mm)
{
        struct spu *spu;
        unsigned long flags;

        spin_lock_irqsave(&spu_full_list_lock, flags);
        list_for_each_entry(spu, &spu_full_list, full_list) {
                if (spu->mm == mm)
                        spu_invalidate_slbs(spu);
        }
        spin_unlock_irqrestore(&spu_full_list_lock, flags);
}

/* The hack below stinks... try to do something better one of
 * these days... Does it even work properly with NR_CPUS == 1 ?
 */
static inline void mm_needs_global_tlbie(struct mm_struct *mm)
{
        int nr = (NR_CPUS > 1) ? NR_CPUS : NR_CPUS + 1;

        /* Global TLBIE broadcast required with SPEs. */
        __cpus_setall(&mm->cpu_vm_mask, nr);
}

void spu_associate_mm(struct spu *spu, struct mm_struct *mm)
{
        unsigned long flags;

        spin_lock_irqsave(&spu_full_list_lock, flags);
        spu->mm = mm;
        spin_unlock_irqrestore(&spu_full_list_lock, flags);
        if (mm)
                mm_needs_global_tlbie(mm);
}
EXPORT_SYMBOL_GPL(spu_associate_mm);

static int __spu_trap_invalid_dma(struct spu *spu)
{
        pr_debug("%s\n", __FUNCTION__);
        spu->dma_callback(spu, SPE_EVENT_INVALID_DMA);
        return 0;
}

static int __spu_trap_dma_align(struct spu *spu)
{
        pr_debug("%s\n", __FUNCTION__);
        spu->dma_callback(spu, SPE_EVENT_DMA_ALIGNMENT);
        return 0;
}

static int __spu_trap_error(struct spu *spu)
{
        pr_debug("%s\n", __FUNCTION__);
        spu->dma_callback(spu, SPE_EVENT_SPE_ERROR);
        return 0;
}

static void spu_restart_dma(struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        if (!test_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags))
                out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
}

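/*
 * Handle an SPE-side SLB miss: build an SLB entry for the faulting
 * effective address, install it in the SPU's SLB, and restart the
 * stalled MFC DMA.
 */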
static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;
        struct mm_struct *mm = spu->mm;
        u64 esid, vsid, llp;
        int psize;

        pr_debug("%s\n", __FUNCTION__);

        if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags)) {
                /* SLBs are pre-loaded for context switch, so
                 * we should never get here!
                 */
                printk("%s: invalid access during switch!\n", __func__);
                return 1;
        }
        esid = (ea & ESID_MASK) | SLB_ESID_V;

        switch(REGION_ID(ea)) {
        case USER_REGION_ID:
#ifdef CONFIG_PPC_MM_SLICES
                psize = get_slice_psize(mm, ea);
#else
                psize = mm->context.user_psize;
#endif
                vsid = (get_vsid(mm->context.id, ea) << SLB_VSID_SHIFT) |
                                SLB_VSID_USER;
                break;
        case VMALLOC_REGION_ID:
                if (ea < VMALLOC_END)
                        psize = mmu_vmalloc_psize;
                else
                        psize = mmu_io_psize;
                vsid = (get_kernel_vsid(ea) << SLB_VSID_SHIFT) |
                                SLB_VSID_KERNEL;
                break;
        case KERNEL_REGION_ID:
                psize = mmu_linear_psize;
                vsid = (get_kernel_vsid(ea) << SLB_VSID_SHIFT) |
                                SLB_VSID_KERNEL;
                break;
        default:
                /* Future: support kernel segments so that drivers
                 * can use SPUs.
                 */
                pr_debug("invalid region access at %016lx\n", ea);
                return 1;
        }
        llp = mmu_psize_defs[psize].sllp;

        out_be64(&priv2->slb_index_W, spu->slb_replace);
        out_be64(&priv2->slb_vsid_RW, vsid | llp);
        out_be64(&priv2->slb_esid_RW, esid);

        spu->slb_replace++;
        if (spu->slb_replace >= 8)
                spu->slb_replace = 0;

        spu_restart_dma(spu);
        spu->stats.slb_flt++;
        return 0;
}

extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap); //XXX
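/*
 * Handle an MFC data storage fault. Kernel-space hash faults are
 * serviced right here; user-space faults are recorded in dar/dsisr
 * and deferred to the context's stop callback for handling in
 * process context.
 */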
static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
{
        pr_debug("%s, %lx, %lx\n", __FUNCTION__, dsisr, ea);

        /* Handle kernel space hash faults immediately.
           User hash faults need to be deferred to process context. */
        if ((dsisr & MFC_DSISR_PTE_NOT_FOUND)
            && REGION_ID(ea) != USER_REGION_ID
            && hash_page(ea, _PAGE_PRESENT, 0x300) == 0) {
                spu_restart_dma(spu);
                return 0;
        }

        if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags)) {
                printk("%s: invalid access during switch!\n", __func__);
                return 1;
        }

        spu->dar = ea;
        spu->dsisr = dsisr;
        mb();
        spu->stop_callback(spu);
        return 0;
}

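/*
 * Class 0 interrupts signal SPU error conditions (DMA alignment,
 * invalid MFC DMA, SPU error); the actual handling is done in the
 * bottom half below.
 */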
static irqreturn_t
spu_irq_class_0(int irq, void *data)
{
        struct spu *spu;

        spu = data;
        spu->class_0_pending = 1;
        spu->stop_callback(spu);

        return IRQ_HANDLED;
}

int
spu_irq_class_0_bottom(struct spu *spu)
{
        unsigned long stat, mask;
        unsigned long flags;

        spu->class_0_pending = 0;

        spin_lock_irqsave(&spu->register_lock, flags);
        mask = spu_int_mask_get(spu, 0);
        stat = spu_int_stat_get(spu, 0);

        stat &= mask;

        if (stat & 1) /* invalid DMA alignment */
                __spu_trap_dma_align(spu);

        if (stat & 2) /* invalid MFC DMA */
                __spu_trap_invalid_dma(spu);

        if (stat & 4) /* error on SPU */
                __spu_trap_error(spu);

        spu_int_stat_clear(spu, 0, stat);
        spin_unlock_irqrestore(&spu->register_lock, flags);

        return (stat & 0x7) ? -EIO : 0;
}
EXPORT_SYMBOL_GPL(spu_irq_class_0_bottom);

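/*
 * Class 1 interrupts signal MFC translation faults: SLB segment
 * misses and page mapping faults raised by SPE-initiated DMA.
 */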
static irqreturn_t
spu_irq_class_1(int irq, void *data)
{
        struct spu *spu;
        unsigned long stat, mask, dar, dsisr;

        spu = data;

        /* atomically read & clear class1 status. */
        spin_lock(&spu->register_lock);
        mask  = spu_int_mask_get(spu, 1);
        stat  = spu_int_stat_get(spu, 1) & mask;
        dar   = spu_mfc_dar_get(spu);
        dsisr = spu_mfc_dsisr_get(spu);
        if (stat & 2) /* mapping fault */
                spu_mfc_dsisr_set(spu, 0ul);
        spu_int_stat_clear(spu, 1, stat);
        spin_unlock(&spu->register_lock);
        pr_debug("%s: %lx %lx %lx %lx\n", __FUNCTION__, mask, stat,
                        dar, dsisr);

        if (stat & 1) /* segment fault */
                __spu_trap_data_seg(spu, dar);

        if (stat & 2) { /* mapping fault */
                __spu_trap_data_map(spu, dar, dsisr);
        }

        if (stat & 4) /* ls compare & suspend on get */
                ;

        if (stat & 8) /* ls compare & suspend on put */
                ;

        return stat ? IRQ_HANDLED : IRQ_NONE;
}

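/*
 * Class 2 interrupts signal application events: mailbox traffic,
 * SPU stop-and-signal, SPU halt and MFC tag group completion.
 */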
static irqreturn_t
spu_irq_class_2(int irq, void *data)
{
        struct spu *spu;
        unsigned long stat;
        unsigned long mask;

        spu = data;
        spin_lock(&spu->register_lock);
        stat = spu_int_stat_get(spu, 2);
        mask = spu_int_mask_get(spu, 2);
        /* ignore interrupts we're not waiting for */
        stat &= mask;
        /*
         * mailbox interrupts (0x1 and 0x10) are level triggered.
         * mask them now before acknowledging.
         */
        if (stat & 0x11)
                spu_int_mask_and(spu, 2, ~(stat & 0x11));
        /* acknowledge all interrupts before the callbacks */
        spu_int_stat_clear(spu, 2, stat);
        spin_unlock(&spu->register_lock);

        pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat, mask);

        if (stat & 1)  /* PPC core mailbox */
                spu->ibox_callback(spu);

        if (stat & 2) /* SPU stop-and-signal */
                spu->stop_callback(spu);

        if (stat & 4) /* SPU halted */
                spu->stop_callback(spu);

        if (stat & 8) /* DMA tag group complete */
                spu->mfc_callback(spu);

        if (stat & 0x10) /* SPU mailbox threshold */
                spu->wbox_callback(spu);

        spu->stats.class2_intr++;
        return stat ? IRQ_HANDLED : IRQ_NONE;
}

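/*
 * Each SPU has one interrupt line per interrupt class; register a
 * handler for every line the platform actually wired up.
 */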
static int spu_request_irqs(struct spu *spu)
{
        int ret = 0;

        if (spu->irqs[0] != NO_IRQ) {
                snprintf(spu->irq_c0, sizeof (spu->irq_c0), "spe%02d.0",
                         spu->number);
                ret = request_irq(spu->irqs[0], spu_irq_class_0,
                                  IRQF_DISABLED,
                                  spu->irq_c0, spu);
                if (ret)
                        goto bail0;
        }
        if (spu->irqs[1] != NO_IRQ) {
                snprintf(spu->irq_c1, sizeof (spu->irq_c1), "spe%02d.1",
                         spu->number);
                ret = request_irq(spu->irqs[1], spu_irq_class_1,
                                  IRQF_DISABLED,
                                  spu->irq_c1, spu);
                if (ret)
                        goto bail1;
        }
        if (spu->irqs[2] != NO_IRQ) {
                snprintf(spu->irq_c2, sizeof (spu->irq_c2), "spe%02d.2",
                         spu->number);
                ret = request_irq(spu->irqs[2], spu_irq_class_2,
                                  IRQF_DISABLED,
                                  spu->irq_c2, spu);
                if (ret)
                        goto bail2;
        }
        return 0;

bail2:
        if (spu->irqs[1] != NO_IRQ)
                free_irq(spu->irqs[1], spu);
bail1:
        if (spu->irqs[0] != NO_IRQ)
                free_irq(spu->irqs[0], spu);
bail0:
        return ret;
}

static void spu_free_irqs(struct spu *spu)
{
        if (spu->irqs[0] != NO_IRQ)
                free_irq(spu->irqs[0], spu);
        if (spu->irqs[1] != NO_IRQ)
                free_irq(spu->irqs[1], spu);
        if (spu->irqs[2] != NO_IRQ)
                free_irq(spu->irqs[2], spu);
}

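/*
 * Bring the SPU channels into a known state: clear the channel data
 * registers and program the initial channel counts.
 */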
static void spu_init_channels(struct spu *spu)
{
        static const struct {
                unsigned channel;
                unsigned count;
        } zero_list[] = {
                { 0x00, 1, }, { 0x01, 1, }, { 0x03, 1, }, { 0x04, 1, },
                { 0x18, 1, }, { 0x19, 1, }, { 0x1b, 1, }, { 0x1d, 1, },
        }, count_list[] = {
                { 0x00, 0, }, { 0x03, 0, }, { 0x04, 0, }, { 0x15, 16, },
                { 0x17, 1, }, { 0x18, 0, }, { 0x19, 0, }, { 0x1b, 0, },
                { 0x1c, 1, }, { 0x1d, 0, }, { 0x1e, 1, },
        };
        struct spu_priv2 __iomem *priv2;
        int i;

        priv2 = spu->priv2;

        /* initialize all channel data to zero */
        for (i = 0; i < ARRAY_SIZE(zero_list); i++) {
                int count;

                out_be64(&priv2->spu_chnlcntptr_RW, zero_list[i].channel);
                for (count = 0; count < zero_list[i].count; count++)
                        out_be64(&priv2->spu_chnldata_RW, 0);
        }

        /* initialize channel counts to meaningful values */
        for (i = 0; i < ARRAY_SIZE(count_list); i++) {
                out_be64(&priv2->spu_chnlcntptr_RW, count_list[i].channel);
                out_be64(&priv2->spu_chnlcnt_RW, count_list[i].count);
        }
}

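/*
 * SPU allocation: spu_alloc_spu() claims one specific SPU,
 * spu_alloc_node() takes any free SPU on a given node, and spu_alloc()
 * falls back across all nodes. spu_free() returns an SPU to the free
 * list of its node.
 */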
struct spu *spu_alloc_spu(struct spu *req_spu)
{
        struct spu *spu, *ret = NULL;

        spin_lock(&spu_lock);
        list_for_each_entry(spu, &cbe_spu_info[req_spu->node].free_spus, list) {
                if (spu == req_spu) {
                        list_del_init(&spu->list);
                        pr_debug("Got SPU %d %d\n", spu->number, spu->node);
                        spu_init_channels(spu);
                        ret = spu;
                        break;
                }
        }
        spin_unlock(&spu_lock);
        return ret;
}
EXPORT_SYMBOL_GPL(spu_alloc_spu);

struct spu *spu_alloc_node(int node)
{
        struct spu *spu = NULL;

        spin_lock(&spu_lock);
        if (!list_empty(&cbe_spu_info[node].free_spus)) {
                spu = list_entry(cbe_spu_info[node].free_spus.next, struct spu,
                                                                        list);
                list_del_init(&spu->list);
                pr_debug("Got SPU %d %d\n", spu->number, spu->node);
        }
        spin_unlock(&spu_lock);

        if (spu)
                spu_init_channels(spu);
        return spu;
}
EXPORT_SYMBOL_GPL(spu_alloc_node);

struct spu *spu_alloc(void)
{
        struct spu *spu = NULL;
        int node;

        for (node = 0; node < MAX_NUMNODES; node++) {
                spu = spu_alloc_node(node);
                if (spu)
                        break;
        }

        return spu;
}

void spu_free(struct spu *spu)
{
        spin_lock(&spu_lock);
        list_add_tail(&spu->list, &cbe_spu_info[spu->node].free_spus);
        spin_unlock(&spu_lock);
}
EXPORT_SYMBOL_GPL(spu_free);

static int spu_shutdown(struct sys_device *sysdev)
{
        struct spu *spu = container_of(sysdev, struct spu, sysdev);

        spu_free_irqs(spu);
        spu_destroy_spu(spu);
        return 0;
}

struct sysdev_class spu_sysdev_class = {
        set_kset_name("spu"),
        .shutdown = spu_shutdown,
};

int spu_add_sysdev_attr(struct sysdev_attribute *attr)
{
        struct spu *spu;

        mutex_lock(&spu_full_list_mutex);
        list_for_each_entry(spu, &spu_full_list, full_list)
                sysdev_create_file(&spu->sysdev, attr);
        mutex_unlock(&spu_full_list_mutex);

        return 0;
}
EXPORT_SYMBOL_GPL(spu_add_sysdev_attr);

int spu_add_sysdev_attr_group(struct attribute_group *attrs)
{
        struct spu *spu;

        mutex_lock(&spu_full_list_mutex);
        list_for_each_entry(spu, &spu_full_list, full_list)
                sysfs_create_group(&spu->sysdev.kobj, attrs);
        mutex_unlock(&spu_full_list_mutex);

        return 0;
}
EXPORT_SYMBOL_GPL(spu_add_sysdev_attr_group);


void spu_remove_sysdev_attr(struct sysdev_attribute *attr)
{
        struct spu *spu;

        mutex_lock(&spu_full_list_mutex);
        list_for_each_entry(spu, &spu_full_list, full_list)
                sysdev_remove_file(&spu->sysdev, attr);
        mutex_unlock(&spu_full_list_mutex);
}
EXPORT_SYMBOL_GPL(spu_remove_sysdev_attr);

void spu_remove_sysdev_attr_group(struct attribute_group *attrs)
{
        struct spu *spu;

        mutex_lock(&spu_full_list_mutex);
        list_for_each_entry(spu, &spu_full_list, full_list)
                sysfs_remove_group(&spu->sysdev.kobj, attrs);
        mutex_unlock(&spu_full_list_mutex);
}
EXPORT_SYMBOL_GPL(spu_remove_sysdev_attr_group);

static int spu_create_sysdev(struct spu *spu)
{
        int ret;

        spu->sysdev.id = spu->number;
        spu->sysdev.cls = &spu_sysdev_class;
        ret = sysdev_register(&spu->sysdev);
        if (ret) {
                printk(KERN_ERR "Can't register SPU %d with sysfs\n",
                                spu->number);
                return ret;
        }

        sysfs_add_device_to_node(&spu->sysdev, spu->node);

        return 0;
}

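/*
 * Called once for each SPE found by spu_enumerate_spus(): allocate the
 * spu structure, set up MFC registers, interrupts and the sysdev entry,
 * and add the new SPU to the per-node and global lists.
 */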
static int __init create_spu(void *data)
{
        struct spu *spu;
        int ret;
        static int number;
        unsigned long flags;
        struct timespec ts;

        ret = -ENOMEM;
        spu = kzalloc(sizeof (*spu), GFP_KERNEL);
        if (!spu)
                goto out;

        spin_lock_init(&spu->register_lock);
        spin_lock(&spu_lock);
        spu->number = number++;
        spin_unlock(&spu_lock);

        ret = spu_create_spu(spu, data);

        if (ret)
                goto out_free;

        spu_mfc_sdr_setup(spu);
        spu_mfc_sr1_set(spu, 0x33);
        ret = spu_request_irqs(spu);
        if (ret)
                goto out_destroy;

        ret = spu_create_sysdev(spu);
        if (ret)
                goto out_free_irqs;

        spin_lock(&spu_lock);
        list_add(&spu->list, &cbe_spu_info[spu->node].free_spus);
        list_add(&spu->cbe_list, &cbe_spu_info[spu->node].spus);
        cbe_spu_info[spu->node].n_spus++;
        spin_unlock(&spu_lock);

        mutex_lock(&spu_full_list_mutex);
        spin_lock_irqsave(&spu_full_list_lock, flags);
        list_add(&spu->full_list, &spu_full_list);
        spin_unlock_irqrestore(&spu_full_list_lock, flags);
        mutex_unlock(&spu_full_list_mutex);

        spu->stats.util_state = SPU_UTIL_IDLE_LOADED;
        ktime_get_ts(&ts);
        spu->stats.tstamp = timespec_to_ns(&ts);

        INIT_LIST_HEAD(&spu->aff_list);

        goto out;

out_free_irqs:
        spu_free_irqs(spu);
out_destroy:
        spu_destroy_spu(spu);
out_free:
        kfree(spu);
out:
        return ret;
}

static const char *spu_state_names[] = {
        "user", "system", "iowait", "idle"
};

static unsigned long long spu_acct_time(struct spu *spu,
                enum spu_utilization_state state)
{
        struct timespec ts;
        unsigned long long time = spu->stats.times[state];

        /*
         * If the spu is idle or the context is stopped, utilization
         * statistics are not updated.  Apply the time delta from the
         * last recorded state of the spu.
         */
        if (spu->stats.util_state == state) {
                ktime_get_ts(&ts);
                time += timespec_to_ns(&ts) - spu->stats.tstamp;
        }

        return time / NSEC_PER_MSEC;
}

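/*
 * The per-SPU sysfs "stat" file reports the utilization state followed
 * by the user/system/iowait/idle times (in milliseconds) and the
 * context switch, fault, interrupt and library-assist counters.
 */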
static ssize_t spu_stat_show(struct sys_device *sysdev, char *buf)
{
        struct spu *spu = container_of(sysdev, struct spu, sysdev);

        return sprintf(buf, "%s %llu %llu %llu %llu "
                "%llu %llu %llu %llu %llu %llu %llu %llu\n",
                spu_state_names[spu->stats.util_state],
                spu_acct_time(spu, SPU_UTIL_USER),
                spu_acct_time(spu, SPU_UTIL_SYSTEM),
                spu_acct_time(spu, SPU_UTIL_IOWAIT),
                spu_acct_time(spu, SPU_UTIL_IDLE_LOADED),
                spu->stats.vol_ctx_switch,
                spu->stats.invol_ctx_switch,
                spu->stats.slb_flt,
                spu->stats.hash_flt,
                spu->stats.min_flt,
                spu->stats.maj_flt,
                spu->stats.class2_intr,
                spu->stats.libassist);
}

static SYSDEV_ATTR(stat, 0644, spu_stat_show, NULL);

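/*
 * SPU affinity setup. On machines whose firmware provides "vicinity"
 * properties the physical SPE layout is read from the device tree;
 * on QS20 blades it is derived from a hardcoded table instead.
 */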
/* Hardcoded affinity idxs for QS20 */
#define SPES_PER_BE 8
static int QS20_reg_idxs[SPES_PER_BE] =   { 0, 2, 4, 6, 7, 5, 3, 1 };
static int QS20_reg_memory[SPES_PER_BE] = { 1, 1, 0, 0, 0, 0, 0, 0 };

static struct spu *spu_lookup_reg(int node, u32 reg)
{
        struct spu *spu;

        list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
                if (*(u32 *)get_property(spu_devnode(spu), "reg", NULL) == reg)
                        return spu;
        }
        return NULL;
}

static void init_aff_QS20_hardcoded(void)
{
        int node, i;
        struct spu *last_spu, *spu;
        u32 reg;

        for (node = 0; node < MAX_NUMNODES; node++) {
                last_spu = NULL;
                for (i = 0; i < SPES_PER_BE; i++) {
                        reg = QS20_reg_idxs[i];
                        spu = spu_lookup_reg(node, reg);
                        if (!spu)
                                continue;
                        spu->has_mem_affinity = QS20_reg_memory[reg];
                        if (last_spu)
                                list_add_tail(&spu->aff_list,
                                                &last_spu->aff_list);
                        last_spu = spu;
                }
        }
}

static int of_has_vicinity(void)
{
        struct spu* spu;

        spu = list_entry(cbe_spu_info[0].spus.next, struct spu, cbe_list);
        return of_find_property(spu_devnode(spu), "vicinity", NULL) != NULL;
}

static struct spu *aff_devnode_spu(int cbe, struct device_node *dn)
{
        struct spu *spu;

        list_for_each_entry(spu, &cbe_spu_info[cbe].spus, cbe_list)
                if (spu_devnode(spu) == dn)
                        return spu;
        return NULL;
}

static struct spu *
aff_node_next_to(int cbe, struct device_node *target, struct device_node *avoid)
{
        struct spu *spu;
        const phandle *vic_handles;
        int lenp, i;

        list_for_each_entry(spu, &cbe_spu_info[cbe].spus, cbe_list) {
                if (spu_devnode(spu) == avoid)
                        continue;
                vic_handles = get_property(spu_devnode(spu), "vicinity", &lenp);
                for (i=0; i < (lenp / sizeof(phandle)); i++) {
                        if (vic_handles[i] == target->linux_phandle)
                                return spu;
                }
        }
        return NULL;
}

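/*
 * Walk the firmware "vicinity" properties of one Cell BE to rebuild the
 * chain of neighbouring SPEs in spu->aff_list, marking SPEs that sit
 * next to the memory interface controller (mic-tm) as having memory
 * affinity.
 */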
static void init_aff_fw_vicinity_node(int cbe)
{
        struct spu *spu, *last_spu;
        struct device_node *vic_dn, *last_spu_dn;
        phandle avoid_ph;
        const phandle *vic_handles;
        const char *name;
        int lenp, i, added, mem_aff;

        last_spu = list_entry(cbe_spu_info[cbe].spus.next, struct spu, cbe_list);
        avoid_ph = 0;
        for (added = 1; added < cbe_spu_info[cbe].n_spus; added++) {
                last_spu_dn = spu_devnode(last_spu);
                vic_handles = get_property(last_spu_dn, "vicinity", &lenp);

                for (i = 0; i < (lenp / sizeof(phandle)); i++) {
                        if (vic_handles[i] == avoid_ph)
                                continue;

                        vic_dn = of_find_node_by_phandle(vic_handles[i]);
                        if (!vic_dn)
                                continue;

                        name = get_property(vic_dn, "name", NULL);
                        if (strcmp(name, "spe") == 0) {
                                spu = aff_devnode_spu(cbe, vic_dn);
                                avoid_ph = last_spu_dn->linux_phandle;
                        }
                        else {
                                mem_aff = strcmp(name, "mic-tm") == 0;
                                spu = aff_node_next_to(cbe, vic_dn, last_spu_dn);
                                if (!spu)
                                        continue;
                                if (mem_aff) {
                                        last_spu->has_mem_affinity = 1;
                                        spu->has_mem_affinity = 1;
                                }
                                avoid_ph = vic_dn->linux_phandle;
                        }
                        list_add_tail(&spu->aff_list, &last_spu->aff_list);
                        last_spu = spu;
                        break;
                }
        }
}

static void init_aff_fw_vicinity(void)
{
        int cbe;

        /* sets has_mem_affinity for each spu, as well as the
         * spu->aff_list list, linking each spu to its neighbors
         */
        for (cbe = 0; cbe < MAX_NUMNODES; cbe++)
                init_aff_fw_vicinity_node(cbe);
}

static int __init init_spu_base(void)
{
        int i, ret = 0;

        for (i = 0; i < MAX_NUMNODES; i++) {
                INIT_LIST_HEAD(&cbe_spu_info[i].spus);
                INIT_LIST_HEAD(&cbe_spu_info[i].free_spus);
        }

        if (!spu_management_ops)
                goto out;

        /* create sysdev class for spus */
        ret = sysdev_class_register(&spu_sysdev_class);
        if (ret)
                goto out;

        ret = spu_enumerate_spus(create_spu);

        if (ret < 0) {
                printk(KERN_WARNING "%s: Error initializing spus\n",
                        __FUNCTION__);
                goto out_unregister_sysdev_class;
        }

        if (ret > 0) {
                /*
                 * We cannot put the forward declaration in
                 * <linux/linux_logo.h> because of section type conflicts
                 * for const and __initdata with different compiler versions
                 */
                extern const struct linux_logo logo_spe_clut224;

                fb_append_extra_logo(&logo_spe_clut224, ret);
        }

        mutex_lock(&spu_full_list_mutex);
        xmon_register_spus(&spu_full_list);
        crash_register_spus(&spu_full_list);
        mutex_unlock(&spu_full_list_mutex);
        spu_add_sysdev_attr(&attr_stat);

        if (of_has_vicinity()) {
                init_aff_fw_vicinity();
        } else {
                long root = of_get_flat_dt_root();
                if (of_flat_dt_is_compatible(root, "IBM,CPBW-1.0"))
                        init_aff_QS20_hardcoded();
        }

        return 0;

out_unregister_sysdev_class:
        sysdev_class_unregister(&spu_sysdev_class);
out:
        return ret;
}
module_init(init_spu_base);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");