/*
 * Low-level SPU handling
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/linux_logo.h>
#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/spu_csa.h>
#include <asm/xmon.h>
#include <asm/prom.h>

const struct spu_management_ops *spu_management_ops;
EXPORT_SYMBOL_GPL(spu_management_ops);

const struct spu_priv1_ops *spu_priv1_ops;
EXPORT_SYMBOL_GPL(spu_priv1_ops);

struct cbe_spu_info cbe_spu_info[MAX_NUMNODES];
EXPORT_SYMBOL_GPL(cbe_spu_info);

/*
 * The spufs fault-handling code needs to call force_sig_info to raise signals
 * on DMA errors. Export it here to avoid general kernel-wide access to this
 * function.
 */
EXPORT_SYMBOL_GPL(force_sig_info);

/*
 * Protects cbe_spu_info and spu->number.
 */
static DEFINE_SPINLOCK(spu_lock);

/*
 * List of all spus in the system.
 *
 * This list is iterated by callers from irq context and callers that
 * want to sleep.  Thus modifications need to be done with both
 * spu_full_list_lock and spu_full_list_mutex held, while iterating
 * through it requires either of these locks.
 *
 * In addition spu_full_list_lock protects all assignments to
 * spu->mm.
 */
static LIST_HEAD(spu_full_list);
static DEFINE_SPINLOCK(spu_full_list_lock);
static DEFINE_MUTEX(spu_full_list_mutex);
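
/*
 * Illustrative sketch of the rules above: readers that cannot sleep
 * (e.g. interrupt context) take spu_full_list_lock, readers that may
 * sleep take spu_full_list_mutex, and writers hold both, as create_spu()
 * does when adding to the list:
 *
 *	mutex_lock(&spu_full_list_mutex);
 *	spin_lock_irqsave(&spu_full_list_lock, flags);
 *	list_add(&spu->full_list, &spu_full_list);
 *	spin_unlock_irqrestore(&spu_full_list_lock, flags);
 *	mutex_unlock(&spu_full_list_mutex);
 */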

struct spu_slb {
	u64 esid, vsid;
};

void spu_invalidate_slbs(struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	if (spu_mfc_sr1_get(spu) & MFC_STATE1_RELOCATE_MASK)
		out_be64(&priv2->slb_invalidate_all_W, 0UL);
}
EXPORT_SYMBOL_GPL(spu_invalidate_slbs);

/* This is called by the MM core when a segment size is changed, to
 * request a flush of all the SPEs using a given mm
 */
void spu_flush_all_slbs(struct mm_struct *mm)
{
	struct spu *spu;
	unsigned long flags;

	spin_lock_irqsave(&spu_full_list_lock, flags);
	list_for_each_entry(spu, &spu_full_list, full_list) {
		if (spu->mm == mm)
			spu_invalidate_slbs(spu);
	}
	spin_unlock_irqrestore(&spu_full_list_lock, flags);
}

/* The hack below stinks... try to do something better one of
 * these days... Does it even work properly with NR_CPUS == 1 ?
 */
static inline void mm_needs_global_tlbie(struct mm_struct *mm)
{
	int nr = (NR_CPUS > 1) ? NR_CPUS : NR_CPUS + 1;

	/* Global TLBIE broadcast required with SPEs. */
	__cpus_setall(&mm->cpu_vm_mask, nr);
}
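
/*
 * Associate an mm with an SPU.  The assignment to spu->mm is done under
 * spu_full_list_lock (see the locking comment above) so that
 * spu_flush_all_slbs() always sees a consistent value while iterating.
 */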
void spu_associate_mm(struct spu *spu, struct mm_struct *mm)
{
	unsigned long flags;

	spin_lock_irqsave(&spu_full_list_lock, flags);
	spu->mm = mm;
	spin_unlock_irqrestore(&spu_full_list_lock, flags);
	if (mm)
		mm_needs_global_tlbie(mm);
}
EXPORT_SYMBOL_GPL(spu_associate_mm);

static int __spu_trap_invalid_dma(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	spu->dma_callback(spu, SPE_EVENT_INVALID_DMA);
	return 0;
}

static int __spu_trap_dma_align(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	spu->dma_callback(spu, SPE_EVENT_DMA_ALIGNMENT);
	return 0;
}

static int __spu_trap_error(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	spu->dma_callback(spu, SPE_EVENT_SPE_ERROR);
	return 0;
}
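
/*
 * Restart the MFC DMA queue after a fault has been handled.  When a
 * context switch is pending, the restart is skipped here; the context
 * switch code takes care of the MFC state itself.
 */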
static void spu_restart_dma(struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	if (!test_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags))
		out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
}

static inline void spu_load_slb(struct spu *spu, int slbe, struct spu_slb *slb)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	pr_debug("%s: adding SLB[%d] 0x%016lx 0x%016lx\n",
			__func__, slbe, slb->vsid, slb->esid);

	out_be64(&priv2->slb_index_W, slbe);
	out_be64(&priv2->slb_vsid_RW, slb->vsid);
	out_be64(&priv2->slb_esid_RW, slb->esid);
}
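
/*
 * SLB miss handler: construct and install an SLB entry for the faulting
 * effective address.  The SPU's eight SLB entries are replaced
 * round-robin, tracked by spu->slb_replace.
 */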
static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
{
	struct mm_struct *mm = spu->mm;
	struct spu_slb slb;
	int psize;

	pr_debug("%s\n", __FUNCTION__);

	if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags)) {
		/* SLBs are pre-loaded for context switch, so
		 * we should never get here!
		 */
		printk("%s: invalid access during switch!\n", __func__);
		return 1;
	}
	slb.esid = (ea & ESID_MASK) | SLB_ESID_V;

	switch(REGION_ID(ea)) {
	case USER_REGION_ID:
#ifdef CONFIG_PPC_MM_SLICES
		psize = get_slice_psize(mm, ea);
#else
		psize = mm->context.user_psize;
#endif
		slb.vsid = (get_vsid(mm->context.id, ea, MMU_SEGSIZE_256M)
				<< SLB_VSID_SHIFT) | SLB_VSID_USER;
		break;
	case VMALLOC_REGION_ID:
		if (ea < VMALLOC_END)
			psize = mmu_vmalloc_psize;
		else
			psize = mmu_io_psize;
		slb.vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M)
				<< SLB_VSID_SHIFT) | SLB_VSID_KERNEL;
		break;
	case KERNEL_REGION_ID:
		psize = mmu_linear_psize;
		slb.vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M)
				<< SLB_VSID_SHIFT) | SLB_VSID_KERNEL;
		break;
	default:
		/* Future: support kernel segments so that drivers
		 * can use SPUs.
		 */
		pr_debug("invalid region access at %016lx\n", ea);
		return 1;
	}
	slb.vsid |= mmu_psize_defs[psize].sllp;

	spu_load_slb(spu, spu->slb_replace, &slb);

	spu->slb_replace++;
	if (spu->slb_replace >= 8)
		spu->slb_replace = 0;

	spu_restart_dma(spu);
	spu->stats.slb_flt++;
	return 0;
}

extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap); //XXX
static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
{
	pr_debug("%s, %lx, %lx\n", __FUNCTION__, dsisr, ea);

	/* Handle kernel space hash faults immediately.
	   User hash faults need to be deferred to process context. */
	if ((dsisr & MFC_DSISR_PTE_NOT_FOUND)
	    && REGION_ID(ea) != USER_REGION_ID
	    && hash_page(ea, _PAGE_PRESENT, 0x300) == 0) {
		spu_restart_dma(spu);
		return 0;
	}

	if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags)) {
		printk("%s: invalid access during switch!\n", __func__);
		return 1;
	}

	spu->dar = ea;
	spu->dsisr = dsisr;
	mb();
	spu->stop_callback(spu);
	return 0;
}
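
/*
 * Build an SLB entry for a kernel virtual address, choosing the page
 * size encoding (sllp) depending on whether the address lies in the
 * linear mapping or in vmalloc space.
 */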
static void __spu_kernel_slb(void *addr, struct spu_slb *slb)
{
	unsigned long ea = (unsigned long)addr;
	u64 llp;

	if (REGION_ID(ea) == KERNEL_REGION_ID)
		llp = mmu_psize_defs[mmu_linear_psize].sllp;
	else
		llp = mmu_psize_defs[mmu_virtual_psize].sllp;

	slb->vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M) << SLB_VSID_SHIFT) |
		SLB_VSID_KERNEL | llp;
	slb->esid = (ea & ESID_MASK) | SLB_ESID_V;
}

/**
 * Setup the SPU kernel SLBs, in preparation for a context save/restore. We
 * need to map both the context save area, and the save/restore code.
 */
void spu_setup_kernel_slbs(struct spu *spu, struct spu_lscsa *lscsa, void *code)
{
	struct spu_slb code_slb, lscsa_slb;

	__spu_kernel_slb(lscsa, &lscsa_slb);
	__spu_kernel_slb(code, &code_slb);

	spu_load_slb(spu, 0, &lscsa_slb);
	if (lscsa_slb.esid != code_slb.esid)
		spu_load_slb(spu, 1, &code_slb);
}
EXPORT_SYMBOL_GPL(spu_setup_kernel_slbs);
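
/*
 * Class 0 interrupts report errors: invalid DMA alignment, invalid MFC
 * DMA and SPU errors.  The hard irq handler only records the pending
 * status bits; the traps are dispatched from spu_irq_class_0_bottom().
 */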
static irqreturn_t
spu_irq_class_0(int irq, void *data)
{
	struct spu *spu;
	unsigned long stat, mask;

	spu = data;

	mask = spu_int_mask_get(spu, 0);
	stat = spu_int_stat_get(spu, 0);
	stat &= mask;

	spin_lock(&spu->register_lock);
	spu->class_0_pending |= stat;
	spin_unlock(&spu->register_lock);

	spu->stop_callback(spu);

	spu_int_stat_clear(spu, 0, stat);

	return IRQ_HANDLED;
}

int
spu_irq_class_0_bottom(struct spu *spu)
{
	unsigned long flags;
	unsigned long stat;

	spin_lock_irqsave(&spu->register_lock, flags);
	stat = spu->class_0_pending;
	spu->class_0_pending = 0;

	if (stat & 1) /* invalid DMA alignment */
		__spu_trap_dma_align(spu);

	if (stat & 2) /* invalid MFC DMA */
		__spu_trap_invalid_dma(spu);

	if (stat & 4) /* error on SPU */
		__spu_trap_error(spu);

	spin_unlock_irqrestore(&spu->register_lock, flags);

	return (stat & 0x7) ? -EIO : 0;
}
EXPORT_SYMBOL_GPL(spu_irq_class_0_bottom);
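
/*
 * Class 1 interrupts signal translation faults: SLB segment faults and
 * page mapping faults.  DAR and DSISR are sampled and the status is
 * cleared under register_lock before the faults are dispatched.
 */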
static irqreturn_t
spu_irq_class_1(int irq, void *data)
{
	struct spu *spu;
	unsigned long stat, mask, dar, dsisr;

	spu = data;

	/* atomically read & clear class1 status. */
	spin_lock(&spu->register_lock);
	mask = spu_int_mask_get(spu, 1);
	stat = spu_int_stat_get(spu, 1) & mask;
	dar = spu_mfc_dar_get(spu);
	dsisr = spu_mfc_dsisr_get(spu);
	if (stat & 2) /* mapping fault */
		spu_mfc_dsisr_set(spu, 0ul);
	spu_int_stat_clear(spu, 1, stat);
	spin_unlock(&spu->register_lock);
	pr_debug("%s: %lx %lx %lx %lx\n", __FUNCTION__, mask, stat,
			dar, dsisr);

	if (stat & 1) /* segment fault */
		__spu_trap_data_seg(spu, dar);

	if (stat & 2) { /* mapping fault */
		__spu_trap_data_map(spu, dar, dsisr);
	}

	if (stat & 4) /* ls compare & suspend on get */
		;

	if (stat & 8) /* ls compare & suspend on put */
		;

	return stat ? IRQ_HANDLED : IRQ_NONE;
}
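
/*
 * Class 2 interrupts carry the application-level events: mailbox
 * traffic, SPU stop-and-signal, SPU halt and DMA tag group completion.
 * Each status bit is routed to the callback registered for it.
 */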
static irqreturn_t
spu_irq_class_2(int irq, void *data)
{
	struct spu *spu;
	unsigned long stat;
	unsigned long mask;

	spu = data;
	spin_lock(&spu->register_lock);
	stat = spu_int_stat_get(spu, 2);
	mask = spu_int_mask_get(spu, 2);
	/* ignore interrupts we're not waiting for */
	stat &= mask;
	/*
	 * mailbox interrupts (0x1 and 0x10) are level triggered.
	 * mask them now before acknowledging.
	 */
	if (stat & 0x11)
		spu_int_mask_and(spu, 2, ~(stat & 0x11));
	/* acknowledge all interrupts before the callbacks */
	spu_int_stat_clear(spu, 2, stat);
	spin_unlock(&spu->register_lock);

	pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat, mask);

	if (stat & 1) /* PPC core mailbox */
		spu->ibox_callback(spu);

	if (stat & 2) /* SPU stop-and-signal */
		spu->stop_callback(spu);

	if (stat & 4) /* SPU halted */
		spu->stop_callback(spu);

	if (stat & 8) /* DMA tag group complete */
		spu->mfc_callback(spu);

	if (stat & 0x10) /* SPU mailbox threshold */
		spu->wbox_callback(spu);

	spu->stats.class2_intr++;
	return stat ? IRQ_HANDLED : IRQ_NONE;
}
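
/*
 * Each SPU has one interrupt line per class.  Request them individually,
 * named "spe%02d.<class>", unwinding any already requested lines on
 * failure.
 */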
static int spu_request_irqs(struct spu *spu)
{
	int ret = 0;

	if (spu->irqs[0] != NO_IRQ) {
		snprintf(spu->irq_c0, sizeof (spu->irq_c0), "spe%02d.0",
			 spu->number);
		ret = request_irq(spu->irqs[0], spu_irq_class_0,
				  IRQF_DISABLED,
				  spu->irq_c0, spu);
		if (ret)
			goto bail0;
	}
	if (spu->irqs[1] != NO_IRQ) {
		snprintf(spu->irq_c1, sizeof (spu->irq_c1), "spe%02d.1",
			 spu->number);
		ret = request_irq(spu->irqs[1], spu_irq_class_1,
				  IRQF_DISABLED,
				  spu->irq_c1, spu);
		if (ret)
			goto bail1;
	}
	if (spu->irqs[2] != NO_IRQ) {
		snprintf(spu->irq_c2, sizeof (spu->irq_c2), "spe%02d.2",
			 spu->number);
		ret = request_irq(spu->irqs[2], spu_irq_class_2,
				  IRQF_DISABLED,
				  spu->irq_c2, spu);
		if (ret)
			goto bail2;
	}
	return 0;

bail2:
	if (spu->irqs[1] != NO_IRQ)
		free_irq(spu->irqs[1], spu);
bail1:
	if (spu->irqs[0] != NO_IRQ)
		free_irq(spu->irqs[0], spu);
bail0:
	return ret;
}

static void spu_free_irqs(struct spu *spu)
{
	if (spu->irqs[0] != NO_IRQ)
		free_irq(spu->irqs[0], spu);
	if (spu->irqs[1] != NO_IRQ)
		free_irq(spu->irqs[1], spu);
	if (spu->irqs[2] != NO_IRQ)
		free_irq(spu->irqs[2], spu);
}
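
/*
 * SPU channels are accessed indirectly through priv2: the channel index
 * is written to spu_chnlcntptr_RW, after which that channel's data and
 * count registers can be written.  zero_list clears the channel data,
 * count_list seeds the initial channel counts.
 */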
void spu_init_channels(struct spu *spu)
{
	static const struct {
		unsigned channel;
		unsigned count;
	} zero_list[] = {
		{ 0x00, 1, }, { 0x01, 1, }, { 0x03, 1, }, { 0x04, 1, },
		{ 0x18, 1, }, { 0x19, 1, }, { 0x1b, 1, }, { 0x1d, 1, },
	}, count_list[] = {
		{ 0x00, 0, }, { 0x03, 0, }, { 0x04, 0, }, { 0x15, 16, },
		{ 0x17, 1, }, { 0x18, 0, }, { 0x19, 0, }, { 0x1b, 0, },
		{ 0x1c, 1, }, { 0x1d, 0, }, { 0x1e, 1, },
	};
	struct spu_priv2 __iomem *priv2;
	int i;

	priv2 = spu->priv2;

	/* initialize all channel data to zero */
	for (i = 0; i < ARRAY_SIZE(zero_list); i++) {
		int count;

		out_be64(&priv2->spu_chnlcntptr_RW, zero_list[i].channel);
		for (count = 0; count < zero_list[i].count; count++)
			out_be64(&priv2->spu_chnldata_RW, 0);
	}

	/* initialize channel counts to meaningful values */
	for (i = 0; i < ARRAY_SIZE(count_list); i++) {
		out_be64(&priv2->spu_chnlcntptr_RW, count_list[i].channel);
		out_be64(&priv2->spu_chnlcnt_RW, count_list[i].count);
	}
}
EXPORT_SYMBOL_GPL(spu_init_channels);

static int spu_shutdown(struct sys_device *sysdev)
{
	struct spu *spu = container_of(sysdev, struct spu, sysdev);

	spu_free_irqs(spu);
	spu_destroy_spu(spu);
	return 0;
}

static struct sysdev_class spu_sysdev_class = {
	set_kset_name("spu"),
	.shutdown = spu_shutdown,
};
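
/*
 * The helpers below add or remove a sysfs attribute (or attribute group)
 * on every SPU in the system.  They walk spu_full_list under
 * spu_full_list_mutex, as the sysfs calls may sleep.
 */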
int spu_add_sysdev_attr(struct sysdev_attribute *attr)
{
	struct spu *spu;

	mutex_lock(&spu_full_list_mutex);
	list_for_each_entry(spu, &spu_full_list, full_list)
		sysdev_create_file(&spu->sysdev, attr);
	mutex_unlock(&spu_full_list_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(spu_add_sysdev_attr);

int spu_add_sysdev_attr_group(struct attribute_group *attrs)
{
	struct spu *spu;

	mutex_lock(&spu_full_list_mutex);
	list_for_each_entry(spu, &spu_full_list, full_list)
		sysfs_create_group(&spu->sysdev.kobj, attrs);
	mutex_unlock(&spu_full_list_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(spu_add_sysdev_attr_group);


void spu_remove_sysdev_attr(struct sysdev_attribute *attr)
{
	struct spu *spu;

	mutex_lock(&spu_full_list_mutex);
	list_for_each_entry(spu, &spu_full_list, full_list)
		sysdev_remove_file(&spu->sysdev, attr);
	mutex_unlock(&spu_full_list_mutex);
}
EXPORT_SYMBOL_GPL(spu_remove_sysdev_attr);

void spu_remove_sysdev_attr_group(struct attribute_group *attrs)
{
	struct spu *spu;

	mutex_lock(&spu_full_list_mutex);
	list_for_each_entry(spu, &spu_full_list, full_list)
		sysfs_remove_group(&spu->sysdev.kobj, attrs);
	mutex_unlock(&spu_full_list_mutex);
}
EXPORT_SYMBOL_GPL(spu_remove_sysdev_attr_group);

static int spu_create_sysdev(struct spu *spu)
{
	int ret;

	spu->sysdev.id = spu->number;
	spu->sysdev.cls = &spu_sysdev_class;
	ret = sysdev_register(&spu->sysdev);
	if (ret) {
		printk(KERN_ERR "Can't register SPU %d with sysfs\n",
				spu->number);
		return ret;
	}

	sysfs_add_device_to_node(&spu->sysdev, spu->node);

	return 0;
}
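
/*
 * Instantiate one SPU: allocate and number it, let the platform code set
 * it up (spu_create_spu), configure SDR and SR1, hook up the three
 * interrupt classes, register it with sysfs and finally link it into the
 * per-node and global SPU lists.
 */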
static int __init create_spu(void *data)
{
	struct spu *spu;
	int ret;
	static int number;
	unsigned long flags;
	struct timespec ts;

	ret = -ENOMEM;
	spu = kzalloc(sizeof (*spu), GFP_KERNEL);
	if (!spu)
		goto out;

	spu->alloc_state = SPU_FREE;

	spin_lock_init(&spu->register_lock);
	spin_lock(&spu_lock);
	spu->number = number++;
	spin_unlock(&spu_lock);

	ret = spu_create_spu(spu, data);

	if (ret)
		goto out_free;

	spu_mfc_sdr_setup(spu);
	spu_mfc_sr1_set(spu, 0x33);
	ret = spu_request_irqs(spu);
	if (ret)
		goto out_destroy;

	ret = spu_create_sysdev(spu);
	if (ret)
		goto out_free_irqs;

	mutex_lock(&cbe_spu_info[spu->node].list_mutex);
	list_add(&spu->cbe_list, &cbe_spu_info[spu->node].spus);
	cbe_spu_info[spu->node].n_spus++;
	mutex_unlock(&cbe_spu_info[spu->node].list_mutex);

	mutex_lock(&spu_full_list_mutex);
	spin_lock_irqsave(&spu_full_list_lock, flags);
	list_add(&spu->full_list, &spu_full_list);
	spin_unlock_irqrestore(&spu_full_list_lock, flags);
	mutex_unlock(&spu_full_list_mutex);

	spu->stats.util_state = SPU_UTIL_IDLE_LOADED;
	ktime_get_ts(&ts);
	spu->stats.tstamp = timespec_to_ns(&ts);

	INIT_LIST_HEAD(&spu->aff_list);

	goto out;

out_free_irqs:
	spu_free_irqs(spu);
out_destroy:
	spu_destroy_spu(spu);
out_free:
	kfree(spu);
out:
	return ret;
}

static const char *spu_state_names[] = {
	"user", "system", "iowait", "idle"
};

static unsigned long long spu_acct_time(struct spu *spu,
		enum spu_utilization_state state)
{
	struct timespec ts;
	unsigned long long time = spu->stats.times[state];

	/*
	 * If the spu is idle or the context is stopped, utilization
	 * statistics are not updated.  Apply the time delta from the
	 * last recorded state of the spu.
	 */
	if (spu->stats.util_state == state) {
		ktime_get_ts(&ts);
		time += timespec_to_ns(&ts) - spu->stats.tstamp;
	}

	return time / NSEC_PER_MSEC;
}
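
/*
 * Per-SPU "stat" attribute.  Fields, in order: current utilization
 * state, the accounted times (in milliseconds, see spu_acct_time()) for
 * user, system, iowait and idle-loaded, followed by the context switch
 * and fault counters kept in spu->stats.
 */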
static ssize_t spu_stat_show(struct sys_device *sysdev, char *buf)
{
	struct spu *spu = container_of(sysdev, struct spu, sysdev);

	return sprintf(buf, "%s %llu %llu %llu %llu "
		"%llu %llu %llu %llu %llu %llu %llu %llu\n",
		spu_state_names[spu->stats.util_state],
		spu_acct_time(spu, SPU_UTIL_USER),
		spu_acct_time(spu, SPU_UTIL_SYSTEM),
		spu_acct_time(spu, SPU_UTIL_IOWAIT),
		spu_acct_time(spu, SPU_UTIL_IDLE_LOADED),
		spu->stats.vol_ctx_switch,
		spu->stats.invol_ctx_switch,
		spu->stats.slb_flt,
		spu->stats.hash_flt,
		spu->stats.min_flt,
		spu->stats.maj_flt,
		spu->stats.class2_intr,
		spu->stats.libassist);
}

static SYSDEV_ATTR(stat, 0644, spu_stat_show, NULL);
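
/*
 * Module init: register the sysdev class and have the platform
 * management ops enumerate the physical SPUs; a positive return value
 * from spu_enumerate_spus() is taken to be the number of SPUs found.
 */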
static int __init init_spu_base(void)
{
	int i, ret = 0;

	for (i = 0; i < MAX_NUMNODES; i++) {
		mutex_init(&cbe_spu_info[i].list_mutex);
		INIT_LIST_HEAD(&cbe_spu_info[i].spus);
	}

	if (!spu_management_ops)
		goto out;

	/* create sysdev class for spus */
	ret = sysdev_class_register(&spu_sysdev_class);
	if (ret)
		goto out;

	ret = spu_enumerate_spus(create_spu);

	if (ret < 0) {
		printk(KERN_WARNING "%s: Error initializing spus\n",
			__FUNCTION__);
		goto out_unregister_sysdev_class;
	}

	if (ret > 0) {
		/*
		 * We cannot put the forward declaration in
		 * <linux/linux_logo.h> because it causes section type
		 * conflicts between const and __initdata with different
		 * compiler versions
		 */
		extern const struct linux_logo logo_spe_clut224;

		fb_append_extra_logo(&logo_spe_clut224, ret);
	}

	mutex_lock(&spu_full_list_mutex);
	xmon_register_spus(&spu_full_list);
	crash_register_spus(&spu_full_list);
	mutex_unlock(&spu_full_list_mutex);
	spu_add_sysdev_attr(&attr_stat);

	spu_init_affinity();

	return 0;

out_unregister_sysdev_class:
	sysdev_class_unregister(&spu_sysdev_class);
out:
	return ret;
}
module_init(init_spu_base);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");