/*
 * Low-level SPU handling
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/linux_logo.h>
#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/spu_csa.h>
#include <asm/xmon.h>
#include <asm/prom.h>

const struct spu_management_ops *spu_management_ops;
EXPORT_SYMBOL_GPL(spu_management_ops);

const struct spu_priv1_ops *spu_priv1_ops;
EXPORT_SYMBOL_GPL(spu_priv1_ops);

struct cbe_spu_info cbe_spu_info[MAX_NUMNODES];
EXPORT_SYMBOL_GPL(cbe_spu_info);

/*
 * The spufs fault-handling code needs to call force_sig_info to raise signals
 * on DMA errors. Export it here to avoid general kernel-wide access to this
 * function.
 */
EXPORT_SYMBOL_GPL(force_sig_info);

/*
 * Protects cbe_spu_info and spu->number.
 */
static DEFINE_SPINLOCK(spu_lock);

/*
 * List of all spus in the system.
 *
 * This list is iterated by callers from irq context and callers that
 * want to sleep.  Thus modifications need to be done with both
 * spu_full_list_lock and spu_full_list_mutex held, while iterating
 * through it requires either of these locks.
 *
 * In addition spu_full_list_lock protects all assignments to
 * spu->mm.
 */
static LIST_HEAD(spu_full_list);
static DEFINE_SPINLOCK(spu_full_list_lock);
static DEFINE_MUTEX(spu_full_list_mutex);

struct spu_slb {
        u64 esid, vsid;
};

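/*
 * Invalidate all SLB entries on one SPE's MFC. The hardware is only
 * touched when address relocation is enabled in MFC_SR1.
 */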
void spu_invalidate_slbs(struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;
        unsigned long flags;

        spin_lock_irqsave(&spu->register_lock, flags);
        if (spu_mfc_sr1_get(spu) & MFC_STATE1_RELOCATE_MASK)
                out_be64(&priv2->slb_invalidate_all_W, 0UL);
        spin_unlock_irqrestore(&spu->register_lock, flags);
}
EXPORT_SYMBOL_GPL(spu_invalidate_slbs);

/* This is called by the MM core when a segment size is changed, to
 * request a flush of all the SPEs using a given mm
 */
void spu_flush_all_slbs(struct mm_struct *mm)
{
        struct spu *spu;
        unsigned long flags;

        spin_lock_irqsave(&spu_full_list_lock, flags);
        list_for_each_entry(spu, &spu_full_list, full_list) {
                if (spu->mm == mm)
                        spu_invalidate_slbs(spu);
        }
        spin_unlock_irqrestore(&spu_full_list_lock, flags);
}

/* The hack below stinks... try to do something better one of
 * these days... Does it even work properly with NR_CPUS == 1 ?
 */
static inline void mm_needs_global_tlbie(struct mm_struct *mm)
{
        int nr = (NR_CPUS > 1) ? NR_CPUS : NR_CPUS + 1;

        /* Global TLBIE broadcast required with SPEs. */
        __cpus_setall(&mm->cpu_vm_mask, nr);
}

void spu_associate_mm(struct spu *spu, struct mm_struct *mm)
{
        unsigned long flags;

        spin_lock_irqsave(&spu_full_list_lock, flags);
        spu->mm = mm;
        spin_unlock_irqrestore(&spu_full_list_lock, flags);
        if (mm)
                mm_needs_global_tlbie(mm);
}
EXPORT_SYMBOL_GPL(spu_associate_mm);

int spu_64k_pages_available(void)
{
        return mmu_psize_defs[MMU_PAGE_64K].shift != 0;
}
EXPORT_SYMBOL_GPL(spu_64k_pages_available);

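/*
 * Kick the MFC to resume processing of a suspended DMA queue, unless a
 * context switch is currently pending on this SPU.
 */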
static void spu_restart_dma(struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        if (!test_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags))
                out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
}

static inline void spu_load_slb(struct spu *spu, int slbe, struct spu_slb *slb)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        pr_debug("%s: adding SLB[%d] 0x%016lx 0x%016lx\n",
                        __func__, slbe, slb->vsid, slb->esid);

        out_be64(&priv2->slb_index_W, slbe);
        /* set invalid before writing vsid */
        out_be64(&priv2->slb_esid_RW, 0);
        /* now it's safe to write the vsid */
        out_be64(&priv2->slb_vsid_RW, slb->vsid);
        /* setting the new esid makes the entry valid again */
        out_be64(&priv2->slb_esid_RW, slb->esid);
}

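/*
 * Handle an SPE segment fault: build an SLB entry for the faulting
 * effective address, load it into the next round-robin replacement slot
 * and restart the stalled DMA.
 */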
static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
{
        struct mm_struct *mm = spu->mm;
        struct spu_slb slb;
        int psize;

        pr_debug("%s\n", __func__);

        slb.esid = (ea & ESID_MASK) | SLB_ESID_V;

        switch(REGION_ID(ea)) {
        case USER_REGION_ID:
#ifdef CONFIG_PPC_MM_SLICES
                psize = get_slice_psize(mm, ea);
#else
                psize = mm->context.user_psize;
#endif
                slb.vsid = (get_vsid(mm->context.id, ea, MMU_SEGSIZE_256M)
                                << SLB_VSID_SHIFT) | SLB_VSID_USER;
                break;
        case VMALLOC_REGION_ID:
                if (ea < VMALLOC_END)
                        psize = mmu_vmalloc_psize;
                else
                        psize = mmu_io_psize;
                slb.vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M)
                                << SLB_VSID_SHIFT) | SLB_VSID_KERNEL;
                break;
        case KERNEL_REGION_ID:
                psize = mmu_linear_psize;
                slb.vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M)
                                << SLB_VSID_SHIFT) | SLB_VSID_KERNEL;
                break;
        default:
                /* Future: support kernel segments so that drivers
                 * can use SPUs.
                 */
                pr_debug("invalid region access at %016lx\n", ea);
                return 1;
        }
        slb.vsid |= mmu_psize_defs[psize].sllp;

        spu_load_slb(spu, spu->slb_replace, &slb);

        spu->slb_replace++;
        if (spu->slb_replace >= 8)
                spu->slb_replace = 0;

        spu_restart_dma(spu);
        spu->stats.slb_flt++;
        return 0;
}

extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap); //XXX
static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
{
        pr_debug("%s, %lx, %lx\n", __func__, dsisr, ea);

        /* Handle kernel space hash faults immediately.
           User hash faults need to be deferred to process context. */
        if ((dsisr & MFC_DSISR_PTE_NOT_FOUND)
            && REGION_ID(ea) != USER_REGION_ID
            && hash_page(ea, _PAGE_PRESENT, 0x300) == 0) {
                spu_restart_dma(spu);
                return 0;
        }

        spu->class_0_pending = 0;
        spu->dar = ea;
        spu->dsisr = dsisr;

        spu->stop_callback(spu);

        return 0;
}

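/*
 * Build an SLB entry for a kernel-space address, using the linear-mapping
 * page size for the kernel region and the virtual page size otherwise.
 */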
static void __spu_kernel_slb(void *addr, struct spu_slb *slb)
{
        unsigned long ea = (unsigned long)addr;
        u64 llp;

        if (REGION_ID(ea) == KERNEL_REGION_ID)
                llp = mmu_psize_defs[mmu_linear_psize].sllp;
        else
                llp = mmu_psize_defs[mmu_virtual_psize].sllp;

        slb->vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M) << SLB_VSID_SHIFT) |
                SLB_VSID_KERNEL | llp;
        slb->esid = (ea & ESID_MASK) | SLB_ESID_V;
}

/**
 * Given an array of @nr_slbs SLB entries, @slbs, return non-zero if the
 * address @new_addr is present.
 */
static inline int __slb_present(struct spu_slb *slbs, int nr_slbs,
                void *new_addr)
{
        unsigned long ea = (unsigned long)new_addr;
        int i;

        for (i = 0; i < nr_slbs; i++)
                if (!((slbs[i].esid ^ ea) & ESID_MASK))
                        return 1;

        return 0;
}

/**
 * Set up the SPU kernel SLBs, in preparation for a context save/restore. We
 * need to map both the context save area, and the save/restore code.
 *
 * Because the lscsa and code may cross segment boundaries, we check to see
 * if mappings are required for the start and end of each range. We currently
 * assume that the mappings are smaller than one segment - if not, something
 * is seriously wrong.
 */
void spu_setup_kernel_slbs(struct spu *spu, struct spu_lscsa *lscsa,
                void *code, int code_size)
{
        struct spu_slb slbs[4];
        int i, nr_slbs = 0;
        /* start and end addresses of both mappings */
        void *addrs[] = {
                lscsa, (void *)lscsa + sizeof(*lscsa) - 1,
                code, code + code_size - 1
        };

        /* check the set of addresses, and create a new entry in the slbs array
         * if there isn't already an SLB for that address */
        for (i = 0; i < ARRAY_SIZE(addrs); i++) {
                if (__slb_present(slbs, nr_slbs, addrs[i]))
                        continue;

                __spu_kernel_slb(addrs[i], &slbs[nr_slbs]);
                nr_slbs++;
        }

        spin_lock_irq(&spu->register_lock);
        /* Add the set of SLBs */
        for (i = 0; i < nr_slbs; i++)
                spu_load_slb(spu, i, &slbs[i]);
        spin_unlock_irq(&spu->register_lock);
}
EXPORT_SYMBOL_GPL(spu_setup_kernel_slbs);

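/*
 * Class 0 interrupts report SPU and MFC error conditions. Latch the
 * pending status and fault registers under register_lock, then let the
 * owner of the context deal with it via stop_callback().
 */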
static irqreturn_t
spu_irq_class_0(int irq, void *data)
{
        struct spu *spu;
        unsigned long stat, mask;

        spu = data;

        spin_lock(&spu->register_lock);
        mask = spu_int_mask_get(spu, 0);
        stat = spu_int_stat_get(spu, 0) & mask;

        spu->class_0_pending |= stat;
        spu->dsisr = spu_mfc_dsisr_get(spu);
        spu->dar = spu_mfc_dar_get(spu);
        spin_unlock(&spu->register_lock);

        spu->stop_callback(spu);

        spu_int_stat_clear(spu, 0, stat);

        return IRQ_HANDLED;
}

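/*
 * Class 1 interrupts report MFC address-translation faults (segment and
 * storage faults raised by DMA transfers).
 */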
static irqreturn_t
spu_irq_class_1(int irq, void *data)
{
        struct spu *spu;
        unsigned long stat, mask, dar, dsisr;

        spu = data;

        /* atomically read & clear class1 status. */
        spin_lock(&spu->register_lock);
        mask = spu_int_mask_get(spu, 1);
        stat = spu_int_stat_get(spu, 1) & mask;
        dar = spu_mfc_dar_get(spu);
        dsisr = spu_mfc_dsisr_get(spu);
        if (stat & CLASS1_STORAGE_FAULT_INTR)
                spu_mfc_dsisr_set(spu, 0ul);
        spu_int_stat_clear(spu, 1, stat);

        if (stat & CLASS1_SEGMENT_FAULT_INTR)
                __spu_trap_data_seg(spu, dar);

        spin_unlock(&spu->register_lock);
        pr_debug("%s: %lx %lx %lx %lx\n", __func__, mask, stat,
                        dar, dsisr);

        if (stat & CLASS1_STORAGE_FAULT_INTR)
                __spu_trap_data_map(spu, dar, dsisr);

        if (stat & CLASS1_LS_COMPARE_SUSPEND_ON_GET_INTR)
                ;

        if (stat & CLASS1_LS_COMPARE_SUSPEND_ON_PUT_INTR)
                ;

        return stat ? IRQ_HANDLED : IRQ_NONE;
}

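/*
 * Class 2 interrupts cover mailbox traffic, SPU stop and halt events and
 * DMA tag-group completion; each source is dispatched to the matching
 * spufs callback.
 */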
static irqreturn_t
spu_irq_class_2(int irq, void *data)
{
        struct spu *spu;
        unsigned long stat;
        unsigned long mask;
        const int mailbox_intrs =
                CLASS2_MAILBOX_THRESHOLD_INTR | CLASS2_MAILBOX_INTR;

        spu = data;
        spin_lock(&spu->register_lock);
        stat = spu_int_stat_get(spu, 2);
        mask = spu_int_mask_get(spu, 2);
        /* ignore interrupts we're not waiting for */
        stat &= mask;

        /* mailbox interrupts are level triggered. mask them now before
         * acknowledging */
        if (stat & mailbox_intrs)
                spu_int_mask_and(spu, 2, ~(stat & mailbox_intrs));
        /* acknowledge all interrupts before the callbacks */
        spu_int_stat_clear(spu, 2, stat);
        spin_unlock(&spu->register_lock);

        pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat, mask);

        if (stat & CLASS2_MAILBOX_INTR)
                spu->ibox_callback(spu);

        if (stat & CLASS2_SPU_STOP_INTR)
                spu->stop_callback(spu);

        if (stat & CLASS2_SPU_HALT_INTR)
                spu->stop_callback(spu);

        if (stat & CLASS2_SPU_DMA_TAG_GROUP_COMPLETE_INTR)
                spu->mfc_callback(spu);

        if (stat & CLASS2_MAILBOX_THRESHOLD_INTR)
                spu->wbox_callback(spu);

        spu->stats.class2_intr++;
        return stat ? IRQ_HANDLED : IRQ_NONE;
}

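/*
 * Register handlers for the three per-SPE interrupt lines. Each line is
 * optional; on failure, any lines already requested are released again.
 */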
static int spu_request_irqs(struct spu *spu)
{
        int ret = 0;

        if (spu->irqs[0] != NO_IRQ) {
                snprintf(spu->irq_c0, sizeof (spu->irq_c0), "spe%02d.0",
                         spu->number);
                ret = request_irq(spu->irqs[0], spu_irq_class_0,
                                  IRQF_DISABLED,
                                  spu->irq_c0, spu);
                if (ret)
                        goto bail0;
        }
        if (spu->irqs[1] != NO_IRQ) {
                snprintf(spu->irq_c1, sizeof (spu->irq_c1), "spe%02d.1",
                         spu->number);
                ret = request_irq(spu->irqs[1], spu_irq_class_1,
                                  IRQF_DISABLED,
                                  spu->irq_c1, spu);
                if (ret)
                        goto bail1;
        }
        if (spu->irqs[2] != NO_IRQ) {
                snprintf(spu->irq_c2, sizeof (spu->irq_c2), "spe%02d.2",
                         spu->number);
                ret = request_irq(spu->irqs[2], spu_irq_class_2,
                                  IRQF_DISABLED,
                                  spu->irq_c2, spu);
                if (ret)
                        goto bail2;
        }
        return 0;

bail2:
        if (spu->irqs[1] != NO_IRQ)
                free_irq(spu->irqs[1], spu);
bail1:
        if (spu->irqs[0] != NO_IRQ)
                free_irq(spu->irqs[0], spu);
bail0:
        return ret;
}

static void spu_free_irqs(struct spu *spu)
{
        if (spu->irqs[0] != NO_IRQ)
                free_irq(spu->irqs[0], spu);
        if (spu->irqs[1] != NO_IRQ)
                free_irq(spu->irqs[1], spu);
        if (spu->irqs[2] != NO_IRQ)
                free_irq(spu->irqs[2], spu);
}

void spu_init_channels(struct spu *spu)
{
        static const struct {
                unsigned channel;
                unsigned count;
        } zero_list[] = {
                { 0x00, 1, }, { 0x01, 1, }, { 0x03, 1, }, { 0x04, 1, },
                { 0x18, 1, }, { 0x19, 1, }, { 0x1b, 1, }, { 0x1d, 1, },
        }, count_list[] = {
                { 0x00, 0, }, { 0x03, 0, }, { 0x04, 0, }, { 0x15, 16, },
                { 0x17, 1, }, { 0x18, 0, }, { 0x19, 0, }, { 0x1b, 0, },
                { 0x1c, 1, }, { 0x1d, 0, }, { 0x1e, 1, },
        };
        struct spu_priv2 __iomem *priv2;
        int i;

        priv2 = spu->priv2;

        /* initialize all channel data to zero */
        for (i = 0; i < ARRAY_SIZE(zero_list); i++) {
                int count;

                out_be64(&priv2->spu_chnlcntptr_RW, zero_list[i].channel);
                for (count = 0; count < zero_list[i].count; count++)
                        out_be64(&priv2->spu_chnldata_RW, 0);
        }

        /* initialize channel counts to meaningful values */
        for (i = 0; i < ARRAY_SIZE(count_list); i++) {
                out_be64(&priv2->spu_chnlcntptr_RW, count_list[i].channel);
                out_be64(&priv2->spu_chnlcnt_RW, count_list[i].count);
        }
}
EXPORT_SYMBOL_GPL(spu_init_channels);

static int spu_shutdown(struct sys_device *sysdev)
{
        struct spu *spu = container_of(sysdev, struct spu, sysdev);

        spu_free_irqs(spu);
        spu_destroy_spu(spu);
        return 0;
}

static struct sysdev_class spu_sysdev_class = {
        .name = "spu",
        .shutdown = spu_shutdown,
};

int spu_add_sysdev_attr(struct sysdev_attribute *attr)
{
        struct spu *spu;

        mutex_lock(&spu_full_list_mutex);
        list_for_each_entry(spu, &spu_full_list, full_list)
                sysdev_create_file(&spu->sysdev, attr);
        mutex_unlock(&spu_full_list_mutex);

        return 0;
}
EXPORT_SYMBOL_GPL(spu_add_sysdev_attr);

int spu_add_sysdev_attr_group(struct attribute_group *attrs)
{
        struct spu *spu;
        int rc = 0;

        mutex_lock(&spu_full_list_mutex);
        list_for_each_entry(spu, &spu_full_list, full_list) {
                rc = sysfs_create_group(&spu->sysdev.kobj, attrs);

                /* we're in trouble here, but try unwinding anyway */
                if (rc) {
                        printk(KERN_ERR "%s: can't create sysfs group '%s'\n",
                                        __func__, attrs->name);

                        list_for_each_entry_continue_reverse(spu,
                                        &spu_full_list, full_list)
                                sysfs_remove_group(&spu->sysdev.kobj, attrs);
                        break;
                }
        }

        mutex_unlock(&spu_full_list_mutex);

        return rc;
}
EXPORT_SYMBOL_GPL(spu_add_sysdev_attr_group);


void spu_remove_sysdev_attr(struct sysdev_attribute *attr)
{
        struct spu *spu;

        mutex_lock(&spu_full_list_mutex);
        list_for_each_entry(spu, &spu_full_list, full_list)
                sysdev_remove_file(&spu->sysdev, attr);
        mutex_unlock(&spu_full_list_mutex);
}
EXPORT_SYMBOL_GPL(spu_remove_sysdev_attr);

void spu_remove_sysdev_attr_group(struct attribute_group *attrs)
{
        struct spu *spu;

        mutex_lock(&spu_full_list_mutex);
        list_for_each_entry(spu, &spu_full_list, full_list)
                sysfs_remove_group(&spu->sysdev.kobj, attrs);
        mutex_unlock(&spu_full_list_mutex);
}
EXPORT_SYMBOL_GPL(spu_remove_sysdev_attr_group);

static int spu_create_sysdev(struct spu *spu)
{
        int ret;

        spu->sysdev.id = spu->number;
        spu->sysdev.cls = &spu_sysdev_class;
        ret = sysdev_register(&spu->sysdev);
        if (ret) {
                printk(KERN_ERR "Can't register SPU %d with sysfs\n",
                                spu->number);
                return ret;
        }

        sysfs_add_device_to_node(&spu->sysdev, spu->node);

        return 0;
}

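/*
 * Set up one physical SPU: allocate the structure, assign its number,
 * request interrupts, register it with sysfs and add it to the per-node
 * and global lists. Called via spu_enumerate_spus() from init_spu_base().
 */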
static int __init create_spu(void *data)
{
        struct spu *spu;
        int ret;
        static int number;
        unsigned long flags;
        struct timespec ts;

        ret = -ENOMEM;
        spu = kzalloc(sizeof (*spu), GFP_KERNEL);
        if (!spu)
                goto out;

        spu->alloc_state = SPU_FREE;

        spin_lock_init(&spu->register_lock);
        spin_lock(&spu_lock);
        spu->number = number++;
        spin_unlock(&spu_lock);

        ret = spu_create_spu(spu, data);

        if (ret)
                goto out_free;

        spu_mfc_sdr_setup(spu);
        spu_mfc_sr1_set(spu, 0x33);
        ret = spu_request_irqs(spu);
        if (ret)
                goto out_destroy;

        ret = spu_create_sysdev(spu);
        if (ret)
                goto out_free_irqs;

        mutex_lock(&cbe_spu_info[spu->node].list_mutex);
        list_add(&spu->cbe_list, &cbe_spu_info[spu->node].spus);
        cbe_spu_info[spu->node].n_spus++;
        mutex_unlock(&cbe_spu_info[spu->node].list_mutex);

        mutex_lock(&spu_full_list_mutex);
        spin_lock_irqsave(&spu_full_list_lock, flags);
        list_add(&spu->full_list, &spu_full_list);
        spin_unlock_irqrestore(&spu_full_list_lock, flags);
        mutex_unlock(&spu_full_list_mutex);

        spu->stats.util_state = SPU_UTIL_IDLE_LOADED;
        ktime_get_ts(&ts);
        spu->stats.tstamp = timespec_to_ns(&ts);

        INIT_LIST_HEAD(&spu->aff_list);

        goto out;

out_free_irqs:
        spu_free_irqs(spu);
out_destroy:
        spu_destroy_spu(spu);
out_free:
        kfree(spu);
out:
        return ret;
}

static const char *spu_state_names[] = {
        "user", "system", "iowait", "idle"
};

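/*
 * Return the accumulated time, in milliseconds, that the SPU has spent
 * in the given utilization state.
 */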
static unsigned long long spu_acct_time(struct spu *spu,
                enum spu_utilization_state state)
{
        struct timespec ts;
        unsigned long long time = spu->stats.times[state];

        /*
         * If the spu is idle or the context is stopped, utilization
         * statistics are not updated.  Apply the time delta from the
         * last recorded state of the spu.
         */
        if (spu->stats.util_state == state) {
                ktime_get_ts(&ts);
                time += timespec_to_ns(&ts) - spu->stats.tstamp;
        }

        return time / NSEC_PER_MSEC;
}


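/*
 * sysfs 'stat' attribute: one line with the current utilization state
 * followed by the per-state times (in milliseconds), the context-switch
 * and fault counters, the class 2 interrupt count and the libassist
 * counter.
 */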
static ssize_t spu_stat_show(struct sys_device *sysdev, char *buf)
{
        struct spu *spu = container_of(sysdev, struct spu, sysdev);

        return sprintf(buf, "%s %llu %llu %llu %llu "
                "%llu %llu %llu %llu %llu %llu %llu %llu\n",
                spu_state_names[spu->stats.util_state],
                spu_acct_time(spu, SPU_UTIL_USER),
                spu_acct_time(spu, SPU_UTIL_SYSTEM),
                spu_acct_time(spu, SPU_UTIL_IOWAIT),
                spu_acct_time(spu, SPU_UTIL_IDLE_LOADED),
                spu->stats.vol_ctx_switch,
                spu->stats.invol_ctx_switch,
                spu->stats.slb_flt,
                spu->stats.hash_flt,
                spu->stats.min_flt,
                spu->stats.maj_flt,
                spu->stats.class2_intr,
                spu->stats.libassist);
}

static SYSDEV_ATTR(stat, 0644, spu_stat_show, NULL);

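/*
 * Module initialization: register the sysdev class, enumerate all SPUs
 * through the platform management ops, hook them up to xmon and the
 * crash-dump code, and set up the affinity information.
 */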
static int __init init_spu_base(void)
{
        int i, ret = 0;

        for (i = 0; i < MAX_NUMNODES; i++) {
                mutex_init(&cbe_spu_info[i].list_mutex);
                INIT_LIST_HEAD(&cbe_spu_info[i].spus);
        }

        if (!spu_management_ops)
                goto out;

        /* create sysdev class for spus */
        ret = sysdev_class_register(&spu_sysdev_class);
        if (ret)
                goto out;

        ret = spu_enumerate_spus(create_spu);

        if (ret < 0) {
                printk(KERN_WARNING "%s: Error initializing spus\n",
                        __func__);
                goto out_unregister_sysdev_class;
        }

        if (ret > 0) {
                /*
                 * We cannot put the forward declaration in
                 * <linux/linux_logo.h> because of section type conflicts
                 * between const and __initdata with different compiler
                 * versions.
                 */
                extern const struct linux_logo logo_spe_clut224;

                fb_append_extra_logo(&logo_spe_clut224, ret);
        }

        mutex_lock(&spu_full_list_mutex);
        xmon_register_spus(&spu_full_list);
        crash_register_spus(&spu_full_list);
        mutex_unlock(&spu_full_list_mutex);
        spu_add_sysdev_attr(&attr_stat);

        spu_init_affinity();

        return 0;

out_unregister_sysdev_class:
        sysdev_class_unregister(&spu_sysdev_class);
out:
        return ret;
}
module_init(init_spu_base);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");