/*
 * Low-level SPU handling
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/linux_logo.h>
#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/spu_csa.h>
#include <asm/xmon.h>
#include <asm/prom.h>

const struct spu_management_ops *spu_management_ops;
EXPORT_SYMBOL_GPL(spu_management_ops);

const struct spu_priv1_ops *spu_priv1_ops;
EXPORT_SYMBOL_GPL(spu_priv1_ops);

struct cbe_spu_info cbe_spu_info[MAX_NUMNODES];
EXPORT_SYMBOL_GPL(cbe_spu_info);

/*
 * The spufs fault-handling code needs to call force_sig_info to raise signals
 * on DMA errors. Export it here to avoid general kernel-wide access to this
 * function
 */
EXPORT_SYMBOL_GPL(force_sig_info);

/*
 * Protects cbe_spu_info and spu->number.
 */
static DEFINE_SPINLOCK(spu_lock);

/*
 * List of all spus in the system.
 *
 * This list is iterated by callers from irq context and callers that
 * want to sleep.  Thus modifications need to be done with both
 * spu_full_list_lock and spu_full_list_mutex held, while iterating
 * through it requires either of these locks.
 *
 * In addition spu_full_list_lock protects all assignments to
 * spu->mm.
 */
static LIST_HEAD(spu_full_list);
static DEFINE_SPINLOCK(spu_full_list_lock);
static DEFINE_MUTEX(spu_full_list_mutex);
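
/*
 * For illustration (not called anywhere; do_something_that_may_sleep()
 * is hypothetical), a sleeping caller walks the list as
 *
 *      mutex_lock(&spu_full_list_mutex);
 *      list_for_each_entry(spu, &spu_full_list, full_list)
 *              do_something_that_may_sleep(spu);
 *      mutex_unlock(&spu_full_list_mutex);
 *
 * as spu_add_sysdev_attr() does below, while irq-context callers take
 * spu_full_list_lock instead, as spu_flush_all_slbs() does.
 */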

struct spu_slb {
        u64 esid, vsid;
};

void spu_invalidate_slbs(struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;
        unsigned long flags;

        spin_lock_irqsave(&spu->register_lock, flags);
        if (spu_mfc_sr1_get(spu) & MFC_STATE1_RELOCATE_MASK)
                out_be64(&priv2->slb_invalidate_all_W, 0UL);
        spin_unlock_irqrestore(&spu->register_lock, flags);
}
EXPORT_SYMBOL_GPL(spu_invalidate_slbs);

/* This is called by the MM core when a segment size is changed, to
 * request a flush of all the SPEs using a given mm
 */
void spu_flush_all_slbs(struct mm_struct *mm)
{
        struct spu *spu;
        unsigned long flags;

        spin_lock_irqsave(&spu_full_list_lock, flags);
        list_for_each_entry(spu, &spu_full_list, full_list) {
                if (spu->mm == mm)
                        spu_invalidate_slbs(spu);
        }
        spin_unlock_irqrestore(&spu_full_list_lock, flags);
}

/* The hack below stinks... try to do something better one of
 * these days... Does it even work properly with NR_CPUS == 1 ?
 */
static inline void mm_needs_global_tlbie(struct mm_struct *mm)
{
        int nr = (NR_CPUS > 1) ? NR_CPUS : NR_CPUS + 1;

        /* Global TLBIE broadcast required with SPEs. */
        bitmap_fill(cpumask_bits(mm_cpumask(mm)), nr);
}
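
/*
 * Filling more than one cpumask bit above appears intended to make the
 * mm look like it has run on several CPUs, so that the MMU code always
 * broadcasts tlbie rather than using the CPU-local tlbiel, which would
 * leave the SPE translations stale.
 */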

void spu_associate_mm(struct spu *spu, struct mm_struct *mm)
{
        unsigned long flags;

        spin_lock_irqsave(&spu_full_list_lock, flags);
        spu->mm = mm;
        spin_unlock_irqrestore(&spu_full_list_lock, flags);
        if (mm)
                mm_needs_global_tlbie(mm);
}
EXPORT_SYMBOL_GPL(spu_associate_mm);

int spu_64k_pages_available(void)
{
        return mmu_psize_defs[MMU_PAGE_64K].shift != 0;
}
EXPORT_SYMBOL_GPL(spu_64k_pages_available);

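/*
 * Restart the SPU's suspended MFC DMA queue.  If a context switch is
 * pending we must not poke the MFC directly; record a pending fault
 * instead, for the context-switch code to act on.
 */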
static void spu_restart_dma(struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        if (!test_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags))
                out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
        else {
                set_bit(SPU_CONTEXT_FAULT_PENDING, &spu->flags);
                mb();
        }
}

static inline void spu_load_slb(struct spu *spu, int slbe, struct spu_slb *slb)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        pr_debug("%s: adding SLB[%d] 0x%016llx 0x%016llx\n",
                        __func__, slbe, slb->vsid, slb->esid);

        out_be64(&priv2->slb_index_W, slbe);
        /* set invalid before writing vsid */
        out_be64(&priv2->slb_esid_RW, 0);
        /* now it's safe to write the vsid */
        out_be64(&priv2->slb_vsid_RW, slb->vsid);
        /* setting the new esid makes the entry valid again */
        out_be64(&priv2->slb_esid_RW, slb->esid);
}

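/*
 * Handle an SPU segment fault: build an SLB entry for the faulting
 * effective address, install it at the round-robin replacement slot
 * and restart the suspended DMA.  Returns 0 on success, or 1 for an
 * access outside the supported regions.
 */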
static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
{
        struct mm_struct *mm = spu->mm;
        struct spu_slb slb;
        int psize;

        pr_debug("%s\n", __func__);

        slb.esid = (ea & ESID_MASK) | SLB_ESID_V;

        switch(REGION_ID(ea)) {
        case USER_REGION_ID:
#ifdef CONFIG_PPC_MM_SLICES
                psize = get_slice_psize(mm, ea);
#else
                psize = mm->context.user_psize;
#endif
                slb.vsid = (get_vsid(mm->context.id, ea, MMU_SEGSIZE_256M)
                                << SLB_VSID_SHIFT) | SLB_VSID_USER;
                break;
        case VMALLOC_REGION_ID:
                if (ea < VMALLOC_END)
                        psize = mmu_vmalloc_psize;
                else
                        psize = mmu_io_psize;
                slb.vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M)
                                << SLB_VSID_SHIFT) | SLB_VSID_KERNEL;
                break;
        case KERNEL_REGION_ID:
                psize = mmu_linear_psize;
                slb.vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M)
                                << SLB_VSID_SHIFT) | SLB_VSID_KERNEL;
                break;
        default:
                /* Future: support kernel segments so that drivers
                 * can use SPUs.
                 */
                pr_debug("invalid region access at %016lx\n", ea);
                return 1;
        }
        slb.vsid |= mmu_psize_defs[psize].sllp;

        spu_load_slb(spu, spu->slb_replace, &slb);

        spu->slb_replace++;
        if (spu->slb_replace >= 8)
                spu->slb_replace = 0;

        spu_restart_dma(spu);
        spu->stats.slb_flt++;
        return 0;
}

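/*
 * __spu_trap_data_map() below handles an SPU mapping fault: hash
 * faults on kernel addresses are resolved right here (register_lock is
 * dropped around hash_page()); anything else is recorded in
 * class_1_dar/class_1_dsisr and handed to the owning context via the
 * stop_callback.
 */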
extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap); //XXX
static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
{
        int ret;

        pr_debug("%s, %llx, %lx\n", __func__, dsisr, ea);

        /*
         * Handle kernel space hash faults immediately. User hash
         * faults need to be deferred to process context.
         */
        if ((dsisr & MFC_DSISR_PTE_NOT_FOUND) &&
            (REGION_ID(ea) != USER_REGION_ID)) {

                spin_unlock(&spu->register_lock);
                ret = hash_page(ea, _PAGE_PRESENT, 0x300);
                spin_lock(&spu->register_lock);

                if (!ret) {
                        spu_restart_dma(spu);
                        return 0;
                }
        }

        spu->class_1_dar = ea;
        spu->class_1_dsisr = dsisr;

        spu->stop_callback(spu, 1);

        spu->class_1_dar = 0;
        spu->class_1_dsisr = 0;

        return 0;
}

static void __spu_kernel_slb(void *addr, struct spu_slb *slb)
{
        unsigned long ea = (unsigned long)addr;
        u64 llp;

        if (REGION_ID(ea) == KERNEL_REGION_ID)
                llp = mmu_psize_defs[mmu_linear_psize].sllp;
        else
                llp = mmu_psize_defs[mmu_virtual_psize].sllp;

        slb->vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M) << SLB_VSID_SHIFT) |
                SLB_VSID_KERNEL | llp;
        slb->esid = (ea & ESID_MASK) | SLB_ESID_V;
}

/**
 * Given an array of @nr_slbs SLB entries, @slbs, return non-zero if the
 * address @new_addr is present.
 */
static inline int __slb_present(struct spu_slb *slbs, int nr_slbs,
                void *new_addr)
{
        unsigned long ea = (unsigned long)new_addr;
        int i;

        for (i = 0; i < nr_slbs; i++)
                if (!((slbs[i].esid ^ ea) & ESID_MASK))
                        return 1;

        return 0;
}

/**
 * Setup the SPU kernel SLBs, in preparation for a context save/restore. We
 * need to map both the context save area, and the save/restore code.
 *
 * Because the lscsa and code may cross segment boundaries, we check to see
 * if mappings are required for the start and end of each range. We currently
 * assume that the mappings are smaller than one segment - if not, something
 * is seriously wrong.
 */
void spu_setup_kernel_slbs(struct spu *spu, struct spu_lscsa *lscsa,
                void *code, int code_size)
{
        struct spu_slb slbs[4];
        int i, nr_slbs = 0;
        /* start and end addresses of both mappings */
        void *addrs[] = {
                lscsa, (void *)lscsa + sizeof(*lscsa) - 1,
                code, code + code_size - 1
        };

        /* check the set of addresses, and create a new entry in the slbs array
         * if there isn't already a SLB for that address */
        for (i = 0; i < ARRAY_SIZE(addrs); i++) {
                if (__slb_present(slbs, nr_slbs, addrs[i]))
                        continue;

                __spu_kernel_slb(addrs[i], &slbs[nr_slbs]);
                nr_slbs++;
        }

        spin_lock_irq(&spu->register_lock);
        /* Add the set of SLBs */
        for (i = 0; i < nr_slbs; i++)
                spu_load_slb(spu, i, &slbs[i]);
        spin_unlock_irq(&spu->register_lock);
}
EXPORT_SYMBOL_GPL(spu_setup_kernel_slbs);

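/*
 * SPU interrupts arrive in three classes: class 0 signals error
 * conditions, class 1 signals translation (segment and mapping)
 * faults, and class 2 signals mailbox, stop/halt and MFC tag-group
 * completion events.  One handler per class follows.
 */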
static irqreturn_t
spu_irq_class_0(int irq, void *data)
{
        struct spu *spu;
        unsigned long stat, mask;

        spu = data;

        spin_lock(&spu->register_lock);
        mask = spu_int_mask_get(spu, 0);
        stat = spu_int_stat_get(spu, 0) & mask;

        spu->class_0_pending |= stat;
        spu->class_0_dar = spu_mfc_dar_get(spu);
        spu->stop_callback(spu, 0);
        spu->class_0_pending = 0;
        spu->class_0_dar = 0;

        spu_int_stat_clear(spu, 0, stat);
        spin_unlock(&spu->register_lock);

        return IRQ_HANDLED;
}

static irqreturn_t
spu_irq_class_1(int irq, void *data)
{
        struct spu *spu;
        unsigned long stat, mask, dar, dsisr;

        spu = data;

        /* atomically read & clear class1 status. */
        spin_lock(&spu->register_lock);
        mask = spu_int_mask_get(spu, 1);
        stat = spu_int_stat_get(spu, 1) & mask;
        dar = spu_mfc_dar_get(spu);
        dsisr = spu_mfc_dsisr_get(spu);
        if (stat & CLASS1_STORAGE_FAULT_INTR)
                spu_mfc_dsisr_set(spu, 0ul);
        spu_int_stat_clear(spu, 1, stat);

        pr_debug("%s: %lx %lx %lx %lx\n", __func__, mask, stat,
                        dar, dsisr);

        if (stat & CLASS1_SEGMENT_FAULT_INTR)
                __spu_trap_data_seg(spu, dar);

        if (stat & CLASS1_STORAGE_FAULT_INTR)
                __spu_trap_data_map(spu, dar, dsisr);

        if (stat & CLASS1_LS_COMPARE_SUSPEND_ON_GET_INTR)
                ;

        if (stat & CLASS1_LS_COMPARE_SUSPEND_ON_PUT_INTR)
                ;

        spu->class_1_dsisr = 0;
        spu->class_1_dar = 0;

        spin_unlock(&spu->register_lock);

        return stat ? IRQ_HANDLED : IRQ_NONE;
}

static irqreturn_t
spu_irq_class_2(int irq, void *data)
{
        struct spu *spu;
        unsigned long stat;
        unsigned long mask;
        const int mailbox_intrs =
                CLASS2_MAILBOX_THRESHOLD_INTR | CLASS2_MAILBOX_INTR;

        spu = data;
        spin_lock(&spu->register_lock);
        stat = spu_int_stat_get(spu, 2);
        mask = spu_int_mask_get(spu, 2);
        /* ignore interrupts we're not waiting for */
        stat &= mask;
        /* mailbox interrupts are level triggered. mask them now before
         * acknowledging */
        if (stat & mailbox_intrs)
                spu_int_mask_and(spu, 2, ~(stat & mailbox_intrs));
        /* acknowledge all interrupts before the callbacks */
        spu_int_stat_clear(spu, 2, stat);

        pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat, mask);

        if (stat & CLASS2_MAILBOX_INTR)
                spu->ibox_callback(spu);

        if (stat & CLASS2_SPU_STOP_INTR)
                spu->stop_callback(spu, 2);

        if (stat & CLASS2_SPU_HALT_INTR)
                spu->stop_callback(spu, 2);

        if (stat & CLASS2_SPU_DMA_TAG_GROUP_COMPLETE_INTR)
                spu->mfc_callback(spu);

        if (stat & CLASS2_MAILBOX_THRESHOLD_INTR)
                spu->wbox_callback(spu);

        spu->stats.class2_intr++;

        spin_unlock(&spu->register_lock);

        return stat ? IRQ_HANDLED : IRQ_NONE;
}

static int spu_request_irqs(struct spu *spu)
{
        int ret = 0;

        if (spu->irqs[0] != NO_IRQ) {
                snprintf(spu->irq_c0, sizeof (spu->irq_c0), "spe%02d.0",
                         spu->number);
                ret = request_irq(spu->irqs[0], spu_irq_class_0,
                                  IRQF_DISABLED,
                                  spu->irq_c0, spu);
                if (ret)
                        goto bail0;
        }
        if (spu->irqs[1] != NO_IRQ) {
                snprintf(spu->irq_c1, sizeof (spu->irq_c1), "spe%02d.1",
                         spu->number);
                ret = request_irq(spu->irqs[1], spu_irq_class_1,
                                  IRQF_DISABLED,
                                  spu->irq_c1, spu);
                if (ret)
                        goto bail1;
        }
        if (spu->irqs[2] != NO_IRQ) {
                snprintf(spu->irq_c2, sizeof (spu->irq_c2), "spe%02d.2",
                         spu->number);
                ret = request_irq(spu->irqs[2], spu_irq_class_2,
                                  IRQF_DISABLED,
                                  spu->irq_c2, spu);
                if (ret)
                        goto bail2;
        }
        return 0;

bail2:
        if (spu->irqs[1] != NO_IRQ)
                free_irq(spu->irqs[1], spu);
bail1:
        if (spu->irqs[0] != NO_IRQ)
                free_irq(spu->irqs[0], spu);
bail0:
        return ret;
}

static void spu_free_irqs(struct spu *spu)
{
        if (spu->irqs[0] != NO_IRQ)
                free_irq(spu->irqs[0], spu);
        if (spu->irqs[1] != NO_IRQ)
                free_irq(spu->irqs[1], spu);
        if (spu->irqs[2] != NO_IRQ)
                free_irq(spu->irqs[2], spu);
}

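/*
 * The channel numbers below are the architected SPU channel indices
 * from the CBE architecture documents; the counts written through
 * spu_chnlcnt_RW set each channel's initial capacity (e.g. 16 for the
 * MFC command queue channel 0x15, 1 for the one-deep write channels,
 * 0 for read channels that start out empty).
 */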
void spu_init_channels(struct spu *spu)
{
        static const struct {
                unsigned channel;
                unsigned count;
        } zero_list[] = {
                { 0x00, 1, }, { 0x01, 1, }, { 0x03, 1, }, { 0x04, 1, },
                { 0x18, 1, }, { 0x19, 1, }, { 0x1b, 1, }, { 0x1d, 1, },
        }, count_list[] = {
                { 0x00, 0, }, { 0x03, 0, }, { 0x04, 0, }, { 0x15, 16, },
                { 0x17, 1, }, { 0x18, 0, }, { 0x19, 0, }, { 0x1b, 0, },
                { 0x1c, 1, }, { 0x1d, 0, }, { 0x1e, 1, },
        };
        struct spu_priv2 __iomem *priv2;
        int i;

        priv2 = spu->priv2;

        /* initialize all channel data to zero */
        for (i = 0; i < ARRAY_SIZE(zero_list); i++) {
                int count;

                out_be64(&priv2->spu_chnlcntptr_RW, zero_list[i].channel);
                for (count = 0; count < zero_list[i].count; count++)
                        out_be64(&priv2->spu_chnldata_RW, 0);
        }

        /* initialize channel counts to meaningful values */
        for (i = 0; i < ARRAY_SIZE(count_list); i++) {
                out_be64(&priv2->spu_chnlcntptr_RW, count_list[i].channel);
                out_be64(&priv2->spu_chnlcnt_RW, count_list[i].count);
        }
}
EXPORT_SYMBOL_GPL(spu_init_channels);

static int spu_shutdown(struct sys_device *sysdev)
{
        struct spu *spu = container_of(sysdev, struct spu, sysdev);

        spu_free_irqs(spu);
        spu_destroy_spu(spu);
        return 0;
}

static struct sysdev_class spu_sysdev_class = {
        .name = "spu",
        .shutdown = spu_shutdown,
};

int spu_add_sysdev_attr(struct sysdev_attribute *attr)
{
        struct spu *spu;

        mutex_lock(&spu_full_list_mutex);
        list_for_each_entry(spu, &spu_full_list, full_list)
                sysdev_create_file(&spu->sysdev, attr);
        mutex_unlock(&spu_full_list_mutex);

        return 0;
}
EXPORT_SYMBOL_GPL(spu_add_sysdev_attr);

int spu_add_sysdev_attr_group(struct attribute_group *attrs)
{
        struct spu *spu;
        int rc = 0;

        mutex_lock(&spu_full_list_mutex);
        list_for_each_entry(spu, &spu_full_list, full_list) {
                rc = sysfs_create_group(&spu->sysdev.kobj, attrs);

                /* we're in trouble here, but try unwinding anyway */
                if (rc) {
                        printk(KERN_ERR "%s: can't create sysfs group '%s'\n",
                                        __func__, attrs->name);

                        list_for_each_entry_continue_reverse(spu,
                                        &spu_full_list, full_list)
                                sysfs_remove_group(&spu->sysdev.kobj, attrs);
                        break;
                }
        }

        mutex_unlock(&spu_full_list_mutex);

        return rc;
}
EXPORT_SYMBOL_GPL(spu_add_sysdev_attr_group);


void spu_remove_sysdev_attr(struct sysdev_attribute *attr)
{
        struct spu *spu;

        mutex_lock(&spu_full_list_mutex);
        list_for_each_entry(spu, &spu_full_list, full_list)
                sysdev_remove_file(&spu->sysdev, attr);
        mutex_unlock(&spu_full_list_mutex);
}
EXPORT_SYMBOL_GPL(spu_remove_sysdev_attr);

void spu_remove_sysdev_attr_group(struct attribute_group *attrs)
{
        struct spu *spu;

        mutex_lock(&spu_full_list_mutex);
        list_for_each_entry(spu, &spu_full_list, full_list)
                sysfs_remove_group(&spu->sysdev.kobj, attrs);
        mutex_unlock(&spu_full_list_mutex);
}
EXPORT_SYMBOL_GPL(spu_remove_sysdev_attr_group);

static int spu_create_sysdev(struct spu *spu)
{
        int ret;

        spu->sysdev.id = spu->number;
        spu->sysdev.cls = &spu_sysdev_class;
        ret = sysdev_register(&spu->sysdev);
        if (ret) {
                printk(KERN_ERR "Can't register SPU %d with sysfs\n",
                                spu->number);
                return ret;
        }

        sysfs_add_device_to_node(&spu->sysdev, spu->node);

        return 0;
}

static int __init create_spu(void *data)
{
        struct spu *spu;
        int ret;
        static int number;
        unsigned long flags;
        struct timespec ts;

        ret = -ENOMEM;
        spu = kzalloc(sizeof (*spu), GFP_KERNEL);
        if (!spu)
                goto out;

        spu->alloc_state = SPU_FREE;

        spin_lock_init(&spu->register_lock);
        spin_lock(&spu_lock);
        spu->number = number++;
        spin_unlock(&spu_lock);

        ret = spu_create_spu(spu, data);

        if (ret)
                goto out_free;

        spu_mfc_sdr_setup(spu);
        spu_mfc_sr1_set(spu, 0x33);
        ret = spu_request_irqs(spu);
        if (ret)
                goto out_destroy;

        ret = spu_create_sysdev(spu);
        if (ret)
                goto out_free_irqs;

        mutex_lock(&cbe_spu_info[spu->node].list_mutex);
        list_add(&spu->cbe_list, &cbe_spu_info[spu->node].spus);
        cbe_spu_info[spu->node].n_spus++;
        mutex_unlock(&cbe_spu_info[spu->node].list_mutex);

        mutex_lock(&spu_full_list_mutex);
        spin_lock_irqsave(&spu_full_list_lock, flags);
        list_add(&spu->full_list, &spu_full_list);
        spin_unlock_irqrestore(&spu_full_list_lock, flags);
        mutex_unlock(&spu_full_list_mutex);

        spu->stats.util_state = SPU_UTIL_IDLE_LOADED;
        ktime_get_ts(&ts);
        spu->stats.tstamp = timespec_to_ns(&ts);

        INIT_LIST_HEAD(&spu->aff_list);

        goto out;

out_free_irqs:
        spu_free_irqs(spu);
out_destroy:
        spu_destroy_spu(spu);
out_free:
        kfree(spu);
out:
        return ret;
}

static const char *spu_state_names[] = {
        "user", "system", "iowait", "idle"
};

static unsigned long long spu_acct_time(struct spu *spu,
                enum spu_utilization_state state)
{
        struct timespec ts;
        unsigned long long time = spu->stats.times[state];

        /*
         * If the spu is idle or the context is stopped, utilization
         * statistics are not updated.  Apply the time delta from the
         * last recorded state of the spu.
         */
        if (spu->stats.util_state == state) {
                ktime_get_ts(&ts);
                time += timespec_to_ns(&ts) - spu->stats.tstamp;
        }

        return time / NSEC_PER_MSEC;
}

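/*
 * The "stat" attribute reports one line per SPU, in the field order of
 * the sprintf below: utilization state, the four spu_acct_time()
 * values in milliseconds, then the context-switch and fault counters.
 * With the sysdev class named "spu", this would typically appear as
 * /sys/devices/system/spu/spu<N>/stat.
 */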
static ssize_t spu_stat_show(struct sys_device *sysdev,
                struct sysdev_attribute *attr, char *buf)
{
        struct spu *spu = container_of(sysdev, struct spu, sysdev);

        return sprintf(buf, "%s %llu %llu %llu %llu "
                "%llu %llu %llu %llu %llu %llu %llu %llu\n",
                spu_state_names[spu->stats.util_state],
                spu_acct_time(spu, SPU_UTIL_USER),
                spu_acct_time(spu, SPU_UTIL_SYSTEM),
                spu_acct_time(spu, SPU_UTIL_IOWAIT),
                spu_acct_time(spu, SPU_UTIL_IDLE_LOADED),
                spu->stats.vol_ctx_switch,
                spu->stats.invol_ctx_switch,
                spu->stats.slb_flt,
                spu->stats.hash_flt,
                spu->stats.min_flt,
                spu->stats.maj_flt,
                spu->stats.class2_intr,
                spu->stats.libassist);
}

static SYSDEV_ATTR(stat, 0644, spu_stat_show, NULL);

static int __init init_spu_base(void)
{
        int i, ret = 0;

        for (i = 0; i < MAX_NUMNODES; i++) {
                mutex_init(&cbe_spu_info[i].list_mutex);
                INIT_LIST_HEAD(&cbe_spu_info[i].spus);
        }

        if (!spu_management_ops)
                goto out;

        /* create sysdev class for spus */
        ret = sysdev_class_register(&spu_sysdev_class);
        if (ret)
                goto out;

        ret = spu_enumerate_spus(create_spu);

        if (ret < 0) {
                printk(KERN_WARNING "%s: Error initializing spus\n",
                        __func__);
                goto out_unregister_sysdev_class;
        }

        if (ret > 0) {
                /*
                 * We cannot put the forward declaration in
                 * <linux/linux_logo.h> because of section type conflicts
                 * for const and __initdata with different compiler
                 * versions.
                 */
                extern const struct linux_logo logo_spe_clut224;

                fb_append_extra_logo(&logo_spe_clut224, ret);
        }

        mutex_lock(&spu_full_list_mutex);
        xmon_register_spus(&spu_full_list);
        crash_register_spus(&spu_full_list);
        mutex_unlock(&spu_full_list_mutex);
        spu_add_sysdev_attr(&attr_stat);

        spu_init_affinity();

        return 0;

out_unregister_sysdev_class:
        sysdev_class_unregister(&spu_sysdev_class);
out:
        return ret;
}
module_init(init_spu_base);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");