/* sched.c - SPU scheduler.
 *
 * Copyright (C) IBM 2005
 * Author: Mark Nutter <mnutter@us.ibm.com>
 *
 * 2006-03-31	NUMA domains added.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/completion.h>
#include <linux/vmalloc.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/numa.h>
#include <linux/mutex.h>
#include <linux/notifier.h>

#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/spu.h>
#include <asm/spu_csa.h>
#include <asm/spu_priv1.h>
#include "spufs.h"

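/* timeslice for SCHED_RR contexts: one scheduler tick every HZ jiffies (1s) */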
#define SPU_TIMESLICE	(HZ)

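/*
 * Runnable contexts wait on a priority-ordered runqueue: one list per
 * priority level plus a bitmap of non-empty levels, so the highest
 * priority waiter can be found in O(1) with sched_find_first_bit(),
 * mirroring the O(1) CPU scheduler.  Contexts bound to a physical spu
 * are tracked on per-node active lists.
 */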
struct spu_prio_array {
	DECLARE_BITMAP(bitmap, MAX_PRIO);
	struct list_head runq[MAX_PRIO];
	spinlock_t runq_lock;
	struct list_head active_list[MAX_NUMNODES];
	struct mutex active_mutex[MAX_NUMNODES];
};

static struct spu_prio_array *spu_prio;
static struct workqueue_struct *spu_sched_wq;

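/*
 * node_allowed - check whether the current task may use spus on @node,
 * i.e. the node has CPUs and its cpumask intersects current->cpus_allowed.
 */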
static inline int node_allowed(int node)
{
	cpumask_t mask;

	if (!nr_cpus_node(node))
		return 0;
	mask = node_to_cpumask(node);
	if (!cpus_intersects(mask, current->cpus_allowed))
		return 0;
	return 1;
}

void spu_start_tick(struct spu_context *ctx)
{
	if (ctx->policy == SCHED_RR) {
		/*
		 * Make sure the exiting bit is cleared.
		 */
		clear_bit(SPU_SCHED_EXITING, &ctx->sched_flags);
		mb();
		queue_delayed_work(spu_sched_wq, &ctx->sched_work, SPU_TIMESLICE);
	}
}

void spu_stop_tick(struct spu_context *ctx)
{
	if (ctx->policy == SCHED_RR) {
		/*
		 * While the work can rearm itself normally, setting this
		 * flag makes sure it does not rearm anymore.
		 */
		set_bit(SPU_SCHED_EXITING, &ctx->sched_flags);
		mb();
		cancel_delayed_work(&ctx->sched_work);
	}
}

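/**
 * spu_sched_tick - timeslice expiry for a SCHED_RR context
 * @work: sched_work member of the context
 *
 * Deactivate the context if an equal or higher priority context is
 * waiting on the runqueue (a lower prio value means higher priority),
 * otherwise rearm the tick.
 */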
void spu_sched_tick(struct work_struct *work)
{
	struct spu_context *ctx =
		container_of(work, struct spu_context, sched_work.work);
	struct spu *spu;
	int preempted = 0;

	/*
	 * If this context is being stopped, avoid rescheduling from the
	 * scheduler tick because we would block on the state_mutex.
	 * The caller will yield the spu later on anyway.
	 */
	if (test_bit(SPU_SCHED_EXITING, &ctx->sched_flags))
		return;

	mutex_lock(&ctx->state_mutex);
	spu = ctx->spu;
	if (spu) {
		int best = sched_find_first_bit(spu_prio->bitmap);
		if (best <= ctx->prio) {
			spu_deactivate(ctx);
			preempted = 1;
		}
	}
	mutex_unlock(&ctx->state_mutex);

	if (preempted) {
		/*
		 * We need to break out of the wait loop in spu_run manually
		 * to ensure this context gets put on the runqueue again
		 * ASAP.
		 */
		wake_up(&ctx->stop_wq);
	} else
		spu_start_tick(ctx);
}

/**
 * spu_add_to_active_list - add spu to active list
 * @spu: spu to add to the active list
 */
static void spu_add_to_active_list(struct spu *spu)
{
	mutex_lock(&spu_prio->active_mutex[spu->node]);
	list_add_tail(&spu->list, &spu_prio->active_list[spu->node]);
	mutex_unlock(&spu_prio->active_mutex[spu->node]);
}

/**
 * spu_remove_from_active_list - remove spu from active list
 * @spu: spu to remove from the active list
 */
static void spu_remove_from_active_list(struct spu *spu)
{
	int node = spu->node;

	mutex_lock(&spu_prio->active_mutex[node]);
	list_del_init(&spu->list);
	mutex_unlock(&spu_prio->active_mutex[node]);
}

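/*
 * Context switch notifier chain: invoked with the context's object id
 * whenever a context is bound to or unbound from a physical spu, so
 * that subscribers (e.g. profiling code) can track spu usage.
 */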
static BLOCKING_NOTIFIER_HEAD(spu_switch_notifier);

static void spu_switch_notify(struct spu *spu, struct spu_context *ctx)
{
	blocking_notifier_call_chain(&spu_switch_notifier,
			ctx ? ctx->object_id : 0, spu);
}

int spu_switch_event_register(struct notifier_block *n)
{
	return blocking_notifier_chain_register(&spu_switch_notifier, n);
}

int spu_switch_event_unregister(struct notifier_block *n)
{
	return blocking_notifier_chain_unregister(&spu_switch_notifier, n);
}

/**
 * spu_bind_context - bind spu context to physical spu
 * @spu: physical spu to bind to
 * @ctx: context to bind
 */
static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
{
	pr_debug("%s: pid=%d SPU=%d NODE=%d\n", __FUNCTION__, current->pid,
		 spu->number, spu->node);
	spu->ctx = ctx;
	spu->flags = 0;
	ctx->spu = spu;
	ctx->ops = &spu_hw_ops;
	spu->pid = current->pid;
	spu_associate_mm(spu, ctx->owner);
	spu->ibox_callback = spufs_ibox_callback;
	spu->wbox_callback = spufs_wbox_callback;
	spu->stop_callback = spufs_stop_callback;
	spu->mfc_callback = spufs_mfc_callback;
	spu->dma_callback = spufs_dma_callback;
	mb();
	spu_unmap_mappings(ctx);
	spu_restore(&ctx->csa, spu);
	spu->timestamp = jiffies;
	spu_cpu_affinity_set(spu, raw_smp_processor_id());
	spu_switch_notify(spu, ctx);
	spu_add_to_active_list(spu);
	ctx->state = SPU_STATE_RUNNABLE;
}

/**
 * spu_unbind_context - unbind spu context from physical spu
 * @spu: physical spu to unbind from
 * @ctx: context to unbind
 */
static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
{
	pr_debug("%s: unbind pid=%d SPU=%d NODE=%d\n", __FUNCTION__,
		 spu->pid, spu->number, spu->node);

	spu_remove_from_active_list(spu);
	spu_switch_notify(spu, NULL);
	spu_unmap_mappings(ctx);
	spu_save(&ctx->csa, spu);
	spu->timestamp = jiffies;
	ctx->state = SPU_STATE_SAVED;
	spu->ibox_callback = NULL;
	spu->wbox_callback = NULL;
	spu->stop_callback = NULL;
	spu->mfc_callback = NULL;
	spu->dma_callback = NULL;
	spu_associate_mm(spu, NULL);
	spu->pid = 0;
	ctx->ops = &spu_backing_ops;
	ctx->spu = NULL;
	spu->flags = 0;
	spu->ctx = NULL;
}

/**
 * __spu_add_to_rq - add a context to the runqueue
 * @ctx: context to add
 *
 * Must be called with spu_prio->runq_lock held.
 */
static void __spu_add_to_rq(struct spu_context *ctx)
{
	int prio = ctx->prio;

	list_add_tail(&ctx->rq, &spu_prio->runq[prio]);
	set_bit(prio, spu_prio->bitmap);
}

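/**
 * __spu_del_from_rq - remove a context from the runqueue
 * @ctx: context to remove
 *
 * Clears the priority bit when the last context of that priority
 * leaves the queue.  Must be called with spu_prio->runq_lock held.
 */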
static void __spu_del_from_rq(struct spu_context *ctx)
{
	int prio = ctx->prio;

	if (!list_empty(&ctx->rq))
		list_del_init(&ctx->rq);
	if (list_empty(&spu_prio->runq[prio]))
		clear_bit(prio, spu_prio->bitmap);
}

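/**
 * spu_prio_wait - wait on the runqueue until a spu becomes available
 * @ctx: context to wait for
 *
 * Puts @ctx on the runqueue and sleeps.  ctx->state_mutex is dropped
 * across schedule() so that whoever hands us a spu can take it; the
 * context is removed from the runqueue again before returning.
 */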
static void spu_prio_wait(struct spu_context *ctx)
{
	DEFINE_WAIT(wait);

	spin_lock(&spu_prio->runq_lock);
	prepare_to_wait_exclusive(&ctx->stop_wq, &wait, TASK_INTERRUPTIBLE);
	if (!signal_pending(current)) {
		__spu_add_to_rq(ctx);
		spin_unlock(&spu_prio->runq_lock);
		mutex_unlock(&ctx->state_mutex);
		schedule();
		mutex_lock(&ctx->state_mutex);
		spin_lock(&spu_prio->runq_lock);
		__spu_del_from_rq(ctx);
	}
	spin_unlock(&spu_prio->runq_lock);
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&ctx->stop_wq, &wait);
}

/**
 * spu_reschedule - try to find a runnable context for a spu
 * @spu: spu available
 *
 * This function is called whenever a spu becomes idle.  It frees the
 * spu and wakes the highest priority context on the runqueue, which
 * then retries the allocation in spu_activate().
 */
static void spu_reschedule(struct spu *spu)
{
	int best;

	spu_free(spu);

	spin_lock(&spu_prio->runq_lock);
	best = sched_find_first_bit(spu_prio->bitmap);
	if (best < MAX_PRIO) {
		struct list_head *rq = &spu_prio->runq[best];
		struct spu_context *ctx;

		BUG_ON(list_empty(rq));

		ctx = list_entry(rq->next, struct spu_context, rq);
		__spu_del_from_rq(ctx);
		wake_up(&ctx->stop_wq);
	}
	spin_unlock(&spu_prio->runq_lock);
}

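/**
 * spu_get_idle - try to allocate an idle spu
 * @ctx: context to find a spu for
 *
 * Walks the nodes in round-robin order starting at the local node,
 * skipping nodes the current task may not run on.
 */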
static struct spu *spu_get_idle(struct spu_context *ctx)
{
	struct spu *spu = NULL;
	int node = cpu_to_node(raw_smp_processor_id());
	int n;

	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		node = (node < MAX_NUMNODES) ? node : 0;
		if (!node_allowed(node))
			continue;
		spu = spu_alloc_node(node);
		if (spu)
			break;
	}
	return spu;
}

/**
 * find_victim - find a lower priority context to preempt
 * @ctx: candidate context for running
 *
 * Returns the freed physical spu to run the new context on.
 */
static struct spu *find_victim(struct spu_context *ctx)
{
	struct spu_context *victim = NULL;
	struct spu *spu;
	int node, n;

	/*
	 * Look for a possible preemption candidate on the local node first.
	 * If there is no candidate look at the other nodes.  This isn't
	 * exactly fair, but so far the whole spu scheduler tries to keep
	 * a strong node affinity.  We might want to fine-tune this in
	 * the future.
	 */
 restart:
	node = cpu_to_node(raw_smp_processor_id());
	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		node = (node < MAX_NUMNODES) ? node : 0;
		if (!node_allowed(node))
			continue;

		mutex_lock(&spu_prio->active_mutex[node]);
		list_for_each_entry(spu, &spu_prio->active_list[node], list) {
			struct spu_context *tmp = spu->ctx;

			if (tmp->rt_priority < ctx->rt_priority &&
			    (!victim || tmp->rt_priority < victim->rt_priority))
				victim = spu->ctx;
		}
		mutex_unlock(&spu_prio->active_mutex[node]);

		if (victim) {
			/*
			 * This nests ctx->state_mutex, but we always lock
			 * higher priority contexts before lower priority
			 * ones, so this is safe until we introduce
			 * priority inheritance schemes.
			 */
			if (!mutex_trylock(&victim->state_mutex)) {
				victim = NULL;
				goto restart;
			}

			spu = victim->spu;
			if (!spu) {
				/*
				 * This race can happen because we've dropped
				 * the active list mutex.  Not a problem, just
				 * restart the search.
				 */
				mutex_unlock(&victim->state_mutex);
				victim = NULL;
				goto restart;
			}
			spu_unbind_context(spu, victim);
			mutex_unlock(&victim->state_mutex);
			/*
			 * We need to break out of the wait loop in spu_run
			 * manually to ensure this context gets put on the
			 * runqueue again ASAP.
			 */
			wake_up(&victim->stop_wq);
			return spu;
		}
	}

	return NULL;
}

/**
 * spu_activate - find a free spu for a context and execute it
 * @ctx: spu context to schedule
 * @flags: flags (currently ignored)
 *
 * Tries to find a free spu to run @ctx.  If no free spu is available,
 * add the context to the runqueue so it gets woken up once an spu
 * is available.
 */
int spu_activate(struct spu_context *ctx, unsigned long flags)
{
	if (ctx->spu)
		return 0;

	do {
		struct spu *spu;

		spu = spu_get_idle(ctx);
		/*
		 * If this is a realtime thread we try to get it running by
		 * preempting a lower priority thread.
		 */
		if (!spu && ctx->rt_priority)
			spu = find_victim(ctx);
		if (spu) {
			spu_bind_context(spu, ctx);
			return 0;
		}

		spu_prio_wait(ctx);
	} while (!signal_pending(current));

	return -ERESTARTSYS;
}

/**
 * spu_deactivate - unbind a context from its physical spu
 * @ctx: spu context to unbind
 *
 * Unbind @ctx from the physical spu it is running on and schedule
 * the highest priority context to run on the freed physical spu.
 */
void spu_deactivate(struct spu_context *ctx)
{
	struct spu *spu = ctx->spu;

	if (spu) {
		spu_unbind_context(spu, ctx);
		spu_reschedule(spu);
	}
}

/**
 * spu_yield - yield a physical spu if others are waiting
 * @ctx: spu context to yield
 *
 * Check if any context is waiting on the runqueue and if so unbind
 * @ctx from the physical spu and schedule the highest priority
 * context to run on the freed physical spu instead.
 */
void spu_yield(struct spu_context *ctx)
{
	struct spu *spu;

	if (mutex_trylock(&ctx->state_mutex)) {
		if ((spu = ctx->spu) != NULL) {
			int best = sched_find_first_bit(spu_prio->bitmap);
			if (best < MAX_PRIO) {
				pr_debug("%s: yielding SPU %d NODE %d\n",
					 __FUNCTION__, spu->number, spu->node);
				spu_deactivate(ctx);
			}
		}
		mutex_unlock(&ctx->state_mutex);
	}
}

int __init spu_sched_init(void)
{
	int i;

	spu_sched_wq = create_singlethread_workqueue("spusched");
	if (!spu_sched_wq)
		return 1;

	spu_prio = kzalloc(sizeof(struct spu_prio_array), GFP_KERNEL);
	if (!spu_prio) {
		printk(KERN_WARNING "%s: Unable to allocate priority queue.\n",
		       __FUNCTION__);
		destroy_workqueue(spu_sched_wq);
		return 1;
	}
	for (i = 0; i < MAX_PRIO; i++) {
		INIT_LIST_HEAD(&spu_prio->runq[i]);
		__clear_bit(i, spu_prio->bitmap);
	}
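	/* sentinel bit: sched_find_first_bit() returns MAX_PRIO when empty */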
	__set_bit(MAX_PRIO, spu_prio->bitmap);
	for (i = 0; i < MAX_NUMNODES; i++) {
		mutex_init(&spu_prio->active_mutex[i]);
		INIT_LIST_HEAD(&spu_prio->active_list[i]);
	}
	spin_lock_init(&spu_prio->runq_lock);
	return 0;
}

void __exit spu_sched_exit(void)
{
	struct spu *spu, *tmp;
	int node;

	for (node = 0; node < MAX_NUMNODES; node++) {
		mutex_lock(&spu_prio->active_mutex[node]);
		list_for_each_entry_safe(spu, tmp, &spu_prio->active_list[node],
					 list) {
			list_del_init(&spu->list);
			spu_free(spu);
		}
		mutex_unlock(&spu_prio->active_mutex[node]);
	}
	kfree(spu_prio);
	destroy_workqueue(spu_sched_wq);
}