Commit | Line | Data |
---|---|---|
67207b96 AB |
1 | /* |
2 | * SPU file system -- SPU context management | |
3 | * | |
4 | * (C) Copyright IBM Deutschland Entwicklung GmbH 2005 | |
5 | * | |
6 | * Author: Arnd Bergmann <arndb@de.ibm.com> | |
7 | * | |
8 | * This program is free software; you can redistribute it and/or modify | |
9 | * it under the terms of the GNU General Public License as published by | |
10 | * the Free Software Foundation; either version 2, or (at your option) | |
11 | * any later version. | |
12 | * | |
13 | * This program is distributed in the hope that it will be useful, | |
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
16 | * GNU General Public License for more details. | |
17 | * | |
18 | * You should have received a copy of the GNU General Public License | |
19 | * along with this program; if not, write to the Free Software | |
20 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | |
21 | */ | |
22 | ||
8b3d6663 AB |
23 | #include <linux/fs.h> |
24 | #include <linux/mm.h> | |
1474855d | 25 | #include <linux/module.h> |
67207b96 | 26 | #include <linux/slab.h> |
65de66f0 | 27 | #include <asm/atomic.h> |
67207b96 | 28 | #include <asm/spu.h> |
5473af04 | 29 | #include <asm/spu_csa.h> |
67207b96 AB |
30 | #include "spufs.h" |
31 | ||
/*
 * Count of currently allocated SPU contexts: incremented in
 * alloc_spu_context() and decremented in destroy_spu_context().
 */
atomic_t nr_spu_contexts = ATOMIC_INIT(0);
6263203e | 35 | struct spu_context *alloc_spu_context(struct spu_gang *gang) |
67207b96 AB |
36 | { |
37 | struct spu_context *ctx; | |
c5c45913 | 38 | ctx = kzalloc(sizeof *ctx, GFP_KERNEL); |
67207b96 AB |
39 | if (!ctx) |
40 | goto out; | |
8b3d6663 AB |
41 | /* Binding to physical processor deferred |
42 | * until spu_activate(). | |
5473af04 | 43 | */ |
f1fa74f4 | 44 | if (spu_init_csa(&ctx->csa)) |
5473af04 | 45 | goto out_free; |
67207b96 | 46 | spin_lock_init(&ctx->mmio_lock); |
47d3a5fa | 47 | mutex_init(&ctx->mapping_lock); |
67207b96 | 48 | kref_init(&ctx->kref); |
650f8b02 | 49 | mutex_init(&ctx->state_mutex); |
e45d48a3 | 50 | mutex_init(&ctx->run_mutex); |
8b3d6663 AB |
51 | init_waitqueue_head(&ctx->ibox_wq); |
52 | init_waitqueue_head(&ctx->wbox_wq); | |
5110459f | 53 | init_waitqueue_head(&ctx->stop_wq); |
a33a7d73 | 54 | init_waitqueue_head(&ctx->mfc_wq); |
8b3d6663 | 55 | ctx->state = SPU_STATE_SAVED; |
8b3d6663 AB |
56 | ctx->ops = &spu_backing_ops; |
57 | ctx->owner = get_task_mm(current); | |
a475c2f4 | 58 | INIT_LIST_HEAD(&ctx->rq); |
8e68e2f2 | 59 | INIT_LIST_HEAD(&ctx->aff_list); |
6263203e AB |
60 | if (gang) |
61 | spu_gang_add_ctx(gang, ctx); | |
ea1ae594 | 62 | ctx->cpus_allowed = current->cpus_allowed; |
fe443ef2 | 63 | spu_set_timeslice(ctx); |
27ec41d3 | 64 | ctx->stats.util_state = SPU_UTIL_IDLE_LOADED; |
65de66f0 CH |
65 | |
66 | atomic_inc(&nr_spu_contexts); | |
67207b96 AB |
67 | goto out; |
68 | out_free: | |
69 | kfree(ctx); | |
70 | ctx = NULL; | |
71 | out: | |
72 | return ctx; | |
73 | } | |
74 | ||
/**
 * destroy_spu_context - kref release callback for an SPU context
 * @kref: refcount embedded in the context being destroyed
 *
 * Invoked when the last reference is dropped (see put_spu_context()).
 * Teardown order matters: the context is deactivated under its
 * state_mutex before the saved state area is freed, and the gang and
 * profiling links are released before the memory itself.
 */
void destroy_spu_context(struct kref *kref)
{
	struct spu_context *ctx;
	ctx = container_of(kref, struct spu_context, kref);
	/* make sure the context is off any physical SPU first */
	mutex_lock(&ctx->state_mutex);
	spu_deactivate(ctx);
	mutex_unlock(&ctx->state_mutex);
	spu_fini_csa(&ctx->csa);
	if (ctx->gang)
		spu_gang_remove_ctx(ctx->gang, ctx);
	/* drop the profiler's private data, if any was attached */
	if (ctx->prof_priv_kref)
		kref_put(ctx->prof_priv_kref, ctx->prof_priv_release);
	/* a context still on the scheduler run queue must never be freed */
	BUG_ON(!list_empty(&ctx->rq));
	atomic_dec(&nr_spu_contexts);
	kfree(ctx);
}
91 | ||
92 | struct spu_context * get_spu_context(struct spu_context *ctx) | |
93 | { | |
94 | kref_get(&ctx->kref); | |
95 | return ctx; | |
96 | } | |
97 | ||
98 | int put_spu_context(struct spu_context *ctx) | |
99 | { | |
100 | return kref_put(&ctx->kref, &destroy_spu_context); | |
101 | } | |
102 | ||
8b3d6663 AB |
103 | /* give up the mm reference when the context is about to be destroyed */ |
104 | void spu_forget(struct spu_context *ctx) | |
105 | { | |
106 | struct mm_struct *mm; | |
107 | spu_acquire_saved(ctx); | |
108 | mm = ctx->owner; | |
109 | ctx->owner = NULL; | |
110 | mmput(mm); | |
111 | spu_release(ctx); | |
112 | } | |
113 | ||
/*
 * spu_unmap_mappings - zap all user-space mappings of a context's files
 * @ctx: context whose mappings are torn down
 *
 * Removes every PTE currently mapping the context's register areas and
 * local store, so that the next user access faults and is redirected to
 * wherever the context lives then (physical SPU or backing store).
 * mapping_lock serializes this against concurrent mmap/close updating
 * the address_space pointers.
 *
 * NOTE(review): the hard-coded sizes (0x1000, 0x20000) presumably match
 * the fixed layout of the corresponding spufs files -- confirm against
 * the file definitions in spufs.
 */
void spu_unmap_mappings(struct spu_context *ctx)
{
	mutex_lock(&ctx->mapping_lock);
	if (ctx->local_store)
		unmap_mapping_range(ctx->local_store, 0, LS_SIZE, 1);
	if (ctx->mfc)
		unmap_mapping_range(ctx->mfc, 0, 0x1000, 1);
	if (ctx->cntl)
		unmap_mapping_range(ctx->cntl, 0, 0x1000, 1);
	if (ctx->signal1)
		unmap_mapping_range(ctx->signal1, 0, PAGE_SIZE, 1);
	if (ctx->signal2)
		unmap_mapping_range(ctx->signal2, 0, PAGE_SIZE, 1);
	if (ctx->mss)
		unmap_mapping_range(ctx->mss, 0, 0x1000, 1);
	if (ctx->psmap)
		unmap_mapping_range(ctx->psmap, 0, 0x20000, 1);
	mutex_unlock(&ctx->mapping_lock);
}
133 | ||
6a0641e5 CH |
134 | /** |
135 | * spu_acquire_runnable - lock spu contex and make sure it is in runnable state | |
136 | * @ctx: spu contex to lock | |
137 | * | |
138 | * Note: | |
139 | * Returns 0 and with the context locked on success | |
140 | * Returns negative error and with the context _unlocked_ on failure. | |
141 | */ | |
26bec673 | 142 | int spu_acquire_runnable(struct spu_context *ctx, unsigned long flags) |
8b3d6663 | 143 | { |
6a0641e5 | 144 | int ret = -EINVAL; |
8b3d6663 | 145 | |
6a0641e5 | 146 | spu_acquire(ctx); |
8b3d6663 | 147 | if (ctx->state == SPU_STATE_SAVED) { |
6a0641e5 CH |
148 | /* |
149 | * Context is about to be freed, so we can't acquire it anymore. | |
150 | */ | |
151 | if (!ctx->owner) | |
152 | goto out_unlock; | |
26bec673 | 153 | ret = spu_activate(ctx, flags); |
01062465 | 154 | if (ret) |
6a0641e5 | 155 | goto out_unlock; |
8389998a | 156 | } |
8b3d6663 | 157 | |
6a0641e5 | 158 | return 0; |
8b3d6663 | 159 | |
6a0641e5 CH |
160 | out_unlock: |
161 | spu_release(ctx); | |
8b3d6663 AB |
162 | return ret; |
163 | } | |
164 | ||
6a0641e5 CH |
165 | /** |
166 | * spu_acquire_saved - lock spu contex and make sure it is in saved state | |
167 | * @ctx: spu contex to lock | |
168 | */ | |
8b3d6663 AB |
169 | void spu_acquire_saved(struct spu_context *ctx) |
170 | { | |
6a0641e5 | 171 | spu_acquire(ctx); |
27b1ea09 CH |
172 | if (ctx->state != SPU_STATE_SAVED) { |
173 | set_bit(SPU_SCHED_WAS_ACTIVE, &ctx->sched_flags); | |
8b3d6663 | 174 | spu_deactivate(ctx); |
27b1ea09 CH |
175 | } |
176 | } | |
177 | ||
/**
 * spu_release_saved - unlock spu context and return it to the runqueue
 * @ctx: context to unlock
 *
 * Counterpart of spu_acquire_saved(): a context that was running when
 * it got forcibly saved is handed back to the scheduler before the
 * state_mutex is dropped.
 */
void spu_release_saved(struct spu_context *ctx)
{
	BUG_ON(ctx->state != SPU_STATE_SAVED);

	if (test_and_clear_bit(SPU_SCHED_WAS_ACTIVE, &ctx->sched_flags))
		spu_activate(ctx, 0);

	spu_release(ctx);
}
1474855d BN |
191 | |
192 | void spu_set_profile_private_kref(struct spu_context *ctx, | |
193 | struct kref *prof_info_kref, | |
194 | void ( * prof_info_release) (struct kref *kref)) | |
195 | { | |
196 | ctx->prof_priv_kref = prof_info_kref; | |
197 | ctx->prof_priv_release = prof_info_release; | |
198 | } | |
199 | EXPORT_SYMBOL_GPL(spu_set_profile_private_kref); | |
200 | ||
201 | void *spu_get_profile_private_kref(struct spu_context *ctx) | |
202 | { | |
203 | return ctx->prof_priv_kref; | |
204 | } | |
205 | EXPORT_SYMBOL_GPL(spu_get_profile_private_kref); | |
206 | ||
207 |