// SPDX-License-Identifier: GPL-2.0
#define DEBUG

#include <linux/wait.h>
#include <linux/ptrace.h>

#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/io.h>
#include <asm/unistd.h>

#include "spufs.h"

/* interrupt-level stop callback function. */
void spufs_stop_callback(struct spu *spu, int irq)
{
	struct spu_context *ctx = spu->ctx;

	/*
	 * It should be impossible to preempt a context while an exception
	 * is being processed, since the context switch code is specially
	 * coded to deal with interrupts ... But, just in case, sanity check
	 * the context pointer. It is OK to return doing nothing since
	 * the exception will be regenerated when the context is resumed.
	 */
	if (ctx) {
		/* Copy exception arguments into module specific structure */
		switch (irq) {
		case 0:
			ctx->csa.class_0_pending = spu->class_0_pending;
			ctx->csa.class_0_dar = spu->class_0_dar;
			break;
		case 1:
			ctx->csa.class_1_dsisr = spu->class_1_dsisr;
			ctx->csa.class_1_dar = spu->class_1_dar;
			break;
		case 2:
			break;
		}

		/* ensure that the exception status has hit memory before a
		 * thread waiting on the context's stop queue is woken */
		smp_wmb();

		wake_up_all(&ctx->stop_wq);
	}
}

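/*
 * spu_stopped is the wait condition for spufs_run_spu: it returns 1
 * when the SPU status register shows a stop condition, when a
 * scheduler notification is pending, or when a class 0 or class 1
 * exception still needs handling, and 0 while the SPU should keep
 * running.
 */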
int spu_stopped(struct spu_context *ctx, u32 *stat)
{
	u64 dsisr;
	u32 stopped;

	stopped = SPU_STATUS_INVALID_INSTR | SPU_STATUS_SINGLE_STEP |
		SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP;

top:
	*stat = ctx->ops->status_read(ctx);
	if (*stat & stopped) {
		/*
		 * If the spu hasn't finished stopping, we need to
		 * re-read the register to get the stopped value.
		 */
		if (*stat & SPU_STATUS_RUNNING)
			goto top;
		return 1;
	}

	if (test_bit(SPU_SCHED_NOTIFY_ACTIVE, &ctx->sched_flags))
		return 1;

	dsisr = ctx->csa.class_1_dsisr;
	if (dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED))
		return 1;

	if (ctx->csa.class_0_pending)
		return 1;

	return 0;
}

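/*
 * Load the isolated-mode loader: purge the MFC DMA queue, drop the SPE
 * into kernel (privileged) mode, hand the loader's address to the SPU
 * via the signal notification registers, and start the SPU in isolate
 * mode, waiting for the load to complete before problem state is
 * restored.
 */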
static int spu_setup_isolated(struct spu_context *ctx)
{
	int ret;
	u64 __iomem *mfc_cntl;
	u64 sr1;
	u32 status;
	unsigned long timeout;
	const u32 status_loading = SPU_STATUS_RUNNING
		| SPU_STATUS_ISOLATED_STATE | SPU_STATUS_ISOLATED_LOAD_STATUS;

	ret = -ENODEV;
	if (!isolated_loader)
		goto out;

	/*
	 * We need to exclude userspace access to the context.
	 *
	 * To protect against memory access we invalidate all ptes
	 * and make sure the pagefault handlers block on the mutex.
	 */
	spu_unmap_mappings(ctx);

	mfc_cntl = &ctx->spu->priv2->mfc_control_RW;

	/* purge the MFC DMA queue to ensure no spurious accesses before we
	 * enter kernel mode */
	timeout = jiffies + HZ;
	out_be64(mfc_cntl, MFC_CNTL_PURGE_DMA_REQUEST);
	while ((in_be64(mfc_cntl) & MFC_CNTL_PURGE_DMA_STATUS_MASK)
			!= MFC_CNTL_PURGE_DMA_COMPLETE) {
		if (time_after(jiffies, timeout)) {
			printk(KERN_ERR "%s: timeout flushing MFC DMA queue\n",
					__func__);
			ret = -EIO;
			goto out;
		}
		cond_resched();
	}

	/* clear purge status */
	out_be64(mfc_cntl, 0);

	/* put the SPE in kernel mode to allow access to the loader */
	sr1 = spu_mfc_sr1_get(ctx->spu);
	sr1 &= ~MFC_STATE1_PROBLEM_STATE_MASK;
	spu_mfc_sr1_set(ctx->spu, sr1);

	/* start the loader */
	ctx->ops->signal1_write(ctx, (unsigned long)isolated_loader >> 32);
	ctx->ops->signal2_write(ctx,
			(unsigned long)isolated_loader & 0xffffffff);

	ctx->ops->runcntl_write(ctx,
			SPU_RUNCNTL_RUNNABLE | SPU_RUNCNTL_ISOLATE);

	ret = 0;
	timeout = jiffies + HZ;
	while (((status = ctx->ops->status_read(ctx)) & status_loading) ==
			status_loading) {
		if (time_after(jiffies, timeout)) {
			printk(KERN_ERR "%s: timeout waiting for loader\n",
					__func__);
			ret = -EIO;
			goto out_drop_priv;
		}
		cond_resched();
	}

	if (!(status & SPU_STATUS_RUNNING)) {
		/* If the isolated LOAD has failed, run the SPU; we will get
		 * a stop-and-signal later. */
		pr_debug("%s: isolated LOAD failed\n", __func__);
		ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
		ret = -EACCES;
		goto out_drop_priv;
	}

	if (!(status & SPU_STATUS_ISOLATED_STATE)) {
		/* This isn't allowed by the CBEA, but check anyway */
		pr_debug("%s: SPU fell out of isolated mode?\n", __func__);
		ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_STOP);
		ret = -EINVAL;
		goto out_drop_priv;
	}

out_drop_priv:
	/* Finished accessing the loader. Drop kernel mode */
	sr1 |= MFC_STATE1_PROBLEM_STATE_MASK;
	spu_mfc_sr1_set(ctx->spu, sr1);

out:
	return ret;
}

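/*
 * Prepare a context for running: apply NOSCHED and isolation setup as
 * required by the creation flags, program the privileged control and
 * NPC registers, start the SPU, and mark the context runnable for the
 * scheduler.
 */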
static int spu_run_init(struct spu_context *ctx, u32 *npc)
{
	unsigned long runcntl = SPU_RUNCNTL_RUNNABLE;
	int ret;

	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);

	/*
	 * NOSCHED is synchronous scheduling with respect to the caller.
	 * The caller waits for the context to be loaded.
	 */
	if (ctx->flags & SPU_CREATE_NOSCHED) {
		if (ctx->state == SPU_STATE_SAVED) {
			ret = spu_activate(ctx, 0);
			if (ret)
				return ret;
		}
	}

	/*
	 * Apply special setup as required.
	 */
	if (ctx->flags & SPU_CREATE_ISOLATE) {
		if (!(ctx->ops->status_read(ctx) & SPU_STATUS_ISOLATED_STATE)) {
			ret = spu_setup_isolated(ctx);
			if (ret)
				return ret;
		}

		/*
		 * If userspace has set the runcntl register (e.g., to
		 * issue an isolated exit), we need to re-set it here.
		 */
		runcntl = ctx->ops->runcntl_read(ctx) &
			(SPU_RUNCNTL_RUNNABLE | SPU_RUNCNTL_ISOLATE);
		if (runcntl == 0)
			runcntl = SPU_RUNCNTL_RUNNABLE;
	} else {
		unsigned long privcntl;

		if (test_thread_flag(TIF_SINGLESTEP))
			privcntl = SPU_PRIVCNTL_MODE_SINGLE_STEP;
		else
			privcntl = SPU_PRIVCNTL_MODE_NORMAL;

		ctx->ops->privcntl_write(ctx, privcntl);
		ctx->ops->npc_write(ctx, *npc);
	}

	ctx->ops->runcntl_write(ctx, runcntl);

	if (ctx->flags & SPU_CREATE_NOSCHED) {
		spuctx_switch_state(ctx, SPU_UTIL_USER);
	} else {
		if (ctx->state == SPU_STATE_SAVED) {
			ret = spu_activate(ctx, 0);
			if (ret)
				return ret;
		} else {
			spuctx_switch_state(ctx, SPU_UTIL_USER);
		}
	}

	set_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags);
	return 0;
}

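/*
 * Finish a run: take the context off the run queue, read back the
 * final status and NPC for the caller, notify the switch log, and
 * release the context.
 */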
static int spu_run_fini(struct spu_context *ctx, u32 *npc,
			u32 *status)
{
	int ret = 0;

	spu_del_from_rq(ctx);

	*status = ctx->ops->status_read(ctx);
	*npc = ctx->ops->npc_read(ctx);

	spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
	clear_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags);
	spu_switch_log_notify(NULL, ctx, SWITCH_LOG_EXIT, *status);
	spu_release(ctx);

	if (signal_pending(current))
		ret = -ERESTARTSYS;

	return ret;
}

/*
 * SPU syscall restarting is tricky because we violate the basic
 * assumption that the signal handler is running on the interrupted
 * thread. Here instead, the handler runs in PowerPC user space,
 * while the syscall was issued from the SPU.
 * This means we can only do a very rough approximation of POSIX
 * signal semantics.
 */
static int spu_handle_restartsys(struct spu_context *ctx, long *spu_ret,
		unsigned int *npc)
{
	int ret;

	switch (*spu_ret) {
	case -ERESTARTSYS:
	case -ERESTARTNOINTR:
		/*
		 * Enter the regular syscall restarting for
		 * sys_spu_run, then restart the SPU syscall
		 * callback.
		 */
		*npc -= 8;
		ret = -ERESTARTSYS;
		break;
	case -ERESTARTNOHAND:
	case -ERESTART_RESTARTBLOCK:
		/*
		 * Restart block is too hard for now, just return -EINTR
		 * to the SPU.
		 * ERESTARTNOHAND comes from sys_pause, we also return
		 * -EINTR from there.
		 * Assume that we need to be restarted ourselves though.
		 */
		*spu_ret = -EINTR;
		ret = -ERESTARTSYS;
		break;
	default:
		printk(KERN_WARNING "%s: unexpected return code %ld\n",
				__func__, *spu_ret);
		ret = 0;
	}
	return ret;
}

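/*
 * Handle an SPU-issued system call: fetch the syscall block from SPU
 * local store, run the system call on the PPE side via
 * spu_sys_callback (dropping the context lock around it), write the
 * result back into local store and restart the SPU past the callback.
 */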
static int spu_process_callback(struct spu_context *ctx)
{
	struct spu_syscall_block s;
	u32 ls_pointer, npc;
	void __iomem *ls;
	long spu_ret;
	int ret;

	/* get syscall block from local store */
	npc = ctx->ops->npc_read(ctx) & ~3;
	ls = (void __iomem *)ctx->ops->get_ls(ctx);
	ls_pointer = in_be32(ls + npc);
	if (ls_pointer > (LS_SIZE - sizeof(s)))
		return -EFAULT;
	memcpy_fromio(&s, ls + ls_pointer, sizeof(s));

	/* do actual syscall without pinning the spu */
	ret = 0;
	spu_ret = -ENOSYS;
	npc += 4;

	if (s.nr_ret < NR_syscalls) {
		spu_release(ctx);
		/* do actual system call from here */
		spu_ret = spu_sys_callback(&s);
		if (spu_ret <= -ERESTARTSYS)
			ret = spu_handle_restartsys(ctx, &spu_ret, &npc);
		mutex_lock(&ctx->state_mutex);
		if (ret == -ERESTARTSYS)
			return ret;
	}

	/* need to re-get the ls, as it may have changed when we released the
	 * spu */
	ls = (void __iomem *)ctx->ops->get_ls(ctx);

	/* write result, jump over indirect pointer */
	memcpy_toio(ls + ls_pointer, &spu_ret, sizeof(spu_ret));
	ctx->ops->npc_write(ctx, npc);
	ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
	return ret;
}

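/*
 * Main entry point for the spu_run system call: start the SPU and
 * loop handling stop conditions (syscall callbacks, class 0/1
 * exceptions, scheduler notifications) until the SPU stops for a
 * reason that must be reported back to userspace.
 */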
long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event)
{
	int ret;
	struct spu *spu;
	u32 status;

	if (mutex_lock_interruptible(&ctx->run_mutex))
		return -ERESTARTSYS;

	ctx->event_return = 0;

	ret = spu_acquire(ctx);
	if (ret)
		goto out_unlock;

	spu_enable_spu(ctx);

	spu_update_sched_info(ctx);

	ret = spu_run_init(ctx, npc);
	if (ret) {
		spu_release(ctx);
		goto out;
	}

	do {
		ret = spufs_wait(ctx->stop_wq, spu_stopped(ctx, &status));
		if (unlikely(ret)) {
			/*
			 * This is nasty: we need the state_mutex for all the
			 * bookkeeping even if the syscall was interrupted by
			 * a signal. ewww.
			 */
			mutex_lock(&ctx->state_mutex);
			break;
		}
		spu = ctx->spu;
		if (unlikely(test_and_clear_bit(SPU_SCHED_NOTIFY_ACTIVE,
				&ctx->sched_flags))) {
			if (!(status & SPU_STATUS_STOPPED_BY_STOP)) {
				spu_switch_notify(spu, ctx);
				continue;
			}
		}

		spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);

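		/* A stop code of 0x2104 is an SPU-issued system call that
		 * must be serviced on the PPE side before the SPU can
		 * continue. */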
		if ((status & SPU_STATUS_STOPPED_BY_STOP) &&
		    (status >> SPU_STOP_STATUS_SHIFT == 0x2104)) {
			ret = spu_process_callback(ctx);
			if (ret)
				break;
			status &= ~SPU_STATUS_STOPPED_BY_STOP;
		}
		ret = spufs_handle_class1(ctx);
		if (ret)
			break;

		ret = spufs_handle_class0(ctx);
		if (ret)
			break;

		if (signal_pending(current))
			ret = -ERESTARTSYS;
	} while (!ret && !(status & (SPU_STATUS_STOPPED_BY_STOP |
				      SPU_STATUS_STOPPED_BY_HALT |
				      SPU_STATUS_SINGLE_STEP)));

	spu_disable_spu(ctx);
	ret = spu_run_fini(ctx, npc, &status);
	spu_yield(ctx);

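	/* Stop codes in the 0x21xx range come from library-assisted calls;
	 * account them in the context statistics. */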
	if ((status & SPU_STATUS_STOPPED_BY_STOP) &&
	    (((status >> SPU_STOP_STATUS_SHIFT) & 0x3f00) == 0x2100))
		ctx->stats.libassist++;

	if ((ret == 0) ||
	    ((ret == -ERESTARTSYS) &&
	     ((status & SPU_STATUS_STOPPED_BY_HALT) ||
	      (status & SPU_STATUS_SINGLE_STEP) ||
	      ((status & SPU_STATUS_STOPPED_BY_STOP) &&
	       (status >> SPU_STOP_STATUS_SHIFT != 0x2104)))))
		ret = status;

	/* Note: we don't need to force_sig SIGTRAP on single-step
	 * since we have TIF_SINGLESTEP set, thus the kernel will do
	 * it upon return from the syscall anyway.
	 */
	if (unlikely(status & SPU_STATUS_SINGLE_STEP))
		ret = -ERESTARTSYS;
	else if (unlikely((status & SPU_STATUS_STOPPED_BY_STOP)
	    && (status >> SPU_STOP_STATUS_SHIFT) == 0x3fff)) {
		force_sig(SIGTRAP, current);
		ret = -ERESTARTSYS;
	}

out:
	*event = ctx->event_return;
out_unlock:
	mutex_unlock(&ctx->run_mutex);
	return ret;
}