wrappers for ->i_mutex access
[linux-block.git] / arch / powerpc / platforms / cell / spufs / file.c
CommitLineData
67207b96
AB
1/*
2 * SPU file system -- file contents
3 *
4 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
5 *
6 * Author: Arnd Bergmann <arndb@de.ibm.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 */
22
a33a7d73
AB
23#undef DEBUG
24
67207b96
AB
25#include <linux/fs.h>
26#include <linux/ioctl.h>
4b16f8e2 27#include <linux/export.h>
d88cfffa 28#include <linux/pagemap.h>
67207b96 29#include <linux/poll.h>
5110459f 30#include <linux/ptrace.h>
cbe709c1 31#include <linux/seq_file.h>
5a0e3ad6 32#include <linux/slab.h>
67207b96
AB
33
34#include <asm/io.h>
dfe1e09f 35#include <asm/time.h>
67207b96 36#include <asm/spu.h>
b9e3bd77 37#include <asm/spu_info.h>
67207b96
AB
38#include <asm/uaccess.h>
39
40#include "spufs.h"
ae142e0c 41#include "sputrace.h"
67207b96 42
27d5bf2a
BH
43#define SPUFS_MMAP_4K (PAGE_SIZE == 0x1000)
44
197b1a82
CH
45/* Simple attribute files */
46struct spufs_attr {
47 int (*get)(void *, u64 *);
48 int (*set)(void *, u64);
49 char get_buf[24]; /* enough to store a u64 and "\n\0" */
50 char set_buf[24];
51 void *data;
52 const char *fmt; /* format for read operation */
53 struct mutex mutex; /* protects access to these buffers */
54};
55
56static int spufs_attr_open(struct inode *inode, struct file *file,
57 int (*get)(void *, u64 *), int (*set)(void *, u64),
58 const char *fmt)
59{
60 struct spufs_attr *attr;
61
62 attr = kmalloc(sizeof(*attr), GFP_KERNEL);
63 if (!attr)
64 return -ENOMEM;
65
66 attr->get = get;
67 attr->set = set;
68 attr->data = inode->i_private;
69 attr->fmt = fmt;
70 mutex_init(&attr->mutex);
71 file->private_data = attr;
72
73 return nonseekable_open(inode, file);
74}
75
76static int spufs_attr_release(struct inode *inode, struct file *file)
77{
78 kfree(file->private_data);
79 return 0;
80}
81
82static ssize_t spufs_attr_read(struct file *file, char __user *buf,
83 size_t len, loff_t *ppos)
84{
85 struct spufs_attr *attr;
86 size_t size;
87 ssize_t ret;
88
89 attr = file->private_data;
90 if (!attr->get)
91 return -EACCES;
92
93 ret = mutex_lock_interruptible(&attr->mutex);
94 if (ret)
95 return ret;
96
97 if (*ppos) { /* continued read */
98 size = strlen(attr->get_buf);
99 } else { /* first read */
100 u64 val;
101 ret = attr->get(attr->data, &val);
102 if (ret)
103 goto out;
104
105 size = scnprintf(attr->get_buf, sizeof(attr->get_buf),
106 attr->fmt, (unsigned long long)val);
107 }
108
109 ret = simple_read_from_buffer(buf, len, ppos, attr->get_buf, size);
110out:
111 mutex_unlock(&attr->mutex);
112 return ret;
113}
114
115static ssize_t spufs_attr_write(struct file *file, const char __user *buf,
116 size_t len, loff_t *ppos)
117{
118 struct spufs_attr *attr;
119 u64 val;
120 size_t size;
121 ssize_t ret;
122
123 attr = file->private_data;
124 if (!attr->set)
125 return -EACCES;
126
127 ret = mutex_lock_interruptible(&attr->mutex);
128 if (ret)
129 return ret;
130
131 ret = -EFAULT;
132 size = min(sizeof(attr->set_buf) - 1, len);
133 if (copy_from_user(attr->set_buf, buf, size))
134 goto out;
135
136 ret = len; /* claim we got the whole input */
137 attr->set_buf[size] = '\0';
138 val = simple_strtol(attr->set_buf, NULL, 0);
139 attr->set(attr->data, val);
140out:
141 mutex_unlock(&attr->mutex);
142 return ret;
143}
144
/*
 * Declare a const file_operations named __fops backed by the spufs_attr
 * helpers; __simple_attr_check_format() type-checks __fmt against u64
 * at compile time.
 */
#define DEFINE_SPUFS_SIMPLE_ATTRIBUTE(__fops, __get, __set, __fmt)	\
static int __fops ## _open(struct inode *inode, struct file *file)	\
{									\
	__simple_attr_check_format(__fmt, 0ull);			\
	return spufs_attr_open(inode, file, __get, __set, __fmt);	\
}									\
static const struct file_operations __fops = {				\
	.open	 = __fops ## _open,					\
	.release = spufs_attr_release,					\
	.read	 = spufs_attr_read,					\
	.write	 = spufs_attr_write,					\
	.llseek	 = generic_file_llseek,					\
};
158
cbe709c1 159
67207b96
AB
160static int
161spufs_mem_open(struct inode *inode, struct file *file)
162{
163 struct spufs_inode_info *i = SPUFS_I(inode);
6df10a82 164 struct spu_context *ctx = i->i_ctx;
43c2bbd9 165
47d3a5fa 166 mutex_lock(&ctx->mapping_lock);
6df10a82 167 file->private_data = ctx;
43c2bbd9
CH
168 if (!i->i_openers++)
169 ctx->local_store = inode->i_mapping;
47d3a5fa 170 mutex_unlock(&ctx->mapping_lock);
43c2bbd9
CH
171 return 0;
172}
173
174static int
175spufs_mem_release(struct inode *inode, struct file *file)
176{
177 struct spufs_inode_info *i = SPUFS_I(inode);
178 struct spu_context *ctx = i->i_ctx;
179
47d3a5fa 180 mutex_lock(&ctx->mapping_lock);
43c2bbd9
CH
181 if (!--i->i_openers)
182 ctx->local_store = NULL;
47d3a5fa 183 mutex_unlock(&ctx->mapping_lock);
67207b96
AB
184 return 0;
185}
186
bf1ab978
DGM
187static ssize_t
188__spufs_mem_read(struct spu_context *ctx, char __user *buffer,
189 size_t size, loff_t *pos)
190{
191 char *local_store = ctx->ops->get_ls(ctx);
192 return simple_read_from_buffer(buffer, size, pos, local_store,
193 LS_SIZE);
194}
195
67207b96
AB
196static ssize_t
197spufs_mem_read(struct file *file, char __user *buffer,
198 size_t size, loff_t *pos)
199{
bf1ab978 200 struct spu_context *ctx = file->private_data;
aa0ed2bd 201 ssize_t ret;
67207b96 202
c9101bdb
CH
203 ret = spu_acquire(ctx);
204 if (ret)
205 return ret;
bf1ab978 206 ret = __spufs_mem_read(ctx, buffer, size, pos);
8b3d6663 207 spu_release(ctx);
c9101bdb 208
67207b96
AB
209 return ret;
210}
211
212static ssize_t
213spufs_mem_write(struct file *file, const char __user *buffer,
aa0ed2bd 214 size_t size, loff_t *ppos)
67207b96
AB
215{
216 struct spu_context *ctx = file->private_data;
8b3d6663 217 char *local_store;
aa0ed2bd 218 loff_t pos = *ppos;
8b3d6663 219 int ret;
67207b96 220
aa0ed2bd 221 if (pos > LS_SIZE)
67207b96 222 return -EFBIG;
8b3d6663 223
c9101bdb
CH
224 ret = spu_acquire(ctx);
225 if (ret)
226 return ret;
227
8b3d6663 228 local_store = ctx->ops->get_ls(ctx);
63c3b9d7 229 size = simple_write_to_buffer(local_store, LS_SIZE, ppos, buffer, size);
8b3d6663 230 spu_release(ctx);
aa0ed2bd 231
aa0ed2bd 232 return size;
67207b96
AB
233}
234
b1e2270f
NP
235static int
236spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
8b3d6663 237{
f1fa74f4 238 struct spu_context *ctx = vma->vm_file->private_data;
b1e2270f
NP
239 unsigned long address = (unsigned long)vmf->virtual_address;
240 unsigned long pfn, offset;
241
b1e2270f 242 offset = vmf->pgoff << PAGE_SHIFT;
128b8546 243 if (offset >= LS_SIZE)
b1e2270f 244 return VM_FAULT_SIGBUS;
128b8546 245
b1e2270f
NP
246 pr_debug("spufs_mem_mmap_fault address=0x%lx, offset=0x%lx\n",
247 address, offset);
f1fa74f4 248
c9101bdb 249 if (spu_acquire(ctx))
b1e2270f 250 return VM_FAULT_NOPAGE;
8b3d6663 251
ac91cb8d 252 if (ctx->state == SPU_STATE_SAVED) {
64b3d0e8 253 vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
78bde53e 254 pfn = vmalloc_to_pfn(ctx->csa.lscsa->ls + offset);
ac91cb8d 255 } else {
64b3d0e8 256 vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);
78bde53e 257 pfn = (ctx->spu->local_store_phys + offset) >> PAGE_SHIFT;
ac91cb8d 258 }
78bde53e 259 vm_insert_pfn(vma, address, pfn);
8b3d6663 260
78bde53e 261 spu_release(ctx);
8b3d6663 262
b1e2270f 263 return VM_FAULT_NOPAGE;
8b3d6663
AB
264}
265
a352894d
BH
266static int spufs_mem_mmap_access(struct vm_area_struct *vma,
267 unsigned long address,
268 void *buf, int len, int write)
269{
270 struct spu_context *ctx = vma->vm_file->private_data;
271 unsigned long offset = address - vma->vm_start;
272 char *local_store;
273
274 if (write && !(vma->vm_flags & VM_WRITE))
275 return -EACCES;
276 if (spu_acquire(ctx))
277 return -EINTR;
278 if ((offset + len) > vma->vm_end)
279 len = vma->vm_end - offset;
280 local_store = ctx->ops->get_ls(ctx);
281 if (write)
282 memcpy_toio(local_store + offset, buf, len);
283 else
284 memcpy_fromio(buf, local_store + offset, len);
285 spu_release(ctx);
286 return len;
287}
78bde53e 288
f0f37e2f 289static const struct vm_operations_struct spufs_mem_mmap_vmops = {
b1e2270f 290 .fault = spufs_mem_mmap_fault,
a352894d 291 .access = spufs_mem_mmap_access,
8b3d6663
AB
292};
293
f1fa74f4 294static int spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
67207b96 295{
8b3d6663
AB
296 if (!(vma->vm_flags & VM_SHARED))
297 return -EINVAL;
67207b96 298
78bde53e 299 vma->vm_flags |= VM_IO | VM_PFNMAP;
64b3d0e8 300 vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);
8b3d6663
AB
301
302 vma->vm_ops = &spufs_mem_mmap_vmops;
67207b96
AB
303 return 0;
304}
305
5dfe4c96 306static const struct file_operations spufs_mem_fops = {
7022543e
JK
307 .open = spufs_mem_open,
308 .release = spufs_mem_release,
309 .read = spufs_mem_read,
310 .write = spufs_mem_write,
311 .llseek = generic_file_llseek,
312 .mmap = spufs_mem_mmap,
8b3d6663
AB
313};
314
b1e2270f
NP
315static int spufs_ps_fault(struct vm_area_struct *vma,
316 struct vm_fault *vmf,
78bde53e 317 unsigned long ps_offs,
27d5bf2a 318 unsigned long ps_size)
6df10a82 319{
6df10a82 320 struct spu_context *ctx = vma->vm_file->private_data;
b1e2270f 321 unsigned long area, offset = vmf->pgoff << PAGE_SHIFT;
eebead5b 322 int ret = 0;
6df10a82 323
b1e2270f 324 spu_context_nospu_trace(spufs_ps_fault__enter, ctx);
038200cf 325
27d5bf2a 326 if (offset >= ps_size)
b1e2270f 327 return VM_FAULT_SIGBUS;
6df10a82 328
60657263
JK
329 if (fatal_signal_pending(current))
330 return VM_FAULT_SIGBUS;
331
d5883137
JK
332 /*
333 * Because we release the mmap_sem, the context may be destroyed while
334 * we're in spu_wait. Grab an extra reference so it isn't destroyed
335 * in the meantime.
336 */
337 get_spu_context(ctx);
338
33bfd7a7
AB
339 /*
340 * We have to wait for context to be loaded before we have
341 * pages to hand out to the user, but we don't want to wait
342 * with the mmap_sem held.
343 * It is possible to drop the mmap_sem here, but then we need
b1e2270f 344 * to return VM_FAULT_NOPAGE because the mappings may have
33bfd7a7 345 * hanged.
78bde53e 346 */
c9101bdb 347 if (spu_acquire(ctx))
d5883137 348 goto refault;
c9101bdb 349
33bfd7a7
AB
350 if (ctx->state == SPU_STATE_SAVED) {
351 up_read(&current->mm->mmap_sem);
b1e2270f 352 spu_context_nospu_trace(spufs_ps_fault__sleep, ctx);
eebead5b 353 ret = spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
b1e2270f 354 spu_context_trace(spufs_ps_fault__wake, ctx, ctx->spu);
33bfd7a7 355 down_read(&current->mm->mmap_sem);
c9101bdb
CH
356 } else {
357 area = ctx->spu->problem_phys + ps_offs;
b1e2270f
NP
358 vm_insert_pfn(vma, (unsigned long)vmf->virtual_address,
359 (area + offset) >> PAGE_SHIFT);
360 spu_context_trace(spufs_ps_fault__insert, ctx, ctx->spu);
33bfd7a7 361 }
6df10a82 362
eebead5b
CH
363 if (!ret)
364 spu_release(ctx);
d5883137
JK
365
366refault:
367 put_spu_context(ctx);
b1e2270f 368 return VM_FAULT_NOPAGE;
6df10a82
MN
369}
370
27d5bf2a 371#if SPUFS_MMAP_4K
b1e2270f
NP
372static int spufs_cntl_mmap_fault(struct vm_area_struct *vma,
373 struct vm_fault *vmf)
6df10a82 374{
87ff6090 375 return spufs_ps_fault(vma, vmf, 0x4000, SPUFS_CNTL_MAP_SIZE);
6df10a82
MN
376}
377
f0f37e2f 378static const struct vm_operations_struct spufs_cntl_mmap_vmops = {
b1e2270f 379 .fault = spufs_cntl_mmap_fault,
6df10a82
MN
380};
381
382/*
383 * mmap support for problem state control area [0x4000 - 0x4fff].
6df10a82
MN
384 */
385static int spufs_cntl_mmap(struct file *file, struct vm_area_struct *vma)
386{
387 if (!(vma->vm_flags & VM_SHARED))
388 return -EINVAL;
389
78bde53e 390 vma->vm_flags |= VM_IO | VM_PFNMAP;
64b3d0e8 391 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
6df10a82
MN
392
393 vma->vm_ops = &spufs_cntl_mmap_vmops;
394 return 0;
395}
27d5bf2a
BH
396#else /* SPUFS_MMAP_4K */
397#define spufs_cntl_mmap NULL
398#endif /* !SPUFS_MMAP_4K */
6df10a82 399
197b1a82 400static int spufs_cntl_get(void *data, u64 *val)
6df10a82 401{
e1dbff2b 402 struct spu_context *ctx = data;
c9101bdb 403 int ret;
6df10a82 404
c9101bdb
CH
405 ret = spu_acquire(ctx);
406 if (ret)
407 return ret;
197b1a82 408 *val = ctx->ops->status_read(ctx);
e1dbff2b
AB
409 spu_release(ctx);
410
197b1a82 411 return 0;
6df10a82
MN
412}
413
197b1a82 414static int spufs_cntl_set(void *data, u64 val)
6df10a82 415{
e1dbff2b 416 struct spu_context *ctx = data;
c9101bdb 417 int ret;
e1dbff2b 418
c9101bdb
CH
419 ret = spu_acquire(ctx);
420 if (ret)
421 return ret;
e1dbff2b
AB
422 ctx->ops->runcntl_write(ctx, val);
423 spu_release(ctx);
197b1a82
CH
424
425 return 0;
6df10a82
MN
426}
427
e1dbff2b 428static int spufs_cntl_open(struct inode *inode, struct file *file)
6df10a82 429{
e1dbff2b
AB
430 struct spufs_inode_info *i = SPUFS_I(inode);
431 struct spu_context *ctx = i->i_ctx;
432
47d3a5fa 433 mutex_lock(&ctx->mapping_lock);
e1dbff2b 434 file->private_data = ctx;
43c2bbd9
CH
435 if (!i->i_openers++)
436 ctx->cntl = inode->i_mapping;
47d3a5fa 437 mutex_unlock(&ctx->mapping_lock);
8b88b099 438 return simple_attr_open(inode, file, spufs_cntl_get,
e1dbff2b 439 spufs_cntl_set, "0x%08lx");
6df10a82
MN
440}
441
43c2bbd9
CH
442static int
443spufs_cntl_release(struct inode *inode, struct file *file)
444{
445 struct spufs_inode_info *i = SPUFS_I(inode);
446 struct spu_context *ctx = i->i_ctx;
447
74bedc4d 448 simple_attr_release(inode, file);
43c2bbd9 449
47d3a5fa 450 mutex_lock(&ctx->mapping_lock);
43c2bbd9
CH
451 if (!--i->i_openers)
452 ctx->cntl = NULL;
47d3a5fa 453 mutex_unlock(&ctx->mapping_lock);
43c2bbd9
CH
454 return 0;
455}
456
5dfe4c96 457static const struct file_operations spufs_cntl_fops = {
6df10a82 458 .open = spufs_cntl_open,
43c2bbd9 459 .release = spufs_cntl_release,
8b88b099
CH
460 .read = simple_attr_read,
461 .write = simple_attr_write,
fc15351d 462 .llseek = generic_file_llseek,
6df10a82 463 .mmap = spufs_cntl_mmap,
6df10a82
MN
464};
465
8b3d6663
AB
466static int
467spufs_regs_open(struct inode *inode, struct file *file)
468{
469 struct spufs_inode_info *i = SPUFS_I(inode);
470 file->private_data = i->i_ctx;
471 return 0;
472}
473
bf1ab978
DGM
474static ssize_t
475__spufs_regs_read(struct spu_context *ctx, char __user *buffer,
476 size_t size, loff_t *pos)
477{
478 struct spu_lscsa *lscsa = ctx->csa.lscsa;
479 return simple_read_from_buffer(buffer, size, pos,
480 lscsa->gprs, sizeof lscsa->gprs);
481}
482
8b3d6663
AB
483static ssize_t
484spufs_regs_read(struct file *file, char __user *buffer,
485 size_t size, loff_t *pos)
486{
8b3d6663 487 int ret;
bf1ab978 488 struct spu_context *ctx = file->private_data;
8b3d6663 489
f027faa2
JK
490 /* pre-check for file position: if we'd return EOF, there's no point
491 * causing a deschedule */
492 if (*pos >= sizeof(ctx->csa.lscsa->gprs))
493 return 0;
494
c9101bdb
CH
495 ret = spu_acquire_saved(ctx);
496 if (ret)
497 return ret;
bf1ab978 498 ret = __spufs_regs_read(ctx, buffer, size, pos);
27b1ea09 499 spu_release_saved(ctx);
8b3d6663
AB
500 return ret;
501}
502
503static ssize_t
504spufs_regs_write(struct file *file, const char __user *buffer,
505 size_t size, loff_t *pos)
506{
507 struct spu_context *ctx = file->private_data;
508 struct spu_lscsa *lscsa = ctx->csa.lscsa;
509 int ret;
510
d219889b 511 if (*pos >= sizeof(lscsa->gprs))
8b3d6663 512 return -EFBIG;
d219889b 513
c9101bdb
CH
514 ret = spu_acquire_saved(ctx);
515 if (ret)
516 return ret;
8b3d6663 517
63c3b9d7
AM
518 size = simple_write_to_buffer(lscsa->gprs, sizeof(lscsa->gprs), pos,
519 buffer, size);
8b3d6663 520
27b1ea09 521 spu_release_saved(ctx);
63c3b9d7 522 return size;
8b3d6663
AB
523}
524
5dfe4c96 525static const struct file_operations spufs_regs_fops = {
8b3d6663
AB
526 .open = spufs_regs_open,
527 .read = spufs_regs_read,
528 .write = spufs_regs_write,
67207b96
AB
529 .llseek = generic_file_llseek,
530};
531
bf1ab978
DGM
532static ssize_t
533__spufs_fpcr_read(struct spu_context *ctx, char __user * buffer,
534 size_t size, loff_t * pos)
535{
536 struct spu_lscsa *lscsa = ctx->csa.lscsa;
537 return simple_read_from_buffer(buffer, size, pos,
538 &lscsa->fpcr, sizeof(lscsa->fpcr));
539}
540
8b3d6663
AB
541static ssize_t
542spufs_fpcr_read(struct file *file, char __user * buffer,
543 size_t size, loff_t * pos)
544{
8b3d6663 545 int ret;
bf1ab978 546 struct spu_context *ctx = file->private_data;
8b3d6663 547
c9101bdb
CH
548 ret = spu_acquire_saved(ctx);
549 if (ret)
550 return ret;
bf1ab978 551 ret = __spufs_fpcr_read(ctx, buffer, size, pos);
27b1ea09 552 spu_release_saved(ctx);
8b3d6663
AB
553 return ret;
554}
555
556static ssize_t
557spufs_fpcr_write(struct file *file, const char __user * buffer,
558 size_t size, loff_t * pos)
559{
560 struct spu_context *ctx = file->private_data;
561 struct spu_lscsa *lscsa = ctx->csa.lscsa;
562 int ret;
563
d219889b 564 if (*pos >= sizeof(lscsa->fpcr))
8b3d6663 565 return -EFBIG;
8b3d6663 566
c9101bdb
CH
567 ret = spu_acquire_saved(ctx);
568 if (ret)
569 return ret;
8b3d6663 570
63c3b9d7
AM
571 size = simple_write_to_buffer(&lscsa->fpcr, sizeof(lscsa->fpcr), pos,
572 buffer, size);
8b3d6663 573
27b1ea09 574 spu_release_saved(ctx);
63c3b9d7 575 return size;
8b3d6663
AB
576}
577
5dfe4c96 578static const struct file_operations spufs_fpcr_fops = {
8b3d6663
AB
579 .open = spufs_regs_open,
580 .read = spufs_fpcr_read,
581 .write = spufs_fpcr_write,
582 .llseek = generic_file_llseek,
583};
584
67207b96
AB
585/* generic open function for all pipe-like files */
586static int spufs_pipe_open(struct inode *inode, struct file *file)
587{
588 struct spufs_inode_info *i = SPUFS_I(inode);
589 file->private_data = i->i_ctx;
590
591 return nonseekable_open(inode, file);
592}
593
cdcc89bb
AB
594/*
595 * Read as many bytes from the mailbox as possible, until
596 * one of the conditions becomes true:
597 *
598 * - no more data available in the mailbox
599 * - end of the user provided buffer
600 * - end of the mapped area
601 */
67207b96
AB
602static ssize_t spufs_mbox_read(struct file *file, char __user *buf,
603 size_t len, loff_t *pos)
604{
8b3d6663 605 struct spu_context *ctx = file->private_data;
cdcc89bb
AB
606 u32 mbox_data, __user *udata;
607 ssize_t count;
67207b96
AB
608
609 if (len < 4)
610 return -EINVAL;
611
cdcc89bb
AB
612 if (!access_ok(VERIFY_WRITE, buf, len))
613 return -EFAULT;
614
615 udata = (void __user *)buf;
616
c9101bdb
CH
617 count = spu_acquire(ctx);
618 if (count)
619 return count;
620
274cef5e 621 for (count = 0; (count + 4) <= len; count += 4, udata++) {
cdcc89bb
AB
622 int ret;
623 ret = ctx->ops->mbox_read(ctx, &mbox_data);
624 if (ret == 0)
625 break;
626
627 /*
628 * at the end of the mapped area, we can fault
629 * but still need to return the data we have
630 * read successfully so far.
631 */
632 ret = __put_user(mbox_data, udata);
633 if (ret) {
634 if (!count)
635 count = -EFAULT;
636 break;
637 }
638 }
8b3d6663 639 spu_release(ctx);
67207b96 640
cdcc89bb
AB
641 if (!count)
642 count = -EAGAIN;
67207b96 643
cdcc89bb 644 return count;
67207b96
AB
645}
646
5dfe4c96 647static const struct file_operations spufs_mbox_fops = {
67207b96
AB
648 .open = spufs_pipe_open,
649 .read = spufs_mbox_read,
fc15351d 650 .llseek = no_llseek,
67207b96
AB
651};
652
653static ssize_t spufs_mbox_stat_read(struct file *file, char __user *buf,
654 size_t len, loff_t *pos)
655{
8b3d6663 656 struct spu_context *ctx = file->private_data;
c9101bdb 657 ssize_t ret;
67207b96
AB
658 u32 mbox_stat;
659
660 if (len < 4)
661 return -EINVAL;
662
c9101bdb
CH
663 ret = spu_acquire(ctx);
664 if (ret)
665 return ret;
8b3d6663
AB
666
667 mbox_stat = ctx->ops->mbox_stat_read(ctx) & 0xff;
668
669 spu_release(ctx);
67207b96
AB
670
671 if (copy_to_user(buf, &mbox_stat, sizeof mbox_stat))
672 return -EFAULT;
673
674 return 4;
675}
676
5dfe4c96 677static const struct file_operations spufs_mbox_stat_fops = {
67207b96
AB
678 .open = spufs_pipe_open,
679 .read = spufs_mbox_stat_read,
fc15351d 680 .llseek = no_llseek,
67207b96
AB
681};
682
683/* low-level ibox access function */
8b3d6663 684size_t spu_ibox_read(struct spu_context *ctx, u32 *data)
67207b96 685{
8b3d6663
AB
686 return ctx->ops->ibox_read(ctx, data);
687}
67207b96 688
8b3d6663
AB
689static int spufs_ibox_fasync(int fd, struct file *file, int on)
690{
691 struct spu_context *ctx = file->private_data;
67207b96 692
8b3d6663 693 return fasync_helper(fd, file, on, &ctx->ibox_fasync);
67207b96 694}
67207b96 695
8b3d6663
AB
696/* interrupt-level ibox callback function. */
697void spufs_ibox_callback(struct spu *spu)
67207b96 698{
8b3d6663
AB
699 struct spu_context *ctx = spu->ctx;
700
e65c2f6f
LB
701 if (!ctx)
702 return;
703
8b3d6663
AB
704 wake_up_all(&ctx->ibox_wq);
705 kill_fasync(&ctx->ibox_fasync, SIGIO, POLLIN);
67207b96
AB
706}
707
cdcc89bb
AB
708/*
709 * Read as many bytes from the interrupt mailbox as possible, until
710 * one of the conditions becomes true:
711 *
712 * - no more data available in the mailbox
713 * - end of the user provided buffer
714 * - end of the mapped area
715 *
716 * If the file is opened without O_NONBLOCK, we wait here until
717 * any data is available, but return when we have been able to
718 * read something.
719 */
67207b96
AB
720static ssize_t spufs_ibox_read(struct file *file, char __user *buf,
721 size_t len, loff_t *pos)
722{
8b3d6663 723 struct spu_context *ctx = file->private_data;
cdcc89bb
AB
724 u32 ibox_data, __user *udata;
725 ssize_t count;
67207b96
AB
726
727 if (len < 4)
728 return -EINVAL;
729
cdcc89bb
AB
730 if (!access_ok(VERIFY_WRITE, buf, len))
731 return -EFAULT;
732
733 udata = (void __user *)buf;
734
c9101bdb
CH
735 count = spu_acquire(ctx);
736 if (count)
eebead5b 737 goto out;
67207b96 738
cdcc89bb
AB
739 /* wait only for the first element */
740 count = 0;
67207b96 741 if (file->f_flags & O_NONBLOCK) {
eebead5b 742 if (!spu_ibox_read(ctx, &ibox_data)) {
cdcc89bb 743 count = -EAGAIN;
eebead5b
CH
744 goto out_unlock;
745 }
67207b96 746 } else {
cdcc89bb 747 count = spufs_wait(ctx->ibox_wq, spu_ibox_read(ctx, &ibox_data));
eebead5b
CH
748 if (count)
749 goto out;
67207b96
AB
750 }
751
cdcc89bb
AB
752 /* if we can't write at all, return -EFAULT */
753 count = __put_user(ibox_data, udata);
754 if (count)
eebead5b 755 goto out_unlock;
8b3d6663 756
cdcc89bb
AB
757 for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
758 int ret;
759 ret = ctx->ops->ibox_read(ctx, &ibox_data);
760 if (ret == 0)
761 break;
762 /*
763 * at the end of the mapped area, we can fault
764 * but still need to return the data we have
765 * read successfully so far.
766 */
767 ret = __put_user(ibox_data, udata);
768 if (ret)
769 break;
770 }
67207b96 771
eebead5b 772out_unlock:
cdcc89bb 773 spu_release(ctx);
eebead5b 774out:
cdcc89bb 775 return count;
67207b96
AB
776}
777
778static unsigned int spufs_ibox_poll(struct file *file, poll_table *wait)
779{
8b3d6663 780 struct spu_context *ctx = file->private_data;
67207b96
AB
781 unsigned int mask;
782
8b3d6663 783 poll_wait(file, &ctx->ibox_wq, wait);
67207b96 784
c9101bdb
CH
785 /*
786 * For now keep this uninterruptible and also ignore the rule
787 * that poll should not sleep. Will be fixed later.
788 */
789 mutex_lock(&ctx->state_mutex);
3a843d7c
AB
790 mask = ctx->ops->mbox_stat_poll(ctx, POLLIN | POLLRDNORM);
791 spu_release(ctx);
67207b96
AB
792
793 return mask;
794}
795
5dfe4c96 796static const struct file_operations spufs_ibox_fops = {
67207b96
AB
797 .open = spufs_pipe_open,
798 .read = spufs_ibox_read,
799 .poll = spufs_ibox_poll,
800 .fasync = spufs_ibox_fasync,
fc15351d 801 .llseek = no_llseek,
67207b96
AB
802};
803
804static ssize_t spufs_ibox_stat_read(struct file *file, char __user *buf,
805 size_t len, loff_t *pos)
806{
8b3d6663 807 struct spu_context *ctx = file->private_data;
c9101bdb 808 ssize_t ret;
67207b96
AB
809 u32 ibox_stat;
810
811 if (len < 4)
812 return -EINVAL;
813
c9101bdb
CH
814 ret = spu_acquire(ctx);
815 if (ret)
816 return ret;
8b3d6663
AB
817 ibox_stat = (ctx->ops->mbox_stat_read(ctx) >> 16) & 0xff;
818 spu_release(ctx);
67207b96
AB
819
820 if (copy_to_user(buf, &ibox_stat, sizeof ibox_stat))
821 return -EFAULT;
822
823 return 4;
824}
825
5dfe4c96 826static const struct file_operations spufs_ibox_stat_fops = {
67207b96
AB
827 .open = spufs_pipe_open,
828 .read = spufs_ibox_stat_read,
fc15351d 829 .llseek = no_llseek,
67207b96
AB
830};
831
832/* low-level mailbox write */
8b3d6663 833size_t spu_wbox_write(struct spu_context *ctx, u32 data)
67207b96 834{
8b3d6663
AB
835 return ctx->ops->wbox_write(ctx, data);
836}
67207b96 837
8b3d6663
AB
838static int spufs_wbox_fasync(int fd, struct file *file, int on)
839{
840 struct spu_context *ctx = file->private_data;
841 int ret;
67207b96 842
8b3d6663 843 ret = fasync_helper(fd, file, on, &ctx->wbox_fasync);
67207b96 844
67207b96
AB
845 return ret;
846}
67207b96 847
8b3d6663
AB
848/* interrupt-level wbox callback function. */
849void spufs_wbox_callback(struct spu *spu)
67207b96 850{
8b3d6663
AB
851 struct spu_context *ctx = spu->ctx;
852
e65c2f6f
LB
853 if (!ctx)
854 return;
855
8b3d6663
AB
856 wake_up_all(&ctx->wbox_wq);
857 kill_fasync(&ctx->wbox_fasync, SIGIO, POLLOUT);
67207b96
AB
858}
859
cdcc89bb
AB
860/*
861 * Write as many bytes to the interrupt mailbox as possible, until
862 * one of the conditions becomes true:
863 *
864 * - the mailbox is full
865 * - end of the user provided buffer
866 * - end of the mapped area
867 *
868 * If the file is opened without O_NONBLOCK, we wait here until
869 * space is availabyl, but return when we have been able to
870 * write something.
871 */
67207b96
AB
872static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,
873 size_t len, loff_t *pos)
874{
8b3d6663 875 struct spu_context *ctx = file->private_data;
cdcc89bb
AB
876 u32 wbox_data, __user *udata;
877 ssize_t count;
67207b96
AB
878
879 if (len < 4)
880 return -EINVAL;
881
cdcc89bb
AB
882 udata = (void __user *)buf;
883 if (!access_ok(VERIFY_READ, buf, len))
884 return -EFAULT;
885
886 if (__get_user(wbox_data, udata))
67207b96
AB
887 return -EFAULT;
888
c9101bdb
CH
889 count = spu_acquire(ctx);
890 if (count)
eebead5b 891 goto out;
8b3d6663 892
cdcc89bb
AB
893 /*
894 * make sure we can at least write one element, by waiting
895 * in case of !O_NONBLOCK
896 */
897 count = 0;
67207b96 898 if (file->f_flags & O_NONBLOCK) {
eebead5b 899 if (!spu_wbox_write(ctx, wbox_data)) {
cdcc89bb 900 count = -EAGAIN;
eebead5b
CH
901 goto out_unlock;
902 }
67207b96 903 } else {
cdcc89bb 904 count = spufs_wait(ctx->wbox_wq, spu_wbox_write(ctx, wbox_data));
eebead5b
CH
905 if (count)
906 goto out;
67207b96
AB
907 }
908
8b3d6663 909
96de0e25 910 /* write as much as possible */
cdcc89bb
AB
911 for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
912 int ret;
913 ret = __get_user(wbox_data, udata);
914 if (ret)
915 break;
916
917 ret = spu_wbox_write(ctx, wbox_data);
918 if (ret == 0)
919 break;
920 }
921
eebead5b 922out_unlock:
cdcc89bb 923 spu_release(ctx);
eebead5b 924out:
cdcc89bb 925 return count;
67207b96
AB
926}
927
928static unsigned int spufs_wbox_poll(struct file *file, poll_table *wait)
929{
8b3d6663 930 struct spu_context *ctx = file->private_data;
67207b96
AB
931 unsigned int mask;
932
8b3d6663 933 poll_wait(file, &ctx->wbox_wq, wait);
67207b96 934
c9101bdb
CH
935 /*
936 * For now keep this uninterruptible and also ignore the rule
937 * that poll should not sleep. Will be fixed later.
938 */
939 mutex_lock(&ctx->state_mutex);
3a843d7c
AB
940 mask = ctx->ops->mbox_stat_poll(ctx, POLLOUT | POLLWRNORM);
941 spu_release(ctx);
67207b96
AB
942
943 return mask;
944}
945
5dfe4c96 946static const struct file_operations spufs_wbox_fops = {
67207b96
AB
947 .open = spufs_pipe_open,
948 .write = spufs_wbox_write,
949 .poll = spufs_wbox_poll,
950 .fasync = spufs_wbox_fasync,
fc15351d 951 .llseek = no_llseek,
67207b96
AB
952};
953
954static ssize_t spufs_wbox_stat_read(struct file *file, char __user *buf,
955 size_t len, loff_t *pos)
956{
8b3d6663 957 struct spu_context *ctx = file->private_data;
c9101bdb 958 ssize_t ret;
67207b96
AB
959 u32 wbox_stat;
960
961 if (len < 4)
962 return -EINVAL;
963
c9101bdb
CH
964 ret = spu_acquire(ctx);
965 if (ret)
966 return ret;
8b3d6663
AB
967 wbox_stat = (ctx->ops->mbox_stat_read(ctx) >> 8) & 0xff;
968 spu_release(ctx);
67207b96
AB
969
970 if (copy_to_user(buf, &wbox_stat, sizeof wbox_stat))
971 return -EFAULT;
972
973 return 4;
974}
975
5dfe4c96 976static const struct file_operations spufs_wbox_stat_fops = {
67207b96
AB
977 .open = spufs_pipe_open,
978 .read = spufs_wbox_stat_read,
fc15351d 979 .llseek = no_llseek,
67207b96
AB
980};
981
6df10a82
MN
982static int spufs_signal1_open(struct inode *inode, struct file *file)
983{
984 struct spufs_inode_info *i = SPUFS_I(inode);
985 struct spu_context *ctx = i->i_ctx;
43c2bbd9 986
47d3a5fa 987 mutex_lock(&ctx->mapping_lock);
6df10a82 988 file->private_data = ctx;
43c2bbd9
CH
989 if (!i->i_openers++)
990 ctx->signal1 = inode->i_mapping;
47d3a5fa 991 mutex_unlock(&ctx->mapping_lock);
6df10a82
MN
992 return nonseekable_open(inode, file);
993}
994
43c2bbd9
CH
995static int
996spufs_signal1_release(struct inode *inode, struct file *file)
997{
998 struct spufs_inode_info *i = SPUFS_I(inode);
999 struct spu_context *ctx = i->i_ctx;
1000
47d3a5fa 1001 mutex_lock(&ctx->mapping_lock);
43c2bbd9
CH
1002 if (!--i->i_openers)
1003 ctx->signal1 = NULL;
47d3a5fa 1004 mutex_unlock(&ctx->mapping_lock);
43c2bbd9
CH
1005 return 0;
1006}
1007
bf1ab978 1008static ssize_t __spufs_signal1_read(struct spu_context *ctx, char __user *buf,
67207b96
AB
1009 size_t len, loff_t *pos)
1010{
17f88ceb 1011 int ret = 0;
67207b96
AB
1012 u32 data;
1013
67207b96
AB
1014 if (len < 4)
1015 return -EINVAL;
1016
17f88ceb
DGM
1017 if (ctx->csa.spu_chnlcnt_RW[3]) {
1018 data = ctx->csa.spu_chnldata_RW[3];
1019 ret = 4;
1020 }
8b3d6663 1021
17f88ceb
DGM
1022 if (!ret)
1023 goto out;
1024
67207b96
AB
1025 if (copy_to_user(buf, &data, 4))
1026 return -EFAULT;
1027
17f88ceb
DGM
1028out:
1029 return ret;
67207b96
AB
1030}
1031
bf1ab978
DGM
1032static ssize_t spufs_signal1_read(struct file *file, char __user *buf,
1033 size_t len, loff_t *pos)
1034{
1035 int ret;
1036 struct spu_context *ctx = file->private_data;
1037
c9101bdb
CH
1038 ret = spu_acquire_saved(ctx);
1039 if (ret)
1040 return ret;
bf1ab978 1041 ret = __spufs_signal1_read(ctx, buf, len, pos);
27b1ea09 1042 spu_release_saved(ctx);
bf1ab978
DGM
1043
1044 return ret;
1045}
1046
67207b96
AB
1047static ssize_t spufs_signal1_write(struct file *file, const char __user *buf,
1048 size_t len, loff_t *pos)
1049{
1050 struct spu_context *ctx;
c9101bdb 1051 ssize_t ret;
67207b96
AB
1052 u32 data;
1053
1054 ctx = file->private_data;
67207b96
AB
1055
1056 if (len < 4)
1057 return -EINVAL;
1058
1059 if (copy_from_user(&data, buf, 4))
1060 return -EFAULT;
1061
c9101bdb
CH
1062 ret = spu_acquire(ctx);
1063 if (ret)
1064 return ret;
8b3d6663
AB
1065 ctx->ops->signal1_write(ctx, data);
1066 spu_release(ctx);
67207b96
AB
1067
1068 return 4;
1069}
1070
b1e2270f
NP
1071static int
1072spufs_signal1_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
6df10a82 1073{
87ff6090
JK
1074#if SPUFS_SIGNAL_MAP_SIZE == 0x1000
1075 return spufs_ps_fault(vma, vmf, 0x14000, SPUFS_SIGNAL_MAP_SIZE);
1076#elif SPUFS_SIGNAL_MAP_SIZE == 0x10000
27d5bf2a
BH
1077 /* For 64k pages, both signal1 and signal2 can be used to mmap the whole
1078 * signal 1 and 2 area
1079 */
87ff6090 1080 return spufs_ps_fault(vma, vmf, 0x10000, SPUFS_SIGNAL_MAP_SIZE);
27d5bf2a
BH
1081#else
1082#error unsupported page size
1083#endif
6df10a82
MN
1084}
1085
f0f37e2f 1086static const struct vm_operations_struct spufs_signal1_mmap_vmops = {
b1e2270f 1087 .fault = spufs_signal1_mmap_fault,
6df10a82
MN
1088};
1089
1090static int spufs_signal1_mmap(struct file *file, struct vm_area_struct *vma)
1091{
1092 if (!(vma->vm_flags & VM_SHARED))
1093 return -EINVAL;
1094
78bde53e 1095 vma->vm_flags |= VM_IO | VM_PFNMAP;
64b3d0e8 1096 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
6df10a82
MN
1097
1098 vma->vm_ops = &spufs_signal1_mmap_vmops;
1099 return 0;
1100}
6df10a82 1101
5dfe4c96 1102static const struct file_operations spufs_signal1_fops = {
6df10a82 1103 .open = spufs_signal1_open,
43c2bbd9 1104 .release = spufs_signal1_release,
67207b96
AB
1105 .read = spufs_signal1_read,
1106 .write = spufs_signal1_write,
6df10a82 1107 .mmap = spufs_signal1_mmap,
fc15351d 1108 .llseek = no_llseek,
67207b96
AB
1109};
1110
d054b36f
JK
1111static const struct file_operations spufs_signal1_nosched_fops = {
1112 .open = spufs_signal1_open,
1113 .release = spufs_signal1_release,
1114 .write = spufs_signal1_write,
1115 .mmap = spufs_signal1_mmap,
fc15351d 1116 .llseek = no_llseek,
d054b36f
JK
1117};
1118
6df10a82
MN
1119static int spufs_signal2_open(struct inode *inode, struct file *file)
1120{
1121 struct spufs_inode_info *i = SPUFS_I(inode);
1122 struct spu_context *ctx = i->i_ctx;
43c2bbd9 1123
47d3a5fa 1124 mutex_lock(&ctx->mapping_lock);
6df10a82 1125 file->private_data = ctx;
43c2bbd9
CH
1126 if (!i->i_openers++)
1127 ctx->signal2 = inode->i_mapping;
47d3a5fa 1128 mutex_unlock(&ctx->mapping_lock);
6df10a82
MN
1129 return nonseekable_open(inode, file);
1130}
1131
43c2bbd9
CH
1132static int
1133spufs_signal2_release(struct inode *inode, struct file *file)
1134{
1135 struct spufs_inode_info *i = SPUFS_I(inode);
1136 struct spu_context *ctx = i->i_ctx;
1137
47d3a5fa 1138 mutex_lock(&ctx->mapping_lock);
43c2bbd9
CH
1139 if (!--i->i_openers)
1140 ctx->signal2 = NULL;
47d3a5fa 1141 mutex_unlock(&ctx->mapping_lock);
43c2bbd9
CH
1142 return 0;
1143}
1144
bf1ab978 1145static ssize_t __spufs_signal2_read(struct spu_context *ctx, char __user *buf,
67207b96
AB
1146 size_t len, loff_t *pos)
1147{
17f88ceb 1148 int ret = 0;
67207b96
AB
1149 u32 data;
1150
67207b96
AB
1151 if (len < 4)
1152 return -EINVAL;
1153
17f88ceb
DGM
1154 if (ctx->csa.spu_chnlcnt_RW[4]) {
1155 data = ctx->csa.spu_chnldata_RW[4];
1156 ret = 4;
1157 }
8b3d6663 1158
17f88ceb
DGM
1159 if (!ret)
1160 goto out;
1161
67207b96
AB
1162 if (copy_to_user(buf, &data, 4))
1163 return -EFAULT;
1164
17f88ceb 1165out:
bf1ab978
DGM
1166 return ret;
1167}
1168
1169static ssize_t spufs_signal2_read(struct file *file, char __user *buf,
1170 size_t len, loff_t *pos)
1171{
1172 struct spu_context *ctx = file->private_data;
1173 int ret;
1174
c9101bdb
CH
1175 ret = spu_acquire_saved(ctx);
1176 if (ret)
1177 return ret;
bf1ab978 1178 ret = __spufs_signal2_read(ctx, buf, len, pos);
27b1ea09 1179 spu_release_saved(ctx);
bf1ab978
DGM
1180
1181 return ret;
67207b96
AB
1182}
1183
1184static ssize_t spufs_signal2_write(struct file *file, const char __user *buf,
1185 size_t len, loff_t *pos)
1186{
1187 struct spu_context *ctx;
c9101bdb 1188 ssize_t ret;
67207b96
AB
1189 u32 data;
1190
1191 ctx = file->private_data;
67207b96
AB
1192
1193 if (len < 4)
1194 return -EINVAL;
1195
1196 if (copy_from_user(&data, buf, 4))
1197 return -EFAULT;
1198
c9101bdb
CH
1199 ret = spu_acquire(ctx);
1200 if (ret)
1201 return ret;
8b3d6663
AB
1202 ctx->ops->signal2_write(ctx, data);
1203 spu_release(ctx);
67207b96
AB
1204
1205 return 4;
1206}
1207
27d5bf2a 1208#if SPUFS_MMAP_4K
b1e2270f
NP
1209static int
1210spufs_signal2_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
6df10a82 1211{
87ff6090
JK
1212#if SPUFS_SIGNAL_MAP_SIZE == 0x1000
1213 return spufs_ps_fault(vma, vmf, 0x1c000, SPUFS_SIGNAL_MAP_SIZE);
1214#elif SPUFS_SIGNAL_MAP_SIZE == 0x10000
27d5bf2a
BH
1215 /* For 64k pages, both signal1 and signal2 can be used to mmap the whole
1216 * signal 1 and 2 area
1217 */
87ff6090 1218 return spufs_ps_fault(vma, vmf, 0x10000, SPUFS_SIGNAL_MAP_SIZE);
27d5bf2a
BH
1219#else
1220#error unsupported page size
1221#endif
6df10a82
MN
1222}
1223
f0f37e2f 1224static const struct vm_operations_struct spufs_signal2_mmap_vmops = {
b1e2270f 1225 .fault = spufs_signal2_mmap_fault,
6df10a82
MN
1226};
1227
1228static int spufs_signal2_mmap(struct file *file, struct vm_area_struct *vma)
1229{
1230 if (!(vma->vm_flags & VM_SHARED))
1231 return -EINVAL;
1232
78bde53e 1233 vma->vm_flags |= VM_IO | VM_PFNMAP;
64b3d0e8 1234 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
6df10a82
MN
1235
1236 vma->vm_ops = &spufs_signal2_mmap_vmops;
1237 return 0;
1238}
27d5bf2a
BH
1239#else /* SPUFS_MMAP_4K */
1240#define spufs_signal2_mmap NULL
1241#endif /* !SPUFS_MMAP_4K */
6df10a82 1242
5dfe4c96 1243static const struct file_operations spufs_signal2_fops = {
6df10a82 1244 .open = spufs_signal2_open,
43c2bbd9 1245 .release = spufs_signal2_release,
67207b96
AB
1246 .read = spufs_signal2_read,
1247 .write = spufs_signal2_write,
6df10a82 1248 .mmap = spufs_signal2_mmap,
fc15351d 1249 .llseek = no_llseek,
67207b96
AB
1250};
1251
d054b36f
JK
1252static const struct file_operations spufs_signal2_nosched_fops = {
1253 .open = spufs_signal2_open,
1254 .release = spufs_signal2_release,
1255 .write = spufs_signal2_write,
1256 .mmap = spufs_signal2_mmap,
fc15351d 1257 .llseek = no_llseek,
d054b36f
JK
1258};
1259
/*
 * This is a wrapper around DEFINE_SIMPLE_ATTRIBUTE which does the
 * work of acquiring (or not) the SPU context before calling through
 * to the actual get routine. The set routine is called directly.
 */
#define SPU_ATTR_NOACQUIRE	0
#define SPU_ATTR_ACQUIRE	1
#define SPU_ATTR_ACQUIRE_SAVED	2

#define DEFINE_SPUFS_ATTRIBUTE(__name, __get, __set, __fmt, __acquire)	\
static int __##__get(void *data, u64 *val)				\
{									\
	struct spu_context *ctx = data;					\
	int err = 0;							\
									\
	if (__acquire == SPU_ATTR_ACQUIRE) {				\
		/* runnable-state read: take the live context */	\
		err = spu_acquire(ctx);					\
		if (err)						\
			return err;					\
		*val = __get(ctx);					\
		spu_release(ctx);					\
	} else if (__acquire == SPU_ATTR_ACQUIRE_SAVED) {		\
		/* saved-state read: force the context into the CSA */	\
		err = spu_acquire_saved(ctx);				\
		if (err)						\
			return err;					\
		*val = __get(ctx);					\
		spu_release_saved(ctx);					\
	} else								\
		*val = __get(ctx);					\
									\
	return 0;							\
}									\
DEFINE_SPUFS_SIMPLE_ATTRIBUTE(__name, __##__get, __set, __fmt);
104f0cc2 1293
197b1a82 1294static int spufs_signal1_type_set(void *data, u64 val)
67207b96
AB
1295{
1296 struct spu_context *ctx = data;
c9101bdb 1297 int ret;
67207b96 1298
c9101bdb
CH
1299 ret = spu_acquire(ctx);
1300 if (ret)
1301 return ret;
8b3d6663
AB
1302 ctx->ops->signal1_type_set(ctx, val);
1303 spu_release(ctx);
197b1a82
CH
1304
1305 return 0;
67207b96
AB
1306}
1307
104f0cc2 1308static u64 spufs_signal1_type_get(struct spu_context *ctx)
bf1ab978 1309{
bf1ab978
DGM
1310 return ctx->ops->signal1_type_get(ctx);
1311}
104f0cc2 1312DEFINE_SPUFS_ATTRIBUTE(spufs_signal1_type, spufs_signal1_type_get,
af8b44e0 1313 spufs_signal1_type_set, "%llu\n", SPU_ATTR_ACQUIRE);
bf1ab978 1314
67207b96 1315
197b1a82 1316static int spufs_signal2_type_set(void *data, u64 val)
67207b96
AB
1317{
1318 struct spu_context *ctx = data;
c9101bdb 1319 int ret;
67207b96 1320
c9101bdb
CH
1321 ret = spu_acquire(ctx);
1322 if (ret)
1323 return ret;
8b3d6663
AB
1324 ctx->ops->signal2_type_set(ctx, val);
1325 spu_release(ctx);
197b1a82
CH
1326
1327 return 0;
67207b96
AB
1328}
1329
104f0cc2 1330static u64 spufs_signal2_type_get(struct spu_context *ctx)
bf1ab978 1331{
bf1ab978
DGM
1332 return ctx->ops->signal2_type_get(ctx);
1333}
104f0cc2 1334DEFINE_SPUFS_ATTRIBUTE(spufs_signal2_type, spufs_signal2_type_get,
af8b44e0 1335 spufs_signal2_type_set, "%llu\n", SPU_ATTR_ACQUIRE);
67207b96 1336
27d5bf2a 1337#if SPUFS_MMAP_4K
b1e2270f
NP
1338static int
1339spufs_mss_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
d9379c4b 1340{
87ff6090 1341 return spufs_ps_fault(vma, vmf, 0x0000, SPUFS_MSS_MAP_SIZE);
d9379c4b 1342}
1343
f0f37e2f 1344static const struct vm_operations_struct spufs_mss_mmap_vmops = {
b1e2270f 1345 .fault = spufs_mss_mmap_fault,
d9379c4b 1346};
1347
1348/*
1349 * mmap support for problem state MFC DMA area [0x0000 - 0x0fff].
d9379c4b 1350 */
1351static int spufs_mss_mmap(struct file *file, struct vm_area_struct *vma)
1352{
1353 if (!(vma->vm_flags & VM_SHARED))
1354 return -EINVAL;
1355
78bde53e 1356 vma->vm_flags |= VM_IO | VM_PFNMAP;
64b3d0e8 1357 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
d9379c4b 1358
1359 vma->vm_ops = &spufs_mss_mmap_vmops;
1360 return 0;
1361}
27d5bf2a
BH
1362#else /* SPUFS_MMAP_4K */
1363#define spufs_mss_mmap NULL
1364#endif /* !SPUFS_MMAP_4K */
d9379c4b 1365
1366static int spufs_mss_open(struct inode *inode, struct file *file)
1367{
1368 struct spufs_inode_info *i = SPUFS_I(inode);
17e0e270 1369 struct spu_context *ctx = i->i_ctx;
d9379c4b 1370
1371 file->private_data = i->i_ctx;
43c2bbd9 1372
47d3a5fa 1373 mutex_lock(&ctx->mapping_lock);
43c2bbd9
CH
1374 if (!i->i_openers++)
1375 ctx->mss = inode->i_mapping;
47d3a5fa 1376 mutex_unlock(&ctx->mapping_lock);
d9379c4b 1377 return nonseekable_open(inode, file);
1378}
1379
43c2bbd9
CH
1380static int
1381spufs_mss_release(struct inode *inode, struct file *file)
1382{
1383 struct spufs_inode_info *i = SPUFS_I(inode);
1384 struct spu_context *ctx = i->i_ctx;
1385
47d3a5fa 1386 mutex_lock(&ctx->mapping_lock);
43c2bbd9
CH
1387 if (!--i->i_openers)
1388 ctx->mss = NULL;
47d3a5fa 1389 mutex_unlock(&ctx->mapping_lock);
43c2bbd9
CH
1390 return 0;
1391}
1392
5dfe4c96 1393static const struct file_operations spufs_mss_fops = {
d9379c4b 1394 .open = spufs_mss_open,
43c2bbd9 1395 .release = spufs_mss_release,
d9379c4b 1396 .mmap = spufs_mss_mmap,
fc15351d 1397 .llseek = no_llseek,
27d5bf2a
BH
1398};
1399
b1e2270f
NP
1400static int
1401spufs_psmap_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
27d5bf2a 1402{
87ff6090 1403 return spufs_ps_fault(vma, vmf, 0x0000, SPUFS_PS_MAP_SIZE);
27d5bf2a
BH
1404}
1405
f0f37e2f 1406static const struct vm_operations_struct spufs_psmap_mmap_vmops = {
b1e2270f 1407 .fault = spufs_psmap_mmap_fault,
27d5bf2a
BH
1408};
1409
1410/*
1411 * mmap support for full problem state area [0x00000 - 0x1ffff].
1412 */
1413static int spufs_psmap_mmap(struct file *file, struct vm_area_struct *vma)
1414{
1415 if (!(vma->vm_flags & VM_SHARED))
1416 return -EINVAL;
1417
78bde53e 1418 vma->vm_flags |= VM_IO | VM_PFNMAP;
64b3d0e8 1419 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
27d5bf2a
BH
1420
1421 vma->vm_ops = &spufs_psmap_mmap_vmops;
1422 return 0;
1423}
1424
1425static int spufs_psmap_open(struct inode *inode, struct file *file)
1426{
1427 struct spufs_inode_info *i = SPUFS_I(inode);
17e0e270 1428 struct spu_context *ctx = i->i_ctx;
27d5bf2a 1429
47d3a5fa 1430 mutex_lock(&ctx->mapping_lock);
27d5bf2a 1431 file->private_data = i->i_ctx;
43c2bbd9
CH
1432 if (!i->i_openers++)
1433 ctx->psmap = inode->i_mapping;
47d3a5fa 1434 mutex_unlock(&ctx->mapping_lock);
27d5bf2a
BH
1435 return nonseekable_open(inode, file);
1436}
1437
43c2bbd9
CH
1438static int
1439spufs_psmap_release(struct inode *inode, struct file *file)
1440{
1441 struct spufs_inode_info *i = SPUFS_I(inode);
1442 struct spu_context *ctx = i->i_ctx;
1443
47d3a5fa 1444 mutex_lock(&ctx->mapping_lock);
43c2bbd9
CH
1445 if (!--i->i_openers)
1446 ctx->psmap = NULL;
47d3a5fa 1447 mutex_unlock(&ctx->mapping_lock);
43c2bbd9
CH
1448 return 0;
1449}
1450
5dfe4c96 1451static const struct file_operations spufs_psmap_fops = {
27d5bf2a 1452 .open = spufs_psmap_open,
43c2bbd9 1453 .release = spufs_psmap_release,
27d5bf2a 1454 .mmap = spufs_psmap_mmap,
fc15351d 1455 .llseek = no_llseek,
d9379c4b 1456};
1457
1458
27d5bf2a 1459#if SPUFS_MMAP_4K
b1e2270f
NP
1460static int
1461spufs_mfc_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
6df10a82 1462{
87ff6090 1463 return spufs_ps_fault(vma, vmf, 0x3000, SPUFS_MFC_MAP_SIZE);
6df10a82
MN
1464}
1465
f0f37e2f 1466static const struct vm_operations_struct spufs_mfc_mmap_vmops = {
b1e2270f 1467 .fault = spufs_mfc_mmap_fault,
6df10a82
MN
1468};
1469
1470/*
1471 * mmap support for problem state MFC DMA area [0x0000 - 0x0fff].
6df10a82
MN
1472 */
1473static int spufs_mfc_mmap(struct file *file, struct vm_area_struct *vma)
1474{
1475 if (!(vma->vm_flags & VM_SHARED))
1476 return -EINVAL;
1477
78bde53e 1478 vma->vm_flags |= VM_IO | VM_PFNMAP;
64b3d0e8 1479 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
6df10a82
MN
1480
1481 vma->vm_ops = &spufs_mfc_mmap_vmops;
1482 return 0;
1483}
27d5bf2a
BH
1484#else /* SPUFS_MMAP_4K */
1485#define spufs_mfc_mmap NULL
1486#endif /* !SPUFS_MMAP_4K */
a33a7d73
AB
1487
1488static int spufs_mfc_open(struct inode *inode, struct file *file)
1489{
1490 struct spufs_inode_info *i = SPUFS_I(inode);
1491 struct spu_context *ctx = i->i_ctx;
1492
1493 /* we don't want to deal with DMA into other processes */
1494 if (ctx->owner != current->mm)
1495 return -EINVAL;
1496
1497 if (atomic_read(&inode->i_count) != 1)
1498 return -EBUSY;
1499
47d3a5fa 1500 mutex_lock(&ctx->mapping_lock);
a33a7d73 1501 file->private_data = ctx;
43c2bbd9
CH
1502 if (!i->i_openers++)
1503 ctx->mfc = inode->i_mapping;
47d3a5fa 1504 mutex_unlock(&ctx->mapping_lock);
a33a7d73
AB
1505 return nonseekable_open(inode, file);
1506}
1507
43c2bbd9
CH
1508static int
1509spufs_mfc_release(struct inode *inode, struct file *file)
1510{
1511 struct spufs_inode_info *i = SPUFS_I(inode);
1512 struct spu_context *ctx = i->i_ctx;
1513
47d3a5fa 1514 mutex_lock(&ctx->mapping_lock);
43c2bbd9
CH
1515 if (!--i->i_openers)
1516 ctx->mfc = NULL;
47d3a5fa 1517 mutex_unlock(&ctx->mapping_lock);
43c2bbd9
CH
1518 return 0;
1519}
1520
a33a7d73
AB
1521/* interrupt-level mfc callback function. */
1522void spufs_mfc_callback(struct spu *spu)
1523{
1524 struct spu_context *ctx = spu->ctx;
1525
e65c2f6f
LB
1526 if (!ctx)
1527 return;
1528
a33a7d73
AB
1529 wake_up_all(&ctx->mfc_wq);
1530
e48b1b45 1531 pr_debug("%s %s\n", __func__, spu->name);
a33a7d73
AB
1532 if (ctx->mfc_fasync) {
1533 u32 free_elements, tagstatus;
1534 unsigned int mask;
1535
1536 /* no need for spu_acquire in interrupt context */
1537 free_elements = ctx->ops->get_mfc_free_elements(ctx);
1538 tagstatus = ctx->ops->read_mfc_tagstatus(ctx);
1539
1540 mask = 0;
1541 if (free_elements & 0xffff)
1542 mask |= POLLOUT;
1543 if (tagstatus & ctx->tagwait)
1544 mask |= POLLIN;
1545
1546 kill_fasync(&ctx->mfc_fasync, SIGIO, mask);
1547 }
1548}
1549
1550static int spufs_read_mfc_tagstatus(struct spu_context *ctx, u32 *status)
1551{
1552 /* See if there is one tag group is complete */
1553 /* FIXME we need locking around tagwait */
1554 *status = ctx->ops->read_mfc_tagstatus(ctx) & ctx->tagwait;
1555 ctx->tagwait &= ~*status;
1556 if (*status)
1557 return 1;
1558
1559 /* enable interrupt waiting for any tag group,
1560 may silently fail if interrupts are already enabled */
1561 ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
1562 return 0;
1563}
1564
1565static ssize_t spufs_mfc_read(struct file *file, char __user *buffer,
1566 size_t size, loff_t *pos)
1567{
1568 struct spu_context *ctx = file->private_data;
1569 int ret = -EINVAL;
1570 u32 status;
1571
1572 if (size != 4)
1573 goto out;
1574
c9101bdb
CH
1575 ret = spu_acquire(ctx);
1576 if (ret)
1577 return ret;
1578
1579 ret = -EINVAL;
a33a7d73
AB
1580 if (file->f_flags & O_NONBLOCK) {
1581 status = ctx->ops->read_mfc_tagstatus(ctx);
1582 if (!(status & ctx->tagwait))
1583 ret = -EAGAIN;
1584 else
c9101bdb 1585 /* XXX(hch): shouldn't we clear ret here? */
a33a7d73
AB
1586 ctx->tagwait &= ~status;
1587 } else {
1588 ret = spufs_wait(ctx->mfc_wq,
1589 spufs_read_mfc_tagstatus(ctx, &status));
eebead5b
CH
1590 if (ret)
1591 goto out;
a33a7d73
AB
1592 }
1593 spu_release(ctx);
1594
a33a7d73
AB
1595 ret = 4;
1596 if (copy_to_user(buffer, &status, 4))
1597 ret = -EFAULT;
1598
1599out:
1600 return ret;
1601}
1602
1603static int spufs_check_valid_dma(struct mfc_dma_command *cmd)
1604{
9477e455 1605 pr_debug("queueing DMA %x %llx %x %x %x\n", cmd->lsa,
a33a7d73
AB
1606 cmd->ea, cmd->size, cmd->tag, cmd->cmd);
1607
1608 switch (cmd->cmd) {
1609 case MFC_PUT_CMD:
1610 case MFC_PUTF_CMD:
1611 case MFC_PUTB_CMD:
1612 case MFC_GET_CMD:
1613 case MFC_GETF_CMD:
1614 case MFC_GETB_CMD:
1615 break;
1616 default:
1617 pr_debug("invalid DMA opcode %x\n", cmd->cmd);
1618 return -EIO;
1619 }
1620
1621 if ((cmd->lsa & 0xf) != (cmd->ea &0xf)) {
9477e455 1622 pr_debug("invalid DMA alignment, ea %llx lsa %x\n",
a33a7d73
AB
1623 cmd->ea, cmd->lsa);
1624 return -EIO;
1625 }
1626
1627 switch (cmd->size & 0xf) {
1628 case 1:
1629 break;
1630 case 2:
1631 if (cmd->lsa & 1)
1632 goto error;
1633 break;
1634 case 4:
1635 if (cmd->lsa & 3)
1636 goto error;
1637 break;
1638 case 8:
1639 if (cmd->lsa & 7)
1640 goto error;
1641 break;
1642 case 0:
1643 if (cmd->lsa & 15)
1644 goto error;
1645 break;
1646 error:
1647 default:
1648 pr_debug("invalid DMA alignment %x for size %x\n",
1649 cmd->lsa & 0xf, cmd->size);
1650 return -EIO;
1651 }
1652
1653 if (cmd->size > 16 * 1024) {
1654 pr_debug("invalid DMA size %x\n", cmd->size);
1655 return -EIO;
1656 }
1657
1658 if (cmd->tag & 0xfff0) {
1659 /* we reserve the higher tag numbers for kernel use */
1660 pr_debug("invalid DMA tag\n");
1661 return -EIO;
1662 }
1663
1664 if (cmd->class) {
1665 /* not supported in this version */
1666 pr_debug("invalid DMA class\n");
1667 return -EIO;
1668 }
1669
1670 return 0;
1671}
1672
1673static int spu_send_mfc_command(struct spu_context *ctx,
1674 struct mfc_dma_command cmd,
1675 int *error)
1676{
1677 *error = ctx->ops->send_mfc_command(ctx, &cmd);
1678 if (*error == -EAGAIN) {
1679 /* wait for any tag group to complete
1680 so we have space for the new command */
1681 ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
1682 /* try again, because the queue might be
1683 empty again */
1684 *error = ctx->ops->send_mfc_command(ctx, &cmd);
1685 if (*error == -EAGAIN)
1686 return 0;
1687 }
1688 return 1;
1689}
1690
1691static ssize_t spufs_mfc_write(struct file *file, const char __user *buffer,
1692 size_t size, loff_t *pos)
1693{
1694 struct spu_context *ctx = file->private_data;
1695 struct mfc_dma_command cmd;
1696 int ret = -EINVAL;
1697
1698 if (size != sizeof cmd)
1699 goto out;
1700
1701 ret = -EFAULT;
1702 if (copy_from_user(&cmd, buffer, sizeof cmd))
1703 goto out;
1704
1705 ret = spufs_check_valid_dma(&cmd);
1706 if (ret)
1707 goto out;
1708
c9101bdb
CH
1709 ret = spu_acquire(ctx);
1710 if (ret)
1711 goto out;
1712
33bfd7a7 1713 ret = spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
577f8f10
AM
1714 if (ret)
1715 goto out;
1716
a33a7d73
AB
1717 if (file->f_flags & O_NONBLOCK) {
1718 ret = ctx->ops->send_mfc_command(ctx, &cmd);
1719 } else {
1720 int status;
1721 ret = spufs_wait(ctx->mfc_wq,
1722 spu_send_mfc_command(ctx, cmd, &status));
eebead5b
CH
1723 if (ret)
1724 goto out;
a33a7d73
AB
1725 if (status)
1726 ret = status;
1727 }
a33a7d73
AB
1728
1729 if (ret)
933b0e35 1730 goto out_unlock;
a33a7d73
AB
1731
1732 ctx->tagwait |= 1 << cmd.tag;
3692dc66 1733 ret = size;
a33a7d73 1734
933b0e35
KA
1735out_unlock:
1736 spu_release(ctx);
a33a7d73
AB
1737out:
1738 return ret;
1739}
1740
1741static unsigned int spufs_mfc_poll(struct file *file,poll_table *wait)
1742{
1743 struct spu_context *ctx = file->private_data;
1744 u32 free_elements, tagstatus;
1745 unsigned int mask;
1746
933b0e35
KA
1747 poll_wait(file, &ctx->mfc_wq, wait);
1748
c9101bdb
CH
1749 /*
1750 * For now keep this uninterruptible and also ignore the rule
1751 * that poll should not sleep. Will be fixed later.
1752 */
1753 mutex_lock(&ctx->state_mutex);
a33a7d73
AB
1754 ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2);
1755 free_elements = ctx->ops->get_mfc_free_elements(ctx);
1756 tagstatus = ctx->ops->read_mfc_tagstatus(ctx);
1757 spu_release(ctx);
1758
a33a7d73
AB
1759 mask = 0;
1760 if (free_elements & 0xffff)
1761 mask |= POLLOUT | POLLWRNORM;
1762 if (tagstatus & ctx->tagwait)
1763 mask |= POLLIN | POLLRDNORM;
1764
e48b1b45 1765 pr_debug("%s: free %d tagstatus %d tagwait %d\n", __func__,
a33a7d73
AB
1766 free_elements, tagstatus, ctx->tagwait);
1767
1768 return mask;
1769}
1770
73b6af8a 1771static int spufs_mfc_flush(struct file *file, fl_owner_t id)
a33a7d73
AB
1772{
1773 struct spu_context *ctx = file->private_data;
1774 int ret;
1775
c9101bdb
CH
1776 ret = spu_acquire(ctx);
1777 if (ret)
eebead5b 1778 goto out;
a33a7d73
AB
1779#if 0
1780/* this currently hangs */
1781 ret = spufs_wait(ctx->mfc_wq,
1782 ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2));
1783 if (ret)
1784 goto out;
1785 ret = spufs_wait(ctx->mfc_wq,
1786 ctx->ops->read_mfc_tagstatus(ctx) == ctx->tagwait);
eebead5b
CH
1787 if (ret)
1788 goto out;
a33a7d73
AB
1789#else
1790 ret = 0;
1791#endif
1792 spu_release(ctx);
eebead5b 1793out:
a33a7d73
AB
1794 return ret;
1795}
1796
02c24a82
JB
1797static int spufs_mfc_fsync(struct file *file, loff_t start, loff_t end, int datasync)
1798{
496ad9aa 1799 struct inode *inode = file_inode(file);
02c24a82
JB
1800 int err = filemap_write_and_wait_range(inode->i_mapping, start, end);
1801 if (!err) {
5955102c 1802 inode_lock(inode);
02c24a82 1803 err = spufs_mfc_flush(file, NULL);
5955102c 1804 inode_unlock(inode);
02c24a82
JB
1805 }
1806 return err;
a33a7d73
AB
1807}
1808
1809static int spufs_mfc_fasync(int fd, struct file *file, int on)
1810{
1811 struct spu_context *ctx = file->private_data;
1812
1813 return fasync_helper(fd, file, on, &ctx->mfc_fasync);
1814}
1815
5dfe4c96 1816static const struct file_operations spufs_mfc_fops = {
a33a7d73 1817 .open = spufs_mfc_open,
43c2bbd9 1818 .release = spufs_mfc_release,
a33a7d73
AB
1819 .read = spufs_mfc_read,
1820 .write = spufs_mfc_write,
1821 .poll = spufs_mfc_poll,
1822 .flush = spufs_mfc_flush,
1823 .fsync = spufs_mfc_fsync,
1824 .fasync = spufs_mfc_fasync,
6df10a82 1825 .mmap = spufs_mfc_mmap,
fc15351d 1826 .llseek = no_llseek,
a33a7d73
AB
1827};
1828
197b1a82 1829static int spufs_npc_set(void *data, u64 val)
67207b96
AB
1830{
1831 struct spu_context *ctx = data;
c9101bdb
CH
1832 int ret;
1833
1834 ret = spu_acquire(ctx);
1835 if (ret)
1836 return ret;
8b3d6663
AB
1837 ctx->ops->npc_write(ctx, val);
1838 spu_release(ctx);
197b1a82
CH
1839
1840 return 0;
67207b96
AB
1841}
1842
104f0cc2 1843static u64 spufs_npc_get(struct spu_context *ctx)
78810ff6
ME
1844{
1845 return ctx->ops->npc_read(ctx);
1846}
104f0cc2
ME
1847DEFINE_SPUFS_ATTRIBUTE(spufs_npc_ops, spufs_npc_get, spufs_npc_set,
1848 "0x%llx\n", SPU_ATTR_ACQUIRE);
67207b96 1849
197b1a82 1850static int spufs_decr_set(void *data, u64 val)
8b3d6663
AB
1851{
1852 struct spu_context *ctx = data;
1853 struct spu_lscsa *lscsa = ctx->csa.lscsa;
c9101bdb
CH
1854 int ret;
1855
1856 ret = spu_acquire_saved(ctx);
1857 if (ret)
1858 return ret;
8b3d6663 1859 lscsa->decr.slot[0] = (u32) val;
27b1ea09 1860 spu_release_saved(ctx);
197b1a82
CH
1861
1862 return 0;
8b3d6663
AB
1863}
1864
104f0cc2 1865static u64 spufs_decr_get(struct spu_context *ctx)
8b3d6663 1866{
8b3d6663 1867 struct spu_lscsa *lscsa = ctx->csa.lscsa;
bf1ab978
DGM
1868 return lscsa->decr.slot[0];
1869}
104f0cc2
ME
1870DEFINE_SPUFS_ATTRIBUTE(spufs_decr_ops, spufs_decr_get, spufs_decr_set,
1871 "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED);
8b3d6663 1872
197b1a82 1873static int spufs_decr_status_set(void *data, u64 val)
8b3d6663
AB
1874{
1875 struct spu_context *ctx = data;
c9101bdb
CH
1876 int ret;
1877
1878 ret = spu_acquire_saved(ctx);
1879 if (ret)
1880 return ret;
d40a01d4
MN
1881 if (val)
1882 ctx->csa.priv2.mfc_control_RW |= MFC_CNTL_DECREMENTER_RUNNING;
1883 else
1884 ctx->csa.priv2.mfc_control_RW &= ~MFC_CNTL_DECREMENTER_RUNNING;
27b1ea09 1885 spu_release_saved(ctx);
197b1a82
CH
1886
1887 return 0;
8b3d6663
AB
1888}
1889
104f0cc2 1890static u64 spufs_decr_status_get(struct spu_context *ctx)
8b3d6663 1891{
d40a01d4
MN
1892 if (ctx->csa.priv2.mfc_control_RW & MFC_CNTL_DECREMENTER_RUNNING)
1893 return SPU_DECR_STATUS_RUNNING;
1894 else
1895 return 0;
bf1ab978 1896}
104f0cc2
ME
1897DEFINE_SPUFS_ATTRIBUTE(spufs_decr_status_ops, spufs_decr_status_get,
1898 spufs_decr_status_set, "0x%llx\n",
1899 SPU_ATTR_ACQUIRE_SAVED);
8b3d6663 1900
197b1a82 1901static int spufs_event_mask_set(void *data, u64 val)
8b3d6663
AB
1902{
1903 struct spu_context *ctx = data;
1904 struct spu_lscsa *lscsa = ctx->csa.lscsa;
c9101bdb
CH
1905 int ret;
1906
1907 ret = spu_acquire_saved(ctx);
1908 if (ret)
1909 return ret;
8b3d6663 1910 lscsa->event_mask.slot[0] = (u32) val;
27b1ea09 1911 spu_release_saved(ctx);
197b1a82
CH
1912
1913 return 0;
8b3d6663
AB
1914}
1915
104f0cc2 1916static u64 spufs_event_mask_get(struct spu_context *ctx)
8b3d6663 1917{
8b3d6663 1918 struct spu_lscsa *lscsa = ctx->csa.lscsa;
bf1ab978
DGM
1919 return lscsa->event_mask.slot[0];
1920}
1921
104f0cc2
ME
1922DEFINE_SPUFS_ATTRIBUTE(spufs_event_mask_ops, spufs_event_mask_get,
1923 spufs_event_mask_set, "0x%llx\n",
1924 SPU_ATTR_ACQUIRE_SAVED);
8b3d6663 1925
104f0cc2 1926static u64 spufs_event_status_get(struct spu_context *ctx)
b9e3bd77 1927{
b9e3bd77 1928 struct spu_state *state = &ctx->csa;
b9e3bd77 1929 u64 stat;
b9e3bd77
DGM
1930 stat = state->spu_chnlcnt_RW[0];
1931 if (stat)
bf1ab978
DGM
1932 return state->spu_chnldata_RW[0];
1933 return 0;
1934}
104f0cc2
ME
1935DEFINE_SPUFS_ATTRIBUTE(spufs_event_status_ops, spufs_event_status_get,
1936 NULL, "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED)
b9e3bd77 1937
197b1a82 1938static int spufs_srr0_set(void *data, u64 val)
8b3d6663
AB
1939{
1940 struct spu_context *ctx = data;
1941 struct spu_lscsa *lscsa = ctx->csa.lscsa;
c9101bdb
CH
1942 int ret;
1943
1944 ret = spu_acquire_saved(ctx);
1945 if (ret)
1946 return ret;
8b3d6663 1947 lscsa->srr0.slot[0] = (u32) val;
27b1ea09 1948 spu_release_saved(ctx);
197b1a82
CH
1949
1950 return 0;
8b3d6663
AB
1951}
1952
104f0cc2 1953static u64 spufs_srr0_get(struct spu_context *ctx)
8b3d6663 1954{
8b3d6663 1955 struct spu_lscsa *lscsa = ctx->csa.lscsa;
104f0cc2 1956 return lscsa->srr0.slot[0];
8b3d6663 1957}
104f0cc2
ME
1958DEFINE_SPUFS_ATTRIBUTE(spufs_srr0_ops, spufs_srr0_get, spufs_srr0_set,
1959 "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED)
8b3d6663 1960
104f0cc2 1961static u64 spufs_id_get(struct spu_context *ctx)
7b1a7014 1962{
7b1a7014 1963 u64 num;
1964
7b1a7014 1965 if (ctx->state == SPU_STATE_RUNNABLE)
1966 num = ctx->spu->number;
1967 else
1968 num = (unsigned int)-1;
7b1a7014 1969
1970 return num;
1971}
104f0cc2
ME
1972DEFINE_SPUFS_ATTRIBUTE(spufs_id_ops, spufs_id_get, NULL, "0x%llx\n",
1973 SPU_ATTR_ACQUIRE)
7b1a7014 1974
104f0cc2 1975static u64 spufs_object_id_get(struct spu_context *ctx)
bf1ab978
DGM
1976{
1977 /* FIXME: Should there really be no locking here? */
104f0cc2 1978 return ctx->object_id;
bf1ab978
DGM
1979}
1980
197b1a82 1981static int spufs_object_id_set(void *data, u64 id)
86767277
AB
1982{
1983 struct spu_context *ctx = data;
1984 ctx->object_id = id;
197b1a82
CH
1985
1986 return 0;
86767277
AB
1987}
1988
104f0cc2
ME
1989DEFINE_SPUFS_ATTRIBUTE(spufs_object_id_ops, spufs_object_id_get,
1990 spufs_object_id_set, "0x%llx\n", SPU_ATTR_NOACQUIRE);
86767277 1991
104f0cc2 1992static u64 spufs_lslr_get(struct spu_context *ctx)
bf1ab978 1993{
bf1ab978
DGM
1994 return ctx->csa.priv2.spu_lslr_RW;
1995}
104f0cc2
ME
1996DEFINE_SPUFS_ATTRIBUTE(spufs_lslr_ops, spufs_lslr_get, NULL, "0x%llx\n",
1997 SPU_ATTR_ACQUIRE_SAVED);
b9e3bd77
DGM
1998
1999static int spufs_info_open(struct inode *inode, struct file *file)
2000{
2001 struct spufs_inode_info *i = SPUFS_I(inode);
2002 struct spu_context *ctx = i->i_ctx;
2003 file->private_data = ctx;
2004 return 0;
2005}
2006
cbe709c1
BH
2007static int spufs_caps_show(struct seq_file *s, void *private)
2008{
2009 struct spu_context *ctx = s->private;
2010
2011 if (!(ctx->flags & SPU_CREATE_NOSCHED))
2012 seq_puts(s, "sched\n");
2013 if (!(ctx->flags & SPU_CREATE_ISOLATE))
2014 seq_puts(s, "step\n");
2015 return 0;
2016}
2017
2018static int spufs_caps_open(struct inode *inode, struct file *file)
2019{
2020 return single_open(file, spufs_caps_show, SPUFS_I(inode)->i_ctx);
2021}
2022
2023static const struct file_operations spufs_caps_fops = {
2024 .open = spufs_caps_open,
2025 .read = seq_read,
2026 .llseek = seq_lseek,
2027 .release = single_release,
2028};
2029
bf1ab978
DGM
2030static ssize_t __spufs_mbox_info_read(struct spu_context *ctx,
2031 char __user *buf, size_t len, loff_t *pos)
2032{
bf1ab978
DGM
2033 u32 data;
2034
cbea9238
JK
2035 /* EOF if there's no entry in the mbox */
2036 if (!(ctx->csa.prob.mb_stat_R & 0x0000ff))
2037 return 0;
2038
2039 data = ctx->csa.prob.pu_mb_R;
bf1ab978
DGM
2040
2041 return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
2042}
2043
69a2f00c
DGM
2044static ssize_t spufs_mbox_info_read(struct file *file, char __user *buf,
2045 size_t len, loff_t *pos)
2046{
bf1ab978 2047 int ret;
69a2f00c 2048 struct spu_context *ctx = file->private_data;
69a2f00c
DGM
2049
2050 if (!access_ok(VERIFY_WRITE, buf, len))
2051 return -EFAULT;
2052
c9101bdb
CH
2053 ret = spu_acquire_saved(ctx);
2054 if (ret)
2055 return ret;
69a2f00c 2056 spin_lock(&ctx->csa.register_lock);
bf1ab978 2057 ret = __spufs_mbox_info_read(ctx, buf, len, pos);
69a2f00c 2058 spin_unlock(&ctx->csa.register_lock);
27b1ea09 2059 spu_release_saved(ctx);
69a2f00c 2060
bf1ab978 2061 return ret;
69a2f00c
DGM
2062}
2063
5dfe4c96 2064static const struct file_operations spufs_mbox_info_fops = {
69a2f00c
DGM
2065 .open = spufs_info_open,
2066 .read = spufs_mbox_info_read,
2067 .llseek = generic_file_llseek,
2068};
2069
bf1ab978
DGM
2070static ssize_t __spufs_ibox_info_read(struct spu_context *ctx,
2071 char __user *buf, size_t len, loff_t *pos)
2072{
bf1ab978
DGM
2073 u32 data;
2074
cbea9238
JK
2075 /* EOF if there's no entry in the ibox */
2076 if (!(ctx->csa.prob.mb_stat_R & 0xff0000))
2077 return 0;
2078
2079 data = ctx->csa.priv2.puint_mb_R;
bf1ab978
DGM
2080
2081 return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
2082}
2083
69a2f00c
DGM
2084static ssize_t spufs_ibox_info_read(struct file *file, char __user *buf,
2085 size_t len, loff_t *pos)
2086{
2087 struct spu_context *ctx = file->private_data;
bf1ab978 2088 int ret;
69a2f00c
DGM
2089
2090 if (!access_ok(VERIFY_WRITE, buf, len))
2091 return -EFAULT;
2092
c9101bdb
CH
2093 ret = spu_acquire_saved(ctx);
2094 if (ret)
2095 return ret;
69a2f00c 2096 spin_lock(&ctx->csa.register_lock);
bf1ab978 2097 ret = __spufs_ibox_info_read(ctx, buf, len, pos);
69a2f00c 2098 spin_unlock(&ctx->csa.register_lock);
27b1ea09 2099 spu_release_saved(ctx);
69a2f00c 2100
bf1ab978 2101 return ret;
69a2f00c
DGM
2102}
2103
5dfe4c96 2104static const struct file_operations spufs_ibox_info_fops = {
69a2f00c
DGM
2105 .open = spufs_info_open,
2106 .read = spufs_ibox_info_read,
2107 .llseek = generic_file_llseek,
2108};
2109
bf1ab978
DGM
2110static ssize_t __spufs_wbox_info_read(struct spu_context *ctx,
2111 char __user *buf, size_t len, loff_t *pos)
69a2f00c 2112{
69a2f00c
DGM
2113 int i, cnt;
2114 u32 data[4];
2115 u32 wbox_stat;
2116
bf1ab978
DGM
2117 wbox_stat = ctx->csa.prob.mb_stat_R;
2118 cnt = 4 - ((wbox_stat & 0x00ff00) >> 8);
2119 for (i = 0; i < cnt; i++) {
2120 data[i] = ctx->csa.spu_mailbox_data[i];
2121 }
2122
2123 return simple_read_from_buffer(buf, len, pos, &data,
2124 cnt * sizeof(u32));
2125}
2126
/*
 * read(2) handler for the "wbox_info" file: report the saved write
 * mailbox state of a context.
 *
 * NOTE(review): as with spufs_ibox_info_read(), the helper performs the
 * copy_to_user() (via simple_read_from_buffer) while csa.register_lock
 * is held -- a potential sleep-in-atomic on a user-space fault.
 */
static ssize_t spufs_wbox_info_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_wbox_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return ret;
}
2146
/* File operations for the read-only "wbox_info" attribute file. */
static const struct file_operations spufs_wbox_info_fops = {
	.open = spufs_info_open,
	.read = spufs_wbox_info_read,
	.llseek = generic_file_llseek,
};
2152
bf1ab978
DGM
/*
 * Assemble a struct spu_dma_info snapshot of the saved MFC DMA state --
 * tag status query, tag mask, channel data words 24/25/27, and the
 * 16-entry SPU-side MFC command queue -- and copy it to user space.
 *
 * Called with the context saved and ctx->csa.register_lock held; also
 * used directly by the coredump reader.
 *
 * NOTE(review): 'info' is a stack struct that is never memset(); if
 * struct spu_dma_info contains any compiler padding, those bytes reach
 * user space uninitialized.  Confirm the layout is packed or zero the
 * struct first.
 */
static ssize_t __spufs_dma_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	struct spu_dma_info info;
	struct mfc_cq_sr *qp, *spuqp;
	int i;

	info.dma_info_type = ctx->csa.priv2.spu_tag_status_query_RW;
	info.dma_info_mask = ctx->csa.lscsa->tag_mask.slot[0];
	info.dma_info_status = ctx->csa.spu_chnldata_RW[24];
	info.dma_info_stall_and_notify = ctx->csa.spu_chnldata_RW[25];
	info.dma_info_atomic_command_status = ctx->csa.spu_chnldata_RW[27];
	for (i = 0; i < 16; i++) {
		qp = &info.dma_info_command_data[i];
		spuqp = &ctx->csa.priv2.spuq[i];

		qp->mfc_cq_data0_RW = spuqp->mfc_cq_data0_RW;
		qp->mfc_cq_data1_RW = spuqp->mfc_cq_data1_RW;
		qp->mfc_cq_data2_RW = spuqp->mfc_cq_data2_RW;
		qp->mfc_cq_data3_RW = spuqp->mfc_cq_data3_RW;
	}

	return simple_read_from_buffer(buf, len, pos, &info,
				sizeof info);
}
2178
bf1ab978
DGM
/*
 * read(2) handler for the "dma_info" file: report the saved MFC DMA
 * state of a context as a struct spu_dma_info.
 *
 * NOTE(review): the helper copies to user space (simple_read_from_buffer)
 * while csa.register_lock is held -- potential sleep-in-atomic on fault.
 */
static ssize_t spufs_dma_info_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_dma_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return ret;
}
2198
/*
 * File operations for the read-only "dma_info" attribute file.
 * NOTE(review): uses no_llseek while the mbox/ibox/wbox_info siblings use
 * generic_file_llseek -- confirm whether the inconsistency is intended.
 */
static const struct file_operations spufs_dma_info_fops = {
	.open = spufs_info_open,
	.read = spufs_dma_info_read,
	.llseek = no_llseek,
};
2204
bf1ab978
DGM
/*
 * Assemble a struct spu_proxydma_info snapshot of the saved proxy-DMA
 * state -- query type/mask, tag status, and the 8-entry PU-side MFC
 * command queue -- and copy it to user space.
 *
 * Unlike the other helpers, this one rejects buffers smaller than the
 * full struct (-EINVAL) and does its own access_ok() check; both run
 * fine under the caller's spinlock since neither sleeps.
 *
 * Called with the context saved and ctx->csa.register_lock held; also
 * used directly by the coredump reader.
 *
 * NOTE(review): like __spufs_dma_info_read(), 'info' is never memset(),
 * so any padding in struct spu_proxydma_info would leak to user space.
 */
static ssize_t __spufs_proxydma_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	struct spu_proxydma_info info;
	struct mfc_cq_sr *qp, *puqp;
	int ret = sizeof info;	/* minimum buffer size required */
	int i;

	if (len < ret)
		return -EINVAL;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	info.proxydma_info_type = ctx->csa.prob.dma_querytype_RW;
	info.proxydma_info_mask = ctx->csa.prob.dma_querymask_RW;
	info.proxydma_info_status = ctx->csa.prob.dma_tagstatus_R;
	for (i = 0; i < 8; i++) {
		qp = &info.proxydma_info_command_data[i];
		puqp = &ctx->csa.priv2.puq[i];

		qp->mfc_cq_data0_RW = puqp->mfc_cq_data0_RW;
		qp->mfc_cq_data1_RW = puqp->mfc_cq_data1_RW;
		qp->mfc_cq_data2_RW = puqp->mfc_cq_data2_RW;
		qp->mfc_cq_data3_RW = puqp->mfc_cq_data3_RW;
	}

	return simple_read_from_buffer(buf, len, pos, &info,
				sizeof info);
}
2235
/*
 * read(2) handler for the "proxydma_info" file: report the saved
 * proxy-DMA state of a context.  The access_ok()/length checks live in
 * the __spufs_proxydma_info_read() helper.
 *
 * NOTE(review): the helper copies to user space while csa.register_lock
 * is held -- potential sleep-in-atomic on a user-space fault.
 */
static ssize_t spufs_proxydma_info_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_proxydma_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return ret;
}
2252
/* File operations for the read-only "proxydma_info" attribute file. */
static const struct file_operations spufs_proxydma_info_fops = {
	.open = spufs_info_open,
	.read = spufs_proxydma_info_read,
	.llseek = no_llseek,
};
2258
476273ad
CH
2259static int spufs_show_tid(struct seq_file *s, void *private)
2260{
2261 struct spu_context *ctx = s->private;
2262
2263 seq_printf(s, "%d\n", ctx->tid);
2264 return 0;
2265}
2266
2267static int spufs_tid_open(struct inode *inode, struct file *file)
2268{
2269 return single_open(file, spufs_show_tid, SPUFS_I(inode)->i_ctx);
2270}
2271
/* File operations for the read-only seq_file-backed "tid" file. */
static const struct file_operations spufs_tid_fops = {
	.open		= spufs_tid_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
2278
e9f8a0b6
CH
/*
 * Human-readable names for the utilization states printed by "stat".
 * NOTE(review): indexed by ctx->stats.util_state; the order must match
 * enum spu_utilization_state (USER, SYSTEM, IOWAIT, IDLE_LOADED) --
 * confirm against the enum definition.
 */
static const char *ctx_state_names[] = {
	"user", "system", "iowait", "loaded"
};
2282
2283static unsigned long long spufs_acct_time(struct spu_context *ctx,
27ec41d3 2284 enum spu_utilization_state state)
e9f8a0b6 2285{
27ec41d3 2286 unsigned long long time = ctx->stats.times[state];
e9f8a0b6 2287
27ec41d3
AD
2288 /*
2289 * In general, utilization statistics are updated by the controlling
2290 * thread as the spu context moves through various well defined
2291 * state transitions, but if the context is lazily loaded its
2292 * utilization statistics are not updated as the controlling thread
2293 * is not tightly coupled with the execution of the spu context. We
2294 * calculate and apply the time delta from the last recorded state
2295 * of the spu context.
2296 */
2297 if (ctx->spu && ctx->stats.util_state == state) {
f2dec1ea 2298 time += ktime_get_ns() - ctx->stats.tstamp;
27ec41d3 2299 }
e9f8a0b6 2300
27ec41d3 2301 return time / NSEC_PER_MSEC;
e9f8a0b6
CH
2302}
2303
2304static unsigned long long spufs_slb_flts(struct spu_context *ctx)
2305{
2306 unsigned long long slb_flts = ctx->stats.slb_flt;
2307
2308 if (ctx->state == SPU_STATE_RUNNABLE) {
2309 slb_flts += (ctx->spu->stats.slb_flt -
2310 ctx->stats.slb_flt_base);
2311 }
2312
2313 return slb_flts;
2314}
2315
2316static unsigned long long spufs_class2_intrs(struct spu_context *ctx)
2317{
2318 unsigned long long class2_intrs = ctx->stats.class2_intr;
2319
2320 if (ctx->state == SPU_STATE_RUNNABLE) {
2321 class2_intrs += (ctx->spu->stats.class2_intr -
2322 ctx->stats.class2_intr_base);
2323 }
2324
2325 return class2_intrs;
2326}
2327
2328
/*
 * seq_file show routine for "stat": one line with the current
 * utilization state name followed by twelve counters (per-state times
 * in ms, context-switch counts, fault counts, class-2 interrupts and
 * libassist calls).  The context is acquired so the stats are coherent
 * while being read.
 */
static int spufs_show_stat(struct seq_file *s, void *private)
{
	struct spu_context *ctx = s->private;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;

	seq_printf(s, "%s %llu %llu %llu %llu "
		"%llu %llu %llu %llu %llu %llu %llu %llu\n",
		ctx_state_names[ctx->stats.util_state],
		spufs_acct_time(ctx, SPU_UTIL_USER),
		spufs_acct_time(ctx, SPU_UTIL_SYSTEM),
		spufs_acct_time(ctx, SPU_UTIL_IOWAIT),
		spufs_acct_time(ctx, SPU_UTIL_IDLE_LOADED),
		ctx->stats.vol_ctx_switch,
		ctx->stats.invol_ctx_switch,
		spufs_slb_flts(ctx),
		ctx->stats.hash_flt,
		ctx->stats.min_flt,
		ctx->stats.maj_flt,
		spufs_class2_intrs(ctx),
		ctx->stats.libassist);
	spu_release(ctx);
	return 0;
}
2356
2357static int spufs_stat_open(struct inode *inode, struct file *file)
2358{
2359 return single_open(file, spufs_show_stat, SPUFS_I(inode)->i_ctx);
2360}
2361
/* File operations for the read-only seq_file-backed "stat" file. */
static const struct file_operations spufs_stat_fops = {
	.open		= spufs_stat_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
2368
5158e9b5
CH
/*
 * Number of entries currently queued in the switch-log ring buffer.
 * Callers hold ctx->state_mutex (via spu_acquire).
 * NOTE(review): correctness of the modulo relies on head/tail being
 * unsigned (or always kept in [0, SWITCH_LOG_BUFSIZE)); with signed
 * operands a negative difference would yield a negative remainder --
 * confirm the field types in struct switch_log.
 */
static inline int spufs_switch_log_used(struct spu_context *ctx)
{
	return (ctx->switch_log->head - ctx->switch_log->tail) %
		SWITCH_LOG_BUFSIZE;
}
2374
/* Free slots remaining in the switch-log ring buffer. */
static inline int spufs_switch_log_avail(struct spu_context *ctx)
{
	return SWITCH_LOG_BUFSIZE - spufs_switch_log_used(ctx);
}
2379
/*
 * open(2) handler for "switch_log": allocate the per-context log ring
 * buffer (header plus SWITCH_LOG_BUFSIZE entries) under the context's
 * state mutex.  Only one opener is allowed at a time; a second open
 * fails with -EBUSY while ctx->switch_log is non-NULL.
 */
static int spufs_switch_log_open(struct inode *inode, struct file *file)
{
	struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
	int rc;

	rc = spu_acquire(ctx);
	if (rc)
		return rc;

	if (ctx->switch_log) {
		/* already opened by someone else */
		rc = -EBUSY;
		goto out;
	}

	ctx->switch_log = kmalloc(sizeof(struct switch_log) +
		SWITCH_LOG_BUFSIZE * sizeof(struct switch_log_entry),
		GFP_KERNEL);

	if (!ctx->switch_log) {
		rc = -ENOMEM;
		goto out;
	}

	/* empty ring: head == tail */
	ctx->switch_log->head = ctx->switch_log->tail = 0;
	init_waitqueue_head(&ctx->switch_log->wait);
	rc = 0;

out:
	spu_release(ctx);
	return rc;
}
2411
/*
 * release(2) handler for "switch_log": free the log buffer and clear
 * ctx->switch_log under the state mutex, so spu_switch_log_notify()
 * (which checks the pointer under the same mutex) stops logging.
 */
static int spufs_switch_log_release(struct inode *inode, struct file *file)
{
	struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
	int rc;

	rc = spu_acquire(ctx);
	if (rc)
		return rc;

	kfree(ctx->switch_log);
	ctx->switch_log = NULL;
	spu_release(ctx);

	return 0;
}
2427
2428static int switch_log_sprint(struct spu_context *ctx, char *tbuf, int n)
2429{
2430 struct switch_log_entry *p;
2431
2432 p = ctx->switch_log->log + ctx->switch_log->tail % SWITCH_LOG_BUFSIZE;
2433
2434 return snprintf(tbuf, n, "%u.%09u %d %u %u %llu\n",
2435 (unsigned int) p->tstamp.tv_sec,
2436 (unsigned int) p->tstamp.tv_nsec,
2437 p->spu_id,
2438 (unsigned int) p->type,
2439 (unsigned int) p->val,
2440 (unsigned long long) p->timebase);
2441}
2442
2443static ssize_t spufs_switch_log_read(struct file *file, char __user *buf,
2444 size_t len, loff_t *ppos)
2445{
496ad9aa 2446 struct inode *inode = file_inode(file);
5158e9b5
CH
2447 struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
2448 int error = 0, cnt = 0;
2449
17e37675 2450 if (!buf)
5158e9b5
CH
2451 return -EINVAL;
2452
f5ed0eb6
JK
2453 error = spu_acquire(ctx);
2454 if (error)
2455 return error;
2456
5158e9b5
CH
2457 while (cnt < len) {
2458 char tbuf[128];
2459 int width;
2460
14f693ee
JK
2461 if (spufs_switch_log_used(ctx) == 0) {
2462 if (cnt > 0) {
2463 /* If there's data ready to go, we can
2464 * just return straight away */
2465 break;
2466
2467 } else if (file->f_flags & O_NONBLOCK) {
f5ed0eb6
JK
2468 error = -EAGAIN;
2469 break;
14f693ee
JK
2470
2471 } else {
2472 /* spufs_wait will drop the mutex and
2473 * re-acquire, but since we're in read(), the
2474 * file cannot be _released (and so
2475 * ctx->switch_log is stable).
2476 */
2477 error = spufs_wait(ctx->switch_log->wait,
2478 spufs_switch_log_used(ctx) > 0);
2479
2480 /* On error, spufs_wait returns without the
2481 * state mutex held */
2482 if (error)
2483 return error;
2484
2485 /* We may have had entries read from underneath
2486 * us while we dropped the mutex in spufs_wait,
2487 * so re-check */
2488 if (spufs_switch_log_used(ctx) == 0)
2489 continue;
f5ed0eb6 2490 }
5158e9b5
CH
2491 }
2492
5158e9b5 2493 width = switch_log_sprint(ctx, tbuf, sizeof(tbuf));
f5ed0eb6 2494 if (width < len)
5158e9b5
CH
2495 ctx->switch_log->tail =
2496 (ctx->switch_log->tail + 1) %
2497 SWITCH_LOG_BUFSIZE;
f5ed0eb6
JK
2498 else
2499 /* If the record is greater than space available return
2500 * partial buffer (so far) */
5158e9b5
CH
2501 break;
2502
2503 error = copy_to_user(buf + cnt, tbuf, width);
2504 if (error)
2505 break;
2506 cnt += width;
2507 }
2508
f5ed0eb6
JK
2509 spu_release(ctx);
2510
5158e9b5
CH
2511 return cnt == 0 ? error : cnt;
2512}
2513
2514static unsigned int spufs_switch_log_poll(struct file *file, poll_table *wait)
2515{
496ad9aa 2516 struct inode *inode = file_inode(file);
5158e9b5
CH
2517 struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
2518 unsigned int mask = 0;
f5ed0eb6 2519 int rc;
5158e9b5
CH
2520
2521 poll_wait(file, &ctx->switch_log->wait, wait);
2522
f5ed0eb6
JK
2523 rc = spu_acquire(ctx);
2524 if (rc)
2525 return rc;
2526
5158e9b5
CH
2527 if (spufs_switch_log_used(ctx) > 0)
2528 mask |= POLLIN;
2529
f5ed0eb6
JK
2530 spu_release(ctx);
2531
5158e9b5
CH
2532 return mask;
2533}
2534
/* File operations for the read-only, non-seekable "switch_log" file. */
static const struct file_operations spufs_switch_log_fops = {
	.open		= spufs_switch_log_open,
	.read		= spufs_switch_log_read,
	.poll		= spufs_switch_log_poll,
	.release	= spufs_switch_log_release,
	.llseek		= no_llseek,
};
2542
f5ed0eb6
JK
/**
 * Log a context switch event to a switch log reader.
 *
 * Must be called with ctx->state_mutex held.
 *
 * A no-op when no "switch_log" file is open (ctx->switch_log == NULL).
 * The event is silently dropped unless more than one slot is free --
 * presumably keeping one slot unused so head == tail unambiguously
 * means "empty"; NOTE(review): confirm against the ring invariants.
 * Waiters are woken even when the event is dropped.
 */
void spu_switch_log_notify(struct spu *spu, struct spu_context *ctx,
		u32 type, u32 val)
{
	if (!ctx->switch_log)
		return;

	if (spufs_switch_log_avail(ctx) > 1) {
		struct switch_log_entry *p;

		p = ctx->switch_log->log + ctx->switch_log->head;
		ktime_get_ts(&p->tstamp);
		p->timebase = get_tb();
		/* -1 marks an event not tied to a physical SPU */
		p->spu_id = spu ? spu->number : -1;
		p->type = type;
		p->val = val;

		ctx->switch_log->head =
			(ctx->switch_log->head + 1) % SWITCH_LOG_BUFSIZE;
	}

	wake_up(&ctx->switch_log->wait);
}
e9f8a0b6 2570
46deed69
LB
/*
 * seq_file show routine for the debug-only ".ctx" file: dump one line of
 * raw context state (run state, flags, priority, time slice, SPU number,
 * runqueue membership, pending class-0/1 fault data, MFC control and the
 * run-control/status registers).
 *
 * MFC_CNTL is read live from the hardware (under the SPU's register
 * lock) when the context is loaded, otherwise from the saved CSA image.
 *
 * NOTE(review): takes ctx->state_mutex directly rather than going
 * through spu_acquire() like the other files here -- presumably
 * deliberate for a debug interface; confirm.
 */
static int spufs_show_ctx(struct seq_file *s, void *private)
{
	struct spu_context *ctx = s->private;
	u64 mfc_control_RW;

	mutex_lock(&ctx->state_mutex);
	if (ctx->spu) {
		struct spu *spu = ctx->spu;
		struct spu_priv2 __iomem *priv2 = spu->priv2;

		spin_lock_irq(&spu->register_lock);
		mfc_control_RW = in_be64(&priv2->mfc_control_RW);
		spin_unlock_irq(&spu->register_lock);
	} else {
		struct spu_state *csa = &ctx->csa;

		mfc_control_RW = csa->priv2.mfc_control_RW;
	}

	seq_printf(s, "%c flgs(%lx) sflgs(%lx) pri(%d) ts(%d) spu(%02d)"
		" %c %llx %llx %llx %llx %x %x\n",
		ctx->state == SPU_STATE_SAVED ? 'S' : 'R',
		ctx->flags,
		ctx->sched_flags,
		ctx->prio,
		ctx->time_slice,
		ctx->spu ? ctx->spu->number : -1,
		!list_empty(&ctx->rq) ? 'q' : ' ',
		ctx->csa.class_0_pending,
		ctx->csa.class_0_dar,
		ctx->csa.class_1_dsisr,
		mfc_control_RW,
		ctx->ops->runcntl_read(ctx),
		ctx->ops->status_read(ctx));

	mutex_unlock(&ctx->state_mutex);

	return 0;
}
2610
2611static int spufs_ctx_open(struct inode *inode, struct file *file)
2612{
2613 return single_open(file, spufs_show_ctx, SPUFS_I(inode)->i_ctx);
2614}
2615
/* File operations for the read-only seq_file-backed ".ctx" debug file. */
static const struct file_operations spufs_ctx_fops = {
	.open		= spufs_ctx_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
2622
/*
 * Files created in every scheduled (normal) spufs context directory.
 * Each entry: name, file operations, mode, and an optional trailing
 * size -- NOTE(review): presumably the nominal file size reported to
 * user space; confirm against the spufs_tree_descr definition.
 */
const struct spufs_tree_descr spufs_dir_contents[] = {
	{ "capabilities", &spufs_caps_fops, 0444, },
	{ "mem",  &spufs_mem_fops,  0666, LS_SIZE, },
	{ "regs", &spufs_regs_fops,  0666, sizeof(struct spu_reg128[128]), },
	{ "mbox", &spufs_mbox_fops, 0444, },
	{ "ibox", &spufs_ibox_fops, 0444, },
	{ "wbox", &spufs_wbox_fops, 0222, },
	{ "mbox_stat", &spufs_mbox_stat_fops, 0444, sizeof(u32), },
	{ "ibox_stat", &spufs_ibox_stat_fops, 0444, sizeof(u32), },
	{ "wbox_stat", &spufs_wbox_stat_fops, 0444, sizeof(u32), },
	{ "signal1", &spufs_signal1_fops, 0666, },
	{ "signal2", &spufs_signal2_fops, 0666, },
	{ "signal1_type", &spufs_signal1_type, 0666, },
	{ "signal2_type", &spufs_signal2_type, 0666, },
	{ "cntl", &spufs_cntl_fops,  0666, },
	{ "fpcr", &spufs_fpcr_fops, 0666, sizeof(struct spu_reg128), },
	{ "lslr", &spufs_lslr_ops, 0444, },
	{ "mfc", &spufs_mfc_fops, 0666, },
	{ "mss", &spufs_mss_fops, 0666, },
	{ "npc", &spufs_npc_ops, 0666, },
	{ "srr0", &spufs_srr0_ops, 0666, },
	{ "decr", &spufs_decr_ops, 0666, },
	{ "decr_status", &spufs_decr_status_ops, 0666, },
	{ "event_mask", &spufs_event_mask_ops, 0666, },
	{ "event_status", &spufs_event_status_ops, 0444, },
	{ "psmap", &spufs_psmap_fops, 0666, SPUFS_PS_MAP_SIZE, },
	{ "phys-id", &spufs_id_ops, 0666, },
	{ "object-id", &spufs_object_id_ops, 0666, },
	{ "mbox_info", &spufs_mbox_info_fops, 0444, sizeof(u32), },
	{ "ibox_info", &spufs_ibox_info_fops, 0444, sizeof(u32), },
	{ "wbox_info", &spufs_wbox_info_fops, 0444, sizeof(u32), },
	{ "dma_info", &spufs_dma_info_fops, 0444,
		sizeof(struct spu_dma_info), },
	{ "proxydma_info", &spufs_proxydma_info_fops, 0444,
		sizeof(struct spu_proxydma_info)},
	{ "tid", &spufs_tid_fops, 0444, },
	{ "stat", &spufs_stat_fops, 0444, },
	{ "switch_log", &spufs_switch_log_fops, 0444 },
	{},
};
5737edd1 2663
/*
 * Files created in non-scheduled ("nosched") context directories: a
 * subset of spufs_dir_contents, with the nosched variants of the
 * signal-notification files and without the scheduler-dependent
 * entries (regs, decr, events, srr0, switch_log, ...).
 */
const struct spufs_tree_descr spufs_dir_nosched_contents[] = {
	{ "capabilities", &spufs_caps_fops, 0444, },
	{ "mem",  &spufs_mem_fops,  0666, LS_SIZE, },
	{ "mbox", &spufs_mbox_fops, 0444, },
	{ "ibox", &spufs_ibox_fops, 0444, },
	{ "wbox", &spufs_wbox_fops, 0222, },
	{ "mbox_stat", &spufs_mbox_stat_fops, 0444, sizeof(u32), },
	{ "ibox_stat", &spufs_ibox_stat_fops, 0444, sizeof(u32), },
	{ "wbox_stat", &spufs_wbox_stat_fops, 0444, sizeof(u32), },
	{ "signal1", &spufs_signal1_nosched_fops, 0222, },
	{ "signal2", &spufs_signal2_nosched_fops, 0222, },
	{ "signal1_type", &spufs_signal1_type, 0666, },
	{ "signal2_type", &spufs_signal2_type, 0666, },
	{ "mss", &spufs_mss_fops, 0666, },
	{ "mfc", &spufs_mfc_fops, 0666, },
	{ "cntl", &spufs_cntl_fops,  0666, },
	{ "npc", &spufs_npc_ops, 0666, },
	{ "psmap", &spufs_psmap_fops, 0666, SPUFS_PS_MAP_SIZE, },
	{ "phys-id", &spufs_id_ops, 0666, },
	{ "object-id", &spufs_object_id_ops, 0666, },
	{ "tid", &spufs_tid_fops, 0444, },
	{ "stat", &spufs_stat_fops, 0444, },
	{},
};
2688
/* Debug-only files added to context directories (raw state dump). */
const struct spufs_tree_descr spufs_dir_debug_contents[] = {
	{ ".ctx", &spufs_ctx_fops, 0444, },
	{},
};
bf1ab978 2693
/*
 * Table of per-context files to include in an SPU coredump.  Each entry
 * provides either a raw read callback or a u64 getter, plus the dump
 * size.  NOTE(review): the magic size 19 presumably corresponds to the
 * formatted u64 attribute text "0x" + 16 hex digits + "\n"; confirm
 * against the coredump writer.
 */
const struct spufs_coredump_reader spufs_coredump_read[] = {
	{ "regs", __spufs_regs_read, NULL, sizeof(struct spu_reg128[128])},
	{ "fpcr", __spufs_fpcr_read, NULL, sizeof(struct spu_reg128) },
	{ "lslr", NULL, spufs_lslr_get, 19 },
	{ "decr", NULL, spufs_decr_get, 19 },
	{ "decr_status", NULL, spufs_decr_status_get, 19 },
	{ "mem", __spufs_mem_read, NULL, LS_SIZE, },
	{ "signal1", __spufs_signal1_read, NULL, sizeof(u32) },
	{ "signal1_type", NULL, spufs_signal1_type_get, 19 },
	{ "signal2", __spufs_signal2_read, NULL, sizeof(u32) },
	{ "signal2_type", NULL, spufs_signal2_type_get, 19 },
	{ "event_mask", NULL, spufs_event_mask_get, 19 },
	{ "event_status", NULL, spufs_event_status_get, 19 },
	{ "mbox_info", __spufs_mbox_info_read, NULL, sizeof(u32) },
	{ "ibox_info", __spufs_ibox_info_read, NULL, sizeof(u32) },
	{ "wbox_info", __spufs_wbox_info_read, NULL, 4 * sizeof(u32)},
	{ "dma_info", __spufs_dma_info_read, NULL, sizeof(struct spu_dma_info)},
	{ "proxydma_info", __spufs_proxydma_info_read,
			   NULL, sizeof(struct spu_proxydma_info)},
	{ "object-id", NULL, spufs_object_id_get, 19 },
	{ "npc", NULL, spufs_npc_get, 19 },
	{ NULL },
};