blob: b4d38cb65f17ba332c699b865cf34e7092c39de3 [file] [log] [blame]
Arnd Bergmann67207b92005-11-15 15:53:48 -05001/*
2 * SPU file system -- file contents
3 *
4 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
5 *
6 * Author: Arnd Bergmann <arndb@de.ibm.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 */
22
Arnd Bergmanna33a7d72006-03-23 00:00:11 +010023#undef DEBUG
24
Arnd Bergmann67207b92005-11-15 15:53:48 -050025#include <linux/fs.h>
26#include <linux/ioctl.h>
27#include <linux/module.h>
Arnd Bergmannd88cfff2005-12-05 22:52:22 -050028#include <linux/pagemap.h>
Arnd Bergmann67207b92005-11-15 15:53:48 -050029#include <linux/poll.h>
Arnd Bergmann51104592005-12-05 22:52:25 -050030#include <linux/ptrace.h>
Arnd Bergmann67207b92005-11-15 15:53:48 -050031
32#include <asm/io.h>
33#include <asm/semaphore.h>
34#include <asm/spu.h>
35#include <asm/uaccess.h>
36
37#include "spufs.h"
38
Benjamin Herrenschmidt27d5bf22006-10-04 17:26:11 +020039#define SPUFS_MMAP_4K (PAGE_SIZE == 0x1000)
40
Arnd Bergmann8b3d6662005-11-15 15:53:52 -050041
/*
 * Open the "mem" file: attach the SPU context to the file and
 * remember the inode's address space so local store pages can be
 * unmapped when the context is scheduled out.
 */
static int
spufs_mem_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;
	file->private_data = ctx;
	file->f_mapping = inode->i_mapping;
	ctx->local_store = inode->i_mapping;
	return 0;
}
52
/*
 * Read from the SPU local store at the current file position.
 * The context lock is held across get_ls() so the backing copy
 * (saved state or hardware) cannot change underneath us.
 */
static ssize_t
spufs_mem_read(struct file *file, char __user *buffer,
		size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	char *local_store;
	int ret;

	spu_acquire(ctx);

	local_store = ctx->ops->get_ls(ctx);
	ret = simple_read_from_buffer(buffer, size, pos, local_store, LS_SIZE);

	spu_release(ctx);
	return ret;
}
69
70static ssize_t
71spufs_mem_write(struct file *file, const char __user *buffer,
72 size_t size, loff_t *pos)
73{
74 struct spu_context *ctx = file->private_data;
Arnd Bergmann8b3d6662005-11-15 15:53:52 -050075 char *local_store;
76 int ret;
Arnd Bergmann67207b92005-11-15 15:53:48 -050077
78 size = min_t(ssize_t, LS_SIZE - *pos, size);
79 if (size <= 0)
80 return -EFBIG;
81 *pos += size;
Arnd Bergmann8b3d6662005-11-15 15:53:52 -050082
83 spu_acquire(ctx);
84
85 local_store = ctx->ops->get_ls(ctx);
86 ret = copy_from_user(local_store + *pos - size,
87 buffer, size) ? -EFAULT : size;
88
89 spu_release(ctx);
90 return ret;
Arnd Bergmann67207b92005-11-15 15:53:48 -050091}
92
/*
 * Fault handler for mmap'ed local store.  Depending on whether the
 * context is currently saved or running, the page comes either from
 * the vmalloc'ed save area (cacheable) or from the physical local
 * store of the SPE (uncached, guarded).
 */
static struct page *
spufs_mem_mmap_nopage(struct vm_area_struct *vma,
		      unsigned long address, int *type)
{
	struct page *page = NOPAGE_SIGBUS;

	struct spu_context *ctx = vma->vm_file->private_data;
	unsigned long offset = address - vma->vm_start;
	offset += vma->vm_pgoff << PAGE_SHIFT;

	spu_acquire(ctx);

	if (ctx->state == SPU_STATE_SAVED) {
		/* saved state lives in normal memory: drop the I/O bits */
		vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
					& ~(_PAGE_NO_CACHE | _PAGE_GUARDED));
		page = vmalloc_to_page(ctx->csa.lscsa->ls + offset);
	} else {
		/* live hardware local store: must be uncached and guarded */
		vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
					| _PAGE_NO_CACHE | _PAGE_GUARDED);
		page = pfn_to_page((ctx->spu->local_store_phys + offset)
				   >> PAGE_SHIFT);
	}
	spu_release(ctx);

	if (type)
		*type = VM_FAULT_MINOR;

	/* take a reference for the mapping; the VM drops it on unmap */
	page_cache_get(page);
	return page;
}

static struct vm_operations_struct spufs_mem_mmap_vmops = {
	.nopage = spufs_mem_mmap_nopage,
};
127
/*
 * mmap support for the SPU local store.  Only shared mappings are
 * allowed, since the hardware backing can move between save area
 * and physical local store.
 */
static int
spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	/* FIXME: */
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE);

	vma->vm_ops = &spufs_mem_mmap_vmops;
	return 0;
}

static struct file_operations spufs_mem_fops = {
	.open	 = spufs_mem_open,
	.read    = spufs_mem_read,
	.write   = spufs_mem_write,
	.llseek  = generic_file_llseek,
	.mmap    = spufs_mem_mmap,
};
149
/*
 * Common fault handler for mappings of the SPE problem state area.
 * @ps_offs/@ps_size select the sub-area relative to problem_phys.
 * The context must be runnable (i.e. bound to physical hardware)
 * before the physical page can be handed out; spu_acquire_runnable()
 * may sleep waiting for a schedule-in.
 */
static struct page *spufs_ps_nopage(struct vm_area_struct *vma,
				    unsigned long address,
				    int *type, unsigned long ps_offs,
				    unsigned long ps_size)
{
	struct page *page = NOPAGE_SIGBUS;
	int fault_type = VM_FAULT_SIGBUS;
	struct spu_context *ctx = vma->vm_file->private_data;
	unsigned long offset = address - vma->vm_start;
	unsigned long area;
	int ret;

	offset += vma->vm_pgoff << PAGE_SHIFT;
	if (offset >= ps_size)
		goto out;

	ret = spu_acquire_runnable(ctx);
	if (ret)
		goto out;

	area = ctx->spu->problem_phys + ps_offs;
	page = pfn_to_page((area + offset) >> PAGE_SHIFT);
	fault_type = VM_FAULT_MINOR;
	page_cache_get(page);

	spu_release(ctx);

      out:
	if (type)
		*type = fault_type;

	return page;
}
183
#if SPUFS_MMAP_4K
static struct page *spufs_cntl_mmap_nopage(struct vm_area_struct *vma,
					   unsigned long address, int *type)
{
	return spufs_ps_nopage(vma, address, type, 0x4000, 0x1000);
}

static struct vm_operations_struct spufs_cntl_mmap_vmops = {
	.nopage = spufs_cntl_mmap_nopage,
};

/*
 * mmap support for problem state control area [0x4000 - 0x4fff].
 * Only available on 4k-page kernels (SPUFS_MMAP_4K); otherwise the
 * area cannot be mapped with page granularity and mmap is disabled.
 */
static int spufs_cntl_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_RESERVED;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_cntl_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_cntl_mmap NULL
#endif /* !SPUFS_MMAP_4K */
Mark Nutter6df10a82006-03-23 00:00:12 +0100213
/* Open the "cntl" file and record its mapping for later unmap. */
static int spufs_cntl_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	file->private_data = ctx;
	file->f_mapping = inode->i_mapping;
	ctx->cntl = inode->i_mapping;
	return 0;
}

/* Not implemented yet; the area is only accessible through mmap. */
static ssize_t
spufs_cntl_read(struct file *file, char __user *buffer,
		size_t size, loff_t *pos)
{
	/* FIXME: read from spu status */
	return -EINVAL;
}

/* Not implemented yet; the area is only accessible through mmap. */
static ssize_t
spufs_cntl_write(struct file *file, const char __user *buffer,
		size_t size, loff_t *pos)
{
	/* FIXME: write to runctl bit */
	return -EINVAL;
}

static struct file_operations spufs_cntl_fops = {
	.open = spufs_cntl_open,
	.read = spufs_cntl_read,
	.write = spufs_cntl_write,
	.mmap = spufs_cntl_mmap,
};
247
Arnd Bergmann8b3d6662005-11-15 15:53:52 -0500248static int
249spufs_regs_open(struct inode *inode, struct file *file)
250{
251 struct spufs_inode_info *i = SPUFS_I(inode);
252 file->private_data = i->i_ctx;
253 return 0;
254}
255
/*
 * Read the 128 general purpose registers from the saved context.
 * spu_acquire_saved() forces the context into saved state first.
 */
static ssize_t
spufs_regs_read(struct file *file, char __user *buffer,
		size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	spu_acquire_saved(ctx);

	ret = simple_read_from_buffer(buffer, size, pos,
				      lscsa->gprs, sizeof lscsa->gprs);

	spu_release(ctx);
	return ret;
}
272
273static ssize_t
274spufs_regs_write(struct file *file, const char __user *buffer,
275 size_t size, loff_t *pos)
276{
277 struct spu_context *ctx = file->private_data;
278 struct spu_lscsa *lscsa = ctx->csa.lscsa;
279 int ret;
280
281 size = min_t(ssize_t, sizeof lscsa->gprs - *pos, size);
282 if (size <= 0)
283 return -EFBIG;
284 *pos += size;
285
286 spu_acquire_saved(ctx);
287
288 ret = copy_from_user(lscsa->gprs + *pos - size,
289 buffer, size) ? -EFAULT : size;
290
291 spu_release(ctx);
292 return ret;
293}
294
/* File operations for the per-context "regs" file. */
static struct file_operations spufs_regs_fops = {
	.open	 = spufs_regs_open,
	.read    = spufs_regs_read,
	.write   = spufs_regs_write,
	.llseek  = generic_file_llseek,
};
301
/*
 * Read the floating point status and control register from the
 * saved context state.
 */
static ssize_t
spufs_fpcr_read(struct file *file, char __user * buffer,
		size_t size, loff_t * pos)
{
	struct spu_context *ctx = file->private_data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	spu_acquire_saved(ctx);

	ret = simple_read_from_buffer(buffer, size, pos,
				      &lscsa->fpcr, sizeof(lscsa->fpcr));

	spu_release(ctx);
	return ret;
}
318
319static ssize_t
320spufs_fpcr_write(struct file *file, const char __user * buffer,
321 size_t size, loff_t * pos)
322{
323 struct spu_context *ctx = file->private_data;
324 struct spu_lscsa *lscsa = ctx->csa.lscsa;
325 int ret;
326
327 size = min_t(ssize_t, sizeof(lscsa->fpcr) - *pos, size);
328 if (size <= 0)
329 return -EFBIG;
330 *pos += size;
331
332 spu_acquire_saved(ctx);
333
334 ret = copy_from_user((char *)&lscsa->fpcr + *pos - size,
335 buffer, size) ? -EFAULT : size;
336
337 spu_release(ctx);
338 return ret;
339}
340
/* File operations for the per-context "fpcr" file (shares open with regs). */
static struct file_operations spufs_fpcr_fops = {
	.open = spufs_regs_open,
	.read = spufs_fpcr_read,
	.write = spufs_fpcr_write,
	.llseek = generic_file_llseek,
};
347
348/* generic open function for all pipe-like files */
349static int spufs_pipe_open(struct inode *inode, struct file *file)
350{
351 struct spufs_inode_info *i = SPUFS_I(inode);
352 file->private_data = i->i_ctx;
353
354 return nonseekable_open(inode, file);
355}
356
Arnd Bergmanncdcc89b2006-10-04 17:26:17 +0200357/*
358 * Read as many bytes from the mailbox as possible, until
359 * one of the conditions becomes true:
360 *
361 * - no more data available in the mailbox
362 * - end of the user provided buffer
363 * - end of the mapped area
364 */
Arnd Bergmann67207b92005-11-15 15:53:48 -0500365static ssize_t spufs_mbox_read(struct file *file, char __user *buf,
366 size_t len, loff_t *pos)
367{
Arnd Bergmann8b3d6662005-11-15 15:53:52 -0500368 struct spu_context *ctx = file->private_data;
Arnd Bergmanncdcc89b2006-10-04 17:26:17 +0200369 u32 mbox_data, __user *udata;
370 ssize_t count;
Arnd Bergmann67207b92005-11-15 15:53:48 -0500371
372 if (len < 4)
373 return -EINVAL;
374
Arnd Bergmanncdcc89b2006-10-04 17:26:17 +0200375 if (!access_ok(VERIFY_WRITE, buf, len))
Arnd Bergmann67207b92005-11-15 15:53:48 -0500376 return -EFAULT;
377
Arnd Bergmanncdcc89b2006-10-04 17:26:17 +0200378 udata = (void __user *)buf;
379
380 spu_acquire(ctx);
381 for (count = 0; count <= len; count += 4, udata++) {
382 int ret;
383 ret = ctx->ops->mbox_read(ctx, &mbox_data);
384 if (ret == 0)
385 break;
386
387 /*
388 * at the end of the mapped area, we can fault
389 * but still need to return the data we have
390 * read successfully so far.
391 */
392 ret = __put_user(mbox_data, udata);
393 if (ret) {
394 if (!count)
395 count = -EFAULT;
396 break;
397 }
398 }
399 spu_release(ctx);
400
401 if (!count)
402 count = -EAGAIN;
403
404 return count;
Arnd Bergmann67207b92005-11-15 15:53:48 -0500405}
406
/* File operations for the read-only "mbox" file. */
static struct file_operations spufs_mbox_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_mbox_read,
};
411
/*
 * Return the number of entries currently in the SPU-to-CPU mailbox
 * (low byte of the mailbox status register) as a single 32 bit word.
 */
static ssize_t spufs_mbox_stat_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 mbox_stat;

	if (len < 4)
		return -EINVAL;

	spu_acquire(ctx);

	mbox_stat = ctx->ops->mbox_stat_read(ctx) & 0xff;

	spu_release(ctx);

	if (copy_to_user(buf, &mbox_stat, sizeof mbox_stat))
		return -EFAULT;

	return 4;
}

static struct file_operations spufs_mbox_stat_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_mbox_stat_read,
};
437
/* low-level ibox access function */
size_t spu_ibox_read(struct spu_context *ctx, u32 *data)
{
	return ctx->ops->ibox_read(ctx, data);
}

/* Register/unregister SIGIO delivery on the ibox file. */
static int spufs_ibox_fasync(int fd, struct file *file, int on)
{
	struct spu_context *ctx = file->private_data;

	return fasync_helper(fd, file, on, &ctx->ibox_fasync);
}

/* interrupt-level ibox callback function. */
void spufs_ibox_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	/* wake blocked readers and notify async subscribers */
	wake_up_all(&ctx->ibox_wq);
	kill_fasync(&ctx->ibox_fasync, SIGIO, POLLIN);
}
459
/*
 * Read as many bytes from the interrupt mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - no more data available in the mailbox
 * - end of the user provided buffer
 * - end of the mapped area
 *
 * If the file is opened without O_NONBLOCK, we wait here until
 * any data is available, but return when we have been able to
 * read something.
 */
static ssize_t spufs_ibox_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 ibox_data, __user *udata;
	ssize_t count;

	if (len < 4)
		return -EINVAL;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	udata = (void __user *)buf;

	spu_acquire(ctx);

	/* wait only for the first element */
	count = 0;
	if (file->f_flags & O_NONBLOCK) {
		if (!spu_ibox_read(ctx, &ibox_data))
			count = -EAGAIN;
	} else {
		/* spufs_wait may drop and re-take the context lock */
		count = spufs_wait(ctx->ibox_wq, spu_ibox_read(ctx, &ibox_data));
	}
	if (count)
		goto out;

	/* if we can't write at all, return -EFAULT */
	count = __put_user(ibox_data, udata);
	if (count)
		goto out;

	/* drain further words without blocking */
	for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = ctx->ops->ibox_read(ctx, &ibox_data);
		if (ret == 0)
			break;
		/*
		 * at the end of the mapped area, we can fault
		 * but still need to return the data we have
		 * read successfully so far.
		 */
		ret = __put_user(ibox_data, udata);
		if (ret)
			break;
	}

out:
	spu_release(ctx);

	return count;
}
525
/* poll() support for the ibox: readable when the mailbox has data. */
static unsigned int spufs_ibox_poll(struct file *file, poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	unsigned int mask;

	poll_wait(file, &ctx->ibox_wq, wait);

	spu_acquire(ctx);
	mask = ctx->ops->mbox_stat_poll(ctx, POLLIN | POLLRDNORM);
	spu_release(ctx);

	return mask;
}

static struct file_operations spufs_ibox_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_ibox_read,
	.poll	= spufs_ibox_poll,
	.fasync	= spufs_ibox_fasync,
};
546
/*
 * Return the ibox entry count (bits 16-23 of the mailbox status
 * register) as a single 32 bit word.
 */
static ssize_t spufs_ibox_stat_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 ibox_stat;

	if (len < 4)
		return -EINVAL;

	spu_acquire(ctx);
	ibox_stat = (ctx->ops->mbox_stat_read(ctx) >> 16) & 0xff;
	spu_release(ctx);

	if (copy_to_user(buf, &ibox_stat, sizeof ibox_stat))
		return -EFAULT;

	return 4;
}

static struct file_operations spufs_ibox_stat_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_ibox_stat_read,
};
570
/* low-level mailbox write */
size_t spu_wbox_write(struct spu_context *ctx, u32 data)
{
	return ctx->ops->wbox_write(ctx, data);
}

/* Register/unregister SIGIO delivery on the wbox file. */
static int spufs_wbox_fasync(int fd, struct file *file, int on)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	ret = fasync_helper(fd, file, on, &ctx->wbox_fasync);

	return ret;
}

/* interrupt-level wbox callback function. */
void spufs_wbox_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	/* wake blocked writers and notify async subscribers */
	wake_up_all(&ctx->wbox_wq);
	kill_fasync(&ctx->wbox_fasync, SIGIO, POLLOUT);
}
595
/*
 * Write as many bytes to the interrupt mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - the mailbox is full
 * - end of the user provided buffer
 * - end of the mapped area
 *
 * If the file is opened without O_NONBLOCK, we wait here until
 * space is available, but return when we have been able to
 * write something.
 */
static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 wbox_data, __user *udata;
	ssize_t count;

	if (len < 4)
		return -EINVAL;

	udata = (void __user *)buf;
	if (!access_ok(VERIFY_READ, buf, len))
		return -EFAULT;

	if (__get_user(wbox_data, udata))
		return -EFAULT;

	spu_acquire(ctx);

	/*
	 * make sure we can at least write one element, by waiting
	 * in case of !O_NONBLOCK
	 */
	count = 0;
	if (file->f_flags & O_NONBLOCK) {
		if (!spu_wbox_write(ctx, wbox_data))
			count = -EAGAIN;
	} else {
		count = spufs_wait(ctx->wbox_wq, spu_wbox_write(ctx, wbox_data));
	}

	if (count)
		goto out;

	/* write as much as possible */
	for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = __get_user(wbox_data, udata);
		if (ret)
			break;

		ret = spu_wbox_write(ctx, wbox_data);
		if (ret == 0)
			break;
	}

out:
	spu_release(ctx);
	return count;
}
658
/* poll() support for the wbox: writable when the mailbox has space. */
static unsigned int spufs_wbox_poll(struct file *file, poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	unsigned int mask;

	poll_wait(file, &ctx->wbox_wq, wait);

	spu_acquire(ctx);
	mask = ctx->ops->mbox_stat_poll(ctx, POLLOUT | POLLWRNORM);
	spu_release(ctx);

	return mask;
}

static struct file_operations spufs_wbox_fops = {
	.open	= spufs_pipe_open,
	.write	= spufs_wbox_write,
	.poll	= spufs_wbox_poll,
	.fasync	= spufs_wbox_fasync,
};
679
/*
 * Return the wbox entry count (bits 8-15 of the mailbox status
 * register) as a single 32 bit word.
 */
static ssize_t spufs_wbox_stat_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 wbox_stat;

	if (len < 4)
		return -EINVAL;

	spu_acquire(ctx);
	wbox_stat = (ctx->ops->mbox_stat_read(ctx) >> 8) & 0xff;
	spu_release(ctx);

	if (copy_to_user(buf, &wbox_stat, sizeof wbox_stat))
		return -EFAULT;

	return 4;
}

static struct file_operations spufs_wbox_stat_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_wbox_stat_read,
};
703
/* Open "signal1": attach the context and record the mapping for unmap. */
static int spufs_signal1_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;
	file->private_data = ctx;
	file->f_mapping = inode->i_mapping;
	ctx->signal1 = inode->i_mapping;
	return nonseekable_open(inode, file);
}
713
/* Read the current value of signal notification register 1. */
static ssize_t spufs_signal1_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 data;

	if (len < 4)
		return -EINVAL;

	spu_acquire(ctx);
	data = ctx->ops->signal1_read(ctx);
	spu_release(ctx);

	if (copy_to_user(buf, &data, 4))
		return -EFAULT;

	return 4;
}
732
733static ssize_t spufs_signal1_write(struct file *file, const char __user *buf,
734 size_t len, loff_t *pos)
735{
736 struct spu_context *ctx;
Arnd Bergmann67207b92005-11-15 15:53:48 -0500737 u32 data;
738
739 ctx = file->private_data;
Arnd Bergmann67207b92005-11-15 15:53:48 -0500740
741 if (len < 4)
742 return -EINVAL;
743
744 if (copy_from_user(&data, buf, 4))
745 return -EFAULT;
746
Arnd Bergmann8b3d6662005-11-15 15:53:52 -0500747 spu_acquire(ctx);
748 ctx->ops->signal1_write(ctx, data);
749 spu_release(ctx);
Arnd Bergmann67207b92005-11-15 15:53:48 -0500750
751 return 4;
752}
753
/*
 * Fault handler for the signal1 mapping.  With 4k pages only the
 * signal 1 word is mapped; with 64k pages the page necessarily spans
 * the whole signal 1 and 2 area.
 */
static struct page *spufs_signal1_mmap_nopage(struct vm_area_struct *vma,
					      unsigned long address, int *type)
{
#if PAGE_SIZE == 0x1000
	return spufs_ps_nopage(vma, address, type, 0x14000, 0x1000);
#elif PAGE_SIZE == 0x10000
	/* For 64k pages, both signal1 and signal2 can be used to mmap the whole
	 * signal 1 and 2 area
	 */
	return spufs_ps_nopage(vma, address, type, 0x10000, 0x10000);
#else
#error unsupported page size
#endif
}

static struct vm_operations_struct spufs_signal1_mmap_vmops = {
	.nopage = spufs_signal1_mmap_nopage,
};

/* mmap support for the signal notification 1 problem state area. */
static int spufs_signal1_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_RESERVED;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_signal1_mmap_vmops;
	return 0;
}

static struct file_operations spufs_signal1_fops = {
	.open = spufs_signal1_open,
	.read = spufs_signal1_read,
	.write = spufs_signal1_write,
	.mmap = spufs_signal1_mmap,
};
792
/* Open "signal2": attach the context and record the mapping for unmap. */
static int spufs_signal2_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;
	file->private_data = ctx;
	file->f_mapping = inode->i_mapping;
	ctx->signal2 = inode->i_mapping;
	return nonseekable_open(inode, file);
}
802
Arnd Bergmann67207b92005-11-15 15:53:48 -0500803static ssize_t spufs_signal2_read(struct file *file, char __user *buf,
804 size_t len, loff_t *pos)
805{
806 struct spu_context *ctx;
Arnd Bergmann67207b92005-11-15 15:53:48 -0500807 u32 data;
808
809 ctx = file->private_data;
Arnd Bergmann67207b92005-11-15 15:53:48 -0500810
811 if (len < 4)
812 return -EINVAL;
813
Arnd Bergmann8b3d6662005-11-15 15:53:52 -0500814 spu_acquire(ctx);
815 data = ctx->ops->signal2_read(ctx);
816 spu_release(ctx);
817
Arnd Bergmann67207b92005-11-15 15:53:48 -0500818 if (copy_to_user(buf, &data, 4))
819 return -EFAULT;
820
821 return 4;
822}
823
/* Write a 32 bit value into signal notification register 2. */
static ssize_t spufs_signal2_write(struct file *file, const char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx;
	u32 data;

	ctx = file->private_data;

	if (len < 4)
		return -EINVAL;

	if (copy_from_user(&data, buf, 4))
		return -EFAULT;

	spu_acquire(ctx);
	ctx->ops->signal2_write(ctx, data);
	spu_release(ctx);

	return 4;
}
844
#if SPUFS_MMAP_4K
/*
 * Fault handler for the signal2 mapping.  With 4k pages only the
 * signal 2 word is mapped; with 64k pages the page spans the whole
 * signal 1 and 2 area.
 */
static struct page *spufs_signal2_mmap_nopage(struct vm_area_struct *vma,
					      unsigned long address, int *type)
{
#if PAGE_SIZE == 0x1000
	return spufs_ps_nopage(vma, address, type, 0x1c000, 0x1000);
#elif PAGE_SIZE == 0x10000
	/* For 64k pages, both signal1 and signal2 can be used to mmap the whole
	 * signal 1 and 2 area
	 */
	return spufs_ps_nopage(vma, address, type, 0x10000, 0x10000);
#else
#error unsupported page size
#endif
}

static struct vm_operations_struct spufs_signal2_mmap_vmops = {
	.nopage = spufs_signal2_mmap_nopage,
};

/* mmap support for the signal notification 2 problem state area. */
static int spufs_signal2_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	/* FIXME: */
	vma->vm_flags |= VM_RESERVED;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_signal2_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_signal2_mmap NULL
#endif /* !SPUFS_MMAP_4K */

static struct file_operations spufs_signal2_fops = {
	.open = spufs_signal2_open,
	.read = spufs_signal2_read,
	.write = spufs_signal2_write,
	.mmap = spufs_signal2_mmap,
};
888
/* Set the signal 1 notification type (logical OR vs. overwrite mode). */
static void spufs_signal1_type_set(void *data, u64 val)
{
	struct spu_context *ctx = data;

	spu_acquire(ctx);
	ctx->ops->signal1_type_set(ctx, val);
	spu_release(ctx);
}

/* Get the current signal 1 notification type. */
static u64 spufs_signal1_type_get(void *data)
{
	struct spu_context *ctx = data;
	u64 ret;

	spu_acquire(ctx);
	ret = ctx->ops->signal1_type_get(ctx);
	spu_release(ctx);

	return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_signal1_type, spufs_signal1_type_get,
					spufs_signal1_type_set, "%llu");
911
/* Set the signal 2 notification type (logical OR vs. overwrite mode). */
static void spufs_signal2_type_set(void *data, u64 val)
{
	struct spu_context *ctx = data;

	spu_acquire(ctx);
	ctx->ops->signal2_type_set(ctx, val);
	spu_release(ctx);
}

/* Get the current signal 2 notification type. */
static u64 spufs_signal2_type_get(void *data)
{
	struct spu_context *ctx = data;
	u64 ret;

	spu_acquire(ctx);
	ret = ctx->ops->signal2_type_get(ctx);
	spu_release(ctx);

	return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_signal2_type, spufs_signal2_type_get,
					spufs_signal2_type_set, "%llu");
934
#if SPUFS_MMAP_4K
/* Fault in the single 4k page at problem state offset 0x0000. */
static struct page *spufs_mss_mmap_nopage(struct vm_area_struct *vma,
					  unsigned long address, int *type)
{
	return spufs_ps_nopage(vma, address, type, 0x0000, 0x1000);
}

static struct vm_operations_struct spufs_mss_mmap_vmops = {
	.nopage = spufs_mss_mmap_nopage,
};

/*
 * mmap support for problem state MSS area [0x0000 - 0x0fff].
 * (Offset 0x0000/size 0x1000 passed to spufs_ps_nopage() above; the
 * earlier "MFC DMA area" comment here was a copy-paste from the mfc
 * handler below.)
 */
static int spufs_mss_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	/* hardware register mapping: uncached and guarded */
	vma->vm_flags |= VM_RESERVED;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_mss_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_mss_mmap NULL
#endif /* !SPUFS_MMAP_4K */
arnd@arndb.ded9379c42006-06-19 20:33:21 +0200964
965static int spufs_mss_open(struct inode *inode, struct file *file)
966{
967 struct spufs_inode_info *i = SPUFS_I(inode);
968
969 file->private_data = i->i_ctx;
970 return nonseekable_open(inode, file);
971}
972
/* The "mss" file only supports open and mmap; no read/write. */
static struct file_operations spufs_mss_fops = {
	.open	 = spufs_mss_open,
	.mmap	 = spufs_mss_mmap,
};
977
/* Fault in pages from the full 128k problem state area at offset 0. */
static struct page *spufs_psmap_mmap_nopage(struct vm_area_struct *vma,
					    unsigned long address, int *type)
{
	return spufs_ps_nopage(vma, address, type, 0x0000, 0x20000);
}

static struct vm_operations_struct spufs_psmap_mmap_vmops = {
	.nopage = spufs_psmap_mmap_nopage,
};

/*
 * mmap support for full problem state area [0x00000 - 0x1ffff].
 * Note: unlike the 4k-page handlers, this one is provided regardless
 * of SPUFS_MMAP_4K.
 */
static int spufs_psmap_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	/* hardware register mapping: uncached and guarded */
	vma->vm_flags |= VM_RESERVED;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_psmap_mmap_vmops;
	return 0;
}
1003
1004static int spufs_psmap_open(struct inode *inode, struct file *file)
1005{
1006 struct spufs_inode_info *i = SPUFS_I(inode);
1007
1008 file->private_data = i->i_ctx;
1009 return nonseekable_open(inode, file);
1010}
1011
/* The "psmap" file only supports open and mmap; no read/write. */
static struct file_operations spufs_psmap_fops = {
	.open	 = spufs_psmap_open,
	.mmap	 = spufs_psmap_mmap,
};
1016
1017
#if SPUFS_MMAP_4K
/* Fault in the single 4k MFC page at problem state offset 0x3000. */
static struct page *spufs_mfc_mmap_nopage(struct vm_area_struct *vma,
					  unsigned long address, int *type)
{
	return spufs_ps_nopage(vma, address, type, 0x3000, 0x1000);
}

static struct vm_operations_struct spufs_mfc_mmap_vmops = {
	.nopage = spufs_mfc_mmap_nopage,
};

/*
 * mmap support for problem state MFC DMA area [0x3000 - 0x3fff].
 * (The old comment claimed [0x0000 - 0x0fff], but spufs_ps_nopage()
 * above is called with offset 0x3000.)
 */
static int spufs_mfc_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	/* hardware register mapping: uncached and guarded */
	vma->vm_flags |= VM_RESERVED;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_mfc_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_mfc_mmap NULL
#endif /* !SPUFS_MMAP_4K */
Arnd Bergmanna33a7d72006-03-23 00:00:11 +01001047
/*
 * Open the per-context MFC DMA file.  Only the process owning the
 * context may use it, and only one open file is allowed at a time.
 */
static int spufs_mfc_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	/* we don't want to deal with DMA into other processes */
	if (ctx->owner != current->mm)
		return -EINVAL;

	/* NOTE(review): i_count == 1 is taken to mean nobody else has
	   this inode open -- verify this still holds on this kernel */
	if (atomic_read(&inode->i_count) != 1)
		return -EBUSY;

	file->private_data = ctx;
	return nonseekable_open(inode, file);
}
1063
/*
 * interrupt-level mfc callback function.
 *
 * Wakes every sleeper on the context's mfc_wq, and if SIGIO delivery
 * was requested via fasync, reads the hardware state (without taking
 * the context lock -- we are in interrupt context) to build a poll
 * mask: POLLOUT when the command queue has space, POLLIN when one of
 * the awaited tag groups completed.
 */
void spufs_mfc_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	wake_up_all(&ctx->mfc_wq);

	pr_debug("%s %s\n", __FUNCTION__, spu->name);
	if (ctx->mfc_fasync) {
		u32 free_elements, tagstatus;
		unsigned int mask;

		/* no need for spu_acquire in interrupt context */
		free_elements = ctx->ops->get_mfc_free_elements(ctx);
		tagstatus = ctx->ops->read_mfc_tagstatus(ctx);

		/* translate hardware state into a poll mask */
		mask = 0;
		if (free_elements & 0xffff)
			mask |= POLLOUT;
		if (tagstatus & ctx->tagwait)
			mask |= POLLIN;

		kill_fasync(&ctx->mfc_fasync, SIGIO, mask);
	}
}
1089
/*
 * Condition helper for spufs_wait() in spufs_mfc_read(): returns 1 and
 * stores the completed tag groups in *status (clearing them from
 * ctx->tagwait) when at least one awaited group is done; otherwise
 * re-arms the tag completion interrupt and returns 0 so the caller
 * keeps sleeping.  Called with the context acquired by spufs_mfc_read().
 */
static int spufs_read_mfc_tagstatus(struct spu_context *ctx, u32 *status)
{
	/* See if there is one tag group is complete */
	/* FIXME we need locking around tagwait */
	*status = ctx->ops->read_mfc_tagstatus(ctx) & ctx->tagwait;
	ctx->tagwait &= ~*status;
	if (*status)
		return 1;

	/* enable interrupt waiting for any tag group,
	   may silently fail if interrupts are already enabled */
	ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
	return 0;
}
1104
1105static ssize_t spufs_mfc_read(struct file *file, char __user *buffer,
1106 size_t size, loff_t *pos)
1107{
1108 struct spu_context *ctx = file->private_data;
1109 int ret = -EINVAL;
1110 u32 status;
1111
1112 if (size != 4)
1113 goto out;
1114
1115 spu_acquire(ctx);
1116 if (file->f_flags & O_NONBLOCK) {
1117 status = ctx->ops->read_mfc_tagstatus(ctx);
1118 if (!(status & ctx->tagwait))
1119 ret = -EAGAIN;
1120 else
1121 ctx->tagwait &= ~status;
1122 } else {
1123 ret = spufs_wait(ctx->mfc_wq,
1124 spufs_read_mfc_tagstatus(ctx, &status));
1125 }
1126 spu_release(ctx);
1127
1128 if (ret)
1129 goto out;
1130
1131 ret = 4;
1132 if (copy_to_user(buffer, &status, 4))
1133 ret = -EFAULT;
1134
1135out:
1136 return ret;
1137}
1138
/*
 * Sanity-check a DMA command supplied by user space before queueing:
 * the opcode must be a put/get variant, LSA and EA must agree in their
 * low four bits, the size must be a naturally aligned 1/2/4/8-byte or
 * multiple-of-16 transfer of at most 16kB, the tag must be in the low
 * 16 tags available to user space, and the class must be zero.
 * Returns 0 when valid, -EIO otherwise.
 */
static int spufs_check_valid_dma(struct mfc_dma_command *cmd)
{
	pr_debug("queueing DMA %x %lx %x %x %x\n", cmd->lsa,
		 cmd->ea, cmd->size, cmd->tag, cmd->cmd);

	/* only plain put/get command variants are allowed */
	switch (cmd->cmd) {
	case MFC_PUT_CMD:
	case MFC_PUTF_CMD:
	case MFC_PUTB_CMD:
	case MFC_GET_CMD:
	case MFC_GETF_CMD:
	case MFC_GETB_CMD:
		break;
	default:
		pr_debug("invalid DMA opcode %x\n", cmd->cmd);
		return -EIO;
	}

	/* local store and effective address must be equally aligned
	   within a quadword */
	if ((cmd->lsa & 0xf) != (cmd->ea &0xf)) {
		pr_debug("invalid DMA alignment, ea %lx lsa %x\n",
			 cmd->ea, cmd->lsa);
		return -EIO;
	}

	/* size modulo 16 selects the required LSA alignment; any other
	   remainder is invalid.  The error: label intentionally falls
	   through into the default case so both share the same exit. */
	switch (cmd->size & 0xf) {
	case 1:
		break;
	case 2:
		if (cmd->lsa & 1)
			goto error;
		break;
	case 4:
		if (cmd->lsa & 3)
			goto error;
		break;
	case 8:
		if (cmd->lsa & 7)
			goto error;
		break;
	case 0:
		if (cmd->lsa & 15)
			goto error;
		break;
	error:
	default:
		pr_debug("invalid DMA alignment %x for size %x\n",
			 cmd->lsa & 0xf, cmd->size);
		return -EIO;
	}

	/* at most 16kB per command */
	if (cmd->size > 16 * 1024) {
		pr_debug("invalid DMA size %x\n", cmd->size);
		return -EIO;
	}

	if (cmd->tag & 0xfff0) {
		/* we reserve the higher tag numbers for kernel use */
		pr_debug("invalid DMA tag\n");
		return -EIO;
	}

	if (cmd->class) {
		/* not supported in this version */
		pr_debug("invalid DMA class\n");
		return -EIO;
	}

	return 0;
}
1208
/*
 * Condition helper for spufs_wait() in spufs_mfc_write(): try to submit
 * the command.  Returns 1 when finished (*error holds 0 on success or a
 * hard error), or 0 when the queue is still full (*error == -EAGAIN)
 * and the caller should sleep until spufs_mfc_callback() wakes it.
 */
static int spu_send_mfc_command(struct spu_context *ctx,
				struct mfc_dma_command cmd,
				int *error)
{
	*error = ctx->ops->send_mfc_command(ctx, &cmd);
	if (*error == -EAGAIN) {
		/* wait for any tag group to complete
		   so we have space for the new command */
		ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
		/* try again, because the queue might be
		   empty again */
		*error = ctx->ops->send_mfc_command(ctx, &cmd);
		if (*error == -EAGAIN)
			return 0;
	}
	return 1;
}
1226
1227static ssize_t spufs_mfc_write(struct file *file, const char __user *buffer,
1228 size_t size, loff_t *pos)
1229{
1230 struct spu_context *ctx = file->private_data;
1231 struct mfc_dma_command cmd;
1232 int ret = -EINVAL;
1233
1234 if (size != sizeof cmd)
1235 goto out;
1236
1237 ret = -EFAULT;
1238 if (copy_from_user(&cmd, buffer, sizeof cmd))
1239 goto out;
1240
1241 ret = spufs_check_valid_dma(&cmd);
1242 if (ret)
1243 goto out;
1244
1245 spu_acquire_runnable(ctx);
1246 if (file->f_flags & O_NONBLOCK) {
1247 ret = ctx->ops->send_mfc_command(ctx, &cmd);
1248 } else {
1249 int status;
1250 ret = spufs_wait(ctx->mfc_wq,
1251 spu_send_mfc_command(ctx, cmd, &status));
1252 if (status)
1253 ret = status;
1254 }
1255 spu_release(ctx);
1256
1257 if (ret)
1258 goto out;
1259
1260 ctx->tagwait |= 1 << cmd.tag;
1261
1262out:
1263 return ret;
1264}
1265
/*
 * Poll the mfc file: writable while the MFC command queue has free
 * entries, readable once a tag group from ctx->tagwait has completed.
 * set_mfc_query() re-arms the completion interrupt so the poll_wait()
 * sleeper is later woken from spufs_mfc_callback().
 */
static unsigned int spufs_mfc_poll(struct file *file,poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	u32 free_elements, tagstatus;
	unsigned int mask;

	spu_acquire(ctx);
	/* NOTE(review): query mode 2 vs. the mode 1 used elsewhere --
	   semantics live in the backend ops; confirm before changing */
	ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2);
	free_elements = ctx->ops->get_mfc_free_elements(ctx);
	tagstatus = ctx->ops->read_mfc_tagstatus(ctx);
	spu_release(ctx);

	poll_wait(file, &ctx->mfc_wq, wait);

	mask = 0;
	if (free_elements & 0xffff)
		mask |= POLLOUT | POLLWRNORM;
	if (tagstatus & ctx->tagwait)
		mask |= POLLIN | POLLRDNORM;

	pr_debug("%s: free %d tagstatus %d tagwait %d\n", __FUNCTION__,
		 free_elements, tagstatus, ctx->tagwait);

	return mask;
}
1291
/*
 * flush is meant to wait until all DMA queued through this file has
 * completed.  The actual wait is compiled out (#if 0) because it
 * currently hangs; until that is fixed, flush reports success without
 * waiting.
 */
static int spufs_mfc_flush(struct file *file, fl_owner_t id)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	spu_acquire(ctx);
#if 0
/* this currently hangs */
	ret = spufs_wait(ctx->mfc_wq,
			 ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2));
	if (ret)
		goto out;
	ret = spufs_wait(ctx->mfc_wq,
			 ctx->ops->read_mfc_tagstatus(ctx) == ctx->tagwait);
out:
#else
	ret = 0;
#endif
	spu_release(ctx);

	return ret;
}
1314
/* fsync has the same semantics as flush for this file: drain the DMA. */
static int spufs_mfc_fsync(struct file *file, struct dentry *dentry,
			   int datasync)
{
	return spufs_mfc_flush(file, NULL);
}
1320
1321static int spufs_mfc_fasync(int fd, struct file *file, int on)
1322{
1323 struct spu_context *ctx = file->private_data;
1324
1325 return fasync_helper(fd, file, on, &ctx->mfc_fasync);
1326}
1327
/* File operations for the per-context "mfc" DMA queueing file. */
static struct file_operations spufs_mfc_fops = {
	.open	 = spufs_mfc_open,
	.read	 = spufs_mfc_read,
	.write	 = spufs_mfc_write,
	.poll	 = spufs_mfc_poll,
	.flush	 = spufs_mfc_flush,
	.fsync	 = spufs_mfc_fsync,
	.fasync	 = spufs_mfc_fasync,
	.mmap	 = spufs_mfc_mmap,
};
1338
Arnd Bergmann67207b92005-11-15 15:53:48 -05001339static void spufs_npc_set(void *data, u64 val)
1340{
1341 struct spu_context *ctx = data;
Arnd Bergmann8b3d6662005-11-15 15:53:52 -05001342 spu_acquire(ctx);
1343 ctx->ops->npc_write(ctx, val);
1344 spu_release(ctx);
Arnd Bergmann67207b92005-11-15 15:53:48 -05001345}
1346
1347static u64 spufs_npc_get(void *data)
1348{
1349 struct spu_context *ctx = data;
1350 u64 ret;
Arnd Bergmann8b3d6662005-11-15 15:53:52 -05001351 spu_acquire(ctx);
1352 ret = ctx->ops->npc_read(ctx);
1353 spu_release(ctx);
Arnd Bergmann67207b92005-11-15 15:53:48 -05001354 return ret;
1355}
1356DEFINE_SIMPLE_ATTRIBUTE(spufs_npc_ops, spufs_npc_get, spufs_npc_set, "%llx\n")
1357
Arnd Bergmann8b3d6662005-11-15 15:53:52 -05001358static void spufs_decr_set(void *data, u64 val)
1359{
1360 struct spu_context *ctx = data;
1361 struct spu_lscsa *lscsa = ctx->csa.lscsa;
1362 spu_acquire_saved(ctx);
1363 lscsa->decr.slot[0] = (u32) val;
1364 spu_release(ctx);
1365}
1366
1367static u64 spufs_decr_get(void *data)
1368{
1369 struct spu_context *ctx = data;
1370 struct spu_lscsa *lscsa = ctx->csa.lscsa;
1371 u64 ret;
1372 spu_acquire_saved(ctx);
1373 ret = lscsa->decr.slot[0];
1374 spu_release(ctx);
1375 return ret;
1376}
1377DEFINE_SIMPLE_ATTRIBUTE(spufs_decr_ops, spufs_decr_get, spufs_decr_set,
1378 "%llx\n")
1379
1380static void spufs_decr_status_set(void *data, u64 val)
1381{
1382 struct spu_context *ctx = data;
1383 struct spu_lscsa *lscsa = ctx->csa.lscsa;
1384 spu_acquire_saved(ctx);
1385 lscsa->decr_status.slot[0] = (u32) val;
1386 spu_release(ctx);
1387}
1388
1389static u64 spufs_decr_status_get(void *data)
1390{
1391 struct spu_context *ctx = data;
1392 struct spu_lscsa *lscsa = ctx->csa.lscsa;
1393 u64 ret;
1394 spu_acquire_saved(ctx);
1395 ret = lscsa->decr_status.slot[0];
1396 spu_release(ctx);
1397 return ret;
1398}
1399DEFINE_SIMPLE_ATTRIBUTE(spufs_decr_status_ops, spufs_decr_status_get,
1400 spufs_decr_status_set, "%llx\n")
1401
1402static void spufs_spu_tag_mask_set(void *data, u64 val)
1403{
1404 struct spu_context *ctx = data;
1405 struct spu_lscsa *lscsa = ctx->csa.lscsa;
1406 spu_acquire_saved(ctx);
1407 lscsa->tag_mask.slot[0] = (u32) val;
1408 spu_release(ctx);
1409}
1410
1411static u64 spufs_spu_tag_mask_get(void *data)
1412{
1413 struct spu_context *ctx = data;
1414 struct spu_lscsa *lscsa = ctx->csa.lscsa;
1415 u64 ret;
1416 spu_acquire_saved(ctx);
1417 ret = lscsa->tag_mask.slot[0];
1418 spu_release(ctx);
1419 return ret;
1420}
1421DEFINE_SIMPLE_ATTRIBUTE(spufs_spu_tag_mask_ops, spufs_spu_tag_mask_get,
1422 spufs_spu_tag_mask_set, "%llx\n")
1423
1424static void spufs_event_mask_set(void *data, u64 val)
1425{
1426 struct spu_context *ctx = data;
1427 struct spu_lscsa *lscsa = ctx->csa.lscsa;
1428 spu_acquire_saved(ctx);
1429 lscsa->event_mask.slot[0] = (u32) val;
1430 spu_release(ctx);
1431}
1432
1433static u64 spufs_event_mask_get(void *data)
1434{
1435 struct spu_context *ctx = data;
1436 struct spu_lscsa *lscsa = ctx->csa.lscsa;
1437 u64 ret;
1438 spu_acquire_saved(ctx);
1439 ret = lscsa->event_mask.slot[0];
1440 spu_release(ctx);
1441 return ret;
1442}
1443DEFINE_SIMPLE_ATTRIBUTE(spufs_event_mask_ops, spufs_event_mask_get,
1444 spufs_event_mask_set, "%llx\n")
1445
1446static void spufs_srr0_set(void *data, u64 val)
1447{
1448 struct spu_context *ctx = data;
1449 struct spu_lscsa *lscsa = ctx->csa.lscsa;
1450 spu_acquire_saved(ctx);
1451 lscsa->srr0.slot[0] = (u32) val;
1452 spu_release(ctx);
1453}
1454
1455static u64 spufs_srr0_get(void *data)
1456{
1457 struct spu_context *ctx = data;
1458 struct spu_lscsa *lscsa = ctx->csa.lscsa;
1459 u64 ret;
1460 spu_acquire_saved(ctx);
1461 ret = lscsa->srr0.slot[0];
1462 spu_release(ctx);
1463 return ret;
1464}
1465DEFINE_SIMPLE_ATTRIBUTE(spufs_srr0_ops, spufs_srr0_get, spufs_srr0_set,
1466 "%llx\n")
1467
arnd@arndb.de7b1a7012006-06-19 20:33:24 +02001468static u64 spufs_id_get(void *data)
1469{
1470 struct spu_context *ctx = data;
1471 u64 num;
1472
1473 spu_acquire(ctx);
1474 if (ctx->state == SPU_STATE_RUNNABLE)
1475 num = ctx->spu->number;
1476 else
1477 num = (unsigned int)-1;
1478 spu_release(ctx);
1479
1480 return num;
1481}
Al Viroe45d6632006-09-23 01:37:41 +01001482DEFINE_SIMPLE_ATTRIBUTE(spufs_id_ops, spufs_id_get, NULL, "0x%llx\n")
arnd@arndb.de7b1a7012006-06-19 20:33:24 +02001483
/*
 * Files created in every spufs context directory, with their file
 * operations and permission bits.  Terminated by an empty entry.
 */
struct tree_descr spufs_dir_contents[] = {
	{ "mem",  &spufs_mem_fops,  0666, },
	{ "regs", &spufs_regs_fops,  0666, },
	{ "mbox", &spufs_mbox_fops, 0444, },
	{ "ibox", &spufs_ibox_fops, 0444, },
	{ "wbox", &spufs_wbox_fops, 0222, },
	{ "mbox_stat", &spufs_mbox_stat_fops, 0444, },
	{ "ibox_stat", &spufs_ibox_stat_fops, 0444, },
	{ "wbox_stat", &spufs_wbox_stat_fops, 0444, },
	{ "signal1", &spufs_signal1_fops, 0666, },
	{ "signal2", &spufs_signal2_fops, 0666, },
	{ "signal1_type", &spufs_signal1_type, 0666, },
	{ "signal2_type", &spufs_signal2_type, 0666, },
	{ "mss", &spufs_mss_fops, 0666, },
	{ "mfc", &spufs_mfc_fops, 0666, },
	{ "cntl", &spufs_cntl_fops,  0666, },
	{ "npc", &spufs_npc_ops, 0666, },
	{ "fpcr", &spufs_fpcr_fops, 0666, },
	{ "decr", &spufs_decr_ops, 0666, },
	{ "decr_status", &spufs_decr_status_ops, 0666, },
	{ "spu_tag_mask", &spufs_spu_tag_mask_ops, 0666, },
	{ "event_mask", &spufs_event_mask_ops, 0666, },
	{ "srr0", &spufs_srr0_ops, 0666, },
	{ "phys-id", &spufs_id_ops, 0666, },
	{ "psmap", &spufs_psmap_fops, 0666, },
	{},
};