/*
 * SPU file system -- file contents
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
22
Arnd Bergmanna33a7d72006-03-23 00:00:11 +010023#undef DEBUG
24
Arnd Bergmann67207b92005-11-15 15:53:48 -050025#include <linux/fs.h>
26#include <linux/ioctl.h>
27#include <linux/module.h>
Arnd Bergmannd88cfff2005-12-05 22:52:22 -050028#include <linux/pagemap.h>
Arnd Bergmann67207b92005-11-15 15:53:48 -050029#include <linux/poll.h>
Arnd Bergmann51104592005-12-05 22:52:25 -050030#include <linux/ptrace.h>
Arnd Bergmann67207b92005-11-15 15:53:48 -050031
32#include <asm/io.h>
33#include <asm/semaphore.h>
34#include <asm/spu.h>
35#include <asm/uaccess.h>
36
37#include "spufs.h"
38
Benjamin Herrenschmidt27d5bf22006-10-04 17:26:11 +020039#define SPUFS_MMAP_4K (PAGE_SIZE == 0x1000)
40
Arnd Bergmann8b3d6662005-11-15 15:53:52 -050041
/*
 * Open the "mem" (local store) file: remember the context in
 * private_data and record the inode's mapping in ctx->local_store
 * so other code can find the pages backing this file.
 */
static int
spufs_mem_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;
	file->private_data = ctx;
	file->f_mapping = inode->i_mapping;
	ctx->local_store = inode->i_mapping;
	return 0;
}
52
/*
 * Read from the SPU local store at *pos.  The context lock is held
 * across the copy so the backing store cannot change under us while
 * we access it through ctx->ops->get_ls().
 */
static ssize_t
spufs_mem_read(struct file *file, char __user *buffer,
				size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	char *local_store;
	int ret;

	spu_acquire(ctx);

	local_store = ctx->ops->get_ls(ctx);
	/* bounds-checks against LS_SIZE and advances *pos for us */
	ret = simple_read_from_buffer(buffer, size, pos, local_store, LS_SIZE);

	spu_release(ctx);
	return ret;
}
69
70static ssize_t
71spufs_mem_write(struct file *file, const char __user *buffer,
72 size_t size, loff_t *pos)
73{
74 struct spu_context *ctx = file->private_data;
Arnd Bergmann8b3d6662005-11-15 15:53:52 -050075 char *local_store;
76 int ret;
Arnd Bergmann67207b92005-11-15 15:53:48 -050077
78 size = min_t(ssize_t, LS_SIZE - *pos, size);
79 if (size <= 0)
80 return -EFBIG;
81 *pos += size;
Arnd Bergmann8b3d6662005-11-15 15:53:52 -050082
83 spu_acquire(ctx);
84
85 local_store = ctx->ops->get_ls(ctx);
86 ret = copy_from_user(local_store + *pos - size,
87 buffer, size) ? -EFAULT : size;
88
89 spu_release(ctx);
90 return ret;
Arnd Bergmann67207b92005-11-15 15:53:48 -050091}
92
/*
 * nopage handler for mmap of the local store.
 *
 * A saved context is backed by the vmalloc'ed save area (normal,
 * cacheable memory); a context loaded on a physical SPU is backed by
 * the real local store, which must be mapped uncached and guarded.
 * The context lock keeps ctx->state stable while we decide.
 */
static struct page *
spufs_mem_mmap_nopage(struct vm_area_struct *vma,
		      unsigned long address, int *type)
{
	struct page *page = NOPAGE_SIGBUS;

	struct spu_context *ctx = vma->vm_file->private_data;
	unsigned long offset = address - vma->vm_start;
	offset += vma->vm_pgoff << PAGE_SHIFT;

	spu_acquire(ctx);

	if (ctx->state == SPU_STATE_SAVED) {
		/* saved copy is ordinary kernel memory: allow caching */
		vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
					     & ~(_PAGE_NO_CACHE | _PAGE_GUARDED));
		page = vmalloc_to_page(ctx->csa.lscsa->ls + offset);
	} else {
		/* live SPE local store: uncached + guarded I/O mapping */
		vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
					     | _PAGE_NO_CACHE | _PAGE_GUARDED);
		page = pfn_to_page((ctx->spu->local_store_phys + offset)
				   >> PAGE_SHIFT);
	}
	spu_release(ctx);

	if (type)
		*type = VM_FAULT_MINOR;

	/* reference is consumed by the fault handler */
	page_cache_get(page);
	return page;
}

static struct vm_operations_struct spufs_mem_mmap_vmops = {
	.nopage = spufs_mem_mmap_nopage,
};
127
/*
 * mmap of the local store file.  Only MAP_SHARED makes sense here:
 * the backing store is the SPU itself, so private copies would be
 * meaningless.
 */
static int
spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	/* FIXME: */
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE);

	vma->vm_ops = &spufs_mem_mmap_vmops;
	return 0;
}

static struct file_operations spufs_mem_fops = {
	.open	 = spufs_mem_open,
	.read	 = spufs_mem_read,
	.write	 = spufs_mem_write,
	.llseek	 = generic_file_llseek,
	.mmap	 = spufs_mem_mmap,
};
149
/*
 * Common nopage handler for mmaps of SPU problem-state windows.
 * @ps_offs: byte offset of the window within the problem state area
 * @ps_size: size of the window; faults past it get SIGBUS
 *
 * The context must be runnable on a physical SPU for the problem
 * state registers to exist, so spu_acquire_runnable() may sleep.
 */
static struct page *spufs_ps_nopage(struct vm_area_struct *vma,
				    unsigned long address,
				    int *type, unsigned long ps_offs,
				    unsigned long ps_size)
{
	struct page *page = NOPAGE_SIGBUS;
	int fault_type = VM_FAULT_SIGBUS;
	struct spu_context *ctx = vma->vm_file->private_data;
	unsigned long offset = address - vma->vm_start;
	unsigned long area;
	int ret;

	offset += vma->vm_pgoff << PAGE_SHIFT;
	if (offset >= ps_size)
		goto out;

	ret = spu_acquire_runnable(ctx);
	if (ret)
		goto out;

	area = ctx->spu->problem_phys + ps_offs;
	page = pfn_to_page((area + offset) >> PAGE_SHIFT);
	fault_type = VM_FAULT_MINOR;
	/* reference consumed by the fault handler */
	page_cache_get(page);

	spu_release(ctx);

 out:
	if (type)
		*type = fault_type;

	return page;
}
183
/* mmap of the 4k control window is only possible with 4k kernel pages */
#if SPUFS_MMAP_4K
static struct page *spufs_cntl_mmap_nopage(struct vm_area_struct *vma,
					   unsigned long address, int *type)
{
	return spufs_ps_nopage(vma, address, type, 0x4000, 0x1000);
}

static struct vm_operations_struct spufs_cntl_mmap_vmops = {
	.nopage = spufs_cntl_mmap_nopage,
};

/*
 * mmap support for problem state control area [0x4000 - 0x4fff].
 */
static int spufs_cntl_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_RESERVED;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_cntl_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_cntl_mmap NULL
#endif /* !SPUFS_MMAP_4K */
Mark Nutter6df10a82006-03-23 00:00:12 +0100213
/* Open the "cntl" file and record its mapping in ctx->cntl. */
static int spufs_cntl_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	file->private_data = ctx;
	file->f_mapping = inode->i_mapping;
	ctx->cntl = inode->i_mapping;
	return 0;
}

/* read()/write() on cntl are not implemented yet; mmap is the only way in */
static ssize_t
spufs_cntl_read(struct file *file, char __user *buffer,
		size_t size, loff_t *pos)
{
	/* FIXME: read from spu status */
	return -EINVAL;
}

static ssize_t
spufs_cntl_write(struct file *file, const char __user *buffer,
		 size_t size, loff_t *pos)
{
	/* FIXME: write to runctl bit */
	return -EINVAL;
}

static struct file_operations spufs_cntl_fops = {
	.open = spufs_cntl_open,
	.read = spufs_cntl_read,
	.write = spufs_cntl_write,
	.mmap = spufs_cntl_mmap,
};
247
Arnd Bergmann8b3d6662005-11-15 15:53:52 -0500248static int
249spufs_regs_open(struct inode *inode, struct file *file)
250{
251 struct spufs_inode_info *i = SPUFS_I(inode);
252 file->private_data = i->i_ctx;
253 return 0;
254}
255
/*
 * Read the 128 general purpose registers from the context save area.
 * spu_acquire_saved() forces the context off the SPU first so the
 * save area is current.
 */
static ssize_t
spufs_regs_read(struct file *file, char __user *buffer,
		size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	spu_acquire_saved(ctx);

	ret = simple_read_from_buffer(buffer, size, pos,
				      lscsa->gprs, sizeof lscsa->gprs);

	spu_release(ctx);
	return ret;
}
272
273static ssize_t
274spufs_regs_write(struct file *file, const char __user *buffer,
275 size_t size, loff_t *pos)
276{
277 struct spu_context *ctx = file->private_data;
278 struct spu_lscsa *lscsa = ctx->csa.lscsa;
279 int ret;
280
281 size = min_t(ssize_t, sizeof lscsa->gprs - *pos, size);
282 if (size <= 0)
283 return -EFBIG;
284 *pos += size;
285
286 spu_acquire_saved(ctx);
287
288 ret = copy_from_user(lscsa->gprs + *pos - size,
289 buffer, size) ? -EFAULT : size;
290
291 spu_release(ctx);
292 return ret;
293}
294
/* file operations for the "regs" file: seekable read/write of the GPRs */
static struct file_operations spufs_regs_fops = {
	.open	 = spufs_regs_open,
	.read    = spufs_regs_read,
	.write   = spufs_regs_write,
	.llseek  = generic_file_llseek,
};
301
/*
 * Read the floating point status/control register from the save area;
 * the context is forced into saved state first.
 */
static ssize_t
spufs_fpcr_read(struct file *file, char __user * buffer,
		size_t size, loff_t * pos)
{
	struct spu_context *ctx = file->private_data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	spu_acquire_saved(ctx);

	ret = simple_read_from_buffer(buffer, size, pos,
				      &lscsa->fpcr, sizeof(lscsa->fpcr));

	spu_release(ctx);
	return ret;
}
318
319static ssize_t
320spufs_fpcr_write(struct file *file, const char __user * buffer,
321 size_t size, loff_t * pos)
322{
323 struct spu_context *ctx = file->private_data;
324 struct spu_lscsa *lscsa = ctx->csa.lscsa;
325 int ret;
326
327 size = min_t(ssize_t, sizeof(lscsa->fpcr) - *pos, size);
328 if (size <= 0)
329 return -EFBIG;
330 *pos += size;
331
332 spu_acquire_saved(ctx);
333
334 ret = copy_from_user((char *)&lscsa->fpcr + *pos - size,
335 buffer, size) ? -EFAULT : size;
336
337 spu_release(ctx);
338 return ret;
339}
340
/* file operations for the "fpcr" file; shares the open with "regs" */
static struct file_operations spufs_fpcr_fops = {
	.open = spufs_regs_open,
	.read = spufs_fpcr_read,
	.write = spufs_fpcr_write,
	.llseek = generic_file_llseek,
};
/* generic open function for all pipe-like files */
static int spufs_pipe_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	file->private_data = i->i_ctx;

	/* mailbox-style files have no meaningful file position */
	return nonseekable_open(inode, file);
}
356
/*
 * Read one 32-bit word from the SPU-to-CPU mailbox.  Never blocks:
 * an empty mailbox yields -EAGAIN.  The buffer must hold at least
 * four bytes.
 */
static ssize_t spufs_mbox_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 mbox_data;
	int ret;

	if (len < 4)
		return -EINVAL;

	spu_acquire(ctx);
	ret = ctx->ops->mbox_read(ctx, &mbox_data);
	spu_release(ctx);

	/* mbox_read returns 0 when no data was available */
	if (!ret)
		return -EAGAIN;

	if (copy_to_user(buf, &mbox_data, sizeof mbox_data))
		return -EFAULT;

	return 4;
}

static struct file_operations spufs_mbox_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_mbox_read,
};
384
/*
 * Report the mbox entry count: the low byte of the mailbox status
 * register, returned as one 32-bit word.
 */
static ssize_t spufs_mbox_stat_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 mbox_stat;

	if (len < 4)
		return -EINVAL;

	spu_acquire(ctx);

	mbox_stat = ctx->ops->mbox_stat_read(ctx) & 0xff;

	spu_release(ctx);

	if (copy_to_user(buf, &mbox_stat, sizeof mbox_stat))
		return -EFAULT;

	return 4;
}

static struct file_operations spufs_mbox_stat_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_mbox_stat_read,
};
410
/* low-level ibox access function; caller must hold the context lock */
size_t spu_ibox_read(struct spu_context *ctx, u32 *data)
{
	return ctx->ops->ibox_read(ctx, data);
}

/* register/unregister SIGIO delivery for ibox data arrival */
static int spufs_ibox_fasync(int fd, struct file *file, int on)
{
	struct spu_context *ctx = file->private_data;

	return fasync_helper(fd, file, on, &ctx->ibox_fasync);
}
423
/* interrupt-level ibox callback function. */
void spufs_ibox_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	/* wake blocked readers and signal async listeners */
	wake_up_all(&ctx->ibox_wq);
	kill_fasync(&ctx->ibox_fasync, SIGIO, POLLIN);
}
432
/*
 * Read one 32-bit word from the interrupting mailbox.  With
 * O_NONBLOCK an empty ibox yields -EAGAIN; otherwise spufs_wait()
 * sleeps on ibox_wq (dropping/retaking the context lock internally)
 * until data arrives.
 */
static ssize_t spufs_ibox_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 ibox_data;
	ssize_t ret;

	if (len < 4)
		return -EINVAL;

	spu_acquire(ctx);

	ret = 0;
	if (file->f_flags & O_NONBLOCK) {
		if (!spu_ibox_read(ctx, &ibox_data))
			ret = -EAGAIN;
	} else {
		ret = spufs_wait(ctx->ibox_wq, spu_ibox_read(ctx, &ibox_data));
	}

	spu_release(ctx);

	if (ret)
		return ret;

	/* copy outside the lock; the word is already in ibox_data */
	ret = 4;
	if (copy_to_user(buf, &ibox_data, sizeof ibox_data))
		ret = -EFAULT;

	return ret;
}
464
/* poll for readability of the interrupting mailbox */
static unsigned int spufs_ibox_poll(struct file *file, poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	unsigned int mask;

	poll_wait(file, &ctx->ibox_wq, wait);

	/* mbox_stat_poll inspects the shared mailbox status register */
	spu_acquire(ctx);
	mask = ctx->ops->mbox_stat_poll(ctx, POLLIN | POLLRDNORM);
	spu_release(ctx);

	return mask;
}

static struct file_operations spufs_ibox_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_ibox_read,
	.poll	= spufs_ibox_poll,
	.fasync	= spufs_ibox_fasync,
};
485
/*
 * Report the ibox entry count: bits 16-23 of the mailbox status
 * register, returned as one 32-bit word.
 */
static ssize_t spufs_ibox_stat_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 ibox_stat;

	if (len < 4)
		return -EINVAL;

	spu_acquire(ctx);
	ibox_stat = (ctx->ops->mbox_stat_read(ctx) >> 16) & 0xff;
	spu_release(ctx);

	if (copy_to_user(buf, &ibox_stat, sizeof ibox_stat))
		return -EFAULT;

	return 4;
}

static struct file_operations spufs_ibox_stat_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_ibox_stat_read,
};
509
/* low-level mailbox write; caller must hold the context lock */
size_t spu_wbox_write(struct spu_context *ctx, u32 data)
{
	return ctx->ops->wbox_write(ctx, data);
}
516static int spufs_wbox_fasync(int fd, struct file *file, int on)
517{
Arnd Bergmann8b3d6662005-11-15 15:53:52 -0500518 struct spu_context *ctx = file->private_data;
519 int ret;
520
521 ret = fasync_helper(fd, file, on, &ctx->wbox_fasync);
522
523 return ret;
524}
525
/* interrupt-level wbox callback function. */
void spufs_wbox_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	/* wake blocked writers and signal async listeners */
	wake_up_all(&ctx->wbox_wq);
	kill_fasync(&ctx->wbox_fasync, SIGIO, POLLOUT);
}
534
/*
 * Write one 32-bit word to the CPU-to-SPU mailbox.  With O_NONBLOCK a
 * full wbox yields -EAGAIN; otherwise spufs_wait() sleeps on wbox_wq
 * until space is available.  The user word is fetched before taking
 * the context lock so a fault cannot occur while holding it.
 */
static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 wbox_data;
	int ret;

	if (len < 4)
		return -EINVAL;

	if (copy_from_user(&wbox_data, buf, sizeof wbox_data))
		return -EFAULT;

	spu_acquire(ctx);

	ret = 0;
	if (file->f_flags & O_NONBLOCK) {
		if (!spu_wbox_write(ctx, wbox_data))
			ret = -EAGAIN;
	} else {
		ret = spufs_wait(ctx->wbox_wq, spu_wbox_write(ctx, wbox_data));
	}

	spu_release(ctx);

	return ret ? ret : sizeof wbox_data;
}
562
/* poll for writability of the CPU-to-SPU mailbox */
static unsigned int spufs_wbox_poll(struct file *file, poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	unsigned int mask;

	poll_wait(file, &ctx->wbox_wq, wait);

	spu_acquire(ctx);
	mask = ctx->ops->mbox_stat_poll(ctx, POLLOUT | POLLWRNORM);
	spu_release(ctx);

	return mask;
}

static struct file_operations spufs_wbox_fops = {
	.open	= spufs_pipe_open,
	.write	= spufs_wbox_write,
	.poll	= spufs_wbox_poll,
	.fasync	= spufs_wbox_fasync,
};
583
/*
 * Report the wbox free-slot count: bits 8-15 of the mailbox status
 * register, returned as one 32-bit word.
 */
static ssize_t spufs_wbox_stat_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 wbox_stat;

	if (len < 4)
		return -EINVAL;

	spu_acquire(ctx);
	wbox_stat = (ctx->ops->mbox_stat_read(ctx) >> 8) & 0xff;
	spu_release(ctx);

	if (copy_to_user(buf, &wbox_stat, sizeof wbox_stat))
		return -EFAULT;

	return 4;
}

static struct file_operations spufs_wbox_stat_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_wbox_stat_read,
};
607
/* Open the signal1 file and record its mapping in ctx->signal1. */
static int spufs_signal1_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;
	file->private_data = ctx;
	file->f_mapping = inode->i_mapping;
	ctx->signal1 = inode->i_mapping;
	return nonseekable_open(inode, file);
}
617
/* Read the current value of signal notification register 1. */
static ssize_t spufs_signal1_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 data;

	if (len < 4)
		return -EINVAL;

	spu_acquire(ctx);
	data = ctx->ops->signal1_read(ctx);
	spu_release(ctx);

	if (copy_to_user(buf, &data, 4))
		return -EFAULT;

	return 4;
}
636
637static ssize_t spufs_signal1_write(struct file *file, const char __user *buf,
638 size_t len, loff_t *pos)
639{
640 struct spu_context *ctx;
Arnd Bergmann67207b92005-11-15 15:53:48 -0500641 u32 data;
642
643 ctx = file->private_data;
Arnd Bergmann67207b92005-11-15 15:53:48 -0500644
645 if (len < 4)
646 return -EINVAL;
647
648 if (copy_from_user(&data, buf, 4))
649 return -EFAULT;
650
Arnd Bergmann8b3d6662005-11-15 15:53:52 -0500651 spu_acquire(ctx);
652 ctx->ops->signal1_write(ctx, data);
653 spu_release(ctx);
Arnd Bergmann67207b92005-11-15 15:53:48 -0500654
655 return 4;
656}
657
/* nopage handler for the signal1 problem-state window */
static struct page *spufs_signal1_mmap_nopage(struct vm_area_struct *vma,
					      unsigned long address, int *type)
{
#if PAGE_SIZE == 0x1000
	return spufs_ps_nopage(vma, address, type, 0x14000, 0x1000);
#elif PAGE_SIZE == 0x10000
	/* For 64k pages, both signal1 and signal2 can be used to mmap the whole
	 * signal 1 and 2 area
	 */
	return spufs_ps_nopage(vma, address, type, 0x10000, 0x10000);
#else
#error unsupported page size
#endif
}

static struct vm_operations_struct spufs_signal1_mmap_vmops = {
	.nopage = spufs_signal1_mmap_nopage,
};
676
/* mmap of the signal1 notification area; shared, uncached, guarded */
static int spufs_signal1_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_RESERVED;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_signal1_mmap_vmops;
	return 0;
}

static struct file_operations spufs_signal1_fops = {
	.open = spufs_signal1_open,
	.read = spufs_signal1_read,
	.write = spufs_signal1_write,
	.mmap = spufs_signal1_mmap,
};
696
/* Open the signal2 file and record its mapping in ctx->signal2. */
static int spufs_signal2_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;
	file->private_data = ctx;
	file->f_mapping = inode->i_mapping;
	ctx->signal2 = inode->i_mapping;
	return nonseekable_open(inode, file);
}
706
Arnd Bergmann67207b92005-11-15 15:53:48 -0500707static ssize_t spufs_signal2_read(struct file *file, char __user *buf,
708 size_t len, loff_t *pos)
709{
710 struct spu_context *ctx;
Arnd Bergmann67207b92005-11-15 15:53:48 -0500711 u32 data;
712
713 ctx = file->private_data;
Arnd Bergmann67207b92005-11-15 15:53:48 -0500714
715 if (len < 4)
716 return -EINVAL;
717
Arnd Bergmann8b3d6662005-11-15 15:53:52 -0500718 spu_acquire(ctx);
719 data = ctx->ops->signal2_read(ctx);
720 spu_release(ctx);
721
Arnd Bergmann67207b92005-11-15 15:53:48 -0500722 if (copy_to_user(buf, &data, 4))
723 return -EFAULT;
724
725 return 4;
726}
727
/* Write a 32-bit value to signal notification register 2. */
static ssize_t spufs_signal2_write(struct file *file, const char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx;
	u32 data;

	ctx = file->private_data;

	if (len < 4)
		return -EINVAL;

	/* fetch the user word before taking the context lock */
	if (copy_from_user(&data, buf, 4))
		return -EFAULT;

	spu_acquire(ctx);
	ctx->ops->signal2_write(ctx, data);
	spu_release(ctx);

	return 4;
}
748
#if SPUFS_MMAP_4K
/* nopage handler for the signal2 problem-state window */
static struct page *spufs_signal2_mmap_nopage(struct vm_area_struct *vma,
					      unsigned long address, int *type)
{
#if PAGE_SIZE == 0x1000
	return spufs_ps_nopage(vma, address, type, 0x1c000, 0x1000);
#elif PAGE_SIZE == 0x10000
	/* For 64k pages, both signal1 and signal2 can be used to mmap the whole
	 * signal 1 and 2 area
	 */
	return spufs_ps_nopage(vma, address, type, 0x10000, 0x10000);
#else
#error unsupported page size
#endif
}

static struct vm_operations_struct spufs_signal2_mmap_vmops = {
	.nopage = spufs_signal2_mmap_nopage,
};

/* mmap of the signal2 notification area; shared, uncached, guarded */
static int spufs_signal2_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	/* FIXME: */
	vma->vm_flags |= VM_RESERVED;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_signal2_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_signal2_mmap NULL
#endif /* !SPUFS_MMAP_4K */

static struct file_operations spufs_signal2_fops = {
	.open = spufs_signal2_open,
	.read = spufs_signal2_read,
	.write = spufs_signal2_write,
	.mmap = spufs_signal2_mmap,
};
792
/* debugfs-style attribute accessors for the signal1 type bit */
static void spufs_signal1_type_set(void *data, u64 val)
{
	struct spu_context *ctx = data;

	spu_acquire(ctx);
	ctx->ops->signal1_type_set(ctx, val);
	spu_release(ctx);
}

static u64 spufs_signal1_type_get(void *data)
{
	struct spu_context *ctx = data;
	u64 ret;

	spu_acquire(ctx);
	ret = ctx->ops->signal1_type_get(ctx);
	spu_release(ctx);

	return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_signal1_type, spufs_signal1_type_get,
			spufs_signal1_type_set, "%llu");
815
/* debugfs-style attribute accessors for the signal2 type bit */
static void spufs_signal2_type_set(void *data, u64 val)
{
	struct spu_context *ctx = data;

	spu_acquire(ctx);
	ctx->ops->signal2_type_set(ctx, val);
	spu_release(ctx);
}

static u64 spufs_signal2_type_get(void *data)
{
	struct spu_context *ctx = data;
	u64 ret;

	spu_acquire(ctx);
	ret = ctx->ops->signal2_type_get(ctx);
	spu_release(ctx);

	return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_signal2_type, spufs_signal2_type_get,
			spufs_signal2_type_set, "%llu");
838
#if SPUFS_MMAP_4K
static struct page *spufs_mss_mmap_nopage(struct vm_area_struct *vma,
					  unsigned long address, int *type)
{
	return spufs_ps_nopage(vma, address, type, 0x0000, 0x1000);
}

static struct vm_operations_struct spufs_mss_mmap_vmops = {
	.nopage = spufs_mss_mmap_nopage,
};

/*
 * mmap support for problem state MFC DMA area [0x0000 - 0x0fff].
 */
static int spufs_mss_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_RESERVED;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_mss_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_mss_mmap NULL
#endif /* !SPUFS_MMAP_4K */

/* "mss" exposes only an mmap interface; open just stashes the context */
static int spufs_mss_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);

	file->private_data = i->i_ctx;
	return nonseekable_open(inode, file);
}

static struct file_operations spufs_mss_fops = {
	.open	 = spufs_mss_open,
	.mmap	 = spufs_mss_mmap,
};
881
/* nopage handler for the full 128k problem state mapping */
static struct page *spufs_psmap_mmap_nopage(struct vm_area_struct *vma,
					    unsigned long address, int *type)
{
	return spufs_ps_nopage(vma, address, type, 0x0000, 0x20000);
}

static struct vm_operations_struct spufs_psmap_mmap_vmops = {
	.nopage = spufs_psmap_mmap_nopage,
};

/*
 * mmap support for full problem state area [0x00000 - 0x1ffff].
 */
static int spufs_psmap_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_RESERVED;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_psmap_mmap_vmops;
	return 0;
}

/* "psmap" exposes only an mmap interface; open just stashes the context */
static int spufs_psmap_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);

	file->private_data = i->i_ctx;
	return nonseekable_open(inode, file);
}

static struct file_operations spufs_psmap_fops = {
	.open	 = spufs_psmap_open,
	.mmap	 = spufs_psmap_mmap,
};
920
921
#if SPUFS_MMAP_4K
static struct page *spufs_mfc_mmap_nopage(struct vm_area_struct *vma,
					  unsigned long address, int *type)
{
	return spufs_ps_nopage(vma, address, type, 0x3000, 0x1000);
}

static struct vm_operations_struct spufs_mfc_mmap_vmops = {
	.nopage = spufs_mfc_mmap_nopage,
};

/*
 * mmap support for problem state MFC DMA area [0x3000 - 0x3fff].
 */
static int spufs_mfc_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_RESERVED;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_mfc_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_mfc_mmap NULL
#endif /* !SPUFS_MMAP_4K */
Arnd Bergmanna33a7d72006-03-23 00:00:11 +0100951
/*
 * Open the per-context "mfc" file for user-initiated DMA.
 *
 * Only the process that owns the context may open it: the DMA
 * commands operate on the caller's address space, so an open from a
 * different mm is refused with -EINVAL.  A second open of the same
 * inode is refused with -EBUSY — NOTE(review): presumably to keep
 * tag-group bookkeeping (ctx->tagwait) owned by a single file;
 * confirm against the spufs design notes.
 */
static int spufs_mfc_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	/* we don't want to deal with DMA into other processes */
	if (ctx->owner != current->mm)
		return -EINVAL;

	/* only allow one open file per mfc inode */
	if (atomic_read(&inode->i_count) != 1)
		return -EBUSY;

	file->private_data = ctx;
	return nonseekable_open(inode, file);
}
967
/* interrupt-level mfc callback function. */
void spufs_mfc_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	/* wake sleepers in spufs_mfc_read()/write()/poll() */
	wake_up_all(&ctx->mfc_wq);

	pr_debug("%s %s\n", __FUNCTION__, spu->name);
	if (ctx->mfc_fasync) {
		u32 free_elements, tagstatus;
		unsigned int mask;

		/* no need for spu_acquire in interrupt context */
		free_elements = ctx->ops->get_mfc_free_elements(ctx);
		tagstatus = ctx->ops->read_mfc_tagstatus(ctx);

		/* build a poll-style mask for SIGIO delivery:
		 * POLLOUT when the command queue has room, POLLIN when
		 * a waited-on tag group has completed */
		mask = 0;
		if (free_elements & 0xffff)
			mask |= POLLOUT;
		if (tagstatus & ctx->tagwait)
			mask |= POLLIN;

		kill_fasync(&ctx->mfc_fasync, SIGIO, mask);
	}
}
993
/*
 * Wait condition for spufs_wait() in spufs_mfc_read(): returns 1 when
 * at least one waited-on tag group has completed, storing the
 * completed groups in *status and clearing them from ctx->tagwait;
 * otherwise re-arms the tag-group query interrupt and returns 0 so
 * the caller keeps sleeping.
 */
static int spufs_read_mfc_tagstatus(struct spu_context *ctx, u32 *status)
{
	/* See if there is one tag group is complete */
	/* FIXME we need locking around tagwait */
	*status = ctx->ops->read_mfc_tagstatus(ctx) & ctx->tagwait;
	ctx->tagwait &= ~*status;
	if (*status)
		return 1;

	/* enable interrupt waiting for any tag group,
	   may silently fail if interrupts are already enabled */
	ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
	return 0;
}
1008
1009static ssize_t spufs_mfc_read(struct file *file, char __user *buffer,
1010 size_t size, loff_t *pos)
1011{
1012 struct spu_context *ctx = file->private_data;
1013 int ret = -EINVAL;
1014 u32 status;
1015
1016 if (size != 4)
1017 goto out;
1018
1019 spu_acquire(ctx);
1020 if (file->f_flags & O_NONBLOCK) {
1021 status = ctx->ops->read_mfc_tagstatus(ctx);
1022 if (!(status & ctx->tagwait))
1023 ret = -EAGAIN;
1024 else
1025 ctx->tagwait &= ~status;
1026 } else {
1027 ret = spufs_wait(ctx->mfc_wq,
1028 spufs_read_mfc_tagstatus(ctx, &status));
1029 }
1030 spu_release(ctx);
1031
1032 if (ret)
1033 goto out;
1034
1035 ret = 4;
1036 if (copy_to_user(buffer, &status, 4))
1037 ret = -EFAULT;
1038
1039out:
1040 return ret;
1041}
1042
1043static int spufs_check_valid_dma(struct mfc_dma_command *cmd)
1044{
1045 pr_debug("queueing DMA %x %lx %x %x %x\n", cmd->lsa,
1046 cmd->ea, cmd->size, cmd->tag, cmd->cmd);
1047
1048 switch (cmd->cmd) {
1049 case MFC_PUT_CMD:
1050 case MFC_PUTF_CMD:
1051 case MFC_PUTB_CMD:
1052 case MFC_GET_CMD:
1053 case MFC_GETF_CMD:
1054 case MFC_GETB_CMD:
1055 break;
1056 default:
1057 pr_debug("invalid DMA opcode %x\n", cmd->cmd);
1058 return -EIO;
1059 }
1060
1061 if ((cmd->lsa & 0xf) != (cmd->ea &0xf)) {
1062 pr_debug("invalid DMA alignment, ea %lx lsa %x\n",
1063 cmd->ea, cmd->lsa);
1064 return -EIO;
1065 }
1066
1067 switch (cmd->size & 0xf) {
1068 case 1:
1069 break;
1070 case 2:
1071 if (cmd->lsa & 1)
1072 goto error;
1073 break;
1074 case 4:
1075 if (cmd->lsa & 3)
1076 goto error;
1077 break;
1078 case 8:
1079 if (cmd->lsa & 7)
1080 goto error;
1081 break;
1082 case 0:
1083 if (cmd->lsa & 15)
1084 goto error;
1085 break;
1086 error:
1087 default:
1088 pr_debug("invalid DMA alignment %x for size %x\n",
1089 cmd->lsa & 0xf, cmd->size);
1090 return -EIO;
1091 }
1092
1093 if (cmd->size > 16 * 1024) {
1094 pr_debug("invalid DMA size %x\n", cmd->size);
1095 return -EIO;
1096 }
1097
1098 if (cmd->tag & 0xfff0) {
1099 /* we reserve the higher tag numbers for kernel use */
1100 pr_debug("invalid DMA tag\n");
1101 return -EIO;
1102 }
1103
1104 if (cmd->class) {
1105 /* not supported in this version */
1106 pr_debug("invalid DMA class\n");
1107 return -EIO;
1108 }
1109
1110 return 0;
1111}
1112
/*
 * Wait condition for spufs_wait() in spufs_mfc_write().
 *
 * Tries to queue the command; when the queue is full (-EAGAIN) it
 * arms the tag-group completion interrupt and retries once.  Returns
 * 1 when finished (final result in *error), or 0 when the queue is
 * still full and the caller should sleep until spufs_mfc_callback()
 * wakes it.
 */
static int spu_send_mfc_command(struct spu_context *ctx,
				struct mfc_dma_command cmd,
				int *error)
{
	*error = ctx->ops->send_mfc_command(ctx, &cmd);
	if (*error == -EAGAIN) {
		/* wait for any tag group to complete
		   so we have space for the new command */
		ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
		/* try again, because the queue might be
		   empty again */
		*error = ctx->ops->send_mfc_command(ctx, &cmd);
		if (*error == -EAGAIN)
			return 0;
	}
	return 1;
}
1130
1131static ssize_t spufs_mfc_write(struct file *file, const char __user *buffer,
1132 size_t size, loff_t *pos)
1133{
1134 struct spu_context *ctx = file->private_data;
1135 struct mfc_dma_command cmd;
1136 int ret = -EINVAL;
1137
1138 if (size != sizeof cmd)
1139 goto out;
1140
1141 ret = -EFAULT;
1142 if (copy_from_user(&cmd, buffer, sizeof cmd))
1143 goto out;
1144
1145 ret = spufs_check_valid_dma(&cmd);
1146 if (ret)
1147 goto out;
1148
1149 spu_acquire_runnable(ctx);
1150 if (file->f_flags & O_NONBLOCK) {
1151 ret = ctx->ops->send_mfc_command(ctx, &cmd);
1152 } else {
1153 int status;
1154 ret = spufs_wait(ctx->mfc_wq,
1155 spu_send_mfc_command(ctx, cmd, &status));
1156 if (status)
1157 ret = status;
1158 }
1159 spu_release(ctx);
1160
1161 if (ret)
1162 goto out;
1163
1164 ctx->tagwait |= 1 << cmd.tag;
1165
1166out:
1167 return ret;
1168}
1169
/*
 * poll() for the "mfc" file: POLLOUT when the MFC command queue has
 * free slots, POLLIN when a waited-on tag group has completed.
 */
static unsigned int spufs_mfc_poll(struct file *file,poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	u32 free_elements, tagstatus;
	unsigned int mask;

	/* NOTE(review): query mode 2 is used here (vs. 1 in the read
	 * path) before sampling the status — confirm the exact mode
	 * semantics in the backend ops */
	spu_acquire(ctx);
	ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2);
	free_elements = ctx->ops->get_mfc_free_elements(ctx);
	tagstatus = ctx->ops->read_mfc_tagstatus(ctx);
	spu_release(ctx);

	poll_wait(file, &ctx->mfc_wq, wait);

	mask = 0;
	if (free_elements & 0xffff)
		mask |= POLLOUT | POLLWRNORM;
	if (tagstatus & ctx->tagwait)
		mask |= POLLIN | POLLRDNORM;

	pr_debug("%s: free %d tagstatus %d tagwait %d\n", __FUNCTION__,
		free_elements, tagstatus, ctx->tagwait);

	return mask;
}
1195
Al Viro73b6af82006-06-25 16:42:33 -07001196static int spufs_mfc_flush(struct file *file, fl_owner_t id)
Arnd Bergmanna33a7d72006-03-23 00:00:11 +01001197{
1198 struct spu_context *ctx = file->private_data;
1199 int ret;
1200
1201 spu_acquire(ctx);
1202#if 0
1203/* this currently hangs */
1204 ret = spufs_wait(ctx->mfc_wq,
1205 ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2));
1206 if (ret)
1207 goto out;
1208 ret = spufs_wait(ctx->mfc_wq,
1209 ctx->ops->read_mfc_tagstatus(ctx) == ctx->tagwait);
1210out:
1211#else
1212 ret = 0;
1213#endif
1214 spu_release(ctx);
1215
1216 return ret;
1217}
1218
/* fsync() delegates to flush: would drain pending DMA (currently a no-op). */
static int spufs_mfc_fsync(struct file *file, struct dentry *dentry,
			   int datasync)
{
	return spufs_mfc_flush(file, NULL);
}
1224
/* Register/unregister for SIGIO delivery from spufs_mfc_callback(). */
static int spufs_mfc_fasync(int fd, struct file *file, int on)
{
	struct spu_context *ctx = file->private_data;

	return fasync_helper(fd, file, on, &ctx->mfc_fasync);
}
1231
/* File operations for the per-context "mfc" DMA interface. */
static struct file_operations spufs_mfc_fops = {
	.open	 = spufs_mfc_open,
	.read	 = spufs_mfc_read,
	.write	 = spufs_mfc_write,
	.poll	 = spufs_mfc_poll,
	.flush	 = spufs_mfc_flush,
	.fsync	 = spufs_mfc_fsync,
	.fasync	 = spufs_mfc_fasync,
	.mmap	 = spufs_mfc_mmap,
};
1242
Arnd Bergmann67207b92005-11-15 15:53:48 -05001243static void spufs_npc_set(void *data, u64 val)
1244{
1245 struct spu_context *ctx = data;
Arnd Bergmann8b3d6662005-11-15 15:53:52 -05001246 spu_acquire(ctx);
1247 ctx->ops->npc_write(ctx, val);
1248 spu_release(ctx);
Arnd Bergmann67207b92005-11-15 15:53:48 -05001249}
1250
1251static u64 spufs_npc_get(void *data)
1252{
1253 struct spu_context *ctx = data;
1254 u64 ret;
Arnd Bergmann8b3d6662005-11-15 15:53:52 -05001255 spu_acquire(ctx);
1256 ret = ctx->ops->npc_read(ctx);
1257 spu_release(ctx);
Arnd Bergmann67207b92005-11-15 15:53:48 -05001258 return ret;
1259}
1260DEFINE_SIMPLE_ATTRIBUTE(spufs_npc_ops, spufs_npc_get, spufs_npc_set, "%llx\n")
1261
Arnd Bergmann8b3d6662005-11-15 15:53:52 -05001262static void spufs_decr_set(void *data, u64 val)
1263{
1264 struct spu_context *ctx = data;
1265 struct spu_lscsa *lscsa = ctx->csa.lscsa;
1266 spu_acquire_saved(ctx);
1267 lscsa->decr.slot[0] = (u32) val;
1268 spu_release(ctx);
1269}
1270
1271static u64 spufs_decr_get(void *data)
1272{
1273 struct spu_context *ctx = data;
1274 struct spu_lscsa *lscsa = ctx->csa.lscsa;
1275 u64 ret;
1276 spu_acquire_saved(ctx);
1277 ret = lscsa->decr.slot[0];
1278 spu_release(ctx);
1279 return ret;
1280}
1281DEFINE_SIMPLE_ATTRIBUTE(spufs_decr_ops, spufs_decr_get, spufs_decr_set,
1282 "%llx\n")
1283
1284static void spufs_decr_status_set(void *data, u64 val)
1285{
1286 struct spu_context *ctx = data;
1287 struct spu_lscsa *lscsa = ctx->csa.lscsa;
1288 spu_acquire_saved(ctx);
1289 lscsa->decr_status.slot[0] = (u32) val;
1290 spu_release(ctx);
1291}
1292
1293static u64 spufs_decr_status_get(void *data)
1294{
1295 struct spu_context *ctx = data;
1296 struct spu_lscsa *lscsa = ctx->csa.lscsa;
1297 u64 ret;
1298 spu_acquire_saved(ctx);
1299 ret = lscsa->decr_status.slot[0];
1300 spu_release(ctx);
1301 return ret;
1302}
1303DEFINE_SIMPLE_ATTRIBUTE(spufs_decr_status_ops, spufs_decr_status_get,
1304 spufs_decr_status_set, "%llx\n")
1305
1306static void spufs_spu_tag_mask_set(void *data, u64 val)
1307{
1308 struct spu_context *ctx = data;
1309 struct spu_lscsa *lscsa = ctx->csa.lscsa;
1310 spu_acquire_saved(ctx);
1311 lscsa->tag_mask.slot[0] = (u32) val;
1312 spu_release(ctx);
1313}
1314
1315static u64 spufs_spu_tag_mask_get(void *data)
1316{
1317 struct spu_context *ctx = data;
1318 struct spu_lscsa *lscsa = ctx->csa.lscsa;
1319 u64 ret;
1320 spu_acquire_saved(ctx);
1321 ret = lscsa->tag_mask.slot[0];
1322 spu_release(ctx);
1323 return ret;
1324}
1325DEFINE_SIMPLE_ATTRIBUTE(spufs_spu_tag_mask_ops, spufs_spu_tag_mask_get,
1326 spufs_spu_tag_mask_set, "%llx\n")
1327
1328static void spufs_event_mask_set(void *data, u64 val)
1329{
1330 struct spu_context *ctx = data;
1331 struct spu_lscsa *lscsa = ctx->csa.lscsa;
1332 spu_acquire_saved(ctx);
1333 lscsa->event_mask.slot[0] = (u32) val;
1334 spu_release(ctx);
1335}
1336
1337static u64 spufs_event_mask_get(void *data)
1338{
1339 struct spu_context *ctx = data;
1340 struct spu_lscsa *lscsa = ctx->csa.lscsa;
1341 u64 ret;
1342 spu_acquire_saved(ctx);
1343 ret = lscsa->event_mask.slot[0];
1344 spu_release(ctx);
1345 return ret;
1346}
1347DEFINE_SIMPLE_ATTRIBUTE(spufs_event_mask_ops, spufs_event_mask_get,
1348 spufs_event_mask_set, "%llx\n")
1349
1350static void spufs_srr0_set(void *data, u64 val)
1351{
1352 struct spu_context *ctx = data;
1353 struct spu_lscsa *lscsa = ctx->csa.lscsa;
1354 spu_acquire_saved(ctx);
1355 lscsa->srr0.slot[0] = (u32) val;
1356 spu_release(ctx);
1357}
1358
1359static u64 spufs_srr0_get(void *data)
1360{
1361 struct spu_context *ctx = data;
1362 struct spu_lscsa *lscsa = ctx->csa.lscsa;
1363 u64 ret;
1364 spu_acquire_saved(ctx);
1365 ret = lscsa->srr0.slot[0];
1366 spu_release(ctx);
1367 return ret;
1368}
1369DEFINE_SIMPLE_ATTRIBUTE(spufs_srr0_ops, spufs_srr0_get, spufs_srr0_set,
1370 "%llx\n")
1371
arnd@arndb.de7b1a7012006-06-19 20:33:24 +02001372static u64 spufs_id_get(void *data)
1373{
1374 struct spu_context *ctx = data;
1375 u64 num;
1376
1377 spu_acquire(ctx);
1378 if (ctx->state == SPU_STATE_RUNNABLE)
1379 num = ctx->spu->number;
1380 else
1381 num = (unsigned int)-1;
1382 spu_release(ctx);
1383
1384 return num;
1385}
Al Viroe45d6632006-09-23 01:37:41 +01001386DEFINE_SIMPLE_ATTRIBUTE(spufs_id_ops, spufs_id_get, NULL, "0x%llx\n")
arnd@arndb.de7b1a7012006-06-19 20:33:24 +02001387
Arnd Bergmann67207b92005-11-15 15:53:48 -05001388struct tree_descr spufs_dir_contents[] = {
1389 { "mem", &spufs_mem_fops, 0666, },
Arnd Bergmann8b3d6662005-11-15 15:53:52 -05001390 { "regs", &spufs_regs_fops, 0666, },
Arnd Bergmann67207b92005-11-15 15:53:48 -05001391 { "mbox", &spufs_mbox_fops, 0444, },
1392 { "ibox", &spufs_ibox_fops, 0444, },
1393 { "wbox", &spufs_wbox_fops, 0222, },
1394 { "mbox_stat", &spufs_mbox_stat_fops, 0444, },
1395 { "ibox_stat", &spufs_ibox_stat_fops, 0444, },
1396 { "wbox_stat", &spufs_wbox_stat_fops, 0444, },
1397 { "signal1", &spufs_signal1_fops, 0666, },
1398 { "signal2", &spufs_signal2_fops, 0666, },
1399 { "signal1_type", &spufs_signal1_type, 0666, },
1400 { "signal2_type", &spufs_signal2_type, 0666, },
arnd@arndb.ded9379c42006-06-19 20:33:21 +02001401 { "mss", &spufs_mss_fops, 0666, },
Arnd Bergmanna33a7d72006-03-23 00:00:11 +01001402 { "mfc", &spufs_mfc_fops, 0666, },
Mark Nutter6df10a82006-03-23 00:00:12 +01001403 { "cntl", &spufs_cntl_fops, 0666, },
Arnd Bergmann67207b92005-11-15 15:53:48 -05001404 { "npc", &spufs_npc_ops, 0666, },
Arnd Bergmann8b3d6662005-11-15 15:53:52 -05001405 { "fpcr", &spufs_fpcr_fops, 0666, },
1406 { "decr", &spufs_decr_ops, 0666, },
1407 { "decr_status", &spufs_decr_status_ops, 0666, },
1408 { "spu_tag_mask", &spufs_spu_tag_mask_ops, 0666, },
1409 { "event_mask", &spufs_event_mask_ops, 0666, },
1410 { "srr0", &spufs_srr0_ops, 0666, },
arnd@arndb.de7b1a7012006-06-19 20:33:24 +02001411 { "phys-id", &spufs_id_ops, 0666, },
Benjamin Herrenschmidt27d5bf22006-10-04 17:26:11 +02001412 { "psmap", &spufs_psmap_fops, 0666, },
Arnd Bergmann67207b92005-11-15 15:53:48 -05001413 {},
1414};