blob: 9e4f2739341d4f6432c7e15bf2ae38fd688a4616 [file] [log] [blame]
Arnd Bergmann67207b92005-11-15 15:53:48 -05001/*
2 * SPU file system -- file contents
3 *
4 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
5 *
6 * Author: Arnd Bergmann <arndb@de.ibm.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 */
22
Arnd Bergmanna33a7d72006-03-23 00:00:11 +010023#undef DEBUG
24
Arnd Bergmann67207b92005-11-15 15:53:48 -050025#include <linux/fs.h>
26#include <linux/ioctl.h>
27#include <linux/module.h>
Arnd Bergmannd88cfff2005-12-05 22:52:22 -050028#include <linux/pagemap.h>
Arnd Bergmann67207b92005-11-15 15:53:48 -050029#include <linux/poll.h>
Arnd Bergmann51104592005-12-05 22:52:25 -050030#include <linux/ptrace.h>
Benjamin Herrenschmidtcbe709c2007-06-04 15:15:38 +100031#include <linux/seq_file.h>
Christoph Hellwig038200c2008-01-11 15:03:26 +110032#include <linux/marker.h>
Arnd Bergmann67207b92005-11-15 15:53:48 -050033
34#include <asm/io.h>
FUJITA Tomonoridfe1e092008-05-13 19:07:42 +100035#include <asm/time.h>
Arnd Bergmann67207b92005-11-15 15:53:48 -050036#include <asm/spu.h>
Dwayne Grant McConnellb9e3bd72006-11-20 18:44:58 +010037#include <asm/spu_info.h>
Arnd Bergmann67207b92005-11-15 15:53:48 -050038#include <asm/uaccess.h>
39
40#include "spufs.h"
41
/* True when the base page size is 4K; selects mmap support for 4K-sized register areas. */
#define SPUFS_MMAP_4K (PAGE_SIZE == 0x1000)
43
/*
 * Simple attribute files: a local copy of the fs/libfs.c simple_attr
 * machinery, holding get/set callbacks plus formatting buffers for one
 * open file instance.
 */
struct spufs_attr {
	int (*get)(void *, u64 *);	/* read hook; NULL makes the file write-only */
	int (*set)(void *, u64);	/* write hook; NULL makes the file read-only */
	char get_buf[24];		/* enough to store a u64 and "\n\0" */
	char set_buf[24];
	void *data;			/* opaque object passed to get/set (from inode->i_private) */
	const char *fmt;		/* format for read operation */
	struct mutex mutex;		/* protects access to these buffers */
};
54
/*
 * Open helper for spufs "simple attribute" files: allocate the per-open
 * bookkeeping and record the get/set callbacks and printf format used by
 * the read/write handlers.
 *
 * NOTE(review): get_buf/set_buf are left uninitialized by kmalloc() here;
 * the read path must not trust their contents before the first format.
 */
static int spufs_attr_open(struct inode *inode, struct file *file,
		int (*get)(void *, u64 *), int (*set)(void *, u64),
		const char *fmt)
{
	struct spufs_attr *attr;

	attr = kmalloc(sizeof(*attr), GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	attr->get = get;
	attr->set = set;
	attr->data = inode->i_private;	/* object the callbacks operate on */
	attr->fmt = fmt;
	mutex_init(&attr->mutex);
	file->private_data = attr;

	return nonseekable_open(inode, file);
}
74
75static int spufs_attr_release(struct inode *inode, struct file *file)
76{
77 kfree(file->private_data);
78 return 0;
79}
80
81static ssize_t spufs_attr_read(struct file *file, char __user *buf,
82 size_t len, loff_t *ppos)
83{
84 struct spufs_attr *attr;
85 size_t size;
86 ssize_t ret;
87
88 attr = file->private_data;
89 if (!attr->get)
90 return -EACCES;
91
92 ret = mutex_lock_interruptible(&attr->mutex);
93 if (ret)
94 return ret;
95
96 if (*ppos) { /* continued read */
97 size = strlen(attr->get_buf);
98 } else { /* first read */
99 u64 val;
100 ret = attr->get(attr->data, &val);
101 if (ret)
102 goto out;
103
104 size = scnprintf(attr->get_buf, sizeof(attr->get_buf),
105 attr->fmt, (unsigned long long)val);
106 }
107
108 ret = simple_read_from_buffer(buf, len, ppos, attr->get_buf, size);
109out:
110 mutex_unlock(&attr->mutex);
111 return ret;
112}
113
/*
 * Write handler shared by all spufs "simple attribute" files: copy the
 * user buffer in, parse it as a number, and hand it to ->set().
 */
static ssize_t spufs_attr_write(struct file *file, const char __user *buf,
		size_t len, loff_t *ppos)
{
	struct spufs_attr *attr;
	u64 val;
	size_t size;
	ssize_t ret;

	attr = file->private_data;
	if (!attr->set)
		return -EACCES;		/* read-only attribute */

	ret = mutex_lock_interruptible(&attr->mutex);
	if (ret)
		return ret;

	ret = -EFAULT;
	/* leave room for the terminating NUL */
	size = min(sizeof(attr->set_buf) - 1, len);
	if (copy_from_user(attr->set_buf, buf, size))
		goto out;

	ret = len; /* claim we got the whole input */
	attr->set_buf[size] = '\0';
	val = simple_strtol(attr->set_buf, NULL, 0);
	/* NOTE(review): parse errors and the ->set() return value are ignored */
	attr->set(attr->data, val);
out:
	mutex_unlock(&attr->mutex);
	return ret;
}
143
/*
 * Declare a spufs simple-attribute file_operations named __fops, wired to
 * the given get/set hooks and printf format.  Mirrors DEFINE_SIMPLE_ATTRIBUTE
 * from fs/libfs.c but uses the local spufs_attr_* handlers above.
 */
#define DEFINE_SPUFS_SIMPLE_ATTRIBUTE(__fops, __get, __set, __fmt)	\
static int __fops ## _open(struct inode *inode, struct file *file)	\
{									\
	__simple_attr_check_format(__fmt, 0ull);			\
	return spufs_attr_open(inode, file, __get, __set, __fmt);	\
}									\
static struct file_operations __fops = {				\
	.owner	 = THIS_MODULE,						\
	.open	 = __fops ## _open,					\
	.release = spufs_attr_release,					\
	.read	 = spufs_attr_read,					\
	.write	 = spufs_attr_write,					\
};
157
Benjamin Herrenschmidtcbe709c2007-06-04 15:15:38 +1000158
/*
 * Open handler for the "mem" file (SPU local store).  The first opener
 * publishes the file's address_space in ctx->local_store; i_openers
 * counts concurrent opens, all under ctx->mapping_lock.
 */
static int
spufs_mem_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->local_store = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}
172
/*
 * Release handler for the "mem" file: the last closer clears
 * ctx->local_store, mirroring spufs_mem_open().
 */
static int
spufs_mem_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->local_store = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}
185
186static ssize_t
Dwayne Grant McConnellbf1ab972006-11-23 00:46:37 +0100187__spufs_mem_read(struct spu_context *ctx, char __user *buffer,
188 size_t size, loff_t *pos)
189{
190 char *local_store = ctx->ops->get_ls(ctx);
191 return simple_read_from_buffer(buffer, size, pos, local_store,
192 LS_SIZE);
193}
194
/*
 * read() for the "mem" file: acquire the context so the local store
 * pointer is stable, then delegate to __spufs_mem_read().
 */
static ssize_t
spufs_mem_read(struct file *file, char __user *buffer,
				size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	ssize_t ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;	/* interrupted while waiting for the context */
	ret = __spufs_mem_read(ctx, buffer, size, pos);
	spu_release(ctx);

	return ret;
}
210
/*
 * write() for the "mem" file: copy user data into the local store,
 * clamping the request to the [pos, LS_SIZE) window.  The file position
 * is only advanced on a fully successful copy.
 */
static ssize_t
spufs_mem_write(struct file *file, const char __user *buffer,
					size_t size, loff_t *ppos)
{
	struct spu_context *ctx = file->private_data;
	char *local_store;
	loff_t pos = *ppos;
	int ret;

	if (pos < 0)
		return -EINVAL;
	if (pos > LS_SIZE)
		return -EFBIG;
	if (size > LS_SIZE - pos)
		size = LS_SIZE - pos;	/* clamp to the end of local store */

	ret = spu_acquire(ctx);
	if (ret)
		return ret;

	local_store = ctx->ops->get_ls(ctx);
	ret = copy_from_user(local_store + pos, buffer, size);
	spu_release(ctx);

	if (ret)
		return -EFAULT;
	*ppos = pos + size;
	return size;
}
240
/*
 * Fault handler for local-store mappings.  Inserts the PFN for the
 * faulting page directly (VM_PFNMAP): from the cached save area when the
 * context is scheduled out, or from the physical local store when it is
 * loaded on an SPU.  Always returns VM_FAULT_NOPAGE since the PFN is
 * inserted by hand (or the access must be retried after a failed acquire).
 */
static int
spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct spu_context *ctx = vma->vm_file->private_data;
	unsigned long address = (unsigned long)vmf->virtual_address;
	unsigned long pfn, offset;

#ifdef CONFIG_SPU_FS_64K_LS
	struct spu_state *csa = &ctx->csa;
	int psize;

	/* Check what page size we are using */
	psize = get_slice_psize(vma->vm_mm, address);

	/* Some sanity checking */
	BUG_ON(csa->use_big_pages != (psize == MMU_PAGE_64K));

	/* Wow, 64K, cool, we need to align the address though */
	if (csa->use_big_pages) {
		BUG_ON(vma->vm_start & 0xffff);
		address &= ~0xfffful;
	}
#endif /* CONFIG_SPU_FS_64K_LS */

	offset = vmf->pgoff << PAGE_SHIFT;
	if (offset >= LS_SIZE)
		return VM_FAULT_SIGBUS;

	pr_debug("spufs_mem_mmap_fault address=0x%lx, offset=0x%lx\n",
			address, offset);

	if (spu_acquire(ctx))
		return VM_FAULT_NOPAGE;	/* interrupted: let the fault retry */

	if (ctx->state == SPU_STATE_SAVED) {
		/* saved context: back the page with the cacheable save area */
		vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
		pfn = vmalloc_to_pfn(ctx->csa.lscsa->ls + offset);
	} else {
		/* running context: map the real local store, write-combined */
		vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);
		pfn = (ctx->spu->local_store_phys + offset) >> PAGE_SHIFT;
	}
	vm_insert_pfn(vma, address, pfn);

	spu_release(ctx);

	return VM_FAULT_NOPAGE;
}
288
Benjamin Herrenschmidta3528942008-07-23 21:27:09 -0700289static int spufs_mem_mmap_access(struct vm_area_struct *vma,
290 unsigned long address,
291 void *buf, int len, int write)
292{
293 struct spu_context *ctx = vma->vm_file->private_data;
294 unsigned long offset = address - vma->vm_start;
295 char *local_store;
296
297 if (write && !(vma->vm_flags & VM_WRITE))
298 return -EACCES;
299 if (spu_acquire(ctx))
300 return -EINTR;
301 if ((offset + len) > vma->vm_end)
302 len = vma->vm_end - offset;
303 local_store = ctx->ops->get_ls(ctx);
304 if (write)
305 memcpy_toio(local_store + offset, buf, len);
306 else
307 memcpy_fromio(buf, local_store + offset, len);
308 spu_release(ctx);
309 return len;
310}
Benjamin Herrenschmidt78bde532007-02-13 11:46:06 +1100311
/* VM callbacks for local-store mappings: lazy PFN faults plus ptrace access. */
static struct vm_operations_struct spufs_mem_mmap_vmops = {
	.fault = spufs_mem_mmap_fault,
	.access = spufs_mem_mmap_access,
};
316
/*
 * mmap handler for the local store.  Only MAP_SHARED mappings are
 * allowed; pages are inserted lazily via spufs_mem_mmap_fault()
 * (VM_IO | VM_PFNMAP), defaulting to write-combined uncached protection.
 */
static int spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
{
#ifdef CONFIG_SPU_FS_64K_LS
	struct spu_context *ctx = file->private_data;
	struct spu_state *csa = &ctx->csa;

	/* Sanity check VMA alignment: 64K local store needs 64K-aligned VMAs */
	if (csa->use_big_pages) {
		pr_debug("spufs_mem_mmap 64K, start=0x%lx, end=0x%lx,"
			 " pgoff=0x%lx\n", vma->vm_start, vma->vm_end,
			 vma->vm_pgoff);
		if (vma->vm_start & 0xffff)
			return -EINVAL;
		if (vma->vm_pgoff & 0xf)
			return -EINVAL;
	}
#endif /* CONFIG_SPU_FS_64K_LS */

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);

	vma->vm_ops = &spufs_mem_mmap_vmops;
	return 0;
}
344
#ifdef CONFIG_SPU_FS_64K_LS
/*
 * Choose a mapping address for the local store: use a 64K-page slice when
 * the context's save area uses big pages, otherwise defer to the mm's
 * default get_unmapped_area.
 */
static unsigned long spufs_get_unmapped_area(struct file *file,
		unsigned long addr, unsigned long len, unsigned long pgoff,
		unsigned long flags)
{
	struct spu_context *ctx = file->private_data;
	struct spu_state *csa = &ctx->csa;

	/* If not using big pages, fallback to normal MM g_u_a */
	if (!csa->use_big_pages)
		return current->mm->get_unmapped_area(file, addr, len,
						      pgoff, flags);

	/* Else, try to obtain a 64K pages slice */
	return slice_get_unmapped_area(addr, len, flags,
				       MMU_PAGE_64K, 1, 0);
}
#endif /* CONFIG_SPU_FS_64K_LS */
363
/* File operations for "mem": read/write/mmap access to the SPU local store. */
static const struct file_operations spufs_mem_fops = {
	.open			= spufs_mem_open,
	.release		= spufs_mem_release,
	.read			= spufs_mem_read,
	.write			= spufs_mem_write,
	.llseek			= generic_file_llseek,
	.mmap			= spufs_mem_mmap,
#ifdef CONFIG_SPU_FS_64K_LS
	.get_unmapped_area	= spufs_get_unmapped_area,
#endif
};
375
/*
 * Common fault handler for mappings of SPU problem-state areas
 * (ps_offs/ps_size select the sub-area).  If the context is loaded on an
 * SPU the PFN is inserted directly; if it is saved we drop mmap_sem and
 * wait for it to become runnable, then return VM_FAULT_NOPAGE so the
 * faulting access is retried.
 */
static int spufs_ps_fault(struct vm_area_struct *vma,
				    struct vm_fault *vmf,
				    unsigned long ps_offs,
				    unsigned long ps_size)
{
	struct spu_context *ctx = vma->vm_file->private_data;
	unsigned long area, offset = vmf->pgoff << PAGE_SHIFT;
	int ret = 0;

	spu_context_nospu_trace(spufs_ps_fault__enter, ctx);

	if (offset >= ps_size)
		return VM_FAULT_SIGBUS;

	if (fatal_signal_pending(current))
		return VM_FAULT_SIGBUS;

	/*
	 * Because we release the mmap_sem, the context may be destroyed while
	 * we're in spu_wait. Grab an extra reference so it isn't destroyed
	 * in the meantime.
	 */
	get_spu_context(ctx);

	/*
	 * We have to wait for context to be loaded before we have
	 * pages to hand out to the user, but we don't want to wait
	 * with the mmap_sem held.
	 * It is possible to drop the mmap_sem here, but then we need
	 * to return VM_FAULT_NOPAGE because the mappings may have
	 * changed.
	 */
	if (spu_acquire(ctx))
		goto refault;	/* interrupted: retry the fault */

	if (ctx->state == SPU_STATE_SAVED) {
		/* not on an SPU yet: sleep (without mmap_sem) until it is */
		up_read(&current->mm->mmap_sem);
		spu_context_nospu_trace(spufs_ps_fault__sleep, ctx);
		ret = spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
		spu_context_trace(spufs_ps_fault__wake, ctx, ctx->spu);
		down_read(&current->mm->mmap_sem);
	} else {
		/* loaded: hand out the physical problem-state page */
		area = ctx->spu->problem_phys + ps_offs;
		vm_insert_pfn(vma, (unsigned long)vmf->virtual_address,
					(area + offset) >> PAGE_SHIFT);
		spu_context_trace(spufs_ps_fault__insert, ctx, ctx->spu);
	}

	/* spufs_wait releases the context itself on failure */
	if (!ret)
		spu_release(ctx);

refault:
	put_spu_context(ctx);
	return VM_FAULT_NOPAGE;
}
431
#if SPUFS_MMAP_4K
/* Fault handler for the "cntl" mapping: problem-state area at 0x4000. */
static int spufs_cntl_mmap_fault(struct vm_area_struct *vma,
					   struct vm_fault *vmf)
{
	return spufs_ps_fault(vma, vmf, 0x4000, SPUFS_CNTL_MAP_SIZE);
}

static struct vm_operations_struct spufs_cntl_mmap_vmops = {
	.fault = spufs_cntl_mmap_fault,
};

/*
 * mmap support for problem state control area [0x4000 - 0x4fff].
 */
static int spufs_cntl_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	vma->vm_ops = &spufs_cntl_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_cntl_mmap NULL
#endif /* !SPUFS_MMAP_4K */
Mark Nutter6df10a82006-03-23 00:00:12 +0100460
Christoph Hellwig197b1a82007-12-20 16:39:59 +0900461static int spufs_cntl_get(void *data, u64 *val)
Arnd Bergmanne1dbff22006-10-04 17:26:19 +0200462{
463 struct spu_context *ctx = data;
Christoph Hellwigc9101bd2007-12-20 16:39:59 +0900464 int ret;
Arnd Bergmanne1dbff22006-10-04 17:26:19 +0200465
Christoph Hellwigc9101bd2007-12-20 16:39:59 +0900466 ret = spu_acquire(ctx);
467 if (ret)
468 return ret;
Christoph Hellwig197b1a82007-12-20 16:39:59 +0900469 *val = ctx->ops->status_read(ctx);
Arnd Bergmanne1dbff22006-10-04 17:26:19 +0200470 spu_release(ctx);
471
Christoph Hellwig197b1a82007-12-20 16:39:59 +0900472 return 0;
Arnd Bergmanne1dbff22006-10-04 17:26:19 +0200473}
474
Christoph Hellwig197b1a82007-12-20 16:39:59 +0900475static int spufs_cntl_set(void *data, u64 val)
Arnd Bergmanne1dbff22006-10-04 17:26:19 +0200476{
477 struct spu_context *ctx = data;
Christoph Hellwigc9101bd2007-12-20 16:39:59 +0900478 int ret;
Arnd Bergmanne1dbff22006-10-04 17:26:19 +0200479
Christoph Hellwigc9101bd2007-12-20 16:39:59 +0900480 ret = spu_acquire(ctx);
481 if (ret)
482 return ret;
Arnd Bergmanne1dbff22006-10-04 17:26:19 +0200483 ctx->ops->runcntl_write(ctx, val);
484 spu_release(ctx);
Christoph Hellwig197b1a82007-12-20 16:39:59 +0900485
486 return 0;
Arnd Bergmanne1dbff22006-10-04 17:26:19 +0200487}
488
/*
 * Open handler for "cntl": record the mapping for the mmap path, then set
 * up the libfs simple-attribute state for read/write.
 *
 * NOTE(review): simple_attr_open() overwrites file->private_data with its
 * own attr structure, so the ctx assignment below does not survive --
 * verify how the cntl mmap fault path recovers the context.
 */
static int spufs_cntl_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->cntl = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return simple_attr_open(inode, file, spufs_cntl_get,
				spufs_cntl_set, "0x%08lx");
}
502
/*
 * Release handler for "cntl": free the simple-attribute state and clear
 * ctx->cntl when the last opener goes away (mirrors spufs_cntl_open()).
 */
static int
spufs_cntl_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	simple_attr_release(inode, file);

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->cntl = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}
517
/* File operations for "cntl": simple-attr read/write plus problem-state mmap. */
static const struct file_operations spufs_cntl_fops = {
	.open = spufs_cntl_open,
	.release = spufs_cntl_release,
	.read = simple_attr_read,
	.write = simple_attr_write,
	.mmap = spufs_cntl_mmap,
};
525
Arnd Bergmann8b3d6662005-11-15 15:53:52 -0500526static int
527spufs_regs_open(struct inode *inode, struct file *file)
528{
529 struct spufs_inode_info *i = SPUFS_I(inode);
530 file->private_data = i->i_ctx;
531 return 0;
532}
533
534static ssize_t
Dwayne Grant McConnellbf1ab972006-11-23 00:46:37 +0100535__spufs_regs_read(struct spu_context *ctx, char __user *buffer,
536 size_t size, loff_t *pos)
537{
538 struct spu_lscsa *lscsa = ctx->csa.lscsa;
539 return simple_read_from_buffer(buffer, size, pos,
540 lscsa->gprs, sizeof lscsa->gprs);
541}
542
/*
 * read() for "regs": force the context into the saved state so the GPR
 * image is coherent, then copy it out.
 */
static ssize_t
spufs_regs_read(struct file *file, char __user *buffer,
		size_t size, loff_t *pos)
{
	int ret;
	struct spu_context *ctx = file->private_data;

	/* pre-check for file position: if we'd return EOF, there's no point
	 * causing a deschedule */
	if (*pos >= sizeof(ctx->csa.lscsa->gprs))
		return 0;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	ret = __spufs_regs_read(ctx, buffer, size, pos);
	spu_release_saved(ctx);
	return ret;
}
562
563static ssize_t
564spufs_regs_write(struct file *file, const char __user *buffer,
565 size_t size, loff_t *pos)
566{
567 struct spu_context *ctx = file->private_data;
568 struct spu_lscsa *lscsa = ctx->csa.lscsa;
569 int ret;
570
571 size = min_t(ssize_t, sizeof lscsa->gprs - *pos, size);
572 if (size <= 0)
573 return -EFBIG;
574 *pos += size;
575
Christoph Hellwigc9101bd2007-12-20 16:39:59 +0900576 ret = spu_acquire_saved(ctx);
577 if (ret)
578 return ret;
Arnd Bergmann8b3d6662005-11-15 15:53:52 -0500579
580 ret = copy_from_user(lscsa->gprs + *pos - size,
581 buffer, size) ? -EFAULT : size;
582
Christoph Hellwig27b1ea02007-07-20 21:39:34 +0200583 spu_release_saved(ctx);
Arnd Bergmann8b3d6662005-11-15 15:53:52 -0500584 return ret;
585}
586
/* File operations for "regs": access to the saved general-purpose registers. */
static const struct file_operations spufs_regs_fops = {
	.open	 = spufs_regs_open,
	.read    = spufs_regs_read,
	.write   = spufs_regs_write,
	.llseek  = generic_file_llseek,
};
593
594static ssize_t
Dwayne Grant McConnellbf1ab972006-11-23 00:46:37 +0100595__spufs_fpcr_read(struct spu_context *ctx, char __user * buffer,
596 size_t size, loff_t * pos)
597{
598 struct spu_lscsa *lscsa = ctx->csa.lscsa;
599 return simple_read_from_buffer(buffer, size, pos,
600 &lscsa->fpcr, sizeof(lscsa->fpcr));
601}
602
/*
 * read() for "fpcr": force the context into the saved state so the FPCR
 * image is coherent, then copy it out.
 */
static ssize_t
spufs_fpcr_read(struct file *file, char __user * buffer,
		size_t size, loff_t * pos)
{
	int ret;
	struct spu_context *ctx = file->private_data;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	ret = __spufs_fpcr_read(ctx, buffer, size, pos);
	spu_release_saved(ctx);
	return ret;
}
617
618static ssize_t
619spufs_fpcr_write(struct file *file, const char __user * buffer,
620 size_t size, loff_t * pos)
621{
622 struct spu_context *ctx = file->private_data;
623 struct spu_lscsa *lscsa = ctx->csa.lscsa;
624 int ret;
625
626 size = min_t(ssize_t, sizeof(lscsa->fpcr) - *pos, size);
627 if (size <= 0)
628 return -EFBIG;
Christoph Hellwigc9101bd2007-12-20 16:39:59 +0900629
630 ret = spu_acquire_saved(ctx);
631 if (ret)
632 return ret;
633
Arnd Bergmann8b3d6662005-11-15 15:53:52 -0500634 *pos += size;
Arnd Bergmann8b3d6662005-11-15 15:53:52 -0500635 ret = copy_from_user((char *)&lscsa->fpcr + *pos - size,
636 buffer, size) ? -EFAULT : size;
637
Christoph Hellwig27b1ea02007-07-20 21:39:34 +0200638 spu_release_saved(ctx);
Arnd Bergmann8b3d6662005-11-15 15:53:52 -0500639 return ret;
640}
641
/* File operations for "fpcr": access to the saved floating-point control register. */
static const struct file_operations spufs_fpcr_fops = {
	.open = spufs_regs_open,
	.read = spufs_fpcr_read,
	.write = spufs_fpcr_write,
	.llseek = generic_file_llseek,
};
648
649/* generic open function for all pipe-like files */
650static int spufs_pipe_open(struct inode *inode, struct file *file)
651{
652 struct spufs_inode_info *i = SPUFS_I(inode);
653 file->private_data = i->i_ctx;
654
655 return nonseekable_open(inode, file);
656}
657
/*
 * Read as many bytes from the mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - no more data available in the mailbox
 * - end of the user provided buffer
 * - end of the mapped area
 */
static ssize_t spufs_mbox_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 mbox_data, __user *udata;
	ssize_t count;

	/* mailbox entries are 32 bits wide; shorter reads make no sense */
	if (len < 4)
		return -EINVAL;

	/* verify the whole range up front so __put_user is safe below */
	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	udata = (void __user *)buf;

	count = spu_acquire(ctx);
	if (count)
		return count;

	for (count = 0; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = ctx->ops->mbox_read(ctx, &mbox_data);
		if (ret == 0)
			break;	/* mailbox drained */

		/*
		 * at the end of the mapped area, we can fault
		 * but still need to return the data we have
		 * read successfully so far.
		 */
		ret = __put_user(mbox_data, udata);
		if (ret) {
			if (!count)
				count = -EFAULT;
			break;
		}
	}
	spu_release(ctx);

	/* nothing was available: this file is non-blocking by design */
	if (!count)
		count = -EAGAIN;

	return count;
}
710
/* File operations for "mbox": non-blocking reads of the SPU-to-PPU mailbox. */
static const struct file_operations spufs_mbox_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_mbox_read,
};
715
/*
 * read() for "mbox_stat": return the low byte of the mailbox status
 * register as a 4-byte value.
 */
static ssize_t spufs_mbox_stat_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	ssize_t ret;
	u32 mbox_stat;

	if (len < 4)
		return -EINVAL;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;

	mbox_stat = ctx->ops->mbox_stat_read(ctx) & 0xff;

	spu_release(ctx);

	if (copy_to_user(buf, &mbox_stat, sizeof mbox_stat))
		return -EFAULT;

	return 4;
}
739
/* File operations for "mbox_stat": reads of the mailbox status byte. */
static const struct file_operations spufs_mbox_stat_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_mbox_stat_read,
};
744
/*
 * low-level ibox access function: fetch one entry from the interrupt
 * mailbox via the backend ops; shared with the wait path below.
 */
size_t spu_ibox_read(struct spu_context *ctx, u32 *data)
{
	return ctx->ops->ibox_read(ctx, data);
}
Arnd Bergmann67207b92005-11-15 15:53:48 -0500750
/* fasync() for "ibox": register for SIGIO delivery from the interrupt callback. */
static int spufs_ibox_fasync(int fd, struct file *file, int on)
{
	struct spu_context *ctx = file->private_data;

	return fasync_helper(fd, file, on, &ctx->ibox_fasync);
}
757
/*
 * interrupt-level ibox callback function: wake ibox waiters and deliver
 * SIGIO.  The SPU may have no context attached, hence the NULL check.
 */
void spufs_ibox_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	if (!ctx)
		return;

	wake_up_all(&ctx->ibox_wq);
	kill_fasync(&ctx->ibox_fasync, SIGIO, POLLIN);
}
769
Arnd Bergmanncdcc89b2006-10-04 17:26:17 +0200770/*
771 * Read as many bytes from the interrupt mailbox as possible, until
772 * one of the conditions becomes true:
773 *
774 * - no more data available in the mailbox
775 * - end of the user provided buffer
776 * - end of the mapped area
777 *
778 * If the file is opened without O_NONBLOCK, we wait here until
779 * any data is available, but return when we have been able to
780 * read something.
781 */
/*
 * Read 4-byte words from the interrupt mailbox into the user buffer.
 * Blocks for the first word unless O_NONBLOCK is set; any further words
 * are copied only as long as they are immediately available and the
 * user buffer can be written without error.  Returns the byte count
 * actually transferred, or a negative errno.
 */
static ssize_t spufs_ibox_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 ibox_data, __user *udata;
	ssize_t count;

	if (len < 4)
		return -EINVAL;

	/* Validate the whole buffer up front so __put_user can be used below. */
	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	udata = (void __user *)buf;

	count = spu_acquire(ctx);
	if (count)
		goto out;

	/* wait only for the first element */
	count = 0;
	if (file->f_flags & O_NONBLOCK) {
		if (!spu_ibox_read(ctx, &ibox_data)) {
			count = -EAGAIN;
			goto out_unlock;
		}
	} else {
		count = spufs_wait(ctx->ibox_wq, spu_ibox_read(ctx, &ibox_data));
		/*
		 * NOTE(review): this error path deliberately jumps past
		 * spu_release -- spufs_wait appears to drop the context
		 * lock itself on failure; confirm against spufs.h.
		 */
		if (count)
			goto out;
	}

	/* if we can't write at all, return -EFAULT */
	count = __put_user(ibox_data, udata);
	if (count)
		goto out_unlock;

	for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = ctx->ops->ibox_read(ctx, &ibox_data);
		if (ret == 0)
			break;
		/*
		 * at the end of the mapped area, we can fault
		 * but still need to return the data we have
		 * read successfully so far.
		 */
		ret = __put_user(ibox_data, udata);
		if (ret)
			break;
	}

out_unlock:
	spu_release(ctx);
out:
	return count;
}
839
/*
 * poll handler for the ibox file: report readability based on the
 * mailbox status register.
 */
static unsigned int spufs_ibox_poll(struct file *file, poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	unsigned int mask;

	poll_wait(file, &ctx->ibox_wq, wait);

	/*
	 * For now keep this uninterruptible and also ignore the rule
	 * that poll should not sleep. Will be fixed later.
	 */
	mutex_lock(&ctx->state_mutex);
	mask = ctx->ops->mbox_stat_poll(ctx, POLLIN | POLLRDNORM);
	spu_release(ctx);

	return mask;
}
857
/* file_operations for the interrupt-mailbox (ibox) file. */
static const struct file_operations spufs_ibox_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_ibox_read,
	.poll	= spufs_ibox_poll,
	.fasync	= spufs_ibox_fasync,
};
864
865static ssize_t spufs_ibox_stat_read(struct file *file, char __user *buf,
866 size_t len, loff_t *pos)
867{
Arnd Bergmann8b3d6662005-11-15 15:53:52 -0500868 struct spu_context *ctx = file->private_data;
Christoph Hellwigc9101bd2007-12-20 16:39:59 +0900869 ssize_t ret;
Arnd Bergmann67207b92005-11-15 15:53:48 -0500870 u32 ibox_stat;
871
872 if (len < 4)
873 return -EINVAL;
874
Christoph Hellwigc9101bd2007-12-20 16:39:59 +0900875 ret = spu_acquire(ctx);
876 if (ret)
877 return ret;
Arnd Bergmann8b3d6662005-11-15 15:53:52 -0500878 ibox_stat = (ctx->ops->mbox_stat_read(ctx) >> 16) & 0xff;
879 spu_release(ctx);
Arnd Bergmann67207b92005-11-15 15:53:48 -0500880
881 if (copy_to_user(buf, &ibox_stat, sizeof ibox_stat))
882 return -EFAULT;
883
884 return 4;
885}
886
/* file_operations for the ibox-status file; read-only. */
static const struct file_operations spufs_ibox_stat_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_ibox_stat_read,
};
891
/*
 * low-level mailbox write: push one word into the write mailbox via the
 * context's backend ops; returns the ops' count (0 when the box is full).
 */
size_t spu_wbox_write(struct spu_context *ctx, u32 data)
{
	return ctx->ops->wbox_write(ctx, data);
}
Arnd Bergmann67207b92005-11-15 15:53:48 -0500897
898static int spufs_wbox_fasync(int fd, struct file *file, int on)
899{
Arnd Bergmann8b3d6662005-11-15 15:53:52 -0500900 struct spu_context *ctx = file->private_data;
901 int ret;
902
903 ret = fasync_helper(fd, file, on, &ctx->wbox_fasync);
904
905 return ret;
906}
907
908/* interrupt-level wbox callback function. */
909void spufs_wbox_callback(struct spu *spu)
910{
911 struct spu_context *ctx = spu->ctx;
912
Luke Browninge65c2f62007-12-20 16:39:59 +0900913 if (!ctx)
914 return;
915
Arnd Bergmann8b3d6662005-11-15 15:53:52 -0500916 wake_up_all(&ctx->wbox_wq);
917 kill_fasync(&ctx->wbox_fasync, SIGIO, POLLOUT);
Arnd Bergmann67207b92005-11-15 15:53:48 -0500918}
919
Arnd Bergmanncdcc89b2006-10-04 17:26:17 +0200920/*
921 * Write as many bytes to the interrupt mailbox as possible, until
922 * one of the conditions becomes true:
923 *
924 * - the mailbox is full
925 * - end of the user provided buffer
926 * - end of the mapped area
927 *
928 * If the file is opened without O_NONBLOCK, we wait here until
 * space is available, but return when we have been able to
930 * write something.
931 */
/*
 * Write 4-byte words from the user buffer into the SPU write mailbox.
 * Blocks for space for the first word unless O_NONBLOCK is set; further
 * words are written only while the mailbox accepts them.  Returns the
 * byte count actually transferred, or a negative errno.
 */
static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 wbox_data, __user *udata;
	ssize_t count;

	if (len < 4)
		return -EINVAL;

	/* Validate the whole buffer up front so __get_user can be used below. */
	udata = (void __user *)buf;
	if (!access_ok(VERIFY_READ, buf, len))
		return -EFAULT;

	/* Fetch the first word before taking the context lock. */
	if (__get_user(wbox_data, udata))
		return -EFAULT;

	count = spu_acquire(ctx);
	if (count)
		goto out;

	/*
	 * make sure we can at least write one element, by waiting
	 * in case of !O_NONBLOCK
	 */
	count = 0;
	if (file->f_flags & O_NONBLOCK) {
		if (!spu_wbox_write(ctx, wbox_data)) {
			count = -EAGAIN;
			goto out_unlock;
		}
	} else {
		count = spufs_wait(ctx->wbox_wq, spu_wbox_write(ctx, wbox_data));
		/*
		 * NOTE(review): this error path deliberately jumps past
		 * spu_release -- spufs_wait appears to drop the context
		 * lock itself on failure; confirm against spufs.h.
		 */
		if (count)
			goto out;
	}

	/* write as much as possible */
	for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = __get_user(wbox_data, udata);
		if (ret)
			break;

		ret = spu_wbox_write(ctx, wbox_data);
		if (ret == 0)
			break;
	}

out_unlock:
	spu_release(ctx);
out:
	return count;
}
987
/*
 * poll handler for the wbox file: report writability based on the
 * mailbox status register.
 */
static unsigned int spufs_wbox_poll(struct file *file, poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	unsigned int mask;

	poll_wait(file, &ctx->wbox_wq, wait);

	/*
	 * For now keep this uninterruptible and also ignore the rule
	 * that poll should not sleep. Will be fixed later.
	 */
	mutex_lock(&ctx->state_mutex);
	mask = ctx->ops->mbox_stat_poll(ctx, POLLOUT | POLLWRNORM);
	spu_release(ctx);

	return mask;
}
1005
/* file_operations for the write-mailbox (wbox) file. */
static const struct file_operations spufs_wbox_fops = {
	.open	= spufs_pipe_open,
	.write	= spufs_wbox_write,
	.poll	= spufs_wbox_poll,
	.fasync	= spufs_wbox_fasync,
};
1012
1013static ssize_t spufs_wbox_stat_read(struct file *file, char __user *buf,
1014 size_t len, loff_t *pos)
1015{
Arnd Bergmann8b3d6662005-11-15 15:53:52 -05001016 struct spu_context *ctx = file->private_data;
Christoph Hellwigc9101bd2007-12-20 16:39:59 +09001017 ssize_t ret;
Arnd Bergmann67207b92005-11-15 15:53:48 -05001018 u32 wbox_stat;
1019
1020 if (len < 4)
1021 return -EINVAL;
1022
Christoph Hellwigc9101bd2007-12-20 16:39:59 +09001023 ret = spu_acquire(ctx);
1024 if (ret)
1025 return ret;
Arnd Bergmann8b3d6662005-11-15 15:53:52 -05001026 wbox_stat = (ctx->ops->mbox_stat_read(ctx) >> 8) & 0xff;
1027 spu_release(ctx);
Arnd Bergmann67207b92005-11-15 15:53:48 -05001028
1029 if (copy_to_user(buf, &wbox_stat, sizeof wbox_stat))
1030 return -EFAULT;
1031
1032 return 4;
1033}
1034
/* file_operations for the wbox-status file; read-only. */
static const struct file_operations spufs_wbox_stat_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_wbox_stat_read,
};
1039
/*
 * Open the signal1 file: record the address_space for the first opener
 * so faults on the signal1 mapping can be unmapped later; openers are
 * refcounted under mapping_lock.
 */
static int spufs_signal1_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->signal1 = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}
1052
/* Release the signal1 file: drop the mapping pointer with the last opener. */
static int
spufs_signal1_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->signal1 = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}
1065
Dwayne Grant McConnellbf1ab972006-11-23 00:46:37 +01001066static ssize_t __spufs_signal1_read(struct spu_context *ctx, char __user *buf,
Arnd Bergmann67207b92005-11-15 15:53:48 -05001067 size_t len, loff_t *pos)
1068{
Dwayne Grant McConnell17f88ce2006-11-20 18:45:01 +01001069 int ret = 0;
Arnd Bergmann67207b92005-11-15 15:53:48 -05001070 u32 data;
1071
Arnd Bergmann67207b92005-11-15 15:53:48 -05001072 if (len < 4)
1073 return -EINVAL;
1074
Dwayne Grant McConnell17f88ce2006-11-20 18:45:01 +01001075 if (ctx->csa.spu_chnlcnt_RW[3]) {
1076 data = ctx->csa.spu_chnldata_RW[3];
1077 ret = 4;
1078 }
Arnd Bergmann8b3d6662005-11-15 15:53:52 -05001079
Dwayne Grant McConnell17f88ce2006-11-20 18:45:01 +01001080 if (!ret)
1081 goto out;
1082
Arnd Bergmann67207b92005-11-15 15:53:48 -05001083 if (copy_to_user(buf, &data, 4))
1084 return -EFAULT;
1085
Dwayne Grant McConnell17f88ce2006-11-20 18:45:01 +01001086out:
1087 return ret;
Arnd Bergmann67207b92005-11-15 15:53:48 -05001088}
1089
/*
 * read handler for the signal1 file: acquire the context in saved state
 * (the value lives in the CSA) and delegate to __spufs_signal1_read.
 */
static ssize_t spufs_signal1_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	int ret;
	struct spu_context *ctx = file->private_data;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	ret = __spufs_signal1_read(ctx, buf, len, pos);
	spu_release_saved(ctx);

	return ret;
}
1104
Arnd Bergmann67207b92005-11-15 15:53:48 -05001105static ssize_t spufs_signal1_write(struct file *file, const char __user *buf,
1106 size_t len, loff_t *pos)
1107{
1108 struct spu_context *ctx;
Christoph Hellwigc9101bd2007-12-20 16:39:59 +09001109 ssize_t ret;
Arnd Bergmann67207b92005-11-15 15:53:48 -05001110 u32 data;
1111
1112 ctx = file->private_data;
Arnd Bergmann67207b92005-11-15 15:53:48 -05001113
1114 if (len < 4)
1115 return -EINVAL;
1116
1117 if (copy_from_user(&data, buf, 4))
1118 return -EFAULT;
1119
Christoph Hellwigc9101bd2007-12-20 16:39:59 +09001120 ret = spu_acquire(ctx);
1121 if (ret)
1122 return ret;
Arnd Bergmann8b3d6662005-11-15 15:53:52 -05001123 ctx->ops->signal1_write(ctx, data);
1124 spu_release(ctx);
Arnd Bergmann67207b92005-11-15 15:53:48 -05001125
1126 return 4;
1127}
1128
/*
 * Fault handler for the signal1 mapping.  The problem-state offset
 * depends on the configured map size (page size): 0x14000 for 4k pages,
 * 0x10000 for 64k pages.
 */
static int
spufs_signal1_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
#if SPUFS_SIGNAL_MAP_SIZE == 0x1000
	return spufs_ps_fault(vma, vmf, 0x14000, SPUFS_SIGNAL_MAP_SIZE);
#elif SPUFS_SIGNAL_MAP_SIZE == 0x10000
	/* For 64k pages, both signal1 and signal2 can be used to mmap the whole
	 * signal 1 and 2 area
	 */
	return spufs_ps_fault(vma, vmf, 0x10000, SPUFS_SIGNAL_MAP_SIZE);
#else
#error unsupported page size
#endif
}

/* vm_operations for signal1 mappings: faults resolved on demand. */
static struct vm_operations_struct spufs_signal1_mmap_vmops = {
	.fault = spufs_signal1_mmap_fault,
};
1147
/*
 * mmap handler for the signal1 file: shared, non-cached PFN mapping of
 * the problem-state signal area; pages are filled in by the fault handler.
 */
static int spufs_signal1_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	vma->vm_ops = &spufs_signal1_mmap_vmops;
	return 0;
}
Mark Nutter6df10a82006-03-23 00:00:12 +01001159
/* file_operations for the signal1 file (scheduled contexts). */
static const struct file_operations spufs_signal1_fops = {
	.open = spufs_signal1_open,
	.release = spufs_signal1_release,
	.read = spufs_signal1_read,
	.write = spufs_signal1_write,
	.mmap = spufs_signal1_mmap,
};

/* Same as above without .read: reading needs the saved context state. */
static const struct file_operations spufs_signal1_nosched_fops = {
	.open = spufs_signal1_open,
	.release = spufs_signal1_release,
	.write = spufs_signal1_write,
	.mmap = spufs_signal1_mmap,
};
1174
/*
 * Open the signal2 file: record the address_space for the first opener;
 * openers are refcounted under mapping_lock (mirror of signal1_open).
 */
static int spufs_signal2_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->signal2 = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}
1187
/* Release the signal2 file: drop the mapping pointer with the last opener. */
static int
spufs_signal2_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->signal2 = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}
1200
Dwayne Grant McConnellbf1ab972006-11-23 00:46:37 +01001201static ssize_t __spufs_signal2_read(struct spu_context *ctx, char __user *buf,
Arnd Bergmann67207b92005-11-15 15:53:48 -05001202 size_t len, loff_t *pos)
1203{
Dwayne Grant McConnell17f88ce2006-11-20 18:45:01 +01001204 int ret = 0;
Arnd Bergmann67207b92005-11-15 15:53:48 -05001205 u32 data;
1206
Arnd Bergmann67207b92005-11-15 15:53:48 -05001207 if (len < 4)
1208 return -EINVAL;
1209
Dwayne Grant McConnell17f88ce2006-11-20 18:45:01 +01001210 if (ctx->csa.spu_chnlcnt_RW[4]) {
1211 data = ctx->csa.spu_chnldata_RW[4];
1212 ret = 4;
1213 }
Arnd Bergmann8b3d6662005-11-15 15:53:52 -05001214
Dwayne Grant McConnell17f88ce2006-11-20 18:45:01 +01001215 if (!ret)
1216 goto out;
1217
Arnd Bergmann67207b92005-11-15 15:53:48 -05001218 if (copy_to_user(buf, &data, 4))
1219 return -EFAULT;
1220
Dwayne Grant McConnell17f88ce2006-11-20 18:45:01 +01001221out:
Dwayne Grant McConnellbf1ab972006-11-23 00:46:37 +01001222 return ret;
1223}
1224
/*
 * read handler for the signal2 file: acquire the context in saved state
 * (the value lives in the CSA) and delegate to __spufs_signal2_read.
 */
static ssize_t spufs_signal2_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	ret = __spufs_signal2_read(ctx, buf, len, pos);
	spu_release_saved(ctx);

	return ret;
}
1239
1240static ssize_t spufs_signal2_write(struct file *file, const char __user *buf,
1241 size_t len, loff_t *pos)
1242{
1243 struct spu_context *ctx;
Christoph Hellwigc9101bd2007-12-20 16:39:59 +09001244 ssize_t ret;
Arnd Bergmann67207b92005-11-15 15:53:48 -05001245 u32 data;
1246
1247 ctx = file->private_data;
Arnd Bergmann67207b92005-11-15 15:53:48 -05001248
1249 if (len < 4)
1250 return -EINVAL;
1251
1252 if (copy_from_user(&data, buf, 4))
1253 return -EFAULT;
1254
Christoph Hellwigc9101bd2007-12-20 16:39:59 +09001255 ret = spu_acquire(ctx);
1256 if (ret)
1257 return ret;
Arnd Bergmann8b3d6662005-11-15 15:53:52 -05001258 ctx->ops->signal2_write(ctx, data);
1259 spu_release(ctx);
Arnd Bergmann67207b92005-11-15 15:53:48 -05001260
1261 return 4;
1262}
1263
/*
 * signal2 mmap support.  Only compiled for 4k base pages; with 64k
 * pages the whole problem state is mapped at once elsewhere and
 * spufs_signal2_mmap is defined away to NULL.
 */
#if SPUFS_MMAP_4K
static int
spufs_signal2_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
#if SPUFS_SIGNAL_MAP_SIZE == 0x1000
	return spufs_ps_fault(vma, vmf, 0x1c000, SPUFS_SIGNAL_MAP_SIZE);
#elif SPUFS_SIGNAL_MAP_SIZE == 0x10000
	/* For 64k pages, both signal1 and signal2 can be used to mmap the whole
	 * signal 1 and 2 area
	 */
	return spufs_ps_fault(vma, vmf, 0x10000, SPUFS_SIGNAL_MAP_SIZE);
#else
#error unsupported page size
#endif
}

/* vm_operations for signal2 mappings: faults resolved on demand. */
static struct vm_operations_struct spufs_signal2_mmap_vmops = {
	.fault = spufs_signal2_mmap_fault,
};

/* Shared, non-cached PFN mapping of the signal2 problem-state area. */
static int spufs_signal2_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	vma->vm_ops = &spufs_signal2_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_signal2_mmap NULL
#endif /* !SPUFS_MMAP_4K */
Mark Nutter6df10a82006-03-23 00:00:12 +01001298
/* file_operations for the signal2 file (scheduled contexts). */
static const struct file_operations spufs_signal2_fops = {
	.open = spufs_signal2_open,
	.release = spufs_signal2_release,
	.read = spufs_signal2_read,
	.write = spufs_signal2_write,
	.mmap = spufs_signal2_mmap,
};

/* Same as above without .read: reading needs the saved context state. */
static const struct file_operations spufs_signal2_nosched_fops = {
	.open = spufs_signal2_open,
	.release = spufs_signal2_release,
	.write = spufs_signal2_write,
	.mmap = spufs_signal2_mmap,
};
1313
/*
 * This is a wrapper around DEFINE_SIMPLE_ATTRIBUTE which does the
 * work of acquiring (or not) the SPU context before calling through
 * to the actual get routine. The set routine is called directly.
 *
 * __acquire selects the locking mode for the generated getter:
 *   SPU_ATTR_NOACQUIRE     - call __get with no locking
 *   SPU_ATTR_ACQUIRE       - bracket __get with spu_acquire/spu_release
 *   SPU_ATTR_ACQUIRE_SAVED - bracket __get with the saved-state variants
 */
#define SPU_ATTR_NOACQUIRE	0
#define SPU_ATTR_ACQUIRE	1
#define SPU_ATTR_ACQUIRE_SAVED	2

#define DEFINE_SPUFS_ATTRIBUTE(__name, __get, __set, __fmt, __acquire)	\
static int __##__get(void *data, u64 *val)				\
{									\
	struct spu_context *ctx = data;					\
	int ret = 0;							\
									\
	if (__acquire == SPU_ATTR_ACQUIRE) {				\
		ret = spu_acquire(ctx);					\
		if (ret)						\
			return ret;					\
		*val = __get(ctx);					\
		spu_release(ctx);					\
	} else if (__acquire == SPU_ATTR_ACQUIRE_SAVED)	{		\
		ret = spu_acquire_saved(ctx);				\
		if (ret)						\
			return ret;					\
		*val = __get(ctx);					\
		spu_release_saved(ctx);					\
	} else								\
		*val = __get(ctx);					\
									\
	return 0;							\
}									\
DEFINE_SPUFS_SIMPLE_ATTRIBUTE(__name, __##__get, __set, __fmt);
Michael Ellerman104f0cc2007-09-19 14:38:12 +10001347
Christoph Hellwig197b1a82007-12-20 16:39:59 +09001348static int spufs_signal1_type_set(void *data, u64 val)
Arnd Bergmann67207b92005-11-15 15:53:48 -05001349{
1350 struct spu_context *ctx = data;
Christoph Hellwigc9101bd2007-12-20 16:39:59 +09001351 int ret;
Arnd Bergmann67207b92005-11-15 15:53:48 -05001352
Christoph Hellwigc9101bd2007-12-20 16:39:59 +09001353 ret = spu_acquire(ctx);
1354 if (ret)
1355 return ret;
Arnd Bergmann8b3d6662005-11-15 15:53:52 -05001356 ctx->ops->signal1_type_set(ctx, val);
1357 spu_release(ctx);
Christoph Hellwig197b1a82007-12-20 16:39:59 +09001358
1359 return 0;
Arnd Bergmann67207b92005-11-15 15:53:48 -05001360}
1361
/* Attribute getter; wrapped by DEFINE_SPUFS_ATTRIBUTE which takes the
 * context lock (SPU_ATTR_ACQUIRE) before calling it. */
static u64 spufs_signal1_type_get(struct spu_context *ctx)
{
	return ctx->ops->signal1_type_get(ctx);
}
DEFINE_SPUFS_ATTRIBUTE(spufs_signal1_type, spufs_signal1_type_get,
		       spufs_signal1_type_set, "%llu\n", SPU_ATTR_ACQUIRE);
Dwayne Grant McConnellbf1ab972006-11-23 00:46:37 +01001368
Arnd Bergmann67207b92005-11-15 15:53:48 -05001369
Christoph Hellwig197b1a82007-12-20 16:39:59 +09001370static int spufs_signal2_type_set(void *data, u64 val)
Arnd Bergmann67207b92005-11-15 15:53:48 -05001371{
1372 struct spu_context *ctx = data;
Christoph Hellwigc9101bd2007-12-20 16:39:59 +09001373 int ret;
Arnd Bergmann67207b92005-11-15 15:53:48 -05001374
Christoph Hellwigc9101bd2007-12-20 16:39:59 +09001375 ret = spu_acquire(ctx);
1376 if (ret)
1377 return ret;
Arnd Bergmann8b3d6662005-11-15 15:53:52 -05001378 ctx->ops->signal2_type_set(ctx, val);
1379 spu_release(ctx);
Christoph Hellwig197b1a82007-12-20 16:39:59 +09001380
1381 return 0;
Arnd Bergmann67207b92005-11-15 15:53:48 -05001382}
1383
/* Attribute getter; wrapped by DEFINE_SPUFS_ATTRIBUTE which takes the
 * context lock (SPU_ATTR_ACQUIRE) before calling it. */
static u64 spufs_signal2_type_get(struct spu_context *ctx)
{
	return ctx->ops->signal2_type_get(ctx);
}
DEFINE_SPUFS_ATTRIBUTE(spufs_signal2_type, spufs_signal2_type_get,
		       spufs_signal2_type_set, "%llu\n", SPU_ATTR_ACQUIRE);
Arnd Bergmann67207b92005-11-15 15:53:48 -05001390
/*
 * mss mmap support; only compiled for 4k base pages, otherwise
 * spufs_mss_mmap is defined away to NULL.
 */
#if SPUFS_MMAP_4K
static int
spufs_mss_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return spufs_ps_fault(vma, vmf, 0x0000, SPUFS_MSS_MAP_SIZE);
}

/* vm_operations for mss mappings: faults resolved on demand. */
static struct vm_operations_struct spufs_mss_mmap_vmops = {
	.fault = spufs_mss_mmap_fault,
};

/*
 * mmap support for problem state MFC DMA area [0x0000 - 0x0fff].
 */
static int spufs_mss_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	vma->vm_ops = &spufs_mss_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_mss_mmap NULL
#endif /* !SPUFS_MMAP_4K */
arnd@arndb.ded9379c42006-06-19 20:33:21 +02001419
1420static int spufs_mss_open(struct inode *inode, struct file *file)
1421{
1422 struct spufs_inode_info *i = SPUFS_I(inode);
Benjamin Herrenschmidt17e0e272007-02-13 11:46:08 +11001423 struct spu_context *ctx = i->i_ctx;
arnd@arndb.ded9379c42006-06-19 20:33:21 +02001424
1425 file->private_data = i->i_ctx;
Christoph Hellwig43c2bbd2007-04-23 21:08:07 +02001426
Christoph Hellwig47d3a5f2007-06-04 23:26:51 +10001427 mutex_lock(&ctx->mapping_lock);
Christoph Hellwig43c2bbd2007-04-23 21:08:07 +02001428 if (!i->i_openers++)
1429 ctx->mss = inode->i_mapping;
Christoph Hellwig47d3a5f2007-06-04 23:26:51 +10001430 mutex_unlock(&ctx->mapping_lock);
arnd@arndb.ded9379c42006-06-19 20:33:21 +02001431 return nonseekable_open(inode, file);
1432}
1433
/* Release the mss file: drop the mapping pointer with the last opener. */
static int
spufs_mss_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->mss = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}
1446
/* file_operations for the mss file; mmap-only access to the MFC area. */
static const struct file_operations spufs_mss_fops = {
	.open	 = spufs_mss_open,
	.release = spufs_mss_release,
	.mmap	 = spufs_mss_mmap,
};
1452
/* Fault handler for the full problem-state mapping (offset 0). */
static int
spufs_psmap_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return spufs_ps_fault(vma, vmf, 0x0000, SPUFS_PS_MAP_SIZE);
}

/* vm_operations for psmap mappings: faults resolved on demand. */
static struct vm_operations_struct spufs_psmap_mmap_vmops = {
	.fault = spufs_psmap_mmap_fault,
};
1462
/*
 * mmap support for full problem state area [0x00000 - 0x1ffff].
 * Shared, non-cached PFN mapping; pages filled in by the fault handler.
 */
static int spufs_psmap_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	vma->vm_ops = &spufs_psmap_mmap_vmops;
	return 0;
}
1477
static int spufs_psmap_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	/*
	 * Record the address_space on first open so the scheduler can
	 * unmap PS pages when the context loses its SPU; i_openers is
	 * protected by mapping_lock.
	 */
	mutex_lock(&ctx->mapping_lock);
	file->private_data = i->i_ctx;
	if (!i->i_openers++)
		ctx->psmap = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}
1490
static int
spufs_psmap_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	/* Drop the cached mapping pointer when the last opener goes away. */
	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->psmap = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}
1503
/* "psmap" file: full problem-state area, mmap-only access. */
static const struct file_operations spufs_psmap_fops = {
	.open = spufs_psmap_open,
	.release = spufs_psmap_release,
	.mmap = spufs_psmap_mmap,
};
1509
1510
Benjamin Herrenschmidt27d5bf22006-10-04 17:26:11 +02001511#if SPUFS_MMAP_4K
/*
 * Fault handler for the MFC command mapping: the 4k window starting at
 * offset 0x3000 of the problem-state area.
 */
static int
spufs_mfc_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return spufs_ps_fault(vma, vmf, 0x3000, SPUFS_MFC_MAP_SIZE);
}
1517
/* VM callbacks for the MFC mapping; pages are provided on fault. */
static struct vm_operations_struct spufs_mfc_mmap_vmops = {
	.fault = spufs_mfc_mmap_fault,
};
1521
1522/*
1523 * mmap support for problem state MFC DMA area [0x0000 - 0x0fff].
Mark Nutter6df10a82006-03-23 00:00:12 +01001524 */
1525static int spufs_mfc_mmap(struct file *file, struct vm_area_struct *vma)
1526{
1527 if (!(vma->vm_flags & VM_SHARED))
1528 return -EINVAL;
1529
Benjamin Herrenschmidt78bde532007-02-13 11:46:06 +11001530 vma->vm_flags |= VM_IO | VM_PFNMAP;
Benjamin Herrenschmidt64b3d0e2008-12-18 19:13:51 +00001531 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
Mark Nutter6df10a82006-03-23 00:00:12 +01001532
1533 vma->vm_ops = &spufs_mfc_mmap_vmops;
1534 return 0;
1535}
Benjamin Herrenschmidt27d5bf22006-10-04 17:26:11 +02001536#else /* SPUFS_MMAP_4K */
1537#define spufs_mfc_mmap NULL
1538#endif /* !SPUFS_MMAP_4K */
Arnd Bergmanna33a7d72006-03-23 00:00:11 +01001539
static int spufs_mfc_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	/* we don't want to deal with DMA into other processes */
	if (ctx->owner != current->mm)
		return -EINVAL;

	/* Refuse a second open of the same inode (single-user interface). */
	if (atomic_read(&inode->i_count) != 1)
		return -EBUSY;

	/* Cache the mapping on first open, under mapping_lock. */
	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->mfc = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}
1559
static int
spufs_mfc_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	/* Clear the cached mapping pointer on last close. */
	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->mfc = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}
1572
/*
 * interrupt-level mfc callback function.
 * Runs in interrupt context when an MFC tag-group event fires: wakes all
 * waiters and, if async notification was requested, computes the poll
 * mask and signals SIGIO.
 */
void spufs_mfc_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	if (!ctx)
		return;

	wake_up_all(&ctx->mfc_wq);

	pr_debug("%s %s\n", __func__, spu->name);
	if (ctx->mfc_fasync) {
		u32 free_elements, tagstatus;
		unsigned int mask;

		/* no need for spu_acquire in interrupt context */
		free_elements = ctx->ops->get_mfc_free_elements(ctx);
		tagstatus = ctx->ops->read_mfc_tagstatus(ctx);

		mask = 0;
		/* low halfword holds the free command-queue slot count */
		if (free_elements & 0xffff)
			mask |= POLLOUT;
		if (tagstatus & ctx->tagwait)
			mask |= POLLIN;

		kill_fasync(&ctx->mfc_fasync, SIGIO, mask);
	}
}
1601
/*
 * Wait condition for spufs_mfc_read: returns 1 (and the completed groups
 * in *status) if any awaited tag group finished, otherwise arms the tag
 * query interrupt and returns 0 so the caller keeps sleeping.
 */
static int spufs_read_mfc_tagstatus(struct spu_context *ctx, u32 *status)
{
	/* See if there is one tag group is complete */
	/* FIXME we need locking around tagwait */
	*status = ctx->ops->read_mfc_tagstatus(ctx) & ctx->tagwait;
	ctx->tagwait &= ~*status;
	if (*status)
		return 1;

	/* enable interrupt waiting for any tag group,
	   may silently fail if interrupts are already enabled */
	ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
	return 0;
}
1616
1617static ssize_t spufs_mfc_read(struct file *file, char __user *buffer,
1618 size_t size, loff_t *pos)
1619{
1620 struct spu_context *ctx = file->private_data;
1621 int ret = -EINVAL;
1622 u32 status;
1623
1624 if (size != 4)
1625 goto out;
1626
Christoph Hellwigc9101bd2007-12-20 16:39:59 +09001627 ret = spu_acquire(ctx);
1628 if (ret)
1629 return ret;
1630
1631 ret = -EINVAL;
Arnd Bergmanna33a7d72006-03-23 00:00:11 +01001632 if (file->f_flags & O_NONBLOCK) {
1633 status = ctx->ops->read_mfc_tagstatus(ctx);
1634 if (!(status & ctx->tagwait))
1635 ret = -EAGAIN;
1636 else
Christoph Hellwigc9101bd2007-12-20 16:39:59 +09001637 /* XXX(hch): shouldn't we clear ret here? */
Arnd Bergmanna33a7d72006-03-23 00:00:11 +01001638 ctx->tagwait &= ~status;
1639 } else {
1640 ret = spufs_wait(ctx->mfc_wq,
1641 spufs_read_mfc_tagstatus(ctx, &status));
Christoph Hellwigeebead52008-02-08 15:50:41 +11001642 if (ret)
1643 goto out;
Arnd Bergmanna33a7d72006-03-23 00:00:11 +01001644 }
1645 spu_release(ctx);
1646
Arnd Bergmanna33a7d72006-03-23 00:00:11 +01001647 ret = 4;
1648 if (copy_to_user(buffer, &status, 4))
1649 ret = -EFAULT;
1650
1651out:
1652 return ret;
1653}
1654
/*
 * Validate a user-supplied MFC DMA command: opcode must be a plain
 * put/get variant, LSA and EA must agree in their low four bits, the
 * transfer size must be 1/2/4/8 or a multiple of 16 bytes with matching
 * LSA alignment and at most 16k, the tag must be in the user range
 * (0-15), and the class field must be zero.  Returns 0 or -EIO.
 */
static int spufs_check_valid_dma(struct mfc_dma_command *cmd)
{
	pr_debug("queueing DMA %x %llx %x %x %x\n", cmd->lsa,
		 cmd->ea, cmd->size, cmd->tag, cmd->cmd);

	switch (cmd->cmd) {
	case MFC_PUT_CMD:
	case MFC_PUTF_CMD:
	case MFC_PUTB_CMD:
	case MFC_GET_CMD:
	case MFC_GETF_CMD:
	case MFC_GETB_CMD:
		break;
	default:
		pr_debug("invalid DMA opcode %x\n", cmd->cmd);
		return -EIO;
	}

	if ((cmd->lsa & 0xf) != (cmd->ea &0xf)) {
		pr_debug("invalid DMA alignment, ea %llx lsa %x\n",
				cmd->ea, cmd->lsa);
		return -EIO;
	}

	/*
	 * Alignment rules: a size whose low nibble is 1 needs no
	 * alignment, 2/4/8 need natural alignment, and 0 (multiple of
	 * 16) needs 16-byte alignment.  Any other low nibble falls
	 * through default: into the shared error path above it.
	 */
	switch (cmd->size & 0xf) {
	case 1:
		break;
	case 2:
		if (cmd->lsa & 1)
			goto error;
		break;
	case 4:
		if (cmd->lsa & 3)
			goto error;
		break;
	case 8:
		if (cmd->lsa & 7)
			goto error;
		break;
	case 0:
		if (cmd->lsa & 15)
			goto error;
		break;
	error:
	default:
		pr_debug("invalid DMA alignment %x for size %x\n",
			cmd->lsa & 0xf, cmd->size);
		return -EIO;
	}

	if (cmd->size > 16 * 1024) {
		pr_debug("invalid DMA size %x\n", cmd->size);
		return -EIO;
	}

	if (cmd->tag & 0xfff0) {
		/* we reserve the higher tag numbers for kernel use */
		pr_debug("invalid DMA tag\n");
		return -EIO;
	}

	if (cmd->class) {
		/* not supported in this version */
		pr_debug("invalid DMA class\n");
		return -EIO;
	}

	return 0;
}
1724
/*
 * Wait condition for spufs_mfc_write: try to queue the command; on a
 * full queue arm the tag-completion interrupt and retry once.  Returns
 * 1 when done (success or hard error in *error), 0 to keep sleeping.
 */
static int spu_send_mfc_command(struct spu_context *ctx,
		struct mfc_dma_command cmd,
		int *error)
{
	*error = ctx->ops->send_mfc_command(ctx, &cmd);
	if (*error == -EAGAIN) {
		/* wait for any tag group to complete
		   so we have space for the new command */
		ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
		/* try again, because the queue might be
		   empty again */
		*error = ctx->ops->send_mfc_command(ctx, &cmd);
		if (*error == -EAGAIN)
			return 0;
	}
	return 1;
}
1742
/*
 * Queue one MFC DMA command written as a struct mfc_dma_command.
 * Validates the command, waits for the context to be runnable, then
 * submits it (blocking for queue space unless O_NONBLOCK) and records
 * the tag in tagwait.  Returns the struct size on success.
 */
static ssize_t spufs_mfc_write(struct file *file, const char __user *buffer,
			size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	struct mfc_dma_command cmd;
	int ret = -EINVAL;

	if (size != sizeof cmd)
		goto out;

	ret = -EFAULT;
	if (copy_from_user(&cmd, buffer, sizeof cmd))
		goto out;

	ret = spufs_check_valid_dma(&cmd);
	if (ret)
		goto out;

	ret = spu_acquire(ctx);
	if (ret)
		goto out;

	/* spufs_wait drops the context lock itself on error, hence "out" */
	ret = spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
	if (ret)
		goto out;

	if (file->f_flags & O_NONBLOCK) {
		ret = ctx->ops->send_mfc_command(ctx, &cmd);
	} else {
		int status;
		ret = spufs_wait(ctx->mfc_wq,
				 spu_send_mfc_command(ctx, cmd, &status));
		if (ret)
			goto out;
		if (status)
			ret = status;
	}

	if (ret)
		goto out_unlock;

	/* remember the tag so poll/read can wait for its completion */
	ctx->tagwait |= 1 << cmd.tag;
	ret = size;

out_unlock:
	spu_release(ctx);
out:
	return ret;
}
1792
/*
 * poll: POLLOUT when the MFC command queue has free slots, POLLIN when
 * any awaited tag group has completed.
 */
static unsigned int spufs_mfc_poll(struct file *file,poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	u32 free_elements, tagstatus;
	unsigned int mask;

	poll_wait(file, &ctx->mfc_wq, wait);

	/*
	 * For now keep this uninterruptible and also ignore the rule
	 * that poll should not sleep.  Will be fixed later.
	 */
	mutex_lock(&ctx->state_mutex);
	ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2);
	free_elements = ctx->ops->get_mfc_free_elements(ctx);
	tagstatus = ctx->ops->read_mfc_tagstatus(ctx);
	spu_release(ctx);

	mask = 0;
	if (free_elements & 0xffff)
		mask |= POLLOUT | POLLWRNORM;
	if (tagstatus & ctx->tagwait)
		mask |= POLLIN | POLLRDNORM;

	pr_debug("%s: free %d tagstatus %d tagwait %d\n", __func__,
		free_elements, tagstatus, ctx->tagwait);

	return mask;
}
1822
/*
 * flush: intended to drain all outstanding DMA; the real wait is
 * disabled (#if 0) because it hangs, so currently this only acquires
 * and releases the context.
 */
static int spufs_mfc_flush(struct file *file, fl_owner_t id)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		goto out;
#if 0
/* this currently hangs */
	ret = spufs_wait(ctx->mfc_wq,
			 ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2));
	if (ret)
		goto out;
	ret = spufs_wait(ctx->mfc_wq,
			 ctx->ops->read_mfc_tagstatus(ctx) == ctx->tagwait);
	if (ret)
		goto out;
#else
	ret = 0;
#endif
	spu_release(ctx);
out:
	return ret;
}
1848
/* fsync is equivalent to flush for this file; datasync is ignored. */
static int spufs_mfc_fsync(struct file *file, struct dentry *dentry,
			   int datasync)
{
	return spufs_mfc_flush(file, NULL);
}
1854
/* Register/unregister for SIGIO delivery from spufs_mfc_callback(). */
static int spufs_mfc_fasync(int fd, struct file *file, int on)
{
	struct spu_context *ctx = file->private_data;

	return fasync_helper(fd, file, on, &ctx->mfc_fasync);
}
1861
/* "mfc" file: DMA command submission/status plus mmap of the MFC area. */
static const struct file_operations spufs_mfc_fops = {
	.open	 = spufs_mfc_open,
	.release = spufs_mfc_release,
	.read	 = spufs_mfc_read,
	.write	 = spufs_mfc_write,
	.poll	 = spufs_mfc_poll,
	.flush	 = spufs_mfc_flush,
	.fsync	 = spufs_mfc_fsync,
	.fasync	 = spufs_mfc_fasync,
	.mmap	 = spufs_mfc_mmap,
};
1873
/* "npc" attribute: SPU next program counter, live register access. */
static int spufs_npc_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ctx->ops->npc_write(ctx, val);
	spu_release(ctx);

	return 0;
}

static u64 spufs_npc_get(struct spu_context *ctx)
{
	return ctx->ops->npc_read(ctx);
}
DEFINE_SPUFS_ATTRIBUTE(spufs_npc_ops, spufs_npc_get, spufs_npc_set,
		       "0x%llx\n", SPU_ATTR_ACQUIRE);
Arnd Bergmann67207b92005-11-15 15:53:48 -05001894
/* "decr" attribute: decrementer value in the saved local-store state. */
static int spufs_decr_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	/* context must be saved so the CSA copy is authoritative */
	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	lscsa->decr.slot[0] = (u32) val;
	spu_release_saved(ctx);

	return 0;
}

static u64 spufs_decr_get(struct spu_context *ctx)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return lscsa->decr.slot[0];
}
DEFINE_SPUFS_ATTRIBUTE(spufs_decr_ops, spufs_decr_get, spufs_decr_set,
		       "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED);
Arnd Bergmann8b3d6662005-11-15 15:53:52 -05001917
/* "decr_status" attribute: whether the decrementer is running (saved state). */
static int spufs_decr_status_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	int ret;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	/* any non-zero value means "running" */
	if (val)
		ctx->csa.priv2.mfc_control_RW |= MFC_CNTL_DECREMENTER_RUNNING;
	else
		ctx->csa.priv2.mfc_control_RW &= ~MFC_CNTL_DECREMENTER_RUNNING;
	spu_release_saved(ctx);

	return 0;
}

static u64 spufs_decr_status_get(struct spu_context *ctx)
{
	if (ctx->csa.priv2.mfc_control_RW & MFC_CNTL_DECREMENTER_RUNNING)
		return SPU_DECR_STATUS_RUNNING;
	else
		return 0;
}
DEFINE_SPUFS_ATTRIBUTE(spufs_decr_status_ops, spufs_decr_status_get,
		       spufs_decr_status_set, "0x%llx\n",
		       SPU_ATTR_ACQUIRE_SAVED);
Arnd Bergmann8b3d6662005-11-15 15:53:52 -05001945
/* "event_mask" attribute: SPU event mask in the saved local-store state. */
static int spufs_event_mask_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	lscsa->event_mask.slot[0] = (u32) val;
	spu_release_saved(ctx);

	return 0;
}

static u64 spufs_event_mask_get(struct spu_context *ctx)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return lscsa->event_mask.slot[0];
}

DEFINE_SPUFS_ATTRIBUTE(spufs_event_mask_ops, spufs_event_mask_get,
		       spufs_event_mask_set, "0x%llx\n",
		       SPU_ATTR_ACQUIRE_SAVED);
Arnd Bergmann8b3d6662005-11-15 15:53:52 -05001970
/*
 * "event_status" attribute (read-only): pending event data from saved
 * channel 0, or 0 when the channel count shows nothing pending.
 */
static u64 spufs_event_status_get(struct spu_context *ctx)
{
	struct spu_state *state = &ctx->csa;
	u64 stat;
	stat = state->spu_chnlcnt_RW[0];
	if (stat)
		return state->spu_chnldata_RW[0];
	return 0;
}
DEFINE_SPUFS_ATTRIBUTE(spufs_event_status_ops, spufs_event_status_get,
		       NULL, "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED)
Dwayne Grant McConnellb9e3bd72006-11-20 18:44:58 +01001982
/* "srr0" attribute: interrupt return address in the saved local store. */
static int spufs_srr0_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	lscsa->srr0.slot[0] = (u32) val;
	spu_release_saved(ctx);

	return 0;
}

static u64 spufs_srr0_get(struct spu_context *ctx)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return lscsa->srr0.slot[0];
}
DEFINE_SPUFS_ATTRIBUTE(spufs_srr0_ops, spufs_srr0_get, spufs_srr0_set,
		       "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED)
Arnd Bergmann8b3d6662005-11-15 15:53:52 -05002005
/*
 * "phys-id" attribute (read-only): physical SPU number while the context
 * is loaded, otherwise (unsigned int)-1.
 */
static u64 spufs_id_get(struct spu_context *ctx)
{
	u64 num;

	if (ctx->state == SPU_STATE_RUNNABLE)
		num = ctx->spu->number;
	else
		num = (unsigned int)-1;

	return num;
}
DEFINE_SPUFS_ATTRIBUTE(spufs_id_ops, spufs_id_get, NULL, "0x%llx\n",
		       SPU_ATTR_ACQUIRE)
arnd@arndb.de7b1a7012006-06-19 20:33:24 +02002019
/* "object-id" attribute: opaque cookie for debuggers; no locking taken. */
static u64 spufs_object_id_get(struct spu_context *ctx)
{
	/* FIXME: Should there really be no locking here? */
	return ctx->object_id;
}

static int spufs_object_id_set(void *data, u64 id)
{
	struct spu_context *ctx = data;
	ctx->object_id = id;

	return 0;
}

DEFINE_SPUFS_ATTRIBUTE(spufs_object_id_ops, spufs_object_id_get,
		       spufs_object_id_set, "0x%llx\n", SPU_ATTR_NOACQUIRE);
Arnd Bergmann86767272006-10-04 17:26:21 +02002036
/* "lslr" attribute (read-only): local store limit register, saved state. */
static u64 spufs_lslr_get(struct spu_context *ctx)
{
	return ctx->csa.priv2.spu_lslr_RW;
}
DEFINE_SPUFS_ATTRIBUTE(spufs_lslr_ops, spufs_lslr_get, NULL, "0x%llx\n",
		       SPU_ATTR_ACQUIRE_SAVED);
Dwayne Grant McConnellb9e3bd72006-11-20 18:44:58 +01002043
2044static int spufs_info_open(struct inode *inode, struct file *file)
2045{
2046 struct spufs_inode_info *i = SPUFS_I(inode);
2047 struct spu_context *ctx = i->i_ctx;
2048 file->private_data = ctx;
2049 return 0;
2050}
2051
Benjamin Herrenschmidtcbe709c2007-06-04 15:15:38 +10002052static int spufs_caps_show(struct seq_file *s, void *private)
2053{
2054 struct spu_context *ctx = s->private;
2055
2056 if (!(ctx->flags & SPU_CREATE_NOSCHED))
2057 seq_puts(s, "sched\n");
2058 if (!(ctx->flags & SPU_CREATE_ISOLATE))
2059 seq_puts(s, "step\n");
2060 return 0;
2061}
2062
/* Open "capabilities" as a single-record seq_file over the context. */
static int spufs_caps_open(struct inode *inode, struct file *file)
{
	return single_open(file, spufs_caps_show, SPUFS_I(inode)->i_ctx);
}
2067
/* "capabilities" file: read-only seq_file. */
static const struct file_operations spufs_caps_fops = {
	.open		= spufs_caps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
2074
/*
 * Copy the saved PU mailbox word to userspace; caller holds the saved
 * context and register_lock.  Returns 0 (EOF) when the mbox is empty.
 */
static ssize_t __spufs_mbox_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	u32 data;

	/* EOF if there's no entry in the mbox */
	if (!(ctx->csa.prob.mb_stat_R & 0x0000ff))
		return 0;

	data = ctx->csa.prob.pu_mb_R;

	return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
}
2088
/* "mbox_info" read: save the context, then read under register_lock. */
static ssize_t spufs_mbox_info_read(struct file *file, char __user *buf,
				   size_t len, loff_t *pos)
{
	int ret;
	struct spu_context *ctx = file->private_data;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_mbox_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return ret;
}
2108
/* "mbox_info" file: read-only snapshot of the saved PU mailbox. */
static const struct file_operations spufs_mbox_info_fops = {
	.open = spufs_info_open,
	.read = spufs_mbox_info_read,
	.llseek  = generic_file_llseek,
};
2114
/*
 * Copy the saved PU interrupt-mailbox word to userspace; caller holds
 * the saved context and register_lock.  Returns 0 (EOF) when empty.
 */
static ssize_t __spufs_ibox_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	u32 data;

	/* EOF if there's no entry in the ibox */
	if (!(ctx->csa.prob.mb_stat_R & 0xff0000))
		return 0;

	data = ctx->csa.priv2.puint_mb_R;

	return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
}
2128
/* "ibox_info" read: save the context, then read under register_lock. */
static ssize_t spufs_ibox_info_read(struct file *file, char __user *buf,
				   size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_ibox_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return ret;
}
2148
/* "ibox_info" file: read-only snapshot of the saved interrupt mailbox. */
static const struct file_operations spufs_ibox_info_fops = {
	.open = spufs_info_open,
	.read = spufs_ibox_info_read,
	.llseek  = generic_file_llseek,
};
2154
Dwayne Grant McConnellbf1ab972006-11-23 00:46:37 +01002155static ssize_t __spufs_wbox_info_read(struct spu_context *ctx,
2156 char __user *buf, size_t len, loff_t *pos)
2157{
2158 int i, cnt;
2159 u32 data[4];
2160 u32 wbox_stat;
2161
2162 wbox_stat = ctx->csa.prob.mb_stat_R;
2163 cnt = 4 - ((wbox_stat & 0x00ff00) >> 8);
2164 for (i = 0; i < cnt; i++) {
2165 data[i] = ctx->csa.spu_mailbox_data[i];
2166 }
2167
2168 return simple_read_from_buffer(buf, len, pos, &data,
2169 cnt * sizeof(u32));
2170}
2171
/* "wbox_info" read: save the context, then read under register_lock. */
static ssize_t spufs_wbox_info_read(struct file *file, char __user *buf,
				   size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_wbox_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return ret;
}
2191
/* "wbox_info" file: read-only snapshot of the saved write mailbox. */
static const struct file_operations spufs_wbox_info_fops = {
	.open = spufs_info_open,
	.read = spufs_wbox_info_read,
	.llseek  = generic_file_llseek,
};
2197
/*
 * Assemble a struct spu_dma_info from the saved context (tag query
 * state, channels 24/25/27, and the 16 SPU command-queue entries) and
 * copy it to userspace.  Caller holds the saved context and
 * register_lock.
 */
static ssize_t __spufs_dma_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	struct spu_dma_info info;
	struct mfc_cq_sr *qp, *spuqp;
	int i;

	info.dma_info_type = ctx->csa.priv2.spu_tag_status_query_RW;
	info.dma_info_mask = ctx->csa.lscsa->tag_mask.slot[0];
	info.dma_info_status = ctx->csa.spu_chnldata_RW[24];
	info.dma_info_stall_and_notify = ctx->csa.spu_chnldata_RW[25];
	info.dma_info_atomic_command_status = ctx->csa.spu_chnldata_RW[27];
	for (i = 0; i < 16; i++) {
		qp = &info.dma_info_command_data[i];
		spuqp = &ctx->csa.priv2.spuq[i];

		qp->mfc_cq_data0_RW = spuqp->mfc_cq_data0_RW;
		qp->mfc_cq_data1_RW = spuqp->mfc_cq_data1_RW;
		qp->mfc_cq_data2_RW = spuqp->mfc_cq_data2_RW;
		qp->mfc_cq_data3_RW = spuqp->mfc_cq_data3_RW;
	}

	return simple_read_from_buffer(buf, len, pos, &info,
				sizeof info);
}
2223
/* "dma_info" read: save the context, then read under register_lock. */
static ssize_t spufs_dma_info_read(struct file *file, char __user *buf,
			      size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_dma_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return ret;
}
2243
Arjan van de Ven5dfe4c92007-02-12 00:55:31 -08002244static const struct file_operations spufs_dma_info_fops = {
Dwayne Grant McConnellb9e3bd72006-11-20 18:44:58 +01002245 .open = spufs_info_open,
2246 .read = spufs_dma_info_read,
2247};
2248
Dwayne Grant McConnellbf1ab972006-11-23 00:46:37 +01002249static ssize_t __spufs_proxydma_info_read(struct spu_context *ctx,
2250 char __user *buf, size_t len, loff_t *pos)
Dwayne Grant McConnellb9e3bd72006-11-20 18:44:58 +01002251{
Dwayne Grant McConnellb9e3bd72006-11-20 18:44:58 +01002252 struct spu_proxydma_info info;
Dwayne Grant McConnellb9e3bd72006-11-20 18:44:58 +01002253 struct mfc_cq_sr *qp, *puqp;
Dwayne Grant McConnellbf1ab972006-11-23 00:46:37 +01002254 int ret = sizeof info;
Dwayne Grant McConnellb9e3bd72006-11-20 18:44:58 +01002255 int i;
2256
2257 if (len < ret)
2258 return -EINVAL;
2259
2260 if (!access_ok(VERIFY_WRITE, buf, len))
2261 return -EFAULT;
2262
Dwayne Grant McConnellb9e3bd72006-11-20 18:44:58 +01002263 info.proxydma_info_type = ctx->csa.prob.dma_querytype_RW;
2264 info.proxydma_info_mask = ctx->csa.prob.dma_querymask_RW;
2265 info.proxydma_info_status = ctx->csa.prob.dma_tagstatus_R;
2266 for (i = 0; i < 8; i++) {
2267 qp = &info.proxydma_info_command_data[i];
2268 puqp = &ctx->csa.priv2.puq[i];
2269
2270 qp->mfc_cq_data0_RW = puqp->mfc_cq_data0_RW;
2271 qp->mfc_cq_data1_RW = puqp->mfc_cq_data1_RW;
2272 qp->mfc_cq_data2_RW = puqp->mfc_cq_data2_RW;
2273 qp->mfc_cq_data3_RW = puqp->mfc_cq_data3_RW;
2274 }
Dwayne Grant McConnellbf1ab972006-11-23 00:46:37 +01002275
2276 return simple_read_from_buffer(buf, len, pos, &info,
2277 sizeof info);
2278}
2279
2280static ssize_t spufs_proxydma_info_read(struct file *file, char __user *buf,
2281 size_t len, loff_t *pos)
2282{
2283 struct spu_context *ctx = file->private_data;
2284 int ret;
2285
Christoph Hellwigc9101bd2007-12-20 16:39:59 +09002286 ret = spu_acquire_saved(ctx);
2287 if (ret)
2288 return ret;
Dwayne Grant McConnellbf1ab972006-11-23 00:46:37 +01002289 spin_lock(&ctx->csa.register_lock);
2290 ret = __spufs_proxydma_info_read(ctx, buf, len, pos);
Dwayne Grant McConnellb9e3bd72006-11-20 18:44:58 +01002291 spin_unlock(&ctx->csa.register_lock);
Christoph Hellwig27b1ea02007-07-20 21:39:34 +02002292 spu_release_saved(ctx);
Dwayne Grant McConnellb9e3bd72006-11-20 18:44:58 +01002293
Dwayne Grant McConnellb9e3bd72006-11-20 18:44:58 +01002294 return ret;
2295}
2296
/* "proxydma_info" file: read-only snapshot of the proxy DMA state. */
static const struct file_operations spufs_proxydma_info_fops = {
	.open = spufs_info_open,
	.read = spufs_proxydma_info_read,
};
2301
Christoph Hellwig476273a2007-06-29 10:58:01 +10002302static int spufs_show_tid(struct seq_file *s, void *private)
2303{
2304 struct spu_context *ctx = s->private;
2305
2306 seq_printf(s, "%d\n", ctx->tid);
2307 return 0;
2308}
2309
2310static int spufs_tid_open(struct inode *inode, struct file *file)
2311{
2312 return single_open(file, spufs_show_tid, SPUFS_I(inode)->i_ctx);
2313}
2314
/* "tid" file: standard single-record seq_file plumbing. */
static const struct file_operations spufs_tid_fops = {
	.open		= spufs_tid_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
2321
/*
 * Human-readable names for the utilization states reported by the
 * "stat" file.  Indexed by ctx->stats.util_state, so the order must
 * match enum spu_utilization_state.
 */
static const char *ctx_state_names[] = {
	"user", "system", "iowait", "loaded"
};
2325
2326static unsigned long long spufs_acct_time(struct spu_context *ctx,
Andre Detsch27ec41d2007-07-20 21:39:33 +02002327 enum spu_utilization_state state)
Christoph Hellwige9f8a0b2007-06-29 10:58:03 +10002328{
Andre Detsch27ec41d2007-07-20 21:39:33 +02002329 struct timespec ts;
2330 unsigned long long time = ctx->stats.times[state];
Christoph Hellwige9f8a0b2007-06-29 10:58:03 +10002331
Andre Detsch27ec41d2007-07-20 21:39:33 +02002332 /*
2333 * In general, utilization statistics are updated by the controlling
2334 * thread as the spu context moves through various well defined
2335 * state transitions, but if the context is lazily loaded its
2336 * utilization statistics are not updated as the controlling thread
2337 * is not tightly coupled with the execution of the spu context. We
2338 * calculate and apply the time delta from the last recorded state
2339 * of the spu context.
2340 */
2341 if (ctx->spu && ctx->stats.util_state == state) {
2342 ktime_get_ts(&ts);
2343 time += timespec_to_ns(&ts) - ctx->stats.tstamp;
2344 }
Christoph Hellwige9f8a0b2007-06-29 10:58:03 +10002345
Andre Detsch27ec41d2007-07-20 21:39:33 +02002346 return time / NSEC_PER_MSEC;
Christoph Hellwige9f8a0b2007-06-29 10:58:03 +10002347}
2348
2349static unsigned long long spufs_slb_flts(struct spu_context *ctx)
2350{
2351 unsigned long long slb_flts = ctx->stats.slb_flt;
2352
2353 if (ctx->state == SPU_STATE_RUNNABLE) {
2354 slb_flts += (ctx->spu->stats.slb_flt -
2355 ctx->stats.slb_flt_base);
2356 }
2357
2358 return slb_flts;
2359}
2360
2361static unsigned long long spufs_class2_intrs(struct spu_context *ctx)
2362{
2363 unsigned long long class2_intrs = ctx->stats.class2_intr;
2364
2365 if (ctx->state == SPU_STATE_RUNNABLE) {
2366 class2_intrs += (ctx->spu->stats.class2_intr -
2367 ctx->stats.class2_intr_base);
2368 }
2369
2370 return class2_intrs;
2371}
2372
2373
/*
 * seq_file show routine for "stat": emit one line of scheduler and
 * utilization statistics.  The context is acquired for the duration so
 * the individual counters are consistent with each other.
 */
static int spufs_show_stat(struct seq_file *s, void *private)
{
	struct spu_context *ctx = s->private;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;

	/* state name, four per-state times (ms), then raw event counts */
	seq_printf(s, "%s %llu %llu %llu %llu "
		"%llu %llu %llu %llu %llu %llu %llu %llu\n",
		ctx_state_names[ctx->stats.util_state],
		spufs_acct_time(ctx, SPU_UTIL_USER),
		spufs_acct_time(ctx, SPU_UTIL_SYSTEM),
		spufs_acct_time(ctx, SPU_UTIL_IOWAIT),
		spufs_acct_time(ctx, SPU_UTIL_IDLE_LOADED),
		ctx->stats.vol_ctx_switch,
		ctx->stats.invol_ctx_switch,
		spufs_slb_flts(ctx),
		ctx->stats.hash_flt,
		ctx->stats.min_flt,
		ctx->stats.maj_flt,
		spufs_class2_intrs(ctx),
		ctx->stats.libassist);
	spu_release(ctx);
	return 0;
}
2401
2402static int spufs_stat_open(struct inode *inode, struct file *file)
2403{
2404 return single_open(file, spufs_show_stat, SPUFS_I(inode)->i_ctx);
2405}
2406
/* "stat" file: standard single-record seq_file plumbing. */
static const struct file_operations spufs_stat_fops = {
	.open		= spufs_stat_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
2413
/*
 * Number of entries currently queued in the switch log ring buffer.
 * NOTE(review): correctness of the subtraction-then-modulo relies on
 * head/tail being unsigned (or head never lagging tail) -- confirm
 * against the struct switch_log definition in spufs.h.
 */
static inline int spufs_switch_log_used(struct spu_context *ctx)
{
	return (ctx->switch_log->head - ctx->switch_log->tail) %
		SWITCH_LOG_BUFSIZE;
}
2419
2420static inline int spufs_switch_log_avail(struct spu_context *ctx)
2421{
2422 return SWITCH_LOG_BUFSIZE - spufs_switch_log_used(ctx);
2423}
2424
/*
 * open() for "switch_log": allocate the per-context ring buffer of
 * context-switch events.  Only one log may exist per context at a
 * time; a second open fails with -EBUSY while ctx->switch_log is set.
 */
static int spufs_switch_log_open(struct inode *inode, struct file *file)
{
	struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
	int rc;

	rc = spu_acquire(ctx);
	if (rc)
		return rc;

	/* Another opener already owns the log for this context. */
	if (ctx->switch_log) {
		rc = -EBUSY;
		goto out;
	}

	/* Header plus a fixed-size array of SWITCH_LOG_BUFSIZE entries. */
	ctx->switch_log = kmalloc(sizeof(struct switch_log) +
		SWITCH_LOG_BUFSIZE * sizeof(struct switch_log_entry),
		GFP_KERNEL);

	if (!ctx->switch_log) {
		rc = -ENOMEM;
		goto out;
	}

	/* Start with an empty ring; readers sleep on ->wait for entries. */
	ctx->switch_log->head = ctx->switch_log->tail = 0;
	init_waitqueue_head(&ctx->switch_log->wait);
	rc = 0;

out:
	spu_release(ctx);
	return rc;
}
2456
2457static int spufs_switch_log_release(struct inode *inode, struct file *file)
2458{
2459 struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
2460 int rc;
2461
2462 rc = spu_acquire(ctx);
2463 if (rc)
2464 return rc;
2465
2466 kfree(ctx->switch_log);
2467 ctx->switch_log = NULL;
2468 spu_release(ctx);
Christoph Hellwig5158e9b2008-04-29 17:08:38 +10002469
2470 return 0;
Christoph Hellwig5158e9b2008-04-29 17:08:38 +10002471}
2472
2473static int switch_log_sprint(struct spu_context *ctx, char *tbuf, int n)
2474{
2475 struct switch_log_entry *p;
2476
2477 p = ctx->switch_log->log + ctx->switch_log->tail % SWITCH_LOG_BUFSIZE;
2478
2479 return snprintf(tbuf, n, "%u.%09u %d %u %u %llu\n",
2480 (unsigned int) p->tstamp.tv_sec,
2481 (unsigned int) p->tstamp.tv_nsec,
2482 p->spu_id,
2483 (unsigned int) p->type,
2484 (unsigned int) p->val,
2485 (unsigned long long) p->timebase);
2486}
2487
2488static ssize_t spufs_switch_log_read(struct file *file, char __user *buf,
2489 size_t len, loff_t *ppos)
2490{
2491 struct inode *inode = file->f_path.dentry->d_inode;
2492 struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
2493 int error = 0, cnt = 0;
2494
2495 if (!buf || len < 0)
2496 return -EINVAL;
2497
Jeremy Kerrf5ed0eb2008-10-16 10:03:46 +11002498 error = spu_acquire(ctx);
2499 if (error)
2500 return error;
2501
Christoph Hellwig5158e9b2008-04-29 17:08:38 +10002502 while (cnt < len) {
2503 char tbuf[128];
2504 int width;
2505
Jeremy Kerr14f693e2008-10-16 10:51:46 +11002506 if (spufs_switch_log_used(ctx) == 0) {
2507 if (cnt > 0) {
2508 /* If there's data ready to go, we can
2509 * just return straight away */
2510 break;
2511
2512 } else if (file->f_flags & O_NONBLOCK) {
Jeremy Kerrf5ed0eb2008-10-16 10:03:46 +11002513 error = -EAGAIN;
Christoph Hellwig5158e9b2008-04-29 17:08:38 +10002514 break;
Jeremy Kerr14f693e2008-10-16 10:51:46 +11002515
2516 } else {
2517 /* spufs_wait will drop the mutex and
2518 * re-acquire, but since we're in read(), the
2519 * file cannot be _released (and so
2520 * ctx->switch_log is stable).
2521 */
2522 error = spufs_wait(ctx->switch_log->wait,
2523 spufs_switch_log_used(ctx) > 0);
2524
2525 /* On error, spufs_wait returns without the
2526 * state mutex held */
2527 if (error)
2528 return error;
2529
2530 /* We may have had entries read from underneath
2531 * us while we dropped the mutex in spufs_wait,
2532 * so re-check */
2533 if (spufs_switch_log_used(ctx) == 0)
2534 continue;
Jeremy Kerrf5ed0eb2008-10-16 10:03:46 +11002535 }
Christoph Hellwig5158e9b2008-04-29 17:08:38 +10002536 }
2537
Christoph Hellwig5158e9b2008-04-29 17:08:38 +10002538 width = switch_log_sprint(ctx, tbuf, sizeof(tbuf));
Jeremy Kerrf5ed0eb2008-10-16 10:03:46 +11002539 if (width < len)
Christoph Hellwig5158e9b2008-04-29 17:08:38 +10002540 ctx->switch_log->tail =
2541 (ctx->switch_log->tail + 1) %
2542 SWITCH_LOG_BUFSIZE;
Jeremy Kerrf5ed0eb2008-10-16 10:03:46 +11002543 else
2544 /* If the record is greater than space available return
2545 * partial buffer (so far) */
Christoph Hellwig5158e9b2008-04-29 17:08:38 +10002546 break;
2547
2548 error = copy_to_user(buf + cnt, tbuf, width);
2549 if (error)
2550 break;
2551 cnt += width;
2552 }
2553
Jeremy Kerrf5ed0eb2008-10-16 10:03:46 +11002554 spu_release(ctx);
2555
Christoph Hellwig5158e9b2008-04-29 17:08:38 +10002556 return cnt == 0 ? error : cnt;
2557}
2558
2559static unsigned int spufs_switch_log_poll(struct file *file, poll_table *wait)
2560{
2561 struct inode *inode = file->f_path.dentry->d_inode;
2562 struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
2563 unsigned int mask = 0;
Jeremy Kerrf5ed0eb2008-10-16 10:03:46 +11002564 int rc;
Christoph Hellwig5158e9b2008-04-29 17:08:38 +10002565
2566 poll_wait(file, &ctx->switch_log->wait, wait);
2567
Jeremy Kerrf5ed0eb2008-10-16 10:03:46 +11002568 rc = spu_acquire(ctx);
2569 if (rc)
2570 return rc;
2571
Christoph Hellwig5158e9b2008-04-29 17:08:38 +10002572 if (spufs_switch_log_used(ctx) > 0)
2573 mask |= POLLIN;
2574
Jeremy Kerrf5ed0eb2008-10-16 10:03:46 +11002575 spu_release(ctx);
2576
Christoph Hellwig5158e9b2008-04-29 17:08:38 +10002577 return mask;
2578}
2579
/* "switch_log" file: blocking reader of context-switch events. */
static const struct file_operations spufs_switch_log_fops = {
	.owner		= THIS_MODULE,
	.open		= spufs_switch_log_open,
	.read		= spufs_switch_log_read,
	.poll		= spufs_switch_log_poll,
	.release	= spufs_switch_log_release,
};
2587
/**
 * Log a context switch event to a switch log reader.
 *
 * Must be called with ctx->state_mutex held.
 */
void spu_switch_log_notify(struct spu *spu, struct spu_context *ctx,
		u32 type, u32 val)
{
	/* No-op unless a reader has the switch_log file open. */
	if (!ctx->switch_log)
		return;

	/*
	 * Keep at least one slot free so head == tail always means
	 * "empty"; when the ring is that full the event is dropped.
	 */
	if (spufs_switch_log_avail(ctx) > 1) {
		struct switch_log_entry *p;

		p = ctx->switch_log->log + ctx->switch_log->head;
		ktime_get_ts(&p->tstamp);
		p->timebase = get_tb();
		p->spu_id = spu ? spu->number : -1;	/* -1: no physical SPU */
		p->type = type;
		p->val = val;

		ctx->switch_log->head =
			(ctx->switch_log->head + 1) % SWITCH_LOG_BUFSIZE;
	}

	/* Wake any reader sleeping in read() or poll(). */
	wake_up(&ctx->switch_log->wait);
}
Christoph Hellwige9f8a0b2007-06-29 10:58:03 +10002615
/*
 * seq_file show routine for the debug ".ctx" file: one line summarising
 * the context's scheduling state and key MFC/status registers.
 */
static int spufs_show_ctx(struct seq_file *s, void *private)
{
	struct spu_context *ctx = s->private;
	u64 mfc_control_RW;

	mutex_lock(&ctx->state_mutex);
	if (ctx->spu) {
		/* Loaded on a physical SPU: read the live MMIO register
		 * under the SPU's register lock. */
		struct spu *spu = ctx->spu;
		struct spu_priv2 __iomem *priv2 = spu->priv2;

		spin_lock_irq(&spu->register_lock);
		mfc_control_RW = in_be64(&priv2->mfc_control_RW);
		spin_unlock_irq(&spu->register_lock);
	} else {
		/* Saved context: use the value from the save area. */
		struct spu_state *csa = &ctx->csa;

		mfc_control_RW = csa->priv2.mfc_control_RW;
	}

	seq_printf(s, "%c flgs(%lx) sflgs(%lx) pri(%d) ts(%d) spu(%02d)"
		" %c %llx %llx %llx %llx %x %x\n",
		ctx->state == SPU_STATE_SAVED ? 'S' : 'R',
		ctx->flags,
		ctx->sched_flags,
		ctx->prio,
		ctx->time_slice,
		ctx->spu ? ctx->spu->number : -1,
		!list_empty(&ctx->rq) ? 'q' : ' ',	/* 'q': on run queue */
		ctx->csa.class_0_pending,
		ctx->csa.class_0_dar,
		ctx->csa.class_1_dsisr,
		mfc_control_RW,
		ctx->ops->runcntl_read(ctx),
		ctx->ops->status_read(ctx));

	mutex_unlock(&ctx->state_mutex);

	return 0;
}
2655
2656static int spufs_ctx_open(struct inode *inode, struct file *file)
2657{
2658 return single_open(file, spufs_show_ctx, SPUFS_I(inode)->i_ctx);
2659}
2660
/* ".ctx" debug file: standard single-record seq_file plumbing. */
static const struct file_operations spufs_ctx_fops = {
	.open		= spufs_ctx_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
2667
/*
 * Files created in each context directory of a normally scheduled
 * context: name, file_operations, mode, and (optionally) the size
 * reported to userspace (0/omitted means default).  Terminated by an
 * empty entry.
 */
const struct spufs_tree_descr spufs_dir_contents[] = {
	{ "capabilities", &spufs_caps_fops, 0444, },
	{ "mem",  &spufs_mem_fops,  0666, LS_SIZE, },
	{ "regs", &spufs_regs_fops, 0666, sizeof(struct spu_reg128[128]), },
	{ "mbox", &spufs_mbox_fops, 0444, },
	{ "ibox", &spufs_ibox_fops, 0444, },
	{ "wbox", &spufs_wbox_fops, 0222, },
	{ "mbox_stat", &spufs_mbox_stat_fops, 0444, sizeof(u32), },
	{ "ibox_stat", &spufs_ibox_stat_fops, 0444, sizeof(u32), },
	{ "wbox_stat", &spufs_wbox_stat_fops, 0444, sizeof(u32), },
	{ "signal1", &spufs_signal1_fops, 0666, },
	{ "signal2", &spufs_signal2_fops, 0666, },
	{ "signal1_type", &spufs_signal1_type, 0666, },
	{ "signal2_type", &spufs_signal2_type, 0666, },
	{ "cntl", &spufs_cntl_fops,  0666, },
	{ "fpcr", &spufs_fpcr_fops, 0666, sizeof(struct spu_reg128), },
	{ "lslr", &spufs_lslr_ops, 0444, },
	{ "mfc", &spufs_mfc_fops, 0666, },
	{ "mss", &spufs_mss_fops, 0666, },
	{ "npc", &spufs_npc_ops, 0666, },
	{ "srr0", &spufs_srr0_ops, 0666, },
	{ "decr", &spufs_decr_ops, 0666, },
	{ "decr_status", &spufs_decr_status_ops, 0666, },
	{ "event_mask", &spufs_event_mask_ops, 0666, },
	{ "event_status", &spufs_event_status_ops, 0444, },
	{ "psmap", &spufs_psmap_fops, 0666, SPUFS_PS_MAP_SIZE, },
	{ "phys-id", &spufs_id_ops, 0666, },
	{ "object-id", &spufs_object_id_ops, 0666, },
	{ "mbox_info", &spufs_mbox_info_fops, 0444, sizeof(u32), },
	{ "ibox_info", &spufs_ibox_info_fops, 0444, sizeof(u32), },
	{ "wbox_info", &spufs_wbox_info_fops, 0444, sizeof(u32), },
	{ "dma_info", &spufs_dma_info_fops, 0444,
		sizeof(struct spu_dma_info), },
	{ "proxydma_info", &spufs_proxydma_info_fops, 0444,
		sizeof(struct spu_proxydma_info)},
	{ "tid", &spufs_tid_fops, 0444, },
	{ "stat", &spufs_stat_fops, 0444, },
	{ "switch_log", &spufs_switch_log_fops, 0444 },
	{},
};
Mark Nutter5737edd2006-10-24 18:31:16 +02002708
/*
 * Reduced file set for non-scheduled (NOSCHED) contexts: files whose
 * implementation requires saving/restoring the context are omitted.
 */
const struct spufs_tree_descr spufs_dir_nosched_contents[] = {
	{ "capabilities", &spufs_caps_fops, 0444, },
	{ "mem",  &spufs_mem_fops,  0666, LS_SIZE, },
	{ "mbox", &spufs_mbox_fops, 0444, },
	{ "ibox", &spufs_ibox_fops, 0444, },
	{ "wbox", &spufs_wbox_fops, 0222, },
	{ "mbox_stat", &spufs_mbox_stat_fops, 0444, sizeof(u32), },
	{ "ibox_stat", &spufs_ibox_stat_fops, 0444, sizeof(u32), },
	{ "wbox_stat", &spufs_wbox_stat_fops, 0444, sizeof(u32), },
	{ "signal1", &spufs_signal1_nosched_fops, 0222, },
	{ "signal2", &spufs_signal2_nosched_fops, 0222, },
	{ "signal1_type", &spufs_signal1_type, 0666, },
	{ "signal2_type", &spufs_signal2_type, 0666, },
	{ "mss", &spufs_mss_fops, 0666, },
	{ "mfc", &spufs_mfc_fops, 0666, },
	{ "cntl", &spufs_cntl_fops,  0666, },
	{ "npc", &spufs_npc_ops, 0666, },
	{ "psmap", &spufs_psmap_fops, 0666, SPUFS_PS_MAP_SIZE, },
	{ "phys-id", &spufs_id_ops, 0666, },
	{ "object-id", &spufs_object_id_ops, 0666, },
	{ "tid", &spufs_tid_fops, 0444, },
	{ "stat", &spufs_stat_fops, 0444, },
	{},
};
2733
/* Debug-only entries added to every context directory. */
const struct spufs_tree_descr spufs_dir_debug_contents[] = {
	{ ".ctx", &spufs_ctx_fops, 0444, },
	{},
};
Dwayne Grant McConnellbf1ab972006-11-23 00:46:37 +01002738
/*
 * Per-file readers used when dumping SPU state into a core file.  Each
 * entry supplies either a raw read callback or a u64 getter, plus the
 * dump size.
 * NOTE(review): the magic size 19 presumably matches the formatted
 * length of a "0x%016llx\n" value (2 + 16 + newline) emitted for
 * getter-based attributes -- confirm against the coredump writer.
 */
const struct spufs_coredump_reader spufs_coredump_read[] = {
	{ "regs", __spufs_regs_read, NULL, sizeof(struct spu_reg128[128])},
	{ "fpcr", __spufs_fpcr_read, NULL, sizeof(struct spu_reg128) },
	{ "lslr", NULL, spufs_lslr_get, 19 },
	{ "decr", NULL, spufs_decr_get, 19 },
	{ "decr_status", NULL, spufs_decr_status_get, 19 },
	{ "mem", __spufs_mem_read, NULL, LS_SIZE, },
	{ "signal1", __spufs_signal1_read, NULL, sizeof(u32) },
	{ "signal1_type", NULL, spufs_signal1_type_get, 19 },
	{ "signal2", __spufs_signal2_read, NULL, sizeof(u32) },
	{ "signal2_type", NULL, spufs_signal2_type_get, 19 },
	{ "event_mask", NULL, spufs_event_mask_get, 19 },
	{ "event_status", NULL, spufs_event_status_get, 19 },
	{ "mbox_info", __spufs_mbox_info_read, NULL, sizeof(u32) },
	{ "ibox_info", __spufs_ibox_info_read, NULL, sizeof(u32) },
	{ "wbox_info", __spufs_wbox_info_read, NULL, 4 * sizeof(u32)},
	{ "dma_info", __spufs_dma_info_read, NULL, sizeof(struct spu_dma_info)},
	{ "proxydma_info", __spufs_proxydma_info_read,
			   NULL, sizeof(struct spu_proxydma_info)},
	{ "object-id", NULL, spufs_object_id_get, 19 },
	{ "npc", NULL, spufs_npc_get, 19 },
	{ NULL },
};