blob: 2936a0044c049681d6eeeb80eddbb3ab4ad9295a [file] [log] [blame]
Arnd Bergmann67207b92005-11-15 15:53:48 -05001/*
2 * SPU file system -- file contents
3 *
4 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
5 *
6 * Author: Arnd Bergmann <arndb@de.ibm.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 */
22
Arnd Bergmanna33a7d72006-03-23 00:00:11 +010023#undef DEBUG
24
Arnd Bergmann67207b92005-11-15 15:53:48 -050025#include <linux/fs.h>
26#include <linux/ioctl.h>
Paul Gortmaker4b16f8e2011-07-22 18:24:23 -040027#include <linux/export.h>
Arnd Bergmannd88cfff2005-12-05 22:52:22 -050028#include <linux/pagemap.h>
Arnd Bergmann67207b92005-11-15 15:53:48 -050029#include <linux/poll.h>
Arnd Bergmann51104592005-12-05 22:52:25 -050030#include <linux/ptrace.h>
Benjamin Herrenschmidtcbe709c2007-06-04 15:15:38 +100031#include <linux/seq_file.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090032#include <linux/slab.h>
Arnd Bergmann67207b92005-11-15 15:53:48 -050033
34#include <asm/io.h>
FUJITA Tomonoridfe1e092008-05-13 19:07:42 +100035#include <asm/time.h>
Arnd Bergmann67207b92005-11-15 15:53:48 -050036#include <asm/spu.h>
Dwayne Grant McConnellb9e3bd72006-11-20 18:44:58 +010037#include <asm/spu_info.h>
Arnd Bergmann67207b92005-11-15 15:53:48 -050038#include <asm/uaccess.h>
39
40#include "spufs.h"
Christoph Hellwigae142e02009-06-12 04:31:52 +000041#include "sputrace.h"
Arnd Bergmann67207b92005-11-15 15:53:48 -050042
Benjamin Herrenschmidt27d5bf22006-10-04 17:26:11 +020043#define SPUFS_MMAP_4K (PAGE_SIZE == 0x1000)
44
/*
 * Simple attribute files
 *
 * Per-open state for the DEFINE_SPUFS_SIMPLE_ATTRIBUTE() files below;
 * spufs' private analogue of the generic simple attribute support in
 * fs/libfs.c.
 */
struct spufs_attr {
	int (*get)(void *, u64 *);	/* fetch the current attribute value */
	int (*set)(void *, u64);	/* store a new attribute value */
	char get_buf[24];	/* enough to store a u64 and "\n\0" */
	char set_buf[24];	/* staging buffer for user-supplied input */
	void *data;		/* opaque pointer handed to get()/set() */
	const char *fmt;	/* format for read operation */
	struct mutex mutex;	/* protects access to these buffers */
};
55
56static int spufs_attr_open(struct inode *inode, struct file *file,
57 int (*get)(void *, u64 *), int (*set)(void *, u64),
58 const char *fmt)
59{
60 struct spufs_attr *attr;
61
62 attr = kmalloc(sizeof(*attr), GFP_KERNEL);
63 if (!attr)
64 return -ENOMEM;
65
66 attr->get = get;
67 attr->set = set;
68 attr->data = inode->i_private;
69 attr->fmt = fmt;
70 mutex_init(&attr->mutex);
71 file->private_data = attr;
72
73 return nonseekable_open(inode, file);
74}
75
76static int spufs_attr_release(struct inode *inode, struct file *file)
77{
78 kfree(file->private_data);
79 return 0;
80}
81
82static ssize_t spufs_attr_read(struct file *file, char __user *buf,
83 size_t len, loff_t *ppos)
84{
85 struct spufs_attr *attr;
86 size_t size;
87 ssize_t ret;
88
89 attr = file->private_data;
90 if (!attr->get)
91 return -EACCES;
92
93 ret = mutex_lock_interruptible(&attr->mutex);
94 if (ret)
95 return ret;
96
97 if (*ppos) { /* continued read */
98 size = strlen(attr->get_buf);
99 } else { /* first read */
100 u64 val;
101 ret = attr->get(attr->data, &val);
102 if (ret)
103 goto out;
104
105 size = scnprintf(attr->get_buf, sizeof(attr->get_buf),
106 attr->fmt, (unsigned long long)val);
107 }
108
109 ret = simple_read_from_buffer(buf, len, ppos, attr->get_buf, size);
110out:
111 mutex_unlock(&attr->mutex);
112 return ret;
113}
114
115static ssize_t spufs_attr_write(struct file *file, const char __user *buf,
116 size_t len, loff_t *ppos)
117{
118 struct spufs_attr *attr;
119 u64 val;
120 size_t size;
121 ssize_t ret;
122
123 attr = file->private_data;
124 if (!attr->set)
125 return -EACCES;
126
127 ret = mutex_lock_interruptible(&attr->mutex);
128 if (ret)
129 return ret;
130
131 ret = -EFAULT;
132 size = min(sizeof(attr->set_buf) - 1, len);
133 if (copy_from_user(attr->set_buf, buf, size))
134 goto out;
135
136 ret = len; /* claim we got the whole input */
137 attr->set_buf[size] = '\0';
138 val = simple_strtol(attr->set_buf, NULL, 0);
139 attr->set(attr->data, val);
140out:
141 mutex_unlock(&attr->mutex);
142 return ret;
143}
144
/*
 * Declare a <name>_open() helper plus a const file_operations instance
 * for a get/set attribute file — the spufs analogue of the generic
 * DEFINE_SIMPLE_ATTRIBUTE(), backed by the spufs_attr helpers above.
 * __simple_attr_check_format() is a compile-time check that __fmt can
 * format a u64.
 */
#define DEFINE_SPUFS_SIMPLE_ATTRIBUTE(__fops, __get, __set, __fmt)	\
static int __fops ## _open(struct inode *inode, struct file *file)	\
{									\
	__simple_attr_check_format(__fmt, 0ull);			\
	return spufs_attr_open(inode, file, __get, __set, __fmt);	\
}									\
static const struct file_operations __fops = {				\
	.open	 = __fops ## _open,					\
	.release = spufs_attr_release,					\
	.read	 = spufs_attr_read,					\
	.write	 = spufs_attr_write,					\
	.llseek  = generic_file_llseek,					\
};
158
Benjamin Herrenschmidtcbe709c2007-06-04 15:15:38 +1000159
Arnd Bergmann67207b92005-11-15 15:53:48 -0500160static int
161spufs_mem_open(struct inode *inode, struct file *file)
162{
163 struct spufs_inode_info *i = SPUFS_I(inode);
Mark Nutter6df10a82006-03-23 00:00:12 +0100164 struct spu_context *ctx = i->i_ctx;
Christoph Hellwig43c2bbd2007-04-23 21:08:07 +0200165
Christoph Hellwig47d3a5f2007-06-04 23:26:51 +1000166 mutex_lock(&ctx->mapping_lock);
Mark Nutter6df10a82006-03-23 00:00:12 +0100167 file->private_data = ctx;
Christoph Hellwig43c2bbd2007-04-23 21:08:07 +0200168 if (!i->i_openers++)
169 ctx->local_store = inode->i_mapping;
Christoph Hellwig47d3a5f2007-06-04 23:26:51 +1000170 mutex_unlock(&ctx->mapping_lock);
Christoph Hellwig43c2bbd2007-04-23 21:08:07 +0200171 return 0;
172}
173
174static int
175spufs_mem_release(struct inode *inode, struct file *file)
176{
177 struct spufs_inode_info *i = SPUFS_I(inode);
178 struct spu_context *ctx = i->i_ctx;
179
Christoph Hellwig47d3a5f2007-06-04 23:26:51 +1000180 mutex_lock(&ctx->mapping_lock);
Christoph Hellwig43c2bbd2007-04-23 21:08:07 +0200181 if (!--i->i_openers)
182 ctx->local_store = NULL;
Christoph Hellwig47d3a5f2007-06-04 23:26:51 +1000183 mutex_unlock(&ctx->mapping_lock);
Arnd Bergmann67207b92005-11-15 15:53:48 -0500184 return 0;
185}
186
187static ssize_t
Dwayne Grant McConnellbf1ab972006-11-23 00:46:37 +0100188__spufs_mem_read(struct spu_context *ctx, char __user *buffer,
189 size_t size, loff_t *pos)
190{
191 char *local_store = ctx->ops->get_ls(ctx);
192 return simple_read_from_buffer(buffer, size, pos, local_store,
193 LS_SIZE);
194}
195
196static ssize_t
Arnd Bergmann67207b92005-11-15 15:53:48 -0500197spufs_mem_read(struct file *file, char __user *buffer,
198 size_t size, loff_t *pos)
199{
Dwayne Grant McConnellbf1ab972006-11-23 00:46:37 +0100200 struct spu_context *ctx = file->private_data;
Arnd Bergmannaa0ed2b2007-03-10 00:05:35 +0100201 ssize_t ret;
Arnd Bergmann67207b92005-11-15 15:53:48 -0500202
Christoph Hellwigc9101bd2007-12-20 16:39:59 +0900203 ret = spu_acquire(ctx);
204 if (ret)
205 return ret;
Dwayne Grant McConnellbf1ab972006-11-23 00:46:37 +0100206 ret = __spufs_mem_read(ctx, buffer, size, pos);
Arnd Bergmann8b3d6662005-11-15 15:53:52 -0500207 spu_release(ctx);
Christoph Hellwigc9101bd2007-12-20 16:39:59 +0900208
Arnd Bergmann67207b92005-11-15 15:53:48 -0500209 return ret;
210}
211
212static ssize_t
213spufs_mem_write(struct file *file, const char __user *buffer,
Arnd Bergmannaa0ed2b2007-03-10 00:05:35 +0100214 size_t size, loff_t *ppos)
Arnd Bergmann67207b92005-11-15 15:53:48 -0500215{
216 struct spu_context *ctx = file->private_data;
Arnd Bergmann8b3d6662005-11-15 15:53:52 -0500217 char *local_store;
Arnd Bergmannaa0ed2b2007-03-10 00:05:35 +0100218 loff_t pos = *ppos;
Arnd Bergmann8b3d6662005-11-15 15:53:52 -0500219 int ret;
Arnd Bergmann67207b92005-11-15 15:53:48 -0500220
Arnd Bergmannaa0ed2b2007-03-10 00:05:35 +0100221 if (pos > LS_SIZE)
Arnd Bergmann67207b92005-11-15 15:53:48 -0500222 return -EFBIG;
Arnd Bergmann8b3d6662005-11-15 15:53:52 -0500223
Christoph Hellwigc9101bd2007-12-20 16:39:59 +0900224 ret = spu_acquire(ctx);
225 if (ret)
226 return ret;
227
Arnd Bergmann8b3d6662005-11-15 15:53:52 -0500228 local_store = ctx->ops->get_ls(ctx);
Akinobu Mita63c3b9d2010-12-24 20:03:56 +0000229 size = simple_write_to_buffer(local_store, LS_SIZE, ppos, buffer, size);
Arnd Bergmann8b3d6662005-11-15 15:53:52 -0500230 spu_release(ctx);
Arnd Bergmannaa0ed2b2007-03-10 00:05:35 +0100231
Arnd Bergmannaa0ed2b2007-03-10 00:05:35 +0100232 return size;
Arnd Bergmann67207b92005-11-15 15:53:48 -0500233}
234
Nick Pigginb1e22702008-06-10 09:26:08 +1000235static int
236spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
Arnd Bergmann8b3d6662005-11-15 15:53:52 -0500237{
Benjamin Herrenschmidtf1fa74f2007-05-08 16:27:29 +1000238 struct spu_context *ctx = vma->vm_file->private_data;
Nick Pigginb1e22702008-06-10 09:26:08 +1000239 unsigned long address = (unsigned long)vmf->virtual_address;
240 unsigned long pfn, offset;
241
Nick Pigginb1e22702008-06-10 09:26:08 +1000242 offset = vmf->pgoff << PAGE_SHIFT;
Masato Noguchi128b8542007-02-13 21:54:30 +0100243 if (offset >= LS_SIZE)
Nick Pigginb1e22702008-06-10 09:26:08 +1000244 return VM_FAULT_SIGBUS;
Masato Noguchi128b8542007-02-13 21:54:30 +0100245
Nick Pigginb1e22702008-06-10 09:26:08 +1000246 pr_debug("spufs_mem_mmap_fault address=0x%lx, offset=0x%lx\n",
247 address, offset);
Benjamin Herrenschmidtf1fa74f2007-05-08 16:27:29 +1000248
Christoph Hellwigc9101bd2007-12-20 16:39:59 +0900249 if (spu_acquire(ctx))
Nick Pigginb1e22702008-06-10 09:26:08 +1000250 return VM_FAULT_NOPAGE;
Arnd Bergmann8b3d6662005-11-15 15:53:52 -0500251
Arnd Bergmannac91cb82006-10-04 17:26:16 +0200252 if (ctx->state == SPU_STATE_SAVED) {
Benjamin Herrenschmidt64b3d0e2008-12-18 19:13:51 +0000253 vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
Benjamin Herrenschmidt78bde532007-02-13 11:46:06 +1100254 pfn = vmalloc_to_pfn(ctx->csa.lscsa->ls + offset);
Arnd Bergmannac91cb82006-10-04 17:26:16 +0200255 } else {
Benjamin Herrenschmidt64b3d0e2008-12-18 19:13:51 +0000256 vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);
Benjamin Herrenschmidt78bde532007-02-13 11:46:06 +1100257 pfn = (ctx->spu->local_store_phys + offset) >> PAGE_SHIFT;
Arnd Bergmannac91cb82006-10-04 17:26:16 +0200258 }
Benjamin Herrenschmidt78bde532007-02-13 11:46:06 +1100259 vm_insert_pfn(vma, address, pfn);
260
Arnd Bergmann8b3d6662005-11-15 15:53:52 -0500261 spu_release(ctx);
262
Nick Pigginb1e22702008-06-10 09:26:08 +1000263 return VM_FAULT_NOPAGE;
Arnd Bergmann8b3d6662005-11-15 15:53:52 -0500264}
265
Benjamin Herrenschmidta3528942008-07-23 21:27:09 -0700266static int spufs_mem_mmap_access(struct vm_area_struct *vma,
267 unsigned long address,
268 void *buf, int len, int write)
269{
270 struct spu_context *ctx = vma->vm_file->private_data;
271 unsigned long offset = address - vma->vm_start;
272 char *local_store;
273
274 if (write && !(vma->vm_flags & VM_WRITE))
275 return -EACCES;
276 if (spu_acquire(ctx))
277 return -EINTR;
278 if ((offset + len) > vma->vm_end)
279 len = vma->vm_end - offset;
280 local_store = ctx->ops->get_ls(ctx);
281 if (write)
282 memcpy_toio(local_store + offset, buf, len);
283 else
284 memcpy_fromio(buf, local_store + offset, len);
285 spu_release(ctx);
286 return len;
287}
Benjamin Herrenschmidt78bde532007-02-13 11:46:06 +1100288
Alexey Dobriyanf0f37e22009-09-27 22:29:37 +0400289static const struct vm_operations_struct spufs_mem_mmap_vmops = {
Nick Pigginb1e22702008-06-10 09:26:08 +1000290 .fault = spufs_mem_mmap_fault,
Benjamin Herrenschmidta3528942008-07-23 21:27:09 -0700291 .access = spufs_mem_mmap_access,
Arnd Bergmann8b3d6662005-11-15 15:53:52 -0500292};
293
Benjamin Herrenschmidtf1fa74f2007-05-08 16:27:29 +1000294static int spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
Arnd Bergmann67207b92005-11-15 15:53:48 -0500295{
Arnd Bergmann8b3d6662005-11-15 15:53:52 -0500296 if (!(vma->vm_flags & VM_SHARED))
297 return -EINVAL;
Arnd Bergmann67207b92005-11-15 15:53:48 -0500298
Benjamin Herrenschmidt78bde532007-02-13 11:46:06 +1100299 vma->vm_flags |= VM_IO | VM_PFNMAP;
Benjamin Herrenschmidt64b3d0e2008-12-18 19:13:51 +0000300 vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);
Arnd Bergmann8b3d6662005-11-15 15:53:52 -0500301
302 vma->vm_ops = &spufs_mem_mmap_vmops;
Arnd Bergmann67207b92005-11-15 15:53:48 -0500303 return 0;
304}
305
Arjan van de Ven5dfe4c92007-02-12 00:55:31 -0800306static const struct file_operations spufs_mem_fops = {
Jeremy Kerr70225432007-06-29 10:58:00 +1000307 .open = spufs_mem_open,
308 .release = spufs_mem_release,
309 .read = spufs_mem_read,
310 .write = spufs_mem_write,
311 .llseek = generic_file_llseek,
312 .mmap = spufs_mem_mmap,
Arnd Bergmann8b3d6662005-11-15 15:53:52 -0500313};
314
Nick Pigginb1e22702008-06-10 09:26:08 +1000315static int spufs_ps_fault(struct vm_area_struct *vma,
316 struct vm_fault *vmf,
Benjamin Herrenschmidt78bde532007-02-13 11:46:06 +1100317 unsigned long ps_offs,
Benjamin Herrenschmidt27d5bf22006-10-04 17:26:11 +0200318 unsigned long ps_size)
Mark Nutter6df10a82006-03-23 00:00:12 +0100319{
Mark Nutter6df10a82006-03-23 00:00:12 +0100320 struct spu_context *ctx = vma->vm_file->private_data;
Nick Pigginb1e22702008-06-10 09:26:08 +1000321 unsigned long area, offset = vmf->pgoff << PAGE_SHIFT;
Christoph Hellwigeebead52008-02-08 15:50:41 +1100322 int ret = 0;
Mark Nutter6df10a82006-03-23 00:00:12 +0100323
Nick Pigginb1e22702008-06-10 09:26:08 +1000324 spu_context_nospu_trace(spufs_ps_fault__enter, ctx);
Christoph Hellwig038200c2008-01-11 15:03:26 +1100325
Benjamin Herrenschmidt27d5bf22006-10-04 17:26:11 +0200326 if (offset >= ps_size)
Nick Pigginb1e22702008-06-10 09:26:08 +1000327 return VM_FAULT_SIGBUS;
Mark Nutter6df10a82006-03-23 00:00:12 +0100328
Jeremy Kerr60657262008-11-11 10:22:22 +1100329 if (fatal_signal_pending(current))
330 return VM_FAULT_SIGBUS;
331
Arnd Bergmann33bfd7a2007-12-20 16:39:59 +0900332 /*
Jeremy Kerrd5883132008-02-26 13:31:42 +1100333 * Because we release the mmap_sem, the context may be destroyed while
334 * we're in spu_wait. Grab an extra reference so it isn't destroyed
335 * in the meantime.
336 */
337 get_spu_context(ctx);
338
339 /*
Arnd Bergmann33bfd7a2007-12-20 16:39:59 +0900340 * We have to wait for context to be loaded before we have
341 * pages to hand out to the user, but we don't want to wait
342 * with the mmap_sem held.
343 * It is possible to drop the mmap_sem here, but then we need
Nick Pigginb1e22702008-06-10 09:26:08 +1000344 * to return VM_FAULT_NOPAGE because the mappings may have
Arnd Bergmann33bfd7a2007-12-20 16:39:59 +0900345 * hanged.
Benjamin Herrenschmidt78bde532007-02-13 11:46:06 +1100346 */
Christoph Hellwigc9101bd2007-12-20 16:39:59 +0900347 if (spu_acquire(ctx))
Jeremy Kerrd5883132008-02-26 13:31:42 +1100348 goto refault;
Christoph Hellwigc9101bd2007-12-20 16:39:59 +0900349
Arnd Bergmann33bfd7a2007-12-20 16:39:59 +0900350 if (ctx->state == SPU_STATE_SAVED) {
351 up_read(&current->mm->mmap_sem);
Nick Pigginb1e22702008-06-10 09:26:08 +1000352 spu_context_nospu_trace(spufs_ps_fault__sleep, ctx);
Christoph Hellwigeebead52008-02-08 15:50:41 +1100353 ret = spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
Nick Pigginb1e22702008-06-10 09:26:08 +1000354 spu_context_trace(spufs_ps_fault__wake, ctx, ctx->spu);
Arnd Bergmann33bfd7a2007-12-20 16:39:59 +0900355 down_read(&current->mm->mmap_sem);
Christoph Hellwigc9101bd2007-12-20 16:39:59 +0900356 } else {
357 area = ctx->spu->problem_phys + ps_offs;
Nick Pigginb1e22702008-06-10 09:26:08 +1000358 vm_insert_pfn(vma, (unsigned long)vmf->virtual_address,
359 (area + offset) >> PAGE_SHIFT);
360 spu_context_trace(spufs_ps_fault__insert, ctx, ctx->spu);
Arnd Bergmann33bfd7a2007-12-20 16:39:59 +0900361 }
Mark Nutter6df10a82006-03-23 00:00:12 +0100362
Christoph Hellwigeebead52008-02-08 15:50:41 +1100363 if (!ret)
364 spu_release(ctx);
Jeremy Kerrd5883132008-02-26 13:31:42 +1100365
366refault:
367 put_spu_context(ctx);
Nick Pigginb1e22702008-06-10 09:26:08 +1000368 return VM_FAULT_NOPAGE;
Mark Nutter6df10a82006-03-23 00:00:12 +0100369}
370
Benjamin Herrenschmidt27d5bf22006-10-04 17:26:11 +0200371#if SPUFS_MMAP_4K
Nick Pigginb1e22702008-06-10 09:26:08 +1000372static int spufs_cntl_mmap_fault(struct vm_area_struct *vma,
373 struct vm_fault *vmf)
Mark Nutter6df10a82006-03-23 00:00:12 +0100374{
Jeremy Kerr87ff6092008-07-01 10:22:50 +1000375 return spufs_ps_fault(vma, vmf, 0x4000, SPUFS_CNTL_MAP_SIZE);
Mark Nutter6df10a82006-03-23 00:00:12 +0100376}
377
Alexey Dobriyanf0f37e22009-09-27 22:29:37 +0400378static const struct vm_operations_struct spufs_cntl_mmap_vmops = {
Nick Pigginb1e22702008-06-10 09:26:08 +1000379 .fault = spufs_cntl_mmap_fault,
Mark Nutter6df10a82006-03-23 00:00:12 +0100380};
381
382/*
383 * mmap support for problem state control area [0x4000 - 0x4fff].
Mark Nutter6df10a82006-03-23 00:00:12 +0100384 */
385static int spufs_cntl_mmap(struct file *file, struct vm_area_struct *vma)
386{
387 if (!(vma->vm_flags & VM_SHARED))
388 return -EINVAL;
389
Benjamin Herrenschmidt78bde532007-02-13 11:46:06 +1100390 vma->vm_flags |= VM_IO | VM_PFNMAP;
Benjamin Herrenschmidt64b3d0e2008-12-18 19:13:51 +0000391 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
Mark Nutter6df10a82006-03-23 00:00:12 +0100392
393 vma->vm_ops = &spufs_cntl_mmap_vmops;
394 return 0;
395}
Benjamin Herrenschmidt27d5bf22006-10-04 17:26:11 +0200396#else /* SPUFS_MMAP_4K */
397#define spufs_cntl_mmap NULL
398#endif /* !SPUFS_MMAP_4K */
Mark Nutter6df10a82006-03-23 00:00:12 +0100399
Christoph Hellwig197b1a82007-12-20 16:39:59 +0900400static int spufs_cntl_get(void *data, u64 *val)
Arnd Bergmanne1dbff22006-10-04 17:26:19 +0200401{
402 struct spu_context *ctx = data;
Christoph Hellwigc9101bd2007-12-20 16:39:59 +0900403 int ret;
Arnd Bergmanne1dbff22006-10-04 17:26:19 +0200404
Christoph Hellwigc9101bd2007-12-20 16:39:59 +0900405 ret = spu_acquire(ctx);
406 if (ret)
407 return ret;
Christoph Hellwig197b1a82007-12-20 16:39:59 +0900408 *val = ctx->ops->status_read(ctx);
Arnd Bergmanne1dbff22006-10-04 17:26:19 +0200409 spu_release(ctx);
410
Christoph Hellwig197b1a82007-12-20 16:39:59 +0900411 return 0;
Arnd Bergmanne1dbff22006-10-04 17:26:19 +0200412}
413
Christoph Hellwig197b1a82007-12-20 16:39:59 +0900414static int spufs_cntl_set(void *data, u64 val)
Arnd Bergmanne1dbff22006-10-04 17:26:19 +0200415{
416 struct spu_context *ctx = data;
Christoph Hellwigc9101bd2007-12-20 16:39:59 +0900417 int ret;
Arnd Bergmanne1dbff22006-10-04 17:26:19 +0200418
Christoph Hellwigc9101bd2007-12-20 16:39:59 +0900419 ret = spu_acquire(ctx);
420 if (ret)
421 return ret;
Arnd Bergmanne1dbff22006-10-04 17:26:19 +0200422 ctx->ops->runcntl_write(ctx, val);
423 spu_release(ctx);
Christoph Hellwig197b1a82007-12-20 16:39:59 +0900424
425 return 0;
Arnd Bergmanne1dbff22006-10-04 17:26:19 +0200426}
427
Mark Nutter6df10a82006-03-23 00:00:12 +0100428static int spufs_cntl_open(struct inode *inode, struct file *file)
429{
430 struct spufs_inode_info *i = SPUFS_I(inode);
431 struct spu_context *ctx = i->i_ctx;
432
Christoph Hellwig47d3a5f2007-06-04 23:26:51 +1000433 mutex_lock(&ctx->mapping_lock);
Mark Nutter6df10a82006-03-23 00:00:12 +0100434 file->private_data = ctx;
Christoph Hellwig43c2bbd2007-04-23 21:08:07 +0200435 if (!i->i_openers++)
436 ctx->cntl = inode->i_mapping;
Christoph Hellwig47d3a5f2007-06-04 23:26:51 +1000437 mutex_unlock(&ctx->mapping_lock);
Christoph Hellwig8b88b092008-02-08 04:20:26 -0800438 return simple_attr_open(inode, file, spufs_cntl_get,
Arnd Bergmanne1dbff22006-10-04 17:26:19 +0200439 spufs_cntl_set, "0x%08lx");
Mark Nutter6df10a82006-03-23 00:00:12 +0100440}
441
Christoph Hellwig43c2bbd2007-04-23 21:08:07 +0200442static int
443spufs_cntl_release(struct inode *inode, struct file *file)
444{
445 struct spufs_inode_info *i = SPUFS_I(inode);
446 struct spu_context *ctx = i->i_ctx;
447
Christoph Hellwig74bedc42008-02-08 04:20:28 -0800448 simple_attr_release(inode, file);
Christoph Hellwig43c2bbd2007-04-23 21:08:07 +0200449
Christoph Hellwig47d3a5f2007-06-04 23:26:51 +1000450 mutex_lock(&ctx->mapping_lock);
Christoph Hellwig43c2bbd2007-04-23 21:08:07 +0200451 if (!--i->i_openers)
452 ctx->cntl = NULL;
Christoph Hellwig47d3a5f2007-06-04 23:26:51 +1000453 mutex_unlock(&ctx->mapping_lock);
Christoph Hellwig43c2bbd2007-04-23 21:08:07 +0200454 return 0;
455}
456
Arjan van de Ven5dfe4c92007-02-12 00:55:31 -0800457static const struct file_operations spufs_cntl_fops = {
Mark Nutter6df10a82006-03-23 00:00:12 +0100458 .open = spufs_cntl_open,
Christoph Hellwig43c2bbd2007-04-23 21:08:07 +0200459 .release = spufs_cntl_release,
Christoph Hellwig8b88b092008-02-08 04:20:26 -0800460 .read = simple_attr_read,
461 .write = simple_attr_write,
Arnd Bergmannfc153512010-09-14 10:22:33 +0000462 .llseek = generic_file_llseek,
Mark Nutter6df10a82006-03-23 00:00:12 +0100463 .mmap = spufs_cntl_mmap,
Mark Nutter6df10a82006-03-23 00:00:12 +0100464};
465
Arnd Bergmann8b3d6662005-11-15 15:53:52 -0500466static int
467spufs_regs_open(struct inode *inode, struct file *file)
468{
469 struct spufs_inode_info *i = SPUFS_I(inode);
470 file->private_data = i->i_ctx;
471 return 0;
472}
473
474static ssize_t
Dwayne Grant McConnellbf1ab972006-11-23 00:46:37 +0100475__spufs_regs_read(struct spu_context *ctx, char __user *buffer,
476 size_t size, loff_t *pos)
477{
478 struct spu_lscsa *lscsa = ctx->csa.lscsa;
479 return simple_read_from_buffer(buffer, size, pos,
480 lscsa->gprs, sizeof lscsa->gprs);
481}
482
483static ssize_t
Arnd Bergmann8b3d6662005-11-15 15:53:52 -0500484spufs_regs_read(struct file *file, char __user *buffer,
485 size_t size, loff_t *pos)
486{
Arnd Bergmann8b3d6662005-11-15 15:53:52 -0500487 int ret;
Dwayne Grant McConnellbf1ab972006-11-23 00:46:37 +0100488 struct spu_context *ctx = file->private_data;
Arnd Bergmann8b3d6662005-11-15 15:53:52 -0500489
Jeremy Kerrf027faa2008-10-16 11:11:12 +1100490 /* pre-check for file position: if we'd return EOF, there's no point
491 * causing a deschedule */
492 if (*pos >= sizeof(ctx->csa.lscsa->gprs))
493 return 0;
494
Christoph Hellwigc9101bd2007-12-20 16:39:59 +0900495 ret = spu_acquire_saved(ctx);
496 if (ret)
497 return ret;
Dwayne Grant McConnellbf1ab972006-11-23 00:46:37 +0100498 ret = __spufs_regs_read(ctx, buffer, size, pos);
Christoph Hellwig27b1ea02007-07-20 21:39:34 +0200499 spu_release_saved(ctx);
Arnd Bergmann8b3d6662005-11-15 15:53:52 -0500500 return ret;
501}
502
503static ssize_t
504spufs_regs_write(struct file *file, const char __user *buffer,
505 size_t size, loff_t *pos)
506{
507 struct spu_context *ctx = file->private_data;
508 struct spu_lscsa *lscsa = ctx->csa.lscsa;
509 int ret;
510
Jeremy Kerrd2198892009-03-03 19:38:07 +0000511 if (*pos >= sizeof(lscsa->gprs))
Arnd Bergmann8b3d6662005-11-15 15:53:52 -0500512 return -EFBIG;
Jeremy Kerrd2198892009-03-03 19:38:07 +0000513
Christoph Hellwigc9101bd2007-12-20 16:39:59 +0900514 ret = spu_acquire_saved(ctx);
515 if (ret)
516 return ret;
Arnd Bergmann8b3d6662005-11-15 15:53:52 -0500517
Akinobu Mita63c3b9d2010-12-24 20:03:56 +0000518 size = simple_write_to_buffer(lscsa->gprs, sizeof(lscsa->gprs), pos,
519 buffer, size);
Arnd Bergmann8b3d6662005-11-15 15:53:52 -0500520
Christoph Hellwig27b1ea02007-07-20 21:39:34 +0200521 spu_release_saved(ctx);
Akinobu Mita63c3b9d2010-12-24 20:03:56 +0000522 return size;
Arnd Bergmann8b3d6662005-11-15 15:53:52 -0500523}
524
Arjan van de Ven5dfe4c92007-02-12 00:55:31 -0800525static const struct file_operations spufs_regs_fops = {
Arnd Bergmann8b3d6662005-11-15 15:53:52 -0500526 .open = spufs_regs_open,
527 .read = spufs_regs_read,
528 .write = spufs_regs_write,
529 .llseek = generic_file_llseek,
530};
531
532static ssize_t
Dwayne Grant McConnellbf1ab972006-11-23 00:46:37 +0100533__spufs_fpcr_read(struct spu_context *ctx, char __user * buffer,
534 size_t size, loff_t * pos)
535{
536 struct spu_lscsa *lscsa = ctx->csa.lscsa;
537 return simple_read_from_buffer(buffer, size, pos,
538 &lscsa->fpcr, sizeof(lscsa->fpcr));
539}
540
541static ssize_t
Arnd Bergmann8b3d6662005-11-15 15:53:52 -0500542spufs_fpcr_read(struct file *file, char __user * buffer,
543 size_t size, loff_t * pos)
544{
Arnd Bergmann8b3d6662005-11-15 15:53:52 -0500545 int ret;
Dwayne Grant McConnellbf1ab972006-11-23 00:46:37 +0100546 struct spu_context *ctx = file->private_data;
Arnd Bergmann8b3d6662005-11-15 15:53:52 -0500547
Christoph Hellwigc9101bd2007-12-20 16:39:59 +0900548 ret = spu_acquire_saved(ctx);
549 if (ret)
550 return ret;
Dwayne Grant McConnellbf1ab972006-11-23 00:46:37 +0100551 ret = __spufs_fpcr_read(ctx, buffer, size, pos);
Christoph Hellwig27b1ea02007-07-20 21:39:34 +0200552 spu_release_saved(ctx);
Arnd Bergmann8b3d6662005-11-15 15:53:52 -0500553 return ret;
554}
555
556static ssize_t
557spufs_fpcr_write(struct file *file, const char __user * buffer,
558 size_t size, loff_t * pos)
559{
560 struct spu_context *ctx = file->private_data;
561 struct spu_lscsa *lscsa = ctx->csa.lscsa;
562 int ret;
563
Jeremy Kerrd2198892009-03-03 19:38:07 +0000564 if (*pos >= sizeof(lscsa->fpcr))
Arnd Bergmann8b3d6662005-11-15 15:53:52 -0500565 return -EFBIG;
Christoph Hellwigc9101bd2007-12-20 16:39:59 +0900566
567 ret = spu_acquire_saved(ctx);
568 if (ret)
569 return ret;
570
Akinobu Mita63c3b9d2010-12-24 20:03:56 +0000571 size = simple_write_to_buffer(&lscsa->fpcr, sizeof(lscsa->fpcr), pos,
572 buffer, size);
Arnd Bergmann8b3d6662005-11-15 15:53:52 -0500573
Christoph Hellwig27b1ea02007-07-20 21:39:34 +0200574 spu_release_saved(ctx);
Akinobu Mita63c3b9d2010-12-24 20:03:56 +0000575 return size;
Arnd Bergmann8b3d6662005-11-15 15:53:52 -0500576}
577
Arjan van de Ven5dfe4c92007-02-12 00:55:31 -0800578static const struct file_operations spufs_fpcr_fops = {
Arnd Bergmann8b3d6662005-11-15 15:53:52 -0500579 .open = spufs_regs_open,
580 .read = spufs_fpcr_read,
581 .write = spufs_fpcr_write,
582 .llseek = generic_file_llseek,
Arnd Bergmann67207b92005-11-15 15:53:48 -0500583};
584
585/* generic open function for all pipe-like files */
586static int spufs_pipe_open(struct inode *inode, struct file *file)
587{
588 struct spufs_inode_info *i = SPUFS_I(inode);
589 file->private_data = i->i_ctx;
590
591 return nonseekable_open(inode, file);
592}
593
Arnd Bergmanncdcc89b2006-10-04 17:26:17 +0200594/*
595 * Read as many bytes from the mailbox as possible, until
596 * one of the conditions becomes true:
597 *
598 * - no more data available in the mailbox
599 * - end of the user provided buffer
600 * - end of the mapped area
601 */
Arnd Bergmann67207b92005-11-15 15:53:48 -0500602static ssize_t spufs_mbox_read(struct file *file, char __user *buf,
603 size_t len, loff_t *pos)
604{
Arnd Bergmann8b3d6662005-11-15 15:53:52 -0500605 struct spu_context *ctx = file->private_data;
Arnd Bergmanncdcc89b2006-10-04 17:26:17 +0200606 u32 mbox_data, __user *udata;
607 ssize_t count;
Arnd Bergmann67207b92005-11-15 15:53:48 -0500608
609 if (len < 4)
610 return -EINVAL;
611
Arnd Bergmanncdcc89b2006-10-04 17:26:17 +0200612 if (!access_ok(VERIFY_WRITE, buf, len))
Arnd Bergmann67207b92005-11-15 15:53:48 -0500613 return -EFAULT;
614
Arnd Bergmanncdcc89b2006-10-04 17:26:17 +0200615 udata = (void __user *)buf;
616
Christoph Hellwigc9101bd2007-12-20 16:39:59 +0900617 count = spu_acquire(ctx);
618 if (count)
619 return count;
620
Arnd Bergmann274cef52006-10-24 18:01:42 +0200621 for (count = 0; (count + 4) <= len; count += 4, udata++) {
Arnd Bergmanncdcc89b2006-10-04 17:26:17 +0200622 int ret;
623 ret = ctx->ops->mbox_read(ctx, &mbox_data);
624 if (ret == 0)
625 break;
626
627 /*
628 * at the end of the mapped area, we can fault
629 * but still need to return the data we have
630 * read successfully so far.
631 */
632 ret = __put_user(mbox_data, udata);
633 if (ret) {
634 if (!count)
635 count = -EFAULT;
636 break;
637 }
638 }
639 spu_release(ctx);
640
641 if (!count)
642 count = -EAGAIN;
643
644 return count;
Arnd Bergmann67207b92005-11-15 15:53:48 -0500645}
646
Arjan van de Ven5dfe4c92007-02-12 00:55:31 -0800647static const struct file_operations spufs_mbox_fops = {
Arnd Bergmann67207b92005-11-15 15:53:48 -0500648 .open = spufs_pipe_open,
649 .read = spufs_mbox_read,
Arnd Bergmannfc153512010-09-14 10:22:33 +0000650 .llseek = no_llseek,
Arnd Bergmann67207b92005-11-15 15:53:48 -0500651};
652
653static ssize_t spufs_mbox_stat_read(struct file *file, char __user *buf,
654 size_t len, loff_t *pos)
655{
Arnd Bergmann8b3d6662005-11-15 15:53:52 -0500656 struct spu_context *ctx = file->private_data;
Christoph Hellwigc9101bd2007-12-20 16:39:59 +0900657 ssize_t ret;
Arnd Bergmann67207b92005-11-15 15:53:48 -0500658 u32 mbox_stat;
659
660 if (len < 4)
661 return -EINVAL;
662
Christoph Hellwigc9101bd2007-12-20 16:39:59 +0900663 ret = spu_acquire(ctx);
664 if (ret)
665 return ret;
Arnd Bergmann8b3d6662005-11-15 15:53:52 -0500666
667 mbox_stat = ctx->ops->mbox_stat_read(ctx) & 0xff;
668
669 spu_release(ctx);
Arnd Bergmann67207b92005-11-15 15:53:48 -0500670
671 if (copy_to_user(buf, &mbox_stat, sizeof mbox_stat))
672 return -EFAULT;
673
674 return 4;
675}
676
Arjan van de Ven5dfe4c92007-02-12 00:55:31 -0800677static const struct file_operations spufs_mbox_stat_fops = {
Arnd Bergmann67207b92005-11-15 15:53:48 -0500678 .open = spufs_pipe_open,
679 .read = spufs_mbox_stat_read,
Arnd Bergmannfc153512010-09-14 10:22:33 +0000680 .llseek = no_llseek,
Arnd Bergmann67207b92005-11-15 15:53:48 -0500681};
682
/*
 * low-level ibox access function
 *
 * Returns nonzero when an interrupt-mailbox entry was available and
 * stored in *data, zero when the mailbox was empty (see the callers
 * in spufs_ibox_read() below).
 */
size_t spu_ibox_read(struct spu_context *ctx, u32 *data)
{
	return ctx->ops->ibox_read(ctx, data);
}
Arnd Bergmann67207b92005-11-15 15:53:48 -0500688
689static int spufs_ibox_fasync(int fd, struct file *file, int on)
690{
Arnd Bergmann8b3d6662005-11-15 15:53:52 -0500691 struct spu_context *ctx = file->private_data;
692
693 return fasync_helper(fd, file, on, &ctx->ibox_fasync);
694}
695
696/* interrupt-level ibox callback function. */
697void spufs_ibox_callback(struct spu *spu)
698{
699 struct spu_context *ctx = spu->ctx;
700
Luke Browninge65c2f62007-12-20 16:39:59 +0900701 if (!ctx)
702 return;
703
Arnd Bergmann8b3d6662005-11-15 15:53:52 -0500704 wake_up_all(&ctx->ibox_wq);
705 kill_fasync(&ctx->ibox_fasync, SIGIO, POLLIN);
Arnd Bergmann67207b92005-11-15 15:53:48 -0500706}
707
Arnd Bergmanncdcc89b2006-10-04 17:26:17 +0200708/*
709 * Read as many bytes from the interrupt mailbox as possible, until
710 * one of the conditions becomes true:
711 *
712 * - no more data available in the mailbox
713 * - end of the user provided buffer
714 * - end of the mapped area
715 *
716 * If the file is opened without O_NONBLOCK, we wait here until
717 * any data is available, but return when we have been able to
718 * read something.
719 */
Arnd Bergmann67207b92005-11-15 15:53:48 -0500720static ssize_t spufs_ibox_read(struct file *file, char __user *buf,
721 size_t len, loff_t *pos)
722{
Arnd Bergmann8b3d6662005-11-15 15:53:52 -0500723 struct spu_context *ctx = file->private_data;
Arnd Bergmanncdcc89b2006-10-04 17:26:17 +0200724 u32 ibox_data, __user *udata;
725 ssize_t count;
Arnd Bergmann67207b92005-11-15 15:53:48 -0500726
727 if (len < 4)
728 return -EINVAL;
729
Arnd Bergmanncdcc89b2006-10-04 17:26:17 +0200730 if (!access_ok(VERIFY_WRITE, buf, len))
731 return -EFAULT;
732
733 udata = (void __user *)buf;
734
Christoph Hellwigc9101bd2007-12-20 16:39:59 +0900735 count = spu_acquire(ctx);
736 if (count)
Christoph Hellwigeebead52008-02-08 15:50:41 +1100737 goto out;
Arnd Bergmann67207b92005-11-15 15:53:48 -0500738
Arnd Bergmanncdcc89b2006-10-04 17:26:17 +0200739 /* wait only for the first element */
740 count = 0;
Arnd Bergmann67207b92005-11-15 15:53:48 -0500741 if (file->f_flags & O_NONBLOCK) {
Christoph Hellwigeebead52008-02-08 15:50:41 +1100742 if (!spu_ibox_read(ctx, &ibox_data)) {
Arnd Bergmanncdcc89b2006-10-04 17:26:17 +0200743 count = -EAGAIN;
Christoph Hellwigeebead52008-02-08 15:50:41 +1100744 goto out_unlock;
745 }
Arnd Bergmann67207b92005-11-15 15:53:48 -0500746 } else {
Arnd Bergmanncdcc89b2006-10-04 17:26:17 +0200747 count = spufs_wait(ctx->ibox_wq, spu_ibox_read(ctx, &ibox_data));
Christoph Hellwigeebead52008-02-08 15:50:41 +1100748 if (count)
749 goto out;
Arnd Bergmanncdcc89b2006-10-04 17:26:17 +0200750 }
Arnd Bergmanncdcc89b2006-10-04 17:26:17 +0200751
752 /* if we can't write at all, return -EFAULT */
753 count = __put_user(ibox_data, udata);
754 if (count)
Christoph Hellwigeebead52008-02-08 15:50:41 +1100755 goto out_unlock;
Arnd Bergmanncdcc89b2006-10-04 17:26:17 +0200756
757 for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
758 int ret;
759 ret = ctx->ops->ibox_read(ctx, &ibox_data);
760 if (ret == 0)
761 break;
762 /*
763 * at the end of the mapped area, we can fault
764 * but still need to return the data we have
765 * read successfully so far.
766 */
767 ret = __put_user(ibox_data, udata);
768 if (ret)
769 break;
Arnd Bergmann67207b92005-11-15 15:53:48 -0500770 }
771
Christoph Hellwigeebead52008-02-08 15:50:41 +1100772out_unlock:
Arnd Bergmann8b3d6662005-11-15 15:53:52 -0500773 spu_release(ctx);
Christoph Hellwigeebead52008-02-08 15:50:41 +1100774out:
Arnd Bergmanncdcc89b2006-10-04 17:26:17 +0200775 return count;
Arnd Bergmann67207b92005-11-15 15:53:48 -0500776}
777
/*
 * poll handler for the ibox file: reports readability of the
 * interrupt mailbox via the context's mbox status register.
 */
static unsigned int spufs_ibox_poll(struct file *file, poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	unsigned int mask;

	poll_wait(file, &ctx->ibox_wq, wait);

	/*
	 * For now keep this uninterruptible and also ignore the rule
	 * that poll should not sleep. Will be fixed later.
	 */
	mutex_lock(&ctx->state_mutex);
	mask = ctx->ops->mbox_stat_poll(ctx, POLLIN | POLLRDNORM);
	spu_release(ctx);

	return mask;
}
795
Arjan van de Ven5dfe4c92007-02-12 00:55:31 -0800796static const struct file_operations spufs_ibox_fops = {
Arnd Bergmann67207b92005-11-15 15:53:48 -0500797 .open = spufs_pipe_open,
798 .read = spufs_ibox_read,
799 .poll = spufs_ibox_poll,
800 .fasync = spufs_ibox_fasync,
Arnd Bergmannfc153512010-09-14 10:22:33 +0000801 .llseek = no_llseek,
Arnd Bergmann67207b92005-11-15 15:53:48 -0500802};
803
/*
 * Read the number of pending interrupt-mailbox entries as a single
 * 32-bit value.  Bits 16-23 of the mailbox status register hold the
 * ibox count.
 */
static ssize_t spufs_ibox_stat_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	ssize_t ret;
	u32 ibox_stat;

	if (len < 4)
		return -EINVAL;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ibox_stat = (ctx->ops->mbox_stat_read(ctx) >> 16) & 0xff;
	spu_release(ctx);

	if (copy_to_user(buf, &ibox_stat, sizeof ibox_stat))
		return -EFAULT;

	return 4;
}
825
Arjan van de Ven5dfe4c92007-02-12 00:55:31 -0800826static const struct file_operations spufs_ibox_stat_fops = {
Arnd Bergmann67207b92005-11-15 15:53:48 -0500827 .open = spufs_pipe_open,
828 .read = spufs_ibox_stat_read,
Arnd Bergmannfc153512010-09-14 10:22:33 +0000829 .llseek = no_llseek,
Arnd Bergmann67207b92005-11-15 15:53:48 -0500830};
831
832/* low-level mailbox write */
Arnd Bergmann8b3d6662005-11-15 15:53:52 -0500833size_t spu_wbox_write(struct spu_context *ctx, u32 data)
Arnd Bergmann67207b92005-11-15 15:53:48 -0500834{
Arnd Bergmann8b3d6662005-11-15 15:53:52 -0500835 return ctx->ops->wbox_write(ctx, data);
Arnd Bergmann67207b92005-11-15 15:53:48 -0500836}
Arnd Bergmann67207b92005-11-15 15:53:48 -0500837
838static int spufs_wbox_fasync(int fd, struct file *file, int on)
839{
Arnd Bergmann8b3d6662005-11-15 15:53:52 -0500840 struct spu_context *ctx = file->private_data;
841 int ret;
842
843 ret = fasync_helper(fd, file, on, &ctx->wbox_fasync);
844
845 return ret;
846}
847
/*
 * interrupt-level wbox callback function.
 *
 * Called when the SPU's write mailbox drains; wakes blocked writers
 * and signals async listeners.  spu->ctx may be NULL during context
 * teardown, hence the early return.
 */
void spufs_wbox_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	if (!ctx)
		return;

	wake_up_all(&ctx->wbox_wq);
	kill_fasync(&ctx->wbox_fasync, SIGIO, POLLOUT);
}
859
Arnd Bergmanncdcc89b2006-10-04 17:26:17 +0200860/*
861 * Write as many bytes to the interrupt mailbox as possible, until
862 * one of the conditions becomes true:
863 *
864 * - the mailbox is full
865 * - end of the user provided buffer
866 * - end of the mapped area
867 *
868 * If the file is opened without O_NONBLOCK, we wait here until
869 * space is availabyl, but return when we have been able to
870 * write something.
871 */
Arnd Bergmann67207b92005-11-15 15:53:48 -0500872static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,
873 size_t len, loff_t *pos)
874{
Arnd Bergmann8b3d6662005-11-15 15:53:52 -0500875 struct spu_context *ctx = file->private_data;
Arnd Bergmanncdcc89b2006-10-04 17:26:17 +0200876 u32 wbox_data, __user *udata;
877 ssize_t count;
Arnd Bergmann67207b92005-11-15 15:53:48 -0500878
879 if (len < 4)
880 return -EINVAL;
881
Arnd Bergmanncdcc89b2006-10-04 17:26:17 +0200882 udata = (void __user *)buf;
883 if (!access_ok(VERIFY_READ, buf, len))
884 return -EFAULT;
885
886 if (__get_user(wbox_data, udata))
Arnd Bergmann67207b92005-11-15 15:53:48 -0500887 return -EFAULT;
888
Christoph Hellwigc9101bd2007-12-20 16:39:59 +0900889 count = spu_acquire(ctx);
890 if (count)
Christoph Hellwigeebead52008-02-08 15:50:41 +1100891 goto out;
Arnd Bergmann8b3d6662005-11-15 15:53:52 -0500892
Arnd Bergmanncdcc89b2006-10-04 17:26:17 +0200893 /*
894 * make sure we can at least write one element, by waiting
895 * in case of !O_NONBLOCK
896 */
897 count = 0;
Arnd Bergmann67207b92005-11-15 15:53:48 -0500898 if (file->f_flags & O_NONBLOCK) {
Christoph Hellwigeebead52008-02-08 15:50:41 +1100899 if (!spu_wbox_write(ctx, wbox_data)) {
Arnd Bergmanncdcc89b2006-10-04 17:26:17 +0200900 count = -EAGAIN;
Christoph Hellwigeebead52008-02-08 15:50:41 +1100901 goto out_unlock;
902 }
Arnd Bergmann67207b92005-11-15 15:53:48 -0500903 } else {
Arnd Bergmanncdcc89b2006-10-04 17:26:17 +0200904 count = spufs_wait(ctx->wbox_wq, spu_wbox_write(ctx, wbox_data));
Christoph Hellwigeebead52008-02-08 15:50:41 +1100905 if (count)
906 goto out;
Arnd Bergmann67207b92005-11-15 15:53:48 -0500907 }
908
Arnd Bergmann8b3d6662005-11-15 15:53:52 -0500909
Jan Engelhardt96de0e22007-10-19 23:21:04 +0200910 /* write as much as possible */
Arnd Bergmanncdcc89b2006-10-04 17:26:17 +0200911 for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
912 int ret;
913 ret = __get_user(wbox_data, udata);
914 if (ret)
915 break;
916
917 ret = spu_wbox_write(ctx, wbox_data);
918 if (ret == 0)
919 break;
920 }
921
Christoph Hellwigeebead52008-02-08 15:50:41 +1100922out_unlock:
Arnd Bergmanncdcc89b2006-10-04 17:26:17 +0200923 spu_release(ctx);
Christoph Hellwigeebead52008-02-08 15:50:41 +1100924out:
Arnd Bergmanncdcc89b2006-10-04 17:26:17 +0200925 return count;
Arnd Bergmann67207b92005-11-15 15:53:48 -0500926}
927
/*
 * poll handler for the wbox file: reports writability of the
 * write mailbox via the context's mbox status register.
 */
static unsigned int spufs_wbox_poll(struct file *file, poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	unsigned int mask;

	poll_wait(file, &ctx->wbox_wq, wait);

	/*
	 * For now keep this uninterruptible and also ignore the rule
	 * that poll should not sleep. Will be fixed later.
	 */
	mutex_lock(&ctx->state_mutex);
	mask = ctx->ops->mbox_stat_poll(ctx, POLLOUT | POLLWRNORM);
	spu_release(ctx);

	return mask;
}
945
Arjan van de Ven5dfe4c92007-02-12 00:55:31 -0800946static const struct file_operations spufs_wbox_fops = {
Arnd Bergmann67207b92005-11-15 15:53:48 -0500947 .open = spufs_pipe_open,
948 .write = spufs_wbox_write,
949 .poll = spufs_wbox_poll,
950 .fasync = spufs_wbox_fasync,
Arnd Bergmannfc153512010-09-14 10:22:33 +0000951 .llseek = no_llseek,
Arnd Bergmann67207b92005-11-15 15:53:48 -0500952};
953
/*
 * Read the number of free write-mailbox slots as a single 32-bit
 * value.  Bits 8-15 of the mailbox status register hold the wbox
 * count.
 */
static ssize_t spufs_wbox_stat_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	ssize_t ret;
	u32 wbox_stat;

	if (len < 4)
		return -EINVAL;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	wbox_stat = (ctx->ops->mbox_stat_read(ctx) >> 8) & 0xff;
	spu_release(ctx);

	if (copy_to_user(buf, &wbox_stat, sizeof wbox_stat))
		return -EFAULT;

	return 4;
}
975
Arjan van de Ven5dfe4c92007-02-12 00:55:31 -0800976static const struct file_operations spufs_wbox_stat_fops = {
Arnd Bergmann67207b92005-11-15 15:53:48 -0500977 .open = spufs_pipe_open,
978 .read = spufs_wbox_stat_read,
Arnd Bergmannfc153512010-09-14 10:22:33 +0000979 .llseek = no_llseek,
Arnd Bergmann67207b92005-11-15 15:53:48 -0500980};
981
Mark Nutter6df10a82006-03-23 00:00:12 +0100982static int spufs_signal1_open(struct inode *inode, struct file *file)
983{
984 struct spufs_inode_info *i = SPUFS_I(inode);
985 struct spu_context *ctx = i->i_ctx;
Christoph Hellwig43c2bbd2007-04-23 21:08:07 +0200986
Christoph Hellwig47d3a5f2007-06-04 23:26:51 +1000987 mutex_lock(&ctx->mapping_lock);
Mark Nutter6df10a82006-03-23 00:00:12 +0100988 file->private_data = ctx;
Christoph Hellwig43c2bbd2007-04-23 21:08:07 +0200989 if (!i->i_openers++)
990 ctx->signal1 = inode->i_mapping;
Christoph Hellwig47d3a5f2007-06-04 23:26:51 +1000991 mutex_unlock(&ctx->mapping_lock);
Mark Nutter6df10a82006-03-23 00:00:12 +0100992 return nonseekable_open(inode, file);
993}
994
Christoph Hellwig43c2bbd2007-04-23 21:08:07 +0200995static int
996spufs_signal1_release(struct inode *inode, struct file *file)
997{
998 struct spufs_inode_info *i = SPUFS_I(inode);
999 struct spu_context *ctx = i->i_ctx;
1000
Christoph Hellwig47d3a5f2007-06-04 23:26:51 +10001001 mutex_lock(&ctx->mapping_lock);
Christoph Hellwig43c2bbd2007-04-23 21:08:07 +02001002 if (!--i->i_openers)
1003 ctx->signal1 = NULL;
Christoph Hellwig47d3a5f2007-06-04 23:26:51 +10001004 mutex_unlock(&ctx->mapping_lock);
Christoph Hellwig43c2bbd2007-04-23 21:08:07 +02001005 return 0;
1006}
1007
Dwayne Grant McConnellbf1ab972006-11-23 00:46:37 +01001008static ssize_t __spufs_signal1_read(struct spu_context *ctx, char __user *buf,
Arnd Bergmann67207b92005-11-15 15:53:48 -05001009 size_t len, loff_t *pos)
1010{
Dwayne Grant McConnell17f88ce2006-11-20 18:45:01 +01001011 int ret = 0;
Arnd Bergmann67207b92005-11-15 15:53:48 -05001012 u32 data;
1013
Arnd Bergmann67207b92005-11-15 15:53:48 -05001014 if (len < 4)
1015 return -EINVAL;
1016
Dwayne Grant McConnell17f88ce2006-11-20 18:45:01 +01001017 if (ctx->csa.spu_chnlcnt_RW[3]) {
1018 data = ctx->csa.spu_chnldata_RW[3];
1019 ret = 4;
1020 }
Arnd Bergmann8b3d6662005-11-15 15:53:52 -05001021
Dwayne Grant McConnell17f88ce2006-11-20 18:45:01 +01001022 if (!ret)
1023 goto out;
1024
Arnd Bergmann67207b92005-11-15 15:53:48 -05001025 if (copy_to_user(buf, &data, 4))
1026 return -EFAULT;
1027
Dwayne Grant McConnell17f88ce2006-11-20 18:45:01 +01001028out:
1029 return ret;
Arnd Bergmann67207b92005-11-15 15:53:48 -05001030}
1031
Dwayne Grant McConnellbf1ab972006-11-23 00:46:37 +01001032static ssize_t spufs_signal1_read(struct file *file, char __user *buf,
1033 size_t len, loff_t *pos)
1034{
1035 int ret;
1036 struct spu_context *ctx = file->private_data;
1037
Christoph Hellwigc9101bd2007-12-20 16:39:59 +09001038 ret = spu_acquire_saved(ctx);
1039 if (ret)
1040 return ret;
Dwayne Grant McConnellbf1ab972006-11-23 00:46:37 +01001041 ret = __spufs_signal1_read(ctx, buf, len, pos);
Christoph Hellwig27b1ea02007-07-20 21:39:34 +02001042 spu_release_saved(ctx);
Dwayne Grant McConnellbf1ab972006-11-23 00:46:37 +01001043
1044 return ret;
1045}
1046
Arnd Bergmann67207b92005-11-15 15:53:48 -05001047static ssize_t spufs_signal1_write(struct file *file, const char __user *buf,
1048 size_t len, loff_t *pos)
1049{
1050 struct spu_context *ctx;
Christoph Hellwigc9101bd2007-12-20 16:39:59 +09001051 ssize_t ret;
Arnd Bergmann67207b92005-11-15 15:53:48 -05001052 u32 data;
1053
1054 ctx = file->private_data;
Arnd Bergmann67207b92005-11-15 15:53:48 -05001055
1056 if (len < 4)
1057 return -EINVAL;
1058
1059 if (copy_from_user(&data, buf, 4))
1060 return -EFAULT;
1061
Christoph Hellwigc9101bd2007-12-20 16:39:59 +09001062 ret = spu_acquire(ctx);
1063 if (ret)
1064 return ret;
Arnd Bergmann8b3d6662005-11-15 15:53:52 -05001065 ctx->ops->signal1_write(ctx, data);
1066 spu_release(ctx);
Arnd Bergmann67207b92005-11-15 15:53:48 -05001067
1068 return 4;
1069}
1070
Nick Pigginb1e22702008-06-10 09:26:08 +10001071static int
1072spufs_signal1_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
Mark Nutter6df10a82006-03-23 00:00:12 +01001073{
Jeremy Kerr87ff6092008-07-01 10:22:50 +10001074#if SPUFS_SIGNAL_MAP_SIZE == 0x1000
1075 return spufs_ps_fault(vma, vmf, 0x14000, SPUFS_SIGNAL_MAP_SIZE);
1076#elif SPUFS_SIGNAL_MAP_SIZE == 0x10000
Benjamin Herrenschmidt27d5bf22006-10-04 17:26:11 +02001077 /* For 64k pages, both signal1 and signal2 can be used to mmap the whole
1078 * signal 1 and 2 area
1079 */
Jeremy Kerr87ff6092008-07-01 10:22:50 +10001080 return spufs_ps_fault(vma, vmf, 0x10000, SPUFS_SIGNAL_MAP_SIZE);
Benjamin Herrenschmidt27d5bf22006-10-04 17:26:11 +02001081#else
1082#error unsupported page size
1083#endif
Mark Nutter6df10a82006-03-23 00:00:12 +01001084}
1085
Alexey Dobriyanf0f37e22009-09-27 22:29:37 +04001086static const struct vm_operations_struct spufs_signal1_mmap_vmops = {
Nick Pigginb1e22702008-06-10 09:26:08 +10001087 .fault = spufs_signal1_mmap_fault,
Mark Nutter6df10a82006-03-23 00:00:12 +01001088};
1089
/*
 * mmap handler for the signal1 file.  Only shared mappings make
 * sense for device-like problem state; pages are uncached and
 * faulted in on demand via spufs_signal1_mmap_vmops.
 */
static int spufs_signal1_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	vma->vm_ops = &spufs_signal1_mmap_vmops;
	return 0;
}
Mark Nutter6df10a82006-03-23 00:00:12 +01001101
Arjan van de Ven5dfe4c92007-02-12 00:55:31 -08001102static const struct file_operations spufs_signal1_fops = {
Mark Nutter6df10a82006-03-23 00:00:12 +01001103 .open = spufs_signal1_open,
Christoph Hellwig43c2bbd2007-04-23 21:08:07 +02001104 .release = spufs_signal1_release,
Arnd Bergmann67207b92005-11-15 15:53:48 -05001105 .read = spufs_signal1_read,
1106 .write = spufs_signal1_write,
Mark Nutter6df10a82006-03-23 00:00:12 +01001107 .mmap = spufs_signal1_mmap,
Arnd Bergmannfc153512010-09-14 10:22:33 +00001108 .llseek = no_llseek,
Arnd Bergmann67207b92005-11-15 15:53:48 -05001109};
1110
Jeremy Kerrd054b362007-07-20 21:39:31 +02001111static const struct file_operations spufs_signal1_nosched_fops = {
1112 .open = spufs_signal1_open,
1113 .release = spufs_signal1_release,
1114 .write = spufs_signal1_write,
1115 .mmap = spufs_signal1_mmap,
Arnd Bergmannfc153512010-09-14 10:22:33 +00001116 .llseek = no_llseek,
Jeremy Kerrd054b362007-07-20 21:39:31 +02001117};
1118
Mark Nutter6df10a82006-03-23 00:00:12 +01001119static int spufs_signal2_open(struct inode *inode, struct file *file)
1120{
1121 struct spufs_inode_info *i = SPUFS_I(inode);
1122 struct spu_context *ctx = i->i_ctx;
Christoph Hellwig43c2bbd2007-04-23 21:08:07 +02001123
Christoph Hellwig47d3a5f2007-06-04 23:26:51 +10001124 mutex_lock(&ctx->mapping_lock);
Mark Nutter6df10a82006-03-23 00:00:12 +01001125 file->private_data = ctx;
Christoph Hellwig43c2bbd2007-04-23 21:08:07 +02001126 if (!i->i_openers++)
1127 ctx->signal2 = inode->i_mapping;
Christoph Hellwig47d3a5f2007-06-04 23:26:51 +10001128 mutex_unlock(&ctx->mapping_lock);
Mark Nutter6df10a82006-03-23 00:00:12 +01001129 return nonseekable_open(inode, file);
1130}
1131
Christoph Hellwig43c2bbd2007-04-23 21:08:07 +02001132static int
1133spufs_signal2_release(struct inode *inode, struct file *file)
1134{
1135 struct spufs_inode_info *i = SPUFS_I(inode);
1136 struct spu_context *ctx = i->i_ctx;
1137
Christoph Hellwig47d3a5f2007-06-04 23:26:51 +10001138 mutex_lock(&ctx->mapping_lock);
Christoph Hellwig43c2bbd2007-04-23 21:08:07 +02001139 if (!--i->i_openers)
1140 ctx->signal2 = NULL;
Christoph Hellwig47d3a5f2007-06-04 23:26:51 +10001141 mutex_unlock(&ctx->mapping_lock);
Christoph Hellwig43c2bbd2007-04-23 21:08:07 +02001142 return 0;
1143}
1144
Dwayne Grant McConnellbf1ab972006-11-23 00:46:37 +01001145static ssize_t __spufs_signal2_read(struct spu_context *ctx, char __user *buf,
Arnd Bergmann67207b92005-11-15 15:53:48 -05001146 size_t len, loff_t *pos)
1147{
Dwayne Grant McConnell17f88ce2006-11-20 18:45:01 +01001148 int ret = 0;
Arnd Bergmann67207b92005-11-15 15:53:48 -05001149 u32 data;
1150
Arnd Bergmann67207b92005-11-15 15:53:48 -05001151 if (len < 4)
1152 return -EINVAL;
1153
Dwayne Grant McConnell17f88ce2006-11-20 18:45:01 +01001154 if (ctx->csa.spu_chnlcnt_RW[4]) {
1155 data = ctx->csa.spu_chnldata_RW[4];
1156 ret = 4;
1157 }
Arnd Bergmann8b3d6662005-11-15 15:53:52 -05001158
Dwayne Grant McConnell17f88ce2006-11-20 18:45:01 +01001159 if (!ret)
1160 goto out;
1161
Arnd Bergmann67207b92005-11-15 15:53:48 -05001162 if (copy_to_user(buf, &data, 4))
1163 return -EFAULT;
1164
Dwayne Grant McConnell17f88ce2006-11-20 18:45:01 +01001165out:
Dwayne Grant McConnellbf1ab972006-11-23 00:46:37 +01001166 return ret;
1167}
1168
/*
 * read() handler for the signal2 file: force the context into saved
 * state so the channel data is in the CSA, then read it.
 */
static ssize_t spufs_signal2_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	ret = __spufs_signal2_read(ctx, buf, len, pos);
	spu_release_saved(ctx);

	return ret;
}
1183
/*
 * write() handler for the signal2 file: deliver one 32-bit value to
 * the SPU's signal notification register 2.  Always consumes exactly
 * 4 bytes on success.
 */
static ssize_t spufs_signal2_write(struct file *file, const char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx;
	ssize_t ret;
	u32 data;

	ctx = file->private_data;

	if (len < 4)
		return -EINVAL;

	/* copy from userspace before taking the context lock */
	if (copy_from_user(&data, buf, 4))
		return -EFAULT;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ctx->ops->signal2_write(ctx, data);
	spu_release(ctx);

	return 4;
}
1207
Benjamin Herrenschmidt27d5bf22006-10-04 17:26:11 +02001208#if SPUFS_MMAP_4K
Nick Pigginb1e22702008-06-10 09:26:08 +10001209static int
1210spufs_signal2_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
Mark Nutter6df10a82006-03-23 00:00:12 +01001211{
Jeremy Kerr87ff6092008-07-01 10:22:50 +10001212#if SPUFS_SIGNAL_MAP_SIZE == 0x1000
1213 return spufs_ps_fault(vma, vmf, 0x1c000, SPUFS_SIGNAL_MAP_SIZE);
1214#elif SPUFS_SIGNAL_MAP_SIZE == 0x10000
Benjamin Herrenschmidt27d5bf22006-10-04 17:26:11 +02001215 /* For 64k pages, both signal1 and signal2 can be used to mmap the whole
1216 * signal 1 and 2 area
1217 */
Jeremy Kerr87ff6092008-07-01 10:22:50 +10001218 return spufs_ps_fault(vma, vmf, 0x10000, SPUFS_SIGNAL_MAP_SIZE);
Benjamin Herrenschmidt27d5bf22006-10-04 17:26:11 +02001219#else
1220#error unsupported page size
1221#endif
Mark Nutter6df10a82006-03-23 00:00:12 +01001222}
1223
Alexey Dobriyanf0f37e22009-09-27 22:29:37 +04001224static const struct vm_operations_struct spufs_signal2_mmap_vmops = {
Nick Pigginb1e22702008-06-10 09:26:08 +10001225 .fault = spufs_signal2_mmap_fault,
Mark Nutter6df10a82006-03-23 00:00:12 +01001226};
1227
1228static int spufs_signal2_mmap(struct file *file, struct vm_area_struct *vma)
1229{
1230 if (!(vma->vm_flags & VM_SHARED))
1231 return -EINVAL;
1232
Benjamin Herrenschmidt78bde532007-02-13 11:46:06 +11001233 vma->vm_flags |= VM_IO | VM_PFNMAP;
Benjamin Herrenschmidt64b3d0e2008-12-18 19:13:51 +00001234 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
Mark Nutter6df10a82006-03-23 00:00:12 +01001235
1236 vma->vm_ops = &spufs_signal2_mmap_vmops;
1237 return 0;
1238}
Benjamin Herrenschmidt27d5bf22006-10-04 17:26:11 +02001239#else /* SPUFS_MMAP_4K */
1240#define spufs_signal2_mmap NULL
1241#endif /* !SPUFS_MMAP_4K */
Mark Nutter6df10a82006-03-23 00:00:12 +01001242
Arjan van de Ven5dfe4c92007-02-12 00:55:31 -08001243static const struct file_operations spufs_signal2_fops = {
Mark Nutter6df10a82006-03-23 00:00:12 +01001244 .open = spufs_signal2_open,
Christoph Hellwig43c2bbd2007-04-23 21:08:07 +02001245 .release = spufs_signal2_release,
Arnd Bergmann67207b92005-11-15 15:53:48 -05001246 .read = spufs_signal2_read,
1247 .write = spufs_signal2_write,
Mark Nutter6df10a82006-03-23 00:00:12 +01001248 .mmap = spufs_signal2_mmap,
Arnd Bergmannfc153512010-09-14 10:22:33 +00001249 .llseek = no_llseek,
Arnd Bergmann67207b92005-11-15 15:53:48 -05001250};
1251
Jeremy Kerrd054b362007-07-20 21:39:31 +02001252static const struct file_operations spufs_signal2_nosched_fops = {
1253 .open = spufs_signal2_open,
1254 .release = spufs_signal2_release,
1255 .write = spufs_signal2_write,
1256 .mmap = spufs_signal2_mmap,
Arnd Bergmannfc153512010-09-14 10:22:33 +00001257 .llseek = no_llseek,
Jeremy Kerrd054b362007-07-20 21:39:31 +02001258};
1259
Michael Ellerman104f0cc2007-09-19 14:38:12 +10001260/*
1261 * This is a wrapper around DEFINE_SIMPLE_ATTRIBUTE which does the
1262 * work of acquiring (or not) the SPU context before calling through
1263 * to the actual get routine. The set routine is called directly.
1264 */
1265#define SPU_ATTR_NOACQUIRE 0
1266#define SPU_ATTR_ACQUIRE 1
1267#define SPU_ATTR_ACQUIRE_SAVED 2
1268
1269#define DEFINE_SPUFS_ATTRIBUTE(__name, __get, __set, __fmt, __acquire) \
Christoph Hellwig197b1a82007-12-20 16:39:59 +09001270static int __##__get(void *data, u64 *val) \
Michael Ellerman104f0cc2007-09-19 14:38:12 +10001271{ \
1272 struct spu_context *ctx = data; \
Christoph Hellwigc9101bd2007-12-20 16:39:59 +09001273 int ret = 0; \
Michael Ellerman104f0cc2007-09-19 14:38:12 +10001274 \
1275 if (__acquire == SPU_ATTR_ACQUIRE) { \
Christoph Hellwigc9101bd2007-12-20 16:39:59 +09001276 ret = spu_acquire(ctx); \
1277 if (ret) \
1278 return ret; \
Christoph Hellwig197b1a82007-12-20 16:39:59 +09001279 *val = __get(ctx); \
Michael Ellerman104f0cc2007-09-19 14:38:12 +10001280 spu_release(ctx); \
1281 } else if (__acquire == SPU_ATTR_ACQUIRE_SAVED) { \
Christoph Hellwigc9101bd2007-12-20 16:39:59 +09001282 ret = spu_acquire_saved(ctx); \
1283 if (ret) \
1284 return ret; \
Christoph Hellwig197b1a82007-12-20 16:39:59 +09001285 *val = __get(ctx); \
Michael Ellerman104f0cc2007-09-19 14:38:12 +10001286 spu_release_saved(ctx); \
1287 } else \
Christoph Hellwig197b1a82007-12-20 16:39:59 +09001288 *val = __get(ctx); \
Michael Ellerman104f0cc2007-09-19 14:38:12 +10001289 \
Christoph Hellwig197b1a82007-12-20 16:39:59 +09001290 return 0; \
Michael Ellerman104f0cc2007-09-19 14:38:12 +10001291} \
Christoph Hellwig197b1a82007-12-20 16:39:59 +09001292DEFINE_SPUFS_SIMPLE_ATTRIBUTE(__name, __##__get, __set, __fmt);
Michael Ellerman104f0cc2007-09-19 14:38:12 +10001293
Christoph Hellwig197b1a82007-12-20 16:39:59 +09001294static int spufs_signal1_type_set(void *data, u64 val)
Arnd Bergmann67207b92005-11-15 15:53:48 -05001295{
1296 struct spu_context *ctx = data;
Christoph Hellwigc9101bd2007-12-20 16:39:59 +09001297 int ret;
Arnd Bergmann67207b92005-11-15 15:53:48 -05001298
Christoph Hellwigc9101bd2007-12-20 16:39:59 +09001299 ret = spu_acquire(ctx);
1300 if (ret)
1301 return ret;
Arnd Bergmann8b3d6662005-11-15 15:53:52 -05001302 ctx->ops->signal1_type_set(ctx, val);
1303 spu_release(ctx);
Christoph Hellwig197b1a82007-12-20 16:39:59 +09001304
1305 return 0;
Arnd Bergmann67207b92005-11-15 15:53:48 -05001306}
1307
Michael Ellerman104f0cc2007-09-19 14:38:12 +10001308static u64 spufs_signal1_type_get(struct spu_context *ctx)
Dwayne Grant McConnellbf1ab972006-11-23 00:46:37 +01001309{
Dwayne Grant McConnellbf1ab972006-11-23 00:46:37 +01001310 return ctx->ops->signal1_type_get(ctx);
1311}
Michael Ellerman104f0cc2007-09-19 14:38:12 +10001312DEFINE_SPUFS_ATTRIBUTE(spufs_signal1_type, spufs_signal1_type_get,
Jeremy Kerraf8b44e2008-03-25 13:15:11 +11001313 spufs_signal1_type_set, "%llu\n", SPU_ATTR_ACQUIRE);
Dwayne Grant McConnellbf1ab972006-11-23 00:46:37 +01001314
Arnd Bergmann67207b92005-11-15 15:53:48 -05001315
Christoph Hellwig197b1a82007-12-20 16:39:59 +09001316static int spufs_signal2_type_set(void *data, u64 val)
Arnd Bergmann67207b92005-11-15 15:53:48 -05001317{
1318 struct spu_context *ctx = data;
Christoph Hellwigc9101bd2007-12-20 16:39:59 +09001319 int ret;
Arnd Bergmann67207b92005-11-15 15:53:48 -05001320
Christoph Hellwigc9101bd2007-12-20 16:39:59 +09001321 ret = spu_acquire(ctx);
1322 if (ret)
1323 return ret;
Arnd Bergmann8b3d6662005-11-15 15:53:52 -05001324 ctx->ops->signal2_type_set(ctx, val);
1325 spu_release(ctx);
Christoph Hellwig197b1a82007-12-20 16:39:59 +09001326
1327 return 0;
Arnd Bergmann67207b92005-11-15 15:53:48 -05001328}
1329
Michael Ellerman104f0cc2007-09-19 14:38:12 +10001330static u64 spufs_signal2_type_get(struct spu_context *ctx)
Dwayne Grant McConnellbf1ab972006-11-23 00:46:37 +01001331{
Dwayne Grant McConnellbf1ab972006-11-23 00:46:37 +01001332 return ctx->ops->signal2_type_get(ctx);
1333}
Michael Ellerman104f0cc2007-09-19 14:38:12 +10001334DEFINE_SPUFS_ATTRIBUTE(spufs_signal2_type, spufs_signal2_type_get,
Jeremy Kerraf8b44e2008-03-25 13:15:11 +11001335 spufs_signal2_type_set, "%llu\n", SPU_ATTR_ACQUIRE);
Arnd Bergmann67207b92005-11-15 15:53:48 -05001336
Benjamin Herrenschmidt27d5bf22006-10-04 17:26:11 +02001337#if SPUFS_MMAP_4K
Nick Pigginb1e22702008-06-10 09:26:08 +10001338static int
1339spufs_mss_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
arnd@arndb.ded9379c42006-06-19 20:33:21 +02001340{
Jeremy Kerr87ff6092008-07-01 10:22:50 +10001341 return spufs_ps_fault(vma, vmf, 0x0000, SPUFS_MSS_MAP_SIZE);
arnd@arndb.ded9379c42006-06-19 20:33:21 +02001342}
1343
Alexey Dobriyanf0f37e22009-09-27 22:29:37 +04001344static const struct vm_operations_struct spufs_mss_mmap_vmops = {
Nick Pigginb1e22702008-06-10 09:26:08 +10001345 .fault = spufs_mss_mmap_fault,
arnd@arndb.ded9379c42006-06-19 20:33:21 +02001346};
1347
1348/*
1349 * mmap support for problem state MFC DMA area [0x0000 - 0x0fff].
arnd@arndb.ded9379c42006-06-19 20:33:21 +02001350 */
1351static int spufs_mss_mmap(struct file *file, struct vm_area_struct *vma)
1352{
1353 if (!(vma->vm_flags & VM_SHARED))
1354 return -EINVAL;
1355
Benjamin Herrenschmidt78bde532007-02-13 11:46:06 +11001356 vma->vm_flags |= VM_IO | VM_PFNMAP;
Benjamin Herrenschmidt64b3d0e2008-12-18 19:13:51 +00001357 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
arnd@arndb.ded9379c42006-06-19 20:33:21 +02001358
1359 vma->vm_ops = &spufs_mss_mmap_vmops;
1360 return 0;
1361}
Benjamin Herrenschmidt27d5bf22006-10-04 17:26:11 +02001362#else /* SPUFS_MMAP_4K */
1363#define spufs_mss_mmap NULL
1364#endif /* !SPUFS_MMAP_4K */
arnd@arndb.ded9379c42006-06-19 20:33:21 +02001365
/*
 * Open the mss file; caches the inode's address space in ctx->mss on
 * first open, analogous to spufs_signal1_open.
 *
 * NOTE(review): file->private_data is assigned before taking
 * mapping_lock here, unlike the signal1/signal2/psmap open paths which
 * assign it under the lock -- confirm whether that ordering matters.
 */
static int spufs_mss_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	file->private_data = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!i->i_openers++)
		ctx->mss = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}
1379
Christoph Hellwig43c2bbd2007-04-23 21:08:07 +02001380static int
1381spufs_mss_release(struct inode *inode, struct file *file)
1382{
1383 struct spufs_inode_info *i = SPUFS_I(inode);
1384 struct spu_context *ctx = i->i_ctx;
1385
Christoph Hellwig47d3a5f2007-06-04 23:26:51 +10001386 mutex_lock(&ctx->mapping_lock);
Christoph Hellwig43c2bbd2007-04-23 21:08:07 +02001387 if (!--i->i_openers)
1388 ctx->mss = NULL;
Christoph Hellwig47d3a5f2007-06-04 23:26:51 +10001389 mutex_unlock(&ctx->mapping_lock);
Christoph Hellwig43c2bbd2007-04-23 21:08:07 +02001390 return 0;
1391}
1392
Arjan van de Ven5dfe4c92007-02-12 00:55:31 -08001393static const struct file_operations spufs_mss_fops = {
arnd@arndb.ded9379c42006-06-19 20:33:21 +02001394 .open = spufs_mss_open,
Christoph Hellwig43c2bbd2007-04-23 21:08:07 +02001395 .release = spufs_mss_release,
arnd@arndb.ded9379c42006-06-19 20:33:21 +02001396 .mmap = spufs_mss_mmap,
Arnd Bergmannfc153512010-09-14 10:22:33 +00001397 .llseek = no_llseek,
Benjamin Herrenschmidt27d5bf22006-10-04 17:26:11 +02001398};
1399
Nick Pigginb1e22702008-06-10 09:26:08 +10001400static int
1401spufs_psmap_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
Benjamin Herrenschmidt27d5bf22006-10-04 17:26:11 +02001402{
Jeremy Kerr87ff6092008-07-01 10:22:50 +10001403 return spufs_ps_fault(vma, vmf, 0x0000, SPUFS_PS_MAP_SIZE);
Benjamin Herrenschmidt27d5bf22006-10-04 17:26:11 +02001404}
1405
Alexey Dobriyanf0f37e22009-09-27 22:29:37 +04001406static const struct vm_operations_struct spufs_psmap_mmap_vmops = {
Nick Pigginb1e22702008-06-10 09:26:08 +10001407 .fault = spufs_psmap_mmap_fault,
Benjamin Herrenschmidt27d5bf22006-10-04 17:26:11 +02001408};
1409
/*
 * mmap support for full problem state area [0x00000 - 0x1ffff].
 * Shared, uncached, demand-faulted mappings only.
 */
static int spufs_psmap_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	vma->vm_ops = &spufs_psmap_mmap_vmops;
	return 0;
}
1424
/*
 * Open the psmap file; caches the inode's address space in
 * ctx->psmap on first open, analogous to spufs_signal1_open.
 */
static int spufs_psmap_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = i->i_ctx;
	if (!i->i_openers++)
		ctx->psmap = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}
1437
Christoph Hellwig43c2bbd2007-04-23 21:08:07 +02001438static int
1439spufs_psmap_release(struct inode *inode, struct file *file)
1440{
1441 struct spufs_inode_info *i = SPUFS_I(inode);
1442 struct spu_context *ctx = i->i_ctx;
1443
Christoph Hellwig47d3a5f2007-06-04 23:26:51 +10001444 mutex_lock(&ctx->mapping_lock);
Christoph Hellwig43c2bbd2007-04-23 21:08:07 +02001445 if (!--i->i_openers)
1446 ctx->psmap = NULL;
Christoph Hellwig47d3a5f2007-06-04 23:26:51 +10001447 mutex_unlock(&ctx->mapping_lock);
Christoph Hellwig43c2bbd2007-04-23 21:08:07 +02001448 return 0;
1449}
1450
Arjan van de Ven5dfe4c92007-02-12 00:55:31 -08001451static const struct file_operations spufs_psmap_fops = {
Benjamin Herrenschmidt27d5bf22006-10-04 17:26:11 +02001452 .open = spufs_psmap_open,
Christoph Hellwig43c2bbd2007-04-23 21:08:07 +02001453 .release = spufs_psmap_release,
Benjamin Herrenschmidt27d5bf22006-10-04 17:26:11 +02001454 .mmap = spufs_psmap_mmap,
Arnd Bergmannfc153512010-09-14 10:22:33 +00001455 .llseek = no_llseek,
arnd@arndb.ded9379c42006-06-19 20:33:21 +02001456};
1457
1458
Benjamin Herrenschmidt27d5bf22006-10-04 17:26:11 +02001459#if SPUFS_MMAP_4K
Nick Pigginb1e22702008-06-10 09:26:08 +10001460static int
1461spufs_mfc_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
Mark Nutter6df10a82006-03-23 00:00:12 +01001462{
Jeremy Kerr87ff6092008-07-01 10:22:50 +10001463 return spufs_ps_fault(vma, vmf, 0x3000, SPUFS_MFC_MAP_SIZE);
Mark Nutter6df10a82006-03-23 00:00:12 +01001464}
1465
Alexey Dobriyanf0f37e22009-09-27 22:29:37 +04001466static const struct vm_operations_struct spufs_mfc_mmap_vmops = {
Nick Pigginb1e22702008-06-10 09:26:08 +10001467 .fault = spufs_mfc_mmap_fault,
Mark Nutter6df10a82006-03-23 00:00:12 +01001468};
1469
1470/*
1471 * mmap support for problem state MFC DMA area [0x0000 - 0x0fff].
Mark Nutter6df10a82006-03-23 00:00:12 +01001472 */
1473static int spufs_mfc_mmap(struct file *file, struct vm_area_struct *vma)
1474{
1475 if (!(vma->vm_flags & VM_SHARED))
1476 return -EINVAL;
1477
Benjamin Herrenschmidt78bde532007-02-13 11:46:06 +11001478 vma->vm_flags |= VM_IO | VM_PFNMAP;
Benjamin Herrenschmidt64b3d0e2008-12-18 19:13:51 +00001479 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
Mark Nutter6df10a82006-03-23 00:00:12 +01001480
1481 vma->vm_ops = &spufs_mfc_mmap_vmops;
1482 return 0;
1483}
Benjamin Herrenschmidt27d5bf22006-10-04 17:26:11 +02001484#else /* SPUFS_MMAP_4K */
1485#define spufs_mfc_mmap NULL
1486#endif /* !SPUFS_MMAP_4K */
Arnd Bergmanna33a7d72006-03-23 00:00:11 +01001487
/*
 * Open the "mfc" file used to queue DMA commands from user space.
 * Only the context owner may open it, and only a single open is
 * allowed (the i_count check rejects a second reference to the inode).
 */
static int spufs_mfc_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	/* we don't want to deal with DMA into other processes */
	if (ctx->owner != current->mm)
		return -EINVAL;

	if (atomic_read(&inode->i_count) != 1)
		return -EBUSY;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->mfc = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}
1507
Christoph Hellwig43c2bbd2007-04-23 21:08:07 +02001508static int
1509spufs_mfc_release(struct inode *inode, struct file *file)
1510{
1511 struct spufs_inode_info *i = SPUFS_I(inode);
1512 struct spu_context *ctx = i->i_ctx;
1513
Christoph Hellwig47d3a5f2007-06-04 23:26:51 +10001514 mutex_lock(&ctx->mapping_lock);
Christoph Hellwig43c2bbd2007-04-23 21:08:07 +02001515 if (!--i->i_openers)
1516 ctx->mfc = NULL;
Christoph Hellwig47d3a5f2007-06-04 23:26:51 +10001517 mutex_unlock(&ctx->mapping_lock);
Christoph Hellwig43c2bbd2007-04-23 21:08:07 +02001518 return 0;
1519}
1520
Arnd Bergmanna33a7d72006-03-23 00:00:11 +01001521/* interrupt-level mfc callback function. */
1522void spufs_mfc_callback(struct spu *spu)
1523{
1524 struct spu_context *ctx = spu->ctx;
1525
Luke Browninge65c2f62007-12-20 16:39:59 +09001526 if (!ctx)
1527 return;
1528
Arnd Bergmanna33a7d72006-03-23 00:00:11 +01001529 wake_up_all(&ctx->mfc_wq);
1530
Harvey Harrisone48b1b42008-03-29 08:21:07 +11001531 pr_debug("%s %s\n", __func__, spu->name);
Arnd Bergmanna33a7d72006-03-23 00:00:11 +01001532 if (ctx->mfc_fasync) {
1533 u32 free_elements, tagstatus;
1534 unsigned int mask;
1535
1536 /* no need for spu_acquire in interrupt context */
1537 free_elements = ctx->ops->get_mfc_free_elements(ctx);
1538 tagstatus = ctx->ops->read_mfc_tagstatus(ctx);
1539
1540 mask = 0;
1541 if (free_elements & 0xffff)
1542 mask |= POLLOUT;
1543 if (tagstatus & ctx->tagwait)
1544 mask |= POLLIN;
1545
1546 kill_fasync(&ctx->mfc_fasync, SIGIO, mask);
1547 }
1548}
1549
/*
 * Condition helper for spufs_wait() in spufs_mfc_read(): returns 1 and
 * fills *status when at least one awaited tag group has completed
 * (clearing those bits from tagwait), 0 to keep waiting after arming
 * the MFC completion interrupt.
 */
static int spufs_read_mfc_tagstatus(struct spu_context *ctx, u32 *status)
{
	/* See if there is one tag group is complete */
	/* FIXME we need locking around tagwait */
	*status = ctx->ops->read_mfc_tagstatus(ctx) & ctx->tagwait;
	ctx->tagwait &= ~*status;
	if (*status)
		return 1;

	/* enable interrupt waiting for any tag group,
	   may silently fail if interrupts are already enabled */
	ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
	return 0;
}
1564
1565static ssize_t spufs_mfc_read(struct file *file, char __user *buffer,
1566 size_t size, loff_t *pos)
1567{
1568 struct spu_context *ctx = file->private_data;
1569 int ret = -EINVAL;
1570 u32 status;
1571
1572 if (size != 4)
1573 goto out;
1574
Christoph Hellwigc9101bd2007-12-20 16:39:59 +09001575 ret = spu_acquire(ctx);
1576 if (ret)
1577 return ret;
1578
1579 ret = -EINVAL;
Arnd Bergmanna33a7d72006-03-23 00:00:11 +01001580 if (file->f_flags & O_NONBLOCK) {
1581 status = ctx->ops->read_mfc_tagstatus(ctx);
1582 if (!(status & ctx->tagwait))
1583 ret = -EAGAIN;
1584 else
Christoph Hellwigc9101bd2007-12-20 16:39:59 +09001585 /* XXX(hch): shouldn't we clear ret here? */
Arnd Bergmanna33a7d72006-03-23 00:00:11 +01001586 ctx->tagwait &= ~status;
1587 } else {
1588 ret = spufs_wait(ctx->mfc_wq,
1589 spufs_read_mfc_tagstatus(ctx, &status));
Christoph Hellwigeebead52008-02-08 15:50:41 +11001590 if (ret)
1591 goto out;
Arnd Bergmanna33a7d72006-03-23 00:00:11 +01001592 }
1593 spu_release(ctx);
1594
Arnd Bergmanna33a7d72006-03-23 00:00:11 +01001595 ret = 4;
1596 if (copy_to_user(buffer, &status, 4))
1597 ret = -EFAULT;
1598
1599out:
1600 return ret;
1601}
1602
/*
 * Validate a DMA command submitted by user space before it is handed
 * to the MFC: opcode must be a plain put/get variant, local store and
 * effective addresses must agree in their low 4 bits and be naturally
 * aligned for the transfer size, size <= 16k, tag < 16, class == 0.
 * Returns 0 if acceptable, -EIO otherwise.
 */
static int spufs_check_valid_dma(struct mfc_dma_command *cmd)
{
	pr_debug("queueing DMA %x %llx %x %x %x\n", cmd->lsa,
		 cmd->ea, cmd->size, cmd->tag, cmd->cmd);

	switch (cmd->cmd) {
	case MFC_PUT_CMD:
	case MFC_PUTF_CMD:
	case MFC_PUTB_CMD:
	case MFC_GET_CMD:
	case MFC_GETF_CMD:
	case MFC_GETB_CMD:
		break;
	default:
		pr_debug("invalid DMA opcode %x\n", cmd->cmd);
		return -EIO;
	}

	/* source and destination must share the same sub-16-byte offset */
	if ((cmd->lsa & 0xf) != (cmd->ea &0xf)) {
		pr_debug("invalid DMA alignment, ea %llx lsa %x\n",
			 cmd->ea, cmd->lsa);
		return -EIO;
	}

	/*
	 * The low nibble of the size selects the required alignment:
	 * 1/2/4/8 byte transfers need matching lsa alignment; a residue
	 * of 0 means multiples of 16 bytes, needing 16-byte alignment.
	 * Note the error: label sits just before default: so the gotos
	 * below fall into the same diagnostic/return path.
	 */
	switch (cmd->size & 0xf) {
	case 1:
		break;
	case 2:
		if (cmd->lsa & 1)
			goto error;
		break;
	case 4:
		if (cmd->lsa & 3)
			goto error;
		break;
	case 8:
		if (cmd->lsa & 7)
			goto error;
		break;
	case 0:
		if (cmd->lsa & 15)
			goto error;
		break;
	error:
	default:
		pr_debug("invalid DMA alignment %x for size %x\n",
			 cmd->lsa & 0xf, cmd->size);
		return -EIO;
	}

	if (cmd->size > 16 * 1024) {
		pr_debug("invalid DMA size %x\n", cmd->size);
		return -EIO;
	}

	if (cmd->tag & 0xfff0) {
		/* we reserve the higher tag numbers for kernel use */
		pr_debug("invalid DMA tag\n");
		return -EIO;
	}

	if (cmd->class) {
		/* not supported in this version */
		pr_debug("invalid DMA class\n");
		return -EIO;
	}

	return 0;
}
1672
/*
 * Condition helper for spufs_wait() in spufs_mfc_write(): try to queue
 * the DMA command, retrying once after arming the tag-completion
 * interrupt.  Returns 1 when done (send result in *error), 0 to make
 * the caller sleep until queue space frees up.
 * Note: cmd is passed by value so each retry uses a pristine copy.
 */
static int spu_send_mfc_command(struct spu_context *ctx,
				struct mfc_dma_command cmd,
				int *error)
{
	*error = ctx->ops->send_mfc_command(ctx, &cmd);
	if (*error == -EAGAIN) {
		/* wait for any tag group to complete
		   so we have space for the new command */
		ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
		/* try again, because the queue might be
		   empty again */
		*error = ctx->ops->send_mfc_command(ctx, &cmd);
		if (*error == -EAGAIN)
			return 0;
	}
	return 1;
}
1690
/*
 * Queue one MFC DMA command written by user space.  The write must be
 * exactly one struct mfc_dma_command; the command is validated, the
 * context must reach SPU_STATE_RUNNABLE, and on success the command's
 * tag is added to tagwait and sizeof(cmd) is returned.
 */
static ssize_t spufs_mfc_write(struct file *file, const char __user *buffer,
			size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	struct mfc_dma_command cmd;
	int ret = -EINVAL;

	if (size != sizeof cmd)
		goto out;

	ret = -EFAULT;
	if (copy_from_user(&cmd, buffer, sizeof cmd))
		goto out;

	ret = spufs_check_valid_dma(&cmd);
	if (ret)
		goto out;

	ret = spu_acquire(ctx);
	if (ret)
		goto out;

	/* NOTE(review): on error spufs_wait is assumed to have dropped the
	   state mutex already, hence "goto out" and not out_unlock */
	ret = spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
	if (ret)
		goto out;

	if (file->f_flags & O_NONBLOCK) {
		ret = ctx->ops->send_mfc_command(ctx, &cmd);
	} else {
		int status;
		/* sleep until the command could be queued */
		ret = spufs_wait(ctx->mfc_wq,
				 spu_send_mfc_command(ctx, cmd, &status));
		if (ret)
			goto out;
		if (status)
			ret = status;
	}

	if (ret)
		goto out_unlock;

	/* remember the tag so read/poll can report its completion */
	ctx->tagwait |= 1 << cmd.tag;
	ret = size;

out_unlock:
	spu_release(ctx);
out:
	return ret;
}
1740
/*
 * poll() on the mfc file: writable when the MFC command queue has free
 * slots, readable when an awaited tag group has completed.
 */
static unsigned int spufs_mfc_poll(struct file *file,poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	u32 free_elements, tagstatus;
	unsigned int mask;

	poll_wait(file, &ctx->mfc_wq, wait);

	/*
	 * For now keep this uninterruptible and also ignore the rule
	 * that poll should not sleep. Will be fixed later.
	 */
	mutex_lock(&ctx->state_mutex);
	/* mode 2: query for "all of these tags", per ops implementation */
	ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2);
	free_elements = ctx->ops->get_mfc_free_elements(ctx);
	tagstatus = ctx->ops->read_mfc_tagstatus(ctx);
	spu_release(ctx);

	mask = 0;
	if (free_elements & 0xffff)
		mask |= POLLOUT | POLLWRNORM;
	if (tagstatus & ctx->tagwait)
		mask |= POLLIN | POLLRDNORM;

	pr_debug("%s: free %d tagstatus %d tagwait %d\n", __func__,
		free_elements, tagstatus, ctx->tagwait);

	return mask;
}
1770
Al Viro73b6af82006-06-25 16:42:33 -07001771static int spufs_mfc_flush(struct file *file, fl_owner_t id)
Arnd Bergmanna33a7d72006-03-23 00:00:11 +01001772{
1773 struct spu_context *ctx = file->private_data;
1774 int ret;
1775
Christoph Hellwigc9101bd2007-12-20 16:39:59 +09001776 ret = spu_acquire(ctx);
1777 if (ret)
Christoph Hellwigeebead52008-02-08 15:50:41 +11001778 goto out;
Arnd Bergmanna33a7d72006-03-23 00:00:11 +01001779#if 0
1780/* this currently hangs */
1781 ret = spufs_wait(ctx->mfc_wq,
1782 ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2));
1783 if (ret)
1784 goto out;
1785 ret = spufs_wait(ctx->mfc_wq,
1786 ctx->ops->read_mfc_tagstatus(ctx) == ctx->tagwait);
Christoph Hellwigeebead52008-02-08 15:50:41 +11001787 if (ret)
1788 goto out;
Arnd Bergmanna33a7d72006-03-23 00:00:11 +01001789#else
1790 ret = 0;
1791#endif
1792 spu_release(ctx);
Christoph Hellwigeebead52008-02-08 15:50:41 +11001793out:
Arnd Bergmanna33a7d72006-03-23 00:00:11 +01001794 return ret;
1795}
1796
Josef Bacik02c24a82011-07-16 20:44:56 -04001797static int spufs_mfc_fsync(struct file *file, loff_t start, loff_t end, int datasync)
Arnd Bergmanna33a7d72006-03-23 00:00:11 +01001798{
Al Viro496ad9a2013-01-23 17:07:38 -05001799 struct inode *inode = file_inode(file);
Josef Bacik02c24a82011-07-16 20:44:56 -04001800 int err = filemap_write_and_wait_range(inode->i_mapping, start, end);
1801 if (!err) {
Al Viro59551022016-01-22 15:40:57 -05001802 inode_lock(inode);
Josef Bacik02c24a82011-07-16 20:44:56 -04001803 err = spufs_mfc_flush(file, NULL);
Al Viro59551022016-01-22 15:40:57 -05001804 inode_unlock(inode);
Josef Bacik02c24a82011-07-16 20:44:56 -04001805 }
1806 return err;
Arnd Bergmanna33a7d72006-03-23 00:00:11 +01001807}
1808
/* Register/unregister for SIGIO delivery from spufs_mfc_callback(). */
static int spufs_mfc_fasync(int fd, struct file *file, int on)
{
	struct spu_context *ctx = file->private_data;

	return fasync_helper(fd, file, on, &ctx->mfc_fasync);
}
1815
Arjan van de Ven5dfe4c92007-02-12 00:55:31 -08001816static const struct file_operations spufs_mfc_fops = {
Arnd Bergmanna33a7d72006-03-23 00:00:11 +01001817 .open = spufs_mfc_open,
Christoph Hellwig43c2bbd2007-04-23 21:08:07 +02001818 .release = spufs_mfc_release,
Arnd Bergmanna33a7d72006-03-23 00:00:11 +01001819 .read = spufs_mfc_read,
1820 .write = spufs_mfc_write,
1821 .poll = spufs_mfc_poll,
1822 .flush = spufs_mfc_flush,
1823 .fsync = spufs_mfc_fsync,
1824 .fasync = spufs_mfc_fasync,
Mark Nutter6df10a82006-03-23 00:00:12 +01001825 .mmap = spufs_mfc_mmap,
Arnd Bergmannfc153512010-09-14 10:22:33 +00001826 .llseek = no_llseek,
Arnd Bergmanna33a7d72006-03-23 00:00:11 +01001827};
1828
Christoph Hellwig197b1a82007-12-20 16:39:59 +09001829static int spufs_npc_set(void *data, u64 val)
Arnd Bergmann67207b92005-11-15 15:53:48 -05001830{
1831 struct spu_context *ctx = data;
Christoph Hellwigc9101bd2007-12-20 16:39:59 +09001832 int ret;
1833
1834 ret = spu_acquire(ctx);
1835 if (ret)
1836 return ret;
Arnd Bergmann8b3d6662005-11-15 15:53:52 -05001837 ctx->ops->npc_write(ctx, val);
1838 spu_release(ctx);
Christoph Hellwig197b1a82007-12-20 16:39:59 +09001839
1840 return 0;
Arnd Bergmann67207b92005-11-15 15:53:48 -05001841}
1842
Michael Ellerman104f0cc2007-09-19 14:38:12 +10001843static u64 spufs_npc_get(struct spu_context *ctx)
Michael Ellerman78810ff2007-09-19 14:38:12 +10001844{
1845 return ctx->ops->npc_read(ctx);
1846}
Michael Ellerman104f0cc2007-09-19 14:38:12 +10001847DEFINE_SPUFS_ATTRIBUTE(spufs_npc_ops, spufs_npc_get, spufs_npc_set,
1848 "0x%llx\n", SPU_ATTR_ACQUIRE);
Arnd Bergmann67207b92005-11-15 15:53:48 -05001849
Christoph Hellwig197b1a82007-12-20 16:39:59 +09001850static int spufs_decr_set(void *data, u64 val)
Arnd Bergmann8b3d6662005-11-15 15:53:52 -05001851{
1852 struct spu_context *ctx = data;
1853 struct spu_lscsa *lscsa = ctx->csa.lscsa;
Christoph Hellwigc9101bd2007-12-20 16:39:59 +09001854 int ret;
1855
1856 ret = spu_acquire_saved(ctx);
1857 if (ret)
1858 return ret;
Arnd Bergmann8b3d6662005-11-15 15:53:52 -05001859 lscsa->decr.slot[0] = (u32) val;
Christoph Hellwig27b1ea02007-07-20 21:39:34 +02001860 spu_release_saved(ctx);
Christoph Hellwig197b1a82007-12-20 16:39:59 +09001861
1862 return 0;
Arnd Bergmann8b3d6662005-11-15 15:53:52 -05001863}
1864
Michael Ellerman104f0cc2007-09-19 14:38:12 +10001865static u64 spufs_decr_get(struct spu_context *ctx)
Arnd Bergmann8b3d6662005-11-15 15:53:52 -05001866{
Arnd Bergmann8b3d6662005-11-15 15:53:52 -05001867 struct spu_lscsa *lscsa = ctx->csa.lscsa;
Dwayne Grant McConnellbf1ab972006-11-23 00:46:37 +01001868 return lscsa->decr.slot[0];
1869}
Michael Ellerman104f0cc2007-09-19 14:38:12 +10001870DEFINE_SPUFS_ATTRIBUTE(spufs_decr_ops, spufs_decr_get, spufs_decr_set,
1871 "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED);
Arnd Bergmann8b3d6662005-11-15 15:53:52 -05001872
Christoph Hellwig197b1a82007-12-20 16:39:59 +09001873static int spufs_decr_status_set(void *data, u64 val)
Arnd Bergmann8b3d6662005-11-15 15:53:52 -05001874{
1875 struct spu_context *ctx = data;
Christoph Hellwigc9101bd2007-12-20 16:39:59 +09001876 int ret;
1877
1878 ret = spu_acquire_saved(ctx);
1879 if (ret)
1880 return ret;
Masato Noguchid40a01d2007-07-20 21:39:38 +02001881 if (val)
1882 ctx->csa.priv2.mfc_control_RW |= MFC_CNTL_DECREMENTER_RUNNING;
1883 else
1884 ctx->csa.priv2.mfc_control_RW &= ~MFC_CNTL_DECREMENTER_RUNNING;
Christoph Hellwig27b1ea02007-07-20 21:39:34 +02001885 spu_release_saved(ctx);
Christoph Hellwig197b1a82007-12-20 16:39:59 +09001886
1887 return 0;
Arnd Bergmann8b3d6662005-11-15 15:53:52 -05001888}
1889
Michael Ellerman104f0cc2007-09-19 14:38:12 +10001890static u64 spufs_decr_status_get(struct spu_context *ctx)
Arnd Bergmann8b3d6662005-11-15 15:53:52 -05001891{
Masato Noguchid40a01d2007-07-20 21:39:38 +02001892 if (ctx->csa.priv2.mfc_control_RW & MFC_CNTL_DECREMENTER_RUNNING)
1893 return SPU_DECR_STATUS_RUNNING;
1894 else
1895 return 0;
Dwayne Grant McConnellbf1ab972006-11-23 00:46:37 +01001896}
Michael Ellerman104f0cc2007-09-19 14:38:12 +10001897DEFINE_SPUFS_ATTRIBUTE(spufs_decr_status_ops, spufs_decr_status_get,
1898 spufs_decr_status_set, "0x%llx\n",
1899 SPU_ATTR_ACQUIRE_SAVED);
Arnd Bergmann8b3d6662005-11-15 15:53:52 -05001900
Christoph Hellwig197b1a82007-12-20 16:39:59 +09001901static int spufs_event_mask_set(void *data, u64 val)
Arnd Bergmann8b3d6662005-11-15 15:53:52 -05001902{
1903 struct spu_context *ctx = data;
1904 struct spu_lscsa *lscsa = ctx->csa.lscsa;
Christoph Hellwigc9101bd2007-12-20 16:39:59 +09001905 int ret;
1906
1907 ret = spu_acquire_saved(ctx);
1908 if (ret)
1909 return ret;
Arnd Bergmann8b3d6662005-11-15 15:53:52 -05001910 lscsa->event_mask.slot[0] = (u32) val;
Christoph Hellwig27b1ea02007-07-20 21:39:34 +02001911 spu_release_saved(ctx);
Christoph Hellwig197b1a82007-12-20 16:39:59 +09001912
1913 return 0;
Arnd Bergmann8b3d6662005-11-15 15:53:52 -05001914}
1915
Michael Ellerman104f0cc2007-09-19 14:38:12 +10001916static u64 spufs_event_mask_get(struct spu_context *ctx)
Arnd Bergmann8b3d6662005-11-15 15:53:52 -05001917{
Arnd Bergmann8b3d6662005-11-15 15:53:52 -05001918 struct spu_lscsa *lscsa = ctx->csa.lscsa;
Dwayne Grant McConnellbf1ab972006-11-23 00:46:37 +01001919 return lscsa->event_mask.slot[0];
1920}
1921
Michael Ellerman104f0cc2007-09-19 14:38:12 +10001922DEFINE_SPUFS_ATTRIBUTE(spufs_event_mask_ops, spufs_event_mask_get,
1923 spufs_event_mask_set, "0x%llx\n",
1924 SPU_ATTR_ACQUIRE_SAVED);
Arnd Bergmann8b3d6662005-11-15 15:53:52 -05001925
Michael Ellerman104f0cc2007-09-19 14:38:12 +10001926static u64 spufs_event_status_get(struct spu_context *ctx)
Dwayne Grant McConnellb9e3bd72006-11-20 18:44:58 +01001927{
Dwayne Grant McConnellb9e3bd72006-11-20 18:44:58 +01001928 struct spu_state *state = &ctx->csa;
Dwayne Grant McConnellb9e3bd72006-11-20 18:44:58 +01001929 u64 stat;
Dwayne Grant McConnellb9e3bd72006-11-20 18:44:58 +01001930 stat = state->spu_chnlcnt_RW[0];
1931 if (stat)
Dwayne Grant McConnellbf1ab972006-11-23 00:46:37 +01001932 return state->spu_chnldata_RW[0];
1933 return 0;
1934}
Michael Ellerman104f0cc2007-09-19 14:38:12 +10001935DEFINE_SPUFS_ATTRIBUTE(spufs_event_status_ops, spufs_event_status_get,
1936 NULL, "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED)
Dwayne Grant McConnellb9e3bd72006-11-20 18:44:58 +01001937
Christoph Hellwig197b1a82007-12-20 16:39:59 +09001938static int spufs_srr0_set(void *data, u64 val)
Arnd Bergmann8b3d6662005-11-15 15:53:52 -05001939{
1940 struct spu_context *ctx = data;
1941 struct spu_lscsa *lscsa = ctx->csa.lscsa;
Christoph Hellwigc9101bd2007-12-20 16:39:59 +09001942 int ret;
1943
1944 ret = spu_acquire_saved(ctx);
1945 if (ret)
1946 return ret;
Arnd Bergmann8b3d6662005-11-15 15:53:52 -05001947 lscsa->srr0.slot[0] = (u32) val;
Christoph Hellwig27b1ea02007-07-20 21:39:34 +02001948 spu_release_saved(ctx);
Christoph Hellwig197b1a82007-12-20 16:39:59 +09001949
1950 return 0;
Arnd Bergmann8b3d6662005-11-15 15:53:52 -05001951}
1952
Michael Ellerman104f0cc2007-09-19 14:38:12 +10001953static u64 spufs_srr0_get(struct spu_context *ctx)
Arnd Bergmann8b3d6662005-11-15 15:53:52 -05001954{
Arnd Bergmann8b3d6662005-11-15 15:53:52 -05001955 struct spu_lscsa *lscsa = ctx->csa.lscsa;
Michael Ellerman104f0cc2007-09-19 14:38:12 +10001956 return lscsa->srr0.slot[0];
Arnd Bergmann8b3d6662005-11-15 15:53:52 -05001957}
Michael Ellerman104f0cc2007-09-19 14:38:12 +10001958DEFINE_SPUFS_ATTRIBUTE(spufs_srr0_ops, spufs_srr0_get, spufs_srr0_set,
1959 "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED)
Arnd Bergmann8b3d6662005-11-15 15:53:52 -05001960
Michael Ellerman104f0cc2007-09-19 14:38:12 +10001961static u64 spufs_id_get(struct spu_context *ctx)
arnd@arndb.de7b1a7012006-06-19 20:33:24 +02001962{
arnd@arndb.de7b1a7012006-06-19 20:33:24 +02001963 u64 num;
1964
arnd@arndb.de7b1a7012006-06-19 20:33:24 +02001965 if (ctx->state == SPU_STATE_RUNNABLE)
1966 num = ctx->spu->number;
1967 else
1968 num = (unsigned int)-1;
arnd@arndb.de7b1a7012006-06-19 20:33:24 +02001969
1970 return num;
1971}
Michael Ellerman104f0cc2007-09-19 14:38:12 +10001972DEFINE_SPUFS_ATTRIBUTE(spufs_id_ops, spufs_id_get, NULL, "0x%llx\n",
1973 SPU_ATTR_ACQUIRE)
arnd@arndb.de7b1a7012006-06-19 20:33:24 +02001974
Michael Ellerman104f0cc2007-09-19 14:38:12 +10001975static u64 spufs_object_id_get(struct spu_context *ctx)
Dwayne Grant McConnellbf1ab972006-11-23 00:46:37 +01001976{
1977 /* FIXME: Should there really be no locking here? */
Michael Ellerman104f0cc2007-09-19 14:38:12 +10001978 return ctx->object_id;
Dwayne Grant McConnellbf1ab972006-11-23 00:46:37 +01001979}
1980
Christoph Hellwig197b1a82007-12-20 16:39:59 +09001981static int spufs_object_id_set(void *data, u64 id)
Arnd Bergmann86767272006-10-04 17:26:21 +02001982{
1983 struct spu_context *ctx = data;
1984 ctx->object_id = id;
Christoph Hellwig197b1a82007-12-20 16:39:59 +09001985
1986 return 0;
Arnd Bergmann86767272006-10-04 17:26:21 +02001987}
1988
Michael Ellerman104f0cc2007-09-19 14:38:12 +10001989DEFINE_SPUFS_ATTRIBUTE(spufs_object_id_ops, spufs_object_id_get,
1990 spufs_object_id_set, "0x%llx\n", SPU_ATTR_NOACQUIRE);
Arnd Bergmann86767272006-10-04 17:26:21 +02001991
Michael Ellerman104f0cc2007-09-19 14:38:12 +10001992static u64 spufs_lslr_get(struct spu_context *ctx)
Dwayne Grant McConnellbf1ab972006-11-23 00:46:37 +01001993{
Dwayne Grant McConnellbf1ab972006-11-23 00:46:37 +01001994 return ctx->csa.priv2.spu_lslr_RW;
1995}
Michael Ellerman104f0cc2007-09-19 14:38:12 +10001996DEFINE_SPUFS_ATTRIBUTE(spufs_lslr_ops, spufs_lslr_get, NULL, "0x%llx\n",
1997 SPU_ATTR_ACQUIRE_SAVED);
Dwayne Grant McConnellb9e3bd72006-11-20 18:44:58 +01001998
1999static int spufs_info_open(struct inode *inode, struct file *file)
2000{
2001 struct spufs_inode_info *i = SPUFS_I(inode);
2002 struct spu_context *ctx = i->i_ctx;
2003 file->private_data = ctx;
2004 return 0;
2005}
2006
Benjamin Herrenschmidtcbe709c2007-06-04 15:15:38 +10002007static int spufs_caps_show(struct seq_file *s, void *private)
2008{
2009 struct spu_context *ctx = s->private;
2010
2011 if (!(ctx->flags & SPU_CREATE_NOSCHED))
2012 seq_puts(s, "sched\n");
2013 if (!(ctx->flags & SPU_CREATE_ISOLATE))
2014 seq_puts(s, "step\n");
2015 return 0;
2016}
2017
/* Open the "capabilities" file as a single-record seq_file. */
static int spufs_caps_open(struct inode *inode, struct file *file)
{
	return single_open(file, spufs_caps_show, SPUFS_I(inode)->i_ctx);
}
2022
/* File operations for the capabilities file (standard seq_file plumbing). */
static const struct file_operations spufs_caps_fops = {
	.open		= spufs_caps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
2029
Dwayne Grant McConnellbf1ab972006-11-23 00:46:37 +01002030static ssize_t __spufs_mbox_info_read(struct spu_context *ctx,
2031 char __user *buf, size_t len, loff_t *pos)
2032{
Dwayne Grant McConnellbf1ab972006-11-23 00:46:37 +01002033 u32 data;
2034
Jeremy Kerrcbea9232007-12-20 16:39:59 +09002035 /* EOF if there's no entry in the mbox */
2036 if (!(ctx->csa.prob.mb_stat_R & 0x0000ff))
2037 return 0;
2038
2039 data = ctx->csa.prob.pu_mb_R;
Dwayne Grant McConnellbf1ab972006-11-23 00:46:37 +01002040
2041 return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
2042}
2043
Dwayne Grant McConnell69a2f002006-11-20 18:45:00 +01002044static ssize_t spufs_mbox_info_read(struct file *file, char __user *buf,
2045 size_t len, loff_t *pos)
2046{
Dwayne Grant McConnellbf1ab972006-11-23 00:46:37 +01002047 int ret;
Dwayne Grant McConnell69a2f002006-11-20 18:45:00 +01002048 struct spu_context *ctx = file->private_data;
Dwayne Grant McConnell69a2f002006-11-20 18:45:00 +01002049
2050 if (!access_ok(VERIFY_WRITE, buf, len))
2051 return -EFAULT;
2052
Christoph Hellwigc9101bd2007-12-20 16:39:59 +09002053 ret = spu_acquire_saved(ctx);
2054 if (ret)
2055 return ret;
Dwayne Grant McConnell69a2f002006-11-20 18:45:00 +01002056 spin_lock(&ctx->csa.register_lock);
Dwayne Grant McConnellbf1ab972006-11-23 00:46:37 +01002057 ret = __spufs_mbox_info_read(ctx, buf, len, pos);
Dwayne Grant McConnell69a2f002006-11-20 18:45:00 +01002058 spin_unlock(&ctx->csa.register_lock);
Christoph Hellwig27b1ea02007-07-20 21:39:34 +02002059 spu_release_saved(ctx);
Dwayne Grant McConnell69a2f002006-11-20 18:45:00 +01002060
Dwayne Grant McConnellbf1ab972006-11-23 00:46:37 +01002061 return ret;
Dwayne Grant McConnell69a2f002006-11-20 18:45:00 +01002062}
2063
Arjan van de Ven5dfe4c92007-02-12 00:55:31 -08002064static const struct file_operations spufs_mbox_info_fops = {
Dwayne Grant McConnell69a2f002006-11-20 18:45:00 +01002065 .open = spufs_info_open,
2066 .read = spufs_mbox_info_read,
2067 .llseek = generic_file_llseek,
2068};
2069
Dwayne Grant McConnellbf1ab972006-11-23 00:46:37 +01002070static ssize_t __spufs_ibox_info_read(struct spu_context *ctx,
2071 char __user *buf, size_t len, loff_t *pos)
2072{
Dwayne Grant McConnellbf1ab972006-11-23 00:46:37 +01002073 u32 data;
2074
Jeremy Kerrcbea9232007-12-20 16:39:59 +09002075 /* EOF if there's no entry in the ibox */
2076 if (!(ctx->csa.prob.mb_stat_R & 0xff0000))
2077 return 0;
2078
2079 data = ctx->csa.priv2.puint_mb_R;
Dwayne Grant McConnellbf1ab972006-11-23 00:46:37 +01002080
2081 return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
2082}
2083
Dwayne Grant McConnell69a2f002006-11-20 18:45:00 +01002084static ssize_t spufs_ibox_info_read(struct file *file, char __user *buf,
2085 size_t len, loff_t *pos)
2086{
2087 struct spu_context *ctx = file->private_data;
Dwayne Grant McConnellbf1ab972006-11-23 00:46:37 +01002088 int ret;
Dwayne Grant McConnell69a2f002006-11-20 18:45:00 +01002089
2090 if (!access_ok(VERIFY_WRITE, buf, len))
2091 return -EFAULT;
2092
Christoph Hellwigc9101bd2007-12-20 16:39:59 +09002093 ret = spu_acquire_saved(ctx);
2094 if (ret)
2095 return ret;
Dwayne Grant McConnell69a2f002006-11-20 18:45:00 +01002096 spin_lock(&ctx->csa.register_lock);
Dwayne Grant McConnellbf1ab972006-11-23 00:46:37 +01002097 ret = __spufs_ibox_info_read(ctx, buf, len, pos);
Dwayne Grant McConnell69a2f002006-11-20 18:45:00 +01002098 spin_unlock(&ctx->csa.register_lock);
Christoph Hellwig27b1ea02007-07-20 21:39:34 +02002099 spu_release_saved(ctx);
Dwayne Grant McConnell69a2f002006-11-20 18:45:00 +01002100
Dwayne Grant McConnellbf1ab972006-11-23 00:46:37 +01002101 return ret;
Dwayne Grant McConnell69a2f002006-11-20 18:45:00 +01002102}
2103
Arjan van de Ven5dfe4c92007-02-12 00:55:31 -08002104static const struct file_operations spufs_ibox_info_fops = {
Dwayne Grant McConnell69a2f002006-11-20 18:45:00 +01002105 .open = spufs_info_open,
2106 .read = spufs_ibox_info_read,
2107 .llseek = generic_file_llseek,
2108};
2109
Dwayne Grant McConnellbf1ab972006-11-23 00:46:37 +01002110static ssize_t __spufs_wbox_info_read(struct spu_context *ctx,
2111 char __user *buf, size_t len, loff_t *pos)
2112{
2113 int i, cnt;
2114 u32 data[4];
2115 u32 wbox_stat;
2116
2117 wbox_stat = ctx->csa.prob.mb_stat_R;
2118 cnt = 4 - ((wbox_stat & 0x00ff00) >> 8);
2119 for (i = 0; i < cnt; i++) {
2120 data[i] = ctx->csa.spu_mailbox_data[i];
2121 }
2122
2123 return simple_read_from_buffer(buf, len, pos, &data,
2124 cnt * sizeof(u32));
2125}
2126
Dwayne Grant McConnell69a2f002006-11-20 18:45:00 +01002127static ssize_t spufs_wbox_info_read(struct file *file, char __user *buf,
2128 size_t len, loff_t *pos)
2129{
2130 struct spu_context *ctx = file->private_data;
Dwayne Grant McConnellbf1ab972006-11-23 00:46:37 +01002131 int ret;
Dwayne Grant McConnell69a2f002006-11-20 18:45:00 +01002132
2133 if (!access_ok(VERIFY_WRITE, buf, len))
2134 return -EFAULT;
2135
Christoph Hellwigc9101bd2007-12-20 16:39:59 +09002136 ret = spu_acquire_saved(ctx);
2137 if (ret)
2138 return ret;
Dwayne Grant McConnell69a2f002006-11-20 18:45:00 +01002139 spin_lock(&ctx->csa.register_lock);
Dwayne Grant McConnellbf1ab972006-11-23 00:46:37 +01002140 ret = __spufs_wbox_info_read(ctx, buf, len, pos);
Dwayne Grant McConnell69a2f002006-11-20 18:45:00 +01002141 spin_unlock(&ctx->csa.register_lock);
Christoph Hellwig27b1ea02007-07-20 21:39:34 +02002142 spu_release_saved(ctx);
Dwayne Grant McConnell69a2f002006-11-20 18:45:00 +01002143
Dwayne Grant McConnellbf1ab972006-11-23 00:46:37 +01002144 return ret;
Dwayne Grant McConnell69a2f002006-11-20 18:45:00 +01002145}
2146
Arjan van de Ven5dfe4c92007-02-12 00:55:31 -08002147static const struct file_operations spufs_wbox_info_fops = {
Dwayne Grant McConnell69a2f002006-11-20 18:45:00 +01002148 .open = spufs_info_open,
2149 .read = spufs_wbox_info_read,
2150 .llseek = generic_file_llseek,
2151};
2152
Dwayne Grant McConnellbf1ab972006-11-23 00:46:37 +01002153static ssize_t __spufs_dma_info_read(struct spu_context *ctx,
2154 char __user *buf, size_t len, loff_t *pos)
Dwayne Grant McConnellb9e3bd72006-11-20 18:44:58 +01002155{
Dwayne Grant McConnellb9e3bd72006-11-20 18:44:58 +01002156 struct spu_dma_info info;
2157 struct mfc_cq_sr *qp, *spuqp;
2158 int i;
2159
Dwayne Grant McConnellb9e3bd72006-11-20 18:44:58 +01002160 info.dma_info_type = ctx->csa.priv2.spu_tag_status_query_RW;
2161 info.dma_info_mask = ctx->csa.lscsa->tag_mask.slot[0];
2162 info.dma_info_status = ctx->csa.spu_chnldata_RW[24];
2163 info.dma_info_stall_and_notify = ctx->csa.spu_chnldata_RW[25];
2164 info.dma_info_atomic_command_status = ctx->csa.spu_chnldata_RW[27];
2165 for (i = 0; i < 16; i++) {
2166 qp = &info.dma_info_command_data[i];
2167 spuqp = &ctx->csa.priv2.spuq[i];
2168
2169 qp->mfc_cq_data0_RW = spuqp->mfc_cq_data0_RW;
2170 qp->mfc_cq_data1_RW = spuqp->mfc_cq_data1_RW;
2171 qp->mfc_cq_data2_RW = spuqp->mfc_cq_data2_RW;
2172 qp->mfc_cq_data3_RW = spuqp->mfc_cq_data3_RW;
2173 }
Dwayne Grant McConnellb9e3bd72006-11-20 18:44:58 +01002174
2175 return simple_read_from_buffer(buf, len, pos, &info,
2176 sizeof info);
2177}
2178
/*
 * read() entry for the "dma_info" file: force the context into the
 * saved state, then snapshot its DMA info under the register lock.
 */
static ssize_t spufs_dma_info_read(struct file *file, char __user *buf,
			      size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	/* May sleep while the context is scheduled out and saved. */
	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	/*
	 * NOTE(review): __spufs_dma_info_read() ends in
	 * simple_read_from_buffer(), i.e. copy_to_user() runs with this
	 * spinlock held and may fault/sleep in atomic context.  Consider
	 * snapshotting under the lock and copying out after unlocking.
	 */
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_dma_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return ret;
}
2198
/* Read-only snapshot of the saved MFC DMA queue state. */
static const struct file_operations spufs_dma_info_fops = {
	.open = spufs_info_open,
	.read = spufs_dma_info_read,
	.llseek = no_llseek,
};
2204
Dwayne Grant McConnellbf1ab972006-11-23 00:46:37 +01002205static ssize_t __spufs_proxydma_info_read(struct spu_context *ctx,
2206 char __user *buf, size_t len, loff_t *pos)
Dwayne Grant McConnellb9e3bd72006-11-20 18:44:58 +01002207{
Dwayne Grant McConnellb9e3bd72006-11-20 18:44:58 +01002208 struct spu_proxydma_info info;
Dwayne Grant McConnellb9e3bd72006-11-20 18:44:58 +01002209 struct mfc_cq_sr *qp, *puqp;
Dwayne Grant McConnellbf1ab972006-11-23 00:46:37 +01002210 int ret = sizeof info;
Dwayne Grant McConnellb9e3bd72006-11-20 18:44:58 +01002211 int i;
2212
2213 if (len < ret)
2214 return -EINVAL;
2215
2216 if (!access_ok(VERIFY_WRITE, buf, len))
2217 return -EFAULT;
2218
Dwayne Grant McConnellb9e3bd72006-11-20 18:44:58 +01002219 info.proxydma_info_type = ctx->csa.prob.dma_querytype_RW;
2220 info.proxydma_info_mask = ctx->csa.prob.dma_querymask_RW;
2221 info.proxydma_info_status = ctx->csa.prob.dma_tagstatus_R;
2222 for (i = 0; i < 8; i++) {
2223 qp = &info.proxydma_info_command_data[i];
2224 puqp = &ctx->csa.priv2.puq[i];
2225
2226 qp->mfc_cq_data0_RW = puqp->mfc_cq_data0_RW;
2227 qp->mfc_cq_data1_RW = puqp->mfc_cq_data1_RW;
2228 qp->mfc_cq_data2_RW = puqp->mfc_cq_data2_RW;
2229 qp->mfc_cq_data3_RW = puqp->mfc_cq_data3_RW;
2230 }
Dwayne Grant McConnellbf1ab972006-11-23 00:46:37 +01002231
2232 return simple_read_from_buffer(buf, len, pos, &info,
2233 sizeof info);
2234}
2235
/*
 * read() entry for the "proxydma_info" file: force the context into
 * the saved state, then snapshot its proxy-DMA info under the
 * register lock.  Buffer validation happens inside the helper.
 */
static ssize_t spufs_proxydma_info_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	/*
	 * NOTE(review): the helper copies to userspace via
	 * simple_read_from_buffer() while this spinlock is held, which
	 * may fault/sleep in atomic context — same issue as
	 * spufs_dma_info_read().
	 */
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_proxydma_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return ret;
}
2252
/* Read-only snapshot of the saved proxy-DMA queue state. */
static const struct file_operations spufs_proxydma_info_fops = {
	.open = spufs_info_open,
	.read = spufs_proxydma_info_read,
	.llseek = no_llseek,
};
2258
/* seq_file show: print the context's controlling thread id. */
static int spufs_show_tid(struct seq_file *s, void *private)
{
	struct spu_context *ctx = s->private;

	seq_printf(s, "%d\n", ctx->tid);
	return 0;
}
2266
/* open() for the "tid" file: bind the seq_file to this context. */
static int spufs_tid_open(struct inode *inode, struct file *file)
{
	return single_open(file, spufs_show_tid, SPUFS_I(inode)->i_ctx);
}
2271
/* Single-record seq_file exposing the controlling thread id. */
static const struct file_operations spufs_tid_fops = {
	.open = spufs_tid_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
2278
/* Printable names indexed by ctx->stats.util_state; first field of "stat". */
static const char *ctx_state_names[] = {
	"user", "system", "iowait", "loaded"
};
2282
2283static unsigned long long spufs_acct_time(struct spu_context *ctx,
Andre Detsch27ec41d2007-07-20 21:39:33 +02002284 enum spu_utilization_state state)
Christoph Hellwige9f8a0b2007-06-29 10:58:03 +10002285{
Andre Detsch27ec41d2007-07-20 21:39:33 +02002286 unsigned long long time = ctx->stats.times[state];
Christoph Hellwige9f8a0b2007-06-29 10:58:03 +10002287
Andre Detsch27ec41d2007-07-20 21:39:33 +02002288 /*
2289 * In general, utilization statistics are updated by the controlling
2290 * thread as the spu context moves through various well defined
2291 * state transitions, but if the context is lazily loaded its
2292 * utilization statistics are not updated as the controlling thread
2293 * is not tightly coupled with the execution of the spu context. We
2294 * calculate and apply the time delta from the last recorded state
2295 * of the spu context.
2296 */
2297 if (ctx->spu && ctx->stats.util_state == state) {
Thomas Gleixnerf2dec1e2014-07-16 21:04:38 +00002298 time += ktime_get_ns() - ctx->stats.tstamp;
Andre Detsch27ec41d2007-07-20 21:39:33 +02002299 }
Christoph Hellwige9f8a0b2007-06-29 10:58:03 +10002300
Andre Detsch27ec41d2007-07-20 21:39:33 +02002301 return time / NSEC_PER_MSEC;
Christoph Hellwige9f8a0b2007-06-29 10:58:03 +10002302}
2303
2304static unsigned long long spufs_slb_flts(struct spu_context *ctx)
2305{
2306 unsigned long long slb_flts = ctx->stats.slb_flt;
2307
2308 if (ctx->state == SPU_STATE_RUNNABLE) {
2309 slb_flts += (ctx->spu->stats.slb_flt -
2310 ctx->stats.slb_flt_base);
2311 }
2312
2313 return slb_flts;
2314}
2315
2316static unsigned long long spufs_class2_intrs(struct spu_context *ctx)
2317{
2318 unsigned long long class2_intrs = ctx->stats.class2_intr;
2319
2320 if (ctx->state == SPU_STATE_RUNNABLE) {
2321 class2_intrs += (ctx->spu->stats.class2_intr -
2322 ctx->stats.class2_intr_base);
2323 }
2324
2325 return class2_intrs;
2326}
2327
2328
/*
 * seq_file show for the "stat" file: one line of accounting data —
 * current utilization state, per-state times (ms), context switch
 * counts, fault counters, class-2 interrupts and libassist calls.
 * The field order is userspace ABI.
 */
static int spufs_show_stat(struct seq_file *s, void *private)
{
	struct spu_context *ctx = s->private;
	int ret;

	/* Hold the state mutex so the counters form a consistent snapshot. */
	ret = spu_acquire(ctx);
	if (ret)
		return ret;

	seq_printf(s, "%s %llu %llu %llu %llu "
		"%llu %llu %llu %llu %llu %llu %llu %llu\n",
		ctx_state_names[ctx->stats.util_state],
		spufs_acct_time(ctx, SPU_UTIL_USER),
		spufs_acct_time(ctx, SPU_UTIL_SYSTEM),
		spufs_acct_time(ctx, SPU_UTIL_IOWAIT),
		spufs_acct_time(ctx, SPU_UTIL_IDLE_LOADED),
		ctx->stats.vol_ctx_switch,
		ctx->stats.invol_ctx_switch,
		spufs_slb_flts(ctx),
		ctx->stats.hash_flt,
		ctx->stats.min_flt,
		ctx->stats.maj_flt,
		spufs_class2_intrs(ctx),
		ctx->stats.libassist);
	spu_release(ctx);
	return 0;
}
2356
/* open() for the "stat" file: bind the seq_file to this context. */
static int spufs_stat_open(struct inode *inode, struct file *file)
{
	return single_open(file, spufs_show_stat, SPUFS_I(inode)->i_ctx);
}
2361
/* Single-record seq_file exposing context accounting statistics. */
static const struct file_operations spufs_stat_fops = {
	.open = spufs_stat_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
2368
Christoph Hellwig5158e9b2008-04-29 17:08:38 +10002369static inline int spufs_switch_log_used(struct spu_context *ctx)
2370{
2371 return (ctx->switch_log->head - ctx->switch_log->tail) %
2372 SWITCH_LOG_BUFSIZE;
2373}
2374
2375static inline int spufs_switch_log_avail(struct spu_context *ctx)
2376{
2377 return SWITCH_LOG_BUFSIZE - spufs_switch_log_used(ctx);
2378}
2379
2380static int spufs_switch_log_open(struct inode *inode, struct file *file)
2381{
2382 struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
Jeremy Kerrf5ed0eb2008-10-16 10:03:46 +11002383 int rc;
Christoph Hellwig5158e9b2008-04-29 17:08:38 +10002384
Jeremy Kerrf5ed0eb2008-10-16 10:03:46 +11002385 rc = spu_acquire(ctx);
2386 if (rc)
2387 return rc;
2388
Christoph Hellwig5158e9b2008-04-29 17:08:38 +10002389 if (ctx->switch_log) {
Jeremy Kerrf5ed0eb2008-10-16 10:03:46 +11002390 rc = -EBUSY;
2391 goto out;
Christoph Hellwig5158e9b2008-04-29 17:08:38 +10002392 }
Jeremy Kerrf5ed0eb2008-10-16 10:03:46 +11002393
Jeremy Kerr837ef882008-10-17 12:02:31 +11002394 ctx->switch_log = kmalloc(sizeof(struct switch_log) +
Jeremy Kerrf5ed0eb2008-10-16 10:03:46 +11002395 SWITCH_LOG_BUFSIZE * sizeof(struct switch_log_entry),
2396 GFP_KERNEL);
2397
2398 if (!ctx->switch_log) {
2399 rc = -ENOMEM;
2400 goto out;
2401 }
2402
Jeremy Kerr837ef882008-10-17 12:02:31 +11002403 ctx->switch_log->head = ctx->switch_log->tail = 0;
Jeremy Kerrf5ed0eb2008-10-16 10:03:46 +11002404 init_waitqueue_head(&ctx->switch_log->wait);
2405 rc = 0;
2406
2407out:
2408 spu_release(ctx);
2409 return rc;
2410}
2411
2412static int spufs_switch_log_release(struct inode *inode, struct file *file)
2413{
2414 struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
2415 int rc;
2416
2417 rc = spu_acquire(ctx);
2418 if (rc)
2419 return rc;
2420
2421 kfree(ctx->switch_log);
2422 ctx->switch_log = NULL;
2423 spu_release(ctx);
Christoph Hellwig5158e9b2008-04-29 17:08:38 +10002424
2425 return 0;
Christoph Hellwig5158e9b2008-04-29 17:08:38 +10002426}
2427
2428static int switch_log_sprint(struct spu_context *ctx, char *tbuf, int n)
2429{
2430 struct switch_log_entry *p;
2431
2432 p = ctx->switch_log->log + ctx->switch_log->tail % SWITCH_LOG_BUFSIZE;
2433
2434 return snprintf(tbuf, n, "%u.%09u %d %u %u %llu\n",
2435 (unsigned int) p->tstamp.tv_sec,
2436 (unsigned int) p->tstamp.tv_nsec,
2437 p->spu_id,
2438 (unsigned int) p->type,
2439 (unsigned int) p->val,
2440 (unsigned long long) p->timebase);
2441}
2442
2443static ssize_t spufs_switch_log_read(struct file *file, char __user *buf,
2444 size_t len, loff_t *ppos)
2445{
Al Viro496ad9a2013-01-23 17:07:38 -05002446 struct inode *inode = file_inode(file);
Christoph Hellwig5158e9b2008-04-29 17:08:38 +10002447 struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
2448 int error = 0, cnt = 0;
2449
roel kluin17e37672009-10-14 05:32:28 +00002450 if (!buf)
Christoph Hellwig5158e9b2008-04-29 17:08:38 +10002451 return -EINVAL;
2452
Jeremy Kerrf5ed0eb2008-10-16 10:03:46 +11002453 error = spu_acquire(ctx);
2454 if (error)
2455 return error;
2456
Christoph Hellwig5158e9b2008-04-29 17:08:38 +10002457 while (cnt < len) {
2458 char tbuf[128];
2459 int width;
2460
Jeremy Kerr14f693e2008-10-16 10:51:46 +11002461 if (spufs_switch_log_used(ctx) == 0) {
2462 if (cnt > 0) {
2463 /* If there's data ready to go, we can
2464 * just return straight away */
2465 break;
2466
2467 } else if (file->f_flags & O_NONBLOCK) {
Jeremy Kerrf5ed0eb2008-10-16 10:03:46 +11002468 error = -EAGAIN;
Christoph Hellwig5158e9b2008-04-29 17:08:38 +10002469 break;
Jeremy Kerr14f693e2008-10-16 10:51:46 +11002470
2471 } else {
2472 /* spufs_wait will drop the mutex and
2473 * re-acquire, but since we're in read(), the
2474 * file cannot be _released (and so
2475 * ctx->switch_log is stable).
2476 */
2477 error = spufs_wait(ctx->switch_log->wait,
2478 spufs_switch_log_used(ctx) > 0);
2479
2480 /* On error, spufs_wait returns without the
2481 * state mutex held */
2482 if (error)
2483 return error;
2484
2485 /* We may have had entries read from underneath
2486 * us while we dropped the mutex in spufs_wait,
2487 * so re-check */
2488 if (spufs_switch_log_used(ctx) == 0)
2489 continue;
Jeremy Kerrf5ed0eb2008-10-16 10:03:46 +11002490 }
Christoph Hellwig5158e9b2008-04-29 17:08:38 +10002491 }
2492
Christoph Hellwig5158e9b2008-04-29 17:08:38 +10002493 width = switch_log_sprint(ctx, tbuf, sizeof(tbuf));
Jeremy Kerrf5ed0eb2008-10-16 10:03:46 +11002494 if (width < len)
Christoph Hellwig5158e9b2008-04-29 17:08:38 +10002495 ctx->switch_log->tail =
2496 (ctx->switch_log->tail + 1) %
2497 SWITCH_LOG_BUFSIZE;
Jeremy Kerrf5ed0eb2008-10-16 10:03:46 +11002498 else
2499 /* If the record is greater than space available return
2500 * partial buffer (so far) */
Christoph Hellwig5158e9b2008-04-29 17:08:38 +10002501 break;
2502
2503 error = copy_to_user(buf + cnt, tbuf, width);
2504 if (error)
2505 break;
2506 cnt += width;
2507 }
2508
Jeremy Kerrf5ed0eb2008-10-16 10:03:46 +11002509 spu_release(ctx);
2510
Christoph Hellwig5158e9b2008-04-29 17:08:38 +10002511 return cnt == 0 ? error : cnt;
2512}
2513
/*
 * poll() for the "switch_log" file: readable whenever unread entries
 * sit in the ring buffer.  ctx->switch_log is valid here because open
 * created it and read() keeps the file pinned.
 */
static unsigned int spufs_switch_log_poll(struct file *file, poll_table *wait)
{
	struct inode *inode = file_inode(file);
	struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
	unsigned int mask = 0;
	int rc;

	poll_wait(file, &ctx->switch_log->wait, wait);

	rc = spu_acquire(ctx);
	if (rc)
		/*
		 * NOTE(review): returning a negative errno from ->poll,
		 * whose return type is an unsigned event mask, is dubious —
		 * confirm what poll callers make of it.
		 */
		return rc;

	if (spufs_switch_log_used(ctx) > 0)
		mask |= POLLIN;

	spu_release(ctx);

	return mask;
}
2534
/* Blocking, poll-able stream of context-switch log records. */
static const struct file_operations spufs_switch_log_fops = {
	.open		= spufs_switch_log_open,
	.read		= spufs_switch_log_read,
	.poll		= spufs_switch_log_poll,
	.release	= spufs_switch_log_release,
	.llseek		= no_llseek,
};
2542
Jeremy Kerrf5ed0eb2008-10-16 10:03:46 +11002543/**
2544 * Log a context switch event to a switch log reader.
2545 *
2546 * Must be called with ctx->state_mutex held.
2547 */
Christoph Hellwig5158e9b2008-04-29 17:08:38 +10002548void spu_switch_log_notify(struct spu *spu, struct spu_context *ctx,
2549 u32 type, u32 val)
2550{
2551 if (!ctx->switch_log)
2552 return;
2553
Christoph Hellwig5158e9b2008-04-29 17:08:38 +10002554 if (spufs_switch_log_avail(ctx) > 1) {
2555 struct switch_log_entry *p;
2556
2557 p = ctx->switch_log->log + ctx->switch_log->head;
2558 ktime_get_ts(&p->tstamp);
2559 p->timebase = get_tb();
2560 p->spu_id = spu ? spu->number : -1;
2561 p->type = type;
2562 p->val = val;
2563
2564 ctx->switch_log->head =
2565 (ctx->switch_log->head + 1) % SWITCH_LOG_BUFSIZE;
2566 }
Christoph Hellwig5158e9b2008-04-29 17:08:38 +10002567
2568 wake_up(&ctx->switch_log->wait);
2569}
Christoph Hellwige9f8a0b2007-06-29 10:58:03 +10002570
/*
 * seq_file show for the debug ".ctx" file: one line summarizing the
 * context — saved/running state, flags, scheduling parameters, and the
 * MFC control register plus run-control/status as seen through the
 * context ops.  The field layout is part of the debug ABI.
 */
static int spufs_show_ctx(struct seq_file *s, void *private)
{
	struct spu_context *ctx = s->private;
	u64 mfc_control_RW;

	mutex_lock(&ctx->state_mutex);
	if (ctx->spu) {
		/* Loaded: read the live register under the SPU's lock. */
		struct spu *spu = ctx->spu;
		struct spu_priv2 __iomem *priv2 = spu->priv2;

		spin_lock_irq(&spu->register_lock);
		mfc_control_RW = in_be64(&priv2->mfc_control_RW);
		spin_unlock_irq(&spu->register_lock);
	} else {
		/* Saved: use the value captured in the context save area. */
		struct spu_state *csa = &ctx->csa;

		mfc_control_RW = csa->priv2.mfc_control_RW;
	}

	seq_printf(s, "%c flgs(%lx) sflgs(%lx) pri(%d) ts(%d) spu(%02d)"
		" %c %llx %llx %llx %llx %x %x\n",
		ctx->state == SPU_STATE_SAVED ? 'S' : 'R',
		ctx->flags,
		ctx->sched_flags,
		ctx->prio,
		ctx->time_slice,
		ctx->spu ? ctx->spu->number : -1,
		!list_empty(&ctx->rq) ? 'q' : ' ',
		ctx->csa.class_0_pending,
		ctx->csa.class_0_dar,
		ctx->csa.class_1_dsisr,
		mfc_control_RW,
		ctx->ops->runcntl_read(ctx),
		ctx->ops->status_read(ctx));

	mutex_unlock(&ctx->state_mutex);

	return 0;
}
2610
/* open() for the debug ".ctx" file: bind the seq_file to this context. */
static int spufs_ctx_open(struct inode *inode, struct file *file)
{
	return single_open(file, spufs_show_ctx, SPUFS_I(inode)->i_ctx);
}
2615
/* Single-record seq_file exposing the context debug summary. */
static const struct file_operations spufs_ctx_fops = {
	.open = spufs_ctx_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
2622
/*
 * Files created in every scheduled spufs context directory:
 * { name, fops, mode[, size] }, terminated by an empty entry.
 */
const struct spufs_tree_descr spufs_dir_contents[] = {
	{ "capabilities", &spufs_caps_fops, 0444, },
	{ "mem",  &spufs_mem_fops,  0666, LS_SIZE, },
	{ "regs", &spufs_regs_fops, 0666, sizeof(struct spu_reg128[128]), },
	{ "mbox", &spufs_mbox_fops, 0444, },
	{ "ibox", &spufs_ibox_fops, 0444, },
	{ "wbox", &spufs_wbox_fops, 0222, },
	{ "mbox_stat", &spufs_mbox_stat_fops, 0444, sizeof(u32), },
	{ "ibox_stat", &spufs_ibox_stat_fops, 0444, sizeof(u32), },
	{ "wbox_stat", &spufs_wbox_stat_fops, 0444, sizeof(u32), },
	{ "signal1", &spufs_signal1_fops, 0666, },
	{ "signal2", &spufs_signal2_fops, 0666, },
	{ "signal1_type", &spufs_signal1_type, 0666, },
	{ "signal2_type", &spufs_signal2_type, 0666, },
	{ "cntl", &spufs_cntl_fops,  0666, },
	{ "fpcr", &spufs_fpcr_fops, 0666, sizeof(struct spu_reg128), },
	{ "lslr", &spufs_lslr_ops, 0444, },
	{ "mfc", &spufs_mfc_fops, 0666, },
	{ "mss", &spufs_mss_fops, 0666, },
	{ "npc", &spufs_npc_ops, 0666, },
	{ "srr0", &spufs_srr0_ops, 0666, },
	{ "decr", &spufs_decr_ops, 0666, },
	{ "decr_status", &spufs_decr_status_ops, 0666, },
	{ "event_mask", &spufs_event_mask_ops, 0666, },
	{ "event_status", &spufs_event_status_ops, 0444, },
	{ "psmap", &spufs_psmap_fops, 0666, SPUFS_PS_MAP_SIZE, },
	{ "phys-id", &spufs_id_ops, 0666, },
	{ "object-id", &spufs_object_id_ops, 0666, },
	{ "mbox_info", &spufs_mbox_info_fops, 0444, sizeof(u32), },
	{ "ibox_info", &spufs_ibox_info_fops, 0444, sizeof(u32), },
	{ "wbox_info", &spufs_wbox_info_fops, 0444, sizeof(u32), },
	{ "dma_info", &spufs_dma_info_fops, 0444,
		sizeof(struct spu_dma_info), },
	{ "proxydma_info", &spufs_proxydma_info_fops, 0444,
		sizeof(struct spu_proxydma_info)},
	{ "tid", &spufs_tid_fops, 0444, },
	{ "stat", &spufs_stat_fops, 0444, },
	{ "switch_log", &spufs_switch_log_fops, 0444 },
	{},
};
Mark Nutter5737edd2006-10-24 18:31:16 +02002663
/*
 * Files created in non-scheduled (NOSCHED) context directories — the
 * subset of spufs_dir_contents that makes sense without the scheduler.
 */
const struct spufs_tree_descr spufs_dir_nosched_contents[] = {
	{ "capabilities", &spufs_caps_fops, 0444, },
	{ "mem",  &spufs_mem_fops,  0666, LS_SIZE, },
	{ "mbox", &spufs_mbox_fops, 0444, },
	{ "ibox", &spufs_ibox_fops, 0444, },
	{ "wbox", &spufs_wbox_fops, 0222, },
	{ "mbox_stat", &spufs_mbox_stat_fops, 0444, sizeof(u32), },
	{ "ibox_stat", &spufs_ibox_stat_fops, 0444, sizeof(u32), },
	{ "wbox_stat", &spufs_wbox_stat_fops, 0444, sizeof(u32), },
	{ "signal1", &spufs_signal1_nosched_fops, 0222, },
	{ "signal2", &spufs_signal2_nosched_fops, 0222, },
	{ "signal1_type", &spufs_signal1_type, 0666, },
	{ "signal2_type", &spufs_signal2_type, 0666, },
	{ "mss", &spufs_mss_fops, 0666, },
	{ "mfc", &spufs_mfc_fops, 0666, },
	{ "cntl", &spufs_cntl_fops, 0666, },
	{ "npc", &spufs_npc_ops, 0666, },
	{ "psmap", &spufs_psmap_fops, 0666, SPUFS_PS_MAP_SIZE, },
	{ "phys-id", &spufs_id_ops, 0666, },
	{ "object-id", &spufs_object_id_ops, 0666, },
	{ "tid", &spufs_tid_fops, 0444, },
	{ "stat", &spufs_stat_fops, 0444, },
	{},
};
2688
/* Debug-only files added to context directories. */
const struct spufs_tree_descr spufs_dir_debug_contents[] = {
	{ ".ctx", &spufs_ctx_fops, 0444, },
	{},
};
Dwayne Grant McConnellbf1ab972006-11-23 00:46:37 +01002693
/*
 * Per-file readers used when dumping a context into a coredump:
 * { name, read callback, get callback, size }.  Each entry provides
 * either a read or a get callback, never both; size 19 fits the
 * "0x%.16llx" text rendering of a single 64-bit value.
 */
const struct spufs_coredump_reader spufs_coredump_read[] = {
	{ "regs", __spufs_regs_read, NULL, sizeof(struct spu_reg128[128])},
	{ "fpcr", __spufs_fpcr_read, NULL, sizeof(struct spu_reg128) },
	{ "lslr", NULL, spufs_lslr_get, 19 },
	{ "decr", NULL, spufs_decr_get, 19 },
	{ "decr_status", NULL, spufs_decr_status_get, 19 },
	{ "mem", __spufs_mem_read, NULL, LS_SIZE, },
	{ "signal1", __spufs_signal1_read, NULL, sizeof(u32) },
	{ "signal1_type", NULL, spufs_signal1_type_get, 19 },
	{ "signal2", __spufs_signal2_read, NULL, sizeof(u32) },
	{ "signal2_type", NULL, spufs_signal2_type_get, 19 },
	{ "event_mask", NULL, spufs_event_mask_get, 19 },
	{ "event_status", NULL, spufs_event_status_get, 19 },
	{ "mbox_info", __spufs_mbox_info_read, NULL, sizeof(u32) },
	{ "ibox_info", __spufs_ibox_info_read, NULL, sizeof(u32) },
	{ "wbox_info", __spufs_wbox_info_read, NULL, 4 * sizeof(u32)},
	{ "dma_info", __spufs_dma_info_read, NULL, sizeof(struct spu_dma_info)},
	{ "proxydma_info", __spufs_proxydma_info_read,
			   NULL, sizeof(struct spu_proxydma_info)},
	{ "object-id", NULL, spufs_object_id_get, 19 },
	{ "npc", NULL, spufs_npc_get, 19 },
	{ NULL },
};