/*
 * Low-level SPU handling
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/module.h>

#include <asm/spu.h>
#include <asm/spu_csa.h>

#include "spufs.h"

/*
 * This ought to be kept in sync with the powerpc specific do_page_fault
 * function. Fortunately, there are a few corner cases that we have not
 * needed to handle here yet.
 */
static int spu_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
		unsigned long dsisr)
{
	struct vm_area_struct *vma;
	unsigned long is_write;
	int ret = -EFAULT;

#if 0
	if (!IS_VALID_EA(ea)) {
		return -EFAULT;
	}
#endif /* XXX */
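	/* Without a valid mm and page table there is nothing to fault in. */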
	if (mm == NULL)
		return -EFAULT;
	if (mm->pgd == NULL)
		return -EFAULT;

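	/*
	 * Find the VMA covering the effective address, growing the stack
	 * VMA downwards if the access falls just below it.
	 */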
	down_read(&mm->mmap_sem);
	vma = find_vma(mm, ea);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= ea)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, ea))
		goto bad_area;
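	/* Check the DMA access type against the VMA protection bits. */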
good_area:
	is_write = dsisr & MFC_DSISR_ACCESS_PUT;
	if (is_write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if (dsisr & MFC_DSISR_ACCESS_DENIED)
			goto bad_area;
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}
	ret = 0;
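	/*
	 * Fault the page in and account minor/major faults the same way
	 * the main powerpc do_page_fault does.
	 */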
	switch (handle_mm_fault(mm, vma, ea, is_write)) {
	case VM_FAULT_MINOR:
		current->min_flt++;
		break;
	case VM_FAULT_MAJOR:
		current->maj_flt++;
		break;
	case VM_FAULT_SIGBUS:
		ret = -EFAULT;
		goto bad_area;
	case VM_FAULT_OOM:
		ret = -ENOMEM;
		goto bad_area;
	default:
		BUG();
	}
	up_read(&mm->mmap_sem);
	return ret;

bad_area:
	/* ret is -EFAULT here unless handle_mm_fault set a specific error */
	up_read(&mm->mmap_sem);
	return ret;
}

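/*
 * Deliver a DMA error to user space: contexts created with
 * SPU_CREATE_EVENTS_ENABLED get the error recorded in their
 * event_return word, everyone else gets a signal.
 */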
static void spufs_handle_dma_error(struct spu_context *ctx, int type)
{
	if (ctx->flags & SPU_CREATE_EVENTS_ENABLED) {
		ctx->event_return |= type;
		wake_up_all(&ctx->stop_wq);
	} else {
		switch (type) {
		case SPE_EVENT_DMA_ALIGNMENT:
		case SPE_EVENT_SPE_DATA_STORAGE:
		case SPE_EVENT_INVALID_DMA:
			force_sig(SIGBUS, /* info */ current);
			break;
		case SPE_EVENT_SPE_ERROR:
			force_sig(SIGILL, /* info */ current);
			break;
		}
	}
}

void spufs_dma_callback(struct spu *spu, int type)
{
	spufs_handle_dma_error(spu->ctx, type);
}
EXPORT_SYMBOL_GPL(spufs_dma_callback);

/*
 * Bottom half handler for page faults. We can't do this from
 * interrupt context, since we might need to sleep.
 * We also need to give up the mutex so we can get scheduled
 * out while waiting for the backing store.
 *
 * TODO: try calling hash_page from the interrupt handler first
 * in order to speed up the easy case.
 */
int spufs_handle_class1(struct spu_context *ctx)
{
	u64 ea, dsisr, access;
	unsigned long flags;
	int ret;

	/*
	 * dar and dsisr get passed from the registers
	 * to the spu_context, to this function, but not
	 * back to the spu if it gets scheduled again.
	 *
	 * If we don't handle the fault for a saved context
	 * in time, we can still expect to get the same fault
	 * again immediately after the context restore.
	 */
	if (ctx->state == SPU_STATE_RUNNABLE) {
		ea = ctx->spu->dar;
		dsisr = ctx->spu->dsisr;
		ctx->spu->dar = ctx->spu->dsisr = 0;
	} else {
		ea = ctx->csa.priv1.mfc_dar_RW;
		dsisr = ctx->csa.priv1.mfc_dsisr_RW;
		ctx->csa.priv1.mfc_dar_RW = 0;
		ctx->csa.priv1.mfc_dsisr_RW = 0;
	}

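	/* Only translation and protection faults are handled here. */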
	if (!(dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED)))
		return 0;

	pr_debug("ctx %p: ea %016lx, dsisr %016lx state %d\n", ctx, ea,
		dsisr, ctx->state);

	/* We must not hold the lock when entering spu_handle_mm_fault. */
	spu_release(ctx);

	access = (_PAGE_PRESENT | _PAGE_USER);
	access |= (dsisr & MFC_DSISR_ACCESS_PUT) ? _PAGE_RW : 0UL;
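	/*
	 * Fast path: try to hash the page in directly. 0x300 is the
	 * PowerPC data storage interrupt vector.
	 */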
	local_irq_save(flags);
	ret = hash_page(ea, access, 0x300);
	local_irq_restore(flags);

	/* Hashing failed, so try the actual fault handler. */
	if (ret)
		ret = spu_handle_mm_fault(current->mm, ea, dsisr);

	spu_acquire(ctx);
	/*
	 * If we handled the fault successfully and are in runnable
	 * state, restart the DMA.
	 * In case of an unhandled error, report the problem to user space.
	 */
	if (!ret) {
		if (ctx->spu)
			ctx->ops->restart_dma(ctx);
	} else
		spufs_handle_dma_error(ctx, SPE_EVENT_SPE_DATA_STORAGE);

	return ret;
}
EXPORT_SYMBOL_GPL(spufs_handle_class1);