/*
 * Copyright (C) 2006 Ben Skeggs.
 *
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

/*
 * Authors:
 *   Ben Skeggs <darktama@iinet.net.au>
 */

#include "drmP.h"
#include "drm.h"
#include "nouveau_drm.h"
#include "nouveau_drv.h"
#include "nouveau_reg.h"
#include "nouveau_ramht.h"
#include "nouveau_util.h"

/* needed for hotplug irq */
#include "nouveau_connector.h"
#include "nv50_display.h"

void
nouveau_irq_preinstall(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	/* Master disable */
	nv_wr32(dev, NV03_PMC_INTR_EN_0, 0);

	if (dev_priv->card_type >= NV_50) {
		INIT_WORK(&dev_priv->irq_work, nv50_display_irq_handler_bh);
		INIT_WORK(&dev_priv->hpd_work, nv50_display_irq_hotplug_bh);
		spin_lock_init(&dev_priv->hpd_state.lock);
		INIT_LIST_HEAD(&dev_priv->vbl_waiting);
	}
}

int
nouveau_irq_postinstall(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	/* Master enable */
	nv_wr32(dev, NV03_PMC_INTR_EN_0, NV_PMC_INTR_EN_0_MASTER_ENABLE);
	if (dev_priv->msi_enabled)
		nv_wr08(dev, 0x00088068, 0xff);

	return 0;
}

void
nouveau_irq_uninstall(struct drm_device *dev)
{
	/* Master disable */
	nv_wr32(dev, NV03_PMC_INTR_EN_0, 0);
}

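/* Attempt to execute a trapping FIFO method in software.  Method 0x0000
 * binds an object to a subchannel; if that object lives on the in-kernel
 * SW engine the binding is recorded here.  Any other method on a
 * SW-bound subchannel is forwarded to the object's handler.  Returns
 * true if the method was consumed.
 */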
static bool
nouveau_fifo_swmthd(struct drm_device *dev, u32 chid, u32 addr, u32 data)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *chan = NULL;
	struct nouveau_gpuobj *obj;
	unsigned long flags;
	const int subc = (addr >> 13) & 0x7;
	const int mthd = addr & 0x1ffc;
	bool handled = false;
	u32 engine;

	spin_lock_irqsave(&dev_priv->channels.lock, flags);
	if (likely(chid >= 0 && chid < dev_priv->engine.fifo.channels))
		chan = dev_priv->channels.ptr[chid];
	if (unlikely(!chan))
		goto out;

	switch (mthd) {
	case 0x0000: /* bind object to subchannel */
		obj = nouveau_ramht_find(chan, data);
		if (unlikely(!obj || obj->engine != NVOBJ_ENGINE_SW))
			break;

		chan->sw_subchannel[subc] = obj->class;
		engine = 0x0000000f << (subc * 4);

		nv_mask(dev, NV04_PFIFO_CACHE1_ENGINE, engine, 0x00000000);
		handled = true;
		break;
	default:
		engine = nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE);
		if (unlikely(((engine >> (subc * 4)) & 0xf) != 0))
			break;

		if (!nouveau_gpuobj_mthd_call(chan, chan->sw_subchannel[subc],
					      mthd, data))
			handled = true;
		break;
	}

out:
	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
	return handled;
}

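/* Service pending PFIFO interrupts: CACHE_ERROR (try the software-method
 * path, then skip past the faulting method), DMA_PUSHER (log the pusher
 * state and advance GET to PUT to unstick it), SEMAPHORE, and the NV50
 * BAR fault.  Gives up after 100 loop iterations if PFIFO keeps
 * reasserting.
 */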
static void
nouveau_fifo_irq_handler(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_engine *engine = &dev_priv->engine;
	uint32_t status, reassign;
	int cnt = 0;

	reassign = nv_rd32(dev, NV03_PFIFO_CACHES) & 1;
	while ((status = nv_rd32(dev, NV03_PFIFO_INTR_0)) && (cnt++ < 100)) {
		uint32_t chid, get;

		nv_wr32(dev, NV03_PFIFO_CACHES, 0);

		chid = engine->fifo.channel_id(dev);
		get = nv_rd32(dev, NV03_PFIFO_CACHE1_GET);

		if (status & NV_PFIFO_INTR_CACHE_ERROR) {
			uint32_t mthd, data;
			int ptr;

			/* NV_PFIFO_CACHE1_GET actually goes to 0xffc before
			 * wrapping on my G80 chips, but CACHE1 isn't big
			 * enough for this much data.. Tests show that it
			 * wraps around to the start at GET=0x800.. No clue
			 * as to why..
			 */
			ptr = (get & 0x7ff) >> 2;

			if (dev_priv->card_type < NV_40) {
				mthd = nv_rd32(dev,
					NV04_PFIFO_CACHE1_METHOD(ptr));
				data = nv_rd32(dev,
					NV04_PFIFO_CACHE1_DATA(ptr));
			} else {
				mthd = nv_rd32(dev,
					NV40_PFIFO_CACHE1_METHOD(ptr));
				data = nv_rd32(dev,
					NV40_PFIFO_CACHE1_DATA(ptr));
			}

			if (!nouveau_fifo_swmthd(dev, chid, mthd, data)) {
				NV_INFO(dev, "PFIFO_CACHE_ERROR - Ch %d/%d "
					     "Mthd 0x%04x Data 0x%08x\n",
					chid, (mthd >> 13) & 7, mthd & 0x1ffc,
					data);
			}

			nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
			nv_wr32(dev, NV03_PFIFO_INTR_0,
				NV_PFIFO_INTR_CACHE_ERROR);

			nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
				nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) & ~1);
			nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
			nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
				nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) | 1);
			nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0);

			nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH,
				nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
			nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);

			status &= ~NV_PFIFO_INTR_CACHE_ERROR;
		}

		if (status & NV_PFIFO_INTR_DMA_PUSHER) {
			u32 dma_get = nv_rd32(dev, 0x003244);
			u32 dma_put = nv_rd32(dev, 0x003240);
			u32 push = nv_rd32(dev, 0x003220);
			u32 state = nv_rd32(dev, 0x003228);

			if (dev_priv->card_type == NV_50) {
				u32 ho_get = nv_rd32(dev, 0x003328);
				u32 ho_put = nv_rd32(dev, 0x003320);
				u32 ib_get = nv_rd32(dev, 0x003334);
				u32 ib_put = nv_rd32(dev, 0x003330);

				if (nouveau_ratelimit())
					NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%02x%08x "
					     "Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x "
					     "State 0x%08x Push 0x%08x\n",
						chid, ho_get, dma_get, ho_put,
						dma_put, ib_get, ib_put, state,
						push);

				/* METHOD_COUNT, in DMA_STATE on earlier chipsets */
				nv_wr32(dev, 0x003364, 0x00000000);
				if (dma_get != dma_put || ho_get != ho_put) {
					nv_wr32(dev, 0x003244, dma_put);
					nv_wr32(dev, 0x003328, ho_put);
				} else
				if (ib_get != ib_put) {
					nv_wr32(dev, 0x003334, ib_put);
				}
			} else {
				NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%08x "
					     "Put 0x%08x State 0x%08x Push 0x%08x\n",
					chid, dma_get, dma_put, state, push);

				if (dma_get != dma_put)
					nv_wr32(dev, 0x003244, dma_put);
			}

			nv_wr32(dev, 0x003228, 0x00000000);
			nv_wr32(dev, 0x003220, 0x00000001);
			nv_wr32(dev, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
			status &= ~NV_PFIFO_INTR_DMA_PUSHER;
		}

		if (status & NV_PFIFO_INTR_SEMAPHORE) {
			uint32_t sem;

			status &= ~NV_PFIFO_INTR_SEMAPHORE;
			nv_wr32(dev, NV03_PFIFO_INTR_0,
				NV_PFIFO_INTR_SEMAPHORE);

			sem = nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE);
			nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);

			nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
			nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
		}

		if (dev_priv->card_type == NV_50) {
			if (status & 0x00000010) {
				nv50_fb_vm_trap(dev, 1, "PFIFO_BAR_FAULT");
				status &= ~0x00000010;
				nv_wr32(dev, 0x002100, 0x00000010);
			}
		}

		if (status) {
			if (nouveau_ratelimit())
				NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n",
					status, chid);
			nv_wr32(dev, NV03_PFIFO_INTR_0, status);
			status = 0;
		}

		nv_wr32(dev, NV03_PFIFO_CACHES, reassign);
	}

	if (status) {
		NV_INFO(dev, "PFIFO still angry after %d spins, halt\n", cnt);
		nv_wr32(dev, 0x2140, 0);
		nv_wr32(dev, 0x140, 0);
	}

	nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PFIFO_PENDING);
}

struct nouveau_bitfield_names {
	uint32_t mask;
	const char *name;
};

static struct nouveau_bitfield_names nstatus_names[] =
{
	{ NV04_PGRAPH_NSTATUS_STATE_IN_USE,       "STATE_IN_USE" },
	{ NV04_PGRAPH_NSTATUS_INVALID_STATE,      "INVALID_STATE" },
	{ NV04_PGRAPH_NSTATUS_BAD_ARGUMENT,       "BAD_ARGUMENT" },
	{ NV04_PGRAPH_NSTATUS_PROTECTION_FAULT,   "PROTECTION_FAULT" }
};

static struct nouveau_bitfield_names nstatus_names_nv10[] =
{
	{ NV10_PGRAPH_NSTATUS_STATE_IN_USE,       "STATE_IN_USE" },
	{ NV10_PGRAPH_NSTATUS_INVALID_STATE,      "INVALID_STATE" },
	{ NV10_PGRAPH_NSTATUS_BAD_ARGUMENT,       "BAD_ARGUMENT" },
	{ NV10_PGRAPH_NSTATUS_PROTECTION_FAULT,   "PROTECTION_FAULT" }
};

static struct nouveau_bitfield_names nsource_names[] =
{
	{ NV03_PGRAPH_NSOURCE_NOTIFICATION,       "NOTIFICATION" },
	{ NV03_PGRAPH_NSOURCE_DATA_ERROR,         "DATA_ERROR" },
	{ NV03_PGRAPH_NSOURCE_PROTECTION_ERROR,   "PROTECTION_ERROR" },
	{ NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION,    "RANGE_EXCEPTION" },
	{ NV03_PGRAPH_NSOURCE_LIMIT_COLOR,        "LIMIT_COLOR" },
	{ NV03_PGRAPH_NSOURCE_LIMIT_ZETA,         "LIMIT_ZETA" },
	{ NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD,       "ILLEGAL_MTHD" },
	{ NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION,   "DMA_R_PROTECTION" },
	{ NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION,   "DMA_W_PROTECTION" },
	{ NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION,   "FORMAT_EXCEPTION" },
	{ NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION,    "PATCH_EXCEPTION" },
	{ NV03_PGRAPH_NSOURCE_STATE_INVALID,      "STATE_INVALID" },
	{ NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY,      "DOUBLE_NOTIFY" },
	{ NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE,      "NOTIFY_IN_USE" },
	{ NV03_PGRAPH_NSOURCE_METHOD_CNT,         "METHOD_CNT" },
	{ NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION,   "BFR_NOTIFICATION" },
	{ NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION, "DMA_VTX_PROTECTION" },
	{ NV03_PGRAPH_NSOURCE_DMA_WIDTH_A,        "DMA_WIDTH_A" },
	{ NV03_PGRAPH_NSOURCE_DMA_WIDTH_B,        "DMA_WIDTH_B" },
};

static void
nouveau_print_bitfield_names_(uint32_t value,
			      const struct nouveau_bitfield_names *namelist,
			      const int namelist_len)
{
	/*
	 * Caller must have already printed the KERN_* log level for us.
	 * Also the caller is responsible for adding the newline.
	 */
	int i;
	for (i = 0; i < namelist_len; ++i) {
		uint32_t mask = namelist[i].mask;
		if (value & mask) {
			printk(" %s", namelist[i].name);
			value &= ~mask;
		}
	}
	if (value)
		printk(" (unknown bits 0x%08x)", value);
}
#define nouveau_print_bitfield_names(val, namelist) \
	nouveau_print_bitfield_names_((val), (namelist), ARRAY_SIZE(namelist))

struct nouveau_enum_names {
	uint32_t value;
	const char *name;
};

static void
nouveau_print_enum_names_(uint32_t value,
			  const struct nouveau_enum_names *namelist,
			  const int namelist_len)
{
	/*
	 * Caller must have already printed the KERN_* log level for us.
	 * Also the caller is responsible for adding the newline.
	 */
	int i;
	for (i = 0; i < namelist_len; ++i) {
		if (value == namelist[i].value) {
			printk("%s", namelist[i].name);
			return;
		}
	}
	printk("unknown value 0x%08x", value);
}
#define nouveau_print_enum_names(val, namelist) \
	nouveau_print_enum_names_((val), (namelist), ARRAY_SIZE(namelist))

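/* Work out which channel owns the current PGRAPH context by matching the
 * context pointer in 0x40032c against each channel's grctx instance (NV40)
 * or RAMIN instance (NV50).  Returns fifo.channels when nothing matches.
 */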
static int
nouveau_graph_chid_from_grctx(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *chan;
	unsigned long flags;
	uint32_t inst;
	int i;

	if (dev_priv->card_type < NV_40)
		return dev_priv->engine.fifo.channels;
	else
	if (dev_priv->card_type < NV_50) {
		inst = (nv_rd32(dev, 0x40032c) & 0xfffff) << 4;

		spin_lock_irqsave(&dev_priv->channels.lock, flags);
		for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
			chan = dev_priv->channels.ptr[i];
			if (!chan || !chan->ramin_grctx)
				continue;

			if (inst == chan->ramin_grctx->pinst)
				break;
		}
		spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
	} else {
		inst = (nv_rd32(dev, 0x40032c) & 0xfffff) << 12;

		spin_lock_irqsave(&dev_priv->channels.lock, flags);
		for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
			chan = dev_priv->channels.ptr[i];
			if (!chan || !chan->ramin)
				continue;

			if (inst == chan->ramin->vinst)
				break;
		}
		spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
	}

	return i;
}

static int
nouveau_graph_trapped_channel(struct drm_device *dev, int *channel_ret)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_engine *engine = &dev_priv->engine;
	int channel;

	if (dev_priv->card_type < NV_10)
		channel = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 24) & 0xf;
	else
	if (dev_priv->card_type < NV_40)
		channel = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f;
	else
		channel = nouveau_graph_chid_from_grctx(dev);

	if (channel >= engine->fifo.channels ||
	    !dev_priv->channels.ptr[channel]) {
		NV_ERROR(dev, "AIII, invalid/inactive channel id %d\n", channel);
		return -EINVAL;
	}

	*channel_ret = channel;
	return 0;
}

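/* Snapshot of a trapped PGRAPH method: the channel, object class,
 * subchannel, method offset and data words, plus the NSOURCE/NSTATUS
 * error bits on pre-NV50 chipsets.
 */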
struct nouveau_pgraph_trap {
	int channel;
	int class;
	int subc, mthd, size;
	uint32_t data, data2;
	uint32_t nsource, nstatus;
};

static void
nouveau_graph_trap_info(struct drm_device *dev,
			struct nouveau_pgraph_trap *trap)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t address;

	trap->nsource = trap->nstatus = 0;
	if (dev_priv->card_type < NV_50) {
		trap->nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
		trap->nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
	}

	if (nouveau_graph_trapped_channel(dev, &trap->channel))
		trap->channel = -1;
	address = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);

	trap->mthd = address & 0x1FFC;
	trap->data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
	if (dev_priv->card_type < NV_10) {
		trap->subc = (address >> 13) & 0x7;
	} else {
		trap->subc = (address >> 16) & 0x7;
		trap->data2 = nv_rd32(dev, NV10_PGRAPH_TRAPPED_DATA_HIGH);
	}

	if (dev_priv->card_type < NV_10)
		trap->class = nv_rd32(dev, 0x400180 + trap->subc*4) & 0xFF;
	else if (dev_priv->card_type < NV_40)
		trap->class = nv_rd32(dev, 0x400160 + trap->subc*4) & 0xFFF;
	else if (dev_priv->card_type < NV_50)
		trap->class = nv_rd32(dev, 0x400160 + trap->subc*4) & 0xFFFF;
	else
		trap->class = nv_rd32(dev, 0x400814);
}

static void
nouveau_graph_dump_trap_info(struct drm_device *dev, const char *id,
			     struct nouveau_pgraph_trap *trap)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t nsource = trap->nsource, nstatus = trap->nstatus;

	if (dev_priv->card_type < NV_50) {
		NV_INFO(dev, "%s - nSource:", id);
		nouveau_print_bitfield_names(nsource, nsource_names);
		printk(", nStatus:");
		if (dev_priv->card_type < NV_10)
			nouveau_print_bitfield_names(nstatus, nstatus_names);
		else
			nouveau_print_bitfield_names(nstatus, nstatus_names_nv10);
		printk("\n");
	}

	NV_INFO(dev, "%s - Ch %d/%d Class 0x%04x Mthd 0x%04x "
		     "Data 0x%08x:0x%08x\n",
		id, trap->channel, trap->subc,
		trap->class, trap->mthd,
		trap->data2, trap->data);
}

static int
nouveau_pgraph_intr_swmthd(struct drm_device *dev,
			   struct nouveau_pgraph_trap *trap)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *chan;
	unsigned long flags;
	int ret = -EINVAL;

	spin_lock_irqsave(&dev_priv->channels.lock, flags);
	if (trap->channel > 0 &&
	    trap->channel < dev_priv->engine.fifo.channels &&
	    dev_priv->channels.ptr[trap->channel]) {
		chan = dev_priv->channels.ptr[trap->channel];
		ret = nouveau_gpuobj_mthd_call(chan, trap->class, trap->mthd, trap->data);
	}
	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);

	return ret;
}

static inline void
nouveau_pgraph_intr_notify(struct drm_device *dev, uint32_t nsource)
{
	struct nouveau_pgraph_trap trap;
	int unhandled = 0;

	nouveau_graph_trap_info(dev, &trap);

	if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
		if (nouveau_pgraph_intr_swmthd(dev, &trap))
			unhandled = 1;
	} else {
		unhandled = 1;
	}

	if (unhandled)
		nouveau_graph_dump_trap_info(dev, "PGRAPH_NOTIFY", &trap);
}

static inline void
nouveau_pgraph_intr_error(struct drm_device *dev, uint32_t nsource)
{
	struct nouveau_pgraph_trap trap;
	int unhandled = 0;

	nouveau_graph_trap_info(dev, &trap);
	trap.nsource = nsource;

	if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
		if (nouveau_pgraph_intr_swmthd(dev, &trap))
			unhandled = 1;
	} else if (nsource & NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION) {
		uint32_t v = nv_rd32(dev, 0x402000);
		nv_wr32(dev, 0x402000, v);

		/* dump the error anyway for now: it's useful for
		   Gallium development */
		unhandled = 1;
	} else {
		unhandled = 1;
	}

	if (unhandled && nouveau_ratelimit())
		nouveau_graph_dump_trap_info(dev, "PGRAPH_ERROR", &trap);
}

static inline void
nouveau_pgraph_intr_context_switch(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_engine *engine = &dev_priv->engine;
	uint32_t chid;

	chid = engine->fifo.channel_id(dev);
	NV_DEBUG(dev, "PGRAPH context switch interrupt channel %x\n", chid);

	switch (dev_priv->card_type) {
	case NV_04:
		nv04_graph_context_switch(dev);
		break;
	case NV_10:
		nv10_graph_context_switch(dev);
		break;
	default:
		NV_ERROR(dev, "Context switch not implemented\n");
		break;
	}
}

static void
nouveau_pgraph_irq_handler(struct drm_device *dev)
{
	uint32_t status;

	while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) {
		uint32_t nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);

		if (status & NV_PGRAPH_INTR_NOTIFY) {
			nouveau_pgraph_intr_notify(dev, nsource);

			status &= ~NV_PGRAPH_INTR_NOTIFY;
			nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_NOTIFY);
		}

		if (status & NV_PGRAPH_INTR_ERROR) {
			nouveau_pgraph_intr_error(dev, nsource);

			status &= ~NV_PGRAPH_INTR_ERROR;
			nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_ERROR);
		}

		if (status & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
			status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
			nv_wr32(dev, NV03_PGRAPH_INTR,
				NV_PGRAPH_INTR_CONTEXT_SWITCH);

			nouveau_pgraph_intr_context_switch(dev);
		}

		if (status) {
			NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n", status);
			nv_wr32(dev, NV03_PGRAPH_INTR, status);
		}

		if ((nv_rd32(dev, NV04_PGRAPH_FIFO) & (1 << 0)) == 0)
			nv_wr32(dev, NV04_PGRAPH_FIFO, 1);
	}

	nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
}

static struct nouveau_enum_names nv50_mp_exec_error_names[] =
{
	{ 3, "STACK_UNDERFLOW" },
	{ 4, "QUADON_ACTIVE" },
	{ 8, "TIMEOUT" },
	{ 0x10, "INVALID_OPCODE" },
	{ 0x40, "BREAKPOINT" },
};

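/* Report and acknowledge execution errors from the MPs (shader
 * multiprocessors) of one TP.  "display" gates whether anything is
 * printed; the error status is cleared either way.
 */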
static void
nv50_pgraph_mp_trap(struct drm_device *dev, int tpid, int display)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t units = nv_rd32(dev, 0x1540);
	uint32_t addr, mp10, status, pc, oplow, ophigh;
	int i;
	int mps = 0;
	for (i = 0; i < 4; i++) {
		if (!(units & 1 << (i+24)))
			continue;
		if (dev_priv->chipset < 0xa0)
			addr = 0x408200 + (tpid << 12) + (i << 7);
		else
			addr = 0x408100 + (tpid << 11) + (i << 7);
		mp10 = nv_rd32(dev, addr + 0x10);
		status = nv_rd32(dev, addr + 0x14);
		if (!status)
			continue;
		if (display) {
			nv_rd32(dev, addr + 0x20);
			pc = nv_rd32(dev, addr + 0x24);
			oplow = nv_rd32(dev, addr + 0x70);
			ophigh = nv_rd32(dev, addr + 0x74);
			NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - "
					"TP %d MP %d: ", tpid, i);
			nouveau_print_enum_names(status,
					nv50_mp_exec_error_names);
			printk(" at %06x warp %d, opcode %08x %08x\n",
					pc&0xffffff, pc >> 24,
					oplow, ophigh);
		}
		nv_wr32(dev, addr + 0x10, mp10);
		nv_wr32(dev, addr + 0x14, 0);
		mps++;
	}
	if (!mps && display)
		NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - TP %d: "
				"No MPs claiming errors?\n", tpid);
}

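/* Walk the enabled TPs and report/clear per-TP error status of the given
 * trap type (6 = texture, 7 = MP, 8 = TPDMA).  ustatus_old and ustatus_new
 * are the per-TP status register bases for pre-NVA0 and NVA0+ layouts.
 */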
static void
nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old,
		uint32_t ustatus_new, int display, const char *name)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int tps = 0;
	uint32_t units = nv_rd32(dev, 0x1540);
	int i, r;
	uint32_t ustatus_addr, ustatus;
	for (i = 0; i < 16; i++) {
		if (!(units & (1 << i)))
			continue;
		if (dev_priv->chipset < 0xa0)
			ustatus_addr = ustatus_old + (i << 12);
		else
			ustatus_addr = ustatus_new + (i << 11);
		ustatus = nv_rd32(dev, ustatus_addr) & 0x7fffffff;
		if (!ustatus)
			continue;
		tps++;
		switch (type) {
		case 6: /* texture error... unknown for now */
			nv50_fb_vm_trap(dev, display, name);
			if (display) {
				NV_ERROR(dev, "magic set %d:\n", i);
				for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4)
					NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
						nv_rd32(dev, r));
			}
			break;
		case 7: /* MP error */
			if (ustatus & 0x00010000) {
				nv50_pgraph_mp_trap(dev, i, display);
				ustatus &= ~0x00010000;
			}
			break;
		case 8: /* TPDMA error */
			{
			uint32_t e0c = nv_rd32(dev, ustatus_addr + 4);
			uint32_t e10 = nv_rd32(dev, ustatus_addr + 8);
			uint32_t e14 = nv_rd32(dev, ustatus_addr + 0xc);
			uint32_t e18 = nv_rd32(dev, ustatus_addr + 0x10);
			uint32_t e1c = nv_rd32(dev, ustatus_addr + 0x14);
			uint32_t e20 = nv_rd32(dev, ustatus_addr + 0x18);
			uint32_t e24 = nv_rd32(dev, ustatus_addr + 0x1c);
			nv50_fb_vm_trap(dev, display, name);
			/* 2d engine destination */
			if (ustatus & 0x00000010) {
				if (display) {
					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - Unknown fault at address %02x%08x\n",
							i, e14, e10);
					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
							i, e0c, e18, e1c, e20, e24);
				}
				ustatus &= ~0x00000010;
			}
			/* Render target */
			if (ustatus & 0x00000040) {
				if (display) {
					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - Unknown fault at address %02x%08x\n",
							i, e14, e10);
					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
							i, e0c, e18, e1c, e20, e24);
				}
				ustatus &= ~0x00000040;
			}
			/* CUDA memory: l[], g[] or stack. */
			if (ustatus & 0x00000080) {
				if (display) {
					if (e18 & 0x80000000) {
						/* g[] read fault? */
						NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global read fault at address %02x%08x\n",
								i, e14, e10 | ((e18 >> 24) & 0x1f));
						e18 &= ~0x1f000000;
					} else if (e18 & 0xc) {
						/* g[] write fault? */
						NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global write fault at address %02x%08x\n",
								i, e14, e10 | ((e18 >> 7) & 0x1f));
						e18 &= ~0x00000f80;
					} else {
						NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Unknown CUDA fault at address %02x%08x\n",
								i, e14, e10);
					}
					NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
							i, e0c, e18, e1c, e20, e24);
				}
				ustatus &= ~0x00000080;
			}
			}
			break;
		}
		if (ustatus) {
			if (display)
				NV_INFO(dev, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus);
		}
		nv_wr32(dev, ustatus_addr, 0xc0000000);
	}

	if (!tps && display)
		NV_INFO(dev, "%s - No TPs claiming errors?\n", name);
}

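/* Decode the NV50 PGRAPH trap status register (0x400108) one unit at a
 * time (DISPATCH, M2MF, VFETCH, STRMOUT, CCACHE, TEXTURE, MP, TPDMA),
 * logging what can be decoded and acknowledging each trap as it goes.
 */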
static void
nv50_pgraph_trap_handler(struct drm_device *dev)
{
	struct nouveau_pgraph_trap trap;
	uint32_t status = nv_rd32(dev, 0x400108);
	uint32_t ustatus;
	int display = nouveau_ratelimit();

	if (!status && display) {
		nouveau_graph_trap_info(dev, &trap);
		nouveau_graph_dump_trap_info(dev, "PGRAPH_TRAP", &trap);
		NV_INFO(dev, "PGRAPH_TRAP - no units reporting traps?\n");
	}

	/* DISPATCH: Relays commands to other units and handles NOTIFY,
	 * COND, QUERY. If you get a trap from it, the command is still stuck
	 * in DISPATCH and you need to do something about it. */
	if (status & 0x001) {
		ustatus = nv_rd32(dev, 0x400804) & 0x7fffffff;
		if (!ustatus && display) {
			NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - no ustatus?\n");
		}

		/* Known to be triggered by screwed up NOTIFY and COND... */
		if (ustatus & 0x00000001) {
			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_FAULT");
			nv_wr32(dev, 0x400500, 0);
			if (nv_rd32(dev, 0x400808) & 0x80000000) {
				if (display) {
					if (nouveau_graph_trapped_channel(dev, &trap.channel))
						trap.channel = -1;
					trap.class = nv_rd32(dev, 0x400814);
					trap.mthd = nv_rd32(dev, 0x400808) & 0x1ffc;
					trap.subc = (nv_rd32(dev, 0x400808) >> 16) & 0x7;
					trap.data = nv_rd32(dev, 0x40080c);
					trap.data2 = nv_rd32(dev, 0x400810);
					nouveau_graph_dump_trap_info(dev,
							"PGRAPH_TRAP_DISPATCH_FAULT", &trap);
					NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - 400808: %08x\n", nv_rd32(dev, 0x400808));
					NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - 400848: %08x\n", nv_rd32(dev, 0x400848));
				}
				nv_wr32(dev, 0x400808, 0);
			} else if (display) {
				NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - No stuck command?\n");
			}
			nv_wr32(dev, 0x4008e8, nv_rd32(dev, 0x4008e8) & 3);
			nv_wr32(dev, 0x400848, 0);
			ustatus &= ~0x00000001;
		}
		if (ustatus & 0x00000002) {
			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_QUERY");
			nv_wr32(dev, 0x400500, 0);
			if (nv_rd32(dev, 0x40084c) & 0x80000000) {
				if (display) {
					if (nouveau_graph_trapped_channel(dev, &trap.channel))
						trap.channel = -1;
					trap.class = nv_rd32(dev, 0x400814);
					trap.mthd = nv_rd32(dev, 0x40084c) & 0x1ffc;
					trap.subc = (nv_rd32(dev, 0x40084c) >> 16) & 0x7;
					trap.data = nv_rd32(dev, 0x40085c);
					trap.data2 = 0;
					nouveau_graph_dump_trap_info(dev,
							"PGRAPH_TRAP_DISPATCH_QUERY", &trap);
					NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_QUERY - 40084c: %08x\n", nv_rd32(dev, 0x40084c));
				}
				nv_wr32(dev, 0x40084c, 0);
			} else if (display) {
				NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_QUERY - No stuck command?\n");
			}
			ustatus &= ~0x00000002;
		}
		if (ustatus && display)
			NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - Unhandled ustatus 0x%08x\n", ustatus);
		nv_wr32(dev, 0x400804, 0xc0000000);
		nv_wr32(dev, 0x400108, 0x001);
		status &= ~0x001;
	}

	/* TRAPs other than dispatch use the "normal" trap regs. */
	if (status && display) {
		nouveau_graph_trap_info(dev, &trap);
		nouveau_graph_dump_trap_info(dev,
				"PGRAPH_TRAP", &trap);
	}

	/* M2MF: Memory to memory copy engine. */
	if (status & 0x002) {
		ustatus = nv_rd32(dev, 0x406800) & 0x7fffffff;
		if (!ustatus && display) {
			NV_INFO(dev, "PGRAPH_TRAP_M2MF - no ustatus?\n");
		}
		if (ustatus & 0x00000001) {
			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_NOTIFY");
			ustatus &= ~0x00000001;
		}
		if (ustatus & 0x00000002) {
			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_IN");
			ustatus &= ~0x00000002;
		}
		if (ustatus & 0x00000004) {
			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_OUT");
			ustatus &= ~0x00000004;
		}
		NV_INFO (dev, "PGRAPH_TRAP_M2MF - %08x %08x %08x %08x\n",
				nv_rd32(dev, 0x406804),
				nv_rd32(dev, 0x406808),
				nv_rd32(dev, 0x40680c),
				nv_rd32(dev, 0x406810));
		if (ustatus && display)
			NV_INFO(dev, "PGRAPH_TRAP_M2MF - Unhandled ustatus 0x%08x\n", ustatus);
		/* No sane way found yet -- just reset the bugger. */
		nv_wr32(dev, 0x400040, 2);
		nv_wr32(dev, 0x400040, 0);
		nv_wr32(dev, 0x406800, 0xc0000000);
		nv_wr32(dev, 0x400108, 0x002);
		status &= ~0x002;
	}

	/* VFETCH: Fetches data from vertex buffers. */
	if (status & 0x004) {
		ustatus = nv_rd32(dev, 0x400c04) & 0x7fffffff;
		if (!ustatus && display) {
			NV_INFO(dev, "PGRAPH_TRAP_VFETCH - no ustatus?\n");
		}
		if (ustatus & 0x00000001) {
			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_VFETCH_FAULT");
			NV_INFO (dev, "PGRAPH_TRAP_VFETCH_FAULT - %08x %08x %08x %08x\n",
					nv_rd32(dev, 0x400c00),
					nv_rd32(dev, 0x400c08),
					nv_rd32(dev, 0x400c0c),
					nv_rd32(dev, 0x400c10));
			ustatus &= ~0x00000001;
		}
		if (ustatus && display)
			NV_INFO(dev, "PGRAPH_TRAP_VFETCH - Unhandled ustatus 0x%08x\n", ustatus);
		nv_wr32(dev, 0x400c04, 0xc0000000);
		nv_wr32(dev, 0x400108, 0x004);
		status &= ~0x004;
	}

	/* STRMOUT: DirectX streamout / OpenGL transform feedback. */
	if (status & 0x008) {
		ustatus = nv_rd32(dev, 0x401800) & 0x7fffffff;
		if (!ustatus && display) {
			NV_INFO(dev, "PGRAPH_TRAP_STRMOUT - no ustatus?\n");
		}
		if (ustatus & 0x00000001) {
			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_STRMOUT_FAULT");
			NV_INFO (dev, "PGRAPH_TRAP_STRMOUT_FAULT - %08x %08x %08x %08x\n",
					nv_rd32(dev, 0x401804),
					nv_rd32(dev, 0x401808),
					nv_rd32(dev, 0x40180c),
					nv_rd32(dev, 0x401810));
			ustatus &= ~0x00000001;
		}
		if (ustatus && display)
			NV_INFO(dev, "PGRAPH_TRAP_STRMOUT - Unhandled ustatus 0x%08x\n", ustatus);
		/* No sane way found yet -- just reset the bugger. */
		nv_wr32(dev, 0x400040, 0x80);
		nv_wr32(dev, 0x400040, 0);
		nv_wr32(dev, 0x401800, 0xc0000000);
		nv_wr32(dev, 0x400108, 0x008);
		status &= ~0x008;
	}

	/* CCACHE: Handles code and c[] caches and fills them. */
	if (status & 0x010) {
		ustatus = nv_rd32(dev, 0x405018) & 0x7fffffff;
		if (!ustatus && display) {
			NV_INFO(dev, "PGRAPH_TRAP_CCACHE - no ustatus?\n");
		}
		if (ustatus & 0x00000001) {
			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_CCACHE_FAULT");
			NV_INFO (dev, "PGRAPH_TRAP_CCACHE_FAULT - %08x %08x %08x %08x %08x %08x %08x\n",
					nv_rd32(dev, 0x405800),
					nv_rd32(dev, 0x405804),
					nv_rd32(dev, 0x405808),
					nv_rd32(dev, 0x40580c),
					nv_rd32(dev, 0x405810),
					nv_rd32(dev, 0x405814),
					nv_rd32(dev, 0x40581c));
			ustatus &= ~0x00000001;
		}
		if (ustatus && display)
			NV_INFO(dev, "PGRAPH_TRAP_CCACHE - Unhandled ustatus 0x%08x\n", ustatus);
		nv_wr32(dev, 0x405018, 0xc0000000);
		nv_wr32(dev, 0x400108, 0x010);
		status &= ~0x010;
	}

	/* Unknown, not seen yet... 0x402000 is the only trap status reg
	 * remaining, so try to handle it anyway. Perhaps related to that
	 * unknown DMA slot on tesla? */
	if (status & 0x20) {
		nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_UNKC04");
		ustatus = nv_rd32(dev, 0x402000) & 0x7fffffff;
		if (display)
			NV_INFO(dev, "PGRAPH_TRAP_UNKC04 - Unhandled ustatus 0x%08x\n", ustatus);
		nv_wr32(dev, 0x402000, 0xc0000000);
		/* no status modification on purpose */
	}

	/* TEXTURE: CUDA texturing units */
	if (status & 0x040) {
		nv50_pgraph_tp_trap (dev, 6, 0x408900, 0x408600, display,
				"PGRAPH_TRAP_TEXTURE");
		nv_wr32(dev, 0x400108, 0x040);
		status &= ~0x040;
	}

	/* MP: CUDA execution engines. */
	if (status & 0x080) {
		nv50_pgraph_tp_trap (dev, 7, 0x408314, 0x40831c, display,
				"PGRAPH_TRAP_MP");
		nv_wr32(dev, 0x400108, 0x080);
		status &= ~0x080;
	}

	/* TPDMA: Handles TP-initiated uncached memory accesses:
	 * l[], g[], stack, 2d surfaces, render targets. */
	if (status & 0x100) {
		nv50_pgraph_tp_trap (dev, 8, 0x408e08, 0x408708, display,
				"PGRAPH_TRAP_TPDMA");
		nv_wr32(dev, 0x400108, 0x100);
		status &= ~0x100;
	}

	if (status) {
		if (display)
			NV_INFO(dev, "PGRAPH_TRAP - Unknown trap 0x%08x\n",
				status);
		nv_wr32(dev, 0x400108, status);
	}
}

/* There must be a *lot* of these. Will take some time to gather them up. */
static struct nouveau_enum_names nv50_data_error_names[] =
{
	{ 4, "INVALID_VALUE" },
	{ 5, "INVALID_ENUM" },
	{ 8, "INVALID_OBJECT" },
	{ 0xc, "INVALID_BITFIELD" },
	{ 0x28, "MP_NO_REG_SPACE" },
	{ 0x2b, "MP_BLOCK_SIZE_MISMATCH" },
};

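/* NV50 PGRAPH interrupt service: NOTIFY, COMPUTE_QUERY, ILLEGAL_MTHD
 * (software methods are tried first), ILLEGAL_CLASS, DOUBLE_NOTIFY,
 * CONTEXT_SWITCH, BUFFER_NOTIFY, DATA_ERROR, TRAP and SINGLE_STEP,
 * acknowledging each source as it is processed.
 */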
static void
nv50_pgraph_irq_handler(struct drm_device *dev)
{
	struct nouveau_pgraph_trap trap;
	int unhandled = 0;
	uint32_t status;

	while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) {
		/* NOTIFY: You've set a NOTIFY on a command and it's done. */
		if (status & 0x00000001) {
			nouveau_graph_trap_info(dev, &trap);
			if (nouveau_ratelimit())
				nouveau_graph_dump_trap_info(dev,
						"PGRAPH_NOTIFY", &trap);
			status &= ~0x00000001;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000001);
		}

		/* COMPUTE_QUERY: Purpose and exact cause unknown, happens
		 * when you write 0x200 to 0x50c0 method 0x31c. */
		if (status & 0x00000002) {
			nouveau_graph_trap_info(dev, &trap);
			if (nouveau_ratelimit())
				nouveau_graph_dump_trap_info(dev,
						"PGRAPH_COMPUTE_QUERY", &trap);
			status &= ~0x00000002;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000002);
		}

		/* Unknown, never seen: 0x4 */

		/* ILLEGAL_MTHD: You used a wrong method for this class. */
		if (status & 0x00000010) {
			nouveau_graph_trap_info(dev, &trap);
			if (nouveau_pgraph_intr_swmthd(dev, &trap))
				unhandled = 1;
			if (unhandled && nouveau_ratelimit())
				nouveau_graph_dump_trap_info(dev,
						"PGRAPH_ILLEGAL_MTHD", &trap);
			status &= ~0x00000010;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000010);
		}

		/* ILLEGAL_CLASS: You used a wrong class. */
		if (status & 0x00000020) {
			nouveau_graph_trap_info(dev, &trap);
			if (nouveau_ratelimit())
				nouveau_graph_dump_trap_info(dev,
						"PGRAPH_ILLEGAL_CLASS", &trap);
			status &= ~0x00000020;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000020);
		}

		/* DOUBLE_NOTIFY: You tried to set a NOTIFY on another NOTIFY. */
		if (status & 0x00000040) {
			nouveau_graph_trap_info(dev, &trap);
			if (nouveau_ratelimit())
				nouveau_graph_dump_trap_info(dev,
						"PGRAPH_DOUBLE_NOTIFY", &trap);
			status &= ~0x00000040;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000040);
		}

		/* CONTEXT_SWITCH: PGRAPH needs us to load a new context */
		if (status & 0x00001000) {
			nv_wr32(dev, 0x400500, 0x00000000);
			nv_wr32(dev, NV03_PGRAPH_INTR,
				NV_PGRAPH_INTR_CONTEXT_SWITCH);
			nv_wr32(dev, NV40_PGRAPH_INTR_EN, nv_rd32(dev,
				NV40_PGRAPH_INTR_EN) &
				~NV_PGRAPH_INTR_CONTEXT_SWITCH);
			nv_wr32(dev, 0x400500, 0x00010001);

			nv50_graph_context_switch(dev);

			status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
		}

		/* BUFFER_NOTIFY: Your m2mf transfer finished */
		if (status & 0x00010000) {
			nouveau_graph_trap_info(dev, &trap);
			if (nouveau_ratelimit())
				nouveau_graph_dump_trap_info(dev,
						"PGRAPH_BUFFER_NOTIFY", &trap);
			status &= ~0x00010000;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00010000);
		}

		/* DATA_ERROR: Invalid value for this method, or invalid
		 * state in current PGRAPH context for this operation */
		if (status & 0x00100000) {
			nouveau_graph_trap_info(dev, &trap);
			if (nouveau_ratelimit()) {
				nouveau_graph_dump_trap_info(dev,
						"PGRAPH_DATA_ERROR", &trap);
				NV_INFO (dev, "PGRAPH_DATA_ERROR - ");
				nouveau_print_enum_names(nv_rd32(dev, 0x400110),
						nv50_data_error_names);
				printk("\n");
			}
			status &= ~0x00100000;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00100000);
		}

		/* TRAP: Something bad happened in the middle of command
		 * execution.  Has a billion types, subtypes, and even
		 * subsubtypes. */
		if (status & 0x00200000) {
			nv50_pgraph_trap_handler(dev);
			status &= ~0x00200000;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00200000);
		}

		/* Unknown, never seen: 0x00400000 */

		/* SINGLE_STEP: Happens on every method if you turned on
		 * single stepping in 40008c */
		if (status & 0x01000000) {
			nouveau_graph_trap_info(dev, &trap);
			if (nouveau_ratelimit())
				nouveau_graph_dump_trap_info(dev,
						"PGRAPH_SINGLE_STEP", &trap);
			status &= ~0x01000000;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x01000000);
		}

		/* 0x02000000 happens when you pause a ctxprog...
		 * but the only way this can happen that I know is by
		 * poking the relevant MMIO register, and we don't
		 * do that. */

		if (status) {
			NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n",
				status);
			nv_wr32(dev, NV03_PGRAPH_INTR, status);
		}

		{
			const int isb = (1 << 16) | (1 << 0);

			if ((nv_rd32(dev, 0x400500) & isb) != isb)
				nv_wr32(dev, 0x400500,
					nv_rd32(dev, 0x400500) | isb);
		}
	}

	nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
	if (nv_rd32(dev, 0x400824) & (1 << 31))
		nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31));
}

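/* Acknowledge CRTC vblank interrupts and forward them to DRM's vblank
 * handling.  "crtc" is a two-bit mask of which heads fired.
 */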
static void
nouveau_crtc_irq_handler(struct drm_device *dev, int crtc)
{
	if (crtc & 1) {
		nv_wr32(dev, NV_CRTC0_INTSTAT, NV_CRTC_INTR_VBLANK);
		drm_handle_vblank(dev, 0);
	}

	if (crtc & 2) {
		nv_wr32(dev, NV_CRTC1_INTSTAT, NV_CRTC_INTR_VBLANK);
		drm_handle_vblank(dev, 1);
	}
}

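/* Top-level interrupt handler.  Reads PMC_INTR_0 and dispatches to the
 * PFIFO, PGRAPH, CRTC and NV50 display handlers, then to any per-bit
 * handlers registered via nouveau_irq_register(), all under the
 * context_switch_lock.
 */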
irqreturn_t
nouveau_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *)arg;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	unsigned long flags;
	u32 status;
	int i;

	status = nv_rd32(dev, NV03_PMC_INTR_0);
	if (!status)
		return IRQ_NONE;

	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);

	if (status & NV_PMC_INTR_0_PFIFO_PENDING) {
		nouveau_fifo_irq_handler(dev);
		status &= ~NV_PMC_INTR_0_PFIFO_PENDING;
	}

	if (status & NV_PMC_INTR_0_PGRAPH_PENDING) {
		if (dev_priv->card_type >= NV_50)
			nv50_pgraph_irq_handler(dev);
		else
			nouveau_pgraph_irq_handler(dev);

		status &= ~NV_PMC_INTR_0_PGRAPH_PENDING;
	}

	if (status & NV_PMC_INTR_0_CRTCn_PENDING) {
		nouveau_crtc_irq_handler(dev, (status>>24)&3);
		status &= ~NV_PMC_INTR_0_CRTCn_PENDING;
	}

	if (status & NV_PMC_INTR_0_NV50_DISPLAY_PENDING) {
		nv50_display_irq_handler(dev);
		status &= ~NV_PMC_INTR_0_NV50_DISPLAY_PENDING;
	}

	for (i = 0; i < 32 && status; i++) {
		if (!(status & (1 << i)) || !dev_priv->irq_handler[i])
			continue;

		dev_priv->irq_handler[i](dev);
		status &= ~(1 << i);
	}

	if (status)
		NV_ERROR(dev, "Unhandled PMC INTR status bits 0x%08x\n", status);

	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);

	if (dev_priv->msi_enabled)
		nv_wr08(dev, 0x00088068, 0xff);

	return IRQ_HANDLED;
}

int
nouveau_irq_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int ret;

	if (nouveau_msi != 0 && dev_priv->card_type >= NV_50) {
		ret = pci_enable_msi(dev->pdev);
		if (ret == 0) {
			NV_INFO(dev, "enabled MSI\n");
			dev_priv->msi_enabled = true;
		}
	}

	return drm_irq_install(dev);
}

void
nouveau_irq_fini(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	drm_irq_uninstall(dev);
	if (dev_priv->msi_enabled)
		pci_disable_msi(dev->pdev);
}

void
nouveau_irq_register(struct drm_device *dev, int status_bit,
		     void (*handler)(struct drm_device *))
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
	dev_priv->irq_handler[status_bit] = handler;
	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
}

void
nouveau_irq_unregister(struct drm_device *dev, int status_bit)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
	dev_priv->irq_handler[status_bit] = NULL;
	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
}