/*
 * Copyright (C) 2006 Ben Skeggs.
 *
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

/*
 * Authors:
 *   Ben Skeggs <darktama@iinet.net.au>
 */

#include "drmP.h"
#include "drm.h"
#include "nouveau_drm.h"
#include "nouveau_drv.h"
#include "nouveau_reg.h"
#include "nouveau_ramht.h"
#include <linux/ratelimit.h>

/* needed for hotplug irq */
#include "nouveau_connector.h"
#include "nv50_display.h"

static DEFINE_RATELIMIT_STATE(nouveau_ratelimit_state, 3 * HZ, 20);

static int nouveau_ratelimit(void)
{
	return __ratelimit(&nouveau_ratelimit_state);
}

void
nouveau_irq_preinstall(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	/* Master disable */
	nv_wr32(dev, NV03_PMC_INTR_EN_0, 0);

	if (dev_priv->card_type >= NV_50) {
		INIT_WORK(&dev_priv->irq_work, nv50_display_irq_handler_bh);
		INIT_WORK(&dev_priv->hpd_work, nv50_display_irq_hotplug_bh);
		spin_lock_init(&dev_priv->hpd_state.lock);
		INIT_LIST_HEAD(&dev_priv->vbl_waiting);
	}
}

int
nouveau_irq_postinstall(struct drm_device *dev)
{
	/* Master enable */
	nv_wr32(dev, NV03_PMC_INTR_EN_0, NV_PMC_INTR_EN_0_MASTER_ENABLE);
	return 0;
}

void
nouveau_irq_uninstall(struct drm_device *dev)
{
	/* Master disable */
	nv_wr32(dev, NV03_PMC_INTR_EN_0, 0);
}

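/* Attempt to service a PFIFO method in software (called on CACHE_ERROR).
 * Method 0x0000 binds an object to a subchannel: if the object is a
 * software object, remember its class and clear that subchannel's bits in
 * PFIFO_CACHE1_ENGINE.  Any other method is forwarded to the software class
 * bound to the subchannel.  Returns true if the method was handled here.
 */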
static bool
nouveau_fifo_swmthd(struct drm_device *dev, u32 chid, u32 addr, u32 data)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *chan = NULL;
	struct nouveau_gpuobj *obj;
	unsigned long flags;
	const int subc = (addr >> 13) & 0x7;
	const int mthd = addr & 0x1ffc;
	bool handled = false;
	u32 engine;

	spin_lock_irqsave(&dev_priv->channels.lock, flags);
	if (likely(chid >= 0 && chid < dev_priv->engine.fifo.channels))
		chan = dev_priv->channels.ptr[chid];
	if (unlikely(!chan))
		goto out;

	switch (mthd) {
	case 0x0000: /* bind object to subchannel */
		obj = nouveau_ramht_find(chan, data);
		if (unlikely(!obj || obj->engine != NVOBJ_ENGINE_SW))
			break;

		chan->sw_subchannel[subc] = obj->class;
		engine = 0x0000000f << (subc * 4);

		nv_mask(dev, NV04_PFIFO_CACHE1_ENGINE, engine, 0x00000000);
		handled = true;
		break;
	default:
		engine = nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE);
		if (unlikely(((engine >> (subc * 4)) & 0xf) != 0))
			break;

		if (!nouveau_gpuobj_mthd_call(chan, chan->sw_subchannel[subc],
					      mthd, data))
			handled = true;
		break;
	}

out:
	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
	return handled;
}

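/* Service pending PFIFO interrupts.  CACHE_ERROR is given to the software
 * method path first, DMA_PUSHER and SEMAPHORE errors are reported and the
 * FIFO nudged past the offending entry, and any leftover bits are logged
 * (ratelimited) and acked.  Gives up after 100 iterations and disables the
 * unit's interrupts if it stays stuck.
 */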
static void
nouveau_fifo_irq_handler(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_engine *engine = &dev_priv->engine;
	uint32_t status, reassign;
	int cnt = 0;

	reassign = nv_rd32(dev, NV03_PFIFO_CACHES) & 1;
	while ((status = nv_rd32(dev, NV03_PFIFO_INTR_0)) && (cnt++ < 100)) {
		uint32_t chid, get;

		nv_wr32(dev, NV03_PFIFO_CACHES, 0);

		chid = engine->fifo.channel_id(dev);
		get  = nv_rd32(dev, NV03_PFIFO_CACHE1_GET);

		if (status & NV_PFIFO_INTR_CACHE_ERROR) {
			uint32_t mthd, data;
			int ptr;

			/* NV_PFIFO_CACHE1_GET actually goes to 0xffc before
			 * wrapping on my G80 chips, but CACHE1 isn't big
			 * enough for this much data.. Tests show that it
			 * wraps around to the start at GET=0x800.. No clue
			 * as to why..
			 */
			ptr = (get & 0x7ff) >> 2;

			if (dev_priv->card_type < NV_40) {
				mthd = nv_rd32(dev,
					NV04_PFIFO_CACHE1_METHOD(ptr));
				data = nv_rd32(dev,
					NV04_PFIFO_CACHE1_DATA(ptr));
			} else {
				mthd = nv_rd32(dev,
					NV40_PFIFO_CACHE1_METHOD(ptr));
				data = nv_rd32(dev,
					NV40_PFIFO_CACHE1_DATA(ptr));
			}

			if (!nouveau_fifo_swmthd(dev, chid, mthd, data)) {
				NV_INFO(dev, "PFIFO_CACHE_ERROR - Ch %d/%d "
					     "Mthd 0x%04x Data 0x%08x\n",
					chid, (mthd >> 13) & 7, mthd & 0x1ffc,
					data);
			}

			nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
			nv_wr32(dev, NV03_PFIFO_INTR_0,
						NV_PFIFO_INTR_CACHE_ERROR);

			nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
				nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) & ~1);
			nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
			nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
				nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) | 1);
			nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0);

			nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH,
				nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
			nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);

			status &= ~NV_PFIFO_INTR_CACHE_ERROR;
		}

		if (status & NV_PFIFO_INTR_DMA_PUSHER) {
			u32 dma_get = nv_rd32(dev, 0x003244);
			u32 dma_put = nv_rd32(dev, 0x003240);
			u32 push = nv_rd32(dev, 0x003220);
			u32 state = nv_rd32(dev, 0x003228);

			if (dev_priv->card_type == NV_50) {
				u32 ho_get = nv_rd32(dev, 0x003328);
				u32 ho_put = nv_rd32(dev, 0x003320);
				u32 ib_get = nv_rd32(dev, 0x003334);
				u32 ib_put = nv_rd32(dev, 0x003330);

				if (nouveau_ratelimit())
					NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%02x%08x "
					     "Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x "
					     "State 0x%08x Push 0x%08x\n",
						chid, ho_get, dma_get, ho_put,
						dma_put, ib_get, ib_put, state,
						push);

				/* METHOD_COUNT, in DMA_STATE on earlier chipsets */
				nv_wr32(dev, 0x003364, 0x00000000);
				if (dma_get != dma_put || ho_get != ho_put) {
					nv_wr32(dev, 0x003244, dma_put);
					nv_wr32(dev, 0x003328, ho_put);
				} else
				if (ib_get != ib_put) {
					nv_wr32(dev, 0x003334, ib_put);
				}
			} else {
				NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%08x "
					     "Put 0x%08x State 0x%08x Push 0x%08x\n",
					chid, dma_get, dma_put, state, push);

				if (dma_get != dma_put)
					nv_wr32(dev, 0x003244, dma_put);
			}

			nv_wr32(dev, 0x003228, 0x00000000);
			nv_wr32(dev, 0x003220, 0x00000001);
			nv_wr32(dev, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
			status &= ~NV_PFIFO_INTR_DMA_PUSHER;
		}

		if (status & NV_PFIFO_INTR_SEMAPHORE) {
			uint32_t sem;

			status &= ~NV_PFIFO_INTR_SEMAPHORE;
			nv_wr32(dev, NV03_PFIFO_INTR_0,
				NV_PFIFO_INTR_SEMAPHORE);

			sem = nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE);
			nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);

			nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
			nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
		}

		if (dev_priv->card_type == NV_50) {
			if (status & 0x00000010) {
				nv50_fb_vm_trap(dev, 1, "PFIFO_BAR_FAULT");
				status &= ~0x00000010;
				nv_wr32(dev, 0x002100, 0x00000010);
			}
		}

		if (status) {
			if (nouveau_ratelimit())
				NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n",
					status, chid);
			nv_wr32(dev, NV03_PFIFO_INTR_0, status);
			status = 0;
		}

		nv_wr32(dev, NV03_PFIFO_CACHES, reassign);
	}

	if (status) {
		NV_INFO(dev, "PFIFO still angry after %d spins, halt\n", cnt);
		nv_wr32(dev, 0x2140, 0);
		nv_wr32(dev, 0x140, 0);
	}

	nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PFIFO_PENDING);
}

struct nouveau_bitfield_names {
	uint32_t mask;
	const char *name;
};

static struct nouveau_bitfield_names nstatus_names[] =
{
	{ NV04_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" },
	{ NV04_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" },
	{ NV04_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" },
	{ NV04_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" }
};

static struct nouveau_bitfield_names nstatus_names_nv10[] =
{
	{ NV10_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" },
	{ NV10_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" },
	{ NV10_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" },
	{ NV10_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" }
};

static struct nouveau_bitfield_names nsource_names[] =
{
	{ NV03_PGRAPH_NSOURCE_NOTIFICATION, "NOTIFICATION" },
	{ NV03_PGRAPH_NSOURCE_DATA_ERROR, "DATA_ERROR" },
	{ NV03_PGRAPH_NSOURCE_PROTECTION_ERROR, "PROTECTION_ERROR" },
	{ NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION, "RANGE_EXCEPTION" },
	{ NV03_PGRAPH_NSOURCE_LIMIT_COLOR, "LIMIT_COLOR" },
	{ NV03_PGRAPH_NSOURCE_LIMIT_ZETA, "LIMIT_ZETA" },
	{ NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD, "ILLEGAL_MTHD" },
	{ NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION, "DMA_R_PROTECTION" },
	{ NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION, "DMA_W_PROTECTION" },
	{ NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION, "FORMAT_EXCEPTION" },
	{ NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION, "PATCH_EXCEPTION" },
	{ NV03_PGRAPH_NSOURCE_STATE_INVALID, "STATE_INVALID" },
	{ NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY, "DOUBLE_NOTIFY" },
	{ NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE, "NOTIFY_IN_USE" },
	{ NV03_PGRAPH_NSOURCE_METHOD_CNT, "METHOD_CNT" },
	{ NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION, "BFR_NOTIFICATION" },
	{ NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION, "DMA_VTX_PROTECTION" },
	{ NV03_PGRAPH_NSOURCE_DMA_WIDTH_A, "DMA_WIDTH_A" },
	{ NV03_PGRAPH_NSOURCE_DMA_WIDTH_B, "DMA_WIDTH_B" },
};

static void
nouveau_print_bitfield_names_(uint32_t value,
				const struct nouveau_bitfield_names *namelist,
				const int namelist_len)
{
	/*
	 * Caller must have already printed the KERN_* log level for us.
	 * Also the caller is responsible for adding the newline.
	 */
	int i;
	for (i = 0; i < namelist_len; ++i) {
		uint32_t mask = namelist[i].mask;
		if (value & mask) {
			printk(" %s", namelist[i].name);
			value &= ~mask;
		}
	}
	if (value)
		printk(" (unknown bits 0x%08x)", value);
}
#define nouveau_print_bitfield_names(val, namelist) \
	nouveau_print_bitfield_names_((val), (namelist), ARRAY_SIZE(namelist))

struct nouveau_enum_names {
	uint32_t value;
	const char *name;
};

static void
nouveau_print_enum_names_(uint32_t value,
				const struct nouveau_enum_names *namelist,
				const int namelist_len)
{
	/*
	 * Caller must have already printed the KERN_* log level for us.
	 * Also the caller is responsible for adding the newline.
	 */
	int i;
	for (i = 0; i < namelist_len; ++i) {
		if (value == namelist[i].value) {
			printk("%s", namelist[i].name);
			return;
		}
	}
	printk("unknown value 0x%08x", value);
}
#define nouveau_print_enum_names(val, namelist) \
	nouveau_print_enum_names_((val), (namelist), ARRAY_SIZE(namelist))

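/* Work out which channel owns the current PGRAPH context by matching the
 * instance address in 0x40032c against each channel's grctx (NV40) or ramin
 * (NV50) instance.  Returns engine.fifo.channels when there's no match, or
 * on pre-NV40 hardware where this lookup doesn't apply.
 */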
static int
nouveau_graph_chid_from_grctx(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *chan;
	unsigned long flags;
	uint32_t inst;
	int i;

	if (dev_priv->card_type < NV_40)
		return dev_priv->engine.fifo.channels;
	else
	if (dev_priv->card_type < NV_50) {
		inst = (nv_rd32(dev, 0x40032c) & 0xfffff) << 4;

		spin_lock_irqsave(&dev_priv->channels.lock, flags);
		for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
			chan = dev_priv->channels.ptr[i];
			if (!chan || !chan->ramin_grctx)
				continue;

			if (inst == chan->ramin_grctx->pinst)
				break;
		}
		spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
	} else {
		inst = (nv_rd32(dev, 0x40032c) & 0xfffff) << 12;

		spin_lock_irqsave(&dev_priv->channels.lock, flags);
		for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
			chan = dev_priv->channels.ptr[i];
			if (!chan || !chan->ramin)
				continue;

			if (inst == chan->ramin->vinst)
				break;
		}
		spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
	}

	return i;
}

static int
nouveau_graph_trapped_channel(struct drm_device *dev, int *channel_ret)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_engine *engine = &dev_priv->engine;
	int channel;

	if (dev_priv->card_type < NV_10)
		channel = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 24) & 0xf;
	else
	if (dev_priv->card_type < NV_40)
		channel = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f;
	else
		channel = nouveau_graph_chid_from_grctx(dev);

	if (channel >= engine->fifo.channels ||
	    !dev_priv->channels.ptr[channel]) {
		NV_ERROR(dev, "AIII, invalid/inactive channel id %d\n", channel);
		return -EINVAL;
	}

	*channel_ret = channel;
	return 0;
}

struct nouveau_pgraph_trap {
	int channel;
	int class;
	int subc, mthd, size;
	uint32_t data, data2;
	uint32_t nsource, nstatus;
};

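/* Fill a nouveau_pgraph_trap from the PGRAPH trap registers: trapped
 * channel, subchannel, class, method and data, plus nsource/nstatus on
 * pre-NV50 hardware.
 */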
452nouveau_graph_trap_info(struct drm_device *dev,
453 struct nouveau_pgraph_trap *trap)
454{
455 struct drm_nouveau_private *dev_priv = dev->dev_private;
456 uint32_t address;
457
458 trap->nsource = trap->nstatus = 0;
459 if (dev_priv->card_type < NV_50) {
460 trap->nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
461 trap->nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
462 }
463
464 if (nouveau_graph_trapped_channel(dev, &trap->channel))
465 trap->channel = -1;
466 address = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
467
468 trap->mthd = address & 0x1FFC;
469 trap->data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
470 if (dev_priv->card_type < NV_10) {
471 trap->subc = (address >> 13) & 0x7;
472 } else {
473 trap->subc = (address >> 16) & 0x7;
474 trap->data2 = nv_rd32(dev, NV10_PGRAPH_TRAPPED_DATA_HIGH);
475 }
476
477 if (dev_priv->card_type < NV_10)
478 trap->class = nv_rd32(dev, 0x400180 + trap->subc*4) & 0xFF;
479 else if (dev_priv->card_type < NV_40)
480 trap->class = nv_rd32(dev, 0x400160 + trap->subc*4) & 0xFFF;
481 else if (dev_priv->card_type < NV_50)
482 trap->class = nv_rd32(dev, 0x400160 + trap->subc*4) & 0xFFFF;
483 else
484 trap->class = nv_rd32(dev, 0x400814);
485}
486
487static void
488nouveau_graph_dump_trap_info(struct drm_device *dev, const char *id,
489 struct nouveau_pgraph_trap *trap)
490{
491 struct drm_nouveau_private *dev_priv = dev->dev_private;
492 uint32_t nsource = trap->nsource, nstatus = trap->nstatus;
493
Marcin Kościelnicki304424e2010-03-01 00:18:39 +0000494 if (dev_priv->card_type < NV_50) {
495 NV_INFO(dev, "%s - nSource:", id);
496 nouveau_print_bitfield_names(nsource, nsource_names);
497 printk(", nStatus:");
498 if (dev_priv->card_type < NV_10)
499 nouveau_print_bitfield_names(nstatus, nstatus_names);
500 else
501 nouveau_print_bitfield_names(nstatus, nstatus_names_nv10);
502 printk("\n");
503 }
Ben Skeggs6ee73862009-12-11 19:24:15 +1000504
505 NV_INFO(dev, "%s - Ch %d/%d Class 0x%04x Mthd 0x%04x "
506 "Data 0x%08x:0x%08x\n",
507 id, trap->channel, trap->subc,
508 trap->class, trap->mthd,
509 trap->data2, trap->data);
510}
511
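/* Try to execute the trapped method through the software object on the
 * trapped channel.  Returns 0 if a software method consumed it, -EINVAL
 * (or the gpuobj call's error) otherwise.
 */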
static int
nouveau_pgraph_intr_swmthd(struct drm_device *dev,
			   struct nouveau_pgraph_trap *trap)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *chan;
	unsigned long flags;
	int ret = -EINVAL;

	spin_lock_irqsave(&dev_priv->channels.lock, flags);
	if (trap->channel > 0 &&
	    trap->channel < dev_priv->engine.fifo.channels &&
	    dev_priv->channels.ptr[trap->channel]) {
		chan = dev_priv->channels.ptr[trap->channel];
		ret = nouveau_gpuobj_mthd_call(chan, trap->class, trap->mthd, trap->data);
	}
	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);

	return ret;
}

static inline void
nouveau_pgraph_intr_notify(struct drm_device *dev, uint32_t nsource)
{
	struct nouveau_pgraph_trap trap;
	int unhandled = 0;

	nouveau_graph_trap_info(dev, &trap);

	if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
		if (nouveau_pgraph_intr_swmthd(dev, &trap))
			unhandled = 1;
	} else {
		unhandled = 1;
	}

	if (unhandled)
		nouveau_graph_dump_trap_info(dev, "PGRAPH_NOTIFY", &trap);
}


static inline void
nouveau_pgraph_intr_error(struct drm_device *dev, uint32_t nsource)
{
	struct nouveau_pgraph_trap trap;
	int unhandled = 0;

	nouveau_graph_trap_info(dev, &trap);
	trap.nsource = nsource;

	if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
		if (nouveau_pgraph_intr_swmthd(dev, &trap))
			unhandled = 1;
	} else if (nsource & NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION) {
		uint32_t v = nv_rd32(dev, 0x402000);
		nv_wr32(dev, 0x402000, v);

		/* dump the error anyway for now: it's useful for
		   Gallium development */
		unhandled = 1;
	} else {
		unhandled = 1;
	}

	if (unhandled && nouveau_ratelimit())
		nouveau_graph_dump_trap_info(dev, "PGRAPH_ERROR", &trap);
}

static inline void
nouveau_pgraph_intr_context_switch(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_engine *engine = &dev_priv->engine;
	uint32_t chid;

	chid = engine->fifo.channel_id(dev);
	NV_DEBUG(dev, "PGRAPH context switch interrupt channel %x\n", chid);

	switch (dev_priv->card_type) {
	case NV_04:
		nv04_graph_context_switch(dev);
		break;
	case NV_10:
		nv10_graph_context_switch(dev);
		break;
	default:
		NV_ERROR(dev, "Context switch not implemented\n");
		break;
	}
}

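/* PGRAPH interrupt handler for pre-NV50 hardware.  NOTIFY and ERROR get the
 * software-method treatment above, CONTEXT_SWITCH hands over to the
 * per-generation context switch code, and anything else is logged and acked.
 */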
static void
nouveau_pgraph_irq_handler(struct drm_device *dev)
{
	uint32_t status;

	while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) {
		uint32_t nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);

		if (status & NV_PGRAPH_INTR_NOTIFY) {
			nouveau_pgraph_intr_notify(dev, nsource);

			status &= ~NV_PGRAPH_INTR_NOTIFY;
			nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_NOTIFY);
		}

		if (status & NV_PGRAPH_INTR_ERROR) {
			nouveau_pgraph_intr_error(dev, nsource);

			status &= ~NV_PGRAPH_INTR_ERROR;
			nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_ERROR);
		}

		if (status & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
			status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
			nv_wr32(dev, NV03_PGRAPH_INTR,
				NV_PGRAPH_INTR_CONTEXT_SWITCH);

			nouveau_pgraph_intr_context_switch(dev);
		}

		if (status) {
			NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n", status);
			nv_wr32(dev, NV03_PGRAPH_INTR, status);
		}

		if ((nv_rd32(dev, NV04_PGRAPH_FIFO) & (1 << 0)) == 0)
			nv_wr32(dev, NV04_PGRAPH_FIFO, 1);
	}

	nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
}

static struct nouveau_enum_names nv50_mp_exec_error_names[] =
{
	{ 3, "STACK_UNDERFLOW" },
	{ 4, "QUADON_ACTIVE" },
	{ 8, "TIMEOUT" },
	{ 0x10, "INVALID_OPCODE" },
	{ 0x40, "BREAKPOINT" },
};

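/* Report and acknowledge execution errors from the MPs inside one TP.  Only
 * MPs flagged in the unit mask at 0x1540 are examined; the per-MP register
 * block sits at a chipset-dependent offset.
 */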
static void
nv50_pgraph_mp_trap(struct drm_device *dev, int tpid, int display)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t units = nv_rd32(dev, 0x1540);
	uint32_t addr, mp10, status, pc, oplow, ophigh;
	int i;
	int mps = 0;
	for (i = 0; i < 4; i++) {
		if (!(units & 1 << (i+24)))
			continue;
		if (dev_priv->chipset < 0xa0)
			addr = 0x408200 + (tpid << 12) + (i << 7);
		else
			addr = 0x408100 + (tpid << 11) + (i << 7);
		mp10 = nv_rd32(dev, addr + 0x10);
		status = nv_rd32(dev, addr + 0x14);
		if (!status)
			continue;
		if (display) {
			nv_rd32(dev, addr + 0x20);
			pc = nv_rd32(dev, addr + 0x24);
			oplow = nv_rd32(dev, addr + 0x70);
			ophigh = nv_rd32(dev, addr + 0x74);
			NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - "
					"TP %d MP %d: ", tpid, i);
			nouveau_print_enum_names(status,
					nv50_mp_exec_error_names);
			printk(" at %06x warp %d, opcode %08x %08x\n",
					pc&0xffffff, pc >> 24,
					oplow, ophigh);
		}
		nv_wr32(dev, addr + 0x10, mp10);
		nv_wr32(dev, addr + 0x14, 0);
		mps++;
	}
	if (!mps && display)
		NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - TP %d: "
				"No MPs claiming errors?\n", tpid);
}

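/* Walk every enabled TP and decode its ustatus register for one trap type
 * (6 = texture, 7 = MP, 8 = TPDMA), dumping whatever detail registers are
 * known and clearing the status afterwards.
 */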
static void
nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old,
		uint32_t ustatus_new, int display, const char *name)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int tps = 0;
	uint32_t units = nv_rd32(dev, 0x1540);
	int i, r;
	uint32_t ustatus_addr, ustatus;
	for (i = 0; i < 16; i++) {
		if (!(units & (1 << i)))
			continue;
		if (dev_priv->chipset < 0xa0)
			ustatus_addr = ustatus_old + (i << 12);
		else
			ustatus_addr = ustatus_new + (i << 11);
		ustatus = nv_rd32(dev, ustatus_addr) & 0x7fffffff;
		if (!ustatus)
			continue;
		tps++;
		switch (type) {
		case 6: /* texture error... unknown for now */
			nv50_fb_vm_trap(dev, display, name);
			if (display) {
				NV_ERROR(dev, "magic set %d:\n", i);
				for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4)
					NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
						nv_rd32(dev, r));
			}
			break;
		case 7: /* MP error */
			if (ustatus & 0x00010000) {
				nv50_pgraph_mp_trap(dev, i, display);
				ustatus &= ~0x00010000;
			}
			break;
		case 8: /* TPDMA error */
			{
			uint32_t e0c = nv_rd32(dev, ustatus_addr + 4);
			uint32_t e10 = nv_rd32(dev, ustatus_addr + 8);
			uint32_t e14 = nv_rd32(dev, ustatus_addr + 0xc);
			uint32_t e18 = nv_rd32(dev, ustatus_addr + 0x10);
			uint32_t e1c = nv_rd32(dev, ustatus_addr + 0x14);
			uint32_t e20 = nv_rd32(dev, ustatus_addr + 0x18);
			uint32_t e24 = nv_rd32(dev, ustatus_addr + 0x1c);
			nv50_fb_vm_trap(dev, display, name);
			/* 2d engine destination */
			if (ustatus & 0x00000010) {
				if (display) {
					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - Unknown fault at address %02x%08x\n",
							i, e14, e10);
					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
							i, e0c, e18, e1c, e20, e24);
				}
				ustatus &= ~0x00000010;
			}
			/* Render target */
			if (ustatus & 0x00000040) {
				if (display) {
					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - Unknown fault at address %02x%08x\n",
							i, e14, e10);
					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
							i, e0c, e18, e1c, e20, e24);
				}
				ustatus &= ~0x00000040;
			}
			/* CUDA memory: l[], g[] or stack. */
			if (ustatus & 0x00000080) {
				if (display) {
					if (e18 & 0x80000000) {
						/* g[] read fault? */
						NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global read fault at address %02x%08x\n",
								i, e14, e10 | ((e18 >> 24) & 0x1f));
						e18 &= ~0x1f000000;
					} else if (e18 & 0xc) {
						/* g[] write fault? */
						NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global write fault at address %02x%08x\n",
								i, e14, e10 | ((e18 >> 7) & 0x1f));
						e18 &= ~0x00000f80;
					} else {
						NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Unknown CUDA fault at address %02x%08x\n",
								i, e14, e10);
					}
					NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
							i, e0c, e18, e1c, e20, e24);
				}
				ustatus &= ~0x00000080;
			}
			}
			break;
		}
		if (ustatus) {
			if (display)
				NV_INFO(dev, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus);
		}
		nv_wr32(dev, ustatus_addr, 0xc0000000);
	}

	if (!tps && display)
		NV_INFO(dev, "%s - No TPs claiming errors?\n", name);
}

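/* Decode the NV50 PGRAPH trap status at 0x400108 unit by unit (DISPATCH,
 * M2MF, VFETCH, STRMOUT, CCACHE, TEXTURE, MP, TPDMA), printing what we can
 * (ratelimited) and resetting each unit's trap state as we go.
 */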
static void
nv50_pgraph_trap_handler(struct drm_device *dev)
{
	struct nouveau_pgraph_trap trap;
	uint32_t status = nv_rd32(dev, 0x400108);
	uint32_t ustatus;
	int display = nouveau_ratelimit();

	if (!status && display) {
		nouveau_graph_trap_info(dev, &trap);
		nouveau_graph_dump_trap_info(dev, "PGRAPH_TRAP", &trap);
		NV_INFO(dev, "PGRAPH_TRAP - no units reporting traps?\n");
	}

	/* DISPATCH: Relays commands to other units and handles NOTIFY,
	 * COND, QUERY. If you get a trap from it, the command is still stuck
	 * in DISPATCH and you need to do something about it. */
	if (status & 0x001) {
		ustatus = nv_rd32(dev, 0x400804) & 0x7fffffff;
		if (!ustatus && display) {
			NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - no ustatus?\n");
		}

		/* Known to be triggered by screwed up NOTIFY and COND... */
		if (ustatus & 0x00000001) {
			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_FAULT");
			nv_wr32(dev, 0x400500, 0);
			if (nv_rd32(dev, 0x400808) & 0x80000000) {
				if (display) {
					if (nouveau_graph_trapped_channel(dev, &trap.channel))
						trap.channel = -1;
					trap.class = nv_rd32(dev, 0x400814);
					trap.mthd = nv_rd32(dev, 0x400808) & 0x1ffc;
					trap.subc = (nv_rd32(dev, 0x400808) >> 16) & 0x7;
					trap.data = nv_rd32(dev, 0x40080c);
					trap.data2 = nv_rd32(dev, 0x400810);
					nouveau_graph_dump_trap_info(dev,
							"PGRAPH_TRAP_DISPATCH_FAULT", &trap);
					NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - 400808: %08x\n", nv_rd32(dev, 0x400808));
					NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - 400848: %08x\n", nv_rd32(dev, 0x400848));
				}
				nv_wr32(dev, 0x400808, 0);
			} else if (display) {
				NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - No stuck command?\n");
			}
			nv_wr32(dev, 0x4008e8, nv_rd32(dev, 0x4008e8) & 3);
			nv_wr32(dev, 0x400848, 0);
			ustatus &= ~0x00000001;
		}
		if (ustatus & 0x00000002) {
			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_QUERY");
			nv_wr32(dev, 0x400500, 0);
			if (nv_rd32(dev, 0x40084c) & 0x80000000) {
				if (display) {
					if (nouveau_graph_trapped_channel(dev, &trap.channel))
						trap.channel = -1;
					trap.class = nv_rd32(dev, 0x400814);
					trap.mthd = nv_rd32(dev, 0x40084c) & 0x1ffc;
					trap.subc = (nv_rd32(dev, 0x40084c) >> 16) & 0x7;
					trap.data = nv_rd32(dev, 0x40085c);
					trap.data2 = 0;
					nouveau_graph_dump_trap_info(dev,
							"PGRAPH_TRAP_DISPATCH_QUERY", &trap);
					NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_QUERY - 40084c: %08x\n", nv_rd32(dev, 0x40084c));
				}
				nv_wr32(dev, 0x40084c, 0);
			} else if (display) {
				NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_QUERY - No stuck command?\n");
			}
			ustatus &= ~0x00000002;
		}
		if (ustatus && display)
			NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - Unhandled ustatus 0x%08x\n", ustatus);
		nv_wr32(dev, 0x400804, 0xc0000000);
		nv_wr32(dev, 0x400108, 0x001);
		status &= ~0x001;
	}

	/* TRAPs other than dispatch use the "normal" trap regs. */
	if (status && display) {
		nouveau_graph_trap_info(dev, &trap);
		nouveau_graph_dump_trap_info(dev,
				"PGRAPH_TRAP", &trap);
	}

	/* M2MF: Memory to memory copy engine. */
	if (status & 0x002) {
		ustatus = nv_rd32(dev, 0x406800) & 0x7fffffff;
		if (!ustatus && display) {
			NV_INFO(dev, "PGRAPH_TRAP_M2MF - no ustatus?\n");
		}
		if (ustatus & 0x00000001) {
			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_NOTIFY");
			ustatus &= ~0x00000001;
		}
		if (ustatus & 0x00000002) {
			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_IN");
			ustatus &= ~0x00000002;
		}
		if (ustatus & 0x00000004) {
			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_OUT");
			ustatus &= ~0x00000004;
		}
		NV_INFO (dev, "PGRAPH_TRAP_M2MF - %08x %08x %08x %08x\n",
				nv_rd32(dev, 0x406804),
				nv_rd32(dev, 0x406808),
				nv_rd32(dev, 0x40680c),
				nv_rd32(dev, 0x406810));
		if (ustatus && display)
			NV_INFO(dev, "PGRAPH_TRAP_M2MF - Unhandled ustatus 0x%08x\n", ustatus);
		/* No sane way found yet -- just reset the bugger. */
		nv_wr32(dev, 0x400040, 2);
		nv_wr32(dev, 0x400040, 0);
		nv_wr32(dev, 0x406800, 0xc0000000);
		nv_wr32(dev, 0x400108, 0x002);
		status &= ~0x002;
	}

	/* VFETCH: Fetches data from vertex buffers. */
	if (status & 0x004) {
		ustatus = nv_rd32(dev, 0x400c04) & 0x7fffffff;
		if (!ustatus && display) {
			NV_INFO(dev, "PGRAPH_TRAP_VFETCH - no ustatus?\n");
		}
		if (ustatus & 0x00000001) {
			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_VFETCH_FAULT");
			NV_INFO (dev, "PGRAPH_TRAP_VFETCH_FAULT - %08x %08x %08x %08x\n",
					nv_rd32(dev, 0x400c00),
					nv_rd32(dev, 0x400c08),
					nv_rd32(dev, 0x400c0c),
					nv_rd32(dev, 0x400c10));
			ustatus &= ~0x00000001;
		}
		if (ustatus && display)
			NV_INFO(dev, "PGRAPH_TRAP_VFETCH - Unhandled ustatus 0x%08x\n", ustatus);
		nv_wr32(dev, 0x400c04, 0xc0000000);
		nv_wr32(dev, 0x400108, 0x004);
		status &= ~0x004;
	}

	/* STRMOUT: DirectX streamout / OpenGL transform feedback. */
	if (status & 0x008) {
		ustatus = nv_rd32(dev, 0x401800) & 0x7fffffff;
		if (!ustatus && display) {
			NV_INFO(dev, "PGRAPH_TRAP_STRMOUT - no ustatus?\n");
		}
		if (ustatus & 0x00000001) {
			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_STRMOUT_FAULT");
			NV_INFO (dev, "PGRAPH_TRAP_STRMOUT_FAULT - %08x %08x %08x %08x\n",
					nv_rd32(dev, 0x401804),
					nv_rd32(dev, 0x401808),
					nv_rd32(dev, 0x40180c),
					nv_rd32(dev, 0x401810));
			ustatus &= ~0x00000001;
		}
		if (ustatus && display)
			NV_INFO(dev, "PGRAPH_TRAP_STRMOUT - Unhandled ustatus 0x%08x\n", ustatus);
		/* No sane way found yet -- just reset the bugger. */
		nv_wr32(dev, 0x400040, 0x80);
		nv_wr32(dev, 0x400040, 0);
		nv_wr32(dev, 0x401800, 0xc0000000);
		nv_wr32(dev, 0x400108, 0x008);
		status &= ~0x008;
	}

	/* CCACHE: Handles code and c[] caches and fills them. */
	if (status & 0x010) {
		ustatus = nv_rd32(dev, 0x405018) & 0x7fffffff;
		if (!ustatus && display) {
			NV_INFO(dev, "PGRAPH_TRAP_CCACHE - no ustatus?\n");
		}
		if (ustatus & 0x00000001) {
			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_CCACHE_FAULT");
			NV_INFO (dev, "PGRAPH_TRAP_CCACHE_FAULT - %08x %08x %08x %08x %08x %08x %08x\n",
					nv_rd32(dev, 0x405800),
					nv_rd32(dev, 0x405804),
					nv_rd32(dev, 0x405808),
					nv_rd32(dev, 0x40580c),
					nv_rd32(dev, 0x405810),
					nv_rd32(dev, 0x405814),
					nv_rd32(dev, 0x40581c));
			ustatus &= ~0x00000001;
		}
		if (ustatus && display)
			NV_INFO(dev, "PGRAPH_TRAP_CCACHE - Unhandled ustatus 0x%08x\n", ustatus);
		nv_wr32(dev, 0x405018, 0xc0000000);
		nv_wr32(dev, 0x400108, 0x010);
		status &= ~0x010;
	}

	/* Unknown, not seen yet... 0x402000 is the only trap status reg
	 * remaining, so try to handle it anyway. Perhaps related to that
	 * unknown DMA slot on tesla? */
	if (status & 0x20) {
		nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_UNKC04");
		ustatus = nv_rd32(dev, 0x402000) & 0x7fffffff;
		if (display)
			NV_INFO(dev, "PGRAPH_TRAP_UNKC04 - Unhandled ustatus 0x%08x\n", ustatus);
		nv_wr32(dev, 0x402000, 0xc0000000);
		/* no status modification on purpose */
	}

	/* TEXTURE: CUDA texturing units */
	if (status & 0x040) {
		nv50_pgraph_tp_trap (dev, 6, 0x408900, 0x408600, display,
				"PGRAPH_TRAP_TEXTURE");
		nv_wr32(dev, 0x400108, 0x040);
		status &= ~0x040;
	}

	/* MP: CUDA execution engines. */
	if (status & 0x080) {
		nv50_pgraph_tp_trap (dev, 7, 0x408314, 0x40831c, display,
				"PGRAPH_TRAP_MP");
		nv_wr32(dev, 0x400108, 0x080);
		status &= ~0x080;
	}

	/* TPDMA: Handles TP-initiated uncached memory accesses:
	 * l[], g[], stack, 2d surfaces, render targets. */
	if (status & 0x100) {
		nv50_pgraph_tp_trap (dev, 8, 0x408e08, 0x408708, display,
				"PGRAPH_TRAP_TPDMA");
		nv_wr32(dev, 0x400108, 0x100);
		status &= ~0x100;
	}

	if (status) {
		if (display)
			NV_INFO(dev, "PGRAPH_TRAP - Unknown trap 0x%08x\n",
				status);
		nv_wr32(dev, 0x400108, status);
	}
}

/* There must be a *lot* of these. Will take some time to gather them up. */
static struct nouveau_enum_names nv50_data_error_names[] =
{
	{ 4, "INVALID_VALUE" },
	{ 5, "INVALID_ENUM" },
	{ 8, "INVALID_OBJECT" },
	{ 0xc, "INVALID_BITFIELD" },
	{ 0x28, "MP_NO_REG_SPACE" },
	{ 0x2b, "MP_BLOCK_SIZE_MISMATCH" },
};

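/* NV50 PGRAPH interrupt handler.  Each status bit is decoded, dumped
 * (ratelimited) and acked individually; TRAP goes through the big handler
 * above, and CONTEXT_SWITCH masks the interrupt and hands over to
 * nv50_graph_context_switch().
 */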
static void
nv50_pgraph_irq_handler(struct drm_device *dev)
{
	struct nouveau_pgraph_trap trap;
	int unhandled = 0;
	uint32_t status;

	while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) {
		/* NOTIFY: You've set a NOTIFY on a command and it's done. */
		if (status & 0x00000001) {
			nouveau_graph_trap_info(dev, &trap);
			if (nouveau_ratelimit())
				nouveau_graph_dump_trap_info(dev,
						"PGRAPH_NOTIFY", &trap);
			status &= ~0x00000001;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000001);
		}

		/* COMPUTE_QUERY: Purpose and exact cause unknown, happens
		 * when you write 0x200 to 0x50c0 method 0x31c. */
		if (status & 0x00000002) {
			nouveau_graph_trap_info(dev, &trap);
			if (nouveau_ratelimit())
				nouveau_graph_dump_trap_info(dev,
						"PGRAPH_COMPUTE_QUERY", &trap);
			status &= ~0x00000002;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000002);
		}

		/* Unknown, never seen: 0x4 */

		/* ILLEGAL_MTHD: You used a wrong method for this class. */
		if (status & 0x00000010) {
			nouveau_graph_trap_info(dev, &trap);
			if (nouveau_pgraph_intr_swmthd(dev, &trap))
				unhandled = 1;
			if (unhandled && nouveau_ratelimit())
				nouveau_graph_dump_trap_info(dev,
						"PGRAPH_ILLEGAL_MTHD", &trap);
			status &= ~0x00000010;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000010);
		}

		/* ILLEGAL_CLASS: You used a wrong class. */
		if (status & 0x00000020) {
			nouveau_graph_trap_info(dev, &trap);
			if (nouveau_ratelimit())
				nouveau_graph_dump_trap_info(dev,
						"PGRAPH_ILLEGAL_CLASS", &trap);
			status &= ~0x00000020;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000020);
		}

		/* DOUBLE_NOTIFY: You tried to set a NOTIFY on another NOTIFY. */
		if (status & 0x00000040) {
			nouveau_graph_trap_info(dev, &trap);
			if (nouveau_ratelimit())
				nouveau_graph_dump_trap_info(dev,
						"PGRAPH_DOUBLE_NOTIFY", &trap);
			status &= ~0x00000040;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000040);
		}

		/* CONTEXT_SWITCH: PGRAPH needs us to load a new context */
		if (status & 0x00001000) {
			nv_wr32(dev, 0x400500, 0x00000000);
			nv_wr32(dev, NV03_PGRAPH_INTR,
				NV_PGRAPH_INTR_CONTEXT_SWITCH);
			nv_wr32(dev, NV40_PGRAPH_INTR_EN, nv_rd32(dev,
				NV40_PGRAPH_INTR_EN) &
				~NV_PGRAPH_INTR_CONTEXT_SWITCH);
			nv_wr32(dev, 0x400500, 0x00010001);

			nv50_graph_context_switch(dev);

			status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
		}

		/* BUFFER_NOTIFY: Your m2mf transfer finished */
		if (status & 0x00010000) {
			nouveau_graph_trap_info(dev, &trap);
			if (nouveau_ratelimit())
				nouveau_graph_dump_trap_info(dev,
						"PGRAPH_BUFFER_NOTIFY", &trap);
			status &= ~0x00010000;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00010000);
		}

		/* DATA_ERROR: Invalid value for this method, or invalid
		 * state in current PGRAPH context for this operation */
		if (status & 0x00100000) {
			nouveau_graph_trap_info(dev, &trap);
			if (nouveau_ratelimit()) {
				nouveau_graph_dump_trap_info(dev,
						"PGRAPH_DATA_ERROR", &trap);
				NV_INFO (dev, "PGRAPH_DATA_ERROR - ");
				nouveau_print_enum_names(nv_rd32(dev, 0x400110),
						nv50_data_error_names);
				printk("\n");
			}
			status &= ~0x00100000;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00100000);
		}

		/* TRAP: Something bad happened in the middle of command
		 * execution. Has a billion types, subtypes, and even
		 * subsubtypes. */
		if (status & 0x00200000) {
			nv50_pgraph_trap_handler(dev);
			status &= ~0x00200000;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00200000);
		}

		/* Unknown, never seen: 0x00400000 */

		/* SINGLE_STEP: Happens on every method if you turned on
		 * single stepping in 40008c */
		if (status & 0x01000000) {
			nouveau_graph_trap_info(dev, &trap);
			if (nouveau_ratelimit())
				nouveau_graph_dump_trap_info(dev,
						"PGRAPH_SINGLE_STEP", &trap);
			status &= ~0x01000000;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x01000000);
		}

		/* 0x02000000 happens when you pause a ctxprog...
		 * but the only way this can happen that I know is by
		 * poking the relevant MMIO register, and we don't
		 * do that. */

		if (status) {
			NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n",
				status);
			nv_wr32(dev, NV03_PGRAPH_INTR, status);
		}

		{
			const int isb = (1 << 16) | (1 << 0);

			if ((nv_rd32(dev, 0x400500) & isb) != isb)
				nv_wr32(dev, 0x400500,
					nv_rd32(dev, 0x400500) | isb);
		}
	}

	nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
	if (nv_rd32(dev, 0x400824) & (1 << 31))
		nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31));
}

static void
nouveau_crtc_irq_handler(struct drm_device *dev, int crtc)
{
	if (crtc & 1)
		nv_wr32(dev, NV_CRTC0_INTSTAT, NV_CRTC_INTR_VBLANK);

	if (crtc & 2)
		nv_wr32(dev, NV_CRTC1_INTSTAT, NV_CRTC_INTR_VBLANK);
}

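/* Top-level interrupt handler.  Reads PMC_INTR_0 and dispatches to the
 * PFIFO, PGRAPH, CRTC and NV50 display/i2c handlers under the context
 * switch lock; any status bits nobody claims are reported as an error.
 */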
irqreturn_t
nouveau_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *)arg;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t status;
	unsigned long flags;

	status = nv_rd32(dev, NV03_PMC_INTR_0);
	if (!status)
		return IRQ_NONE;

	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);

	if (status & NV_PMC_INTR_0_PFIFO_PENDING) {
		nouveau_fifo_irq_handler(dev);
		status &= ~NV_PMC_INTR_0_PFIFO_PENDING;
	}

	if (status & NV_PMC_INTR_0_PGRAPH_PENDING) {
		if (dev_priv->card_type >= NV_50)
			nv50_pgraph_irq_handler(dev);
		else
			nouveau_pgraph_irq_handler(dev);

		status &= ~NV_PMC_INTR_0_PGRAPH_PENDING;
	}

	if (status & NV_PMC_INTR_0_CRTCn_PENDING) {
		nouveau_crtc_irq_handler(dev, (status>>24)&3);
		status &= ~NV_PMC_INTR_0_CRTCn_PENDING;
	}

	if (status & (NV_PMC_INTR_0_NV50_DISPLAY_PENDING |
		      NV_PMC_INTR_0_NV50_I2C_PENDING)) {
		nv50_display_irq_handler(dev);
		status &= ~(NV_PMC_INTR_0_NV50_DISPLAY_PENDING |
			    NV_PMC_INTR_0_NV50_I2C_PENDING);
	}

	if (status)
		NV_ERROR(dev, "Unhandled PMC INTR status bits 0x%08x\n", status);

	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);

	return IRQ_HANDLED;
}