blob: b3900788c66d2103bb947b7926926063c067a3eb [file] [log] [blame]
Ben Skeggs6ee73862009-12-11 19:24:15 +10001/*
2 * Copyright (C) 2007 Ben Skeggs.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include "drmP.h"
28#include "drm.h"
29#include "nouveau_drv.h"
Ben Skeggsa8eaebc2010-09-01 15:24:31 +100030#include "nouveau_ramht.h"
Marcin Kościelnickid5f3c902010-02-25 00:54:02 +000031#include "nouveau_grctx.h"
Francisco Jerez332b2422010-10-20 23:35:40 +020032#include "nouveau_dma.h"
33#include "nv50_evo.h"
Ben Skeggs6ee73862009-12-11 19:24:15 +100034
Ben Skeggs274fec92010-11-03 13:16:18 +100035static int nv50_graph_register(struct drm_device *);
36static void nv50_graph_isr(struct drm_device *);
Ben Skeggsb8c157d2010-10-20 10:39:35 +100037
/* Reset PGRAPH by pulsing its PMC enable bits low then high. */
static void
nv50_graph_init_reset(struct drm_device *dev)
{
	/* PGRAPH enable bit plus an extra companion bit (1 << 21);
	 * NOTE(review): meaning of bit 21 not documented here. */
	uint32_t pmc_e = NV_PMC_ENABLE_PGRAPH | (1 << 21);

	NV_DEBUG(dev, "\n");

	/* Disable then re-enable the engine to force a hardware reset. */
	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & ~pmc_e);
	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) | pmc_e);
}
48
/* Hook up the PGRAPH interrupt handler and unmask all PGRAPH interrupts. */
static void
nv50_graph_init_intr(struct drm_device *dev)
{
	NV_DEBUG(dev, "\n");

	/* IRQ line 12 is PGRAPH on this hardware. */
	nouveau_irq_register(dev, 12, nv50_graph_isr);
	/* Ack any pending interrupts before enabling. */
	nv_wr32(dev, NV03_PGRAPH_INTR, 0xffffffff);
	nv_wr32(dev, 0x400138, 0xffffffff);
	nv_wr32(dev, NV40_PGRAPH_INTR_EN, 0xffffffff);
}
59
/* Program initial PGRAPH error/trap-enable registers, per-TP included.
 * Writing 0xc0000000 to a unit's status register presumably arms its
 * trap reporting — TODO confirm against hardware docs. */
static void
nv50_graph_init_regs__nv(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	/* 0x1540 holds the unit (TP) enable mask; bits 0-15 are tested below. */
	uint32_t units = nv_rd32(dev, 0x1540);
	int i;

	NV_DEBUG(dev, "\n");

	nv_wr32(dev, 0x400804, 0xc0000000);
	nv_wr32(dev, 0x406800, 0xc0000000);
	nv_wr32(dev, 0x400c04, 0xc0000000);
	nv_wr32(dev, 0x401800, 0xc0000000);
	nv_wr32(dev, 0x405018, 0xc0000000);
	nv_wr32(dev, 0x402000, 0xc0000000);

	/* Per-TP registers live at different strides pre/post nva0
	 * (4KiB vs 2KiB per TP) — same split used by the trap handlers. */
	for (i = 0; i < 16; i++) {
		if (units & 1 << i) {
			if (dev_priv->chipset < 0xa0) {
				nv_wr32(dev, 0x408900 + (i << 12), 0xc0000000);
				nv_wr32(dev, 0x408e08 + (i << 12), 0xc0000000);
				nv_wr32(dev, 0x408314 + (i << 12), 0xc0000000);
			} else {
				nv_wr32(dev, 0x408600 + (i << 11), 0xc0000000);
				nv_wr32(dev, 0x408708 + (i << 11), 0xc0000000);
				nv_wr32(dev, 0x40831c + (i << 11), 0xc0000000);
			}
		}
	}

	/* Clear all pending trap status bits. */
	nv_wr32(dev, 0x400108, 0xffffffff);

	nv_wr32(dev, 0x400824, 0x00004000);
	/* 0x400500 bit0/bit16: FIFO access enable (see nv50_graph_fifo_access). */
	nv_wr32(dev, 0x400500, 0x00010001);
}
95
/* Enable hardware context switching and set one tuning register. */
static void
nv50_graph_init_regs(struct drm_device *dev)
{
	NV_DEBUG(dev, "\n");

	nv_wr32(dev, NV04_PGRAPH_DEBUG_3,
		(1 << 2) /* HW_CONTEXT_SWITCH_ENABLED */);
	/* NOTE(review): 0x402ca8 purpose not documented here. */
	nv_wr32(dev, 0x402ca8, 0x800);
}
105
/* Build the context-switch microcode (ctxprog) and upload it to PGRAPH.
 *
 * On any failure acceleration is disabled (accel_blocked) rather than
 * failing driver init — the function deliberately still returns 0.
 */
static int
nv50_graph_init_ctxctl(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_grctx ctx = {};
	uint32_t *cp;
	int i;

	NV_DEBUG(dev, "\n");

	/* Scratch buffer for the generated ctxprog; 512 32-bit words max. */
	cp = kmalloc(512 * 4, GFP_KERNEL);
	if (!cp) {
		NV_ERROR(dev, "failed to allocate ctxprog\n");
		dev_priv->engine.graph.accel_blocked = true;
		return 0;
	}

	ctx.dev = dev;
	ctx.mode = NOUVEAU_GRCTX_PROG;
	ctx.data = cp;
	ctx.ctxprog_max = 512;
	if (!nv50_grctx_init(&ctx)) {
		/* Generator also reports the size a channel's grctx needs. */
		dev_priv->engine.graph.grctx_size = ctx.ctxvals_pos * 4;

		/* Upload ucode via the auto-incrementing index/data pair. */
		nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
		for (i = 0; i < ctx.ctxprog_len; i++)
			nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, cp[i]);
	} else {
		dev_priv->engine.graph.accel_blocked = true;
	}
	kfree(cp);

	/* Reset current-context tracking to "none loaded". */
	nv_wr32(dev, 0x400320, 4);
	nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0);
	nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, 0);
	return 0;
}
143
/* Top-level PGRAPH init: reset, program registers, upload ctxprog,
 * register object classes, then enable interrupts.
 *
 * Returns 0 on success or a negative errno from the ctxctl/register steps.
 */
int
nv50_graph_init(struct drm_device *dev)
{
	int ret;

	NV_DEBUG(dev, "\n");

	nv50_graph_init_reset(dev);
	nv50_graph_init_regs__nv(dev);
	nv50_graph_init_regs(dev);

	ret = nv50_graph_init_ctxctl(dev);
	if (ret)
		return ret;

	ret = nv50_graph_register(dev);
	if (ret)
		return ret;
	/* Interrupts last, once everything they may touch is set up. */
	nv50_graph_init_intr(dev);
	return 0;
}
165
/* Tear down PGRAPH: mask its interrupts and drop the IRQ handler. */
void
nv50_graph_takedown(struct drm_device *dev)
{
	NV_DEBUG(dev, "\n");
	nv_wr32(dev, 0x40013c, 0x00000000);
	nouveau_irq_unregister(dev, 12);
}
173
174void
175nv50_graph_fifo_access(struct drm_device *dev, bool enabled)
176{
177 const uint32_t mask = 0x00010001;
178
179 if (enabled)
180 nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) | mask);
181 else
182 nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) & ~mask);
183}
184
/* Return the channel whose context is currently loaded in PGRAPH,
 * or NULL if no context is loaded / no matching channel is found.
 *
 * Matching is done by comparing the loaded instance address against
 * each channel's ramin vram instance.
 */
struct nouveau_channel *
nv50_graph_channel(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t inst;
	int i;

	/* Be sure we're not in the middle of a context switch or bad things
	 * will happen, such as unloading the wrong pgraph context.
	 */
	if (!nv_wait(dev, 0x400300, 0x00000001, 0x00000000))
		NV_ERROR(dev, "Ctxprog is still running\n");

	inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR);
	if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED))
		return NULL;
	/* Register stores the instance address >> 12; undo the shift. */
	inst = (inst & NV50_PGRAPH_CTXCTL_CUR_INSTANCE) << 12;

	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
		struct nouveau_channel *chan = dev_priv->channels.ptr[i];

		if (chan && chan->ramin && chan->ramin->vinst == inst)
			return chan;
	}

	return NULL;
}
212
/* Allocate and initialize a PGRAPH context for @chan.
 *
 * Allocates the grctx gpuobj, points the channel's RAMIN context header
 * at it, fills in the initial context values via the grctx generator,
 * and flushes instmem. Returns 0 or a negative errno.
 */
int
nv50_graph_create_context(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *ramin = chan->ramin;
	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
	struct nouveau_grctx ctx = {};
	int hdr, ret;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	/* grctx_size was computed when the ctxprog was generated. */
	ret = nouveau_gpuobj_new(dev, chan, pgraph->grctx_size, 0,
				 NVOBJ_FLAG_ZERO_ALLOC |
				 NVOBJ_FLAG_ZERO_FREE, &chan->ramin_grctx);
	if (ret)
		return ret;

	/* PGRAPH context header lives at a chipset-dependent RAMIN offset. */
	hdr = (dev_priv->chipset == 0x50) ? 0x200 : 0x20;
	nv_wo32(ramin, hdr + 0x00, 0x00190002);
	/* Context limit: last byte of the grctx object. */
	nv_wo32(ramin, hdr + 0x04, chan->ramin_grctx->vinst +
				   pgraph->grctx_size - 1);
	/* Context base address. */
	nv_wo32(ramin, hdr + 0x08, chan->ramin_grctx->vinst);
	nv_wo32(ramin, hdr + 0x0c, 0);
	nv_wo32(ramin, hdr + 0x10, 0);
	nv_wo32(ramin, hdr + 0x14, 0x00010000);

	/* Fill the grctx object with initial register values. */
	ctx.dev = chan->dev;
	ctx.mode = NOUVEAU_GRCTX_VALS;
	ctx.data = chan->ramin_grctx;
	nv50_grctx_init(&ctx);

	/* First word of the grctx points back at the channel's RAMIN. */
	nv_wo32(chan->ramin_grctx, 0x00000, chan->ramin->vinst >> 12);

	dev_priv->engine.instmem.flush(dev);
	return 0;
}
250
/* Destroy @chan's PGRAPH context.
 *
 * Unloads the context from hardware if it is the active one, wipes the
 * RAMIN context header, and releases the grctx object.  FIFO access is
 * disabled under the context-switch lock for the duration.
 */
void
nv50_graph_destroy_context(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
	int i, hdr = (dev_priv->chipset == 0x50) ? 0x200 : 0x20;
	unsigned long flags;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	/* Channel never got a RAMIN object — nothing to tear down. */
	if (!chan->ramin)
		return;

	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
	pgraph->fifo_access(dev, false);

	/* If our context is live in PGRAPH, save it out first. */
	if (pgraph->channel(dev) == chan)
		pgraph->unload_context(dev);

	/* Zero the 24-byte context header written by create_context. */
	for (i = hdr; i < hdr + 24; i += 4)
		nv_wo32(chan->ramin, i, 0);
	dev_priv->engine.instmem.flush(dev);

	pgraph->fifo_access(dev, true);
	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);

	nouveau_gpuobj_ref(NULL, &chan->ramin_grctx);
}
280
/* Load the PGRAPH context at instance address @inst into the engine.
 *
 * FIFO access is disabled around the load and restored afterwards.
 * Always returns 0.
 */
static int
nv50_graph_do_load_context(struct drm_device *dev, uint32_t inst)
{
	uint32_t fifo = nv_rd32(dev, 0x400500);

	/* Stall the FIFO while we swap contexts. */
	nv_wr32(dev, 0x400500, fifo & ~1);
	nv_wr32(dev, 0x400784, inst);
	nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x40);
	nv_wr32(dev, 0x400320, nv_rd32(dev, 0x400320) | 0x11);
	/* Pulse 0x400040; the readback forces the write to post. */
	nv_wr32(dev, 0x400040, 0xffffffff);
	(void)nv_rd32(dev, 0x400040);
	nv_wr32(dev, 0x400040, 0x00000000);
	nv_wr32(dev, 0x400304, nv_rd32(dev, 0x400304) | 1);

	/* Mark the context as current once the engine has gone idle. */
	if (nouveau_wait_for_idle(dev))
		nv_wr32(dev, 0x40032c, inst | (1<<31));
	nv_wr32(dev, 0x400500, fifo);

	return 0;
}
301
302int
303nv50_graph_load_context(struct nouveau_channel *chan)
304{
Ben Skeggsa8eaebc2010-09-01 15:24:31 +1000305 uint32_t inst = chan->ramin->vinst >> 12;
Ben Skeggs6ee73862009-12-11 19:24:15 +1000306
307 NV_DEBUG(chan->dev, "ch%d\n", chan->id);
308 return nv50_graph_do_load_context(chan->dev, inst);
309}
310
/* Save the currently-loaded PGRAPH context back to memory.
 *
 * A no-op (returns 0) when no context is loaded.  Always returns 0.
 */
int
nv50_graph_unload_context(struct drm_device *dev)
{
	uint32_t inst;

	inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR);
	if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED))
		return 0;
	inst &= NV50_PGRAPH_CTXCTL_CUR_INSTANCE;

	/* Idle the engine, trigger the save, then wait for it to finish. */
	nouveau_wait_for_idle(dev);
	nv_wr32(dev, 0x400784, inst);
	nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x20);
	nv_wr32(dev, 0x400304, nv_rd32(dev, 0x400304) | 0x01);
	nouveau_wait_for_idle(dev);

	/* Leave CUR pointing at the instance, LOADED bit cleared. */
	nv_wr32(dev, NV50_PGRAPH_CTXCTL_CUR, inst);
	return 0;
}
330
/* Handle a CONTEXT_SWITCH interrupt: save the old context, load the
 * one hardware requested (CTXCTL_NEXT), and re-enable the interrupt. */
static void
nv50_graph_context_switch(struct drm_device *dev)
{
	uint32_t inst;

	nv50_graph_unload_context(dev);

	inst  = nv_rd32(dev, NV50_PGRAPH_CTXCTL_NEXT);
	inst &= NV50_PGRAPH_CTXCTL_NEXT_INSTANCE;
	nv50_graph_do_load_context(dev, inst);

	nv_wr32(dev, NV40_PGRAPH_INTR_EN, nv_rd32(dev,
		NV40_PGRAPH_INTR_EN) | NV_PGRAPH_INTR_CONTEXT_SWITCH);
}
345
/* NVSW method 0x018c: bind the DMA object @data as the channel's
 * vblank-semaphore notifier.
 *
 * Returns -ENOENT if no such handle exists in the channel's RAMHT,
 * -EINVAL if the object is not a usable notifier, 0 on success.
 */
static int
nv50_graph_nvsw_dma_vblsem(struct nouveau_channel *chan,
			   u32 class, u32 mthd, u32 data)
{
	struct nouveau_gpuobj *gpuobj;

	gpuobj = nouveau_ramht_find(chan, data);
	if (!gpuobj)
		return -ENOENT;

	/* Validate the object as a notifier (offset probe only). */
	if (nouveau_notifier_offset(gpuobj, NULL))
		return -EINVAL;

	chan->nvsw.vblsem = gpuobj;
	/* Offset must be set by a later VBLSEM_OFFSET method; ~0 = unset. */
	chan->nvsw.vblsem_offset = ~0;
	return 0;
}
363
/* NVSW method 0x0400: set the byte offset (stored in words) within the
 * bound vblank semaphore.  Returns -ERANGE if out of the notifier's
 * bounds, 0 on success. */
static int
nv50_graph_nvsw_vblsem_offset(struct nouveau_channel *chan,
			      u32 class, u32 mthd, u32 data)
{
	if (nouveau_notifier_offset(chan->nvsw.vblsem, &data))
		return -ERANGE;

	chan->nvsw.vblsem_offset = data >> 2;
	return 0;
}
374
/* NVSW method 0x0404: latch the value to write into the vblank
 * semaphore when it is released. */
static int
nv50_graph_nvsw_vblsem_release_val(struct nouveau_channel *chan,
				   u32 class, u32 mthd, u32 data)
{
	chan->nvsw.vblsem_rval = data;
	return 0;
}
382
/* NVSW method 0x0408: arm the vblank-semaphore release on CRTC @data.
 *
 * Requires a bound semaphore and offset; @data selects the head (0/1).
 * Takes a vblank reference and queues the channel on the vbl_waiting
 * list; the vblank handler performs the actual release.
 */
static int
nv50_graph_nvsw_vblsem_release(struct nouveau_channel *chan,
			       u32 class, u32 mthd, u32 data)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	/* Reject if the semaphore was never fully set up, or bad head. */
	if (!chan->nvsw.vblsem || chan->nvsw.vblsem_offset == ~0 || data > 1)
		return -EINVAL;

	drm_vblank_get(dev, data);

	chan->nvsw.vblsem_head = data;
	list_add(&chan->nvsw.vbl_wait, &dev_priv->vbl_waiting);

	return 0;
}
400
/* NVSW method 0x0500: complete a queued page flip on this channel.
 * The completion state is currently discarded (see XXX). */
static int
nv50_graph_nvsw_mthd_page_flip(struct nouveau_channel *chan,
			       u32 class, u32 mthd, u32 data)
{
	struct nouveau_page_flip_state s;

	if (!nouveau_finish_page_flip(chan, &s)) {
		/* XXX - Do something here */
	}

	return 0;
}
413
/* Register the software (NVSW) and graphics object classes supported
 * on this chipset.  Idempotent: a second call returns 0 immediately.
 * Always returns 0. */
static int
nv50_graph_register(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	if (dev_priv->engine.graph.registered)
		return 0;

	/* Software class and its method handlers (vblank sem, page flip). */
	NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
	NVOBJ_MTHD (dev, 0x506e, 0x018c, nv50_graph_nvsw_dma_vblsem);
	NVOBJ_MTHD (dev, 0x506e, 0x0400, nv50_graph_nvsw_vblsem_offset);
	NVOBJ_MTHD (dev, 0x506e, 0x0404, nv50_graph_nvsw_vblsem_release_val);
	NVOBJ_MTHD (dev, 0x506e, 0x0408, nv50_graph_nvsw_vblsem_release);
	NVOBJ_MTHD (dev, 0x506e, 0x0500, nv50_graph_nvsw_mthd_page_flip);

	NVOBJ_CLASS(dev, 0x0030, GR); /* null */
	NVOBJ_CLASS(dev, 0x5039, GR); /* m2mf */
	NVOBJ_CLASS(dev, 0x502d, GR); /* 2d */

	/* tesla — 3D class id varies by chipset generation. */
	if (dev_priv->chipset == 0x50)
		NVOBJ_CLASS(dev, 0x5097, GR); /* tesla (nv50) */
	else
	if (dev_priv->chipset < 0xa0)
		NVOBJ_CLASS(dev, 0x8297, GR); /* tesla (nv8x/nv9x) */
	else {
		switch (dev_priv->chipset) {
		case 0xa0:
		case 0xaa:
		case 0xac:
			NVOBJ_CLASS(dev, 0x8397, GR);
			break;
		case 0xa3:
		case 0xa5:
		case 0xa8:
			NVOBJ_CLASS(dev, 0x8597, GR);
			break;
		case 0xaf:
			NVOBJ_CLASS(dev, 0x8697, GR);
			break;
		}
	}

	/* compute */
	NVOBJ_CLASS(dev, 0x50c0, GR);
	/* Newer compute class on nva3+ except the IGP variants (aa/ac). */
	if (dev_priv->chipset > 0xa0 &&
	    dev_priv->chipset != 0xaa &&
	    dev_priv->chipset != 0xac)
		NVOBJ_CLASS(dev, 0x85c0, GR);

	dev_priv->engine.graph.registered = true;
	return 0;
}
Ben Skeggs56ac7472010-10-22 10:26:24 +1000467
/* Flush PGRAPH's VM TLB (engine 0). */
void
nv50_graph_tlb_flush(struct drm_device *dev)
{
	nv50_vm_flush(dev, 0);
}
473
/* nv86 TLB flush: the engine must be idled before flushing.
 *
 * Stalls FIFO access, polls the three 0x40038x status registers until
 * every 3-bit field reads != 1 (busy), with a 2-second timeout measured
 * via PTIMER, then performs the flush and restores FIFO access.  The
 * whole sequence runs under the context-switch spinlock.
 */
void
nv86_graph_tlb_flush(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
	bool idle, timeout = false;
	unsigned long flags;
	u64 start;
	u32 tmp;

	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
	nv_mask(dev, 0x400500, 0x00000001, 0x00000000);

	start = ptimer->read(dev);
	do {
		idle = true;

		/* Each register packs per-unit 3-bit states; 1 == busy. */
		for (tmp = nv_rd32(dev, 0x400380); tmp && idle; tmp >>= 3) {
			if ((tmp & 7) == 1)
				idle = false;
		}

		for (tmp = nv_rd32(dev, 0x400384); tmp && idle; tmp >>= 3) {
			if ((tmp & 7) == 1)
				idle = false;
		}

		for (tmp = nv_rd32(dev, 0x400388); tmp && idle; tmp >>= 3) {
			if ((tmp & 7) == 1)
				idle = false;
		}
	} while (!idle && !(timeout = ptimer->read(dev) - start > 2000000000));

	if (timeout) {
		NV_ERROR(dev, "PGRAPH TLB flush idle timeout fail: "
			      "0x%08x 0x%08x 0x%08x 0x%08x\n",
			 nv_rd32(dev, 0x400700), nv_rd32(dev, 0x400380),
			 nv_rd32(dev, 0x400384), nv_rd32(dev, 0x400388));
	}

	/* Flush anyway, even on timeout — best effort. */
	nv50_vm_flush(dev, 0);

	nv_mask(dev, 0x400500, 0x00000001, 0x00000001);
	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
}
Ben Skeggs274fec92010-11-03 13:16:18 +1000519
/* Name tables used by the trap/interrupt handlers below to decode
 * hardware status words into human-readable log output. */

/* MP (shader multiprocessor) execution-error codes. */
static struct nouveau_enum nv50_mp_exec_error_names[] =
{
	{ 3, "STACK_UNDERFLOW" },
	{ 4, "QUADON_ACTIVE" },
	{ 8, "TIMEOUT" },
	{ 0x10, "INVALID_OPCODE" },
	{ 0x40, "BREAKPOINT" },
	{}
};

/* TRAP_M2MF ustatus bits. */
static struct nouveau_bitfield nv50_graph_trap_m2mf[] = {
	{ 0x00000001, "NOTIFY" },
	{ 0x00000002, "IN" },
	{ 0x00000004, "OUT" },
	{}
};

/* TRAP_VFETCH ustatus bits. */
static struct nouveau_bitfield nv50_graph_trap_vfetch[] = {
	{ 0x00000001, "FAULT" },
	{}
};

/* TRAP_STRMOUT ustatus bits. */
static struct nouveau_bitfield nv50_graph_trap_strmout[] = {
	{ 0x00000001, "FAULT" },
	{}
};

/* TRAP_CCACHE ustatus bits. */
static struct nouveau_bitfield nv50_graph_trap_ccache[] = {
	{ 0x00000001, "FAULT" },
	{}
};

/* There must be a *lot* of these. Will take some time to gather them up. */
static struct nouveau_enum nv50_data_error_names[] = {
	{ 4, "INVALID_VALUE" },
	{ 5, "INVALID_ENUM" },
	{ 8, "INVALID_OBJECT" },
	{ 0xc, "INVALID_BITFIELD" },
	{ 0x28, "MP_NO_REG_SPACE" },
	{ 0x2b, "MP_BLOCK_SIZE_MISMATCH" },
	{}
};

/* Top-level PGRAPH interrupt status bits. */
static struct nouveau_bitfield nv50_graph_intr[] = {
	{ 0x00000001, "NOTIFY" },
	{ 0x00000002, "COMPUTE_QUERY" },
	{ 0x00000010, "ILLEGAL_MTHD" },
	{ 0x00000020, "ILLEGAL_CLASS" },
	{ 0x00000040, "DOUBLE_NOTIFY" },
	{ 0x00001000, "CONTEXT_SWITCH" },
	{ 0x00010000, "BUFFER_NOTIFY" },
	{ 0x00100000, "DATA_ERROR" },
	{ 0x00200000, "TRAP" },
	{ 0x01000000, "SINGLE_STEP" },
	{}
};
576
/* Decode and acknowledge MP execution traps for every MP of TP @tpid.
 *
 * @display: non-zero to log details; traps are acknowledged either way.
 * MP register blocks sit at chipset-dependent offsets (pre/post nva0).
 */
static void
nv50_pgraph_mp_trap(struct drm_device *dev, int tpid, int display)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	/* Bits 24-27 of 0x1540 enable the (up to 4) MPs per TP. */
	uint32_t units = nv_rd32(dev, 0x1540);
	uint32_t addr, mp10, status, pc, oplow, ophigh;
	int i;
	int mps = 0;
	for (i = 0; i < 4; i++) {
		if (!(units & 1 << (i+24)))
			continue;
		if (dev_priv->chipset < 0xa0)
			addr = 0x408200 + (tpid << 12) + (i << 7);
		else
			addr = 0x408100 + (tpid << 11) + (i << 7);
		mp10 = nv_rd32(dev, addr + 0x10);
		status = nv_rd32(dev, addr + 0x14);
		if (!status)
			continue;
		if (display) {
			/* Read +0x20 first — presumably latches the trap
			 * info registers; TODO confirm. */
			nv_rd32(dev, addr + 0x20);
			pc = nv_rd32(dev, addr + 0x24);
			oplow = nv_rd32(dev, addr + 0x70);
			ophigh= nv_rd32(dev, addr + 0x74);
			NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - "
					"TP %d MP %d: ", tpid, i);
			nouveau_enum_print(nv50_mp_exec_error_names, status);
			printk(" at %06x warp %d, opcode %08x %08x\n",
					pc&0xffffff, pc >> 24,
					oplow, ophigh);
		}
		/* Ack: write back mp10 and clear the status register. */
		nv_wr32(dev, addr + 0x10, mp10);
		nv_wr32(dev, addr + 0x14, 0);
		mps++;
	}
	if (!mps && display)
		NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - TP %d: "
				"No MPs claiming errors?\n", tpid);
}
616
/* Decode and acknowledge per-TP traps of a given @type.
 *
 * @type: 6 = TEXTURE, 7 = MP, 8 = TPDMA (see the caller's dispatch).
 * @ustatus_old/@ustatus_new: status register base for pre-/post-nva0
 *   chipsets respectively (TPs are strided 4KiB vs 2KiB apart).
 * @display: non-zero to log details; @name prefixes the log lines.
 */
static void
nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old,
		uint32_t ustatus_new, int display, const char *name)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int tps = 0;
	/* 0x1540 bits 0-15: TP enable mask. */
	uint32_t units = nv_rd32(dev, 0x1540);
	int i, r;
	uint32_t ustatus_addr, ustatus;
	for (i = 0; i < 16; i++) {
		if (!(units & (1 << i)))
			continue;
		if (dev_priv->chipset < 0xa0)
			ustatus_addr = ustatus_old + (i << 12);
		else
			ustatus_addr = ustatus_new + (i << 11);
		ustatus = nv_rd32(dev, ustatus_addr) & 0x7fffffff;
		if (!ustatus)
			continue;
		tps++;
		switch (type) {
		case 6: /* texture error... unknown for now */
			nv50_fb_vm_trap(dev, display, name);
			if (display) {
				NV_ERROR(dev, "magic set %d:\n", i);
				for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4)
					NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
						nv_rd32(dev, r));
			}
			break;
		case 7: /* MP error */
			if (ustatus & 0x00010000) {
				/* Delegate per-MP decoding. */
				nv50_pgraph_mp_trap(dev, i, display);
				ustatus &= ~0x00010000;
			}
			break;
		case 8: /* TPDMA error */
			{
			/* Fault-description registers following ustatus. */
			uint32_t e0c = nv_rd32(dev, ustatus_addr + 4);
			uint32_t e10 = nv_rd32(dev, ustatus_addr + 8);
			uint32_t e14 = nv_rd32(dev, ustatus_addr + 0xc);
			uint32_t e18 = nv_rd32(dev, ustatus_addr + 0x10);
			uint32_t e1c = nv_rd32(dev, ustatus_addr + 0x14);
			uint32_t e20 = nv_rd32(dev, ustatus_addr + 0x18);
			uint32_t e24 = nv_rd32(dev, ustatus_addr + 0x1c);
			nv50_fb_vm_trap(dev, display, name);
			/* 2d engine destination */
			if (ustatus & 0x00000010) {
				if (display) {
					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - Unknown fault at address %02x%08x\n",
							i, e14, e10);
					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
							i, e0c, e18, e1c, e20, e24);
				}
				ustatus &= ~0x00000010;
			}
			/* Render target */
			if (ustatus & 0x00000040) {
				if (display) {
					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - Unknown fault at address %02x%08x\n",
							i, e14, e10);
					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
							i, e0c, e18, e1c, e20, e24);
				}
				ustatus &= ~0x00000040;
			}
			/* CUDA memory: l[], g[] or stack. */
			if (ustatus & 0x00000080) {
				if (display) {
					if (e18 & 0x80000000) {
						/* g[] read fault? */
						NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global read fault at address %02x%08x\n",
								i, e14, e10 | ((e18 >> 24) & 0x1f));
						e18 &= ~0x1f000000;
					} else if (e18 & 0xc) {
						/* g[] write fault? */
						NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global write fault at address %02x%08x\n",
								i, e14, e10 | ((e18 >> 7) & 0x1f));
						e18 &= ~0x00000f80;
					} else {
						NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Unknown CUDA fault at address %02x%08x\n",
								i, e14, e10);
					}
					NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
							i, e0c, e18, e1c, e20, e24);
				}
				ustatus &= ~0x00000080;
			}
			}
			break;
		}
		if (ustatus) {
			if (display)
				NV_INFO(dev, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus);
		}
		/* Ack the TP's trap status. */
		nv_wr32(dev, ustatus_addr, 0xc0000000);
	}

	if (!tps && display)
		NV_INFO(dev, "%s - No TPs claiming errors?\n", name);
}
718
/* Service a PGRAPH TRAP interrupt: walk the unit bits in 0x400108,
 * log (when @display), acknowledge, and reset each faulting unit.
 *
 * @display: non-zero to emit log output; @inst/@chid identify the
 *   channel the trap is attributed to (for log messages only).
 * Returns 0 if the trap was fully handled inside the DISPATCH path,
 * 1 otherwise (caller uses this to decide whether to keep the TRAP
 * bit in its "show" mask).
 */
static int
nv50_pgraph_trap_handler(struct drm_device *dev, u32 display, u64 inst, u32 chid)
{
	u32 status = nv_rd32(dev, 0x400108);
	u32 ustatus;

	if (!status && display) {
		NV_INFO(dev, "PGRAPH - TRAP: no units reporting traps?\n");
		return 1;
	}

	/* DISPATCH: Relays commands to other units and handles NOTIFY,
	 * COND, QUERY. If you get a trap from it, the command is still stuck
	 * in DISPATCH and you need to do something about it. */
	if (status & 0x001) {
		ustatus = nv_rd32(dev, 0x400804) & 0x7fffffff;
		if (!ustatus && display) {
			NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - no ustatus?\n");
		}

		/* Stall FIFO access while we poke at the stuck command. */
		nv_wr32(dev, 0x400500, 0x00000000);

		/* Known to be triggered by screwed up NOTIFY and COND... */
		if (ustatus & 0x00000001) {
			/* DISPATCH_FAULT: decode the stuck method from
			 * the trap registers (valid if bit 31 of addr set). */
			u32 addr = nv_rd32(dev, 0x400808);
			u32 subc = (addr & 0x00070000) >> 16;
			u32 mthd = (addr & 0x00001ffc);
			u32 datal = nv_rd32(dev, 0x40080c);
			u32 datah = nv_rd32(dev, 0x400810);
			u32 class = nv_rd32(dev, 0x400814);
			u32 r848 = nv_rd32(dev, 0x400848);

			NV_INFO(dev, "PGRAPH - TRAP DISPATCH_FAULT\n");
			if (display && (addr & 0x80000000)) {
				NV_INFO(dev, "PGRAPH - ch %d (0x%010llx) "
					     "subc %d class 0x%04x mthd 0x%04x "
					     "data 0x%08x%08x "
					     "400808 0x%08x 400848 0x%08x\n",
					chid, inst, subc, class, mthd, datah,
					datal, addr, r848);
			} else
			if (display) {
				NV_INFO(dev, "PGRAPH - no stuck command?\n");
			}

			/* Clear the stuck-command state. */
			nv_wr32(dev, 0x400808, 0);
			nv_wr32(dev, 0x4008e8, nv_rd32(dev, 0x4008e8) & 3);
			nv_wr32(dev, 0x400848, 0);
			ustatus &= ~0x00000001;
		}

		if (ustatus & 0x00000002) {
			/* DISPATCH_QUERY: same idea, different registers. */
			u32 addr = nv_rd32(dev, 0x40084c);
			u32 subc = (addr & 0x00070000) >> 16;
			u32 mthd = (addr & 0x00001ffc);
			u32 data = nv_rd32(dev, 0x40085c);
			u32 class = nv_rd32(dev, 0x400814);

			NV_INFO(dev, "PGRAPH - TRAP DISPATCH_QUERY\n");
			if (display && (addr & 0x80000000)) {
				NV_INFO(dev, "PGRAPH - ch %d (0x%010llx) "
					     "subc %d class 0x%04x mthd 0x%04x "
					     "data 0x%08x 40084c 0x%08x\n",
					chid, inst, subc, class, mthd,
					data, addr);
			} else
			if (display) {
				NV_INFO(dev, "PGRAPH - no stuck command?\n");
			}

			nv_wr32(dev, 0x40084c, 0);
			ustatus &= ~0x00000002;
		}

		if (ustatus && display) {
			NV_INFO(dev, "PGRAPH - TRAP_DISPATCH (unknown "
				     "0x%08x)\n", ustatus);
		}

		/* Re-arm DISPATCH trap reporting and ack the status bit. */
		nv_wr32(dev, 0x400804, 0xc0000000);
		nv_wr32(dev, 0x400108, 0x001);
		status &= ~0x001;
		if (!status)
			return 0;
	}

	/* M2MF: Memory to memory copy engine. */
	if (status & 0x002) {
		u32 ustatus = nv_rd32(dev, 0x406800) & 0x7fffffff;
		if (display) {
			NV_INFO(dev, "PGRAPH - TRAP_M2MF");
			nouveau_bitfield_print(nv50_graph_trap_m2mf, ustatus);
			printk("\n");
			NV_INFO(dev, "PGRAPH - TRAP_M2MF %08x %08x %08x %08x\n",
				nv_rd32(dev, 0x406804), nv_rd32(dev, 0x406808),
				nv_rd32(dev, 0x40680c), nv_rd32(dev, 0x406810));

		}

		/* No sane way found yet -- just reset the bugger. */
		nv_wr32(dev, 0x400040, 2);
		nv_wr32(dev, 0x400040, 0);
		nv_wr32(dev, 0x406800, 0xc0000000);
		nv_wr32(dev, 0x400108, 0x002);
		status &= ~0x002;
	}

	/* VFETCH: Fetches data from vertex buffers. */
	if (status & 0x004) {
		u32 ustatus = nv_rd32(dev, 0x400c04) & 0x7fffffff;
		if (display) {
			NV_INFO(dev, "PGRAPH - TRAP_VFETCH");
			nouveau_bitfield_print(nv50_graph_trap_vfetch, ustatus);
			printk("\n");
			NV_INFO(dev, "PGRAPH - TRAP_VFETCH %08x %08x %08x %08x\n",
				nv_rd32(dev, 0x400c00), nv_rd32(dev, 0x400c08),
				nv_rd32(dev, 0x400c0c), nv_rd32(dev, 0x400c10));
		}

		nv_wr32(dev, 0x400c04, 0xc0000000);
		nv_wr32(dev, 0x400108, 0x004);
		status &= ~0x004;
	}

	/* STRMOUT: DirectX streamout / OpenGL transform feedback. */
	if (status & 0x008) {
		ustatus = nv_rd32(dev, 0x401800) & 0x7fffffff;
		if (display) {
			NV_INFO(dev, "PGRAPH - TRAP_STRMOUT");
			nouveau_bitfield_print(nv50_graph_trap_strmout, ustatus);
			printk("\n");
			NV_INFO(dev, "PGRAPH - TRAP_STRMOUT %08x %08x %08x %08x\n",
				nv_rd32(dev, 0x401804), nv_rd32(dev, 0x401808),
				nv_rd32(dev, 0x40180c), nv_rd32(dev, 0x401810));

		}

		/* No sane way found yet -- just reset the bugger. */
		nv_wr32(dev, 0x400040, 0x80);
		nv_wr32(dev, 0x400040, 0);
		nv_wr32(dev, 0x401800, 0xc0000000);
		nv_wr32(dev, 0x400108, 0x008);
		status &= ~0x008;
	}

	/* CCACHE: Handles code and c[] caches and fills them. */
	if (status & 0x010) {
		ustatus = nv_rd32(dev, 0x405018) & 0x7fffffff;
		if (display) {
			NV_INFO(dev, "PGRAPH - TRAP_CCACHE");
			nouveau_bitfield_print(nv50_graph_trap_ccache, ustatus);
			printk("\n");
			NV_INFO(dev, "PGRAPH - TRAP_CCACHE %08x %08x %08x %08x"
				     " %08x %08x %08x\n",
				nv_rd32(dev, 0x405800), nv_rd32(dev, 0x405804),
				nv_rd32(dev, 0x405808), nv_rd32(dev, 0x40580c),
				nv_rd32(dev, 0x405810), nv_rd32(dev, 0x405814),
				nv_rd32(dev, 0x40581c));

		}

		nv_wr32(dev, 0x405018, 0xc0000000);
		nv_wr32(dev, 0x400108, 0x010);
		status &= ~0x010;
	}

	/* Unknown, not seen yet... 0x402000 is the only trap status reg
	 * remaining, so try to handle it anyway. Perhaps related to that
	 * unknown DMA slot on tesla? */
	if (status & 0x20) {
		ustatus = nv_rd32(dev, 0x402000) & 0x7fffffff;
		if (display)
			NV_INFO(dev, "PGRAPH - TRAP_UNKC04 0x%08x\n", ustatus);
		nv_wr32(dev, 0x402000, 0xc0000000);
		/* no status modifiction on purpose */
	}

	/* TEXTURE: CUDA texturing units */
	if (status & 0x040) {
		nv50_pgraph_tp_trap(dev, 6, 0x408900, 0x408600, display,
				    "PGRAPH - TRAP_TEXTURE");
		nv_wr32(dev, 0x400108, 0x040);
		status &= ~0x040;
	}

	/* MP: CUDA execution engines. */
	if (status & 0x080) {
		nv50_pgraph_tp_trap(dev, 7, 0x408314, 0x40831c, display,
				    "PGRAPH - TRAP_MP");
		nv_wr32(dev, 0x400108, 0x080);
		status &= ~0x080;
	}

	/* TPDMA: Handles TP-initiated uncached memory accesses:
	 * l[], g[], stack, 2d surfaces, render targets. */
	if (status & 0x100) {
		nv50_pgraph_tp_trap(dev, 8, 0x408e08, 0x408708, display,
				    "PGRAPH - TRAP_TPDMA");
		nv_wr32(dev, 0x400108, 0x100);
		status &= ~0x100;
	}

	if (status) {
		if (display)
			NV_INFO(dev, "PGRAPH - TRAP: unknown 0x%08x\n", status);
		nv_wr32(dev, 0x400108, status);
	}

	return 1;
}
929
/* Map a RAMIN instance address to its channel id.
 *
 * Returns the index of the matching channel, or fifo.channels (one past
 * the last valid id) when no channel matches.  Scans under the channel
 * list spinlock.
 */
static int
nv50_graph_isr_chid(struct drm_device *dev, u64 inst)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *chan;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&dev_priv->channels.lock, flags);
	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
		chan = dev_priv->channels.ptr[i];
		if (!chan || !chan->ramin)
			continue;

		if (inst == chan->ramin->vinst)
			break;
	}
	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
	return i;
}
950
/* PGRAPH interrupt service routine (registered on IRQ line 12).
 *
 * Loops while 0x400100 reports pending interrupts.  For each batch it
 * decodes the trapped method, dispatches software methods
 * (ILLEGAL_MTHD), performs context switches, runs the trap handler,
 * and rate-limits the remaining log output.
 */
static void
nv50_graph_isr(struct drm_device *dev)
{
	u32 stat;

	while ((stat = nv_rd32(dev, 0x400100))) {
		/* 0x40032c holds the current context instance >> 12. */
		u64 inst = (u64)(nv_rd32(dev, 0x40032c) & 0x0fffffff) << 12;
		u32 chid = nv50_graph_isr_chid(dev, inst);
		u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
		u32 subc = (addr & 0x00070000) >> 16;
		u32 mthd = (addr & 0x00001ffc);
		u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
		u32 class = nv_rd32(dev, 0x400814);
		u32 show = stat;

		/* ILLEGAL_MTHD: try the registered software methods first;
		 * suppress the log bit if one handled it. */
		if (stat & 0x00000010) {
			if (!nouveau_gpuobj_mthd_call2(dev, chid, class,
						       mthd, data))
				show &= ~0x00000010;
		}

		/* CONTEXT_SWITCH: ack, mask, and perform the switch. */
		if (stat & 0x00001000) {
			nv_wr32(dev, 0x400500, 0x00000000);
			nv_wr32(dev, 0x400100, 0x00001000);
			nv_mask(dev, 0x40013c, 0x00001000, 0x00000000);
			nv50_graph_context_switch(dev);
			stat &= ~0x00001000;
			show &= ~0x00001000;
		}

		/* Rate-limit everything we'd print below. */
		show = (show && nouveau_ratelimit()) ? show : 0;

		if (show & 0x00100000) {
			u32 ecode = nv_rd32(dev, 0x400110);
			NV_INFO(dev, "PGRAPH - DATA_ERROR ");
			nouveau_enum_print(nv50_data_error_names, ecode);
			printk("\n");
		}

		if (stat & 0x00200000) {
			if (!nv50_pgraph_trap_handler(dev, show, inst, chid))
				show &= ~0x00200000;
		}

		/* Ack all serviced interrupts and re-enable FIFO access. */
		nv_wr32(dev, 0x400100, stat);
		nv_wr32(dev, 0x400500, 0x00010001);

		if (show) {
			NV_INFO(dev, "PGRAPH -");
			nouveau_bitfield_print(nv50_graph_intr, show);
			printk("\n");
			NV_INFO(dev, "PGRAPH - ch %d (0x%010llx) subc %d "
				     "class 0x%04x mthd 0x%04x data 0x%08x\n",
				chid, inst, subc, class, mthd, data);
		}
	}

	/* Clear the sticky bit 31 of 0x400824 if it latched. */
	if (nv_rd32(dev, 0x400824) & (1 << 31))
		nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31));
}