blob: 82a4ded5dae82330942a0f869fa4a5610a0d2a09 [file] [log] [blame]
/*
 * Copyright 2010 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
24
25#include "drmP.h"
26
27#include "nouveau_drv.h"
Ben Skeggsb2b09932010-11-24 10:47:15 +100028#include "nouveau_mm.h"
29
30static void nvc0_fifo_isr(struct drm_device *);
31
/* Global PFIFO state for NVC0 (Fermi). */
struct nvc0_fifo_priv {
	struct nouveau_gpuobj *playlist[2];	/* double-buffered playlist objects */
	int cur_playlist;			/* index of the buffer to fill on the next update */
	struct nouveau_vma user_vma;		/* BAR1 window covering every channel's control page */
};
37
/* Per-channel PFIFO state. */
struct nvc0_fifo_chan {
	struct nouveau_bo *user;	/* VRAM page holding the channel's control regs */
	struct nouveau_gpuobj *ramfc;	/* fake gpuobj overlaying RAMFC in the channel's ramin */
};
42
43static void
44nvc0_fifo_playlist_update(struct drm_device *dev)
45{
46 struct drm_nouveau_private *dev_priv = dev->dev_private;
47 struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
48 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
49 struct nvc0_fifo_priv *priv = pfifo->priv;
50 struct nouveau_gpuobj *cur;
51 int i, p;
52
53 cur = priv->playlist[priv->cur_playlist];
54 priv->cur_playlist = !priv->cur_playlist;
55
56 for (i = 0, p = 0; i < 128; i++) {
57 if (!(nv_rd32(dev, 0x3004 + (i * 8)) & 1))
58 continue;
59 nv_wo32(cur, p + 0, i);
60 nv_wo32(cur, p + 4, 0x00000004);
61 p += 8;
62 }
63 pinstmem->flush(dev);
64
65 nv_wr32(dev, 0x002270, cur->vinst >> 12);
66 nv_wr32(dev, 0x002274, 0x01f00000 | (p >> 3));
67 if (!nv_wait(dev, 0x00227c, 0x00100000, 0x00000000))
68 NV_ERROR(dev, "PFIFO - playlist update failed\n");
69}
Ben Skeggs4b223ee2010-08-03 10:00:56 +100070
/* No-op on NVC0; presumably retained to satisfy the fifo engine
 * interface — verify against nouveau_drv.h. */
void
nvc0_fifo_disable(struct drm_device *dev)
{
}
75
/* No-op on NVC0; counterpart of nvc0_fifo_disable(). */
void
nvc0_fifo_enable(struct drm_device *dev)
{
}
80
/* Stub: always reports reassignment as disabled on NVC0. */
bool
nvc0_fifo_reassign(struct drm_device *dev, bool enable)
{
	return false;
}
86
/* Stub: always reports cache pull as disabled on NVC0. */
bool
nvc0_fifo_cache_pull(struct drm_device *dev, bool enable)
{
	return false;
}
92
/* Report the current channel id.  NVC0 always answers with the highest
 * channel id (127); the argument is unused. */
int
nvc0_fifo_channel_id(struct drm_device *dev)
{
	return 0x7f;
}
98
/* Create and activate the PFIFO context for a new channel: allocate the
 * channel's control-register page, expose it through BAR1 for polling,
 * build RAMFC inside the channel's instance memory, then enable the
 * channel and rebuild the playlist.
 *
 * Returns 0 on success or a negative errno; on failure the partially
 * constructed state is torn down via pfifo->destroy_context().
 */
int
nvc0_fifo_create_context(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	struct nvc0_fifo_priv *priv = pfifo->priv;
	struct nvc0_fifo_chan *fifoch;
	u64 ib_virt, user_vinst;
	int ret;

	chan->fifo_priv = kzalloc(sizeof(*fifoch), GFP_KERNEL);
	if (!chan->fifo_priv)
		return -ENOMEM;
	fifoch = chan->fifo_priv;

	/* allocate vram for control regs, map into polling area */
	ret = nouveau_bo_new(dev, NULL, 0x1000, 0, TTM_PL_FLAG_VRAM,
			     0, 0, true, true, &fifoch->user);
	if (ret)
		goto error;

	ret = nouveau_bo_pin(fifoch->user, TTM_PL_FLAG_VRAM);
	if (ret) {
		nouveau_bo_ref(NULL, &fifoch->user);
		goto error;
	}

	/* VRAM address of the control page, needed for RAMFC below */
	user_vinst = fifoch->user->bo.mem.start << PAGE_SHIFT;

	ret = nouveau_bo_map(fifoch->user);
	if (ret) {
		nouveau_bo_unpin(fifoch->user);
		nouveau_bo_ref(NULL, &fifoch->user);
		goto error;
	}

	/* each channel owns a 4KiB slot in the shared BAR1 window */
	nouveau_vm_map_at(&priv->user_vma, chan->id * 0x1000,
			  fifoch->user->bo.mem.mm_node);

	/* CPU mapping of the channel's slot, for host-side access */
	chan->user = ioremap_wc(pci_resource_start(dev->pdev, 1) +
				priv->user_vma.offset + (chan->id * 0x1000),
				PAGE_SIZE);
	if (!chan->user) {
		ret = -ENOMEM;
		goto error;
	}

	/* GPU virtual address of the channel's indirect (IB) buffer */
	ib_virt = chan->pushbuf_base + chan->dma.ib_base * 4;

	/* zero channel regs */
	nouveau_bo_wr32(fifoch->user, 0x0040/4, 0);
	nouveau_bo_wr32(fifoch->user, 0x0044/4, 0);
	nouveau_bo_wr32(fifoch->user, 0x0048/4, 0);
	nouveau_bo_wr32(fifoch->user, 0x004c/4, 0);
	nouveau_bo_wr32(fifoch->user, 0x0050/4, 0);
	nouveau_bo_wr32(fifoch->user, 0x0058/4, 0);
	nouveau_bo_wr32(fifoch->user, 0x005c/4, 0);
	nouveau_bo_wr32(fifoch->user, 0x0060/4, 0);
	nouveau_bo_wr32(fifoch->user, 0x0088/4, 0);
	nouveau_bo_wr32(fifoch->user, 0x008c/4, 0);

	/* ramfc: fake gpuobj overlaying the start of the channel's ramin */
	ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst,
				      chan->ramin->vinst, 0x100,
				      NVOBJ_FLAG_ZERO_ALLOC, &fifoch->ramfc);
	if (ret)
		goto error;

	nv_wo32(fifoch->ramfc, 0x08, lower_32_bits(user_vinst));
	nv_wo32(fifoch->ramfc, 0x0c, upper_32_bits(user_vinst));
	nv_wo32(fifoch->ramfc, 0x10, 0x0000face);
	nv_wo32(fifoch->ramfc, 0x30, 0xfffff902);
	nv_wo32(fifoch->ramfc, 0x48, lower_32_bits(ib_virt));
	/* high word: log2 of IB entry count packed above IB address bits */
	nv_wo32(fifoch->ramfc, 0x4c, drm_order(chan->dma.ib_max + 1) << 16 |
				     upper_32_bits(ib_virt));
	nv_wo32(fifoch->ramfc, 0x54, 0x00000002);
	nv_wo32(fifoch->ramfc, 0x84, 0x20400000);
	nv_wo32(fifoch->ramfc, 0x94, 0x30000001);
	nv_wo32(fifoch->ramfc, 0x9c, 0x00000100);
	nv_wo32(fifoch->ramfc, 0xa4, 0x1f1f1f1f);
	nv_wo32(fifoch->ramfc, 0xa8, 0x1f1f1f1f);
	nv_wo32(fifoch->ramfc, 0xac, 0x0000001f);
	nv_wo32(fifoch->ramfc, 0xb8, 0xf8000000);
	nv_wo32(fifoch->ramfc, 0xf8, 0x10003080); /* 0x002310 */
	nv_wo32(fifoch->ramfc, 0xfc, 0x10000010); /* 0x002350 */
	pinstmem->flush(dev);

	/* bind the channel's instance block, enable it, and reschedule */
	nv_wr32(dev, 0x003000 + (chan->id * 8), 0xc0000000 |
						(chan->ramin->vinst >> 12));
	nv_wr32(dev, 0x003004 + (chan->id * 8), 0x001f0001);
	nvc0_fifo_playlist_update(dev);
	return 0;

error:
	pfifo->destroy_context(chan);
	return ret;
}
198
/* Tear down a channel's PFIFO state: disable it in hardware, kick it off
 * the playlist, then release the BAR1 mapping, RAMFC object and control
 * page.  Tolerates a partially-constructed channel, so it also serves as
 * the error path of nvc0_fifo_create_context().
 */
void
nvc0_fifo_destroy_context(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct nvc0_fifo_chan *fifoch;

	/* clear the channel enable bit, then ask PFIFO to idle the channel */
	nv_mask(dev, 0x003004 + (chan->id * 8), 0x00000001, 0x00000000);
	nv_wr32(dev, 0x002634, chan->id);
	/* 0x0002634 is the same register as 0x002634 above */
	if (!nv_wait(dev, 0x0002634, 0xffffffff, chan->id))
		NV_WARN(dev, "0x2634 != chid: 0x%08x\n", nv_rd32(dev, 0x2634));

	nvc0_fifo_playlist_update(dev);

	/* unbind the instance block */
	nv_wr32(dev, 0x003000 + (chan->id * 8), 0x00000000);

	if (chan->user) {
		iounmap(chan->user);
		chan->user = NULL;
	}

	fifoch = chan->fifo_priv;
	chan->fifo_priv = NULL;
	if (!fifoch)
		return;

	nouveau_gpuobj_ref(NULL, &fifoch->ramfc);
	if (fifoch->user) {
		nouveau_bo_unmap(fifoch->user);
		nouveau_bo_unpin(fifoch->user);
		nouveau_bo_ref(NULL, &fifoch->user);
	}
	kfree(fifoch);
}
232
/* No-op on NVC0; always succeeds. */
int
nvc0_fifo_load_context(struct nouveau_channel *chan)
{
	return 0;
}
238
/* No-op on NVC0; always succeeds. */
int
nvc0_fifo_unload_context(struct drm_device *dev)
{
	return 0;
}
244
Ben Skeggsb2b09932010-11-24 10:47:15 +1000245static void
246nvc0_fifo_destroy(struct drm_device *dev)
247{
248 struct drm_nouveau_private *dev_priv = dev->dev_private;
249 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
250 struct nvc0_fifo_priv *priv;
251
252 priv = pfifo->priv;
253 if (!priv)
254 return;
255
256 nouveau_vm_put(&priv->user_vma);
257 nouveau_gpuobj_ref(NULL, &priv->playlist[1]);
258 nouveau_gpuobj_ref(NULL, &priv->playlist[0]);
259 kfree(priv);
260}
261
/* Shut PFIFO down: mask all its interrupts, then free the global state. */
void
nvc0_fifo_takedown(struct drm_device *dev)
{
	nv_wr32(dev, 0x002140, 0x00000000);	/* PFIFO INTR_EN = 0 */
	nvc0_fifo_destroy(dev);
}
268
/* One-time allocation of global PFIFO state: two playlist buffers (double
 * buffered by nvc0_fifo_playlist_update()), a BAR1 VM window big enough
 * for one 4KiB control page per channel, and the IRQ handler.
 *
 * Returns 0 on success or a negative errno; on failure everything
 * allocated so far is released via nvc0_fifo_destroy().
 */
static int
nvc0_fifo_create(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	struct nvc0_fifo_priv *priv;
	int ret;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	pfifo->priv = priv;

	ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 0x1000, 0,
				 &priv->playlist[0]);
	if (ret)
		goto error;

	ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 0x1000, 0,
				 &priv->playlist[1]);
	if (ret)
		goto error;

	/* 4KiB (1 << 12) of BAR1 address space per channel */
	ret = nouveau_vm_get(dev_priv->bar1_vm, pfifo->channels * 0x1000,
			     12, NV_MEM_ACCESS_RW, &priv->user_vma);
	if (ret)
		goto error;

	nouveau_irq_register(dev, 8, nvc0_fifo_isr);
	NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
	return 0;

error:
	nvc0_fifo_destroy(dev);
	return ret;
}
305
/* Bring PFIFO up: create the global state on first call, then reset the
 * engine and program subfifos, interrupt enables and the BAR1 user area.
 *
 * Returns 0 on success or the negative errno from nvc0_fifo_create().
 */
int
nvc0_fifo_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	struct nvc0_fifo_priv *priv;
	int ret, i;

	/* allocate global state once; reused across subsequent inits */
	if (!pfifo->priv) {
		ret = nvc0_fifo_create(dev);
		if (ret)
			return ret;
	}
	priv = pfifo->priv;

	/* reset PFIFO, enable all available PSUBFIFO areas */
	nv_mask(dev, 0x000200, 0x00000100, 0x00000000);
	nv_mask(dev, 0x000200, 0x00000100, 0x00000100);
	nv_wr32(dev, 0x000204, 0xffffffff);
	nv_wr32(dev, 0x002204, 0xffffffff);

	/* assign engines to subfifos */
	nv_wr32(dev, 0x002208, ~(1 << 0)); /* PGRAPH */
	nv_wr32(dev, 0x00220c, ~(1 << 1)); /* PVP */
	nv_wr32(dev, 0x002210, ~(1 << 1)); /* PPP */
	nv_wr32(dev, 0x002214, ~(1 << 1)); /* PBSP */
	nv_wr32(dev, 0x002218, ~(1 << 2)); /* PCE0 */
	nv_wr32(dev, 0x00221c, ~(1 << 1)); /* PCE1 */

	/* PSUBFIFO[n] */
	for (i = 0; i < 3; i++) {
		nv_mask(dev, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
		nv_wr32(dev, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
		nv_wr32(dev, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTR_EN */
	}

	nv_mask(dev, 0x002200, 0x00000001, 0x00000001);
	/* tell the hardware where the per-channel user pages live */
	nv_wr32(dev, 0x002254, 0x10000000 | priv->user_vma.offset >> 12);

	nv_wr32(dev, 0x002a00, 0xffffffff); /* clears PFIFO.INTR bit 30 */
	nv_wr32(dev, 0x002100, 0xffffffff);
	nv_wr32(dev, 0x002140, 0xbfffffff);
	return 0;
}
350
/* VM fault source units, indexed by the unit number reported in 0x259c. */
struct nouveau_enum nvc0_fifo_fault_unit[] = {
	{ 0, "PGRAPH" },
	{ 3, "PEEPHOLE" },
	{ 4, "BAR1" },
	{ 5, "BAR3" },
	{ 7, "PFIFO" },
	{}
};
359
/* VM fault reason codes, taken from the low nibble of the fault status. */
struct nouveau_enum nvc0_fifo_fault_reason[] = {
	{ 0, "PT_NOT_PRESENT" },
	{ 1, "PT_TOO_SHORT" },
	{ 2, "PAGE_NOT_PRESENT" },
	{ 3, "VM_LIMIT_EXCEEDED" },
	{}
};
367
/* Known PSUBFIFO interrupt status bits, for pretty-printing. */
struct nouveau_bitfield nvc0_fifo_subfifo_intr[] = {
/*	{ 0x00008000, "" }	seen with null ib push */
	{ 0x00200000, "ILLEGAL_MTHD" },
	{ 0x00800000, "EMPTY_SUBC" },
	{}
};
374
/* Log a VM fault from one fault unit: read the faulting instance, the
 * 40-bit virtual address and the status, then pretty-print reason and
 * source unit.  Purely informational — nothing is acknowledged here.
 */
static void
nvc0_fifo_isr_vm_fault(struct drm_device *dev, int unit)
{
	u32 inst = nv_rd32(dev, 0x2800 + (unit * 0x10));
	u32 valo = nv_rd32(dev, 0x2804 + (unit * 0x10));
	u32 vahi = nv_rd32(dev, 0x2808 + (unit * 0x10));
	u32 stat = nv_rd32(dev, 0x280c + (unit * 0x10));

	NV_INFO(dev, "PFIFO: %s fault at 0x%010llx [",
		(stat & 0x00000080) ? "write" : "read", (u64)vahi << 32 | valo);
	nouveau_enum_print(nvc0_fifo_fault_reason, stat & 0x0000000f);
	printk("] from ");
	nouveau_enum_print(nvc0_fifo_fault_unit, unit);
	printk(" on channel 0x%010llx\n", (u64)inst << 12);
}
390
391static void
392nvc0_fifo_isr_subfifo_intr(struct drm_device *dev, int unit)
393{
394 u32 stat = nv_rd32(dev, 0x040108 + (unit * 0x2000));
395 u32 addr = nv_rd32(dev, 0x0400c0 + (unit * 0x2000));
396 u32 data = nv_rd32(dev, 0x0400c4 + (unit * 0x2000));
397 u32 chid = nv_rd32(dev, 0x040120 + (unit * 0x2000)) & 0x7f;
398 u32 subc = (addr & 0x00070000);
399 u32 mthd = (addr & 0x00003ffc);
400
401 NV_INFO(dev, "PSUBFIFO %d:", unit);
402 nouveau_bitfield_print(nvc0_fifo_subfifo_intr, stat);
403 NV_INFO(dev, "PSUBFIFO %d: ch %d subc %d mthd 0x%04x data 0x%08x\n",
404 unit, chid, subc, mthd, data);
405
406 nv_wr32(dev, 0x0400c0 + (unit * 0x2000), 0x80600008);
407 nv_wr32(dev, 0x040108 + (unit * 0x2000), stat);
408}
409
410static void
411nvc0_fifo_isr(struct drm_device *dev)
412{
413 u32 stat = nv_rd32(dev, 0x002100);
414
415 if (stat & 0x10000000) {
416 u32 units = nv_rd32(dev, 0x00259c);
417 u32 u = units;
418
419 while (u) {
420 int i = ffs(u) - 1;
421 nvc0_fifo_isr_vm_fault(dev, i);
422 u &= ~(1 << i);
423 }
424
425 nv_wr32(dev, 0x00259c, units);
426 stat &= ~0x10000000;
427 }
428
429 if (stat & 0x20000000) {
430 u32 units = nv_rd32(dev, 0x0025a0);
431 u32 u = units;
432
433 while (u) {
434 int i = ffs(u) - 1;
435 nvc0_fifo_isr_subfifo_intr(dev, i);
436 u &= ~(1 << i);
437 }
438
439 nv_wr32(dev, 0x0025a0, units);
440 stat &= ~0x20000000;
441 }
442
443 if (stat) {
444 NV_INFO(dev, "PFIFO: unhandled status 0x%08x\n", stat);
445 nv_wr32(dev, 0x002100, stat);
446 }
447
448 nv_wr32(dev, 0x2140, 0);
449}