blob: 24f036c85f30901ea72f2cb626ad7a8167313ca1 [file] [log] [blame]
Ben Skeggs4b223ee2010-08-03 10:00:56 +10001/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
Ben Skeggs966a5b72010-11-24 10:49:02 +100025#include <linux/firmware.h>
26
Ben Skeggs4b223ee2010-08-03 10:00:56 +100027#include "drmP.h"
28
29#include "nouveau_drv.h"
Ben Skeggs966a5b72010-11-24 10:49:02 +100030#include "nouveau_mm.h"
31#include "nvc0_graph.h"
32
Ben Skeggs4b223ee2010-08-03 10:00:56 +100033void
34nvc0_graph_fifo_access(struct drm_device *dev, bool enabled)
35{
36}
37
/* Stub: current-channel lookup is not implemented for NVC0; callers always
 * receive NULL and must not rely on identifying the active PGRAPH channel.
 */
struct nouveau_channel *
nvc0_graph_channel(struct drm_device *dev)
{
	return NULL;
}
43
/* Ask the PGRAPH context-control falcon (at 0x409xxx) to bind @chan's
 * instance block as the current graphics context.  Waits for the fuc to
 * acknowledge via a status bit in 0x409800; a timeout is only logged, not
 * propagated (always returns 0).
 */
static int
nvc0_graph_load_context(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;

	nv_wr32(dev, 0x409840, 0x00000030);
	/* 0x409500: instance address (vinst >> 12) with "valid" bit 31 set */
	nv_wr32(dev, 0x409500, 0x80000000 | chan->ramin->vinst >> 12);
	nv_wr32(dev, 0x409504, 0x00000003); /* fuc method: load context */
	if (!nv_wait(dev, 0x409800, 0x00000010, 0x00000010))
		NV_ERROR(dev, "PGRAPH: load_ctx timeout\n");

	return 0;
}
57
/* Ask the context-control falcon to save the current graphics context out
 * to the instance block at VRAM address @chan (raw vinst, not a channel
 * pointer).  Returns -EBUSY if the fuc never signals completion.
 */
static int
nvc0_graph_unload_context_to(struct drm_device *dev, u64 chan)
{
	nv_wr32(dev, 0x409840, 0x00000003);
	nv_wr32(dev, 0x409500, 0x80000000 | chan >> 12);
	nv_wr32(dev, 0x409504, 0x00000009); /* fuc method: save context */
	if (!nv_wait(dev, 0x409800, 0x00000001, 0x00000000)) {
		NV_ERROR(dev, "PGRAPH: unload_ctx timeout\n");
		return -EBUSY;
	}

	return 0;
}
71
/* Build the "golden" context image used to initialise every subsequent
 * channel's grctx.  Loads @chan's context, runs the software context
 * generator, saves the result back to the channel's instance memory, and
 * snapshots it into a kmalloc'd array stored in priv->grctx_vals (owned by
 * priv; freed in nvc0_graph_destroy).  Only run once, for the first channel.
 */
static int
nvc0_graph_construct_context(struct nouveau_channel *chan)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct nvc0_graph_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_GR);
	struct nvc0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR];
	struct drm_device *dev = chan->dev;
	int ret, i;
	u32 *ctx;

	ctx = kmalloc(priv->grctx_size, GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	nvc0_graph_load_context(chan);

	/* clear context-header words before generation; offsets mirror the
	 * ones rewritten per-channel in nvc0_graph_context_new() */
	nv_wo32(grch->grctx, 0x1c, 1);
	nv_wo32(grch->grctx, 0x20, 0);
	nv_wo32(grch->grctx, 0x28, 0);
	nv_wo32(grch->grctx, 0x2c, 0);
	dev_priv->engine.instmem.flush(dev);

	ret = nvc0_grctx_generate(chan);
	if (ret) {
		kfree(ctx);
		return ret;
	}

	ret = nvc0_graph_unload_context_to(dev, chan->ramin->vinst);
	if (ret) {
		kfree(ctx);
		return ret;
	}

	/* copy the saved image out of instance memory, one word at a time */
	for (i = 0; i < priv->grctx_size; i += 4)
		ctx[i / 4] = nv_ro32(grch->grctx, i);

	priv->grctx_vals = ctx;
	return 0;
}
112
/* Allocate the per-channel buffer objects PGRAPH needs and build the
 * register/value "mmio list" the context-control fuc replays on channel
 * switch.  Entries are (register, value) u32 pairs written back-to-back
 * into grch->mmio; grch->mmio_nr is the pair count.  On failure the
 * partially-allocated objects are released by context_del() via the
 * caller's error path.
 */
static int
nvc0_graph_create_context_mmio_list(struct nouveau_channel *chan)
{
	struct nvc0_graph_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_GR);
	struct nvc0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR];
	struct drm_device *dev = chan->dev;
	int i = 0, gpc, tp, ret;
	u32 magic;

	ret = nouveau_gpuobj_new(dev, NULL, 0x2000, 256, NVOBJ_FLAG_VM,
				 &grch->unk408004);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 256, NVOBJ_FLAG_VM,
				 &grch->unk40800c);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_new(dev, NULL, 384 * 1024, 4096,
				 NVOBJ_FLAG_VM | NVOBJ_FLAG_VM_USER,
				 &grch->unk418810);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 0, NVOBJ_FLAG_VM,
				 &grch->mmio);
	if (ret)
		return ret;


	nv_wo32(grch->mmio, i++ * 4, 0x00408004);
	nv_wo32(grch->mmio, i++ * 4, grch->unk408004->vinst >> 8);
	nv_wo32(grch->mmio, i++ * 4, 0x00408008);
	nv_wo32(grch->mmio, i++ * 4, 0x80000018);

	nv_wo32(grch->mmio, i++ * 4, 0x0040800c);
	nv_wo32(grch->mmio, i++ * 4, grch->unk40800c->vinst >> 8);
	nv_wo32(grch->mmio, i++ * 4, 0x00408010);
	nv_wo32(grch->mmio, i++ * 4, 0x80000000);

	nv_wo32(grch->mmio, i++ * 4, 0x00418810);
	nv_wo32(grch->mmio, i++ * 4, 0x80000000 | grch->unk418810->vinst >> 12);
	nv_wo32(grch->mmio, i++ * 4, 0x00419848);
	nv_wo32(grch->mmio, i++ * 4, 0x10000000 | grch->unk418810->vinst >> 12);

	nv_wo32(grch->mmio, i++ * 4, 0x00419004);
	nv_wo32(grch->mmio, i++ * 4, grch->unk40800c->vinst >> 8);
	nv_wo32(grch->mmio, i++ * 4, 0x00419008);
	nv_wo32(grch->mmio, i++ * 4, 0x00000000);

	nv_wo32(grch->mmio, i++ * 4, 0x00418808);
	nv_wo32(grch->mmio, i++ * 4, grch->unk408004->vinst >> 8);
	nv_wo32(grch->mmio, i++ * 4, 0x0041880c);
	nv_wo32(grch->mmio, i++ * 4, 0x80000018);

	/* one 0x405830 entry plus a per-TP 0x504520 entry; magic advances by
	 * 0x02fc per TP — meaning of the constant is not known (reverse
	 * engineered), do not "simplify" */
	magic = 0x02180000;
	nv_wo32(grch->mmio, i++ * 4, 0x00405830);
	nv_wo32(grch->mmio, i++ * 4, magic);
	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
		for (tp = 0; tp < priv->tp_nr[gpc]; tp++, magic += 0x02fc) {
			u32 reg = 0x504520 + (gpc * 0x8000) + (tp * 0x0800);
			nv_wo32(grch->mmio, i++ * 4, reg);
			nv_wo32(grch->mmio, i++ * 4, magic);
		}
	}

	/* i counted u32 writes; each list entry is a reg/value pair */
	grch->mmio_nr = i / 2;
	return 0;
}
183
/* Engine callback: create the PGRAPH context for @chan.  Allocates the
 * per-channel state (grch), the context buffer, and the mmio list; points
 * the channel's instance block at the grctx; generates the golden context
 * on first use; then seeds this channel's grctx from the golden image and
 * patches in the per-channel header words.  On any failure, context_del()
 * tears down whatever was allocated.
 */
static int
nvc0_graph_context_new(struct nouveau_channel *chan, int engine)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
	struct nvc0_graph_priv *priv = nv_engine(dev, engine);
	struct nvc0_graph_chan *grch;
	struct nouveau_gpuobj *grctx;
	int ret, i;

	grch = kzalloc(sizeof(*grch), GFP_KERNEL);
	if (!grch)
		return -ENOMEM;
	/* register engctx before allocating, so the error path can clean up */
	chan->engctx[NVOBJ_ENGINE_GR] = grch;

	ret = nouveau_gpuobj_new(dev, NULL, priv->grctx_size, 256,
				 NVOBJ_FLAG_VM | NVOBJ_FLAG_ZERO_ALLOC,
				 &grch->grctx);
	if (ret)
		goto error;
	grctx = grch->grctx;

	ret = nvc0_graph_create_context_mmio_list(chan);
	if (ret)
		goto error;

	/* instance block words 0x210/0x214: 64-bit grctx address, low word
	 * tagged with 4 (presumably a "valid" flag — TODO confirm) */
	nv_wo32(chan->ramin, 0x0210, lower_32_bits(grctx->vinst) | 4);
	nv_wo32(chan->ramin, 0x0214, upper_32_bits(grctx->vinst));
	pinstmem->flush(dev);

	/* first channel ever: capture the golden context image */
	if (!priv->grctx_vals) {
		ret = nvc0_graph_construct_context(chan);
		if (ret)
			goto error;
	}

	for (i = 0; i < priv->grctx_size; i += 4)
		nv_wo32(grctx, i, priv->grctx_vals[i / 4]);

	/* per-channel context header: mmio list count/address and flags */
	nv_wo32(grctx, 0xf4, 0);
	nv_wo32(grctx, 0xf8, 0);
	nv_wo32(grctx, 0x10, grch->mmio_nr);
	nv_wo32(grctx, 0x14, lower_32_bits(grch->mmio->vinst));
	nv_wo32(grctx, 0x18, upper_32_bits(grch->mmio->vinst));
	nv_wo32(grctx, 0x1c, 1);
	nv_wo32(grctx, 0x20, 0);
	nv_wo32(grctx, 0x28, 0);
	nv_wo32(grctx, 0x2c, 0);
	pinstmem->flush(dev);
	return 0;

error:
	priv->base.context_del(chan, engine);
	return ret;
}
240
/* Engine callback: destroy @chan's PGRAPH context.  Drops every gpuobj
 * reference held by the per-channel state (nouveau_gpuobj_ref(NULL, ...)
 * tolerates pointers that were never allocated, so this is safe to call
 * from context_new()'s partial-failure path) and clears the engctx slot.
 * Note: grch itself is not kfree'd here.
 */
static void
nvc0_graph_context_del(struct nouveau_channel *chan, int engine)
{
	struct nvc0_graph_chan *grch = chan->engctx[engine];

	nouveau_gpuobj_ref(NULL, &grch->mmio);
	nouveau_gpuobj_ref(NULL, &grch->unk418810);
	nouveau_gpuobj_ref(NULL, &grch->unk40800c);
	nouveau_gpuobj_ref(NULL, &grch->unk408004);
	nouveau_gpuobj_ref(NULL, &grch->grctx);
	chan->engctx[engine] = NULL;
}
253
/* Engine callback: object creation.  NVC0 graphics objects need no
 * per-object state on the software side, so this always succeeds.
 */
static int
nvc0_graph_object_new(struct nouveau_channel *chan, int engine,
		      u32 handle, u16 class)
{
	return 0;
}
260
/* Engine callback: shut PGRAPH down.  If a channel context is currently
 * resident (0x409b00 bit 31 set), save it out first so nothing is lost
 * across suspend/unload.
 */
static int
nvc0_graph_fini(struct drm_device *dev, int engine)
{
	u32 inst = nv_rd32(dev, 0x409b00);
	if (inst & 0x80000000) {
		inst &= 0x0fffffff;	/* strip status bits, keep address>>12 */
		nvc0_graph_unload_context_to(dev, (u64)inst << 12);
	}

	return 0;
}
272
/* Software method handler (class 0x9039 mthd 0x0500): complete a pending
 * page flip for @chan.  Method arguments are ignored.
 */
static int
nvc0_graph_mthd_page_flip(struct nouveau_channel *chan,
			  u32 class, u32 mthd, u32 data)
{
	nouveau_finish_page_flip(chan, NULL);
	return 0;
}
280
/* Point the GPC-broadcast 0x4188xx block at the two scratch objects
 * allocated in nvc0_graph_create(); other entries are cleared.  Register
 * meanings are reverse engineered (hence the unk* names).
 */
static void
nvc0_graph_init_obj418880(struct drm_device *dev)
{
	struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
	int i;

	nv_wr32(dev, GPC_BCAST(0x0880), 0x00000000);
	nv_wr32(dev, GPC_BCAST(0x08a4), 0x00000000);
	for (i = 0; i < 4; i++)
		nv_wr32(dev, GPC_BCAST(0x0888) + (i * 4), 0x00000000);
	nv_wr32(dev, GPC_BCAST(0x08b4), priv->unk4188b4->vinst >> 8);
	nv_wr32(dev, GPC_BCAST(0x08b8), priv->unk4188b8->vinst >> 8);
}
294
/* Program the base PGRAPH control/interrupt-enable registers with values
 * taken from NVIDIA's binary driver traces; individual bit meanings are
 * not documented.
 */
static void
nvc0_graph_init_regs(struct drm_device *dev)
{
	nv_wr32(dev, 0x400080, 0x003083c2);
	nv_wr32(dev, 0x400088, 0x00006fe7);
	nv_wr32(dev, 0x40008c, 0x00000000);
	nv_wr32(dev, 0x400090, 0x00000030);
	nv_wr32(dev, 0x40013c, 0x013901f7);
	nv_wr32(dev, 0x400140, 0x00000100);
	nv_wr32(dev, 0x400144, 0x00000000);
	nv_wr32(dev, 0x400148, 0x00000110);
	nv_wr32(dev, 0x400138, 0x00000000);
	nv_wr32(dev, 0x400130, 0x00000000);
	nv_wr32(dev, 0x400134, 0x00000000);
	nv_wr32(dev, 0x400124, 0x00000002);
}
311
/* Distribute the TP/ROP topology values (determined in nvc0_graph_create)
 * to every GPC, both via broadcast and per-GPC registers.
 */
static void
nvc0_graph_init_gpc_0(struct drm_device *dev)
{
	struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
	int gpc;

	/*
	 *      TP      ROP UNKVAL(magic_not_rop_nr)
	 * 450: 4/0/0/0 2   3
	 * 460: 3/4/0/0 4   1
	 * 465: 3/4/4/0 4   7
	 * 470: 3/3/4/4 5   5
	 * 480: 3/4/4/4 6   6
	 *
	 * magicgpc918
	 * 450: 00200000 00000000001000000000000000000000
	 * 460: 00124925 00000000000100100100100100100101
	 * 465: 000ba2e9 00000000000010111010001011101001
	 * 470: 00092493 00000000000010010010010010010011
	 * 480: 00088889 00000000000010001000100010001001
	 *
	 * filled values up to tp_total, remainder 0
	 * 450: 00003210 00000000 00000000 00000000
	 * 460: 02321100 00000000 00000000 00000000
	 * 465: 22111000 00000233 00000000 00000000
	 * 470: 11110000 00233222 00000000 00000000
	 * 480: 11110000 03332222 00000000 00000000
	 */

	nv_wr32(dev, GPC_BCAST(0x0980), priv->magicgpc980[0]);
	nv_wr32(dev, GPC_BCAST(0x0984), priv->magicgpc980[1]);
	nv_wr32(dev, GPC_BCAST(0x0988), priv->magicgpc980[2]);
	nv_wr32(dev, GPC_BCAST(0x098c), priv->magicgpc980[3]);

	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
		nv_wr32(dev, GPC_UNIT(gpc, 0x0914), priv->magic_not_rop_nr << 8 |
						    priv->tp_nr[gpc]);
		nv_wr32(dev, GPC_UNIT(gpc, 0x0910), 0x00040000 | priv->tp_total);
		nv_wr32(dev, GPC_UNIT(gpc, 0x0918), priv->magicgpc918);
	}

	nv_wr32(dev, GPC_BCAST(0x1bd4), priv->magicgpc918);
	nv_wr32(dev, GPC_BCAST(0x08ac), priv->rop_nr);
}
356
/* Enable error reporting (0xc0000000 pattern) in the various PGRAPH
 * sub-units so faults show up in the interrupt handler.
 */
static void
nvc0_graph_init_units(struct drm_device *dev)
{
	nv_wr32(dev, 0x409c24, 0x000f0000);
	nv_wr32(dev, 0x404000, 0xc0000000); /* DISPATCH */
	nv_wr32(dev, 0x404600, 0xc0000000); /* M2MF */
	nv_wr32(dev, 0x408030, 0xc0000000);
	nv_wr32(dev, 0x40601c, 0xc0000000);
	nv_wr32(dev, 0x404490, 0xc0000000); /* MACRO */
	nv_wr32(dev, 0x406018, 0xc0000000);
	nv_wr32(dev, 0x405840, 0xc0000000);
	nv_wr32(dev, 0x405844, 0x00ffffff);
	nv_mask(dev, 0x419cc0, 0x00000008, 0x00000008);
	nv_mask(dev, 0x419eb4, 0x00001000, 0x00001000);
}
372
/* Second GPC init pass: enable error reporting and unmask interrupts for
 * each GPC and each TP within it.
 */
static void
nvc0_graph_init_gpc_1(struct drm_device *dev)
{
	struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
	int gpc, tp;

	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
		nv_wr32(dev, GPC_UNIT(gpc, 0x0420), 0xc0000000);
		nv_wr32(dev, GPC_UNIT(gpc, 0x0900), 0xc0000000);
		nv_wr32(dev, GPC_UNIT(gpc, 0x1028), 0xc0000000);
		nv_wr32(dev, GPC_UNIT(gpc, 0x0824), 0xc0000000);
		for (tp = 0; tp < priv->tp_nr[gpc]; tp++) {
			nv_wr32(dev, TP_UNIT(gpc, tp, 0x508), 0xffffffff);
			nv_wr32(dev, TP_UNIT(gpc, tp, 0x50c), 0xffffffff);
			nv_wr32(dev, TP_UNIT(gpc, tp, 0x224), 0xc0000000);
			nv_wr32(dev, TP_UNIT(gpc, tp, 0x48c), 0xc0000000);
			nv_wr32(dev, TP_UNIT(gpc, tp, 0x084), 0xc0000000);
			nv_wr32(dev, TP_UNIT(gpc, tp, 0x644), 0x001ffffe);
			nv_wr32(dev, TP_UNIT(gpc, tp, 0x64c), 0x0000000f);
		}
		nv_wr32(dev, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
		nv_wr32(dev, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
	}
}
397
/* Enable error reporting and unmask interrupts in every ROP unit. */
static void
nvc0_graph_init_rop(struct drm_device *dev)
{
	struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
	int rop;

	for (rop = 0; rop < priv->rop_nr; rop++) {
		nv_wr32(dev, ROP_UNIT(rop, 0x144), 0xc0000000);
		nv_wr32(dev, ROP_UNIT(rop, 0x070), 0xc0000000);
		nv_wr32(dev, ROP_UNIT(rop, 0x204), 0xffffffff);
		nv_wr32(dev, ROP_UNIT(rop, 0x208), 0xffffffff);
	}
}
411
/* Upload a falcon microcode pair (data segment @data_fw, then code segment
 * @code_fw) from /lib/firmware/nouveau/ into the falcon at @fuc_base.
 * Data is streamed through the 0x01c0/0x01c4 port, code through
 * 0x0180/0x0184 with the 64-word page index written to 0x0188.
 * Returns 0 on success or the request_firmware() error.
 *
 * NOTE(review): the word loops drop any fw->size remainder below 4 bytes
 * and assume fw->data is u32-readable — both hold for the blobs shipped
 * for these falcons, but are not checked here.
 */
static int
nvc0_fuc_load_fw(struct drm_device *dev, u32 fuc_base,
		 const char *code_fw, const char *data_fw)
{
	const struct firmware *fw;
	char name[32];
	int ret, i;

	snprintf(name, sizeof(name), "nouveau/%s", data_fw);
	ret = request_firmware(&fw, name, &dev->pdev->dev);
	if (ret) {
		NV_ERROR(dev, "failed to load %s\n", data_fw);
		return ret;
	}

	nv_wr32(dev, fuc_base + 0x01c0, 0x01000000); /* auto-increment upload */
	for (i = 0; i < fw->size / 4; i++)
		nv_wr32(dev, fuc_base + 0x01c4, ((u32 *)fw->data)[i]);
	release_firmware(fw);

	snprintf(name, sizeof(name), "nouveau/%s", code_fw);
	ret = request_firmware(&fw, name, &dev->pdev->dev);
	if (ret) {
		NV_ERROR(dev, "failed to load %s\n", code_fw);
		return ret;
	}

	nv_wr32(dev, fuc_base + 0x0180, 0x01000000);
	for (i = 0; i < fw->size / 4; i++) {
		/* new 256-byte code page: latch its index first */
		if ((i & 0x3f) == 0)
			nv_wr32(dev, fuc_base + 0x0188, i >> 6);
		nv_wr32(dev, fuc_base + 0x0184, ((u32 *)fw->data)[i]);
	}
	release_firmware(fw);

	return 0;
}
449
/* Load and boot the two context-control falcons (HUB at 0x409000, GPC at
 * 0x41a000), then query them: request 0x10 returns the context image size
 * (stored in priv->grctx_size); 0x16 and 0x25 are further setup requests
 * whose exact meaning is not documented.  Returns -EBUSY on fuc timeout.
 */
static int
nvc0_graph_init_ctxctl(struct drm_device *dev)
{
	struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
	u32 r000260;
	int ret;

	/* load fuc microcode (with engine clock gating disabled meanwhile) */
	r000260 = nv_mask(dev, 0x000260, 0x00000001, 0x00000000);
	ret = nvc0_fuc_load_fw(dev, 0x409000, "fuc409c", "fuc409d");
	if (ret == 0)
		ret = nvc0_fuc_load_fw(dev, 0x41a000, "fuc41ac", "fuc41ad");
	nv_wr32(dev, 0x000260, r000260);

	if (ret)
		return ret;

	/* start both of them running */
	nv_wr32(dev, 0x409840, 0xffffffff);
	nv_wr32(dev, 0x41a10c, 0x00000000);
	nv_wr32(dev, 0x40910c, 0x00000000);
	nv_wr32(dev, 0x41a100, 0x00000002);
	nv_wr32(dev, 0x409100, 0x00000002);
	if (!nv_wait(dev, 0x409800, 0x00000001, 0x00000001))
		NV_INFO(dev, "0x409800 wait failed\n");

	nv_wr32(dev, 0x409840, 0xffffffff);
	nv_wr32(dev, 0x409500, 0x7fffffff);
	nv_wr32(dev, 0x409504, 0x00000021);

	/* request 0x10: query context image size */
	nv_wr32(dev, 0x409840, 0xffffffff);
	nv_wr32(dev, 0x409500, 0x00000000);
	nv_wr32(dev, 0x409504, 0x00000010);
	if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
		NV_ERROR(dev, "fuc09 req 0x10 timeout\n");
		return -EBUSY;
	}
	priv->grctx_size = nv_rd32(dev, 0x409800);

	nv_wr32(dev, 0x409840, 0xffffffff);
	nv_wr32(dev, 0x409500, 0x00000000);
	nv_wr32(dev, 0x409504, 0x00000016);
	if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
		NV_ERROR(dev, "fuc09 req 0x16 timeout\n");
		return -EBUSY;
	}

	nv_wr32(dev, 0x409840, 0xffffffff);
	nv_wr32(dev, 0x409500, 0x00000000);
	nv_wr32(dev, 0x409504, 0x00000025);
	if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
		NV_ERROR(dev, "fuc09 req 0x25 timeout\n");
		return -EBUSY;
	}

	return 0;
}
507
/* Engine callback: bring PGRAPH up.  Resets the engine, runs the init
 * sub-stages, clears pending interrupt state, and boots the context-
 * control falcons.  Acceleration stays blocked on unsupported chipsets or
 * ctxctl failure; note this deliberately returns 0 in those cases so the
 * rest of the driver can still load (display-only operation).
 */
static int
nvc0_graph_init(struct drm_device *dev, int engine)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int ret;

	dev_priv->engine.graph.accel_blocked = true;

	switch (dev_priv->chipset) {
	case 0xc0:
	case 0xc3:
	case 0xc4:
		break;
	default:
		NV_ERROR(dev, "PGRAPH: unsupported chipset, please report!\n");
		if (nouveau_noaccel != 0)
			return 0;
		break;
	}

	/* pulse the PGRAPH-related bits in PMC_ENABLE to reset the engine */
	nv_mask(dev, 0x000200, 0x18001000, 0x00000000);
	nv_mask(dev, 0x000200, 0x18001000, 0x18001000);

	nvc0_graph_init_obj418880(dev);
	nvc0_graph_init_regs(dev);
	/*nvc0_graph_init_unitplemented_magics(dev);*/
	nvc0_graph_init_gpc_0(dev);
	/*nvc0_graph_init_unitplemented_c242(dev);*/

	nv_wr32(dev, 0x400500, 0x00010001);
	nv_wr32(dev, 0x400100, 0xffffffff);	/* ack any stale INTR bits */
	nv_wr32(dev, 0x40013c, 0xffffffff);

	nvc0_graph_init_units(dev);
	nvc0_graph_init_gpc_1(dev);
	nvc0_graph_init_rop(dev);

	/* clear/unmask trap and interrupt status across the units */
	nv_wr32(dev, 0x400108, 0xffffffff);
	nv_wr32(dev, 0x400138, 0xffffffff);
	nv_wr32(dev, 0x400118, 0xffffffff);
	nv_wr32(dev, 0x400130, 0xffffffff);
	nv_wr32(dev, 0x40011c, 0xffffffff);
	nv_wr32(dev, 0x400134, 0xffffffff);
	nv_wr32(dev, 0x400054, 0x34ce3464);

	ret = nvc0_graph_init_ctxctl(dev);
	if (ret == 0)
		dev_priv->engine.graph.accel_blocked = false;
	return 0;
}
558
Ben Skeggs966a5b72010-11-24 10:49:02 +1000559static int
560nvc0_graph_isr_chid(struct drm_device *dev, u64 inst)
561{
562 struct drm_nouveau_private *dev_priv = dev->dev_private;
563 struct nouveau_channel *chan;
564 unsigned long flags;
565 int i;
566
567 spin_lock_irqsave(&dev_priv->channels.lock, flags);
568 for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
569 chan = dev_priv->channels.ptr[i];
570 if (!chan || !chan->ramin)
571 continue;
572
573 if (inst == chan->ramin->vinst)
574 break;
575 }
576 spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
577 return i;
578}
579
/* PGRAPH interrupt handler.  Decodes the trapped method (channel, subch,
 * class, method, data) from the 0x400xxx status registers, dispatches or
 * logs each pending condition, acks it by writing its bit back to
 * 0x400100, and finally re-enables fetching via 0x400500.
 */
static void
nvc0_graph_isr(struct drm_device *dev)
{
	u64 inst = (u64)(nv_rd32(dev, 0x409b00) & 0x0fffffff) << 12;
	u32 chid = nvc0_graph_isr_chid(dev, inst);
	u32 stat = nv_rd32(dev, 0x400100);
	u32 addr = nv_rd32(dev, 0x400704);
	u32 mthd = (addr & 0x00003ffc);
	u32 subc = (addr & 0x00070000) >> 16;
	u32 data = nv_rd32(dev, 0x400708);
	u32 code = nv_rd32(dev, 0x400110);
	u32 class = nv_rd32(dev, 0x404200 + (subc * 4));

	/* ILLEGAL_MTHD: give software methods a chance before complaining */
	if (stat & 0x00000010) {
		if (nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data)) {
			NV_INFO(dev, "PGRAPH: ILLEGAL_MTHD ch %d [0x%010llx] "
				     "subc %d class 0x%04x mthd 0x%04x "
				     "data 0x%08x\n",
				chid, inst, subc, class, mthd, data);
		}
		nv_wr32(dev, 0x400100, 0x00000010);
		stat &= ~0x00000010;
	}

	if (stat & 0x00000020) {
		NV_INFO(dev, "PGRAPH: ILLEGAL_CLASS ch %d [0x%010llx] subc %d "
			     "class 0x%04x mthd 0x%04x data 0x%08x\n",
			chid, inst, subc, class, mthd, data);
		nv_wr32(dev, 0x400100, 0x00000020);
		stat &= ~0x00000020;
	}

	if (stat & 0x00100000) {
		NV_INFO(dev, "PGRAPH: DATA_ERROR [");
		nouveau_enum_print(nv50_data_error_names, code);
		printk("] ch %d [0x%010llx] subc %d class 0x%04x "
		       "mthd 0x%04x data 0x%08x\n",
		       chid, inst, subc, class, mthd, data);
		nv_wr32(dev, 0x400100, 0x00100000);
		stat &= ~0x00100000;
	}

	/* TRAP: ack the per-unit trap bits in 0x400108 as well */
	if (stat & 0x00200000) {
		u32 trap = nv_rd32(dev, 0x400108);
		NV_INFO(dev, "PGRAPH: TRAP ch %d status 0x%08x\n", chid, trap);
		nv_wr32(dev, 0x400108, trap);
		nv_wr32(dev, 0x400100, 0x00200000);
		stat &= ~0x00200000;
	}

	/* CTXCTRL: context-switch falcon raised an event */
	if (stat & 0x00080000) {
		u32 ustat = nv_rd32(dev, 0x409c18);

		NV_INFO(dev, "PGRAPH: CTXCTRL ustat 0x%08x\n", ustat);

		nv_wr32(dev, 0x409c20, ustat);
		nv_wr32(dev, 0x400100, 0x00080000);
		stat &= ~0x00080000;
	}

	if (stat) {
		NV_INFO(dev, "PGRAPH: unknown stat 0x%08x\n", stat);
		nv_wr32(dev, 0x400100, stat);
	}

	nv_wr32(dev, 0x400500, 0x00010001);
}
Ben Skeggs51f73d62011-01-21 13:53:21 +1000647
648static void
649nvc0_runk140_isr(struct drm_device *dev)
650{
651 u32 units = nv_rd32(dev, 0x00017c) & 0x1f;
652
653 while (units) {
654 u32 unit = ffs(units) - 1;
655 u32 reg = 0x140000 + unit * 0x2000;
656 u32 st0 = nv_mask(dev, reg + 0x1020, 0, 0);
657 u32 st1 = nv_mask(dev, reg + 0x1420, 0, 0);
658
659 NV_INFO(dev, "PRUNK140: %d 0x%08x 0x%08x\n", unit, st0, st1);
660 units &= ~(1 << unit);
661 }
662}
Ben Skeggs7a45cd12011-04-01 10:59:53 +1000663
664static void
665nvc0_graph_destroy(struct drm_device *dev, int engine)
666{
667 struct nvc0_graph_priv *priv = nv_engine(dev, engine);
668
669 nouveau_irq_unregister(dev, 12);
670 nouveau_irq_unregister(dev, 25);
671
672 nouveau_gpuobj_ref(NULL, &priv->unk4188b8);
673 nouveau_gpuobj_ref(NULL, &priv->unk4188b4);
674
675 if (priv->grctx_vals)
676 kfree(priv->grctx_vals);
677
678 NVOBJ_ENGINE_DEL(dev, GR);
679 kfree(priv);
680}
681
/* Create and register the NVC0 PGRAPH engine: allocate priv, hook up the
 * engine vtable and IRQ handlers, allocate the two broadcast scratch
 * objects, probe the GPC/TP/ROP topology from hardware, and select the
 * chipset-specific "magic" configuration values (reverse engineered; see
 * the table in nvc0_graph_init_gpc_0()).  Finally registers the object
 * classes PGRAPH accepts.  Returns 0 or a negative errno; the error path
 * tears everything down via nvc0_graph_destroy().
 */
int
nvc0_graph_create(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nvc0_graph_priv *priv;
	int ret, gpc, i;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->base.destroy = nvc0_graph_destroy;
	priv->base.init = nvc0_graph_init;
	priv->base.fini = nvc0_graph_fini;
	priv->base.context_new = nvc0_graph_context_new;
	priv->base.context_del = nvc0_graph_context_del;
	priv->base.object_new = nvc0_graph_object_new;

	NVOBJ_ENGINE_ADD(dev, GR, &priv->base);
	nouveau_irq_register(dev, 12, nvc0_graph_isr);	/* PGRAPH */
	nouveau_irq_register(dev, 25, nvc0_runk140_isr);

	ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 256, 0, &priv->unk4188b4);
	if (ret)
		goto error;

	ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 256, 0, &priv->unk4188b8);
	if (ret)
		goto error;

	/* fill both scratch objects with the pattern the blob uses */
	for (i = 0; i < 0x1000; i += 4) {
		nv_wo32(priv->unk4188b4, i, 0x00000010);
		nv_wo32(priv->unk4188b8, i, 0x00000010);
	}

	/* probe topology: GPC/ROP counts, then TPs per GPC */
	priv->gpc_nr = nv_rd32(dev, 0x409604) & 0x0000001f;
	priv->rop_nr = (nv_rd32(dev, 0x409604) & 0x001f0000) >> 16;
	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
		priv->tp_nr[gpc] = nv_rd32(dev, GPC_UNIT(gpc, 0x2608));
		priv->tp_total += priv->tp_nr[gpc];
	}

	/*XXX: these need figuring out... */
	switch (dev_priv->chipset) {
	case 0xc0:
		if (priv->tp_total == 11) { /* 465, 3/4/4/0, 4 */
			priv->magic_not_rop_nr = 0x07;
			/* filled values up to tp_total, the rest 0 */
			priv->magicgpc980[0] = 0x22111000;
			priv->magicgpc980[1] = 0x00000233;
			priv->magicgpc980[2] = 0x00000000;
			priv->magicgpc980[3] = 0x00000000;
			priv->magicgpc918  = 0x000ba2e9;
		} else
		if (priv->tp_total == 14) { /* 470, 3/3/4/4, 5 */
			priv->magic_not_rop_nr = 0x05;
			priv->magicgpc980[0] = 0x11110000;
			priv->magicgpc980[1] = 0x00233222;
			priv->magicgpc980[2] = 0x00000000;
			priv->magicgpc980[3] = 0x00000000;
			priv->magicgpc918  = 0x00092493;
		} else
		if (priv->tp_total == 15) { /* 480, 3/4/4/4, 6 */
			priv->magic_not_rop_nr = 0x06;
			priv->magicgpc980[0] = 0x11110000;
			priv->magicgpc980[1] = 0x03332222;
			priv->magicgpc980[2] = 0x00000000;
			priv->magicgpc980[3] = 0x00000000;
			priv->magicgpc918  = 0x00088889;
		}
		break;
	case 0xc3: /* 450, 4/0/0/0, 2 */
		priv->magic_not_rop_nr = 0x03;
		priv->magicgpc980[0] = 0x00003210;
		priv->magicgpc980[1] = 0x00000000;
		priv->magicgpc980[2] = 0x00000000;
		priv->magicgpc980[3] = 0x00000000;
		priv->magicgpc918  = 0x00200000;
		break;
	case 0xc4: /* 460, 3/4/0/0, 4 */
		priv->magic_not_rop_nr = 0x01;
		priv->magicgpc980[0] = 0x02321100;
		priv->magicgpc980[1] = 0x00000000;
		priv->magicgpc980[2] = 0x00000000;
		priv->magicgpc980[3] = 0x00000000;
		priv->magicgpc918  = 0x00124925;
		break;
	}

	/* unrecognised configuration: warn and fall back to 0xc3's values */
	if (!priv->magic_not_rop_nr) {
		NV_ERROR(dev, "PGRAPH: unknown config: %d/%d/%d/%d, %d\n",
			 priv->tp_nr[0], priv->tp_nr[1], priv->tp_nr[2],
			 priv->tp_nr[3], priv->rop_nr);
		/* use 0xc3's values... */
		priv->magic_not_rop_nr = 0x03;
		priv->magicgpc980[0] = 0x00003210;
		priv->magicgpc980[1] = 0x00000000;
		priv->magicgpc980[2] = 0x00000000;
		priv->magicgpc980[3] = 0x00000000;
		priv->magicgpc918  = 0x00200000;
	}

	NVOBJ_CLASS(dev, 0x902d, GR); /* 2D */
	NVOBJ_CLASS(dev, 0x9039, GR); /* M2MF */
	NVOBJ_MTHD (dev, 0x9039, 0x0500, nvc0_graph_mthd_page_flip);
	NVOBJ_CLASS(dev, 0x9097, GR); /* 3D */
	NVOBJ_CLASS(dev, 0x90c0, GR); /* COMPUTE */
	return 0;

error:
	nvc0_graph_destroy(dev, NVOBJ_ENGINE_GR);
	return ret;
}