/*
 * Copyright (C) 2007 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */
| 26 | |
| 27 | #include "drmP.h" |
| 28 | #include "drm.h" |
| 29 | #include "nouveau_drv.h" |
| 30 | |
Marcin Kościelnicki | d5f3c90 | 2010-02-25 00:54:02 +0000 | [diff] [blame] | 31 | #include "nouveau_grctx.h" |
Ben Skeggs | 6ee7386 | 2009-12-11 19:24:15 +1000 | [diff] [blame] | 32 | |
/* True for original G80-family chips (chipset 0x50-0x5f).  NOTE: relies
 * on a local `dev_priv` being in scope wherever the macro is expanded. */
#define IS_G80 ((dev_priv->chipset & 0xf0) == 0x50)
| 34 | |
/* Reset PGRAPH by pulsing its enable bit (plus bit 21) in PMC:
 * clear first, then set, so the engine restarts from a clean state.
 * The write order is significant.
 */
static void
nv50_graph_init_reset(struct drm_device *dev)
{
	uint32_t pmc_e = NV_PMC_ENABLE_PGRAPH | (1 << 21);

	NV_DEBUG(dev, "\n");

	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & ~pmc_e);
	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) | pmc_e);
}
| 45 | |
/* Acknowledge all pending PGRAPH interrupts and unmask every source. */
static void
nv50_graph_init_intr(struct drm_device *dev)
{
	NV_DEBUG(dev, "\n");

	/* Write-1-to-clear any stale interrupt bits before enabling. */
	nv_wr32(dev, NV03_PGRAPH_INTR, 0xffffffff);
	/* NOTE(review): 0x400138 appears to be a second status/trap clear
	 * register — confirm against hw docs. */
	nv_wr32(dev, 0x400138, 0xffffffff);
	nv_wr32(dev, NV40_PGRAPH_INTR_EN, 0xffffffff);
}
| 55 | |
/* Program initial per-unit PGRAPH register state. */
static void
nv50_graph_init_regs__nv(struct drm_device *dev)
{
	NV_DEBUG(dev, "\n");

	/* 0xc0000000 written to six different sub-unit registers —
	 * NOTE(review): presumably enables error reporting per unit
	 * (pattern inference only); confirm per-register meaning. */
	nv_wr32(dev, 0x400804, 0xc0000000);
	nv_wr32(dev, 0x406800, 0xc0000000);
	nv_wr32(dev, 0x400c04, 0xc0000000);
	nv_wr32(dev, 0x401800, 0xc0000000);
	nv_wr32(dev, 0x405018, 0xc0000000);
	nv_wr32(dev, 0x402000, 0xc0000000);

	nv_wr32(dev, 0x400108, 0xffffffff);

	nv_wr32(dev, 0x400824, 0x00004000);
	/* Same register/mask pair used by nv50_graph_fifo_access() to
	 * gate FIFO access; this enables it. */
	nv_wr32(dev, 0x400500, 0x00010001);
}
| 73 | |
/* Enable hardware context switching and set one further control reg. */
static void
nv50_graph_init_regs(struct drm_device *dev)
{
	NV_DEBUG(dev, "\n");

	nv_wr32(dev, NV04_PGRAPH_DEBUG_3,
		(1 << 2) /* HW_CONTEXT_SWITCH_ENABLED */);
	/* NOTE(review): 0x402ca8 = 0x800 — purpose not evident from this
	 * file; confirm against hw documentation. */
	nv_wr32(dev, 0x402ca8, 0x800);
}
| 83 | |
/* Set up the PGRAPH context-switching microcode ("ctxprog"): load it
 * from external firmware when nouveau_ctxfw is set, otherwise generate
 * it in-driver via nv50_grctx_init() and upload it word by word.  On
 * generation/allocation failure acceleration is disabled rather than
 * failing driver init (returns 0 in those cases too).
 */
static int
nv50_graph_init_ctxctl(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	NV_DEBUG(dev, "\n");

	if (nouveau_ctxfw) {
		/* Firmware path: grctx_size is a fixed upper bound. */
		nouveau_grctx_prog_load(dev);
		dev_priv->engine.graph.grctx_size = 0x70000;
	}
	if (!dev_priv->engine.graph.ctxprog) {
		/* No firmware ctxprog available — generate our own. */
		struct nouveau_grctx ctx = {};
		uint32_t *cp = kmalloc(512 * 4, GFP_KERNEL);
		int i;
		if (!cp) {
			NV_ERROR(dev, "Couldn't alloc ctxprog! Disabling acceleration.\n");
			dev_priv->engine.graph.accel_blocked = true;
			return 0;
		}
		ctx.dev = dev;
		ctx.mode = NOUVEAU_GRCTX_PROG;
		ctx.data = cp;
		ctx.ctxprog_max = 512;
		if (!nv50_grctx_init(&ctx)) {
			/* Generator also reports the context image size. */
			dev_priv->engine.graph.grctx_size = ctx.ctxvals_pos * 4;

			/* Upload the generated microcode: reset the index,
			 * then stream the words through the data port. */
			nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
			for (i = 0; i < ctx.ctxprog_len; i++)
				nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, cp[i]);
		} else {
			/* Generation failed — run unaccelerated. */
			dev_priv->engine.graph.accel_blocked = true;
		}
		kfree(cp);
	}

	/* NOTE(review): 0x400320 <- 4 — CTXCTL control; exact meaning of
	 * the value not evident from this file, confirm. */
	nv_wr32(dev, 0x400320, 4);
	/* No context loaded yet. */
	nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0);
	nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, 0);
	return 0;
}
| 125 | |
/* Bring up the PGRAPH engine: reset it, program the initial register
 * state, enable interrupts, then load the context-control microcode.
 *
 * Returns 0 on success or a negative error code from the ctxctl setup.
 */
int
nv50_graph_init(struct drm_device *dev)
{
	NV_DEBUG(dev, "\n");

	nv50_graph_init_reset(dev);
	nv50_graph_init_regs__nv(dev);
	nv50_graph_init_regs(dev);
	nv50_graph_init_intr(dev);

	/* The ctxctl step is the only one that can fail. */
	return nv50_graph_init_ctxctl(dev);
}
| 144 | |
/* Tear down PGRAPH state: release the context firmware/program. */
void
nv50_graph_takedown(struct drm_device *dev)
{
	NV_DEBUG(dev, "\n");
	nouveau_grctx_fini(dev);
}
| 151 | |
| 152 | void |
| 153 | nv50_graph_fifo_access(struct drm_device *dev, bool enabled) |
| 154 | { |
| 155 | const uint32_t mask = 0x00010001; |
| 156 | |
| 157 | if (enabled) |
| 158 | nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) | mask); |
| 159 | else |
| 160 | nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) & ~mask); |
| 161 | } |
| 162 | |
/* Return the channel whose pgraph context is currently resident, or
 * NULL when no context is loaded or the resident instance matches no
 * known channel.
 */
struct nouveau_channel *
nv50_graph_channel(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t inst;
	int i;

	/* Be sure we're not in the middle of a context switch or bad things
	 * will happen, such as unloading the wrong pgraph context.
	 */
	if (!nv_wait(0x400300, 0x00000001, 0x00000000))
		NV_ERROR(dev, "Ctxprog is still running\n");

	inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR);
	if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED))
		return NULL;
	/* CUR stores the instance address in 4KiB units; shift back to
	 * a byte address for comparison. */
	inst = (inst & NV50_PGRAPH_CTXCTL_CUR_INSTANCE) << 12;

	/* Find the channel whose RAMIN lives at that instance address. */
	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
		struct nouveau_channel *chan = dev_priv->fifos[i];

		if (chan && chan->ramin && chan->ramin->instance == inst)
			return chan;
	}

	return NULL;
}
| 190 | |
| 191 | int |
| 192 | nv50_graph_create_context(struct nouveau_channel *chan) |
| 193 | { |
| 194 | struct drm_device *dev = chan->dev; |
| 195 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
| 196 | struct nouveau_gpuobj *ramin = chan->ramin->gpuobj; |
| 197 | struct nouveau_gpuobj *ctx; |
Marcin Kościelnicki | d5f3c90 | 2010-02-25 00:54:02 +0000 | [diff] [blame] | 198 | struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; |
Ben Skeggs | 6ee7386 | 2009-12-11 19:24:15 +1000 | [diff] [blame] | 199 | int hdr, ret; |
| 200 | |
| 201 | NV_DEBUG(dev, "ch%d\n", chan->id); |
| 202 | |
Marcin Kościelnicki | d5f3c90 | 2010-02-25 00:54:02 +0000 | [diff] [blame] | 203 | ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pgraph->grctx_size, |
| 204 | 0x1000, NVOBJ_FLAG_ZERO_ALLOC | |
Ben Skeggs | 6ee7386 | 2009-12-11 19:24:15 +1000 | [diff] [blame] | 205 | NVOBJ_FLAG_ZERO_FREE, &chan->ramin_grctx); |
| 206 | if (ret) |
| 207 | return ret; |
| 208 | ctx = chan->ramin_grctx->gpuobj; |
| 209 | |
| 210 | hdr = IS_G80 ? 0x200 : 0x20; |
| 211 | dev_priv->engine.instmem.prepare_access(dev, true); |
| 212 | nv_wo32(dev, ramin, (hdr + 0x00)/4, 0x00190002); |
| 213 | nv_wo32(dev, ramin, (hdr + 0x04)/4, chan->ramin_grctx->instance + |
Marcin Kościelnicki | d5f3c90 | 2010-02-25 00:54:02 +0000 | [diff] [blame] | 214 | pgraph->grctx_size - 1); |
Ben Skeggs | 6ee7386 | 2009-12-11 19:24:15 +1000 | [diff] [blame] | 215 | nv_wo32(dev, ramin, (hdr + 0x08)/4, chan->ramin_grctx->instance); |
| 216 | nv_wo32(dev, ramin, (hdr + 0x0c)/4, 0); |
| 217 | nv_wo32(dev, ramin, (hdr + 0x10)/4, 0); |
| 218 | nv_wo32(dev, ramin, (hdr + 0x14)/4, 0x00010000); |
| 219 | dev_priv->engine.instmem.finish_access(dev); |
| 220 | |
| 221 | dev_priv->engine.instmem.prepare_access(dev, true); |
Marcin Kościelnicki | d5f3c90 | 2010-02-25 00:54:02 +0000 | [diff] [blame] | 222 | if (!pgraph->ctxprog) { |
| 223 | struct nouveau_grctx ctx = {}; |
| 224 | ctx.dev = chan->dev; |
| 225 | ctx.mode = NOUVEAU_GRCTX_VALS; |
| 226 | ctx.data = chan->ramin_grctx->gpuobj; |
| 227 | nv50_grctx_init(&ctx); |
| 228 | } else { |
| 229 | nouveau_grctx_vals_load(dev, ctx); |
| 230 | } |
Ben Skeggs | 6ee7386 | 2009-12-11 19:24:15 +1000 | [diff] [blame] | 231 | nv_wo32(dev, ctx, 0x00000/4, chan->ramin->instance >> 12); |
Ben Skeggs | 6ee7386 | 2009-12-11 19:24:15 +1000 | [diff] [blame] | 232 | dev_priv->engine.instmem.finish_access(dev); |
| 233 | |
| 234 | return 0; |
| 235 | } |
| 236 | |
| 237 | void |
| 238 | nv50_graph_destroy_context(struct nouveau_channel *chan) |
| 239 | { |
| 240 | struct drm_device *dev = chan->dev; |
| 241 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
| 242 | int i, hdr = IS_G80 ? 0x200 : 0x20; |
| 243 | |
| 244 | NV_DEBUG(dev, "ch%d\n", chan->id); |
| 245 | |
| 246 | if (!chan->ramin || !chan->ramin->gpuobj) |
| 247 | return; |
| 248 | |
| 249 | dev_priv->engine.instmem.prepare_access(dev, true); |
| 250 | for (i = hdr; i < hdr + 24; i += 4) |
| 251 | nv_wo32(dev, chan->ramin->gpuobj, i/4, 0); |
| 252 | dev_priv->engine.instmem.finish_access(dev); |
| 253 | |
| 254 | nouveau_gpuobj_ref_del(dev, &chan->ramin_grctx); |
| 255 | } |
| 256 | |
/* Force-load the pgraph context at instance address `inst` into the
 * engine.  FIFO access is disabled for the duration so no new methods
 * arrive mid-switch; the register write order is significant.  Always
 * returns 0.
 */
static int
nv50_graph_do_load_context(struct drm_device *dev, uint32_t inst)
{
	/* Save current FIFO access state so it can be restored below. */
	uint32_t fifo = nv_rd32(dev, 0x400500);

	nv_wr32(dev, 0x400500, fifo & ~1);
	nv_wr32(dev, 0x400784, inst);
	nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x40);
	nv_wr32(dev, 0x400320, nv_rd32(dev, 0x400320) | 0x11);
	/* Pulse 0x400040; the intermediate read flushes the posted write
	 * before it is cleared again. */
	nv_wr32(dev, 0x400040, 0xffffffff);
	(void)nv_rd32(dev, 0x400040);
	nv_wr32(dev, 0x400040, 0x00000000);
	nv_wr32(dev, 0x400304, nv_rd32(dev, 0x400304) | 1);

	/* Only mark the context resident once the engine went idle. */
	if (nouveau_wait_for_idle(dev))
		nv_wr32(dev, 0x40032c, inst | (1<<31));
	nv_wr32(dev, 0x400500, fifo);

	return 0;
}
| 277 | |
| 278 | int |
| 279 | nv50_graph_load_context(struct nouveau_channel *chan) |
| 280 | { |
| 281 | uint32_t inst = chan->ramin->instance >> 12; |
| 282 | |
| 283 | NV_DEBUG(chan->dev, "ch%d\n", chan->id); |
| 284 | return nv50_graph_do_load_context(chan->dev, inst); |
| 285 | } |
| 286 | |
/* Save the currently-resident pgraph context (if any) back to instance
 * memory.  Always returns 0; a no-op when no context is loaded.
 */
int
nv50_graph_unload_context(struct drm_device *dev)
{
	uint32_t inst;

	inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR);
	if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED))
		return 0;
	inst &= NV50_PGRAPH_CTXCTL_CUR_INSTANCE;

	nouveau_wait_for_idle(dev);
	/* Point the engine at the context and kick the save operation. */
	nv_wr32(dev, 0x400784, inst);
	nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x20);
	nv_wr32(dev, 0x400304, nv_rd32(dev, 0x400304) | 0x01);
	nouveau_wait_for_idle(dev);

	/* Writing CUR without the LOADED bit marks no context resident. */
	nv_wr32(dev, NV50_PGRAPH_CTXCTL_CUR, inst);
	return 0;
}
| 306 | |
/* Service a pgraph context-switch request: save the outgoing context,
 * load the one the hardware queued in CTXCTL_NEXT, then re-enable the
 * context-switch interrupt.
 */
void
nv50_graph_context_switch(struct drm_device *dev)
{
	uint32_t inst;

	nv50_graph_unload_context(dev);

	inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_NEXT);
	inst &= NV50_PGRAPH_CTXCTL_NEXT_INSTANCE;
	nv50_graph_do_load_context(dev, inst);

	/* Re-arm the context-switch interrupt for the next request. */
	nv_wr32(dev, NV40_PGRAPH_INTR_EN, nv_rd32(dev,
		NV40_PGRAPH_INTR_EN) | NV_PGRAPH_INTR_CONTEXT_SWITCH);
}
| 321 | |
| 322 | static int |
| 323 | nv50_graph_nvsw_dma_vblsem(struct nouveau_channel *chan, int grclass, |
| 324 | int mthd, uint32_t data) |
| 325 | { |
| 326 | struct nouveau_gpuobj_ref *ref = NULL; |
| 327 | |
| 328 | if (nouveau_gpuobj_ref_find(chan, data, &ref)) |
| 329 | return -ENOENT; |
| 330 | |
| 331 | if (nouveau_notifier_offset(ref->gpuobj, NULL)) |
| 332 | return -EINVAL; |
| 333 | |
| 334 | chan->nvsw.vblsem = ref->gpuobj; |
| 335 | chan->nvsw.vblsem_offset = ~0; |
| 336 | return 0; |
| 337 | } |
| 338 | |
| 339 | static int |
| 340 | nv50_graph_nvsw_vblsem_offset(struct nouveau_channel *chan, int grclass, |
| 341 | int mthd, uint32_t data) |
| 342 | { |
| 343 | if (nouveau_notifier_offset(chan->nvsw.vblsem, &data)) |
| 344 | return -ERANGE; |
| 345 | |
| 346 | chan->nvsw.vblsem_offset = data >> 2; |
| 347 | return 0; |
| 348 | } |
| 349 | |
/* NVSW method 0x0404: record the value to write into the vblank
 * semaphore when it is released.
 */
static int
nv50_graph_nvsw_vblsem_release_val(struct nouveau_channel *chan, int grclass,
				   int mthd, uint32_t data)
{
	chan->nvsw.vblsem_rval = data;
	return 0;
}
| 357 | |
/* NVSW method 0x0408: queue this channel to have its vblank semaphore
 * released on the next vblank of CRTC `data` (0 or 1), enabling that
 * CRTC's vblank interrupt if it wasn't already.  Returns -EINVAL when
 * no semaphore/offset has been set up or the CRTC index is out of range.
 */
static int
nv50_graph_nvsw_vblsem_release(struct nouveau_channel *chan, int grclass,
			       int mthd, uint32_t data)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	if (!chan->nvsw.vblsem || chan->nvsw.vblsem_offset == ~0 || data > 1)
		return -EINVAL;

	if (!(nv_rd32(dev, NV50_PDISPLAY_INTR_EN) &
		      NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_(data))) {
		/* Ack any stale vblank status before unmasking it. */
		nv_wr32(dev, NV50_PDISPLAY_INTR_1,
			NV50_PDISPLAY_INTR_1_VBLANK_CRTC_(data));
		nv_wr32(dev, NV50_PDISPLAY_INTR_EN, nv_rd32(dev,
			NV50_PDISPLAY_INTR_EN) |
			NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_(data));
	}

	/* NOTE(review): no lock is taken around this list_add; presumably
	 * the caller/interrupt path serialises access to vbl_waiting —
	 * confirm against the vblank interrupt handler. */
	list_add(&chan->nvsw.vbl_wait, &dev_priv->vbl_waiting);
	return 0;
}
| 380 | |
/* Software-object (NVSW) method dispatch table: per-method handlers for
 * the vblank-semaphore protocol; terminated by an empty entry. */
static struct nouveau_pgraph_object_method nv50_graph_nvsw_methods[] = {
	{ 0x018c, nv50_graph_nvsw_dma_vblsem },
	{ 0x0400, nv50_graph_nvsw_vblsem_offset },
	{ 0x0404, nv50_graph_nvsw_vblsem_release_val },
	{ 0x0408, nv50_graph_nvsw_vblsem_release },
	{}
};
| 388 | |
/* Object classes supported by the nv50 pgraph engine.  Only the
 * software class (0x506e) has driver-side methods; the hardware
 * classes are handled entirely by the GPU.  Terminated by an empty
 * entry. */
struct nouveau_pgraph_object_class nv50_graph_grclass[] = {
	{ 0x506e, true, nv50_graph_nvsw_methods }, /* nvsw */
	{ 0x0030, false, NULL }, /* null */
	{ 0x5039, false, NULL }, /* m2mf */
	{ 0x502d, false, NULL }, /* 2d */
	{ 0x50c0, false, NULL }, /* compute */
	{ 0x5097, false, NULL }, /* tesla (nv50) */
	{ 0x8297, false, NULL }, /* tesla (nv80/nv90) */
	{ 0x8397, false, NULL }, /* tesla (nva0) */
	{ 0x8597, false, NULL }, /* tesla (nva8) */
	{}
};