/*
 * Copyright 2010 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include "drmP.h"

#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_ramht.h"

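/* Tear down an EVO channel: release its gpuobj state, unmap and drop the
 * push buffer, unmap the USER control registers and free the structure.
 */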
static void
nv50_evo_channel_del(struct nouveau_channel **pchan)
{
	struct nouveau_channel *chan = *pchan;

	if (!chan)
		return;
	*pchan = NULL;

	nouveau_gpuobj_channel_takedown(chan);
	nouveau_bo_unmap(chan->pushbuf_bo);
	nouveau_bo_ref(NULL, &chan->pushbuf_bo);

	if (chan->user)
		iounmap(chan->user);

	kfree(chan);
}

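/* Create a DMA object in the EVO channel's hash table.  These describe the
 * memory apertures (framebuffers, VRAM) that EVO methods can reference by
 * handle ("name").
 */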
int
nv50_evo_dmaobj_new(struct nouveau_channel *evo, u32 class, u32 name,
		    u32 tile_flags, u32 magic_flags, u32 offset, u32 limit)
{
	struct drm_nouveau_private *dev_priv = evo->dev->dev_private;
	struct drm_device *dev = evo->dev;
	struct nouveau_gpuobj *obj = NULL;
	int ret;

	ret = nouveau_gpuobj_new(dev, evo, 6*4, 32, 0, &obj);
	if (ret)
		return ret;
	obj->engine = NVOBJ_ENGINE_DISPLAY;

	/* class and flags in the first word, then the limit and base offset
	 * of the aperture; the final word differs on NVC0 and later
	 */
	nv_wo32(obj, 0, (tile_flags << 22) | (magic_flags << 16) | class);
	nv_wo32(obj, 4, limit);
	nv_wo32(obj, 8, offset);
	nv_wo32(obj, 12, 0x00000000);
	nv_wo32(obj, 16, 0x00000000);
	if (dev_priv->card_type < NV_C0)
		nv_wo32(obj, 20, 0x00010000);
	else
		nv_wo32(obj, 20, 0x00020000);
	dev_priv->engine.instmem.flush(dev);

	/* publish the object under its handle, then drop the local ref */
	ret = nouveau_ramht_insert(evo, name, obj);
	nouveau_gpuobj_ref(NULL, &obj);
	if (ret)
		return ret;

	return 0;
}

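/* Allocate and construct the EVO channel: instance memory, RAMHT, the
 * default DMA objects, a VRAM push buffer, and a mapping of the channel's
 * USER control registers.
 */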
static int
nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pchan)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *ramht = NULL;
	struct nouveau_channel *chan;
	int ret;

	chan = kzalloc(sizeof(struct nouveau_channel), GFP_KERNEL);
	if (!chan)
		return -ENOMEM;
	*pchan = chan;

	chan->id = -1;
	chan->dev = dev;
	chan->user_get = 4;
	chan->user_put = 0;

	ret = nouveau_gpuobj_new(dev, NULL, 32768, 0x1000,
				 NVOBJ_FLAG_ZERO_ALLOC, &chan->ramin);
	if (ret) {
		NV_ERROR(dev, "Error allocating EVO channel memory: %d\n", ret);
		nv50_evo_channel_del(pchan);
		return ret;
	}

	ret = drm_mm_init(&chan->ramin_heap, 0, 32768);
	if (ret) {
		NV_ERROR(dev, "Error initialising EVO PRAMIN heap: %d\n", ret);
		nv50_evo_channel_del(pchan);
		return ret;
	}

	ret = nouveau_gpuobj_new(dev, chan, 4096, 16, 0, &ramht);
	if (ret) {
		NV_ERROR(dev, "Unable to allocate EVO RAMHT: %d\n", ret);
		nv50_evo_channel_del(pchan);
		return ret;
	}

	ret = nouveau_ramht_new(dev, ramht, &chan->ramht);
	nouveau_gpuobj_ref(NULL, &ramht);
	if (ret) {
		nv50_evo_channel_del(pchan);
		return ret;
	}

	/* create the default DMA objects: NvEvoFB16/NvEvoFB32 (skipped on
	 * the original NV50) and NvEvoVRAM spanning all of VRAM
	 */
	if (dev_priv->chipset != 0x50) {
		ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoFB16, 0x70, 0x19,
					  0, 0xffffffff);
		if (ret) {
			nv50_evo_channel_del(pchan);
			return ret;
		}

		ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoFB32, 0x7a, 0x19,
					  0, 0xffffffff);
		if (ret) {
			nv50_evo_channel_del(pchan);
			return ret;
		}
	}

	ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoVRAM, 0, 0x19,
				  0, dev_priv->vram_size);
	if (ret) {
		nv50_evo_channel_del(pchan);
		return ret;
	}

	ret = nouveau_bo_new(dev, NULL, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0,
			     false, true, &chan->pushbuf_bo);
	if (ret == 0)
		ret = nouveau_bo_pin(chan->pushbuf_bo, TTM_PL_FLAG_VRAM);
	if (ret) {
		NV_ERROR(dev, "Error creating EVO DMA push buffer: %d\n", ret);
		nv50_evo_channel_del(pchan);
		return ret;
	}

	ret = nouveau_bo_map(chan->pushbuf_bo);
	if (ret) {
		NV_ERROR(dev, "Error mapping EVO DMA push buffer: %d\n", ret);
		nv50_evo_channel_del(pchan);
		return ret;
	}

	chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
			     NV50_PDISPLAY_USER(0), PAGE_SIZE);
	if (!chan->user) {
		NV_ERROR(dev, "Error mapping EVO control regs.\n");
		nv50_evo_channel_del(pchan);
		return -ENOMEM;
	}

	return 0;
}

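/* Bring the EVO channel up: recover the hardware if it is wedged, point it
 * at the push buffer and enable DMA processing.
 */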
static int
nv50_evo_channel_init(struct nouveau_channel *evo)
{
	struct drm_nouveau_private *dev_priv = evo->dev->dev_private;
	struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
	struct drm_device *dev = evo->dev;
	int ret, i;
	u64 start;
	u32 tmp;

	/* taken from nv bug #12637, attempts to un-wedge the hw if it's
	 * stuck in some unspecified state
	 */
	start = ptimer->read(dev);
	nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(0), 0x2b00);
	while ((tmp = nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(0))) & 0x1e0000) {
		if ((tmp & 0x9f0000) == 0x20000)
			nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(0), tmp | 0x800000);

		if ((tmp & 0x3f0000) == 0x30000)
			nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(0), tmp | 0x200000);

		if (ptimer->read(dev) - start > 1000000000ULL) {
			NV_ERROR(dev, "timeout: (0x610200 & 0x1e0000) != 0\n");
			NV_ERROR(dev, "0x610200 = 0x%08x\n", tmp);
			return -EBUSY;
		}
	}

	nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(0), 0x1000b03);
	if (!nv_wait(dev, NV50_PDISPLAY_EVO_CTRL(0),
		     0x40000000, 0x40000000)) {
		NV_ERROR(dev, "timeout: (0x610200 & 0x40000000) == 0x40000000\n");
		NV_ERROR(dev, "0x610200 = 0x%08x\n",
			 nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(0)));
		return -EBUSY;
	}

	/* initialise fifo */
	nv_wr32(dev, NV50_PDISPLAY_EVO_DMA_CB(0),
		((evo->pushbuf_bo->bo.mem.start << PAGE_SHIFT) >> 8) |
		NV50_PDISPLAY_EVO_DMA_CB_LOCATION_VRAM |
		NV50_PDISPLAY_EVO_DMA_CB_VALID);
	nv_wr32(dev, NV50_PDISPLAY_EVO_UNK2(0), 0x00010000);
	nv_wr32(dev, NV50_PDISPLAY_EVO_HASH_TAG(0), 0x00000002);
	if (!nv_wait(dev, 0x610200, 0x80000000, 0x00000000)) {
		NV_ERROR(dev, "timeout: (0x610200 & 0x80000000) == 0\n");
		NV_ERROR(dev, "0x610200 = 0x%08x\n", nv_rd32(dev, 0x610200));
		return -EBUSY;
	}
	nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(0),
		(nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(0)) & ~0x00000003) |
		NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED);
	nv_wr32(dev, NV50_PDISPLAY_USER_PUT(0), 0);
	nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(0), 0x01000003 |
		NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED);

	/* enable error reporting on the channel */
	nv_mask(dev, 0x610028, 0x00000000, 0x00010001 << 0);

	/* software state for the 4KiB push buffer ring */
	evo->dma.max = (4096/4) - 2;
	evo->dma.put = 0;
	evo->dma.cur = evo->dma.put;
	evo->dma.free = evo->dma.max - evo->dma.cur;

	ret = RING_SPACE(evo, NOUVEAU_DMA_SKIPS);
	if (ret)
		return ret;

	for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
		OUT_RING(evo, 0);

	return 0;
}

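/* Halt EVO channel processing and wait for the hardware to go idle. */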
static void
nv50_evo_channel_fini(struct nouveau_channel *evo)
{
	struct drm_device *dev = evo->dev;

	nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(0), 0);
	if (!nv_wait(dev, NV50_PDISPLAY_EVO_CTRL(0), 0x1e0000, 0)) {
		NV_ERROR(dev, "timeout: (0x610200 & 0x1e0000) == 0\n");
		NV_ERROR(dev, "0x610200 = 0x%08x\n",
			 nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(0)));
	}
}

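/* Called on display init: create the EVO channel if it doesn't exist yet,
 * then (re)initialise it.
 */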
int
nv50_evo_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int ret;

	if (!dev_priv->evo) {
		ret = nv50_evo_channel_new(dev, &dev_priv->evo);
		if (ret)
			return ret;
	}

	return nv50_evo_channel_init(dev_priv->evo);
}

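/* Called on display takedown: stop the EVO channel and free it. */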
void
nv50_evo_fini(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	if (dev_priv->evo) {
		nv50_evo_channel_fini(dev_priv->evo);
		nv50_evo_channel_del(&dev_priv->evo);
	}
}