#include "drmP.h"
#include "nouveau_drv.h"
#include <linux/pagemap.h>
#include <linux/slab.h>

#define NV_CTXDMA_PAGE_SHIFT 12
#define NV_CTXDMA_PAGE_SIZE (1 << NV_CTXDMA_PAGE_SHIFT)
#define NV_CTXDMA_PAGE_MASK (NV_CTXDMA_PAGE_SIZE - 1)

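/* TTM backend state for one scatter/gather DMA (GART) mapping.  "pages"
 * holds the bus address of each backing page, and "ttm_alloced" records
 * which of those mappings were handed to us pre-mapped by TTM (and so
 * must not be unmapped here).
 */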
struct nouveau_sgdma_be {
	struct ttm_backend backend;
	struct drm_device *dev;

	dma_addr_t *pages;
	bool *ttm_alloced;
	unsigned nr_pages;

	u64 offset;
	bool bound;
};

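/* Build the bus-address list for a buffer's backing pages.  TTM may pass
 * pages it already mapped itself (dma_addrs[i] != DMA_ERROR_CODE); those
 * are flagged in ttm_alloced so that clear() knows not to
 * pci_unmap_page() them.
 */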
static int
nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
		       struct page **pages, struct page *dummy_read_page,
		       dma_addr_t *dma_addrs)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_device *dev = nvbe->dev;

	NV_DEBUG(nvbe->dev, "num_pages = %lu\n", num_pages);

	if (nvbe->pages)
		return -EINVAL;

	nvbe->pages = kmalloc(sizeof(dma_addr_t) * num_pages, GFP_KERNEL);
	if (!nvbe->pages)
		return -ENOMEM;

	nvbe->ttm_alloced = kmalloc(sizeof(bool) * num_pages, GFP_KERNEL);
	if (!nvbe->ttm_alloced) {
		kfree(nvbe->pages);
		nvbe->pages = NULL;
		return -ENOMEM;
	}

	nvbe->nr_pages = 0;
	while (num_pages--) {
		if (dma_addrs[nvbe->nr_pages] != DMA_ERROR_CODE) {
			nvbe->pages[nvbe->nr_pages] =
					dma_addrs[nvbe->nr_pages];
			nvbe->ttm_alloced[nvbe->nr_pages] = true;
		} else {
			nvbe->pages[nvbe->nr_pages] =
				pci_map_page(dev->pdev, pages[nvbe->nr_pages],
					     0, PAGE_SIZE,
					     PCI_DMA_BIDIRECTIONAL);
			if (pci_dma_mapping_error(dev->pdev,
					nvbe->pages[nvbe->nr_pages])) {
				be->func->clear(be);
				return -EFAULT;
			}
			nvbe->ttm_alloced[nvbe->nr_pages] = false;
		}

		nvbe->nr_pages++;
	}

	return 0;
}

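/* Undo populate(): unbind if still bound, unmap any pages we mapped
 * ourselves, and free the bookkeeping arrays.
 */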
static void
nouveau_sgdma_clear(struct ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_device *dev;

	if (nvbe && nvbe->pages) {
		dev = nvbe->dev;
		NV_DEBUG(dev, "\n");

		if (nvbe->bound)
			be->func->unbind(be);

		while (nvbe->nr_pages--) {
			if (!nvbe->ttm_alloced[nvbe->nr_pages])
				pci_unmap_page(dev->pdev,
					       nvbe->pages[nvbe->nr_pages],
					       PAGE_SIZE,
					       PCI_DMA_BIDIRECTIONAL);
		}
		kfree(nvbe->pages);
		kfree(nvbe->ttm_alloced);
		nvbe->pages = NULL;
		nvbe->ttm_alloced = NULL;
		nvbe->nr_pages = 0;
	}
}

static void
nouveau_sgdma_destroy(struct ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;

	if (nvbe) {
		NV_DEBUG(nvbe->dev, "\n");

		if (nvbe->pages)
			be->func->clear(be);
		kfree(nvbe);
	}
}

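/* Pre-NV41 GART: an NV_CLASS_DMA_IN_MEMORY context DMA object.  The first
 * two words of the object are the header written by nouveau_sgdma_init(),
 * so page table entries start at word 2; each PTE holds the bus address
 * of one 4KiB page, with the low bits reused as validity flags (hence
 * the "| 3").
 */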
static int
nv04_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_device *dev = nvbe->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
	unsigned i, j, pte;

	NV_DEBUG(dev, "pg=0x%lx\n", mem->start);

	nvbe->offset = mem->start << PAGE_SHIFT;
	pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
	for (i = 0; i < nvbe->nr_pages; i++) {
		dma_addr_t dma_offset = nvbe->pages[i];

		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) {
			nv_wo32(gpuobj, (pte * 4) + 0,
				lower_32_bits(dma_offset) | 3);
			dma_offset += NV_CTXDMA_PAGE_SIZE;
		}
	}

	nvbe->bound = true;
	return 0;
}

static int
nv04_sgdma_unbind(struct ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_device *dev = nvbe->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
	unsigned i, j, pte;

	NV_DEBUG(dev, "\n");

	if (!nvbe->bound)
		return 0;

	pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
	for (i = 0; i < nvbe->nr_pages; i++) {
		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++)
			nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
	}

	nvbe->bound = false;
	return 0;
}

static struct ttm_backend_func nv04_sgdma_backend = {
	.populate = nouveau_sgdma_populate,
	.clear = nouveau_sgdma_clear,
	.bind = nv04_sgdma_bind,
	.unbind = nv04_sgdma_unbind,
	.destroy = nouveau_sgdma_destroy
};

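/* NV41-style GART: a real page table.  After rewriting PTEs the TLB must
 * be flushed; 0x100810 appears to be the flush trigger register on these
 * chips, with bit 8 reading back as set once the flush has completed.
 */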
static void
nv41_sgdma_flush(struct nouveau_sgdma_be *nvbe)
{
	struct drm_device *dev = nvbe->dev;

	nv_wr32(dev, 0x100810, 0x00000022);
	if (!nv_wait(dev, 0x100810, 0x00000100, 0x00000100))
		NV_ERROR(dev, "vm flush timeout: 0x%08x\n",
			 nv_rd32(dev, 0x100810));
	nv_wr32(dev, 0x100810, 0x00000000);
}

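/* NV41 PTEs are one 32-bit word each: the page's bus address shifted
 * right by 7, with bit 0 serving as the present/valid flag.
 * "mem->start << 2" turns the first GART page index into a byte offset
 * into the page table (4 bytes per PTE).
 */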
static int
nv41_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
	struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
	dma_addr_t *list = nvbe->pages;
	u32 pte = mem->start << 2;
	u32 cnt = nvbe->nr_pages;

	nvbe->offset = mem->start << PAGE_SHIFT;

	while (cnt--) {
		nv_wo32(pgt, pte, (*list++ >> 7) | 1);
		pte += 4;
	}

	nv41_sgdma_flush(nvbe);
	nvbe->bound = true;
	return 0;
}

static int
nv41_sgdma_unbind(struct ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
	struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
	u32 pte = (nvbe->offset >> 12) << 2;
	u32 cnt = nvbe->nr_pages;

	while (cnt--) {
		nv_wo32(pgt, pte, 0x00000000);
		pte += 4;
	}

	nv41_sgdma_flush(nvbe);
	nvbe->bound = false;
	return 0;
}

static struct ttm_backend_func nv41_sgdma_backend = {
	.populate = nouveau_sgdma_populate,
	.clear = nouveau_sgdma_clear,
	.bind = nv41_sgdma_bind,
	.unbind = nv41_sgdma_unbind,
	.destroy = nouveau_sgdma_destroy
};

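/* NV44 GART TLB flush: 0x100814 appears to take the size of the range
 * being flushed and 0x100808 its base address (with bit 5 as the
 * trigger); bit 0 of 0x100808 reads back as one on completion.
 */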
static void
nv44_sgdma_flush(struct nouveau_sgdma_be *nvbe)
{
	struct drm_device *dev = nvbe->dev;

	nv_wr32(dev, 0x100814, (nvbe->nr_pages - 1) << 12);
	nv_wr32(dev, 0x100808, nvbe->offset | 0x20);
	if (!nv_wait(dev, 0x100808, 0x00000001, 0x00000001))
		NV_ERROR(dev, "gart flush timeout: 0x%08x\n",
			 nv_rd32(dev, 0x100808));
	nv_wr32(dev, 0x100808, 0x00000000);
}

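/* NV44 packs four 27-bit page-frame numbers into each 16-byte (4-word)
 * group of the page table, with bit 30 of the last word acting as a
 * valid bit for the whole group.  This helper rewrites part of one
 * group: it reads all four words back, splices in the new PFNs (or the
 * dummy page's PFN when list is NULL, since NV44 has no per-page
 * present bit to clear), and writes the group out again.
 */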
static void
nv44_sgdma_fill(struct nouveau_gpuobj *pgt, dma_addr_t *list, u32 base, u32 cnt)
{
	struct drm_nouveau_private *dev_priv = pgt->dev->dev_private;
	dma_addr_t dummy = dev_priv->gart_info.dummy.addr;
	u32 pte, tmp[4];

	pte = base >> 2;
	base &= ~0x0000000f;

	tmp[0] = nv_ro32(pgt, base + 0x0);
	tmp[1] = nv_ro32(pgt, base + 0x4);
	tmp[2] = nv_ro32(pgt, base + 0x8);
	tmp[3] = nv_ro32(pgt, base + 0xc);
	while (cnt--) {
		u32 addr = list ? (*list++ >> 12) : (dummy >> 12);
		switch (pte++ & 0x3) {
		case 0:
			tmp[0] &= ~0x07ffffff;
			tmp[0] |= addr;
			break;
		case 1:
			tmp[0] &= ~0xf8000000;
			tmp[0] |= addr << 27;
			tmp[1] &= ~0x003fffff;
			tmp[1] |= addr >> 5;
			break;
		case 2:
			tmp[1] &= ~0xffc00000;
			tmp[1] |= addr << 22;
			tmp[2] &= ~0x0001ffff;
			tmp[2] |= addr >> 10;
			break;
		case 3:
			tmp[2] &= ~0xfffe0000;
			tmp[2] |= addr << 17;
			tmp[3] &= ~0x00000fff;
			tmp[3] |= addr >> 15;
			break;
		}
	}

	tmp[3] |= 0x40000000;

	nv_wo32(pgt, base + 0x0, tmp[0]);
	nv_wo32(pgt, base + 0x4, tmp[1]);
	nv_wo32(pgt, base + 0x8, tmp[2]);
	nv_wo32(pgt, base + 0xc, tmp[3]);
}

static int
nv44_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
	struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
	dma_addr_t *list = nvbe->pages;
	u32 pte = mem->start << 2, tmp[4];
	u32 cnt = nvbe->nr_pages;
	int i;

	nvbe->offset = mem->start << PAGE_SHIFT;

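	/* the start of the range may not be group-aligned; let the
	 * read-modify-write helper handle the leading partial group */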
	if (pte & 0x0000000c) {
		u32 max = 4 - ((pte >> 2) & 0x3);
		u32 part = (cnt > max) ? max : cnt;
		nv44_sgdma_fill(pgt, list, pte, part);
		pte += (part << 2);
		list += part;
		cnt -= part;
	}

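	/* whole 4-PTE groups can be packed and written directly, without
	 * the read-modify-write that nv44_sgdma_fill() does for the
	 * unaligned edges */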
	while (cnt >= 4) {
		for (i = 0; i < 4; i++)
			tmp[i] = *list++ >> 12;
		nv_wo32(pgt, pte + 0x0, tmp[0] >> 0 | tmp[1] << 27);
		nv_wo32(pgt, pte + 0x4, tmp[1] >> 5 | tmp[2] << 22);
		nv_wo32(pgt, pte + 0x8, tmp[2] >> 10 | tmp[3] << 17);
		nv_wo32(pgt, pte + 0xc, tmp[3] >> 15 | 0x40000000);
		pte += 0x10;
		cnt -= 4;
	}

	if (cnt)
		nv44_sgdma_fill(pgt, list, pte, cnt);

	nv44_sgdma_flush(nvbe);
	nvbe->bound = true;
	return 0;
}

static int
nv44_sgdma_unbind(struct ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
	struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
	u32 pte = (nvbe->offset >> 12) << 2;
	u32 cnt = nvbe->nr_pages;

	if (pte & 0x0000000c) {
		u32 max = 4 - ((pte >> 2) & 0x3);
		u32 part = (cnt > max) ? max : cnt;
		nv44_sgdma_fill(pgt, NULL, pte, part);
		pte += (part << 2);
		cnt -= part;
	}

	while (cnt >= 4) {
		nv_wo32(pgt, pte + 0x0, 0x00000000);
		nv_wo32(pgt, pte + 0x4, 0x00000000);
		nv_wo32(pgt, pte + 0x8, 0x00000000);
		nv_wo32(pgt, pte + 0xc, 0x00000000);
		pte += 0x10;
		cnt -= 4;
	}

	if (cnt)
		nv44_sgdma_fill(pgt, NULL, pte, cnt);

	nv44_sgdma_flush(nvbe);
	nvbe->bound = false;
	return 0;
}

static struct ttm_backend_func nv44_sgdma_backend = {
	.populate = nouveau_sgdma_populate,
	.clear = nouveau_sgdma_clear,
	.bind = nv44_sgdma_bind,
	.unbind = nv44_sgdma_unbind,
	.destroy = nouveau_sgdma_destroy
};

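/* On NV50+ the real binding happens in move_notify() through the VM
 * code; bind()/unbind() only stash the page list in the memory node
 * (and the node pointer in nvbe->pages) so each side can find the
 * other later.
 */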
static int
nv50_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct nouveau_mem *node = mem->mm_node;
	/* noop: bound in move_notify() */
	node->pages = nvbe->pages;
	nvbe->pages = (dma_addr_t *)node;
	nvbe->bound = true;
	return 0;
}

static int
nv50_sgdma_unbind(struct ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct nouveau_mem *node = (struct nouveau_mem *)nvbe->pages;
	/* noop: unbound in move_notify() */
	nvbe->pages = node->pages;
	node->pages = NULL;
	nvbe->bound = false;
	return 0;
}

static struct ttm_backend_func nv50_sgdma_backend = {
	.populate = nouveau_sgdma_populate,
	.clear = nouveau_sgdma_clear,
	.bind = nv50_sgdma_bind,
	.unbind = nv50_sgdma_unbind,
	.destroy = nouveau_sgdma_destroy
};

struct ttm_backend *
nouveau_sgdma_init_ttm(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_sgdma_be *nvbe;

	nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
	if (!nvbe)
		return NULL;

	nvbe->dev = dev;

	nvbe->backend.func = dev_priv->gart_info.func;
	return &nvbe->backend;
}

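/* Pick a GART backend for the board: NV50+ uses the real VM, PCIe
 * NV41+ (except NV45) uses the on-chip page table, and everything else
 * falls back to a PCI context DMA object.  Also sets up the dummy page
 * used to back unmapped NV44 PTEs.
 */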
int
nouveau_sgdma_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = NULL;
	u32 aper_size, align;
	int ret;

	if (dev_priv->card_type >= NV_40 && drm_pci_device_is_pcie(dev))
		aper_size = 512 * 1024 * 1024;
	else
		aper_size = 64 * 1024 * 1024;

	/* Dear NVIDIA, NV44+ would like proper present bits in PTEs for
	 * christmas.  The cards before it have them, the cards after
	 * it have them, why is NV44 so unloved?
	 */
	dev_priv->gart_info.dummy.page = alloc_page(GFP_DMA32 | GFP_KERNEL);
	if (!dev_priv->gart_info.dummy.page)
		return -ENOMEM;

	dev_priv->gart_info.dummy.addr =
		pci_map_page(dev->pdev, dev_priv->gart_info.dummy.page,
			     0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(dev->pdev, dev_priv->gart_info.dummy.addr)) {
		NV_ERROR(dev, "error mapping dummy page\n");
		__free_page(dev_priv->gart_info.dummy.page);
		dev_priv->gart_info.dummy.page = NULL;
		return -ENOMEM;
	}

	if (dev_priv->card_type >= NV_50) {
		dev_priv->gart_info.aper_base = 0;
		dev_priv->gart_info.aper_size = aper_size;
		dev_priv->gart_info.type = NOUVEAU_GART_HW;
		dev_priv->gart_info.func = &nv50_sgdma_backend;
	} else
	if (drm_pci_device_is_pcie(dev) &&
	    dev_priv->chipset > 0x40 && dev_priv->chipset != 0x45) {
		if (nv44_graph_class(dev)) {
			dev_priv->gart_info.func = &nv44_sgdma_backend;
			align = 512 * 1024;
		} else {
			dev_priv->gart_info.func = &nv41_sgdma_backend;
			align = 16;
		}

		ret = nouveau_gpuobj_new(dev, NULL, aper_size / 1024, align,
					 NVOBJ_FLAG_ZERO_ALLOC |
					 NVOBJ_FLAG_ZERO_FREE, &gpuobj);
		if (ret) {
			NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
			return ret;
		}

		dev_priv->gart_info.sg_ctxdma = gpuobj;
		dev_priv->gart_info.aper_base = 0;
		dev_priv->gart_info.aper_size = aper_size;
		dev_priv->gart_info.type = NOUVEAU_GART_HW;
	} else {
		ret = nouveau_gpuobj_new(dev, NULL, (aper_size / 1024) + 8, 16,
					 NVOBJ_FLAG_ZERO_ALLOC |
					 NVOBJ_FLAG_ZERO_FREE, &gpuobj);
		if (ret) {
			NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
			return ret;
		}

		nv_wo32(gpuobj, 0, NV_CLASS_DMA_IN_MEMORY |
				   (1 << 12) /* PT present */ |
				   (0 << 13) /* PT *not* linear */ |
				   (0 << 14) /* RW */ |
				   (2 << 16) /* PCI */);
		nv_wo32(gpuobj, 4, aper_size - 1);

		dev_priv->gart_info.sg_ctxdma = gpuobj;
		dev_priv->gart_info.aper_base = 0;
		dev_priv->gart_info.aper_size = aper_size;
		dev_priv->gart_info.type = NOUVEAU_GART_PDMA;
		dev_priv->gart_info.func = &nv04_sgdma_backend;
	}

	return 0;
}

void
nouveau_sgdma_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	nouveau_gpuobj_ref(NULL, &dev_priv->gart_info.sg_ctxdma);

	if (dev_priv->gart_info.dummy.page) {
		pci_unmap_page(dev->pdev, dev_priv->gart_info.dummy.addr,
			       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		__free_page(dev_priv->gart_info.dummy.page);
		dev_priv->gart_info.dummy.page = NULL;
	}
}

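/* Translate a GART offset back to a bus address by reading the PTE out
 * of the NV04 context DMA object (pre-NV50 only; NV50+ resolves this
 * through the VM instead).
 */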
uint32_t
nouveau_sgdma_get_physical(struct drm_device *dev, uint32_t offset)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
	int pte = (offset >> NV_CTXDMA_PAGE_SHIFT) + 2;

	BUG_ON(dev_priv->card_type >= NV_50);

	return (nv_ro32(gpuobj, 4 * pte) & ~NV_CTXDMA_PAGE_MASK) |
		(offset & NV_CTXDMA_PAGE_MASK);
}