#include "drmP.h"
#include "nouveau_drv.h"
#include <linux/pagemap.h>
#include <linux/slab.h>

#define NV_CTXDMA_PAGE_SHIFT 12
#define NV_CTXDMA_PAGE_SIZE  (1 << NV_CTXDMA_PAGE_SHIFT)
#define NV_CTXDMA_PAGE_MASK  (NV_CTXDMA_PAGE_SIZE - 1)

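/*
 * Per-backend bookkeeping.  pages[] holds the bus address of every
 * backing page; ttm_alloced[] flags entries whose DMA mapping was
 * supplied by TTM itself (and therefore must not be unmapped here).
 */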
struct nouveau_sgdma_be {
	struct ttm_backend backend;
	struct drm_device *dev;

	dma_addr_t *pages;
	bool *ttm_alloced;
	unsigned nr_pages;

	u64 offset;
	bool bound;
};

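/*
 * Build the DMA address list for a buffer.  When TTM already provides
 * a valid bus address in dma_addrs[] it is used as-is and recorded in
 * ttm_alloced[] so clear() skips the pci_unmap_page(); otherwise the
 * page is mapped with pci_map_page() here.
 */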
static int
nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
		       struct page **pages, struct page *dummy_read_page,
		       dma_addr_t *dma_addrs)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_device *dev = nvbe->dev;

	NV_DEBUG(nvbe->dev, "num_pages = %lu\n", num_pages);

	if (nvbe->pages)
		return -EINVAL;

	nvbe->pages = kmalloc(sizeof(dma_addr_t) * num_pages, GFP_KERNEL);
	if (!nvbe->pages)
		return -ENOMEM;

	nvbe->ttm_alloced = kmalloc(sizeof(bool) * num_pages, GFP_KERNEL);
	if (!nvbe->ttm_alloced) {
		kfree(nvbe->pages);
		nvbe->pages = NULL;
		return -ENOMEM;
	}

	nvbe->nr_pages = 0;
	while (num_pages--) {
		if (dma_addrs[nvbe->nr_pages] != DMA_ERROR_CODE) {
			nvbe->pages[nvbe->nr_pages] =
					dma_addrs[nvbe->nr_pages];
			nvbe->ttm_alloced[nvbe->nr_pages] = true;
		} else {
			nvbe->pages[nvbe->nr_pages] =
				pci_map_page(dev->pdev, pages[nvbe->nr_pages], 0,
					     PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
			if (pci_dma_mapping_error(dev->pdev,
					nvbe->pages[nvbe->nr_pages])) {
				be->func->clear(be);
				return -EFAULT;
			}
			nvbe->ttm_alloced[nvbe->nr_pages] = false;
		}

		nvbe->nr_pages++;
	}

	return 0;
}

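/*
 * Undo populate(): unbind if still bound, drop only the mappings this
 * backend created itself, then free the bookkeeping arrays.
 */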
static void
nouveau_sgdma_clear(struct ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_device *dev;

	if (nvbe && nvbe->pages) {
		dev = nvbe->dev;
		NV_DEBUG(dev, "\n");

		if (nvbe->bound)
			be->func->unbind(be);

		while (nvbe->nr_pages--) {
			if (!nvbe->ttm_alloced[nvbe->nr_pages])
				pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
					       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		}
		kfree(nvbe->pages);
		kfree(nvbe->ttm_alloced);
		nvbe->pages = NULL;
		nvbe->ttm_alloced = NULL;
		nvbe->nr_pages = 0;
	}
}

static void
nouveau_sgdma_destroy(struct ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;

	if (nvbe) {
		NV_DEBUG(nvbe->dev, "\n");

		if (nvbe->pages)
			be->func->clear(be);
		kfree(nvbe);
	}
}

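/*
 * NV04-class GART: a DMA object whose 32-bit entries each map one
 * 4KiB page.  The object starts with a two-word header, hence the
 * "+ 2" when locating the first PTE; the low two bits OR'd into each
 * entry appear to be the hardware's present/valid flags.
 */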
static int
nv04_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_device *dev = nvbe->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
	unsigned i, j, pte;

	NV_DEBUG(dev, "pg=0x%lx\n", mem->start);

	nvbe->offset = mem->start << PAGE_SHIFT;
	pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
	for (i = 0; i < nvbe->nr_pages; i++) {
		dma_addr_t dma_offset = nvbe->pages[i];

		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) {
			nv_wo32(gpuobj, (pte * 4) + 0,
				lower_32_bits(dma_offset) | 3);
			dma_offset += NV_CTXDMA_PAGE_SIZE;
		}
	}

	nvbe->bound = true;
	return 0;
}

static int
nv04_sgdma_unbind(struct ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_device *dev = nvbe->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
	unsigned i, j, pte;

	NV_DEBUG(dev, "\n");

	if (!nvbe->bound)
		return 0;

	pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
	for (i = 0; i < nvbe->nr_pages; i++) {
		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++)
			nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
	}

	nvbe->bound = false;
	return 0;
}

static struct ttm_backend_func nv04_sgdma_backend = {
	.populate = nouveau_sgdma_populate,
	.clear = nouveau_sgdma_clear,
	.bind = nv04_sgdma_bind,
	.unbind = nv04_sgdma_unbind,
	.destroy = nouveau_sgdma_destroy
};

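/*
 * NV41-class GART: a real page table in instance memory, one 32-bit
 * PTE per page.  0x100810 is presumably the VM flush register: writing
 * 0x22 triggers the flush and bit 8 reads back as the done flag.
 */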
static void
nv41_sgdma_flush(struct nouveau_sgdma_be *nvbe)
{
	struct drm_device *dev = nvbe->dev;

	nv_wr32(dev, 0x100810, 0x00000022);
	if (!nv_wait(dev, 0x100810, 0x00000100, 0x00000100))
		NV_ERROR(dev, "vm flush timeout: 0x%08x\n",
			 nv_rd32(dev, 0x100810));
	nv_wr32(dev, 0x100810, 0x00000000);
}

static int
nv41_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
	struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
	dma_addr_t *list = nvbe->pages;
	u32 pte = mem->start << 2;
	u32 cnt = nvbe->nr_pages;

	nvbe->offset = mem->start << PAGE_SHIFT;

	while (cnt--) {
		nv_wo32(pgt, pte, (*list++ >> 7) | 1);
		pte += 4;
	}

	nv41_sgdma_flush(nvbe);
	nvbe->bound = true;
	return 0;
}

static int
nv41_sgdma_unbind(struct ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
	struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
	u32 pte = (nvbe->offset >> 12) << 2;
	u32 cnt = nvbe->nr_pages;

	while (cnt--) {
		nv_wo32(pgt, pte, 0x00000000);
		pte += 4;
	}

	nv41_sgdma_flush(nvbe);
	nvbe->bound = false;
	return 0;
}

static struct ttm_backend_func nv41_sgdma_backend = {
	.populate = nouveau_sgdma_populate,
	.clear = nouveau_sgdma_clear,
	.bind = nv41_sgdma_bind,
	.unbind = nv41_sgdma_unbind,
	.destroy = nouveau_sgdma_destroy
};

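/*
 * NV44 GART flush: 0x100814 appears to take the size in bytes of the
 * region to invalidate, and 0x100808 its base offset with bit 5 as the
 * trigger; bit 0 reads back as the completion flag.
 */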
static void
nv44_sgdma_flush(struct nouveau_sgdma_be *nvbe)
{
	struct drm_device *dev = nvbe->dev;

	nv_wr32(dev, 0x100814, (nvbe->nr_pages - 1) << 12);
	nv_wr32(dev, 0x100808, nvbe->offset | 0x20);
	if (!nv_wait(dev, 0x100808, 0x00000001, 0x00000001))
		NV_ERROR(dev, "gart flush timeout: 0x%08x\n",
			 nv_rd32(dev, 0x100808));
	nv_wr32(dev, 0x100808, 0x00000000);
}

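/*
 * NV44 packs four 27-bit page-frame entries into each 16-byte group,
 * so updating part of a group means a read-modify-write of all four
 * words.  With list == NULL the entries are pointed at the dummy page
 * instead of being cleared, since NV44 PTEs have no usable present
 * bit; bit 30 of the last word seems to mark the group valid.
 */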
static void
nv44_sgdma_fill(struct nouveau_gpuobj *pgt, dma_addr_t *list, u32 base, u32 cnt)
{
	struct drm_nouveau_private *dev_priv = pgt->dev->dev_private;
	dma_addr_t dummy = dev_priv->gart_info.dummy.addr;
	u32 pte, tmp[4];

	pte = base >> 2;
	base &= ~0x0000000f;

	tmp[0] = nv_ro32(pgt, base + 0x0);
	tmp[1] = nv_ro32(pgt, base + 0x4);
	tmp[2] = nv_ro32(pgt, base + 0x8);
	tmp[3] = nv_ro32(pgt, base + 0xc);
	while (cnt--) {
		u32 addr = list ? (*list++ >> 12) : (dummy >> 12);
		switch (pte++ & 0x3) {
		case 0:
			tmp[0] &= ~0x07ffffff;
			tmp[0] |= addr;
			break;
		case 1:
			tmp[0] &= ~0xf8000000;
			tmp[0] |= addr << 27;
			tmp[1] &= ~0x003fffff;
			tmp[1] |= addr >> 5;
			break;
		case 2:
			tmp[1] &= ~0xffc00000;
			tmp[1] |= addr << 22;
			tmp[2] &= ~0x0001ffff;
			tmp[2] |= addr >> 10;
			break;
		case 3:
			tmp[2] &= ~0xfffe0000;
			tmp[2] |= addr << 17;
			tmp[3] &= ~0x00000fff;
			tmp[3] |= addr >> 15;
			break;
		}
	}

	tmp[3] |= 0x40000000;

	nv_wo32(pgt, base + 0x0, tmp[0]);
	nv_wo32(pgt, base + 0x4, tmp[1]);
	nv_wo32(pgt, base + 0x8, tmp[2]);
	nv_wo32(pgt, base + 0xc, tmp[3]);
}

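/*
 * Write PTEs for the whole buffer: fix up a misaligned head and tail
 * via nv44_sgdma_fill(), and write complete four-entry groups directly
 * in between.
 */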
static int
nv44_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
	struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
	dma_addr_t *list = nvbe->pages;
	u32 pte = mem->start << 2, tmp[4];
	u32 cnt = nvbe->nr_pages;
	int i;

	nvbe->offset = mem->start << PAGE_SHIFT;

	if (pte & 0x0000000c) {
		u32 max = 4 - ((pte >> 2) & 0x3);
		u32 part = (cnt > max) ? max : cnt;
		nv44_sgdma_fill(pgt, list, pte, part);
		pte += (part << 2);
		list += part;
		cnt -= part;
	}

	while (cnt >= 4) {
		for (i = 0; i < 4; i++)
			tmp[i] = *list++ >> 12;
		nv_wo32(pgt, pte + 0x0, tmp[0] >> 0 | tmp[1] << 27);
		nv_wo32(pgt, pte + 0x4, tmp[1] >> 5 | tmp[2] << 22);
		nv_wo32(pgt, pte + 0x8, tmp[2] >> 10 | tmp[3] << 17);
		nv_wo32(pgt, pte + 0xc, tmp[3] >> 15 | 0x40000000);
		pte += 0x10;
		cnt -= 4;
	}

	if (cnt)
		nv44_sgdma_fill(pgt, list, pte, cnt);

	nv44_sgdma_flush(nvbe);
	nvbe->bound = true;
	return 0;
}

static int
nv44_sgdma_unbind(struct ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
	struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
	u32 pte = (nvbe->offset >> 12) << 2;
	u32 cnt = nvbe->nr_pages;

	if (pte & 0x0000000c) {
		u32 max = 4 - ((pte >> 2) & 0x3);
		u32 part = (cnt > max) ? max : cnt;
		nv44_sgdma_fill(pgt, NULL, pte, part);
		pte += (part << 2);
		cnt -= part;
	}

	while (cnt >= 4) {
		nv_wo32(pgt, pte + 0x0, 0x00000000);
		nv_wo32(pgt, pte + 0x4, 0x00000000);
		nv_wo32(pgt, pte + 0x8, 0x00000000);
		nv_wo32(pgt, pte + 0xc, 0x00000000);
		pte += 0x10;
		cnt -= 4;
	}

	if (cnt)
		nv44_sgdma_fill(pgt, NULL, pte, cnt);

	nv44_sgdma_flush(nvbe);
	nvbe->bound = false;
	return 0;
}

static struct ttm_backend_func nv44_sgdma_backend = {
	.populate = nouveau_sgdma_populate,
	.clear = nouveau_sgdma_clear,
	.bind = nv44_sgdma_bind,
	.unbind = nv44_sgdma_unbind,
	.destroy = nouveau_sgdma_destroy
};

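/*
 * On NV50+ the VM code does the real (un)binding from move_notify(),
 * so these hooks only hand the page list over to the memory node,
 * borrowing nvbe->pages to remember the node until unbind.
 */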
static int
nv50_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct nouveau_mem *node = mem->mm_node;
	/* noop: bound in move_notify() */
	node->pages = nvbe->pages;
	nvbe->pages = (dma_addr_t *)node;
	nvbe->bound = true;
	return 0;
}

static int
nv50_sgdma_unbind(struct ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct nouveau_mem *node = (struct nouveau_mem *)nvbe->pages;
	/* noop: unbound in move_notify() */
	nvbe->pages = node->pages;
	node->pages = NULL;
	nvbe->bound = false;
	return 0;
}

static struct ttm_backend_func nv50_sgdma_backend = {
	.populate = nouveau_sgdma_populate,
	.clear = nouveau_sgdma_clear,
	.bind = nv50_sgdma_bind,
	.unbind = nv50_sgdma_unbind,
	.destroy = nouveau_sgdma_destroy
};

struct ttm_backend *
nouveau_sgdma_init_ttm(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_sgdma_be *nvbe;

	nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
	if (!nvbe)
		return NULL;

	nvbe->dev = dev;

	nvbe->backend.func = dev_priv->gart_info.func;
	return &nvbe->backend;
}

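/*
 * Select a GART backend for the chipset and, for the pre-NV50 paths,
 * allocate the page table object: one 32-bit PTE per 4KiB page gives
 * aper_size / 1024 bytes, plus 8 bytes on NV04 for the two ctxdma
 * header words written below.
 */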
int
nouveau_sgdma_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = NULL;
	u32 aper_size, align;
	int ret;

	if (dev_priv->card_type >= NV_40 && drm_pci_device_is_pcie(dev))
		aper_size = 512 * 1024 * 1024;
	else
		aper_size = 64 * 1024 * 1024;

	/* Dear NVIDIA, NV44+ would like proper present bits in PTEs for
	 * christmas.  The cards before it have them, the cards after
	 * it have them, why is NV44 so unloved?
	 */
	dev_priv->gart_info.dummy.page = alloc_page(GFP_DMA32 | GFP_KERNEL);
	if (!dev_priv->gart_info.dummy.page)
		return -ENOMEM;

	dev_priv->gart_info.dummy.addr =
		pci_map_page(dev->pdev, dev_priv->gart_info.dummy.page,
			     0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(dev->pdev, dev_priv->gart_info.dummy.addr)) {
		NV_ERROR(dev, "error mapping dummy page\n");
		__free_page(dev_priv->gart_info.dummy.page);
		dev_priv->gart_info.dummy.page = NULL;
		return -ENOMEM;
	}

	if (dev_priv->card_type >= NV_50) {
		dev_priv->gart_info.aper_base = 0;
		dev_priv->gart_info.aper_size = aper_size;
		dev_priv->gart_info.type = NOUVEAU_GART_HW;
		dev_priv->gart_info.func = &nv50_sgdma_backend;
	} else
	if (drm_pci_device_is_pcie(dev) &&
	    dev_priv->chipset > 0x40 && dev_priv->chipset != 0x45) {
		if (nv44_graph_class(dev)) {
			dev_priv->gart_info.func = &nv44_sgdma_backend;
			align = 512 * 1024;
		} else {
			dev_priv->gart_info.func = &nv41_sgdma_backend;
			align = 16;
		}

		ret = nouveau_gpuobj_new(dev, NULL, aper_size / 1024, align,
					 NVOBJ_FLAG_ZERO_ALLOC |
					 NVOBJ_FLAG_ZERO_FREE, &gpuobj);
		if (ret) {
			NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
			return ret;
		}

		dev_priv->gart_info.sg_ctxdma = gpuobj;
		dev_priv->gart_info.aper_base = 0;
		dev_priv->gart_info.aper_size = aper_size;
		dev_priv->gart_info.type = NOUVEAU_GART_HW;
	} else {
		ret = nouveau_gpuobj_new(dev, NULL, (aper_size / 1024) + 8, 16,
					 NVOBJ_FLAG_ZERO_ALLOC |
					 NVOBJ_FLAG_ZERO_FREE, &gpuobj);
		if (ret) {
			NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
			return ret;
		}

		nv_wo32(gpuobj, 0, NV_CLASS_DMA_IN_MEMORY |
				   (1 << 12) /* PT present */ |
				   (0 << 13) /* PT *not* linear */ |
				   (0 << 14) /* RW */ |
				   (2 << 16) /* PCI */);
		nv_wo32(gpuobj, 4, aper_size - 1);

		dev_priv->gart_info.sg_ctxdma = gpuobj;
		dev_priv->gart_info.aper_base = 0;
		dev_priv->gart_info.aper_size = aper_size;
		dev_priv->gart_info.type = NOUVEAU_GART_PDMA;
		dev_priv->gart_info.func = &nv04_sgdma_backend;
	}

	return 0;
}

void
nouveau_sgdma_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	nouveau_gpuobj_ref(NULL, &dev_priv->gart_info.sg_ctxdma);

	if (dev_priv->gart_info.dummy.page) {
		pci_unmap_page(dev->pdev, dev_priv->gart_info.dummy.addr,
			       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		__free_page(dev_priv->gart_info.dummy.page);
		dev_priv->gart_info.dummy.page = NULL;
	}
}

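/*
 * Translate an offset into the pre-NV50 GART aperture back to the bus
 * address held in its PTE; "+ 2" again skips the ctxdma header.
 */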
uint32_t
nouveau_sgdma_get_physical(struct drm_device *dev, uint32_t offset)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
	int pte = (offset >> NV_CTXDMA_PAGE_SHIFT) + 2;

	BUG_ON(dev_priv->card_type >= NV_50);

	return (nv_ro32(gpuobj, 4 * pte) & ~NV_CTXDMA_PAGE_MASK) |
		(offset & NV_CTXDMA_PAGE_MASK);
}
Ben Skeggs6ee73862009-12-11 19:24:15 +1000534}