#include "drmP.h"
#include "nouveau_drv.h"
#include <linux/pagemap.h>
#include <linux/slab.h>

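/*
 * The page-table entries written below always describe 4KiB GART pages,
 * independent of the CPU's PAGE_SIZE: bind/unbind emit one entry per
 * NV_CTXDMA_PAGE_SIZE chunk of each backing page.
 */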
#define NV_CTXDMA_PAGE_SHIFT 12
#define NV_CTXDMA_PAGE_SIZE  (1 << NV_CTXDMA_PAGE_SHIFT)
#define NV_CTXDMA_PAGE_MASK  (NV_CTXDMA_PAGE_SIZE - 1)

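/*
 * Per-buffer TTM backend state: the DMA addresses of the currently
 * populated pages, plus the PTE index this buffer was last bound at so
 * unbind() knows where to start clearing.
 */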
struct nouveau_sgdma_be {
        struct ttm_backend backend;
        struct drm_device *dev;

        dma_addr_t *pages;
        unsigned nr_pages;

        unsigned pte_start;
        bool bound;
};

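/* Map the buffer's backing pages for DMA and record their bus addresses. */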
static int
nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
                       struct page **pages, struct page *dummy_read_page)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
        struct drm_device *dev = nvbe->dev;

        NV_DEBUG(nvbe->dev, "num_pages = %lu\n", num_pages);

        if (nvbe->pages)
                return -EINVAL;

        nvbe->pages = kmalloc(sizeof(dma_addr_t) * num_pages, GFP_KERNEL);
        if (!nvbe->pages)
                return -ENOMEM;

        nvbe->nr_pages = 0;
        while (num_pages--) {
                nvbe->pages[nvbe->nr_pages] =
                        pci_map_page(dev->pdev, pages[nvbe->nr_pages], 0,
                                     PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
                if (pci_dma_mapping_error(dev->pdev,
                                          nvbe->pages[nvbe->nr_pages])) {
                        /* clear() unmaps everything mapped so far */
                        be->func->clear(be);
                        return -EFAULT;
                }

                nvbe->nr_pages++;
        }

        return 0;
}

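/* Undo populate(): unbind if still bound, then unmap and free the pages. */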
static void
nouveau_sgdma_clear(struct ttm_backend *be)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
        struct drm_device *dev;

        if (nvbe && nvbe->pages) {
                dev = nvbe->dev;
                NV_DEBUG(dev, "\n");

                if (nvbe->bound)
                        be->func->unbind(be);

                while (nvbe->nr_pages--) {
                        pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
                                       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
                }
                kfree(nvbe->pages);
                nvbe->pages = NULL;
                nvbe->nr_pages = 0;
        }
}

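/*
 * Convert a byte offset into the GART aperture to a word index into the
 * page-table object.  Pre-NV50 the object is a ctxdma with a two-word
 * header followed by one 32-bit PTE per page, so the first PTE lives at
 * word 2; on NV50 each PTE is two 32-bit words, so the page index is
 * doubled.  E.g. aperture offset 0x3000 is page 3: word 5 pre-NV50,
 * word 6 on NV50.
 */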
static inline unsigned
nouveau_sgdma_pte(struct drm_device *dev, uint64_t offset)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        unsigned pte = (offset >> NV_CTXDMA_PAGE_SHIFT);

        if (dev_priv->card_type < NV_50)
                return pte + 2;

        return pte << 1;
}

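/*
 * Write a PTE for every 4KiB chunk of every populated page, starting at
 * the aperture offset TTM assigned this buffer, then flush instmem (and,
 * on NV50, the FIFO/graph TLBs) so the GPU sees the new mappings.
 */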
static int
nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
        struct drm_device *dev = nvbe->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
        unsigned i, j, pte;

        NV_DEBUG(dev, "pg=0x%lx\n", mem->start);

        pte = nouveau_sgdma_pte(nvbe->dev, mem->start << PAGE_SHIFT);
        nvbe->pte_start = pte;
        for (i = 0; i < nvbe->nr_pages; i++) {
                dma_addr_t dma_offset = nvbe->pages[i];

                for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) {
                        /* recompute per chunk so CPU pages larger than
                         * 4KiB get distinct addresses for each PTE */
                        uint32_t offset_l = lower_32_bits(dma_offset);
                        uint32_t offset_h = upper_32_bits(dma_offset);

                        if (dev_priv->card_type < NV_50) {
                                nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 3);
                                pte += 1;
                        } else {
                                nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 0x21);
                                nv_wo32(gpuobj, (pte * 4) + 4, offset_h & 0xff);
                                pte += 2;
                        }

                        dma_offset += NV_CTXDMA_PAGE_SIZE;
                }
        }
        dev_priv->engine.instmem.flush(nvbe->dev);

        if (dev_priv->card_type == NV_50) {
                dev_priv->engine.fifo.tlb_flush(dev);
                dev_priv->engine.graph.tlb_flush(dev);
        }

        nvbe->bound = true;
        return 0;
}

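/*
 * Point this buffer's PTEs back at the dummy page (pre-NV50) or clear
 * them entirely (NV50), with the same flushing as bind().
 */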
static int
nouveau_sgdma_unbind(struct ttm_backend *be)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
        struct drm_device *dev = nvbe->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
        unsigned i, j, pte;

        NV_DEBUG(dev, "\n");

        if (!nvbe->bound)
                return 0;

        pte = nvbe->pte_start;
        for (i = 0; i < nvbe->nr_pages; i++) {
                /* the dummy page is a single 4KiB page, so every chunk
                 * points back at the same bus address */
                dma_addr_t dma_offset = dev_priv->gart_info.sg_dummy_bus;

                for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) {
                        if (dev_priv->card_type < NV_50) {
                                nv_wo32(gpuobj, (pte * 4) + 0, dma_offset | 3);
                                pte += 1;
                        } else {
                                nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
                                nv_wo32(gpuobj, (pte * 4) + 4, 0x00000000);
                                pte += 2;
                        }
                }
        }
        dev_priv->engine.instmem.flush(nvbe->dev);

        if (dev_priv->card_type == NV_50) {
                dev_priv->engine.fifo.tlb_flush(dev);
                dev_priv->engine.graph.tlb_flush(dev);
        }

        nvbe->bound = false;
        return 0;
}

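/* Final teardown: release any remaining mappings and free the backend. */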
static void
nouveau_sgdma_destroy(struct ttm_backend *be)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;

        if (nvbe) {
                NV_DEBUG(nvbe->dev, "\n");

                if (nvbe->pages)
                        be->func->clear(be);
                kfree(nvbe);
        }
}

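/* TTM backend vtable wiring the callbacks above into TTM. */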
static struct ttm_backend_func nouveau_sgdma_backend = {
        .populate = nouveau_sgdma_populate,
        .clear = nouveau_sgdma_clear,
        .bind = nouveau_sgdma_bind,
        .unbind = nouveau_sgdma_unbind,
        .destroy = nouveau_sgdma_destroy,
};

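/*
 * Allocate a backend instance for a TTM buffer.  Requires the shared
 * page table created by nouveau_sgdma_init() to already exist.
 */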
struct ttm_backend *
nouveau_sgdma_init_ttm(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_sgdma_be *nvbe;

        if (!dev_priv->gart_info.sg_ctxdma)
                return NULL;

        nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
        if (!nvbe)
                return NULL;

        nvbe->dev = dev;

        nvbe->backend.func = &nouveau_sgdma_backend;

        return &nvbe->backend;
}

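/*
 * Create the global GART page table: pre-NV50 a ctxdma object (two
 * header words followed by one 32-bit PTE per 4KiB page, every entry
 * initially pointing at a dummy page), on NV50 a 512MiB VM page table
 * of 64-bit entries, initially empty.
 */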
int
nouveau_sgdma_init(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct pci_dev *pdev = dev->pdev;
        struct nouveau_gpuobj *gpuobj = NULL;
        uint32_t aper_size, obj_size;
        int i, ret;

        if (dev_priv->card_type < NV_50) {
                if (dev_priv->ramin_rsvd_vram < 2 * 1024 * 1024)
                        aper_size = 64 * 1024 * 1024;
                else
                        aper_size = 512 * 1024 * 1024;

                obj_size  = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 4;
                obj_size += 8; /* ctxdma header */
        } else {
                /* 1 entire VM page table */
                aper_size = (512 * 1024 * 1024);
                obj_size  = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 8;
        }

        ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16,
                                 NVOBJ_FLAG_ZERO_ALLOC |
                                 NVOBJ_FLAG_ZERO_FREE, &gpuobj);
        if (ret) {
                NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
                return ret;
        }

        dev_priv->gart_info.sg_dummy_page =
                alloc_page(GFP_KERNEL|__GFP_DMA32|__GFP_ZERO);
        if (!dev_priv->gart_info.sg_dummy_page) {
                nouveau_gpuobj_ref(NULL, &gpuobj);
                return -ENOMEM;
        }

        set_bit(PG_locked, &dev_priv->gart_info.sg_dummy_page->flags);
        dev_priv->gart_info.sg_dummy_bus =
                pci_map_page(pdev, dev_priv->gart_info.sg_dummy_page, 0,
                             PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
        if (pci_dma_mapping_error(pdev, dev_priv->gart_info.sg_dummy_bus)) {
                /* don't leak the dummy page on the error path */
                unlock_page(dev_priv->gart_info.sg_dummy_page);
                __free_page(dev_priv->gart_info.sg_dummy_page);
                dev_priv->gart_info.sg_dummy_page = NULL;
                nouveau_gpuobj_ref(NULL, &gpuobj);
                return -EFAULT;
        }

        if (dev_priv->card_type < NV_50) {
                /* Special case: allocated from the global instmem heap, so
                 * cinst would normally be invalid.  We use this object on
                 * all channels, though, so cinst needs to be valid; set it
                 * to the same as pinst.
                 */
                gpuobj->cinst = gpuobj->pinst;

                /* Maybe use NV_DMA_TARGET_AGP for PCIE? NVIDIA do this, and
                 * it's confirmed to work on c51.  Perhaps that means
                 * NV_DMA_TARGET_PCIE on those cards? */
                nv_wo32(gpuobj, 0, NV_CLASS_DMA_IN_MEMORY |
                                   (1 << 12) /* PT present */ |
                                   (0 << 13) /* PT *not* linear */ |
                                   (NV_DMA_ACCESS_RW << 14) |
                                   (NV_DMA_TARGET_PCI << 16));
                nv_wo32(gpuobj, 4, aper_size - 1);
                for (i = 2; i < 2 + (aper_size >> 12); i++) {
                        nv_wo32(gpuobj, i * 4,
                                dev_priv->gart_info.sg_dummy_bus | 3);
                }
        } else {
                for (i = 0; i < obj_size; i += 8) {
                        nv_wo32(gpuobj, i + 0, 0x00000000);
                        nv_wo32(gpuobj, i + 4, 0x00000000);
                }
        }
        dev_priv->engine.instmem.flush(dev);

        dev_priv->gart_info.type = NOUVEAU_GART_SGDMA;
        dev_priv->gart_info.aper_base = 0;
        dev_priv->gart_info.aper_size = aper_size;
        dev_priv->gart_info.sg_ctxdma = gpuobj;
        return 0;
}

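/* Tear down everything created by nouveau_sgdma_init(). */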
void
nouveau_sgdma_takedown(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;

        if (dev_priv->gart_info.sg_dummy_page) {
                /* unmap with the same size the page was mapped with */
                pci_unmap_page(dev->pdev, dev_priv->gart_info.sg_dummy_bus,
                               PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
                unlock_page(dev_priv->gart_info.sg_dummy_page);
                __free_page(dev_priv->gart_info.sg_dummy_page);
                dev_priv->gart_info.sg_dummy_page = NULL;
                dev_priv->gart_info.sg_dummy_bus = 0;
        }

        nouveau_gpuobj_ref(NULL, &dev_priv->gart_info.sg_ctxdma);
}

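/*
 * Look up the bus address a GART offset currently maps to by reading the
 * PTE back out of the pre-NV50 ctxdma (skipping its 8-byte header).
 */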
int
nouveau_sgdma_get_page(struct drm_device *dev, uint32_t offset, uint32_t *page)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
        int pte;

        pte = (offset >> NV_CTXDMA_PAGE_SHIFT) << 2;
        if (dev_priv->card_type < NV_50) {
                *page = nv_ro32(gpuobj, (pte + 8)) & ~NV_CTXDMA_PAGE_MASK;
                return 0;
        }

        NV_ERROR(dev, "Unimplemented on NV50\n");
        return -EINVAL;
}