/*
 * Copyright (C) The Weather Channel, Inc.  2002.  All Rights Reserved.
 * Copyright 2005 Stephane Marchesin
 *
 * The Weather Channel (TM) funded Tungsten Graphics to develop the
 * initial release of the Radeon 8500 driver under the XFree86 license.
 * This notice must be preserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Whitwell <keith@tungstengraphics.com>
 */


#include "drmP.h"
#include "drm.h"
#include "drm_sarea.h"
#include "nouveau_drv.h"

static struct mem_block *
split_block(struct mem_block *p, uint64_t start, uint64_t size,
	    struct drm_file *file_priv)
{
	/* Maybe cut off the start of an existing block */
	if (start > p->start) {
		struct mem_block *newblock =
			kmalloc(sizeof(*newblock), GFP_KERNEL);
		if (!newblock)
			goto out;
		newblock->start = start;
		newblock->size = p->size - (start - p->start);
		newblock->file_priv = NULL;
		newblock->next = p->next;
		newblock->prev = p;
		p->next->prev = newblock;
		p->next = newblock;
		p->size -= newblock->size;
		p = newblock;
	}

	/* Maybe cut off the end of an existing block */
	if (size < p->size) {
		struct mem_block *newblock =
			kmalloc(sizeof(*newblock), GFP_KERNEL);
		if (!newblock)
			goto out;
		newblock->start = start + size;
		newblock->size = p->size - size;
		newblock->file_priv = NULL;
		newblock->next = p->next;
		newblock->prev = p;
		p->next->prev = newblock;
		p->next = newblock;
		p->size = size;
	}

out:
	/* Our block is in the middle */
	p->file_priv = file_priv;
	return p;
}

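/* Allocate 'size' bytes from 'heap', aligned to 1 << align2 bytes.  With
 * tail != 0 the heap is scanned backwards and the block is carved from
 * the end of the highest suitable free range.  Illustrative use only
 * (the caller-side variable names are hypothetical):
 *
 *	// 16KiB, 4KiB-aligned, carved from the front of the heap
 *	block = nouveau_mem_alloc_block(heap, 16 * 1024, 12, file_priv, 0);
 *	if (!block)
 *		return -ENOMEM;
 *
 * For align2 = 12, mask = 0xfff and 'start' is rounded up to the next
 * 4KiB boundary before the fit check.
 */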
struct mem_block *
nouveau_mem_alloc_block(struct mem_block *heap, uint64_t size,
			int align2, struct drm_file *file_priv, int tail)
{
	struct mem_block *p;
	uint64_t mask = (1ULL << align2) - 1;

	if (!heap)
		return NULL;

	if (tail) {
		list_for_each_prev(p, heap) {
			uint64_t start = ((p->start + p->size) - size) & ~mask;

			if (p->file_priv == NULL && start >= p->start &&
			    start + size <= p->start + p->size)
				return split_block(p, start, size, file_priv);
		}
	} else {
		list_for_each(p, heap) {
			uint64_t start = (p->start + mask) & ~mask;

			if (p->file_priv == NULL &&
			    start + size <= p->start + p->size)
				return split_block(p, start, size, file_priv);
		}
	}

	return NULL;
}

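/* Free a block and eagerly merge it with free neighbours.  The sentinel
 * block installed by nouveau_mem_init_heap() carries file_priv == -1, so
 * it never compares equal to NULL and the merge cannot run off the ends
 * of the list.
 */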
void nouveau_mem_free_block(struct mem_block *p)
{
	p->file_priv = NULL;

	/* Assumes a single contiguous range.  Needs a special file_priv in
	 * 'heap' to stop it being subsumed.
	 */
	if (p->next->file_priv == NULL) {
		struct mem_block *q = p->next;
		p->size += q->size;
		p->next = q->next;
		p->next->prev = p;
		kfree(q);
	}

	if (p->prev->file_priv == NULL) {
		struct mem_block *q = p->prev;
		q->size += p->size;
		q->next = p->next;
		q->next->prev = q;
		kfree(p);
	}
}

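/* A heap is a sentinel node (*heap) plus one initial free block covering
 * [start, start + size).  Typical setup/teardown, illustrative only:
 *
 *	struct mem_block *heap = NULL;
 *
 *	if (nouveau_mem_init_heap(&heap, 0, 64 * 1024 * 1024))
 *		return -ENOMEM;
 *	...
 *	nouveau_mem_takedown(&heap);
 */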
/* Initialize.  How to check for an uninitialized heap?
 */
int nouveau_mem_init_heap(struct mem_block **heap, uint64_t start,
			  uint64_t size)
{
	struct mem_block *blocks = kmalloc(sizeof(*blocks), GFP_KERNEL);

	if (!blocks)
		return -ENOMEM;

	*heap = kmalloc(sizeof(**heap), GFP_KERNEL);
	if (!*heap) {
		kfree(blocks);
		return -ENOMEM;
	}

	blocks->start = start;
	blocks->size = size;
	blocks->file_priv = NULL;
	blocks->next = blocks->prev = *heap;

	memset(*heap, 0, sizeof(**heap));
	(*heap)->file_priv = (struct drm_file *) -1;
	(*heap)->next = (*heap)->prev = blocks;
	return 0;
}

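/* Two passes: first everything owned by the releasing client is marked
 * free, then adjacent free blocks are coalesced in a separate walk,
 * presumably to keep the list traversal simple while nodes are being
 * deleted underneath it.
 */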
/*
 * Free all blocks associated with the releasing file_priv
 */
void nouveau_mem_release(struct drm_file *file_priv, struct mem_block *heap)
{
	struct mem_block *p;

	if (!heap || !heap->next)
		return;

	list_for_each(p, heap) {
		if (p->file_priv == file_priv)
			p->file_priv = NULL;
	}

	/* Assumes a single contiguous range.  Needs a special file_priv in
	 * 'heap' to stop it being subsumed.
	 */
	list_for_each(p, heap) {
		while ((p->file_priv == NULL) &&
		       (p->next->file_priv == NULL) &&
		       (p->next != heap)) {
			struct mem_block *q = p->next;
			p->size += q->size;
			p->next = q->next;
			p->next->prev = p;
			kfree(q);
		}
	}
}

/*
 * NV10-NV40 tiling helpers
 */

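/* (Re)program tile region 'i' across PFB, PGRAPH and PFIFO.  The FIFO is
 * quiesced first (reassign off, caches flushed, pull disabled) and the
 * engines drained with nouveau_wait_for_idle() so the tiling change never
 * lands mid-command; a pitch of 0 disables the region.
 */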
static void
nv10_mem_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
			   uint32_t size, uint32_t pitch)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];

	tile->addr = addr;
	tile->size = size;
	tile->used = !!pitch;
	nouveau_fence_unref((void **)&tile->fence);

	if (!pfifo->cache_flush(dev))
		return;

	pfifo->reassign(dev, false);
	pfifo->cache_flush(dev);
	pfifo->cache_pull(dev, false);

	nouveau_wait_for_idle(dev);

	pgraph->set_region_tiling(dev, i, addr, size, pitch);
	pfb->set_region_tiling(dev, i, addr, size, pitch);

	pfifo->cache_pull(dev, true);
	pfifo->reassign(dev, true);
}

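/* Pick a hardware tile region for [addr, addr + size).  Regions still in
 * use or still fenced are skipped, anything overlapping the new range is
 * torn down, and with a non-zero pitch the first free slot is programmed
 * and returned (NULL if none is available).
 */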
struct nouveau_tile_reg *
nv10_mem_set_tiling(struct drm_device *dev, uint32_t addr, uint32_t size,
		    uint32_t pitch)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
	struct nouveau_tile_reg *tile = dev_priv->tile.reg, *found = NULL;
	int i;

	spin_lock(&dev_priv->tile.lock);

	for (i = 0; i < pfb->num_tiles; i++) {
		if (tile[i].used)
			/* Tile region in use. */
			continue;

		if (tile[i].fence &&
		    !nouveau_fence_signalled(tile[i].fence, NULL))
			/* Pending tile region. */
			continue;

		if (max(tile[i].addr, addr) <
		    min(tile[i].addr + tile[i].size, addr + size))
			/* Kill an intersecting tile region. */
			nv10_mem_set_region_tiling(dev, i, 0, 0, 0);

		if (pitch && !found) {
			/* Free tile region. */
			nv10_mem_set_region_tiling(dev, i, addr, size, pitch);
			found = &tile[i];
		}
	}

	spin_unlock(&dev_priv->tile.lock);

	return found;
}

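/* Release a tile region.  With a fence attached the slot stays "pending"
 * and is only reused once nv10_mem_set_tiling() sees the fence signalled;
 * without one it becomes immediately reusable.
 */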
void
nv10_mem_expire_tiling(struct drm_device *dev, struct nouveau_tile_reg *tile,
		       struct nouveau_fence *fence)
{
	if (fence) {
		/* Mark it as pending. */
		tile->fence = fence;
		nouveau_fence_ref(fence);
	}

	tile->used = false;
}

/*
 * NV50 VM helpers
 */
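/* Map a linear range of VRAM into the NV50 VM.  Units below: one 64KiB
 * page takes two 32-bit PTE words, so 'virt' and 'size' are converted to
 * word offsets up front ((x >> 16) << 1), and each page table holds 16384
 * words, i.e. 512MiB of address space (hence pgt = vm_vram_pt[virt >> 14]).
 * The low PTE word carries the present bit and, via 'offset_l |= (i << 7)',
 * the size of the largest naturally aligned run the entry belongs to,
 * which is what the descending i-loop computes; 'phys += block << 15'
 * advances by block/2 pages of 64KiB.  The 0x100c80 writes afterwards
 * appear to trigger and poll a VM flush (inferred from the timeout error
 * paths; the register itself is undocumented here).
 */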
int
nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size,
			uint32_t flags, uint64_t phys)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *pgt;
	unsigned block;
	int i;

	virt = ((virt - dev_priv->vm_vram_base) >> 16) << 1;
	size = (size >> 16) << 1;
	phys |= ((uint64_t)flags << 32) | 1;

	dev_priv->engine.instmem.prepare_access(dev, true);
	while (size) {
		unsigned offset_h = upper_32_bits(phys);
		unsigned offset_l = lower_32_bits(phys);
		unsigned pte, end;

		for (i = 7; i >= 0; i--) {
			block = 1 << (i + 1);
			if (size >= block && !(virt & (block - 1)))
				break;
		}
		offset_l |= (i << 7);

		phys += block << 15;
		size -= block;

		while (block) {
			pgt = dev_priv->vm_vram_pt[virt >> 14];
			pte = virt & 0x3ffe;

			end = pte + block;
			if (end > 16384)
				end = 16384;
			block -= (end - pte);
			virt += (end - pte);

			while (pte < end) {
				nv_wo32(dev, pgt, pte++, offset_l);
				nv_wo32(dev, pgt, pte++, offset_h);
			}
		}
	}
	dev_priv->engine.instmem.finish_access(dev);

	nv_wr32(dev, 0x100c80, 0x00050001);
	if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
		NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
		NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
		return -EBUSY;
	}

	nv_wr32(dev, 0x100c80, 0x00000001);
	if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
		NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
		NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
		return -EBUSY;
	}

	return 0;
}

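/* Undo nv50_mem_vm_bind_linear(): clear both PTE words for every 64KiB
 * page in the range.  Here 'virt' stays in bytes, so 'virt >> 29' indexes
 * the page table (512MiB per table) and '(virt & 0x1ffe0000) >> 15' is
 * the word offset within it, matching the word-based arithmetic on the
 * bind side.
 */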
void
nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *pgt;
	unsigned pages, pte, end;

	virt -= dev_priv->vm_vram_base;
	pages = (size >> 16) << 1;

	dev_priv->engine.instmem.prepare_access(dev, true);
	while (pages) {
		pgt = dev_priv->vm_vram_pt[virt >> 29];
		pte = (virt & 0x1ffe0000ULL) >> 15;

		end = pte + pages;
		if (end > 16384)
			end = 16384;
		pages -= (end - pte);
		virt += (end - pte) << 15;

		while (pte < end)
			nv_wo32(dev, pgt, pte++, 0);
	}
	dev_priv->engine.instmem.finish_access(dev);

	nv_wr32(dev, 0x100c80, 0x00050001);
	if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
		NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
		NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
		return;
	}

	nv_wr32(dev, 0x100c80, 0x00000001);
	if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
		NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
		NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
	}
}

/*
 * Cleanup everything
 */
void nouveau_mem_takedown(struct mem_block **heap)
{
	struct mem_block *p;

	if (!*heap)
		return;

	for (p = (*heap)->next; p != *heap;) {
		struct mem_block *q = p;
		p = p->next;
		kfree(q);
	}

	kfree(*heap);
	*heap = NULL;
}

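/* Tear down in roughly the reverse order of nouveau_mem_init(): release
 * the reserved VGA buffer, the TTM device and globals, any AGP state,
 * and finally the write-combining MTRR over the framebuffer BAR.
 */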
void nouveau_mem_close(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	/* vga_ram may be NULL if the reservation failed during init */
	if (dev_priv->vga_ram)
		nouveau_bo_unpin(dev_priv->vga_ram);
	nouveau_bo_ref(NULL, &dev_priv->vga_ram);

	ttm_bo_device_release(&dev_priv->ttm.bdev);

	nouveau_ttm_global_release(dev_priv);

	if (drm_core_has_AGP(dev) && dev->agp &&
	    drm_core_check_feature(dev, DRIVER_MODESET)) {
		struct drm_agp_mem *entry, *tempe;

		/* Remove AGP resources, but leave dev->agp
		   intact until drv_cleanup is called. */
		list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) {
			if (entry->bound)
				drm_unbind_agp(entry->memory);
			drm_free_agp(entry->memory, entry->pages);
			kfree(entry);
		}
		INIT_LIST_HEAD(&dev->agp->memory);

		if (dev->agp->acquired)
			drm_agp_release(dev);

		dev->agp->acquired = 0;
		dev->agp->enabled = 0;
	}

	if (dev_priv->fb_mtrr) {
		drm_mtrr_del(dev_priv->fb_mtrr, drm_get_resource_start(dev, 1),
			     drm_get_resource_len(dev, 1), DRM_MTRR_WC);
		dev_priv->fb_mtrr = 0;
	}
}

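/* On nForce/nForce2 IGPs the "video RAM" is a carve-out of system memory,
 * so its size comes from the host bridge (PCI function 00:00.1) rather
 * than the GPU: config dword 0x7c on nForce, 0x84 on nForce2, each
 * holding the carve-out size in MiB minus one in a small bitfield.
 */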
/*XXX won't work on BSD because of pci_read_config_dword */
static uint32_t
nouveau_mem_fb_amount_igp(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct pci_dev *bridge;
	uint32_t mem;

	bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 1));
	if (!bridge) {
		NV_ERROR(dev, "no bridge device\n");
		return 0;
	}

	if (dev_priv->flags & NV_NFORCE) {
		pci_read_config_dword(bridge, 0x7C, &mem);
		return (uint64_t)(((mem >> 6) & 31) + 1)*1024*1024;
	} else
	if (dev_priv->flags & NV_NFORCE2) {
		pci_read_config_dword(bridge, 0x84, &mem);
		return (uint64_t)(((mem >> 4) & 127) + 1)*1024*1024;
	}

	NV_ERROR(dev, "impossible!\n");
	return 0;
}

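/* Worked example for the NV04 path below: with BOOT_0 bit 8 set the RAM
 * size is encoded directly, e.g. ((boot0 >> 12) & 0xf) == 3 gives
 * (3 * 2 + 2) = 8MiB; otherwise the NV03_BOOT_0_RAM_AMOUNT field selects
 * one of the fixed 4/8/16/32MiB sizes.  NV10 and later instead report
 * the size in MiB in NV04_FIFO_DATA's RAM_AMOUNT_MB field.
 */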
/* returns the amount of FB ram in bytes */
uint64_t nouveau_mem_fb_amount(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t boot0;

	switch (dev_priv->card_type) {
	case NV_04:
		boot0 = nv_rd32(dev, NV03_BOOT_0);
		if (boot0 & 0x00000100)
			return (((boot0 >> 12) & 0xf) * 2 + 2) * 1024 * 1024;

		switch (boot0 & NV03_BOOT_0_RAM_AMOUNT) {
		case NV04_BOOT_0_RAM_AMOUNT_32MB:
			return 32 * 1024 * 1024;
		case NV04_BOOT_0_RAM_AMOUNT_16MB:
			return 16 * 1024 * 1024;
		case NV04_BOOT_0_RAM_AMOUNT_8MB:
			return 8 * 1024 * 1024;
		case NV04_BOOT_0_RAM_AMOUNT_4MB:
			return 4 * 1024 * 1024;
		}
		break;
	case NV_10:
	case NV_20:
	case NV_30:
	case NV_40:
	case NV_50:
	default:
		if (dev_priv->flags & (NV_NFORCE | NV_NFORCE2)) {
			return nouveau_mem_fb_amount_igp(dev);
		} else {
			uint64_t mem;
			mem = (nv_rd32(dev, NV04_FIFO_DATA) &
			       NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK) >>
			      NV10_FIFO_DATA_RAM_AMOUNT_MB_SHIFT;
			return mem * 1024 * 1024;
		}
		break;
	}

	NV_ERROR(dev,
		 "Unable to detect video ram size. Please report your setup to "
		 DRIVER_EMAIL "\n");
	return 0;
}

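/* Reset the AGP interface: save the relevant PBUS PCI config shadows,
 * drop the busmaster bit and the SBA/AGP enable bits, power cycle PGRAPH
 * through NV03_PMC_ENABLE, then write the saved values back, which has
 * the effect of re-training the link from a clean state.
 */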
#if __OS_HAS_AGP
static void nouveau_mem_reset_agp(struct drm_device *dev)
{
	uint32_t saved_pci_nv_1, saved_pci_nv_19, pmc_enable;

	saved_pci_nv_1 = nv_rd32(dev, NV04_PBUS_PCI_NV_1);
	saved_pci_nv_19 = nv_rd32(dev, NV04_PBUS_PCI_NV_19);

	/* clear busmaster bit */
	nv_wr32(dev, NV04_PBUS_PCI_NV_1, saved_pci_nv_1 & ~0x4);
	/* clear SBA and AGP bits */
	nv_wr32(dev, NV04_PBUS_PCI_NV_19, saved_pci_nv_19 & 0xfffff0ff);

	/* power cycle pgraph, if enabled */
	pmc_enable = nv_rd32(dev, NV03_PMC_ENABLE);
	if (pmc_enable & NV_PMC_ENABLE_PGRAPH) {
		nv_wr32(dev, NV03_PMC_ENABLE,
			pmc_enable & ~NV_PMC_ENABLE_PGRAPH);
		nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
			NV_PMC_ENABLE_PGRAPH);
	}

	/* and restore (gives effect of resetting AGP) */
	nv_wr32(dev, NV04_PBUS_PCI_NV_19, saved_pci_nv_19);
	nv_wr32(dev, NV04_PBUS_PCI_NV_1, saved_pci_nv_1);
}
#endif

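/* Bring up the AGP aperture: reset the link, acquire and query the AGP
 * backend through the DRM core, re-enable it at the mode the bridge
 * already reports, and record the aperture in gart_info.  Compiles to a
 * no-op returning 0 when the kernel lacks AGP support, and returns early
 * when the nouveau_noagp module option is set.
 */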
int
nouveau_mem_init_agp(struct drm_device *dev)
{
#if __OS_HAS_AGP
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_agp_info info;
	struct drm_agp_mode mode;
	int ret;

	if (nouveau_noagp)
		return 0;

	nouveau_mem_reset_agp(dev);

	if (!dev->agp->acquired) {
		ret = drm_agp_acquire(dev);
		if (ret) {
			NV_ERROR(dev, "Unable to acquire AGP: %d\n", ret);
			return ret;
		}
	}

	ret = drm_agp_info(dev, &info);
	if (ret) {
		NV_ERROR(dev, "Unable to get AGP info: %d\n", ret);
		return ret;
	}

	/* see agp.h for the AGPSTAT_* modes available */
	mode.mode = info.mode;
	ret = drm_agp_enable(dev, mode);
	if (ret) {
		NV_ERROR(dev, "Unable to enable AGP: %d\n", ret);
		return ret;
	}

	dev_priv->gart_info.type = NOUVEAU_GART_AGP;
	dev_priv->gart_info.aper_base = info.aperture_base;
	dev_priv->gart_info.aper_size = info.aperture_size;
#endif
	return 0;
}

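/* Top-level memory setup: pick a DMA mask (40 bits on NV50+ where the
 * platform allows it), initialise TTM and its VRAM range, size the
 * framebuffer, reserve and pin a 256KiB "vga_ram" buffer in VRAM
 * (presumably to keep the legacy VGA region out of the allocator; init
 * continues with a warning if this fails), bring up AGP or SGDMA for the
 * GART and its TT range, and finally cover the framebuffer BAR with a
 * write-combining MTRR.
 */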
int
nouveau_mem_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
	int ret, dma_bits = 32;

	dev_priv->fb_phys = drm_get_resource_start(dev, 1);
	dev_priv->gart_info.type = NOUVEAU_GART_NONE;

	if (dev_priv->card_type >= NV_50 &&
	    pci_dma_supported(dev->pdev, DMA_BIT_MASK(40)))
		dma_bits = 40;

	ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits));
	if (ret) {
		NV_ERROR(dev, "Error setting DMA mask: %d\n", ret);
		return ret;
	}

	ret = nouveau_ttm_global_init(dev_priv);
	if (ret)
		return ret;

	ret = ttm_bo_device_init(&dev_priv->ttm.bdev,
				 dev_priv->ttm.bo_global_ref.ref.object,
				 &nouveau_bo_driver, DRM_FILE_PAGE_OFFSET,
				 dma_bits <= 32 ? true : false);
	if (ret) {
		NV_ERROR(dev, "Error initialising bo driver: %d\n", ret);
		return ret;
	}

	INIT_LIST_HEAD(&dev_priv->ttm.bo_list);
	spin_lock_init(&dev_priv->ttm.bo_list_lock);
	spin_lock_init(&dev_priv->tile.lock);

	dev_priv->fb_available_size = nouveau_mem_fb_amount(dev);

	dev_priv->fb_mappable_pages = dev_priv->fb_available_size;
	if (dev_priv->fb_mappable_pages > drm_get_resource_len(dev, 1))
		dev_priv->fb_mappable_pages = drm_get_resource_len(dev, 1);
	dev_priv->fb_mappable_pages >>= PAGE_SHIFT;

	NV_INFO(dev, "%d MiB VRAM\n", (int)(dev_priv->fb_available_size >> 20));

	/* remove reserved space at end of vram from available amount */
	dev_priv->fb_available_size -= dev_priv->ramin_rsvd_vram;
	dev_priv->fb_aper_free = dev_priv->fb_available_size;

	/* mappable vram */
	ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
			     dev_priv->fb_available_size >> PAGE_SHIFT);
	if (ret) {
		NV_ERROR(dev, "Failed VRAM mm init: %d\n", ret);
		return ret;
	}

	ret = nouveau_bo_new(dev, NULL, 256*1024, 0, TTM_PL_FLAG_VRAM,
			     0, 0, true, true, &dev_priv->vga_ram);
	if (ret == 0)
		ret = nouveau_bo_pin(dev_priv->vga_ram, TTM_PL_FLAG_VRAM);
	if (ret) {
		NV_WARN(dev, "failed to reserve VGA memory\n");
		nouveau_bo_ref(NULL, &dev_priv->vga_ram);
	}

	/* GART */
#if !defined(__powerpc__) && !defined(__ia64__)
	if (drm_device_is_agp(dev) && dev->agp) {
		ret = nouveau_mem_init_agp(dev);
		if (ret)
			NV_ERROR(dev, "Error initialising AGP: %d\n", ret);
	}
#endif

	if (dev_priv->gart_info.type == NOUVEAU_GART_NONE) {
		ret = nouveau_sgdma_init(dev);
		if (ret) {
			NV_ERROR(dev, "Error initialising PCI(E): %d\n", ret);
			return ret;
		}
	}

	NV_INFO(dev, "%d MiB GART (aperture)\n",
		(int)(dev_priv->gart_info.aper_size >> 20));
	dev_priv->gart_info.aper_free = dev_priv->gart_info.aper_size;

	ret = ttm_bo_init_mm(bdev, TTM_PL_TT,
			     dev_priv->gart_info.aper_size >> PAGE_SHIFT);
	if (ret) {
		NV_ERROR(dev, "Failed TT mm init: %d\n", ret);
		return ret;
	}

	dev_priv->fb_mtrr = drm_mtrr_add(drm_get_resource_start(dev, 1),
					 drm_get_resource_len(dev, 1),
					 DRM_MTRR_WC);

	return 0;
}