blob: 2d02401e8227922ce0bdff61c01e1a26a28ee35a [file] [log] [blame]
Ben Skeggs6ee73862009-12-11 19:24:15 +10001/*
2 * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
3 * Copyright 2005 Stephane Marchesin
4 *
5 * The Weather Channel (TM) funded Tungsten Graphics to develop the
6 * initial release of the Radeon 8500 driver under the XFree86 license.
7 * This notice must be preserved.
8 *
9 * Permission is hereby granted, free of charge, to any person obtaining a
10 * copy of this software and associated documentation files (the "Software"),
11 * to deal in the Software without restriction, including without limitation
12 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
13 * and/or sell copies of the Software, and to permit persons to whom the
14 * Software is furnished to do so, subject to the following conditions:
15 *
16 * The above copyright notice and this permission notice (including the next
17 * paragraph) shall be included in all copies or substantial portions of the
18 * Software.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
23 * THE AUTHORS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
24 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
25 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
26 * DEALINGS IN THE SOFTWARE.
27 *
28 * Authors:
29 * Keith Whitwell <keith@tungstengraphics.com>
30 */
31
32
33#include "drmP.h"
34#include "drm.h"
35#include "drm_sarea.h"
Ben Skeggs6ee73862009-12-11 19:24:15 +100036
Francisco Jerezcbab95db2010-10-11 03:43:58 +020037#include "nouveau_drv.h"
38#include "nouveau_pm.h"
Ben Skeggs573a2a32010-08-25 15:26:04 +100039#include "nouveau_mm.h"
Ben Skeggsa11c3192010-08-27 10:00:25 +100040#include "nouveau_vm.h"
Roy Splieta845fff2010-10-04 23:01:08 +020041
Ben Skeggs6ee73862009-12-11 19:24:15 +100042/*
Francisco Jereza0af9ad2009-12-11 16:51:09 +010043 * NV10-NV40 tiling helpers
44 */
45
/*
 * Reprogram one NV10-NV40 tile region (index derived from the pointer's
 * offset into dev_priv->tile.reg).  A zero pitch tears the region down;
 * a non-zero pitch (re)initialises it with the given addr/size/flags.
 *
 * The hardware update is done with PFIFO halted and the channel switch
 * lock held with IRQs off, so no context switch can observe a
 * half-programmed tile region.
 */
static void
nv10_mem_update_tile_region(struct drm_device *dev,
			    struct nouveau_tile_reg *tile, uint32_t addr,
			    uint32_t size, uint32_t pitch, uint32_t flags)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
	int i = tile - dev_priv->tile.reg;	/* region index from pointer arithmetic */
	unsigned long save;

	/* Drop any fence left over from a previous user of this region. */
	nouveau_fence_unref(&tile->fence);

	/* Release the old configuration before installing the new one. */
	if (tile->pitch)
		pfb->free_tile_region(dev, i);

	if (pitch)
		pfb->init_tile_region(dev, i, addr, size, pitch, flags);

	/* Stop PFIFO pulling new work and wait for the GPU to go idle
	 * before touching the PFB/PGRAPH tile registers. */
	spin_lock_irqsave(&dev_priv->context_switch_lock, save);
	pfifo->reassign(dev, false);
	pfifo->cache_pull(dev, false);

	nouveau_wait_for_idle(dev);

	pfb->set_tile_region(dev, i);
	pgraph->set_tile_region(dev, i);

	pfifo->cache_pull(dev, true);
	pfifo->reassign(dev, true);
	spin_unlock_irqrestore(&dev_priv->context_switch_lock, save);
}
79
80static struct nouveau_tile_reg *
81nv10_mem_get_tile_region(struct drm_device *dev, int i)
82{
83 struct drm_nouveau_private *dev_priv = dev->dev_private;
84 struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
85
86 spin_lock(&dev_priv->tile.lock);
87
88 if (!tile->used &&
89 (!tile->fence || nouveau_fence_signalled(tile->fence)))
90 tile->used = true;
91 else
92 tile = NULL;
93
94 spin_unlock(&dev_priv->tile.lock);
95 return tile;
96}
97
98void
99nv10_mem_put_tile_region(struct drm_device *dev, struct nouveau_tile_reg *tile,
100 struct nouveau_fence *fence)
101{
102 struct drm_nouveau_private *dev_priv = dev->dev_private;
103
104 if (tile) {
105 spin_lock(&dev_priv->tile.lock);
106 if (fence) {
107 /* Mark it as pending. */
108 tile->fence = fence;
109 nouveau_fence_ref(fence);
110 }
111
112 tile->used = false;
113 spin_unlock(&dev_priv->tile.lock);
114 }
Francisco Jereza0af9ad2009-12-11 16:51:09 +0100115}
116
/*
 * Find a free tile region and program it to cover [addr, addr+size)
 * with the given pitch/flags.  While scanning, any claimable region
 * that still carries a stale configuration is torn down.
 *
 * Returns the programmed region, or NULL if every region was busy
 * (callers must handle the NULL case).
 */
struct nouveau_tile_reg *
nv10_mem_set_tiling(struct drm_device *dev, uint32_t addr, uint32_t size,
		    uint32_t pitch, uint32_t flags)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
	struct nouveau_tile_reg *tile, *found = NULL;
	int i;

	for (i = 0; i < pfb->num_tiles; i++) {
		tile = nv10_mem_get_tile_region(dev, i);

		if (pitch && !found) {
			/* Keep the first region we managed to claim
			 * (tile may be NULL here; then found stays NULL
			 * and the next iteration tries again). */
			found = tile;
			continue;

		} else if (tile && tile->pitch) {
			/* Kill an unused tile region. */
			nv10_mem_update_tile_region(dev, tile, 0, 0, 0, 0);
		}

		/* put_tile_region handles tile == NULL. */
		nv10_mem_put_tile_region(dev, tile, NULL);
	}

	if (found)
		nv10_mem_update_tile_region(dev, found, addr, size,
					    pitch, flags);
	return found;
}
146
Francisco Jereza0af9ad2009-12-11 16:51:09 +0100147/*
Ben Skeggs6ee73862009-12-11 19:24:15 +1000148 * NV50 VM helpers
149 */
/*
 * Map a physically-contiguous VRAM range into the NV50 GPU virtual
 * address space, writing 64-bit PTEs directly into the page tables.
 *
 * virt/size are converted to PTE indices (64KiB pages, 2 words per
 * PTE).  For each chunk the largest power-of-two block (up to 256
 * pages) that is size- and alignment-compatible is chosen, and its
 * log2 is encoded into bits 7+ of the low PTE word so the GPU can use
 * large-page translations.
 *
 * Always returns 0.
 */
int
nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size,
			uint32_t flags, uint64_t phys)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *pgt;
	unsigned block;
	int i;

	/* Convert to units of 64KiB pages, two PTE words per page. */
	virt = ((virt - dev_priv->vm_vram_base) >> 16) << 1;
	size = (size >> 16) << 1;

	/* Build the low PTE bits: storage-type flags, valid bit, and the
	 * system-memory bits when VRAM is carved out of stolen memory. */
	phys |= ((uint64_t)flags << 32);
	phys |= 1;
	if (dev_priv->vram_sys_base) {
		phys += dev_priv->vram_sys_base;
		phys |= 0x30;
	}

	while (size) {
		unsigned offset_h = upper_32_bits(phys);
		unsigned offset_l = lower_32_bits(phys);
		unsigned pte, end;

		/* Largest power-of-two block this address/size allows. */
		for (i = 7; i >= 0; i--) {
			block = 1 << (i + 1);
			if (size >= block && !(virt & (block - 1)))
				break;
		}
		offset_l |= (i << 7);	/* encode log2(block size) */

		phys += block << 15;	/* block is in half-PTE units */
		size -= block;

		/* A block may straddle page tables (16384 PTE words each). */
		while (block) {
			pgt = dev_priv->vm_vram_pt[virt >> 14];
			pte = virt & 0x3ffe;

			end = pte + block;
			if (end > 16384)
				end = 16384;
			block -= (end - pte);
			virt += (end - pte);

			while (pte < end) {
				nv_wo32(pgt, (pte * 4) + 0, offset_l);
				nv_wo32(pgt, (pte * 4) + 4, offset_h);
				pte += 2;
			}
		}
	}

	/* Flush written PTEs and invalidate engine TLBs. */
	dev_priv->engine.instmem.flush(dev);
	dev_priv->engine.fifo.tlb_flush(dev);
	dev_priv->engine.graph.tlb_flush(dev);
	nv50_vm_flush_engine(dev, 6);
	return 0;
}
208
/*
 * Unmap a range previously mapped with nv50_mem_vm_bind_linear():
 * zero every PTE word covering [virt, virt+size), then flush instmem
 * and the engine TLBs.
 */
void
nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *pgt;
	unsigned pages, pte, end;

	virt -= dev_priv->vm_vram_base;
	/* Two 32-bit PTE words per 64KiB page. */
	pages = (size >> 16) << 1;

	while (pages) {
		/* Each page table covers 16384 PTE words; a range may
		 * span several tables. */
		pgt = dev_priv->vm_vram_pt[virt >> 29];
		pte = (virt & 0x1ffe0000ULL) >> 15;

		end = pte + pages;
		if (end > 16384)
			end = 16384;
		pages -= (end - pte);
		virt += (end - pte) << 15;

		while (pte < end) {
			nv_wo32(pgt, (pte * 4), 0);
			pte++;
		}
	}

	dev_priv->engine.instmem.flush(dev);
	dev_priv->engine.fifo.tlb_flush(dev);
	dev_priv->engine.graph.tlb_flush(dev);
	nv50_vm_flush_engine(dev, 6);
}
240
241/*
242 * Cleanup everything
243 */
Ben Skeggsb833ac22010-06-01 15:32:24 +1000244void
Ben Skeggsfbd28952010-09-01 15:24:34 +1000245nouveau_mem_vram_fini(struct drm_device *dev)
Ben Skeggs6ee73862009-12-11 19:24:15 +1000246{
247 struct drm_nouveau_private *dev_priv = dev->dev_private;
248
Ben Skeggsac8fb972010-01-15 09:24:20 +1000249 nouveau_bo_unpin(dev_priv->vga_ram);
250 nouveau_bo_ref(NULL, &dev_priv->vga_ram);
251
Ben Skeggs6ee73862009-12-11 19:24:15 +1000252 ttm_bo_device_release(&dev_priv->ttm.bdev);
253
254 nouveau_ttm_global_release(dev_priv);
255
Ben Skeggsfbd28952010-09-01 15:24:34 +1000256 if (dev_priv->fb_mtrr >= 0) {
257 drm_mtrr_del(dev_priv->fb_mtrr,
258 pci_resource_start(dev->pdev, 1),
259 pci_resource_len(dev->pdev, 1), DRM_MTRR_WC);
260 dev_priv->fb_mtrr = -1;
261 }
262}
263
/*
 * Tear down GART state: release the SGDMA backend and, when AGP is in
 * use, free every bound AGP memory entry and release the bridge.
 */
void
nouveau_mem_gart_fini(struct drm_device *dev)
{
	nouveau_sgdma_takedown(dev);

	if (drm_core_has_AGP(dev) && dev->agp) {
		struct drm_agp_mem *entry, *tempe;

		/* Remove AGP resources, but leave dev->agp
		   intact until drv_cleanup is called. */
		list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) {
			if (entry->bound)
				drm_unbind_agp(entry->memory);
			drm_free_agp(entry->memory, entry->pages);
			kfree(entry);
		}
		INIT_LIST_HEAD(&dev->agp->memory);

		if (dev->agp->acquired)
			drm_agp_release(dev);

		dev->agp->acquired = 0;
		dev->agp->enabled = 0;
	}
}
289
Ben Skeggs6ee73862009-12-11 19:24:15 +1000290static uint32_t
Ben Skeggsa76fb4e2010-03-18 09:45:20 +1000291nouveau_mem_detect_nv04(struct drm_device *dev)
292{
Francisco Jerez3c7066b2010-07-13 15:50:23 +0200293 uint32_t boot0 = nv_rd32(dev, NV04_PFB_BOOT_0);
Ben Skeggsa76fb4e2010-03-18 09:45:20 +1000294
295 if (boot0 & 0x00000100)
296 return (((boot0 >> 12) & 0xf) * 2 + 2) * 1024 * 1024;
297
Francisco Jerez3c7066b2010-07-13 15:50:23 +0200298 switch (boot0 & NV04_PFB_BOOT_0_RAM_AMOUNT) {
299 case NV04_PFB_BOOT_0_RAM_AMOUNT_32MB:
Ben Skeggsa76fb4e2010-03-18 09:45:20 +1000300 return 32 * 1024 * 1024;
Francisco Jerez3c7066b2010-07-13 15:50:23 +0200301 case NV04_PFB_BOOT_0_RAM_AMOUNT_16MB:
Ben Skeggsa76fb4e2010-03-18 09:45:20 +1000302 return 16 * 1024 * 1024;
Francisco Jerez3c7066b2010-07-13 15:50:23 +0200303 case NV04_PFB_BOOT_0_RAM_AMOUNT_8MB:
Ben Skeggsa76fb4e2010-03-18 09:45:20 +1000304 return 8 * 1024 * 1024;
Francisco Jerez3c7066b2010-07-13 15:50:23 +0200305 case NV04_PFB_BOOT_0_RAM_AMOUNT_4MB:
Ben Skeggsa76fb4e2010-03-18 09:45:20 +1000306 return 4 * 1024 * 1024;
307 }
308
309 return 0;
310}
311
/*
 * Read the VRAM carve-out size of an nForce/nForce2 IGP from the
 * host bridge's PCI config space (the IGP shares system memory).
 * Returns the size in bytes, or 0 on failure.
 *
 * NOTE(review): the pci_dev reference from pci_get_bus_and_slot() is
 * never put — presumably harmless for a boot-time lookup, but worth
 * confirming against pci_get_* refcounting rules.
 */
static uint32_t
nouveau_mem_detect_nforce(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct pci_dev *bridge;
	uint32_t mem;

	bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 1));
	if (!bridge) {
		NV_ERROR(dev, "no bridge device\n");
		return 0;
	}

	if (dev_priv->flags & NV_NFORCE) {
		/* nForce: size field at config offset 0x7C, bits 6-10. */
		pci_read_config_dword(bridge, 0x7C, &mem);
		return (uint64_t)(((mem >> 6) & 31) + 1)*1024*1024;
	} else
	if (dev_priv->flags & NV_NFORCE2) {
		/* nForce2: size field at config offset 0x84, bits 4-10. */
		pci_read_config_dword(bridge, 0x84, &mem);
		return (uint64_t)(((mem >> 4) & 127) + 1)*1024*1024;
	}

	/* Caller only reaches here with one of the flags set. */
	NV_ERROR(dev, "impossible!\n");
	return 0;
}
337
/*
 * Determine the amount of VRAM for the detected card generation and
 * store it in dev_priv->vram_size.
 *
 * Returns 0 on success, -ENOMEM when detection yields no memory (or
 * nv50_vram_init() fails).
 */
static int
nouveau_mem_detect(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	if (dev_priv->card_type == NV_04) {
		dev_priv->vram_size = nouveau_mem_detect_nv04(dev);
	} else
	if (dev_priv->flags & (NV_NFORCE | NV_NFORCE2)) {
		/* IGPs: VRAM is carved out of system memory. */
		dev_priv->vram_size = nouveau_mem_detect_nforce(dev);
	} else
	if (dev_priv->card_type < NV_50) {
		dev_priv->vram_size = nv_rd32(dev, NV04_PFB_FIFO_DATA);
		dev_priv->vram_size &= NV10_PFB_FIFO_DATA_RAM_AMOUNT_MB_MASK;
	} else
	if (dev_priv->card_type < NV_C0) {
		/* NV50-family: dedicated VRAM backend does detection. */
		if (nv50_vram_init(dev))
			return -ENOMEM;
	} else {
		/* NVC0+: per-partition size times partition count. */
		dev_priv->vram_size = nv_rd32(dev, 0x10f20c) << 20;
		dev_priv->vram_size *= nv_rd32(dev, 0x121c74);
	}

	NV_INFO(dev, "Detected %dMiB VRAM\n", (int)(dev_priv->vram_size >> 20));
	if (dev_priv->vram_sys_base) {
		NV_INFO(dev, "Stolen system memory at: 0x%010llx\n",
			dev_priv->vram_sys_base);
	}

	if (dev_priv->vram_size)
		return 0;
	return -ENOMEM;
}
371
#if __OS_HAS_AGP
/*
 * Adjust an AGP mode word before it is handed to drm_agp_enable():
 * drop fast writes on nv18 (known to lock up), and override the rate
 * bits when the user forced one via the nouveau_agpmode option.
 */
static unsigned long
get_agp_mode(struct drm_device *dev, unsigned long mode)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	/*
	 * FW seems to be broken on nv18, it makes the card lock up
	 * randomly.
	 */
	if (dev_priv->chipset == 0x18)
		mode &= ~PCI_AGP_COMMAND_FW;

	/*
	 * AGP mode set in the command line.
	 */
	if (nouveau_agpmode > 0) {
		bool is_v3 = mode & 0x8;
		int req_rate;

		/* AGP3 rates are expressed in multiples of 4x. */
		req_rate = is_v3 ? nouveau_agpmode / 4 : nouveau_agpmode;
		mode = (mode & ~0x7) | (req_rate & 0x7);
	}

	return mode;
}
#endif
398
/*
 * Reset the card's AGP controller: disable fast writes if needed,
 * disable AGP and bus mastering, power-cycle PGRAPH, then restore the
 * saved PCI config to re-enable everything.  The register sequence is
 * order-sensitive; do not reorder.
 *
 * Compiled to a no-op returning 0 when the kernel has no AGP support.
 *
 * NOTE(review): dev->agp is dereferenced without a NULL check — the
 * visible caller (nouveau_mem_init_agp) guarantees it, but confirm for
 * any other call sites.
 */
int
nouveau_mem_reset_agp(struct drm_device *dev)
{
#if __OS_HAS_AGP
	uint32_t saved_pci_nv_1, pmc_enable;
	int ret;

	/* First of all, disable fast writes, otherwise if it's
	 * already enabled in the AGP bridge and we disable the card's
	 * AGP controller we might be locking ourselves out of it. */
	if ((nv_rd32(dev, NV04_PBUS_PCI_NV_19) |
	     dev->agp->mode) & PCI_AGP_COMMAND_FW) {
		struct drm_agp_info info;
		struct drm_agp_mode mode;

		ret = drm_agp_info(dev, &info);
		if (ret)
			return ret;

		mode.mode = get_agp_mode(dev, info.mode) & ~PCI_AGP_COMMAND_FW;
		ret = drm_agp_enable(dev, mode);
		if (ret)
			return ret;
	}

	saved_pci_nv_1 = nv_rd32(dev, NV04_PBUS_PCI_NV_1);

	/* clear busmaster bit */
	nv_wr32(dev, NV04_PBUS_PCI_NV_1, saved_pci_nv_1 & ~0x4);
	/* disable AGP */
	nv_wr32(dev, NV04_PBUS_PCI_NV_19, 0);

	/* power cycle pgraph, if enabled */
	pmc_enable = nv_rd32(dev, NV03_PMC_ENABLE);
	if (pmc_enable & NV_PMC_ENABLE_PGRAPH) {
		nv_wr32(dev, NV03_PMC_ENABLE,
			pmc_enable & ~NV_PMC_ENABLE_PGRAPH);
		nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
			NV_PMC_ENABLE_PGRAPH);
	}

	/* and restore (gives effect of resetting AGP) */
	nv_wr32(dev, NV04_PBUS_PCI_NV_1, saved_pci_nv_1);
#endif

	return 0;
}
446
/*
 * Bring up the AGP GART: acquire the bridge, reset the card's AGP
 * controller, then enable AGP at the negotiated mode and record the
 * aperture in dev_priv->gart_info.
 *
 * Returns 0 on success (and unconditionally when the kernel lacks AGP
 * support) or a negative errno from the drm_agp_* helpers.
 */
int
nouveau_mem_init_agp(struct drm_device *dev)
{
#if __OS_HAS_AGP
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_agp_info info;
	struct drm_agp_mode mode;
	int ret;

	if (!dev->agp->acquired) {
		ret = drm_agp_acquire(dev);
		if (ret) {
			NV_ERROR(dev, "Unable to acquire AGP: %d\n", ret);
			return ret;
		}
	}

	/* Best-effort; its return value is deliberately ignored. */
	nouveau_mem_reset_agp(dev);

	ret = drm_agp_info(dev, &info);
	if (ret) {
		NV_ERROR(dev, "Unable to get AGP info: %d\n", ret);
		return ret;
	}

	/* see agp.h for the AGPSTAT_* modes available */
	mode.mode = get_agp_mode(dev, info.mode);
	ret = drm_agp_enable(dev, mode);
	if (ret) {
		NV_ERROR(dev, "Unable to enable AGP: %d\n", ret);
		return ret;
	}

	dev_priv->gart_info.type = NOUVEAU_GART_AGP;
	dev_priv->gart_info.aper_base = info.aperture_base;
	dev_priv->gart_info.aper_size = info.aperture_size;
#endif
	return 0;
}
486
/*
 * Initialise VRAM management: set the DMA mask, bring up TTM, reserve
 * the PRAMIN carve-out, detect VRAM size, create the TTM VRAM pool,
 * reserve the VGA scanout buffer and add a write-combining MTRR over
 * the framebuffer BAR.  Steps are order-dependent.
 *
 * Returns 0 on success or a negative errno; a failed VGA reservation
 * is only a warning (vga_ram is left NULL).
 */
int
nouveau_mem_vram_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
	int ret, dma_bits;

	/* NV50+ can address 40 bits if the platform allows it. */
	if (dev_priv->card_type >= NV_50 &&
	    pci_dma_supported(dev->pdev, DMA_BIT_MASK(40)))
		dma_bits = 40;
	else
		dma_bits = 32;

	ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits));
	if (ret)
		return ret;

	/* BAR1 is the framebuffer aperture. */
	dev_priv->fb_phys = pci_resource_start(dev->pdev, 1);

	ret = nouveau_ttm_global_init(dev_priv);
	if (ret)
		return ret;

	ret = ttm_bo_device_init(&dev_priv->ttm.bdev,
				 dev_priv->ttm.bo_global_ref.ref.object,
				 &nouveau_bo_driver, DRM_FILE_PAGE_OFFSET,
				 dma_bits <= 32 ? true : false);
	if (ret) {
		NV_ERROR(dev, "Error initialising bo driver: %d\n", ret);
		return ret;
	}

	/* reserve space at end of VRAM for PRAMIN */
	if (dev_priv->chipset == 0x40 || dev_priv->chipset == 0x47 ||
	    dev_priv->chipset == 0x49 || dev_priv->chipset == 0x4b)
		dev_priv->ramin_rsvd_vram = (2 * 1024 * 1024);
	else
	if (dev_priv->card_type >= NV_40)
		dev_priv->ramin_rsvd_vram = (1 * 1024 * 1024);
	else
		dev_priv->ramin_rsvd_vram = (512 * 1024);

	/* initialise gpu-specific vram backend */
	ret = nouveau_mem_detect(dev);
	if (ret)
		return ret;

	dev_priv->fb_available_size = dev_priv->vram_size;
	/* Mappable VRAM is capped by the BAR1 aperture size. */
	dev_priv->fb_mappable_pages = dev_priv->fb_available_size;
	if (dev_priv->fb_mappable_pages > pci_resource_len(dev->pdev, 1))
		dev_priv->fb_mappable_pages = pci_resource_len(dev->pdev, 1);
	dev_priv->fb_mappable_pages >>= PAGE_SHIFT;

	dev_priv->fb_available_size -= dev_priv->ramin_rsvd_vram;
	dev_priv->fb_aper_free = dev_priv->fb_available_size;

	/* mappable vram */
	ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
			     dev_priv->fb_available_size >> PAGE_SHIFT);
	if (ret) {
		NV_ERROR(dev, "Failed VRAM mm init: %d\n", ret);
		return ret;
	}

	/* Pin 256KiB for VGA scanout; failure is non-fatal and leaves
	 * vga_ram NULL. */
	ret = nouveau_bo_new(dev, NULL, 256*1024, 0, TTM_PL_FLAG_VRAM,
			     0, 0, true, true, &dev_priv->vga_ram);
	if (ret == 0)
		ret = nouveau_bo_pin(dev_priv->vga_ram, TTM_PL_FLAG_VRAM);
	if (ret) {
		NV_WARN(dev, "failed to reserve VGA memory\n");
		nouveau_bo_ref(NULL, &dev_priv->vga_ram);
	}

	dev_priv->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 1),
					 pci_resource_len(dev->pdev, 1),
					 DRM_MTRR_WC);
	return 0;
}
565
/*
 * Initialise the GART: prefer AGP where available and enabled on the
 * command line (never on powerpc/ia64), fall back to the SGDMA
 * PCI(E) backend, then create the TTM_PL_TT pool over the aperture.
 *
 * Returns 0 on success or a negative errno.  An AGP init failure is
 * only logged; the SGDMA fallback then applies.
 */
int
nouveau_mem_gart_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
	int ret;

	dev_priv->gart_info.type = NOUVEAU_GART_NONE;

#if !defined(__powerpc__) && !defined(__ia64__)
	if (drm_device_is_agp(dev) && dev->agp && nouveau_agpmode) {
		ret = nouveau_mem_init_agp(dev);
		if (ret)
			NV_ERROR(dev, "Error initialising AGP: %d\n", ret);
	}
#endif

	if (dev_priv->gart_info.type == NOUVEAU_GART_NONE) {
		ret = nouveau_sgdma_init(dev);
		if (ret) {
			NV_ERROR(dev, "Error initialising PCI(E): %d\n", ret);
			return ret;
		}
	}

	NV_INFO(dev, "%d MiB GART (aperture)\n",
		(int)(dev_priv->gart_info.aper_size >> 20));
	dev_priv->gart_info.aper_free = dev_priv->gart_info.aper_size;

	ret = ttm_bo_init_mm(bdev, TTM_PL_TT,
			     dev_priv->gart_info.aper_size >> PAGE_SHIFT);
	if (ret) {
		NV_ERROR(dev, "Failed TT mm init: %d\n", ret);
		return ret;
	}

	return 0;
}
604
/*
 * Parse the VBIOS memory timing table (BIT 'P' table, header version
 * 0x10) into pm->memtimings for the reclocking code.  On any parse
 * failure the function returns silently and memtimings->supported
 * stays false.
 */
void
nouveau_mem_timing_init(struct drm_device *dev)
{
	/* cards < NVC0 only */
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
	struct nouveau_pm_memtimings *memtimings = &pm->memtimings;
	struct nvbios *bios = &dev_priv->vbios;
	struct bit_entry P;
	u8 tUNK_0, tUNK_1, tUNK_2;
	u8 tRP;		/* Byte 3 */
	u8 tRAS;	/* Byte 5 */
	u8 tRFC;	/* Byte 7 */
	u8 tRC;		/* Byte 9 */
	u8 tUNK_10, tUNK_11, tUNK_12, tUNK_13, tUNK_14;
	u8 tUNK_18, tUNK_19, tUNK_20, tUNK_21;
	u8 *mem = NULL, *entry;
	int i, recordlen, entries;

	/* Locate the timing table via the BIT 'P' (performance) table;
	 * the pointer offset differs between P-table versions. */
	if (bios->type == NVBIOS_BIT) {
		if (bit_table(dev, 'P', &P))
			return;

		if (P.version == 1)
			mem = ROMPTR(bios, P.data[4]);
		else
		if (P.version == 2)
			mem = ROMPTR(bios, P.data[8]);
		else {
			NV_WARN(dev, "unknown mem for BIT P %d\n", P.version);
		}
	} else {
		NV_DEBUG(dev, "BMP version too old for memory\n");
		return;
	}

	if (!mem) {
		NV_DEBUG(dev, "memory timing table pointer invalid\n");
		return;
	}

	if (mem[0] != 0x10) {
		NV_WARN(dev, "memory timing table 0x%02x unknown\n", mem[0]);
		return;
	}

	/* validate record length */
	entries = mem[2];
	recordlen = mem[3];
	if (recordlen < 15) {
		NV_ERROR(dev, "mem timing table length unknown: %d\n", mem[3]);
		return;
	}

	/* parse vbios entries into common format */
	memtimings->timing =
		kcalloc(entries, sizeof(*memtimings->timing), GFP_KERNEL);
	if (!memtimings->timing)
		return;

	entry = mem + mem[1];	/* mem[1] = header length */
	for (i = 0; i < entries; i++, entry += recordlen) {
		struct nouveau_pm_memtiming *timing = &pm->memtimings.timing[i];
		if (entry[0] == 0)
			continue;	/* empty slot */

		/* Defaults for fields absent from short records. */
		tUNK_18 = 1;
		tUNK_19 = 1;
		tUNK_20 = 0;
		tUNK_21 = 0;
		/* Deliberate fallthrough: longer records pick up every
		 * optional byte down to the mandatory ones. */
		switch (min(recordlen, 22)) {
		case 22:
			tUNK_21 = entry[21];
			/* fall through */
		case 21:
			tUNK_20 = entry[20];
			/* fall through */
		case 20:
			tUNK_19 = entry[19];
			/* fall through */
		case 19:
			tUNK_18 = entry[18];
			/* fall through */
		default:
			tUNK_0  = entry[0];
			tUNK_1  = entry[1];
			tUNK_2  = entry[2];
			tRP     = entry[3];
			tRAS    = entry[5];
			tRFC    = entry[7];
			tRC     = entry[9];
			tUNK_10 = entry[10];
			tUNK_11 = entry[11];
			tUNK_12 = entry[12];
			tUNK_13 = entry[13];
			tUNK_14 = entry[14];
			break;
		}

		timing->reg_100220 = (tRC << 24 | tRFC << 16 | tRAS << 8 | tRP);

		/* XXX: I don't trust the -1's and +1's... they must come
		 * from somewhere! */
		timing->reg_100224 = ((tUNK_0 + tUNK_19 + 1) << 24 |
				      tUNK_18 << 16 |
				      (tUNK_1 + tUNK_19 + 1) << 8 |
				      (tUNK_2 - 1));

		timing->reg_100228 = (tUNK_12 << 16 | tUNK_11 << 8 | tUNK_10);
		if(recordlen > 19) {
			timing->reg_100228 += (tUNK_19 - 1) << 24;
		}/* I cannot back-up this else-statement right now
		 else {
			timing->reg_100228 += tUNK_12 << 24;
		}*/

		/* XXX: reg_10022c */
		timing->reg_10022c = tUNK_2 - 1;

		timing->reg_100230 = (tUNK_20 << 24 | tUNK_21 << 16 |
				      tUNK_13 << 8 | tUNK_13);

		/* XXX: +6? */
		timing->reg_100234 = (tRAS << 24 | (tUNK_19 + 6) << 8 | tRC);
		timing->reg_100234 += max(tUNK_10,tUNK_11) << 16;

		/* XXX; reg_100238, reg_10023c
		 * reg: 0x00??????
		 * reg_10023c:
		 *	0 for pre-NV50 cards
		 *	0x????0202 for NV50+ cards (empirical evidence) */
		if(dev_priv->card_type >= NV_50) {
			timing->reg_10023c = 0x202;
		}

		NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x %08x\n", i,
			 timing->reg_100220, timing->reg_100224,
			 timing->reg_100228, timing->reg_10022c);
		NV_DEBUG(dev, "         230: %08x %08x %08x %08x\n",
			 timing->reg_100230, timing->reg_100234,
			 timing->reg_100238, timing->reg_10023c);
	}

	memtimings->nr_timing = entries;
	memtimings->supported = true;
}
747
748void
749nouveau_mem_timing_fini(struct drm_device *dev)
750{
751 struct drm_nouveau_private *dev_priv = dev->dev_private;
752 struct nouveau_pm_memtimings *mem = &dev_priv->engine.pm.memtimings;
753
754 kfree(mem->timing);
755}
Ben Skeggs573a2a32010-08-25 15:26:04 +1000756
757static int
758nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long p_size)
759{
760 struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
761 struct nouveau_mm *mm;
762 u32 b_size;
763 int ret;
764
765 p_size = (p_size << PAGE_SHIFT) >> 12;
766 b_size = dev_priv->vram_rblock_size >> 12;
767
768 ret = nouveau_mm_init(&mm, 0, p_size, b_size);
769 if (ret)
770 return ret;
771
772 man->priv = mm;
773 return 0;
774}
775
776static int
777nouveau_vram_manager_fini(struct ttm_mem_type_manager *man)
778{
779 struct nouveau_mm *mm = man->priv;
780 int ret;
781
782 ret = nouveau_mm_fini(&mm);
783 if (ret)
784 return ret;
785
786 man->priv = NULL;
787 return 0;
788}
789
/*
 * TTM put_node callback: return the nouveau_vram allocation stored in
 * mem->mm_node to the NV50 VRAM backend (which also NULLs the node).
 */
static void
nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
			 struct ttm_mem_reg *mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
	struct drm_device *dev = dev_priv->dev;

	nv50_vram_del(dev, (struct nouveau_vram **)&mem->mm_node);
}
799
/*
 * TTM get_node callback: allocate VRAM for a buffer object from the
 * NV50 backend (64KiB-aligned, storage type taken from the bo's tile
 * flags) and record the result in mem->mm_node / mem->start.
 *
 * Returns 0 on success or the nv50_vram_new() error.
 */
static int
nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
			 struct ttm_buffer_object *bo,
			 struct ttm_placement *placement,
			 struct ttm_mem_reg *mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
	struct drm_device *dev = dev_priv->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_vram *vram;
	int ret;

	/* Storage type lives in bits 8-14 of the bo's tile flags. */
	ret = nv50_vram_new(dev, mem->num_pages << PAGE_SHIFT, 65536, 0,
			    (nvbo->tile_flags >> 8) & 0x7f, &vram);
	if (ret)
		return ret;

	mem->mm_node = vram;
	mem->start = vram->offset >> PAGE_SHIFT;
	return 0;
}
821
822void
823nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
824{
825 struct ttm_bo_global *glob = man->bdev->glob;
826 struct nouveau_mm *mm = man->priv;
827 struct nouveau_mm_node *r;
828 u64 total = 0, ttotal[3] = {}, tused[3] = {}, tfree[3] = {};
829 int i;
830
831 mutex_lock(&mm->mutex);
832 list_for_each_entry(r, &mm->nodes, nl_entry) {
833 printk(KERN_DEBUG "%s %s-%d: 0x%010llx 0x%010llx\n",
834 prefix, r->free ? "free" : "used", r->type,
835 ((u64)r->offset << 12),
836 (((u64)r->offset + r->length) << 12));
837 total += r->length;
838 ttotal[r->type] += r->length;
839 if (r->free)
840 tfree[r->type] += r->length;
841 else
842 tused[r->type] += r->length;
843 }
844 mutex_unlock(&mm->mutex);
845
846 printk(KERN_DEBUG "%s total: 0x%010llx\n", prefix, total << 12);
847 for (i = 0; i < 3; i++) {
848 printk(KERN_DEBUG "%s type %d: 0x%010llx, "
849 "used 0x%010llx, free 0x%010llx\n", prefix,
850 i, ttotal[i] << 12, tused[i] << 12, tfree[i] << 12);
851 }
852}
853
/* TTM memory-type manager vtable for the VRAM pool.  Positional
 * initializers — the order must match struct ttm_mem_type_manager_func
 * (presumably init, takedown, get_node, put_node, debug for this TTM
 * version; confirm against ttm_bo_driver.h before reordering). */
const struct ttm_mem_type_manager_func nouveau_vram_manager = {
	nouveau_vram_manager_init,	/* init */
	nouveau_vram_manager_fini,	/* takedown */
	nouveau_vram_manager_new,	/* get_node */
	nouveau_vram_manager_del,	/* put_node */
	nouveau_vram_manager_debug	/* debug */
};