#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_ramht.h"

/* Returns the size of a single per-channel FIFO context (RAMFC entry).
 * The context grew with the hardware: 32 bytes on chipsets prior to
 * NV17, 64 bytes from NV17, and 128 bytes from NV40 onwards.
 */
static int
nouveau_fifo_ctx_size(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;

        if (dev_priv->chipset >= 0x40)
                return 128;
        else if (dev_priv->chipset >= 0x17)
                return 64;

        return 32;
}

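/* One-time setup of the fixed PRAMIN layout used on these chipsets:
 * RAMHT at 0x10000, RAMRO at 0x11200, RAMFC at 0x11400 (0x20000 on
 * NV40+), with everything after RAMFC handed over to the allocation
 * heap backing nv04_instmem_get().
 */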
int nv04_instmem_init(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj *ramht = NULL;
        u32 offset, length;
        int ret;

        /* RAMIN always available */
        dev_priv->ramin_available = true;

        /* Reserve space at end of VRAM for PRAMIN */
        if (dev_priv->card_type >= NV_40) {
                u32 vs = hweight8((nv_rd32(dev, 0x001540) & 0x0000ff00) >> 8);
                u32 rsvd;

                /* estimate grctx size, the magics come from nv40_grctx.c */
                if      (dev_priv->chipset == 0x40) rsvd = 0x6aa0 * vs;
                else if (dev_priv->chipset  < 0x43) rsvd = 0x4f00 * vs;
                else if (nv44_graph_class(dev))     rsvd = 0x4980 * vs;
                else                                rsvd = 0x4a40 * vs;
                rsvd += 16 * 1024;
                rsvd *= dev_priv->engine.fifo.channels;

                /* pciegart table */
                if (pci_is_pcie(dev->pdev))
                        rsvd += 512 * 1024;

                /* object storage */
                rsvd += 512 * 1024;

                dev_priv->ramin_rsvd_vram = round_up(rsvd, 4096);
        } else {
                dev_priv->ramin_rsvd_vram = 512 * 1024;
        }

        /* Setup shared RAMHT, the hash table PFIFO uses to translate
         * object handles into instance memory addresses */
        ret = nouveau_gpuobj_new_fake(dev, 0x10000, ~0, 4096,
                                      NVOBJ_FLAG_ZERO_ALLOC, &ramht);
        if (ret)
                return ret;

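        /* nouveau_ramht_new() takes its own reference on the backing
         * gpuobj, so the local one can be dropped straight away */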
        ret = nouveau_ramht_new(dev, ramht, &dev_priv->ramht);
        nouveau_gpuobj_ref(NULL, &ramht);
        if (ret)
                return ret;

        /* And RAMRO, the FIFO "runout" area PFIFO falls back to on
         * illegal accesses */
        ret = nouveau_gpuobj_new_fake(dev, 0x11200, ~0, 512,
                                      NVOBJ_FLAG_ZERO_ALLOC, &dev_priv->ramro);
        if (ret)
                return ret;

        /* And RAMFC, one FIFO context entry per channel */
        length = dev_priv->engine.fifo.channels * nouveau_fifo_ctx_size(dev);
        switch (dev_priv->card_type) {
        case NV_40:
                offset = 0x20000;
                break;
        default:
                offset = 0x11400;
                break;
        }

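        /* nouveau_gpuobj_new_fake() wraps a gpuobj around an explicit
         * PRAMIN offset rather than allocating one from the heap */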
        ret = nouveau_gpuobj_new_fake(dev, offset, ~0, length,
                                      NVOBJ_FLAG_ZERO_ALLOC, &dev_priv->ramfc);
        if (ret)
                return ret;

        /* Only allow space after RAMFC to be used for object allocation */
        offset += length;

        /* It appears RAMRO (or something?) is controlled by 0x2220/0x2230
         * on certain NV4x chipsets as well as RAMFC.  When 0x2230 == 0
         * ("new style" control) the upper 16 bits of 0x2220 point at this
         * other mysterious table that's clobbering important things.
         *
         * We're now pointing this at RAMIN+0x30000 to avoid RAMFC getting
         * smashed to pieces on us, so reserve 0x30000-0x40000 too.
         */
        if (dev_priv->card_type >= NV_40) {
                if (offset < 0x40000)
                        offset = 0x40000;
        }

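        /* Everything from here to the end of the reserved VRAM becomes
         * the heap nv04_instmem_get() allocates gpuobjs from */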
        ret = drm_mm_init(&dev_priv->ramin_heap, offset,
                          dev_priv->ramin_rsvd_vram - offset);
        if (ret) {
                NV_ERROR(dev, "Failed to init RAMIN heap: %d\n", ret);
                return ret;
        }

        return 0;
}

void
nv04_instmem_takedown(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;

        nouveau_ramht_ref(NULL, &dev_priv->ramht, NULL);
        nouveau_gpuobj_ref(NULL, &dev_priv->ramro);
        nouveau_gpuobj_ref(NULL, &dev_priv->ramfc);

        if (drm_mm_initialized(&dev_priv->ramin_heap))
                drm_mm_takedown(&dev_priv->ramin_heap);
}

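/* No chipset-specific state needs saving or restoring across suspend
 * on NV04-class instmem, hence the empty hooks below */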
int
nv04_instmem_suspend(struct drm_device *dev)
{
        return 0;
}

void
nv04_instmem_resume(struct drm_device *dev)
{
}

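/* Allocate a block of PRAMIN for a gpuobj.  drm_mm_pre_get() refills
 * the mm's cache of free nodes outside the spinlock, so no sleeping
 * allocation is needed while it's held; if a racing allocation drains
 * that cache, drm_mm_get_block_atomic() fails and we simply retry. */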
int
nv04_instmem_get(struct nouveau_gpuobj *gpuobj, struct nouveau_channel *chan,
                 u32 size, u32 align)
{
        struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
        struct drm_mm_node *ramin = NULL;

        do {
                if (drm_mm_pre_get(&dev_priv->ramin_heap))
                        return -ENOMEM;

                spin_lock(&dev_priv->ramin_lock);
                ramin = drm_mm_search_free(&dev_priv->ramin_heap, size, align, 0);
                if (ramin == NULL) {
                        spin_unlock(&dev_priv->ramin_lock);
                        return -ENOMEM;
                }

                ramin = drm_mm_get_block_atomic(ramin, size, align);
                spin_unlock(&dev_priv->ramin_lock);
        } while (ramin == NULL);

        gpuobj->node = ramin;
        gpuobj->vinst = ramin->start;
        return 0;
}

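/* Return a gpuobj's PRAMIN block to the heap */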
void
nv04_instmem_put(struct nouveau_gpuobj *gpuobj)
{
        struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;

        spin_lock(&dev_priv->ramin_lock);
        drm_mm_put_block(gpuobj->node);
        gpuobj->node = NULL;
        spin_unlock(&dev_priv->ramin_lock);
}

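/* On these chipsets instance memory is directly visible through the
 * PRAMIN aperture at the same offset, so "mapping" a gpuobj just
 * mirrors vinst into pinst; unmap and flush have nothing to do. */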
int
nv04_instmem_map(struct nouveau_gpuobj *gpuobj)
{
        gpuobj->pinst = gpuobj->vinst;
        return 0;
}

void
nv04_instmem_unmap(struct nouveau_gpuobj *gpuobj)
{
}

void
nv04_instmem_flush(struct drm_device *dev)
{
}