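/*
 * nv04_instmem: instance memory (PRAMIN) setup for NV04-NV4x class boards.
 *
 * On these chips instance memory is a block reserved at the end of VRAM
 * and exposed through the PRAMIN aperture.  It holds the shared RAMHT
 * (object hash table), RAMRO (runout area) and RAMFC (per-channel FIFO
 * context) structures, with the remainder handed to a drm_mm heap for
 * general GPU object allocation.
 */
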
#include <drm/drmP.h>

#include "nouveau_drv.h"
#include "nouveau_fifo.h"
#include "nouveau_ramht.h"

/* returns the total size of the RAMFC area (fifo context for all channels) */
static int
nouveau_fifo_ctx_size(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	if (dev_priv->chipset >= 0x40)
		return 128 * 32;
	else
	if (dev_priv->chipset >= 0x17)
		return 64 * 32;
	else
	if (dev_priv->chipset >= 0x10)
		return 32 * 32;

	return 32 * 16;
}

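/*
 * Carve up the reserved instance memory.  The layout set up below is:
 *
 *   0x10000: RAMHT - shared object hash table (4KiB)
 *   0x11200: RAMRO - runout area for faulting FIFO accesses (512 bytes)
 *   0x11400: RAMFC - per-channel FIFO context (0x20000 on NV40+)
 *
 * Everything after RAMFC (or after 0x40000 on NV40+, see below) is
 * handed to the RAMIN heap for object allocation.
 */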
int nv04_instmem_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *ramht = NULL;
	u32 offset, length;
	int ret;

	/* RAMIN always available */
	dev_priv->ramin_available = true;

	/* Reserve space at end of VRAM for PRAMIN */
	if (dev_priv->card_type >= NV_40) {
		u32 vs = hweight8((nv_rd32(dev, 0x001540) & 0x0000ff00) >> 8);
		u32 rsvd;

		/* estimate grctx size, the magics come from nv40_grctx.c */
		if      (dev_priv->chipset == 0x40) rsvd = 0x6aa0 * vs;
		else if (dev_priv->chipset  < 0x43) rsvd = 0x4f00 * vs;
		else if (nv44_graph_class(dev))     rsvd = 0x4980 * vs;
		else                                rsvd = 0x4a40 * vs;
		rsvd += 16 * 1024;
		rsvd *= 32; /* per-channel */

		rsvd += 512 * 1024; /* pci(e)gart table */
		rsvd += 512 * 1024; /* object storage */

		dev_priv->ramin_rsvd_vram = round_up(rsvd, 4096);
	} else {
		dev_priv->ramin_rsvd_vram = 512 * 1024;
	}

	/* Setup shared RAMHT */
	ret = nouveau_gpuobj_new_fake(dev, 0x10000, ~0, 4096,
				      NVOBJ_FLAG_ZERO_ALLOC, &ramht);
	if (ret)
		return ret;

	ret = nouveau_ramht_new(dev, ramht, &dev_priv->ramht);
	nouveau_gpuobj_ref(NULL, &ramht);
	if (ret)
		return ret;

	/* And RAMRO */
	ret = nouveau_gpuobj_new_fake(dev, 0x11200, ~0, 512,
				      NVOBJ_FLAG_ZERO_ALLOC, &dev_priv->ramro);
	if (ret)
		return ret;

	/* And RAMFC */
	length = nouveau_fifo_ctx_size(dev);
	switch (dev_priv->card_type) {
	case NV_40:
		offset = 0x20000;
		break;
	default:
		offset = 0x11400;
		break;
	}

	ret = nouveau_gpuobj_new_fake(dev, offset, ~0, length,
				      NVOBJ_FLAG_ZERO_ALLOC, &dev_priv->ramfc);
	if (ret)
		return ret;

	/* Only allow space after RAMFC to be used for object allocation */
	offset += length;

	/* It appears RAMRO (or something?) is controlled by 0x2220/0x2230
	 * on certain NV4x chipsets as well as RAMFC.  When 0x2230 == 0
	 * ("new style" control) the upper 16 bits of 0x2220 point at this
	 * other mysterious table that's clobbering important things.
	 *
	 * We're now pointing this at RAMIN+0x30000 to avoid RAMFC getting
	 * smashed to pieces on us, so reserve 0x30000-0x40000 too..
	 */
	if (dev_priv->card_type >= NV_40) {
		if (offset < 0x40000)
			offset = 0x40000;
	}

	ret = drm_mm_init(&dev_priv->ramin_heap, offset,
			  dev_priv->ramin_rsvd_vram - offset);
	if (ret) {
		NV_ERROR(dev, "Failed to init RAMIN heap: %d\n", ret);
		return ret;
	}

	return 0;
}

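/*
 * Tear down everything created in nv04_instmem_init(): drop the RAMHT
 * reference and the fake RAMRO/RAMFC objects, then release the heap.
 */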
void
nv04_instmem_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	nouveau_ramht_ref(NULL, &dev_priv->ramht, NULL);
	nouveau_gpuobj_ref(NULL, &dev_priv->ramro);
	nouveau_gpuobj_ref(NULL, &dev_priv->ramfc);

	if (drm_mm_initialized(&dev_priv->ramin_heap))
		drm_mm_takedown(&dev_priv->ramin_heap);
}

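/*
 * Nothing chip-specific to save or restore across suspend here; any
 * instance memory contents that need preserving are expected to be
 * handled by the common gpuobj suspend/resume paths.
 */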
int
nv04_instmem_suspend(struct drm_device *dev)
{
	return 0;
}

void
nv04_instmem_resume(struct drm_device *dev)
{
}

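/*
 * Allocate a block of instance memory for a gpuobj.  drm_mm_pre_get()
 * preallocates node storage so the search and grab can run under the
 * spinlock without sleeping; if another thread steals the block between
 * the search and the atomic get, loop and try again.
 */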
int
nv04_instmem_get(struct nouveau_gpuobj *gpuobj, struct nouveau_channel *chan,
		 u32 size, u32 align)
{
	struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
	struct drm_mm_node *ramin = NULL;

	do {
		if (drm_mm_pre_get(&dev_priv->ramin_heap))
			return -ENOMEM;

		spin_lock(&dev_priv->ramin_lock);
		ramin = drm_mm_search_free(&dev_priv->ramin_heap, size, align, 0);
		if (ramin == NULL) {
			spin_unlock(&dev_priv->ramin_lock);
			return -ENOMEM;
		}

		ramin = drm_mm_get_block_atomic(ramin, size, align);
		spin_unlock(&dev_priv->ramin_lock);
	} while (ramin == NULL);

	gpuobj->node  = ramin;
	gpuobj->vinst = ramin->start;
	return 0;
}

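/* Return a gpuobj's block to the RAMIN heap. */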
void
nv04_instmem_put(struct nouveau_gpuobj *gpuobj)
{
	struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;

	spin_lock(&dev_priv->ramin_lock);
	drm_mm_put_block(gpuobj->node);
	gpuobj->node = NULL;
	spin_unlock(&dev_priv->ramin_lock);
}

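/*
 * Pre-NV50 instance memory is always visible through the PRAMIN BAR
 * window, so "mapping" just aliases the physical instance address and
 * there is nothing to unmap or flush.
 */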
int
nv04_instmem_map(struct nouveau_gpuobj *gpuobj)
{
	gpuobj->pinst = gpuobj->vinst;
	return 0;
}

void
nv04_instmem_unmap(struct nouveau_gpuobj *gpuobj)
{
}

void
nv04_instmem_flush(struct drm_device *dev)
{
}