/*
 * Copyright 2010 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include "drmP.h"

#include "nouveau_drv.h"

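/*
 * Allocate and pin a VRAM buffer object to back an instance-memory (PRAMIN)
 * object, rounding the requested size up to a whole 4KiB page and recording
 * the object's VRAM address in gpuobj->vinst.
 */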
int
nvc0_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj,
		      uint32_t *size)
{
	int ret;

	*size = ALIGN(*size, 4096);
	if (*size == 0)
		return -EINVAL;

	ret = nouveau_bo_new(dev, NULL, *size, 0, TTM_PL_FLAG_VRAM, 0, 0x0000,
			     true, false, &gpuobj->im_backing);
	if (ret) {
		NV_ERROR(dev, "error getting PRAMIN backing pages: %d\n", ret);
		return ret;
	}

	ret = nouveau_bo_pin(gpuobj->im_backing, TTM_PL_FLAG_VRAM);
	if (ret) {
		NV_ERROR(dev, "error pinning PRAMIN backing VRAM: %d\n", ret);
		nouveau_bo_ref(NULL, &gpuobj->im_backing);
		return ret;
	}

	gpuobj->vinst = gpuobj->im_backing->bo.mem.mm_node->start << PAGE_SHIFT;
	return 0;
}

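/*
 * Release the VRAM backing an instance-memory object, unbinding it from the
 * PRAMIN aperture first if it is still mapped.
 */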
void
nvc0_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	if (gpuobj && gpuobj->im_backing) {
		if (gpuobj->im_bound)
			dev_priv->engine.instmem.unbind(dev, gpuobj);
		nouveau_bo_unpin(gpuobj->im_backing);
		nouveau_bo_ref(NULL, &gpuobj->im_backing);
		gpuobj->im_backing = NULL;
	}
}

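/*
 * Map an object's backing VRAM pages into the PRAMIN aperture by filling in
 * the page-table entries that cover its im_pramin range.
 */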
int
nvc0_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t pte, pte_end;
	uint64_t vram;

	if (!gpuobj->im_backing || !gpuobj->im_pramin || gpuobj->im_bound)
		return -EINVAL;

	NV_DEBUG(dev, "st=0x%lx sz=0x%lx\n",
		 gpuobj->im_pramin->start, gpuobj->im_pramin->size);

	pte = gpuobj->im_pramin->start >> 12;
	pte_end = (gpuobj->im_pramin->size >> 12) + pte;
	vram = gpuobj->vinst;

	NV_DEBUG(dev, "pramin=0x%lx, pte=%d, pte_end=%d\n",
		 gpuobj->im_pramin->start, pte, pte_end);
	NV_DEBUG(dev, "first vram page: 0x%010llx\n", gpuobj->vinst);

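	/*
	 * One 8-byte PTE per 4KiB page, written through the PRAMIN window at
	 * 0x702000 (offset 0x2000 into the channel mapped at 0x700000).  The
	 * low word appears to hold the VRAM address >> 8 plus a valid bit;
	 * the exact PTE layout is inferred from the writes below, not from
	 * documentation.
	 */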
	while (pte < pte_end) {
		nv_wr32(dev, 0x702000 + (pte * 8), (vram >> 8) | 1);
		nv_wr32(dev, 0x702004 + (pte * 8), 0);
		vram += 4096;
		pte++;
	}
	dev_priv->engine.instmem.flush(dev);

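	/*
	 * This looks like a VM TLB flush for the channel currently selected
	 * via 0x1700; the 0x100cb8/0x100cbc writes are undocumented here, so
	 * treat this description as an assumption rather than fact.
	 */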
	if (1) {
		u32 chan = nv_rd32(dev, 0x1700) << 16;
		nv_wr32(dev, 0x100cb8, (chan + 0x1000) >> 8);
		nv_wr32(dev, 0x100cbc, 0x80000005);
	}

	gpuobj->im_bound = 1;
	return 0;
}

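/*
 * Remove an object's mapping from the PRAMIN aperture by zeroing the
 * page-table entries that were written by nvc0_instmem_bind().
 */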
int
nvc0_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t pte, pte_end;

	if (gpuobj->im_bound == 0)
		return -EINVAL;

	pte = gpuobj->im_pramin->start >> 12;
	pte_end = (gpuobj->im_pramin->size >> 12) + pte;
	while (pte < pte_end) {
		nv_wr32(dev, 0x702000 + (pte * 8), 0);
		nv_wr32(dev, 0x702004 + (pte * 8), 0);
		pte++;
	}
	dev_priv->engine.instmem.flush(dev);

	gpuobj->im_bound = 0;
	return 0;
}

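/*
 * Flush pending PRAMIN writes: write 1 to 0x070000 and wait for bit 1 of
 * that register to clear, warning on timeout.
 */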
void
nvc0_instmem_flush(struct drm_device *dev)
{
	nv_wr32(dev, 0x070000, 1);
	if (!nv_wait(dev, 0x070000, 0x00000002, 0x00000000))
		NV_ERROR(dev, "PRAMIN flush timeout\n");
}

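/*
 * Save the 64KiB of reserved instance memory visible through the PRAMIN
 * window so it can be restored on resume.
 */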
int
nvc0_instmem_suspend(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	u32 *buf;
	int i;

	dev_priv->susres.ramin_copy = vmalloc(65536);
	if (!dev_priv->susres.ramin_copy)
		return -ENOMEM;
	buf = dev_priv->susres.ramin_copy;

	for (i = 0; i < 65536; i += 4)
		buf[i/4] = nv_rd32(dev, NV04_PRAMIN + i);
	return 0;
}

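/*
 * Restore the saved instance-memory image: re-select the reserved channel
 * through 0x001700, write the copy back through the PRAMIN window, then
 * point BAR3 back at the channel via 0x001714.
 */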
void
nvc0_instmem_resume(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	u32 *buf = dev_priv->susres.ramin_copy;
	u64 chan;
	int i;

	chan = dev_priv->vram_size - dev_priv->ramin_rsvd_vram;
	nv_wr32(dev, 0x001700, chan >> 16);

	for (i = 0; i < 65536; i += 4)
		nv_wr32(dev, NV04_PRAMIN + i, buf[i/4]);
	vfree(dev_priv->susres.ramin_copy);
	dev_priv->susres.ramin_copy = NULL;

	nv_wr32(dev, 0x001714, 0xc0000000 | (chan >> 12));
}

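/*
 * Reserve 1MiB of VRAM at the top of memory for instance memory, build a
 * small channel there (page directory plus a page table covering the
 * reservation), point BAR3 at it, and set up the global PRAMIN heap in the
 * space left after those fixed structures.
 */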
int
nvc0_instmem_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	u64 chan, pgt3, imem, lim3 = dev_priv->ramin_size - 1;
	int ret, i;

	dev_priv->ramin_rsvd_vram = 1 * 1024 * 1024;
	chan = dev_priv->vram_size - dev_priv->ramin_rsvd_vram;
	imem = 4096 + 4096 + 32768;

	nv_wr32(dev, 0x001700, chan >> 16);

	/* channel setup */
	nv_wr32(dev, 0x700200, lower_32_bits(chan + 0x1000));
	nv_wr32(dev, 0x700204, upper_32_bits(chan + 0x1000));
	nv_wr32(dev, 0x700208, lower_32_bits(lim3));
	nv_wr32(dev, 0x70020c, upper_32_bits(lim3));

	/* point pgd -> pgt */
	nv_wr32(dev, 0x701000, 0);
	nv_wr32(dev, 0x701004, ((chan + 0x2000) >> 8) | 1);

	/* point pgt -> physical vram for channel */
	pgt3 = 0x2000;
	for (i = 0; i < dev_priv->ramin_rsvd_vram; i += 4096, pgt3 += 8) {
		nv_wr32(dev, 0x700000 + pgt3, ((chan + i) >> 8) | 1);
		nv_wr32(dev, 0x700004 + pgt3, 0);
	}

	/* clear rest of pgt */
	for (; i < dev_priv->ramin_size; i += 4096, pgt3 += 8) {
		nv_wr32(dev, 0x700000 + pgt3, 0);
		nv_wr32(dev, 0x700004 + pgt3, 0);
	}

	/* point bar3 at the channel */
	nv_wr32(dev, 0x001714, 0xc0000000 | (chan >> 12));

	/* Global PRAMIN heap */
	ret = drm_mm_init(&dev_priv->ramin_heap, imem,
			  dev_priv->ramin_size - imem);
	if (ret) {
		NV_ERROR(dev, "Failed to init RAMIN heap\n");
		return -ENOMEM;
	}

	return 0;
}

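/*
 * No teardown is performed here: nvc0_instmem_init() only writes registers
 * and reserves a fixed block of VRAM, neither of which is released by this
 * function.
 */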
void
nvc0_instmem_takedown(struct drm_device *dev)
{
}