/*
 * Copyright (C) 2007 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_ramht.h"

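/* Set up the software state of a channel's push buffer before any
 * commands are submitted.  On NV_50 and later cards the second half of
 * the buffer is reserved for an indirect buffer (IB): a ring of 8-byte
 * entries, each describing a fragment of command data located elsewhere.
 */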
void
nouveau_dma_pre_init(struct nouveau_channel *chan)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct nouveau_bo *pushbuf = chan->pushbuf_bo;

	if (dev_priv->card_type >= NV_50) {
		const int ib_size = pushbuf->bo.mem.size / 2;

		chan->dma.ib_base = (pushbuf->bo.mem.size - ib_size) >> 2;
		chan->dma.ib_max = (ib_size / 8) - 1;
		chan->dma.ib_put = 0;
		chan->dma.ib_free = chan->dma.ib_max - chan->dma.ib_put;

		chan->dma.max = (pushbuf->bo.mem.size - ib_size) >> 2;
	} else {
		chan->dma.max = (pushbuf->bo.mem.size >> 2) - 2;
	}

	chan->dma.put = 0;
	chan->dma.cur = chan->dma.put;
	chan->dma.free = chan->dma.max - chan->dma.cur;
}

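/* One-time channel setup: create the M2MF engine object used for
 * buffer moves, bind it to its subchannel, and (pre-NV_C0) allocate
 * its notifier and pad the start of the push buffer with NOPs.
 */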
int
nouveau_dma_init(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int ret, i;

	if (dev_priv->card_type >= NV_C0) {
		ret = nouveau_gpuobj_gr_new(chan, 0x9039, 0x9039);
		if (ret)
			return ret;

		ret = RING_SPACE(chan, 2);
		if (ret)
			return ret;

		BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0000, 1);
		OUT_RING (chan, 0x00009039);
		FIRE_RING (chan);
		return 0;
	}

	/* Create NV_MEMORY_TO_MEMORY_FORMAT for buffer moves */
	ret = nouveau_gpuobj_gr_new(chan, NvM2MF, dev_priv->card_type < NV_50 ?
				    0x0039 : 0x5039);
	if (ret)
		return ret;

	/* NV_MEMORY_TO_MEMORY_FORMAT requires a notifier object */
	ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfd0, 0x1000,
				     &chan->m2mf_ntfy);
	if (ret)
		return ret;

	/* Insert NOPS for NOUVEAU_DMA_SKIPS */
	ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS);
	if (ret)
		return ret;

	for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
		OUT_RING(chan, 0);

	/* Initialise NV_MEMORY_TO_MEMORY_FORMAT */
	ret = RING_SPACE(chan, 6);
	if (ret)
		return ret;
	BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NAME, 1);
	OUT_RING (chan, NvM2MF);
	BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_NOTIFY, 3);
	OUT_RING (chan, NvNotify0);
	OUT_RING (chan, chan->vram_handle);
	OUT_RING (chan, chan->gart_handle);

	/* Sit back and pray the channel works.. */
	FIRE_RING(chan);

	return 0;
}

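/* Copy nr_dwords of command data into the push buffer at the current
 * position.  The buffer may be mapped through an I/O aperture, so the
 * copy has to distinguish iomem from ordinary kernel memory.
 */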
void
OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&chan->pushbuf_bo->kmap, &is_iomem);
	mem = &mem[chan->dma.cur];
	if (is_iomem)
		memcpy_toio((void __force __iomem *)mem, data, nr_dwords * 4);
	else
		memcpy(mem, data, nr_dwords * 4);
	chan->dma.cur += nr_dwords;
}

/* Fetch and adjust GPU GET pointer
 *
 * Returns:
 *  value >= 0, the adjusted GET pointer
 *  -EINVAL if GET pointer currently outside main push buffer
 *  -EBUSY if timeout exceeded
 */
static inline int
READ_GET(struct nouveau_channel *chan, uint32_t *prev_get, uint32_t *timeout)
{
	uint32_t val;

	val = nvchan_rd32(chan, chan->user_get);

	/* reset counter as long as GET is still advancing, this is
	 * to avoid misdetecting a GPU lockup if the GPU happens to
	 * just be processing an operation that takes a long time
	 */
	if (val != *prev_get) {
		*prev_get = val;
		*timeout = 0;
	}

	if ((++*timeout & 0xff) == 0) {
		DRM_UDELAY(1);
		if (*timeout > 100000)
			return -EBUSY;
	}

	if (val < chan->pushbuf_base ||
	    val > chan->pushbuf_base + (chan->dma.max << 2))
		return -EINVAL;

	return (val - chan->pushbuf_base) >> 2;
}

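/* Queue a fragment of command data on the NV50+ indirect buffer.  Each
 * IB entry is a pair of 32-bit words encoding the fragment's GPU address
 * and its length; bumping the IB put index at user register 0x8c then
 * tells PFIFO to fetch from it.
 */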
void
nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo,
	      int delta, int length)
{
	struct nouveau_bo *pb = chan->pushbuf_bo;
	uint64_t offset = bo->bo.offset + delta;
	int ip = (chan->dma.ib_put * 2) + chan->dma.ib_base;

	BUG_ON(chan->dma.ib_free < 1);
	nouveau_bo_wr32(pb, ip++, lower_32_bits(offset));
	nouveau_bo_wr32(pb, ip++, upper_32_bits(offset) | length << 8);

	chan->dma.ib_put = (chan->dma.ib_put + 1) & chan->dma.ib_max;

	DRM_MEMORYBARRIER();
	/* Flush writes. */
	nouveau_bo_rd32(pb, 0);

	nvchan_wr32(chan, 0x8c, chan->dma.ib_put);
	chan->dma.ib_free--;
}

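/* Wait for at least 'count' free entries in the indirect buffer,
 * re-reading the IB GET index at user register 0x88 and applying the
 * same GET-still-advancing timeout heuristic as READ_GET().
 */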
static int
nv50_dma_push_wait(struct nouveau_channel *chan, int count)
{
	uint32_t cnt = 0, prev_get = 0;

	while (chan->dma.ib_free < count) {
		uint32_t get = nvchan_rd32(chan, 0x88);
		if (get != prev_get) {
			prev_get = get;
			cnt = 0;
		}

		if ((++cnt & 0xff) == 0) {
			DRM_UDELAY(1);
			if (cnt > 100000)
				return -EBUSY;
		}

		chan->dma.ib_free = get - chan->dma.ib_put;
		if (chan->dma.ib_free <= 0)
			chan->dma.ib_free += chan->dma.ib_max;
	}

	return 0;
}

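/* NV50+ counterpart to nouveau_dma_wait(): reserve 'slots' + 1 free IB
 * entries, then wait for 'count' dwords of push buffer space, wrapping
 * back to the start of the buffer when the space at the end runs out.
 */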
static int
nv50_dma_wait(struct nouveau_channel *chan, int slots, int count)
{
	uint32_t cnt = 0, prev_get = 0;
	int ret;

	ret = nv50_dma_push_wait(chan, slots + 1);
	if (unlikely(ret))
		return ret;

	while (chan->dma.free < count) {
		int get = READ_GET(chan, &prev_get, &cnt);
		if (unlikely(get < 0)) {
			if (get == -EINVAL)
				continue;

			return get;
		}

		if (get <= chan->dma.cur) {
			chan->dma.free = chan->dma.max - chan->dma.cur;
			if (chan->dma.free >= count)
				break;

			FIRE_RING(chan);
			do {
				get = READ_GET(chan, &prev_get, &cnt);
				if (unlikely(get < 0)) {
					if (get == -EINVAL)
						continue;
					return get;
				}
			} while (get == 0);
			chan->dma.cur = 0;
			chan->dma.put = 0;
		}

		chan->dma.free = get - chan->dma.cur - 1;
	}

	return 0;
}

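/* Wait for 'size' dwords of free push buffer space (and, on IB-mode
 * channels, 'slots' free IB entries).  May emit a jump back to the
 * start of the push buffer when the end is reached, so callers can
 * treat the space they are given as contiguous.
 */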
int
nouveau_dma_wait(struct nouveau_channel *chan, int slots, int size)
{
	uint32_t prev_get = 0, cnt = 0;
	int get;

	if (chan->dma.ib_max)
		return nv50_dma_wait(chan, slots, size);

	while (chan->dma.free < size) {
		get = READ_GET(chan, &prev_get, &cnt);
		if (unlikely(get == -EBUSY))
			return -EBUSY;

		/* loop until we have a usable GET pointer.  the value
		 * we read from the GPU may be outside the main ring if
		 * PFIFO is processing a buffer called from the main ring,
		 * discard these values until something sensible is seen.
		 *
		 * the other case we discard GET is while the GPU is fetching
		 * from the SKIPS area, so the code below doesn't have to deal
		 * with some fun corner cases.
		 */
		if (unlikely(get == -EINVAL) || get < NOUVEAU_DMA_SKIPS)
			continue;

		if (get <= chan->dma.cur) {
			/* engine is fetching behind us, or is completely
			 * idle (GET == PUT) so we have free space up until
			 * the end of the push buffer
			 *
			 * we can only hit that path once per call due to
			 * looping back to the beginning of the push buffer,
			 * we'll hit the fetching-ahead-of-us path from that
			 * point on.
			 *
			 * the *one* exception to that rule is if we read
			 * GET==PUT, in which case the below conditional will
			 * always succeed and break us out of the wait loop.
			 */
			chan->dma.free = chan->dma.max - chan->dma.cur;
			if (chan->dma.free >= size)
				break;

			/* not enough space left at the end of the push buffer,
			 * instruct the GPU to jump back to the start right
			 * after processing the currently pending commands.
			 */
			OUT_RING(chan, chan->pushbuf_base | 0x20000000);

			/* wait for GET to depart from the skips area.
			 * prevents writing GET==PUT and causing a race
			 * condition that causes us to think the GPU is
			 * idle when it's not.
			 */
			do {
				get = READ_GET(chan, &prev_get, &cnt);
				if (unlikely(get == -EBUSY))
					return -EBUSY;
				if (unlikely(get == -EINVAL))
					continue;
			} while (get <= NOUVEAU_DMA_SKIPS);
			WRITE_PUT(NOUVEAU_DMA_SKIPS);

			/* we're now submitting commands at the start of
			 * the push buffer.
			 */
			chan->dma.cur =
				chan->dma.put = NOUVEAU_DMA_SKIPS;
		}

		/* engine fetching ahead of us, we have space up until the
		 * current GET pointer.  the "- 1" is to ensure there's
		 * space left to emit a jump back to the beginning of the
		 * push buffer if we require it.  we can never get GET == PUT
		 * here, so this is safe.
		 */
		chan->dma.free = get - chan->dma.cur - 1;
	}

	return 0;
}