/*
 * Copyright (C) 2007 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "drmP.h"
#include "drm.h"

#include <linux/ktime.h>
#include <linux/hrtimer.h>

#include "nouveau_drv.h"
#include "nouveau_ramht.h"
#include "nouveau_fence.h"
#include "nouveau_software.h"
#include "nouveau_dma.h"

#define USE_REFCNT(dev) (nouveau_private(dev)->chipset >= 0x10)
#define USE_SEMA(dev) (nouveau_private(dev)->chipset >= 0x17)

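/* Walk the channel's pending list and retire every fence whose sequence
 * number the GPU has already acknowledged, running any attached work
 * callback and dropping the pending list's reference on each one.
 */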
void
nouveau_fence_update(struct nouveau_channel *chan)
{
        struct drm_device *dev = chan->dev;
        struct nouveau_fence *tmp, *fence;
        uint32_t sequence;

        spin_lock(&chan->fence.lock);

        /* Fetch the last sequence if the channel is still up and running */
        if (likely(!list_empty(&chan->fence.pending))) {
                if (USE_REFCNT(dev))
                        sequence = nvchan_rd32(chan, 0x48);
                else
                        sequence = atomic_read(&chan->fence.last_sequence_irq);

                if (chan->fence.sequence_ack == sequence)
                        goto out;
                chan->fence.sequence_ack = sequence;
        }

        list_for_each_entry_safe(fence, tmp, &chan->fence.pending, head) {
                if (fence->sequence > chan->fence.sequence_ack)
                        break;

                fence->channel = NULL;
                list_del(&fence->head);
                if (fence->work)
                        fence->work(fence->priv, true);

                nouveau_fence_unref(&fence);
        }

out:
        spin_unlock(&chan->fence.lock);
}

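/* Assign the next per-channel sequence number to the fence, queue it on
 * the channel's pending list, and push a method into the ring so the GPU
 * records that sequence once all preceding work on the channel completes.
 */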
int
nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
{
        struct drm_device *dev = chan->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        int ret;

        ret = RING_SPACE(chan, 2);
        if (ret)
                return ret;

        if (unlikely(chan->fence.sequence == chan->fence.sequence_ack - 1)) {
                nouveau_fence_update(chan);

                BUG_ON(chan->fence.sequence ==
                       chan->fence.sequence_ack - 1);
        }

        fence->sequence = ++chan->fence.sequence;
        fence->channel = chan;

        kref_get(&fence->kref);
        spin_lock(&chan->fence.lock);
        list_add_tail(&fence->head, &chan->fence.pending);
        spin_unlock(&chan->fence.lock);

        if (USE_REFCNT(dev)) {
                if (dev_priv->card_type < NV_C0)
                        BEGIN_NV04(chan, 0, NV10_SUBCHAN_REF_CNT, 1);
                else
                        BEGIN_NVC0(chan, 0, NV10_SUBCHAN_REF_CNT, 1);
        } else {
                BEGIN_NV04(chan, NvSubSw, 0x0150, 1);
        }
        OUT_RING (chan, fence->sequence);
        FIRE_RING(chan);
        fence->timeout = jiffies + 3 * DRM_HZ;

        return 0;
}

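/* A fence is done once nouveau_fence_update() has retired it and cleared
 * its channel back-pointer.
 */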
bool
nouveau_fence_done(struct nouveau_fence *fence)
{
        if (fence->channel)
                nouveau_fence_update(fence->channel);
        return !fence->channel;
}

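/* Poll, or with "lazy" sleep in exponentially growing hrtimer steps capped
 * at 1ms, until the fence signals, its 3 second timeout expires (-EBUSY),
 * or, when "intr" is set, a signal becomes pending (-ERESTARTSYS).
 */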
int
nouveau_fence_wait(struct nouveau_fence *fence, bool lazy, bool intr)
{
        unsigned long sleep_time = NSEC_PER_MSEC / 1000;
        ktime_t t;
        int ret = 0;

        while (!nouveau_fence_done(fence)) {
                if (fence->timeout && time_after_eq(jiffies, fence->timeout)) {
                        ret = -EBUSY;
                        break;
                }

                __set_current_state(intr ? TASK_INTERRUPTIBLE :
                                           TASK_UNINTERRUPTIBLE);
                if (lazy) {
                        t = ktime_set(0, sleep_time);
                        schedule_hrtimeout(&t, HRTIMER_MODE_REL);
                        sleep_time *= 2;
                        if (sleep_time > NSEC_PER_MSEC)
                                sleep_time = NSEC_PER_MSEC;
                }

                if (intr && signal_pending(current)) {
                        ret = -ERESTARTSYS;
                        break;
                }
        }

        __set_current_state(TASK_RUNNING);
        return ret;
}

static void
nouveau_fence_del(struct kref *kref)
{
        struct nouveau_fence *fence = container_of(kref, typeof(*fence), kref);
        kfree(fence);
}

void
nouveau_fence_unref(struct nouveau_fence **pfence)
{
        if (*pfence)
                kref_put(&(*pfence)->kref, nouveau_fence_del);
        *pfence = NULL;
}

struct nouveau_fence *
nouveau_fence_ref(struct nouveau_fence *fence)
{
        kref_get(&fence->kref);
        return fence;
}

int
nouveau_fence_new(struct nouveau_channel *chan, struct nouveau_fence **pfence)
{
        struct nouveau_fence *fence;
        int ret = 0;

        fence = kzalloc(sizeof(*fence), GFP_KERNEL);
        if (!fence)
                return -ENOMEM;
        kref_init(&fence->kref);

        if (chan) {
                ret = nouveau_fence_emit(fence, chan);
                if (ret)
                        nouveau_fence_unref(&fence);
        }

        *pfence = fence;
        return ret;
}

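/* A rough usage sketch of the API above: allocate and emit a fence on a
 * channel, wait for it, then drop the reference.  Illustrative only; real
 * call sites handle errors and object references differently:
 *
 *	struct nouveau_fence *fence = NULL;
 *	int ret;
 *
 *	ret = nouveau_fence_new(chan, &fence);
 *	if (ret == 0) {
 *		ret = nouveau_fence_wait(fence, true, false);
 *		nouveau_fence_unref(&fence);
 *	}
 */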
struct nouveau_semaphore {
        struct kref ref;
        struct drm_device *dev;
        struct drm_mm_node *mem;
};

void
nouveau_fence_work(struct nouveau_fence *fence,
                   void (*work)(void *priv, bool signalled),
                   void *priv)
{
        if (!fence->channel) {
                work(priv, true);
        } else {
                fence->work = work;
                fence->priv = priv;
        }
}

static struct nouveau_semaphore *
semaphore_alloc(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_semaphore *sema;
        int size = (dev_priv->chipset < 0x84) ? 4 : 16;
        int ret, i;

        if (!USE_SEMA(dev))
                return NULL;

        sema = kmalloc(sizeof(*sema), GFP_KERNEL);
        if (!sema)
                goto fail;

        ret = drm_mm_pre_get(&dev_priv->fence.heap);
        if (ret)
                goto fail;

        spin_lock(&dev_priv->fence.lock);
        sema->mem = drm_mm_search_free(&dev_priv->fence.heap, size, 0, 0);
        if (sema->mem)
                sema->mem = drm_mm_get_block_atomic(sema->mem, size, 0);
        spin_unlock(&dev_priv->fence.lock);

        if (!sema->mem)
                goto fail;

        kref_init(&sema->ref);
        sema->dev = dev;
        for (i = sema->mem->start; i < sema->mem->start + size; i += 4)
                nouveau_bo_wr32(dev_priv->fence.bo, i / 4, 0);

        return sema;
fail:
        kfree(sema);
        return NULL;
}

static void
semaphore_free(struct kref *ref)
{
        struct nouveau_semaphore *sema =
                container_of(ref, struct nouveau_semaphore, ref);
        struct drm_nouveau_private *dev_priv = sema->dev->dev_private;

        spin_lock(&dev_priv->fence.lock);
        drm_mm_put_block(sema->mem);
        spin_unlock(&dev_priv->fence.lock);

        kfree(sema);
}

static void
semaphore_work(void *priv, bool signalled)
{
        struct nouveau_semaphore *sema = priv;
        struct drm_nouveau_private *dev_priv = sema->dev->dev_private;

        if (unlikely(!signalled))
                nouveau_bo_wr32(dev_priv->fence.bo, sema->mem->start / 4, 1);

        kref_put(&sema->ref, semaphore_free);
}

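/* Emit methods that make "chan" block until the semaphore slot at the
 * given offset reads 1.  The encoding differs per generation: pre-NV84
 * parts use the legacy DMA_SEMAPHORE acquire, NV84+ the 64-bit semaphore
 * address methods, and NVC0+ the equivalent Fermi-format methods.
 */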
static int
semaphore_acquire(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
{
        struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
        struct nouveau_fence *fence = NULL;
        u64 offset = chan->fence.vma.offset + sema->mem->start;
        int ret;

        if (dev_priv->chipset < 0x84) {
                ret = RING_SPACE(chan, 4);
                if (ret)
                        return ret;

                BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 3);
                OUT_RING (chan, NvSema);
                OUT_RING (chan, offset);
                OUT_RING (chan, 1);
        } else
        if (dev_priv->chipset < 0xc0) {
                ret = RING_SPACE(chan, 7);
                if (ret)
                        return ret;

                BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
                OUT_RING (chan, chan->vram_handle);
                BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
                OUT_RING (chan, upper_32_bits(offset));
                OUT_RING (chan, lower_32_bits(offset));
                OUT_RING (chan, 1);
                OUT_RING (chan, 1); /* ACQUIRE_EQ */
        } else {
                ret = RING_SPACE(chan, 5);
                if (ret)
                        return ret;

                BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
                OUT_RING (chan, upper_32_bits(offset));
                OUT_RING (chan, lower_32_bits(offset));
                OUT_RING (chan, 1);
                OUT_RING (chan, 0x1001); /* ACQUIRE_EQ */
        }

        /* Delay semaphore destruction until its work is done */
        ret = nouveau_fence_new(chan, &fence);
        if (ret)
                return ret;

        kref_get(&sema->ref);
        nouveau_fence_work(fence, semaphore_work, sema);
        nouveau_fence_unref(&fence);
        return 0;
}

static int
semaphore_release(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
{
        struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
        struct nouveau_fence *fence = NULL;
        u64 offset = chan->fence.vma.offset + sema->mem->start;
        int ret;

        if (dev_priv->chipset < 0x84) {
                ret = RING_SPACE(chan, 5);
                if (ret)
                        return ret;

                BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 2);
                OUT_RING (chan, NvSema);
                OUT_RING (chan, offset);
                BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_RELEASE, 1);
                OUT_RING (chan, 1);
        } else
        if (dev_priv->chipset < 0xc0) {
                ret = RING_SPACE(chan, 7);
                if (ret)
                        return ret;

                BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
                OUT_RING (chan, chan->vram_handle);
                BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
                OUT_RING (chan, upper_32_bits(offset));
                OUT_RING (chan, lower_32_bits(offset));
                OUT_RING (chan, 1);
                OUT_RING (chan, 2); /* RELEASE */
        } else {
                ret = RING_SPACE(chan, 5);
                if (ret)
                        return ret;

                BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
                OUT_RING (chan, upper_32_bits(offset));
                OUT_RING (chan, lower_32_bits(offset));
                OUT_RING (chan, 1);
                OUT_RING (chan, 0x1002); /* RELEASE */
        }

        /* Delay semaphore destruction until its work is done */
        ret = nouveau_fence_new(chan, &fence);
        if (ret)
                return ret;

        kref_get(&sema->ref);
        nouveau_fence_work(fence, semaphore_work, sema);
        nouveau_fence_unref(&fence);
        return 0;
}

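/* Make "wchan" wait for a fence emitted on another channel.  If a shared
 * semaphore can be allocated, wchan acquires it and the fence's channel
 * releases it entirely on the GPU; otherwise, or if the other channel's
 * mutex can't be taken without risking a locking-order problem, fall back
 * to a CPU-side nouveau_fence_wait().
 */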
int
nouveau_fence_sync(struct nouveau_fence *fence,
                   struct nouveau_channel *wchan)
{
        struct nouveau_channel *chan;
        struct drm_device *dev = wchan->dev;
        struct nouveau_semaphore *sema;
        int ret = 0;

        chan = fence ? nouveau_channel_get_unlocked(fence->channel) : NULL;
        if (likely(!chan || chan == wchan || nouveau_fence_done(fence)))
                goto out;

        sema = semaphore_alloc(dev);
        if (!sema) {
                /* Early card or broken userspace, fall back to
                 * software sync. */
                ret = nouveau_fence_wait(fence, true, false);
                goto out;
        }

        /* try to take chan's mutex, if we can't take it right away
         * we have to fall back to software sync to prevent locking
         * order issues
         */
        if (!mutex_trylock(&chan->mutex)) {
                ret = nouveau_fence_wait(fence, true, false);
                goto out_unref;
        }

        /* Make wchan wait until it gets signalled */
        ret = semaphore_acquire(wchan, sema);
        if (ret)
                goto out_unlock;

        /* Signal the semaphore from chan */
        ret = semaphore_release(chan, sema);

out_unlock:
        mutex_unlock(&chan->mutex);
out_unref:
        kref_put(&sema->ref, semaphore_free);
out:
        if (chan)
                nouveau_channel_put_unlocked(&chan);
        return ret;
}

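/* Per-channel setup: bind the software object on pre-Fermi channels and,
 * when semaphores are used, make the shared fence buffer reachable either
 * through an NvSema DMA object (pre-NV84) or a mapping in the channel's
 * VM (NV84 and later).
 */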
int
nouveau_fence_channel_init(struct nouveau_channel *chan)
{
        struct drm_device *dev = chan->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj *obj = NULL;
        int ret;

        if (dev_priv->card_type < NV_C0) {
                ret = RING_SPACE(chan, 2);
                if (ret)
                        return ret;

                BEGIN_NV04(chan, NvSubSw, NV01_SUBCHAN_OBJECT, 1);
                OUT_RING (chan, NvSw);
                FIRE_RING (chan);
        }

        /* Set up the area of memory shared between all channels for x-chan sync */
        if (USE_SEMA(dev) && dev_priv->chipset < 0x84) {
                struct ttm_mem_reg *mem = &dev_priv->fence.bo->bo.mem;

                ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_FROM_MEMORY,
                                             mem->start << PAGE_SHIFT,
                                             mem->size, NV_MEM_ACCESS_RW,
                                             NV_MEM_TARGET_VRAM, &obj);
                if (ret)
                        return ret;

                ret = nouveau_ramht_insert(chan, NvSema, obj);
                nouveau_gpuobj_ref(NULL, &obj);
                if (ret)
                        return ret;
        } else
        if (USE_SEMA(dev)) {
                /* map fence bo into channel's vm */
                ret = nouveau_bo_vma_add(dev_priv->fence.bo, chan->vm,
                                         &chan->fence.vma);
                if (ret)
                        return ret;
        }

        atomic_set(&chan->fence.last_sequence_irq, 0);
        return 0;
}

void
nouveau_fence_channel_fini(struct nouveau_channel *chan)
{
        struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
        struct nouveau_fence *tmp, *fence;

        spin_lock(&chan->fence.lock);
        list_for_each_entry_safe(fence, tmp, &chan->fence.pending, head) {
                fence->channel = NULL;
                list_del(&fence->head);

                if (unlikely(fence->work))
                        fence->work(fence->priv, false);

                kref_put(&fence->kref, nouveau_fence_del);
        }
        spin_unlock(&chan->fence.lock);

        nouveau_bo_vma_del(dev_priv->fence.bo, &chan->fence.vma);
}

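/* Global setup: when semaphores are used, allocate, pin and map a small
 * VRAM buffer and carve it up with a drm_mm heap so channels can grab
 * semaphore slots for cross-channel synchronisation.
 */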
int
nouveau_fence_init(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        int size = (dev_priv->chipset < 0x84) ? 4096 : 16384;
        int ret;

        /* Create a shared VRAM heap for cross-channel sync. */
        if (USE_SEMA(dev)) {
                ret = nouveau_bo_new(dev, size, 0, TTM_PL_FLAG_VRAM,
                                     0, 0, NULL, &dev_priv->fence.bo);
                if (ret)
                        return ret;

                ret = nouveau_bo_pin(dev_priv->fence.bo, TTM_PL_FLAG_VRAM);
                if (ret)
                        goto fail;

                ret = nouveau_bo_map(dev_priv->fence.bo);
                if (ret)
                        goto fail;

                ret = drm_mm_init(&dev_priv->fence.heap, 0,
                                  dev_priv->fence.bo->bo.mem.size);
                if (ret)
                        goto fail;

                spin_lock_init(&dev_priv->fence.lock);
        }

        return 0;
fail:
        nouveau_bo_unmap(dev_priv->fence.bo);
        nouveau_bo_ref(NULL, &dev_priv->fence.bo);
        return ret;
}

void
nouveau_fence_fini(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;

        if (USE_SEMA(dev)) {
                drm_mm_takedown(&dev_priv->fence.heap);
                nouveau_bo_unmap(dev_priv->fence.bo);
                nouveau_bo_unpin(dev_priv->fence.bo);
                nouveau_bo_ref(NULL, &dev_priv->fence.bo);
        }
}