blob: decfe6c4ac07c338b22dc2f94d2398c1bc7fde90 [file] [log] [blame]
Ben Skeggs6ee73862009-12-11 19:24:15 +10001/*
2 * Copyright (C) 2007 Ben Skeggs.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
David Howells760285e2012-10-02 18:01:07 +010027#include <drm/drmP.h>
Ben Skeggs6ee73862009-12-11 19:24:15 +100028
Marcin Slusarzbd35fe52011-03-09 14:22:19 +010029#include <linux/ktime.h>
30#include <linux/hrtimer.h>
Maarten Lankhorst29ba89b2014-01-09 11:03:11 +010031#include <trace/events/fence.h>
Marcin Slusarzbd35fe52011-03-09 14:22:19 +010032
Ben Skeggs867920f2014-08-10 04:10:25 +100033#include <nvif/notify.h>
34#include <nvif/event.h>
35
Ben Skeggsebb945a2012-07-20 08:17:34 +100036#include "nouveau_drm.h"
Ben Skeggs6ee73862009-12-11 19:24:15 +100037#include "nouveau_dma.h"
Ben Skeggsebb945a2012-07-20 08:17:34 +100038#include "nouveau_fence.h"
Ben Skeggs6ee73862009-12-11 19:24:15 +100039
/* Fence backends: interrupt (uevent) driven signalling when the channel
 * supports non-stall interrupts, polling-based "legacy" signalling
 * otherwise.  Both are defined at the end of this file. */
static const struct fence_ops nouveau_fence_ops_uevent;
static const struct fence_ops nouveau_fence_ops_legacy;
/* Upcast a base fence to the nouveau fence embedding it.  Only valid for
 * fences created by this driver (see nouveau_local_fence()). */
static inline struct nouveau_fence *
from_fence(struct fence *fence)
{
	return container_of(fence, struct nouveau_fence, base);
}
48
/* Recover the per-channel fence context from a fence.  Relies on every
 * nouveau fence being initialised with &fctx->lock as its lock, so
 * container_of() on the lock pointer lands on the owning fence_chan. */
static inline struct nouveau_fence_chan *
nouveau_fctx(struct nouveau_fence *fence)
{
	return container_of(fence->base.lock, struct nouveau_fence_chan, lock);
}
Ben Skeggsc4c70442013-05-07 09:48:30 +100054
/*
 * Signal a fence and unlink it from the channel's pending list.
 * Caller must hold fctx->lock (fence_signal_locked() requirement).
 *
 * If interrupt-driven signalling was enabled on this fence
 * (FENCE_FLAG_USER_BITS set in nouveau_fence_enable_signaling()), drop
 * the matching nvif notify reference.  The final fence_put() releases
 * the pending-list reference taken in nouveau_fence_emit(); the fence
 * may be freed by it and must not be touched afterwards.
 */
static void
nouveau_fence_signal(struct nouveau_fence *fence)
{
	fence_signal_locked(&fence->base);
	list_del(&fence->head);

	if (test_bit(FENCE_FLAG_USER_BITS, &fence->base.flags)) {
		struct nouveau_fence_chan *fctx = nouveau_fctx(fence);

		if (!--fctx->notify_ref)
			nvif_notify_put(&fctx->notify);
	}

	fence_put(&fence->base);
}
70
71static struct nouveau_fence *
72nouveau_local_fence(struct fence *fence, struct nouveau_drm *drm) {
73 struct nouveau_fence_priv *priv = (void*)drm->fence;
74
75 if (fence->ops != &nouveau_fence_ops_legacy &&
76 fence->ops != &nouveau_fence_ops_uevent)
77 return NULL;
78
79 if (fence->context < priv->context_base ||
80 fence->context >= priv->context_base + priv->contexts)
81 return NULL;
82
83 return from_fence(fence);
Ben Skeggsc4c70442013-05-07 09:48:30 +100084}
85
Ben Skeggs5e120f62012-04-30 13:55:29 +100086void
87nouveau_fence_context_del(struct nouveau_fence_chan *fctx)
88{
Maarten Lankhorst29ba89b2014-01-09 11:03:11 +010089 struct nouveau_fence *fence;
90
91 nvif_notify_fini(&fctx->notify);
92
93 spin_lock_irq(&fctx->lock);
94 while (!list_empty(&fctx->pending)) {
95 fence = list_entry(fctx->pending.next, typeof(*fence), head);
96
97 nouveau_fence_signal(fence);
98 fence->channel = NULL;
99 }
100 spin_unlock_irq(&fctx->lock);
101}
102
103static void
104nouveau_fence_update(struct nouveau_channel *chan, struct nouveau_fence_chan *fctx)
105{
106 struct nouveau_fence *fence;
107
108 u32 seq = fctx->read(chan);
109
110 while (!list_empty(&fctx->pending)) {
111 fence = list_entry(fctx->pending.next, typeof(*fence), head);
112
113 if ((int)(seq - fence->base.seqno) < 0)
114 return;
115
Ben Skeggsc4c70442013-05-07 09:48:30 +1000116 nouveau_fence_signal(fence);
Ben Skeggs5e120f62012-04-30 13:55:29 +1000117 }
Maarten Lankhorst29ba89b2014-01-09 11:03:11 +0100118}
119
120static int
121nouveau_fence_wait_uevent_handler(struct nvif_notify *notify)
122{
123 struct nouveau_fence_chan *fctx =
124 container_of(notify, typeof(*fctx), notify);
125 unsigned long flags;
126
127 spin_lock_irqsave(&fctx->lock, flags);
128 if (!list_empty(&fctx->pending)) {
129 struct nouveau_fence *fence;
130
131 fence = list_entry(fctx->pending.next, typeof(*fence), head);
132 nouveau_fence_update(fence->channel, fctx);
133 }
134 spin_unlock_irqrestore(&fctx->lock, flags);
135
136 /* Always return keep here. NVIF refcount is handled with nouveau_fence_update */
137 return NVIF_NOTIFY_KEEP;
Ben Skeggs5e120f62012-04-30 13:55:29 +1000138}
139
/*
 * Initialise the per-channel fence context: empty flip/pending lists, the
 * context's lock, and its global fence-context index.  When the backend
 * supports non-stall interrupts (priv->uevent), also set up the nvif
 * uevent notifier used to signal fences from interrupt context.
 */
void
nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_chan *fctx)
{
	struct nouveau_fence_priv *priv = (void*)chan->drm->fence;
	int ret;

	INIT_LIST_HEAD(&fctx->flip);
	INIT_LIST_HEAD(&fctx->pending);
	spin_lock_init(&fctx->lock);
	/* One fence context slot per channel, offset into the global range. */
	fctx->context = priv->context_base + chan->chid;

	if (!priv->uevent)
		return;

	ret = nvif_notify_init(chan->object, NULL,
			       nouveau_fence_wait_uevent_handler, false,
			       G82_CHANNEL_DMA_V0_NTFY_UEVENT,
			       &(struct nvif_notify_uevent_req) { },
			       sizeof(struct nvif_notify_uevent_req),
			       sizeof(struct nvif_notify_uevent_rep),
			       &fctx->notify);

	/* Notifier setup failing is unexpected; fences would then only be
	 * reaped by the polling paths, so make it loud but non-fatal. */
	WARN_ON(ret);
}
Ben Skeggs6ee73862009-12-11 19:24:15 +1000164
/* Deferred fence callback: cb fires when the fence signals, which queues
 * work, which runs func(data) in process (workqueue) context. */
struct nouveau_fence_work {
	struct work_struct work;	/* runs nouveau_fence_work_handler() */
	struct fence_cb cb;		/* fence completion hook */
	void (*func)(void *);		/* user callback */
	void *data;			/* opaque argument for func */
};
171
Ben Skeggsebb945a2012-07-20 08:17:34 +1000172static void
Ben Skeggsc4c70442013-05-07 09:48:30 +1000173nouveau_fence_work_handler(struct work_struct *kwork)
174{
Maarten Lankhorst29ba89b2014-01-09 11:03:11 +0100175 struct nouveau_fence_work *work = container_of(kwork, typeof(*work), work);
Ben Skeggsc4c70442013-05-07 09:48:30 +1000176 work->func(work->data);
177 kfree(work);
178}
179
Maarten Lankhorst29ba89b2014-01-09 11:03:11 +0100180static void nouveau_fence_work_cb(struct fence *fence, struct fence_cb *cb)
181{
182 struct nouveau_fence_work *work = container_of(cb, typeof(*work), cb);
183
184 schedule_work(&work->work);
185}
186
Ben Skeggsc4c70442013-05-07 09:48:30 +1000187void
Maarten Lankhorstf2c24b82014-04-02 17:14:48 +0200188nouveau_fence_work(struct fence *fence,
Ben Skeggsc4c70442013-05-07 09:48:30 +1000189 void (*func)(void *), void *data)
190{
Maarten Lankhorst29ba89b2014-01-09 11:03:11 +0100191 struct nouveau_fence_work *work;
Ben Skeggsc4c70442013-05-07 09:48:30 +1000192
Maarten Lankhorstf2c24b82014-04-02 17:14:48 +0200193 if (fence_is_signaled(fence))
Maarten Lankhorst29ba89b2014-01-09 11:03:11 +0100194 goto err;
Ben Skeggsc4c70442013-05-07 09:48:30 +1000195
Ben Skeggsc4c70442013-05-07 09:48:30 +1000196 work = kmalloc(sizeof(*work), GFP_KERNEL);
197 if (!work) {
Maarten Lankhorstf2c24b82014-04-02 17:14:48 +0200198 WARN_ON(nouveau_fence_wait((struct nouveau_fence *)fence,
199 false, false));
Maarten Lankhorst29ba89b2014-01-09 11:03:11 +0100200 goto err;
Ben Skeggsc4c70442013-05-07 09:48:30 +1000201 }
202
Maarten Lankhorst29ba89b2014-01-09 11:03:11 +0100203 INIT_WORK(&work->work, nouveau_fence_work_handler);
Ben Skeggsc4c70442013-05-07 09:48:30 +1000204 work->func = func;
205 work->data = data;
Ben Skeggsc4c70442013-05-07 09:48:30 +1000206
Maarten Lankhorstf2c24b82014-04-02 17:14:48 +0200207 if (fence_add_callback(fence, &work->cb, nouveau_fence_work_cb) < 0)
Maarten Lankhorst29ba89b2014-01-09 11:03:11 +0100208 goto err_free;
209 return;
Ben Skeggs6ee73862009-12-11 19:24:15 +1000210
Maarten Lankhorst29ba89b2014-01-09 11:03:11 +0100211err_free:
212 kfree(work);
213err:
214 func(data);
Ben Skeggs6ee73862009-12-11 19:24:15 +1000215}
216
217int
Ben Skeggsd375e7d52012-04-30 13:30:00 +1000218nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
Ben Skeggs6ee73862009-12-11 19:24:15 +1000219{
Ben Skeggse193b1d2012-07-19 10:51:42 +1000220 struct nouveau_fence_chan *fctx = chan->fence;
Maarten Lankhorst29ba89b2014-01-09 11:03:11 +0100221 struct nouveau_fence_priv *priv = (void*)chan->drm->fence;
Ben Skeggs6ee73862009-12-11 19:24:15 +1000222 int ret;
223
Ben Skeggs5e120f62012-04-30 13:55:29 +1000224 fence->channel = chan;
Daniel Vetterbfd83032013-12-11 11:34:41 +0100225 fence->timeout = jiffies + (15 * HZ);
Ben Skeggs6ee73862009-12-11 19:24:15 +1000226
Maarten Lankhorst29ba89b2014-01-09 11:03:11 +0100227 if (priv->uevent)
228 fence_init(&fence->base, &nouveau_fence_ops_uevent,
229 &fctx->lock,
230 priv->context_base + chan->chid, ++fctx->sequence);
231 else
232 fence_init(&fence->base, &nouveau_fence_ops_legacy,
233 &fctx->lock,
234 priv->context_base + chan->chid, ++fctx->sequence);
235
236 trace_fence_emit(&fence->base);
Ben Skeggs827520c2013-02-14 13:20:17 +1000237 ret = fctx->emit(fence);
Ben Skeggs5e120f62012-04-30 13:55:29 +1000238 if (!ret) {
Maarten Lankhorst29ba89b2014-01-09 11:03:11 +0100239 fence_get(&fence->base);
240 spin_lock_irq(&fctx->lock);
241 nouveau_fence_update(chan, fctx);
Ben Skeggs5e120f62012-04-30 13:55:29 +1000242 list_add_tail(&fence->head, &fctx->pending);
Maarten Lankhorst29ba89b2014-01-09 11:03:11 +0100243 spin_unlock_irq(&fctx->lock);
Ben Skeggs6ee73862009-12-11 19:24:15 +1000244 }
245
Ben Skeggs5e120f62012-04-30 13:55:29 +1000246 return ret;
Ben Skeggs6ee73862009-12-11 19:24:15 +1000247}
248
Ben Skeggs6ee73862009-12-11 19:24:15 +1000249bool
Ben Skeggsd375e7d52012-04-30 13:30:00 +1000250nouveau_fence_done(struct nouveau_fence *fence)
Ben Skeggs6ee73862009-12-11 19:24:15 +1000251{
Maarten Lankhorst29ba89b2014-01-09 11:03:11 +0100252 if (fence->base.ops == &nouveau_fence_ops_legacy ||
253 fence->base.ops == &nouveau_fence_ops_uevent) {
254 struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
255 unsigned long flags;
Ben Skeggs6ee73862009-12-11 19:24:15 +1000256
Maarten Lankhorst29ba89b2014-01-09 11:03:11 +0100257 if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
258 return true;
Ben Skeggs79ca2772014-08-10 04:10:20 +1000259
Maarten Lankhorst29ba89b2014-01-09 11:03:11 +0100260 spin_lock_irqsave(&fctx->lock, flags);
261 nouveau_fence_update(fence->channel, fctx);
262 spin_unlock_irqrestore(&fctx->lock, flags);
Ben Skeggse18c0802013-01-31 14:57:33 +1000263 }
Maarten Lankhorst29ba89b2014-01-09 11:03:11 +0100264 return fence_is_signaled(&fence->base);
Ben Skeggse18c0802013-01-31 14:57:33 +1000265}
266
/*
 * fence_ops::wait for the legacy (polling) backend: poll
 * nouveau_fence_done() with exponential hrtimer back-off, starting at
 * 1us per sleep and doubling up to a 1ms cap.  Returns the remaining
 * jiffies (> 0) on completion, 0 on timeout, or -ERESTARTSYS if a
 * signal arrives while @intr.
 */
static long
nouveau_fence_wait_legacy(struct fence *f, bool intr, long wait)
{
	struct nouveau_fence *fence = from_fence(f);
	unsigned long sleep_time = NSEC_PER_MSEC / 1000;	/* 1us */
	unsigned long t = jiffies, timeout = t + wait;

	while (!nouveau_fence_done(fence)) {
		ktime_t kt;

		/* Sample before the timeout check so the final
		 * "timeout - t" remainder is always positive. */
		t = jiffies;

		if (wait != MAX_SCHEDULE_TIMEOUT && time_after_eq(t, timeout)) {
			__set_current_state(TASK_RUNNING);
			return 0;
		}

		__set_current_state(intr ? TASK_INTERRUPTIBLE :
					   TASK_UNINTERRUPTIBLE);

		kt = ktime_set(0, sleep_time);
		schedule_hrtimeout(&kt, HRTIMER_MODE_REL);
		sleep_time *= 2;
		if (sleep_time > NSEC_PER_MSEC)
			sleep_time = NSEC_PER_MSEC;

		/* NOTE(review): signals are only checked after sleeping,
		 * so a signal pending on entry costs one sleep period. */
		if (intr && signal_pending(current))
			return -ERESTARTSYS;
	}

	__set_current_state(TASK_RUNNING);

	return timeout - t;
}
301
302static int
303nouveau_fence_wait_busy(struct nouveau_fence *fence, bool intr)
304{
305 int ret = 0;
306
307 while (!nouveau_fence_done(fence)) {
308 if (time_after_eq(jiffies, fence->timeout)) {
309 ret = -EBUSY;
310 break;
Marcin Slusarzbd35fe52011-03-09 14:22:19 +0100311 }
Ben Skeggs6ee73862009-12-11 19:24:15 +1000312
Maarten Lankhorst29ba89b2014-01-09 11:03:11 +0100313 __set_current_state(intr ?
314 TASK_INTERRUPTIBLE :
315 TASK_UNINTERRUPTIBLE);
316
Ben Skeggs6ee73862009-12-11 19:24:15 +1000317 if (intr && signal_pending(current)) {
Ben Skeggs9ddc8c52009-12-15 11:04:25 +1000318 ret = -ERESTARTSYS;
Ben Skeggs6ee73862009-12-11 19:24:15 +1000319 break;
320 }
321 }
322
323 __set_current_state(TASK_RUNNING);
Ben Skeggs6ee73862009-12-11 19:24:15 +1000324 return ret;
325}
326
Ben Skeggs5e120f62012-04-30 13:55:29 +1000327int
Maarten Lankhorst29ba89b2014-01-09 11:03:11 +0100328nouveau_fence_wait(struct nouveau_fence *fence, bool lazy, bool intr)
329{
330 long ret;
331
332 if (!lazy)
333 return nouveau_fence_wait_busy(fence, intr);
334
335 ret = fence_wait_timeout(&fence->base, intr, 15 * HZ);
336 if (ret < 0)
337 return ret;
338 else if (!ret)
339 return -EBUSY;
340 else
341 return 0;
342}
343
/*
 * Make @chan wait on the relevant fences attached to @nvbo's reservation
 * object before it may touch the buffer.  For fences that belong to one
 * of our own channels the wait is queued on the GPU via fctx->sync();
 * otherwise (foreign fence, or GPU-side sync failed) the CPU blocks in
 * fence_wait().
 *
 * @exclusive: true when the caller will write the buffer, so every
 * shared (reader) fence must be waited on too; false waits only on the
 * exclusive fence and pre-reserves a shared slot for the caller's own
 * fence.
 */
int
nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool exclusive)
{
	struct nouveau_fence_chan *fctx = chan->fence;
	struct fence *fence;
	struct reservation_object *resv = nvbo->bo.resv;
	struct reservation_object_list *fobj;
	struct nouveau_fence *f;
	int ret = 0, i;

	if (!exclusive) {
		/* Reader: reserve room for the shared fence we add later. */
		ret = reservation_object_reserve_shared(resv);

		if (ret)
			return ret;
	}

	fobj = reservation_object_get_list(resv);
	fence = reservation_object_get_excl(resv);

	if (fence && (!exclusive || !fobj || !fobj->shared_count)) {
		struct nouveau_channel *prev = NULL;

		f = nouveau_local_fence(fence, chan->drm);
		if (f)
			prev = f->channel;

		/* Same channel: ordering is implicit.  Other local
		 * channel: try an on-GPU sync, CPU-wait on failure. */
		if (!prev || (prev != chan && (ret = fctx->sync(f, prev, chan))))
			ret = fence_wait(fence, true);

		return ret;
	}

	if (!exclusive || !fobj)
		return ret;

	/* Writer: also wait on every shared (reader) fence. */
	for (i = 0; i < fobj->shared_count && !ret; ++i) {
		struct nouveau_channel *prev = NULL;

		fence = rcu_dereference_protected(fobj->shared[i],
						reservation_object_held(resv));

		f = nouveau_local_fence(fence, chan->drm);
		if (f)
			prev = f->channel;

		/* NOTE(review): unlike the exclusive path above there is no
		 * prev != chan short-circuit here, so a shared fence from
		 * this very channel triggers a redundant sync — confirm
		 * whether that is intended. */
		if (!prev || (ret = fctx->sync(f, prev, chan)))
			ret = fence_wait(fence, true);

		if (ret)
			break;
	}

	return ret;
}
399
400void
401nouveau_fence_unref(struct nouveau_fence **pfence)
402{
403 if (*pfence)
Maarten Lankhorst29ba89b2014-01-09 11:03:11 +0100404 fence_put(&(*pfence)->base);
Ben Skeggsd375e7d52012-04-30 13:30:00 +1000405 *pfence = NULL;
406}
407
Ben Skeggsd375e7d52012-04-30 13:30:00 +1000408int
Ben Skeggs264ce192013-02-14 13:43:21 +1000409nouveau_fence_new(struct nouveau_channel *chan, bool sysmem,
410 struct nouveau_fence **pfence)
Ben Skeggsd375e7d52012-04-30 13:30:00 +1000411{
412 struct nouveau_fence *fence;
413 int ret = 0;
414
Ben Skeggse193b1d2012-07-19 10:51:42 +1000415 if (unlikely(!chan->fence))
Ben Skeggs5e120f62012-04-30 13:55:29 +1000416 return -ENODEV;
417
Ben Skeggsd375e7d52012-04-30 13:30:00 +1000418 fence = kzalloc(sizeof(*fence), GFP_KERNEL);
419 if (!fence)
420 return -ENOMEM;
Ben Skeggs264ce192013-02-14 13:43:21 +1000421
422 fence->sysmem = sysmem;
Ben Skeggsd375e7d52012-04-30 13:30:00 +1000423
Cong Dingb5d8f052013-01-15 18:19:49 +0100424 ret = nouveau_fence_emit(fence, chan);
425 if (ret)
426 nouveau_fence_unref(&fence);
Ben Skeggsd375e7d52012-04-30 13:30:00 +1000427
428 *pfence = fence;
429 return ret;
430}
Maarten Lankhorst29ba89b2014-01-09 11:03:11 +0100431
/* fence_ops::get_driver_name.  (The doubled "get_get" in the identifier
 * is historical; renaming would churn both ops tables below.) */
static const char *nouveau_fence_get_get_driver_name(struct fence *fence)
{
	return "nouveau";
}
436
437static const char *nouveau_fence_get_timeline_name(struct fence *f)
438{
439 struct nouveau_fence *fence = from_fence(f);
440 struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
441
442 return fence->channel ? fctx->name : "dead channel";
443}
444
/*
 * In an ideal world, read would not assume the channel context is still alive.
 * This function may be called from another device, running into free memory as a
 * result. The drm node should still be there, so we can derive the index from
 * the fence context.
 */
static bool nouveau_fence_is_signaled(struct fence *f)
{
	struct nouveau_fence *fence = from_fence(f);
	struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
	struct nouveau_channel *chan = fence->channel;

	/* Signed compare so a wrapped 32-bit seqno still orders correctly. */
	return (int)(fctx->read(chan) - fence->base.seqno) >= 0;
}
459
/*
 * fence_ops::enable_signaling for the legacy backend.  With no interrupt
 * to enable, report whether the fence is still pending; if it already
 * completed, unlink it and drop the pending-list reference here (the
 * fence core performs the actual fence_signal() after we return false).
 */
static bool nouveau_fence_no_signaling(struct fence *f)
{
	struct nouveau_fence *fence = from_fence(f);

	/*
	 * caller should have a reference on the fence,
	 * else fence could get freed here
	 */
	WARN_ON(atomic_read(&fence->base.refcount.refcount) <= 1);

	/*
	 * This needs uevents to work correctly, but fence_add_callback relies on
	 * being able to enable signaling. It will still get signaled eventually,
	 * just not right away.
	 */
	if (nouveau_fence_is_signaled(f)) {
		list_del(&fence->head);

		/* Drop the pending-list reference taken at emit time. */
		fence_put(&fence->base);
		return false;
	}

	return true;
}
484
/* Backend for channels without non-stall interrupts: completion is only
 * observed by polling, hence the custom .wait implementation. */
static const struct fence_ops nouveau_fence_ops_legacy = {
	.get_driver_name = nouveau_fence_get_get_driver_name,
	.get_timeline_name = nouveau_fence_get_timeline_name,
	.enable_signaling = nouveau_fence_no_signaling,
	.signaled = nouveau_fence_is_signaled,
	.wait = nouveau_fence_wait_legacy,
	.release = NULL
};
493
/*
 * fence_ops::enable_signaling for the uevent backend.  Hold a reference
 * on the channel's nvif notifier while at least one fence wants
 * interrupt-driven signalling; the matching put happens either below
 * (fence turned out to be signalled already) or in
 * nouveau_fence_signal() via the FENCE_FLAG_USER_BITS marker.
 */
static bool nouveau_fence_enable_signaling(struct fence *f)
{
	struct nouveau_fence *fence = from_fence(f);
	struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
	bool ret;

	if (!fctx->notify_ref++)
		nvif_notify_get(&fctx->notify);

	ret = nouveau_fence_no_signaling(f);
	if (ret)
		/* Tell nouveau_fence_signal() to drop our notify_ref. */
		set_bit(FENCE_FLAG_USER_BITS, &fence->base.flags);
	else if (!--fctx->notify_ref)
		nvif_notify_put(&fctx->notify);

	return ret;
}
511
/* Backend for channels with non-stall interrupts: signalling is driven
 * by nouveau_fence_wait_uevent_handler(), so the stock wait suffices. */
static const struct fence_ops nouveau_fence_ops_uevent = {
	.get_driver_name = nouveau_fence_get_get_driver_name,
	.get_timeline_name = nouveau_fence_get_timeline_name,
	.enable_signaling = nouveau_fence_enable_signaling,
	.signaled = nouveau_fence_is_signaled,
	.wait = fence_default_wait,
	.release = NULL
};