/*
 * drivers/base/sync.c
 *
 * Copyright (C) 2012 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/sync.h>
#include <linux/uaccess.h>

#include <linux/anon_inodes.h>

#define CREATE_TRACE_POINTS
#include <trace/events/sync.h>

static void sync_fence_signal_pt(struct sync_pt *pt);
static int _sync_pt_has_signaled(struct sync_pt *pt);
static void sync_fence_free(struct kref *kref);
static void sync_dump(void);

static LIST_HEAD(sync_timeline_list_head);
static DEFINE_SPINLOCK(sync_timeline_list_lock);

static LIST_HEAD(sync_fence_list_head);
static DEFINE_SPINLOCK(sync_fence_list_lock);

struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
					   int size, const char *name)
{
	struct sync_timeline *obj;
	unsigned long flags;

	if (size < sizeof(struct sync_timeline))
		return NULL;

	obj = kzalloc(size, GFP_KERNEL);
	if (obj == NULL)
		return NULL;

	kref_init(&obj->kref);
	obj->ops = ops;
	strlcpy(obj->name, name, sizeof(obj->name));

	INIT_LIST_HEAD(&obj->child_list_head);
	spin_lock_init(&obj->child_list_lock);

	INIT_LIST_HEAD(&obj->active_list_head);
	spin_lock_init(&obj->active_list_lock);

	spin_lock_irqsave(&sync_timeline_list_lock, flags);
	list_add_tail(&obj->sync_timeline_list, &sync_timeline_list_head);
	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

	return obj;
}
EXPORT_SYMBOL(sync_timeline_create);
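
/*
 * Example: a minimal driver-side timeline (an illustrative sketch only;
 * the example_* names are hypothetical, not part of this driver).  A
 * driver embeds struct sync_timeline at the start of its own object,
 * supplies ops, and lets sync_timeline_create() do the allocation:
 *
 *	struct example_timeline {
 *		struct sync_timeline	obj;	// must come first
 *		u32			value;	// last signaled point
 *	};
 *
 *	static const struct sync_timeline_ops example_timeline_ops = {
 *		.driver_name	= "example",
 *		.dup		= example_pt_dup,
 *		.has_signaled	= example_pt_has_signaled,
 *		.compare	= example_pt_compare,
 *	};
 *
 *	struct example_timeline *tl = (struct example_timeline *)
 *		sync_timeline_create(&example_timeline_ops,
 *				     sizeof(struct example_timeline),
 *				     "example_tl");
 */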

static void sync_timeline_free(struct kref *kref)
{
	struct sync_timeline *obj =
		container_of(kref, struct sync_timeline, kref);
	unsigned long flags;

	if (obj->ops->release_obj)
		obj->ops->release_obj(obj);

	spin_lock_irqsave(&sync_timeline_list_lock, flags);
	list_del(&obj->sync_timeline_list);
	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

	kfree(obj);
}

void sync_timeline_destroy(struct sync_timeline *obj)
{
	obj->destroyed = true;

	/*
	 * If this is not the last reference, signal any children
	 * that their parent is going away.
	 */

	if (!kref_put(&obj->kref, sync_timeline_free))
		sync_timeline_signal(obj);
}
EXPORT_SYMBOL(sync_timeline_destroy);

static void sync_timeline_add_pt(struct sync_timeline *obj, struct sync_pt *pt)
{
	unsigned long flags;

	pt->parent = obj;

	spin_lock_irqsave(&obj->child_list_lock, flags);
	list_add_tail(&pt->child_list, &obj->child_list_head);
	spin_unlock_irqrestore(&obj->child_list_lock, flags);
}

static void sync_timeline_remove_pt(struct sync_pt *pt)
{
	struct sync_timeline *obj = pt->parent;
	unsigned long flags;

	spin_lock_irqsave(&obj->active_list_lock, flags);
	if (!list_empty(&pt->active_list))
		list_del_init(&pt->active_list);
	spin_unlock_irqrestore(&obj->active_list_lock, flags);

	spin_lock_irqsave(&obj->child_list_lock, flags);
	if (!list_empty(&pt->child_list))
		list_del_init(&pt->child_list);
	spin_unlock_irqrestore(&obj->child_list_lock, flags);
}

void sync_timeline_signal(struct sync_timeline *obj)
{
	unsigned long flags;
	LIST_HEAD(signaled_pts);
	struct list_head *pos, *n;

	trace_sync_timeline(obj);

	spin_lock_irqsave(&obj->active_list_lock, flags);

	list_for_each_safe(pos, n, &obj->active_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, active_list);

		if (_sync_pt_has_signaled(pt)) {
			list_del_init(pos);
			list_add(&pt->signaled_list, &signaled_pts);
			kref_get(&pt->fence->kref);
		}
	}

	spin_unlock_irqrestore(&obj->active_list_lock, flags);

	list_for_each_safe(pos, n, &signaled_pts) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, signaled_list);

		list_del_init(pos);
		sync_fence_signal_pt(pt);
		kref_put(&pt->fence->kref, sync_fence_free);
	}
}
EXPORT_SYMBOL(sync_timeline_signal);
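
/*
 * Example: signaling from a completion path (an illustrative sketch;
 * example_timeline and its ->value are the hypothetical driver state from
 * the sketch above).  The driver advances its counter, then calls
 * sync_timeline_signal() so any pts that have now signaled are collected
 * and their fences woken:
 *
 *	static void example_irq_handler(struct example_timeline *tl)
 *	{
 *		tl->value++;
 *		sync_timeline_signal(&tl->obj);
 *	}
 */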

struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size)
{
	struct sync_pt *pt;

	if (size < sizeof(struct sync_pt))
		return NULL;

	pt = kzalloc(size, GFP_KERNEL);
	if (pt == NULL)
		return NULL;

	INIT_LIST_HEAD(&pt->active_list);
	kref_get(&parent->kref);
	sync_timeline_add_pt(parent, pt);

	return pt;
}
EXPORT_SYMBOL(sync_pt_create);
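
/*
 * Example: a driver-specific sync_pt (an illustrative sketch; example_pt
 * and example_pt_create() are hypothetical).  As with timelines, the
 * driver embeds struct sync_pt first and passes its own size so a single
 * allocation covers both:
 *
 *	struct example_pt {
 *		struct sync_pt	pt;	// must come first
 *		u32		value;	// timeline value this pt waits for
 *	};
 *
 *	static struct sync_pt *example_pt_create(struct example_timeline *tl,
 *						 u32 value)
 *	{
 *		struct example_pt *ept = (struct example_pt *)
 *			sync_pt_create(&tl->obj, sizeof(struct example_pt));
 *
 *		if (ept)
 *			ept->value = value;
 *		return (struct sync_pt *)ept;
 *	}
 */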

void sync_pt_free(struct sync_pt *pt)
{
	if (pt->parent->ops->free_pt)
		pt->parent->ops->free_pt(pt);

	sync_timeline_remove_pt(pt);

	kref_put(&pt->parent->kref, sync_timeline_free);

	kfree(pt);
}
EXPORT_SYMBOL(sync_pt_free);

/* call with pt->parent->active_list_lock held */
static int _sync_pt_has_signaled(struct sync_pt *pt)
{
	int old_status = pt->status;

	if (!pt->status)
		pt->status = pt->parent->ops->has_signaled(pt);

	if (!pt->status && pt->parent->destroyed)
		pt->status = -ENOENT;

	if (pt->status != old_status)
		pt->timestamp = ktime_get();

	return pt->status;
}

static struct sync_pt *sync_pt_dup(struct sync_pt *pt)
{
	return pt->parent->ops->dup(pt);
}

/* Adds a sync pt to the active queue. Called when added to a fence */
static void sync_pt_activate(struct sync_pt *pt)
{
	struct sync_timeline *obj = pt->parent;
	unsigned long flags;
	int err;

	spin_lock_irqsave(&obj->active_list_lock, flags);

	err = _sync_pt_has_signaled(pt);
	if (err != 0)
		goto out;

	list_add_tail(&pt->active_list, &obj->active_list_head);

out:
	spin_unlock_irqrestore(&obj->active_list_lock, flags);
}

static int sync_fence_release(struct inode *inode, struct file *file);
static unsigned int sync_fence_poll(struct file *file, poll_table *wait);
static long sync_fence_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg);

static const struct file_operations sync_fence_fops = {
	.release = sync_fence_release,
	.poll = sync_fence_poll,
	.unlocked_ioctl = sync_fence_ioctl,
};

static struct sync_fence *sync_fence_alloc(const char *name)
{
	struct sync_fence *fence;
	unsigned long flags;

	fence = kzalloc(sizeof(struct sync_fence), GFP_KERNEL);
	if (fence == NULL)
		return NULL;

	fence->file = anon_inode_getfile("sync_fence", &sync_fence_fops,
					 fence, 0);
	if (fence->file == NULL)
		goto err;

	kref_init(&fence->kref);
	strlcpy(fence->name, name, sizeof(fence->name));

	INIT_LIST_HEAD(&fence->pt_list_head);
	INIT_LIST_HEAD(&fence->waiter_list_head);
	spin_lock_init(&fence->waiter_list_lock);

	init_waitqueue_head(&fence->wq);

	spin_lock_irqsave(&sync_fence_list_lock, flags);
	list_add_tail(&fence->sync_fence_list, &sync_fence_list_head);
	spin_unlock_irqrestore(&sync_fence_list_lock, flags);

	return fence;

err:
	kfree(fence);
	return NULL;
}

/* TODO: implement a create which takes more than one sync_pt */
struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt)
{
	struct sync_fence *fence;

	if (pt->fence)
		return NULL;

	fence = sync_fence_alloc(name);
	if (fence == NULL)
		return NULL;

	pt->fence = fence;
	list_add(&pt->pt_list, &fence->pt_list_head);
	sync_pt_activate(pt);

	/*
	 * signal the fence in case pt had already signaled before
	 * sync_pt_activate(pt) was called
	 */
	sync_fence_signal_pt(pt);

	return fence;
}
EXPORT_SYMBOL(sync_fence_create);
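
/*
 * Example: handing a fence to userspace (an illustrative sketch built on
 * the hypothetical example_* helpers above).  A driver wraps a fresh pt
 * in a fence and installs it on an fd returned to userspace:
 *
 *	int fd = get_unused_fd();
 *	struct sync_pt *pt;
 *	struct sync_fence *fence;
 *
 *	if (fd < 0)
 *		return fd;
 *
 *	pt = example_pt_create(tl, value);
 *	if (pt == NULL) {
 *		put_unused_fd(fd);
 *		return -ENOMEM;
 *	}
 *
 *	fence = sync_fence_create("example_fence", pt);
 *	if (fence == NULL) {
 *		sync_pt_free(pt);
 *		put_unused_fd(fd);
 *		return -ENOMEM;
 *	}
 *
 *	sync_fence_install(fence, fd);
 *	return fd;
 */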

static int sync_fence_copy_pts(struct sync_fence *dst, struct sync_fence *src)
{
	struct list_head *pos;

	list_for_each(pos, &src->pt_list_head) {
		struct sync_pt *orig_pt =
			container_of(pos, struct sync_pt, pt_list);
		struct sync_pt *new_pt = sync_pt_dup(orig_pt);

		if (new_pt == NULL)
			return -ENOMEM;

		new_pt->fence = dst;
		list_add(&new_pt->pt_list, &dst->pt_list_head);
		sync_pt_activate(new_pt);
	}

	return 0;
}

static int sync_fence_merge_pts(struct sync_fence *dst, struct sync_fence *src)
{
	struct list_head *src_pos, *dst_pos, *n;

	list_for_each(src_pos, &src->pt_list_head) {
		struct sync_pt *src_pt =
			container_of(src_pos, struct sync_pt, pt_list);
		bool collapsed = false;

		list_for_each_safe(dst_pos, n, &dst->pt_list_head) {
			struct sync_pt *dst_pt =
				container_of(dst_pos, struct sync_pt, pt_list);
			/* collapse two sync_pts on the same timeline
			 * to a single sync_pt that will signal at
			 * the later of the two
			 */
			if (dst_pt->parent == src_pt->parent) {
				if (dst_pt->parent->ops->compare(dst_pt,
								 src_pt) == -1) {
					struct sync_pt *new_pt =
						sync_pt_dup(src_pt);
					if (new_pt == NULL)
						return -ENOMEM;

					new_pt->fence = dst;
					list_replace(&dst_pt->pt_list,
						     &new_pt->pt_list);
					sync_pt_activate(new_pt);
					sync_pt_free(dst_pt);
				}
				collapsed = true;
				break;
			}
		}

		if (!collapsed) {
			struct sync_pt *new_pt = sync_pt_dup(src_pt);

			if (new_pt == NULL)
				return -ENOMEM;

			new_pt->fence = dst;
			list_add(&new_pt->pt_list, &dst->pt_list_head);
			sync_pt_activate(new_pt);
		}
	}

	return 0;
}

static void sync_fence_detach_pts(struct sync_fence *fence)
{
	struct list_head *pos, *n;

	list_for_each_safe(pos, n, &fence->pt_list_head) {
		struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
		sync_timeline_remove_pt(pt);
	}
}

static void sync_fence_free_pts(struct sync_fence *fence)
{
	struct list_head *pos, *n;

	list_for_each_safe(pos, n, &fence->pt_list_head) {
		struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
		sync_pt_free(pt);
	}
}

struct sync_fence *sync_fence_fdget(int fd)
{
	struct file *file = fget(fd);

	if (file == NULL)
		return NULL;

	if (file->f_op != &sync_fence_fops)
		goto err;

	return file->private_data;

err:
	fput(file);
	return NULL;
}
EXPORT_SYMBOL(sync_fence_fdget);

void sync_fence_put(struct sync_fence *fence)
{
	fput(fence->file);
}
EXPORT_SYMBOL(sync_fence_put);

void sync_fence_install(struct sync_fence *fence, int fd)
{
	fd_install(fd, fence->file);
}
EXPORT_SYMBOL(sync_fence_install);

static int sync_fence_get_status(struct sync_fence *fence)
{
	struct list_head *pos;
	int status = 1;

	list_for_each(pos, &fence->pt_list_head) {
		struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
		int pt_status = pt->status;

		if (pt_status < 0) {
			status = pt_status;
			break;
		} else if (status == 1) {
			status = pt_status;
		}
	}

	return status;
}

struct sync_fence *sync_fence_merge(const char *name,
				    struct sync_fence *a, struct sync_fence *b)
{
	struct sync_fence *fence;
	int err;

	fence = sync_fence_alloc(name);
	if (fence == NULL)
		return NULL;

	err = sync_fence_copy_pts(fence, a);
	if (err < 0)
		goto err;

	err = sync_fence_merge_pts(fence, b);
	if (err < 0)
		goto err;

	/*
	 * signal the fence in case one of its pts had already signaled
	 * before it was activated
	 */
	sync_fence_signal_pt(list_first_entry(&fence->pt_list_head,
					      struct sync_pt,
					      pt_list));

	return fence;
err:
	sync_fence_free_pts(fence);
	kfree(fence);
	return NULL;
}
EXPORT_SYMBOL(sync_fence_merge);
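
/*
 * Example: combining fences (an illustrative sketch).  Merging yields a
 * third, independent fence that signals only once both inputs have; pts
 * that share a timeline are collapsed to whichever signals later:
 *
 *	struct sync_fence *ab = sync_fence_merge("a_plus_b", a, b);
 *
 *	if (ab == NULL)
 *		return -ENOMEM;
 *	// a and b are untouched and still hold their own references
 */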

static void sync_fence_signal_pt(struct sync_pt *pt)
{
	LIST_HEAD(signaled_waiters);
	struct sync_fence *fence = pt->fence;
	struct list_head *pos;
	struct list_head *n;
	unsigned long flags;
	int status;

	status = sync_fence_get_status(fence);

	spin_lock_irqsave(&fence->waiter_list_lock, flags);
	/*
	 * this should protect against two threads racing on the signaled
	 * false -> true transition
	 */
	if (status && !fence->status) {
		list_for_each_safe(pos, n, &fence->waiter_list_head)
			list_move(pos, &signaled_waiters);

		fence->status = status;
	} else {
		status = 0;
	}
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);

	if (status) {
		list_for_each_safe(pos, n, &signaled_waiters) {
			struct sync_fence_waiter *waiter =
				container_of(pos, struct sync_fence_waiter,
					     waiter_list);

			list_del(pos);
			waiter->callback(fence, waiter);
		}
		wake_up(&fence->wq);
	}
}

int sync_fence_wait_async(struct sync_fence *fence,
			  struct sync_fence_waiter *waiter)
{
	unsigned long flags;
	int err = 0;

	spin_lock_irqsave(&fence->waiter_list_lock, flags);

	if (fence->status) {
		err = fence->status;
		goto out;
	}

	list_add_tail(&waiter->waiter_list, &fence->waiter_list_head);
out:
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);

	return err;
}
EXPORT_SYMBOL(sync_fence_wait_async);

int sync_fence_cancel_async(struct sync_fence *fence,
			    struct sync_fence_waiter *waiter)
{
	struct list_head *pos;
	struct list_head *n;
	unsigned long flags;
	int ret = -ENOENT;

	spin_lock_irqsave(&fence->waiter_list_lock, flags);
	/*
	 * Make sure waiter is still in waiter_list because it is possible for
	 * the waiter to be removed from the list while the callback is still
	 * pending.
	 */
	list_for_each_safe(pos, n, &fence->waiter_list_head) {
		struct sync_fence_waiter *list_waiter =
			container_of(pos, struct sync_fence_waiter,
				     waiter_list);
		if (list_waiter == waiter) {
			list_del(pos);
			ret = 0;
			break;
		}
	}
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
	return ret;
}
EXPORT_SYMBOL(sync_fence_cancel_async);
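
/*
 * Example: asynchronous waiting (an illustrative sketch; the callback body
 * is hypothetical).  The waiter is initialized with sync_fence_waiter_init()
 * from <linux/sync.h>; the callback runs when the fence signals, and
 * sync_fence_wait_async() returns non-zero if it already has:
 *
 *	static void example_fence_cb(struct sync_fence *fence,
 *				     struct sync_fence_waiter *waiter)
 *	{
 *		// schedule work here; do not block in the callback
 *	}
 *
 *	sync_fence_waiter_init(&waiter, example_fence_cb);
 *	err = sync_fence_wait_async(fence, &waiter);
 *	if (err != 0)
 *		example_fence_cb(fence, &waiter);  // already signaled/error
 *
 * A still-pending waiter can be torn down with sync_fence_cancel_async(),
 * which returns -ENOENT if the callback was already dispatched.
 */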

static bool sync_fence_check(struct sync_fence *fence)
{
	/*
	 * Make sure that reads to fence->status are ordered with the
	 * wait queue event triggering
	 */
	smp_rmb();
	return fence->status != 0;
}

int sync_fence_wait(struct sync_fence *fence, long timeout)
{
	int err = 0;
	struct sync_pt *pt;

	trace_sync_wait(fence, 1);
	list_for_each_entry(pt, &fence->pt_list_head, pt_list)
		trace_sync_pt(pt);

	if (timeout > 0) {
		timeout = msecs_to_jiffies(timeout);
		err = wait_event_interruptible_timeout(fence->wq,
						       sync_fence_check(fence),
						       timeout);
	} else if (timeout < 0) {
		err = wait_event_interruptible(fence->wq,
					       sync_fence_check(fence));
	}
	trace_sync_wait(fence, 0);

	if (err < 0)
		return err;

	if (fence->status < 0) {
		pr_info("fence error %d on [%p]\n", fence->status, fence);
		sync_dump();
		return fence->status;
	}

	if (fence->status == 0) {
		pr_info("fence timeout on [%p] after %dms\n", fence,
			jiffies_to_msecs(timeout));
		sync_dump();
		return -ETIME;
	}

	return 0;
}
EXPORT_SYMBOL(sync_fence_wait);
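
/*
 * Example: blocking on a fence in the kernel (an illustrative sketch).
 * Timeouts are in milliseconds; a negative timeout waits indefinitely,
 * and a zero timeout just polls the current status:
 *
 *	err = sync_fence_wait(fence, 1000);	// wait up to one second
 *	if (err == -ETIME) {
 *		// the fence did not signal in time
 *	} else if (err < 0) {
 *		// interrupted, or the fence signaled in error
 *	}
 */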

static void sync_fence_free(struct kref *kref)
{
	struct sync_fence *fence = container_of(kref, struct sync_fence, kref);

	sync_fence_free_pts(fence);

	kfree(fence);
}

static int sync_fence_release(struct inode *inode, struct file *file)
{
	struct sync_fence *fence = file->private_data;
	unsigned long flags;

	/*
	 * We need to remove all ways to access this fence before dropping
	 * our ref.
	 *
	 * start with its membership in the global fence list
	 */
	spin_lock_irqsave(&sync_fence_list_lock, flags);
	list_del(&fence->sync_fence_list);
	spin_unlock_irqrestore(&sync_fence_list_lock, flags);

	/*
	 * remove its pts from their parents so that sync_timeline_signal()
	 * can't reference the fence.
	 */
	sync_fence_detach_pts(fence);

	kref_put(&fence->kref, sync_fence_free);

	return 0;
}

static unsigned int sync_fence_poll(struct file *file, poll_table *wait)
{
	struct sync_fence *fence = file->private_data;

	poll_wait(file, &fence->wq, wait);

	/*
	 * Make sure that reads to fence->status are ordered with the
	 * wait queue event triggering
	 */
	smp_rmb();

	if (fence->status == 1)
		return POLLIN;
	else if (fence->status < 0)
		return POLLERR;
	else
		return 0;
}

static long sync_fence_ioctl_wait(struct sync_fence *fence, unsigned long arg)
{
	__s32 value;

	if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
		return -EFAULT;

	return sync_fence_wait(fence, value);
}

static long sync_fence_ioctl_merge(struct sync_fence *fence, unsigned long arg)
{
	int fd = get_unused_fd();
	int err;
	struct sync_fence *fence2, *fence3;
	struct sync_merge_data data;

	if (fd < 0)
		return fd;

	if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
		err = -EFAULT;
		goto err_put_fd;
	}

	fence2 = sync_fence_fdget(data.fd2);
	if (fence2 == NULL) {
		err = -ENOENT;
		goto err_put_fd;
	}

	data.name[sizeof(data.name) - 1] = '\0';
	fence3 = sync_fence_merge(data.name, fence, fence2);
	if (fence3 == NULL) {
		err = -ENOMEM;
		goto err_put_fence2;
	}

	data.fence = fd;
	if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
		err = -EFAULT;
		goto err_put_fence3;
	}

	sync_fence_install(fence3, fd);
	sync_fence_put(fence2);
	return 0;

err_put_fence3:
	sync_fence_put(fence3);

err_put_fence2:
	sync_fence_put(fence2);

err_put_fd:
	put_unused_fd(fd);
	return err;
}

static int sync_fill_pt_info(struct sync_pt *pt, void *data, int size)
{
	struct sync_pt_info *info = data;
	int ret;

	if (size < sizeof(struct sync_pt_info))
		return -ENOMEM;

	info->len = sizeof(struct sync_pt_info);

	if (pt->parent->ops->fill_driver_data) {
		ret = pt->parent->ops->fill_driver_data(pt, info->driver_data,
							size - sizeof(*info));
		if (ret < 0)
			return ret;

		info->len += ret;
	}

	strlcpy(info->obj_name, pt->parent->name, sizeof(info->obj_name));
	strlcpy(info->driver_name, pt->parent->ops->driver_name,
		sizeof(info->driver_name));
	info->status = pt->status;
	info->timestamp_ns = ktime_to_ns(pt->timestamp);

	return info->len;
}

static long sync_fence_ioctl_fence_info(struct sync_fence *fence,
					unsigned long arg)
{
	struct sync_fence_info_data *data;
	struct list_head *pos;
	__u32 size;
	__u32 len = 0;
	int ret;

	if (copy_from_user(&size, (void __user *)arg, sizeof(size)))
		return -EFAULT;

	if (size < sizeof(struct sync_fence_info_data))
		return -EINVAL;

	if (size > 4096)
		size = 4096;

	data = kzalloc(size, GFP_KERNEL);
	if (data == NULL)
		return -ENOMEM;

	strlcpy(data->name, fence->name, sizeof(data->name));
	data->status = fence->status;
	len = sizeof(struct sync_fence_info_data);

	list_for_each(pos, &fence->pt_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, pt_list);

		ret = sync_fill_pt_info(pt, (u8 *)data + len, size - len);

		if (ret < 0)
			goto out;

		len += ret;
	}

	data->len = len;

	if (copy_to_user((void __user *)arg, data, len))
		ret = -EFAULT;
	else
		ret = 0;

out:
	kfree(data);

	return ret;
}

static long sync_fence_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	struct sync_fence *fence = file->private_data;

	switch (cmd) {
	case SYNC_IOC_WAIT:
		return sync_fence_ioctl_wait(fence, arg);

	case SYNC_IOC_MERGE:
		return sync_fence_ioctl_merge(fence, arg);

	case SYNC_IOC_FENCE_INFO:
		return sync_fence_ioctl_fence_info(fence, arg);

	default:
		return -ENOTTY;
	}
}
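
/*
 * Example: driving these ioctls from userspace (an illustrative sketch;
 * the fds and the "merged" name are hypothetical).  A process holding
 * sync fence fds can wait on and merge them without knowing which driver
 * created them:
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/sync.h>	// or a local copy of this driver's UAPI
 *
 *	__s32 timeout = 1000;			// in ms
 *	ioctl(fence_fd, SYNC_IOC_WAIT, &timeout);
 *
 *	struct sync_merge_data merge = { .fd2 = other_fd };
 *	strncpy(merge.name, "merged", sizeof(merge.name));
 *	if (ioctl(fence_fd, SYNC_IOC_MERGE, &merge) == 0) {
 *		// merge.fence now holds the fd of the combined fence
 *	}
 */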

#ifdef CONFIG_DEBUG_FS
static const char *sync_status_str(int status)
{
	if (status > 0)
		return "signaled";
	else if (status == 0)
		return "active";
	else
		return "error";
}

static void sync_print_pt(struct seq_file *s, struct sync_pt *pt, bool fence)
{
	int status = pt->status;
	seq_printf(s, "  %s%spt %s",
		   fence ? pt->parent->name : "",
		   fence ? "_" : "",
		   sync_status_str(status));
	if (pt->status) {
		struct timeval tv = ktime_to_timeval(pt->timestamp);
		seq_printf(s, "@%ld.%06ld", tv.tv_sec, tv.tv_usec);
	}

	if (pt->parent->ops->timeline_value_str &&
	    pt->parent->ops->pt_value_str) {
		char value[64];
		pt->parent->ops->pt_value_str(pt, value, sizeof(value));
		seq_printf(s, ": %s", value);
		if (fence) {
			pt->parent->ops->timeline_value_str(pt->parent, value,
							    sizeof(value));
			seq_printf(s, " / %s", value);
		}
	} else if (pt->parent->ops->print_pt) {
		seq_printf(s, ": ");
		pt->parent->ops->print_pt(s, pt);
	}

	seq_printf(s, "\n");
}

static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
{
	struct list_head *pos;
	unsigned long flags;

	seq_printf(s, "%s %s", obj->name, obj->ops->driver_name);

	if (obj->ops->timeline_value_str) {
		char value[64];
		obj->ops->timeline_value_str(obj, value, sizeof(value));
		seq_printf(s, ": %s", value);
	} else if (obj->ops->print_obj) {
		seq_printf(s, ": ");
		obj->ops->print_obj(s, obj);
	}

	seq_printf(s, "\n");

	spin_lock_irqsave(&obj->child_list_lock, flags);
	list_for_each(pos, &obj->child_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, child_list);
		sync_print_pt(s, pt, false);
	}
	spin_unlock_irqrestore(&obj->child_list_lock, flags);
}

static void sync_print_fence(struct seq_file *s, struct sync_fence *fence)
{
	struct list_head *pos;
	unsigned long flags;

	seq_printf(s, "[%p] %s: %s\n", fence, fence->name,
		   sync_status_str(fence->status));

	list_for_each(pos, &fence->pt_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, pt_list);
		sync_print_pt(s, pt, true);
	}

	spin_lock_irqsave(&fence->waiter_list_lock, flags);
	list_for_each(pos, &fence->waiter_list_head) {
		struct sync_fence_waiter *waiter =
			container_of(pos, struct sync_fence_waiter,
				     waiter_list);

		seq_printf(s, "waiter %pF\n", waiter->callback);
	}
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
}

static int sync_debugfs_show(struct seq_file *s, void *unused)
{
	unsigned long flags;
	struct list_head *pos;

	seq_printf(s, "objs:\n--------------\n");

	spin_lock_irqsave(&sync_timeline_list_lock, flags);
	list_for_each(pos, &sync_timeline_list_head) {
		struct sync_timeline *obj =
			container_of(pos, struct sync_timeline,
				     sync_timeline_list);

		sync_print_obj(s, obj);
		seq_printf(s, "\n");
	}
	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

	seq_printf(s, "fences:\n--------------\n");

	spin_lock_irqsave(&sync_fence_list_lock, flags);
	list_for_each(pos, &sync_fence_list_head) {
		struct sync_fence *fence =
			container_of(pos, struct sync_fence, sync_fence_list);

		sync_print_fence(s, fence);
		seq_printf(s, "\n");
	}
	spin_unlock_irqrestore(&sync_fence_list_lock, flags);
	return 0;
}

static int sync_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, sync_debugfs_show, inode->i_private);
}

static const struct file_operations sync_debugfs_fops = {
	.open = sync_debugfs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static __init int sync_debugfs_init(void)
{
	debugfs_create_file("sync", S_IRUGO, NULL, NULL, &sync_debugfs_fops);
	return 0;
}
late_initcall(sync_debugfs_init);

#define DUMP_CHUNK 256
static char sync_dump_buf[64 * 1024];
static void sync_dump(void)
{
	struct seq_file s = {
		.buf = sync_dump_buf,
		.size = sizeof(sync_dump_buf) - 1,
	};
	int i;

	sync_debugfs_show(&s, NULL);

	for (i = 0; i < s.count; i += DUMP_CHUNK) {
		if ((s.count - i) > DUMP_CHUNK) {
			char c = s.buf[i + DUMP_CHUNK];
			s.buf[i + DUMP_CHUNK] = 0;
			pr_cont("%s", s.buf + i);
			s.buf[i + DUMP_CHUNK] = c;
		} else {
			s.buf[s.count] = 0;
			pr_cont("%s", s.buf + i);
		}
	}
}
#else
static void sync_dump(void)
{
}
#endif