/*
 * drivers/base/sync.c
 *
 * Copyright (C) 2012 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/sync.h>
#include <linux/uaccess.h>

#include <linux/anon_inodes.h>

static void sync_fence_signal_pt(struct sync_pt *pt);
static int _sync_pt_has_signaled(struct sync_pt *pt);

static LIST_HEAD(sync_timeline_list_head);
static DEFINE_SPINLOCK(sync_timeline_list_lock);

static LIST_HEAD(sync_fence_list_head);
static DEFINE_SPINLOCK(sync_fence_list_lock);

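/**
 * sync_timeline_create() - creates a sync object
 * @ops:	implementation ops for the object
 * @size:	size to allocate for this object; must be at least
 *		sizeof(struct sync_timeline) so drivers can embed it
 * @name:	sync_timeline name
 *
 * Allocates and initializes a new timeline and adds it to the global
 * timeline list.  Returns NULL on bad size or allocation failure.
 */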
struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
					   int size, const char *name)
{
	struct sync_timeline *obj;
	unsigned long flags;

	if (size < sizeof(struct sync_timeline))
		return NULL;

	obj = kzalloc(size, GFP_KERNEL);
	if (obj == NULL)
		return NULL;

	obj->ops = ops;
	strlcpy(obj->name, name, sizeof(obj->name));

	INIT_LIST_HEAD(&obj->child_list_head);
	spin_lock_init(&obj->child_list_lock);

	INIT_LIST_HEAD(&obj->active_list_head);
	spin_lock_init(&obj->active_list_lock);

	spin_lock_irqsave(&sync_timeline_list_lock, flags);
	list_add_tail(&obj->sync_timeline_list, &sync_timeline_list_head);
	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

	return obj;
}
EXPORT_SYMBOL(sync_timeline_create);

static void sync_timeline_free(struct sync_timeline *obj)
{
	unsigned long flags;

	if (obj->ops->release_obj)
		obj->ops->release_obj(obj);

	spin_lock_irqsave(&sync_timeline_list_lock, flags);
	list_del(&obj->sync_timeline_list);
	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

	kfree(obj);
}

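/**
 * sync_timeline_destroy() - destroys a sync object
 * @obj:	sync_timeline to destroy
 *
 * Marks the timeline destroyed.  If no sync_pts remain on its child list
 * the timeline is freed immediately; otherwise it is signaled one last
 * time so remaining points can error out (-ENOENT), and the timeline is
 * freed once the last child is removed.
 */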
void sync_timeline_destroy(struct sync_timeline *obj)
{
	unsigned long flags;
	bool needs_freeing;

	spin_lock_irqsave(&obj->child_list_lock, flags);
	obj->destroyed = true;
	needs_freeing = list_empty(&obj->child_list_head);
	spin_unlock_irqrestore(&obj->child_list_lock, flags);

	if (needs_freeing)
		sync_timeline_free(obj);
	else
		sync_timeline_signal(obj);
}
EXPORT_SYMBOL(sync_timeline_destroy);

static void sync_timeline_add_pt(struct sync_timeline *obj, struct sync_pt *pt)
{
	unsigned long flags;

	pt->parent = obj;

	spin_lock_irqsave(&obj->child_list_lock, flags);
	list_add_tail(&pt->child_list, &obj->child_list_head);
	spin_unlock_irqrestore(&obj->child_list_lock, flags);
}

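/*
 * Unlinks a sync_pt from its parent's active and child lists.  If the
 * timeline has been destroyed and this was its last child, the timeline
 * itself is freed here.
 */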
static void sync_timeline_remove_pt(struct sync_pt *pt)
{
	struct sync_timeline *obj = pt->parent;
	unsigned long flags;
	bool needs_freeing;

	spin_lock_irqsave(&obj->active_list_lock, flags);
	if (!list_empty(&pt->active_list))
		list_del_init(&pt->active_list);
	spin_unlock_irqrestore(&obj->active_list_lock, flags);

	spin_lock_irqsave(&obj->child_list_lock, flags);
	list_del(&pt->child_list);
	needs_freeing = obj->destroyed && list_empty(&obj->child_list_head);
	spin_unlock_irqrestore(&obj->child_list_lock, flags);

	if (needs_freeing)
		sync_timeline_free(obj);
}

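/**
 * sync_timeline_signal() - signal a status change on a sync_timeline
 * @obj:	sync_timeline to signal
 *
 * Called by the timeline implementation whenever its counter advances.
 * Walks the active list and moves every sync_pt that now reports
 * signaled onto a local list (under the active list lock), then
 * dispatches fence signaling outside the lock.
 */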
void sync_timeline_signal(struct sync_timeline *obj)
{
	unsigned long flags;
	LIST_HEAD(signaled_pts);
	struct list_head *pos, *n;

	spin_lock_irqsave(&obj->active_list_lock, flags);

	list_for_each_safe(pos, n, &obj->active_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, active_list);

		if (_sync_pt_has_signaled(pt))
			list_move(pos, &signaled_pts);
	}

	spin_unlock_irqrestore(&obj->active_list_lock, flags);

	list_for_each_safe(pos, n, &signaled_pts) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, active_list);

		list_del_init(pos);
		sync_fence_signal_pt(pt);
	}
}
EXPORT_SYMBOL(sync_timeline_signal);

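/**
 * sync_pt_create() - creates a sync pt
 * @parent:	sync_timeline the sync_pt will belong to
 * @size:	size to allocate; must be at least sizeof(struct sync_pt)
 *
 * Allocates a new sync_pt and adds it to @parent's child list.
 * Returns NULL on bad size or allocation failure.
 */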
struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size)
{
	struct sync_pt *pt;

	if (size < sizeof(struct sync_pt))
		return NULL;

	pt = kzalloc(size, GFP_KERNEL);
	if (pt == NULL)
		return NULL;

	INIT_LIST_HEAD(&pt->active_list);
	sync_timeline_add_pt(parent, pt);

	return pt;
}
EXPORT_SYMBOL(sync_pt_create);

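/**
 * sync_pt_free() - frees a sync pt
 * @pt:		sync_pt to free
 *
 * Gives the implementation a chance to release per-pt state via
 * ops->free_pt, unlinks the pt from its timeline, and frees it.
 */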
void sync_pt_free(struct sync_pt *pt)
{
	if (pt->parent->ops->free_pt)
		pt->parent->ops->free_pt(pt);

	sync_timeline_remove_pt(pt);

	kfree(pt);
}
EXPORT_SYMBOL(sync_pt_free);

/* call with pt->parent->active_list_lock held */
static int _sync_pt_has_signaled(struct sync_pt *pt)
{
	int old_status = pt->status;

	if (!pt->status)
		pt->status = pt->parent->ops->has_signaled(pt);

	if (!pt->status && pt->parent->destroyed)
		pt->status = -ENOENT;

	if (pt->status != old_status)
		pt->timestamp = ktime_get();

	return pt->status;
}

static struct sync_pt *sync_pt_dup(struct sync_pt *pt)
{
	return pt->parent->ops->dup(pt);
}

/* Adds a sync pt to the active queue.  Called when added to a fence */
static void sync_pt_activate(struct sync_pt *pt)
{
	struct sync_timeline *obj = pt->parent;
	unsigned long flags;
	int err;

	spin_lock_irqsave(&obj->active_list_lock, flags);

	err = _sync_pt_has_signaled(pt);
	if (err != 0) {
		sync_fence_signal_pt(pt);
		goto out;
	}

	list_add_tail(&pt->active_list, &obj->active_list_head);

out:
	spin_unlock_irqrestore(&obj->active_list_lock, flags);
}

static int sync_fence_release(struct inode *inode, struct file *file);
static unsigned int sync_fence_poll(struct file *file, poll_table *wait);
static long sync_fence_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg);

static const struct file_operations sync_fence_fops = {
	.release = sync_fence_release,
	.poll = sync_fence_poll,
	.unlocked_ioctl = sync_fence_ioctl,
};

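/*
 * Allocates a fence backed by an anonymous inode file; the file's
 * private_data points back at the fence, and dropping the last file
 * reference tears the fence down via sync_fence_release().
 */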
static struct sync_fence *sync_fence_alloc(const char *name)
{
	struct sync_fence *fence;
	unsigned long flags;

	fence = kzalloc(sizeof(struct sync_fence), GFP_KERNEL);
	if (fence == NULL)
		return NULL;

	fence->file = anon_inode_getfile("sync_fence", &sync_fence_fops,
					 fence, 0);
	if (fence->file == NULL)
		goto err;

	strlcpy(fence->name, name, sizeof(fence->name));

	INIT_LIST_HEAD(&fence->pt_list_head);
	INIT_LIST_HEAD(&fence->waiter_list_head);
	spin_lock_init(&fence->waiter_list_lock);

	init_waitqueue_head(&fence->wq);

	spin_lock_irqsave(&sync_fence_list_lock, flags);
	list_add_tail(&fence->sync_fence_list, &sync_fence_list_head);
	spin_unlock_irqrestore(&sync_fence_list_lock, flags);

	return fence;

err:
	kfree(fence);
	return NULL;
}

/* TODO: implement a create which takes more than one sync_pt */
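/**
 * sync_fence_create() - creates a sync fence
 * @name:	name of fence to create
 * @pt:		sync_pt to add to the fence
 *
 * Creates a fence containing @pt, activates the pt, and takes ownership
 * of it.  Fails if @pt already belongs to a fence.
 */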
struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt)
{
	struct sync_fence *fence;

	if (pt->fence)
		return NULL;

	fence = sync_fence_alloc(name);
	if (fence == NULL)
		return NULL;

	pt->fence = fence;
	list_add(&pt->pt_list, &fence->pt_list_head);
	sync_pt_activate(pt);

	return fence;
}
EXPORT_SYMBOL(sync_fence_create);

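/* Duplicates every sync_pt in @src and adds the copies to @dst. */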
static int sync_fence_copy_pts(struct sync_fence *dst, struct sync_fence *src)
{
	struct list_head *pos;

	list_for_each(pos, &src->pt_list_head) {
		struct sync_pt *orig_pt =
			container_of(pos, struct sync_pt, pt_list);
		struct sync_pt *new_pt = sync_pt_dup(orig_pt);

		if (new_pt == NULL)
			return -ENOMEM;

		new_pt->fence = dst;
		list_add(&new_pt->pt_list, &dst->pt_list_head);
		sync_pt_activate(new_pt);
	}

	return 0;
}

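/*
 * Merges @src's pts into @dst.  Where both fences carry a pt on the same
 * timeline only the later of the two is kept, so the merged fence signals
 * once every timeline involved has caught up.
 */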
static int sync_fence_merge_pts(struct sync_fence *dst, struct sync_fence *src)
{
	struct list_head *src_pos, *dst_pos, *n;

	list_for_each(src_pos, &src->pt_list_head) {
		struct sync_pt *src_pt =
			container_of(src_pos, struct sync_pt, pt_list);
		bool collapsed = false;

		list_for_each_safe(dst_pos, n, &dst->pt_list_head) {
			struct sync_pt *dst_pt =
				container_of(dst_pos, struct sync_pt, pt_list);
			/* collapse two sync_pts on the same timeline
			 * to a single sync_pt that will signal at
			 * the later of the two
			 */
			if (dst_pt->parent == src_pt->parent) {
				if (dst_pt->parent->ops->compare(dst_pt, src_pt) == -1) {
					struct sync_pt *new_pt =
						sync_pt_dup(src_pt);
					if (new_pt == NULL)
						return -ENOMEM;

					new_pt->fence = dst;
					list_replace(&dst_pt->pt_list,
						     &new_pt->pt_list);
					sync_pt_activate(new_pt);
					sync_pt_free(dst_pt);
				}
				collapsed = true;
				break;
			}
		}

		if (!collapsed) {
			struct sync_pt *new_pt = sync_pt_dup(src_pt);

			if (new_pt == NULL)
				return -ENOMEM;

			new_pt->fence = dst;
			list_add(&new_pt->pt_list, &dst->pt_list_head);
			sync_pt_activate(new_pt);
		}
	}

	return 0;
}

static void sync_fence_free_pts(struct sync_fence *fence)
{
	struct list_head *pos, *n;

	list_for_each_safe(pos, n, &fence->pt_list_head) {
		struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
		sync_pt_free(pt);
	}
}

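/**
 * sync_fence_fdget() - gets a fence from an fd
 * @fd:		fd referencing a fence
 *
 * Ensures @fd references a valid fence and takes a reference on the
 * underlying file; callers must drop it with sync_fence_put().
 * Returns NULL if @fd does not refer to a sync fence.
 */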
struct sync_fence *sync_fence_fdget(int fd)
{
	struct file *file = fget(fd);

	if (file == NULL)
		return NULL;

	if (file->f_op != &sync_fence_fops)
		goto err;

	return file->private_data;

err:
	fput(file);
	return NULL;
}
EXPORT_SYMBOL(sync_fence_fdget);

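/**
 * sync_fence_put() - puts a reference on a sync fence
 * @fence:	fence to put
 *
 * Drops a file reference; the fence is released once the last
 * reference is gone.
 */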
void sync_fence_put(struct sync_fence *fence)
{
	fput(fence->file);
}
EXPORT_SYMBOL(sync_fence_put);

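/**
 * sync_fence_install() - installs a fence into a file descriptor
 * @fence:	fence to install
 * @fd:		file descriptor in which to install the fence
 *
 * Installs @fence's backing file into the fd table slot reserved for
 * @fd; the fd then owns a reference to the fence.
 */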
void sync_fence_install(struct sync_fence *fence, int fd)
{
	fd_install(fd, fence->file);
}
EXPORT_SYMBOL(sync_fence_install);

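/*
 * Collapses the pts' statuses into a single fence status: a negative
 * error if any pt errored, 0 if any pt is still active, and 1 once
 * every pt has signaled.
 */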
static int sync_fence_get_status(struct sync_fence *fence)
{
	struct list_head *pos;
	int status = 1;

	list_for_each(pos, &fence->pt_list_head) {
		struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
		int pt_status = pt->status;

		if (pt_status < 0) {
			status = pt_status;
			break;
		} else if (status == 1) {
			status = pt_status;
		}
	}

	return status;
}

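/**
 * sync_fence_merge() - merge two fences
 * @name:	name of new fence
 * @a:		fence a
 * @b:		fence b
 *
 * Creates a new fence which contains copies of all the sync_pts in both
 * @a and @b.  @a and @b remain valid, independent fences.  Returns NULL
 * on allocation failure.
 */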
struct sync_fence *sync_fence_merge(const char *name,
				    struct sync_fence *a, struct sync_fence *b)
{
	struct sync_fence *fence;
	int err;

	fence = sync_fence_alloc(name);
	if (fence == NULL)
		return NULL;

	err = sync_fence_copy_pts(fence, a);
	if (err < 0)
		goto err;

	err = sync_fence_merge_pts(fence, b);
	if (err < 0)
		goto err;

	fence->status = sync_fence_get_status(fence);

	return fence;
err:
	sync_fence_free_pts(fence);
	kfree(fence);
	return NULL;
}
EXPORT_SYMBOL(sync_fence_merge);

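/*
 * Called when @pt signals.  Re-evaluates the owning fence's status and,
 * on the first 0 -> non-zero transition, dequeues and runs all async
 * waiter callbacks (outside the waiter lock) and wakes sleepers on the
 * fence's waitqueue.
 */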
static void sync_fence_signal_pt(struct sync_pt *pt)
{
	LIST_HEAD(signaled_waiters);
	struct sync_fence *fence = pt->fence;
	struct list_head *pos;
	struct list_head *n;
	unsigned long flags;
	int status;

	status = sync_fence_get_status(fence);

	spin_lock_irqsave(&fence->waiter_list_lock, flags);
	/*
	 * this should protect against two threads racing on the signaled
	 * false -> true transition
	 */
	if (status && !fence->status) {
		list_for_each_safe(pos, n, &fence->waiter_list_head)
			list_move(pos, &signaled_waiters);

		fence->status = status;
	} else {
		status = 0;
	}
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);

	if (status) {
		list_for_each_safe(pos, n, &signaled_waiters) {
			struct sync_fence_waiter *waiter =
				container_of(pos, struct sync_fence_waiter,
					     waiter_list);

			list_del(pos);
			waiter->callback(fence, waiter);
		}
		wake_up(&fence->wq);
	}
}

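/**
 * sync_fence_wait_async() - registers an async wait on the fence
 * @fence:	fence to wait on
 * @waiter:	waiter callback struct
 *
 * Returns 1 if @fence has already signaled, a negative error if it is
 * in the error state, and 0 if the wait was queued; in that case
 * @waiter->callback fires when the fence signals.
 */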
int sync_fence_wait_async(struct sync_fence *fence,
			  struct sync_fence_waiter *waiter)
{
	unsigned long flags;
	int err = 0;

	spin_lock_irqsave(&fence->waiter_list_lock, flags);

	if (fence->status) {
		err = fence->status;
		goto out;
	}

	list_add_tail(&waiter->waiter_list, &fence->waiter_list_head);
out:
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);

	return err;
}
EXPORT_SYMBOL(sync_fence_wait_async);

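/**
 * sync_fence_cancel_async() - cancels an async wait
 * @fence:	fence upon which the waiter was registered
 * @waiter:	waiter to remove
 *
 * Returns 0 if the waiter was removed before its callback fired, or
 * -ENOENT if it was no longer queued (already dispatched or never
 * added).
 */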
int sync_fence_cancel_async(struct sync_fence *fence,
			    struct sync_fence_waiter *waiter)
{
	struct list_head *pos;
	struct list_head *n;
	unsigned long flags;
	int ret = -ENOENT;

	spin_lock_irqsave(&fence->waiter_list_lock, flags);
	/*
	 * Make sure waiter is still in waiter_list because it is possible for
	 * the waiter to be removed from the list while the callback is still
	 * pending.
	 */
	list_for_each_safe(pos, n, &fence->waiter_list_head) {
		struct sync_fence_waiter *list_waiter =
			container_of(pos, struct sync_fence_waiter,
				     waiter_list);
		if (list_waiter == waiter) {
			list_del(pos);
			ret = 0;
			break;
		}
	}
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
	return ret;
}
EXPORT_SYMBOL(sync_fence_cancel_async);

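/**
 * sync_fence_wait() - wait on fence
 * @fence:	fence to wait on
 * @timeout:	timeout in ms, or 0 to wait indefinitely
 *
 * Sleeps (interruptibly) until the fence signals.  Returns 0 on success,
 * -ETIME if the timeout expires first, the fence's error status if it
 * entered the error state, or -ERESTARTSYS if interrupted.
 */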
int sync_fence_wait(struct sync_fence *fence, long timeout)
{
	int err;

	if (timeout) {
		timeout = msecs_to_jiffies(timeout);
		err = wait_event_interruptible_timeout(fence->wq,
						       fence->status != 0,
						       timeout);
	} else {
		err = wait_event_interruptible(fence->wq, fence->status != 0);
	}

	if (err < 0)
		return err;

	if (fence->status < 0)
		return fence->status;

	if (fence->status == 0)
		return -ETIME;

	return 0;
}
EXPORT_SYMBOL(sync_fence_wait);

static int sync_fence_release(struct inode *inode, struct file *file)
{
	struct sync_fence *fence = file->private_data;
	unsigned long flags;

	spin_lock_irqsave(&sync_fence_list_lock, flags);
	list_del(&fence->sync_fence_list);
	spin_unlock_irqrestore(&sync_fence_list_lock, flags);

	sync_fence_free_pts(fence);

	kfree(fence);

	return 0;
}

static unsigned int sync_fence_poll(struct file *file, poll_table *wait)
{
	struct sync_fence *fence = file->private_data;

	poll_wait(file, &fence->wq, wait);

	if (fence->status == 1)
		return POLLIN;
	else if (fence->status < 0)
		return POLLERR;
	else
		return 0;
}

static long sync_fence_ioctl_wait(struct sync_fence *fence, unsigned long arg)
{
	__u32 value;

	if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
		return -EFAULT;

	return sync_fence_wait(fence, value);
}

static long sync_fence_ioctl_merge(struct sync_fence *fence, unsigned long arg)
{
	int fd = get_unused_fd();
	int err;
	struct sync_fence *fence2, *fence3;
	struct sync_merge_data data;

	if (fd < 0)
		return fd;

	if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
		err = -EFAULT;
		goto err_put_fd;
	}

	fence2 = sync_fence_fdget(data.fd2);
	if (fence2 == NULL) {
		err = -ENOENT;
		goto err_put_fd;
	}

	data.name[sizeof(data.name) - 1] = '\0';
	fence3 = sync_fence_merge(data.name, fence, fence2);
	if (fence3 == NULL) {
		err = -ENOMEM;
		goto err_put_fence2;
	}

	data.fence = fd;
	if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
		err = -EFAULT;
		goto err_put_fence3;
	}

	sync_fence_install(fence3, fd);
	sync_fence_put(fence2);
	return 0;

err_put_fence3:
	sync_fence_put(fence3);

err_put_fence2:
	sync_fence_put(fence2);

err_put_fd:
	put_unused_fd(fd);
	return err;
}

static int sync_fill_pt_info(struct sync_pt *pt, void *data, int size)
{
	struct sync_pt_info *info = data;
	int ret;

	if (size < sizeof(struct sync_pt_info))
		return -ENOMEM;

	info->len = sizeof(struct sync_pt_info);

	if (pt->parent->ops->fill_driver_data) {
		ret = pt->parent->ops->fill_driver_data(pt, info->driver_data,
							size - sizeof(*info));
		if (ret < 0)
			return ret;

		info->len += ret;
	}

	strlcpy(info->obj_name, pt->parent->name, sizeof(info->obj_name));
	strlcpy(info->driver_name, pt->parent->ops->driver_name,
		sizeof(info->driver_name));
	info->status = pt->status;
	info->timestamp_ns = ktime_to_ns(pt->timestamp);

	return info->len;
}

static long sync_fence_ioctl_fence_info(struct sync_fence *fence,
					unsigned long arg)
{
	struct sync_fence_info_data *data;
	struct list_head *pos;
	__u32 size;
	__u32 len = 0;
	int ret;

	if (copy_from_user(&size, (void __user *)arg, sizeof(size)))
		return -EFAULT;

	if (size < sizeof(struct sync_fence_info_data))
		return -EINVAL;

	if (size > 4096)
		size = 4096;

	data = kzalloc(size, GFP_KERNEL);
	if (data == NULL)
		return -ENOMEM;

	strlcpy(data->name, fence->name, sizeof(data->name));
	data->status = fence->status;
	len = sizeof(struct sync_fence_info_data);

	list_for_each(pos, &fence->pt_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, pt_list);

		ret = sync_fill_pt_info(pt, (u8 *)data + len, size - len);

		if (ret < 0)
			goto out;

		len += ret;
	}

	data->len = len;

	if (copy_to_user((void __user *)arg, data, len))
		ret = -EFAULT;
	else
		ret = 0;

out:
	kfree(data);

	return ret;
}

static long sync_fence_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	struct sync_fence *fence = file->private_data;

	switch (cmd) {
	case SYNC_IOC_WAIT:
		return sync_fence_ioctl_wait(fence, arg);

	case SYNC_IOC_MERGE:
		return sync_fence_ioctl_merge(fence, arg);

	case SYNC_IOC_FENCE_INFO:
		return sync_fence_ioctl_fence_info(fence, arg);

	default:
		return -ENOTTY;
	}
}

#ifdef CONFIG_DEBUG_FS
static const char *sync_status_str(int status)
{
	if (status > 0)
		return "signaled";
	else if (status == 0)
		return "active";
	else
		return "error";
}

static void sync_print_pt(struct seq_file *s, struct sync_pt *pt, bool fence)
{
	int status = pt->status;

	seq_printf(s, "  %s%spt %s",
		   fence ? pt->parent->name : "",
		   fence ? "_" : "",
		   sync_status_str(status));
	if (pt->status) {
		struct timeval tv = ktime_to_timeval(pt->timestamp);
		seq_printf(s, "@%ld.%06ld", tv.tv_sec, tv.tv_usec);
	}

	if (pt->parent->ops->print_pt) {
		seq_printf(s, ": ");
		pt->parent->ops->print_pt(s, pt);
	}

	seq_printf(s, "\n");
}

static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
{
	struct list_head *pos;
	unsigned long flags;

	seq_printf(s, "%s %s", obj->name, obj->ops->driver_name);

	if (obj->ops->print_obj) {
		seq_printf(s, ": ");
		obj->ops->print_obj(s, obj);
	}

	seq_printf(s, "\n");

	spin_lock_irqsave(&obj->child_list_lock, flags);
	list_for_each(pos, &obj->child_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, child_list);
		sync_print_pt(s, pt, false);
	}
	spin_unlock_irqrestore(&obj->child_list_lock, flags);
}

static void sync_print_fence(struct seq_file *s, struct sync_fence *fence)
{
	struct list_head *pos;
	unsigned long flags;

	seq_printf(s, "%s: %s\n", fence->name, sync_status_str(fence->status));

	list_for_each(pos, &fence->pt_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, pt_list);
		sync_print_pt(s, pt, true);
	}

	spin_lock_irqsave(&fence->waiter_list_lock, flags);
	list_for_each(pos, &fence->waiter_list_head) {
		struct sync_fence_waiter *waiter =
			container_of(pos, struct sync_fence_waiter,
				     waiter_list);

		seq_printf(s, "waiter %pF\n", waiter->callback);
	}
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
}

static int sync_debugfs_show(struct seq_file *s, void *unused)
{
	unsigned long flags;
	struct list_head *pos;

	seq_printf(s, "objs:\n--------------\n");

	spin_lock_irqsave(&sync_timeline_list_lock, flags);
	list_for_each(pos, &sync_timeline_list_head) {
		struct sync_timeline *obj =
			container_of(pos, struct sync_timeline,
				     sync_timeline_list);

		sync_print_obj(s, obj);
		seq_printf(s, "\n");
	}
	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

	seq_printf(s, "fences:\n--------------\n");

	spin_lock_irqsave(&sync_fence_list_lock, flags);
	list_for_each(pos, &sync_fence_list_head) {
		struct sync_fence *fence =
			container_of(pos, struct sync_fence, sync_fence_list);

		sync_print_fence(s, fence);
		seq_printf(s, "\n");
	}
	spin_unlock_irqrestore(&sync_fence_list_lock, flags);
	return 0;
}

static int sync_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, sync_debugfs_show, inode->i_private);
}

static const struct file_operations sync_debugfs_fops = {
	.open = sync_debugfs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static __init int sync_debugfs_init(void)
{
	debugfs_create_file("sync", S_IRUGO, NULL, NULL, &sync_debugfs_fops);
	return 0;
}

late_initcall(sync_debugfs_init);

#endif