blob: c0690c8b6a3ce7caf32ac719b6e39d44a99e1096 [file] [log] [blame]
Erik Gilling010accf2012-03-13 15:34:34 -07001/*
2 * drivers/base/sync.c
3 *
4 * Copyright (C) 2012 Google, Inc.
5 *
6 * This software is licensed under the terms of the GNU General Public
7 * License version 2, as published by the Free Software Foundation, and
8 * may be copied, distributed, and modified under those terms.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 */
16
Erik Gilling981c8a92012-03-14 19:49:15 -070017#include <linux/debugfs.h>
Erik Gilling4fb837a2012-05-16 13:09:22 -070018#include <linux/export.h>
Erik Gilling010accf2012-03-13 15:34:34 -070019#include <linux/file.h>
20#include <linux/fs.h>
21#include <linux/kernel.h>
Erik Gillinga1eeaca2012-03-19 17:28:32 -070022#include <linux/poll.h>
Erik Gilling010accf2012-03-13 15:34:34 -070023#include <linux/sched.h>
Erik Gilling981c8a92012-03-14 19:49:15 -070024#include <linux/seq_file.h>
Erik Gilling010accf2012-03-13 15:34:34 -070025#include <linux/slab.h>
26#include <linux/sync.h>
27#include <linux/uaccess.h>
28
29#include <linux/anon_inodes.h>
30
/* fence-side helpers referenced before their definitions below */
static void sync_fence_signal_pt(struct sync_pt *pt);
static int _sync_pt_has_signaled(struct sync_pt *pt);

/*
 * Global registries of every live timeline and fence; walked by the
 * debugfs dump (sync_debugfs_show()) under their respective locks.
 */
static LIST_HEAD(sync_timeline_list_head);
static DEFINE_SPINLOCK(sync_timeline_list_lock);

static LIST_HEAD(sync_fence_list_head);
static DEFINE_SPINLOCK(sync_fence_list_lock);
39
Erik Gilling010accf2012-03-13 15:34:34 -070040struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
41 int size, const char *name)
42{
43 struct sync_timeline *obj;
Erik Gilling981c8a92012-03-14 19:49:15 -070044 unsigned long flags;
Erik Gilling010accf2012-03-13 15:34:34 -070045
46 if (size < sizeof(struct sync_timeline))
47 return NULL;
48
49 obj = kzalloc(size, GFP_KERNEL);
50 if (obj == NULL)
51 return NULL;
52
53 obj->ops = ops;
54 strlcpy(obj->name, name, sizeof(obj->name));
55
56 INIT_LIST_HEAD(&obj->child_list_head);
57 spin_lock_init(&obj->child_list_lock);
58
59 INIT_LIST_HEAD(&obj->active_list_head);
60 spin_lock_init(&obj->active_list_lock);
61
Erik Gilling981c8a92012-03-14 19:49:15 -070062 spin_lock_irqsave(&sync_timeline_list_lock, flags);
63 list_add_tail(&obj->sync_timeline_list, &sync_timeline_list_head);
64 spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
65
Erik Gilling010accf2012-03-13 15:34:34 -070066 return obj;
67}
Erik Gilling4fb837a2012-05-16 13:09:22 -070068EXPORT_SYMBOL(sync_timeline_create);
Erik Gilling010accf2012-03-13 15:34:34 -070069
/*
 * Final teardown of a timeline: let the driver release its private state
 * (->release_obj), unlink the timeline from the global debugfs list, and
 * free the memory.  Reached from sync_timeline_destroy() when no child
 * pts remain, or from sync_timeline_remove_pt() when the last child of a
 * destroyed timeline goes away.
 */
static void sync_timeline_free(struct sync_timeline *obj)
{
	unsigned long flags;

	/* optional driver hook; runs before the object is torn down */
	if (obj->ops->release_obj)
		obj->ops->release_obj(obj);

	spin_lock_irqsave(&sync_timeline_list_lock, flags);
	list_del(&obj->sync_timeline_list);
	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

	kfree(obj);
}
83
/**
 * sync_timeline_destroy() - destroys a sync object
 * @obj:	timeline to destroy
 *
 * Marks the timeline dead.  If no child pts remain it is freed right
 * away; otherwise ownership of the final free passes to whichever
 * sync_pt_free() removes the last child (see sync_timeline_remove_pt()).
 */
void sync_timeline_destroy(struct sync_timeline *obj)
{
	unsigned long flags;
	bool needs_freeing;

	/*
	 * Set ->destroyed and sample child-list emptiness atomically under
	 * child_list_lock so exactly one path concludes it must free.
	 */
	spin_lock_irqsave(&obj->child_list_lock, flags);
	obj->destroyed = true;
	needs_freeing = list_empty(&obj->child_list_head);
	spin_unlock_irqrestore(&obj->child_list_lock, flags);

	if (needs_freeing)
		sync_timeline_free(obj);
	else
		/*
		 * Flush remaining active pts; with ->destroyed set,
		 * _sync_pt_has_signaled() forces them to -ENOENT.
		 */
		sync_timeline_signal(obj);
}
EXPORT_SYMBOL(sync_timeline_destroy);
Erik Gilling010accf2012-03-13 15:34:34 -0700100
101static void sync_timeline_add_pt(struct sync_timeline *obj, struct sync_pt *pt)
102{
103 unsigned long flags;
104
105 pt->parent = obj;
106
107 spin_lock_irqsave(&obj->child_list_lock, flags);
108 list_add_tail(&pt->child_list, &obj->child_list_head);
109 spin_unlock_irqrestore(&obj->child_list_lock, flags);
110}
111
/*
 * Detach @pt from its parent timeline: drop it from the active
 * (unsignaled) list if still queued, then from the child list.  If the
 * timeline was destroyed and this was its last child, this call also
 * frees the timeline (ownership handed over by sync_timeline_destroy()).
 */
static void sync_timeline_remove_pt(struct sync_pt *pt)
{
	struct sync_timeline *obj = pt->parent;
	unsigned long flags;
	bool needs_freeing;

	/* list_del_init() pairs with the list_empty() check above it */
	spin_lock_irqsave(&obj->active_list_lock, flags);
	if (!list_empty(&pt->active_list))
		list_del_init(&pt->active_list);
	spin_unlock_irqrestore(&obj->active_list_lock, flags);

	/*
	 * Sample ->destroyed and list emptiness under child_list_lock so
	 * only one remover can observe the list becoming empty.
	 */
	spin_lock_irqsave(&obj->child_list_lock, flags);
	list_del(&pt->child_list);
	needs_freeing = obj->destroyed && list_empty(&obj->child_list_head);
	spin_unlock_irqrestore(&obj->child_list_lock, flags);

	if (needs_freeing)
		sync_timeline_free(obj);
}
131
/**
 * sync_timeline_signal() - signal a status change on a sync_timeline
 * @obj:	timeline that has advanced
 *
 * Re-evaluates every pt on the active list via _sync_pt_has_signaled()
 * and delivers fence notifications for those that completed.  Signaled
 * pts are first moved onto a private list so that sync_fence_signal_pt()
 * (which takes other locks and runs waiter callbacks) executes without
 * active_list_lock held.
 */
void sync_timeline_signal(struct sync_timeline *obj)
{
	unsigned long flags;
	LIST_HEAD(signaled_pts);
	struct list_head *pos, *n;

	spin_lock_irqsave(&obj->active_list_lock, flags);

	list_for_each_safe(pos, n, &obj->active_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, active_list);

		if (_sync_pt_has_signaled(pt))
			list_move(pos, &signaled_pts);
	}

	spin_unlock_irqrestore(&obj->active_list_lock, flags);

	list_for_each_safe(pos, n, &signaled_pts) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, active_list);

		/*
		 * list_del_init() so sync_timeline_remove_pt() later sees
		 * the pt as no longer queued on any active list.
		 */
		list_del_init(pos);
		sync_fence_signal_pt(pt);
	}
}
EXPORT_SYMBOL(sync_timeline_signal);
Erik Gilling010accf2012-03-13 15:34:34 -0700159
160struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size)
161{
162 struct sync_pt *pt;
163
164 if (size < sizeof(struct sync_pt))
165 return NULL;
166
167 pt = kzalloc(size, GFP_KERNEL);
168 if (pt == NULL)
169 return NULL;
170
171 INIT_LIST_HEAD(&pt->active_list);
172 sync_timeline_add_pt(parent, pt);
173
174 return pt;
175}
Erik Gilling4fb837a2012-05-16 13:09:22 -0700176EXPORT_SYMBOL(sync_pt_create);
Erik Gilling010accf2012-03-13 15:34:34 -0700177
/**
 * sync_pt_free() - frees a sync pt
 * @pt:		pt to free
 *
 * Gives the driver a chance to release per-pt state (->free_pt), then
 * unlinks the pt from its timeline and frees it.
 */
void sync_pt_free(struct sync_pt *pt)
{
	if (pt->parent->ops->free_pt)
		pt->parent->ops->free_pt(pt);

	/*
	 * May free pt->parent (last child of a destroyed timeline), so it
	 * must come after the ->free_pt() call that dereferences parent.
	 */
	sync_timeline_remove_pt(pt);

	kfree(pt);
}
EXPORT_SYMBOL(sync_pt_free);
Erik Gilling010accf2012-03-13 15:34:34 -0700188
/*
 * Refresh and return @pt's cached status: 0 = pending, >0 = signaled,
 * <0 = error.  Call with pt->parent->active_list_lock held.
 */
static int _sync_pt_has_signaled(struct sync_pt *pt)
{
	int old_status = pt->status;

	/* only poll the driver while the pt is still pending */
	if (!pt->status)
		pt->status = pt->parent->ops->has_signaled(pt);

	/* pts on a destroyed timeline can never signal: force an error */
	if (!pt->status && pt->parent->destroyed)
		pt->status = -ENOENT;

	/* stamp the pending -> signaled/error transition exactly once */
	if (pt->status != old_status)
		pt->timestamp = ktime_get();

	return pt->status;
}
205
206static struct sync_pt *sync_pt_dup(struct sync_pt *pt)
207{
208 return pt->parent->ops->dup(pt);
209}
210
/* Adds a sync pt to the active queue.  Called when added to a fence */
static void sync_pt_activate(struct sync_pt *pt)
{
	struct sync_timeline *obj = pt->parent;
	unsigned long flags;
	int err;

	spin_lock_irqsave(&obj->active_list_lock, flags);

	/*
	 * If the pt already signaled (or errored) there is nothing to
	 * queue: deliver the fence notification immediately instead.
	 * NOTE(review): sync_fence_signal_pt() is invoked here with
	 * active_list_lock held, unlike the sync_timeline_signal() path
	 * which drops it first -- confirm the lock ordering is intended.
	 */
	err = _sync_pt_has_signaled(pt);
	if (err != 0) {
		sync_fence_signal_pt(pt);
		goto out;
	}

	list_add_tail(&pt->active_list, &obj->active_list_head);

out:
	spin_unlock_irqrestore(&obj->active_list_lock, flags);
}
231
/* file_operations for the fence's anonymous inode; a fence's lifetime
 * is tied to its struct file (freed from sync_fence_release()). */
static int sync_fence_release(struct inode *inode, struct file *file);
static unsigned int sync_fence_poll(struct file *file, poll_table *wait);
static long sync_fence_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg);


static const struct file_operations sync_fence_fops = {
	.release = sync_fence_release,
	.poll = sync_fence_poll,
	.unlocked_ioctl = sync_fence_ioctl,
};
243
244static struct sync_fence *sync_fence_alloc(const char *name)
245{
246 struct sync_fence *fence;
Erik Gilling981c8a92012-03-14 19:49:15 -0700247 unsigned long flags;
Erik Gilling010accf2012-03-13 15:34:34 -0700248
249 fence = kzalloc(sizeof(struct sync_fence), GFP_KERNEL);
250 if (fence == NULL)
251 return NULL;
252
253 fence->file = anon_inode_getfile("sync_fence", &sync_fence_fops,
254 fence, 0);
255 if (fence->file == NULL)
256 goto err;
257
258 strlcpy(fence->name, name, sizeof(fence->name));
259
260 INIT_LIST_HEAD(&fence->pt_list_head);
261 INIT_LIST_HEAD(&fence->waiter_list_head);
262 spin_lock_init(&fence->waiter_list_lock);
263
264 init_waitqueue_head(&fence->wq);
Erik Gilling981c8a92012-03-14 19:49:15 -0700265
266 spin_lock_irqsave(&sync_fence_list_lock, flags);
267 list_add_tail(&fence->sync_fence_list, &sync_fence_list_head);
268 spin_unlock_irqrestore(&sync_fence_list_lock, flags);
269
Erik Gilling010accf2012-03-13 15:34:34 -0700270 return fence;
271
272err:
273 kfree(fence);
274 return NULL;
275}
276
277/* TODO: implement a create which takes more that one sync_pt */
278struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt)
279{
280 struct sync_fence *fence;
281
282 if (pt->fence)
283 return NULL;
284
285 fence = sync_fence_alloc(name);
286 if (fence == NULL)
287 return NULL;
288
289 pt->fence = fence;
290 list_add(&pt->pt_list, &fence->pt_list_head);
291 sync_pt_activate(pt);
292
293 return fence;
294}
Erik Gilling4fb837a2012-05-16 13:09:22 -0700295EXPORT_SYMBOL(sync_fence_create);
Erik Gilling010accf2012-03-13 15:34:34 -0700296
297static int sync_fence_copy_pts(struct sync_fence *dst, struct sync_fence *src)
298{
299 struct list_head *pos;
300
301 list_for_each(pos, &src->pt_list_head) {
302 struct sync_pt *orig_pt =
303 container_of(pos, struct sync_pt, pt_list);
304 struct sync_pt *new_pt = sync_pt_dup(orig_pt);
305
306 if (new_pt == NULL)
307 return -ENOMEM;
308
309 new_pt->fence = dst;
310 list_add(&new_pt->pt_list, &dst->pt_list_head);
311 sync_pt_activate(new_pt);
312 }
313
314 return 0;
315}
316
317static void sync_fence_free_pts(struct sync_fence *fence)
318{
319 struct list_head *pos, *n;
320
321 list_for_each_safe(pos, n, &fence->pt_list_head) {
322 struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
323 sync_pt_free(pt);
324 }
325}
326
327struct sync_fence *sync_fence_fdget(int fd)
328{
329 struct file *file = fget(fd);
330
331 if (file == NULL)
332 return NULL;
333
334 if (file->f_op != &sync_fence_fops)
335 goto err;
336
337 return file->private_data;
338
339err:
340 fput(file);
341 return NULL;
342}
Erik Gilling4fb837a2012-05-16 13:09:22 -0700343EXPORT_SYMBOL(sync_fence_fdget);
Erik Gilling010accf2012-03-13 15:34:34 -0700344
345void sync_fence_put(struct sync_fence *fence)
346{
347 fput(fence->file);
348}
Erik Gilling4fb837a2012-05-16 13:09:22 -0700349EXPORT_SYMBOL(sync_fence_put);
Erik Gilling010accf2012-03-13 15:34:34 -0700350
351void sync_fence_install(struct sync_fence *fence, int fd)
352{
353 fd_install(fd, fence->file);
354}
Erik Gilling4fb837a2012-05-16 13:09:22 -0700355EXPORT_SYMBOL(sync_fence_install);
Erik Gilling010accf2012-03-13 15:34:34 -0700356
357static int sync_fence_get_status(struct sync_fence *fence)
358{
359 struct list_head *pos;
360 int status = 1;
361
362 list_for_each(pos, &fence->pt_list_head) {
363 struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
364 int pt_status = pt->status;
365
366 if (pt_status < 0) {
367 status = pt_status;
368 break;
369 } else if (status == 1) {
370 status = pt_status;
371 }
372 }
373
374 return status;
375}
376
377struct sync_fence *sync_fence_merge(const char *name,
378 struct sync_fence *a, struct sync_fence *b)
379{
380 struct sync_fence *fence;
381 int err;
382
383 fence = sync_fence_alloc(name);
384 if (fence == NULL)
385 return NULL;
386
387 err = sync_fence_copy_pts(fence, a);
388 if (err < 0)
389 goto err;
390
391 err = sync_fence_copy_pts(fence, b);
392 if (err < 0)
393 goto err;
394
395 fence->status = sync_fence_get_status(fence);
396
397 return fence;
398err:
399 sync_fence_free_pts(fence);
400 kfree(fence);
401 return NULL;
402}
Erik Gilling4fb837a2012-05-16 13:09:22 -0700403EXPORT_SYMBOL(sync_fence_merge);
Erik Gilling010accf2012-03-13 15:34:34 -0700404
/*
 * Called when @pt (a member of some fence) has signaled.  Recomputes the
 * fence's aggregate status and, on the fence's pending -> done
 * transition, fires every queued async waiter callback and wakes
 * sleepers in sync_fence_wait().
 */
static void sync_fence_signal_pt(struct sync_pt *pt)
{
	LIST_HEAD(signaled_waiters);
	struct sync_fence *fence = pt->fence;
	struct list_head *pos;
	struct list_head *n;
	unsigned long flags;
	int status;

	status = sync_fence_get_status(fence);

	spin_lock_irqsave(&fence->waiter_list_lock, flags);
	/*
	 * this should protect against two threads racing on the signaled
	 * false -> true transition
	 */
	if (status && !fence->status) {
		/* we won the transition: claim the entire waiter list */
		list_for_each_safe(pos, n, &fence->waiter_list_head)
			list_move(pos, &signaled_waiters);

		fence->status = status;
	} else {
		/* fence not done yet, or another thread beat us to it */
		status = 0;
	}
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);

	if (status) {
		/* run callbacks without waiter_list_lock held */
		list_for_each_safe(pos, n, &signaled_waiters) {
			struct sync_fence_waiter *waiter =
				container_of(pos, struct sync_fence_waiter,
					     waiter_list);

			list_del(pos);
			waiter->callback(fence, waiter);
		}
		wake_up(&fence->wq);
	}
}
443
/**
 * sync_fence_wait_async() - register an async wait on @fence
 * @fence:	fence to wait on
 * @waiter:	waiter whose ->callback fires when the fence signals
 *
 * Returns 0 if the wait was queued (the callback will run from
 * sync_fence_signal_pt()), or the fence's non-zero status if it has
 * already signaled/errored -- in that case the callback is NOT queued.
 */
int sync_fence_wait_async(struct sync_fence *fence,
			  struct sync_fence_waiter *waiter)
{
	unsigned long flags;
	int err = 0;

	spin_lock_irqsave(&fence->waiter_list_lock, flags);

	/* already done: report the status instead of queueing */
	if (fence->status) {
		err = fence->status;
		goto out;
	}

	list_add_tail(&waiter->waiter_list, &fence->waiter_list_head);
out:
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);

	return err;
}
EXPORT_SYMBOL(sync_fence_wait_async);
Erik Gilling010accf2012-03-13 15:34:34 -0700464
/**
 * sync_fence_cancel_async() - cancel a wait queued by sync_fence_wait_async()
 * @fence:	fence being waited on
 * @waiter:	waiter to remove
 *
 * Returns 0 if @waiter was removed before its callback ran, or -ENOENT
 * if it was no longer queued (the fence already claimed it to signal).
 */
int sync_fence_cancel_async(struct sync_fence *fence,
			    struct sync_fence_waiter *waiter)
{
	struct list_head *pos;
	struct list_head *n;
	unsigned long flags;
	int ret = -ENOENT;

	spin_lock_irqsave(&fence->waiter_list_lock, flags);
	/*
	 * Make sure waiter is still in waiter_list because it is possible for
	 * the waiter to be removed from the list while the callback is still
	 * pending.
	 */
	list_for_each_safe(pos, n, &fence->waiter_list_head) {
		struct sync_fence_waiter *list_waiter =
			container_of(pos, struct sync_fence_waiter,
				     waiter_list);
		if (list_waiter == waiter) {
			list_del(pos);
			ret = 0;
			break;
		}
	}
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
	return ret;
}
EXPORT_SYMBOL(sync_fence_cancel_async);
Erik Gillingc80114f2012-05-15 16:23:26 -0700493
/**
 * sync_fence_wait() - sleep until @fence signals
 * @fence:	fence to wait on
 * @timeout:	timeout in milliseconds, or 0 to wait indefinitely
 *
 * Returns 0 once the fence signals, the fence's negative status if it
 * errored, -ETIME if @timeout expired first, or a negative error if the
 * sleep was interrupted.
 */
int sync_fence_wait(struct sync_fence *fence, long timeout)
{
	int err;

	if (timeout) {
		timeout = msecs_to_jiffies(timeout);
		err = wait_event_interruptible_timeout(fence->wq,
						       fence->status != 0,
						       timeout);
	} else {
		err = wait_event_interruptible(fence->wq, fence->status != 0);
	}

	/* interrupted by a signal */
	if (err < 0)
		return err;

	if (fence->status < 0)
		return fence->status;

	/* status still 0 here means the timed wait expired unsignaled */
	if (fence->status == 0)
		return -ETIME;

	return 0;
}
EXPORT_SYMBOL(sync_fence_wait);
Erik Gilling010accf2012-03-13 15:34:34 -0700519
/*
 * ->release for the fence's anon-inode file: runs when the last file
 * reference is dropped; frees the fence's pts and the fence itself.
 */
static int sync_fence_release(struct inode *inode, struct file *file)
{
	struct sync_fence *fence = file->private_data;
	unsigned long flags;

	sync_fence_free_pts(fence);

	/* unlink from the global debugfs list before freeing */
	spin_lock_irqsave(&sync_fence_list_lock, flags);
	list_del(&fence->sync_fence_list);
	spin_unlock_irqrestore(&sync_fence_list_lock, flags);

	kfree(fence);

	return 0;
}
535
Erik Gillinga1eeaca2012-03-19 17:28:32 -0700536static unsigned int sync_fence_poll(struct file *file, poll_table *wait)
537{
538 struct sync_fence *fence = file->private_data;
539
540 poll_wait(file, &fence->wq, wait);
541
542 if (fence->status == 1)
543 return POLLIN;
544 else if (fence->status < 0)
545 return POLLERR;
546 else
547 return 0;
548}
549
Erik Gilling010accf2012-03-13 15:34:34 -0700550static long sync_fence_ioctl_wait(struct sync_fence *fence, unsigned long arg)
551{
552 __u32 value;
553
554 if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
555 return -EFAULT;
556
557 return sync_fence_wait(fence, value);
558}
559
560static long sync_fence_ioctl_merge(struct sync_fence *fence, unsigned long arg)
561{
562 int fd = get_unused_fd();
563 int err;
564 struct sync_fence *fence2, *fence3;
565 struct sync_merge_data data;
566
567 if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
568 return -EFAULT;
569
570 fence2 = sync_fence_fdget(data.fd2);
571 if (fence2 == NULL) {
572 err = -ENOENT;
573 goto err_put_fd;
574 }
575
576 data.name[sizeof(data.name) - 1] = '\0';
577 fence3 = sync_fence_merge(data.name, fence, fence2);
578 if (fence3 == NULL) {
579 err = -ENOMEM;
580 goto err_put_fence2;
581 }
582
583 data.fence = fd;
584 if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
585 err = -EFAULT;
586 goto err_put_fence3;
587 }
588
589 sync_fence_install(fence3, fd);
590 sync_fence_put(fence2);
591 return 0;
592
593err_put_fence3:
594 sync_fence_put(fence3);
595
596err_put_fence2:
597 sync_fence_put(fence2);
598
599err_put_fd:
600 put_unused_fd(fd);
601 return err;
602}
603
/*
 * Serialize one sync_pt into the user-visible sync_pt_info record at
 * @data, appending up to (@size - header) bytes of driver-private data.
 * Returns the total bytes written (info->len), -ENOMEM if @size cannot
 * hold the fixed header, or the driver's negative error.
 */
static int sync_fill_pt_info(struct sync_pt *pt, void *data, int size)
{
	struct sync_pt_info *info = data;
	int ret;

	if (size < sizeof(struct sync_pt_info))
		return -ENOMEM;

	info->len = sizeof(struct sync_pt_info);

	/* optional driver payload follows the fixed-size header */
	if (pt->parent->ops->fill_driver_data) {
		ret = pt->parent->ops->fill_driver_data(pt, info->driver_data,
							size - sizeof(*info));
		if (ret < 0)
			return ret;

		info->len += ret;
	}

	strlcpy(info->obj_name, pt->parent->name, sizeof(info->obj_name));
	strlcpy(info->driver_name, pt->parent->ops->driver_name,
		sizeof(info->driver_name));
	info->status = pt->status;
	info->timestamp_ns = ktime_to_ns(pt->timestamp);

	return info->len;
}
631
/*
 * SYNC_IOC_FENCE_INFO: reply with a sync_fence_info_data header followed
 * by one packed sync_pt_info record per pt.  The caller passes its
 * buffer size in the leading len field; replies are capped at 4096
 * bytes, and pts that no longer fit terminate the dump with an error.
 */
static long sync_fence_ioctl_fence_info(struct sync_fence *fence,
					unsigned long arg)
{
	struct sync_fence_info_data *data;
	struct list_head *pos;
	__u32 size;
	__u32 len = 0;
	int ret;

	/* the first field of sync_fence_info_data is the buffer length */
	if (copy_from_user(&size, (void __user *)arg, sizeof(size)))
		return -EFAULT;

	if (size < sizeof(struct sync_fence_info_data))
		return -EINVAL;

	if (size > 4096)
		size = 4096;

	data = kzalloc(size, GFP_KERNEL);
	if (data == NULL)
		return -ENOMEM;

	strlcpy(data->name, fence->name, sizeof(data->name));
	data->status = fence->status;
	len = sizeof(struct sync_fence_info_data);

	list_for_each(pos, &fence->pt_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, pt_list);

		/* each record is appended right after the previous one */
		ret = sync_fill_pt_info(pt, (u8 *)data + len, size - len);

		if (ret < 0)
			goto out;

		len += ret;
	}

	data->len = len;

	/* copy only the bytes actually filled, not the whole buffer */
	if (copy_to_user((void __user *)arg, data, len))
		ret = -EFAULT;
	else
		ret = 0;

out:
	kfree(data);

	return ret;
}
Erik Gilling010accf2012-03-13 15:34:34 -0700682
683static long sync_fence_ioctl(struct file *file, unsigned int cmd,
684 unsigned long arg)
685{
686 struct sync_fence *fence = file->private_data;
687 switch (cmd) {
688 case SYNC_IOC_WAIT:
689 return sync_fence_ioctl_wait(fence, arg);
690
691 case SYNC_IOC_MERGE:
692 return sync_fence_ioctl_merge(fence, arg);
Erik Gilling981c8a92012-03-14 19:49:15 -0700693
Erik Gilling3913bff2012-03-15 17:45:50 -0700694 case SYNC_IOC_FENCE_INFO:
695 return sync_fence_ioctl_fence_info(fence, arg);
696
Erik Gilling010accf2012-03-13 15:34:34 -0700697 default:
698 return -ENOTTY;
699 }
700}
701
Erik Gilling981c8a92012-03-14 19:49:15 -0700702#ifdef CONFIG_DEBUG_FS
/* Map a pt/fence status value to its human-readable debugfs label. */
static const char *sync_status_str(int status)
{
	if (status > 0)
		return "signaled";
	if (status == 0)
		return "active";
	return "error";
}
712
/*
 * Print one pt line into the debugfs seq_file.  @fence selects the
 * prefix: fence members print as "<timeline-name>_pt", raw timeline
 * children as plain "pt".
 */
static void sync_print_pt(struct seq_file *s, struct sync_pt *pt, bool fence)
{
	int status = pt->status;
	seq_printf(s, " %s%spt %s",
		   fence ? pt->parent->name : "",
		   fence ? "_" : "",
		   sync_status_str(status));
	if (pt->status) {
		/* timestamp is only set once the pt left the pending state */
		struct timeval tv = ktime_to_timeval(pt->timestamp);
		seq_printf(s, "@%ld.%06ld", tv.tv_sec, tv.tv_usec);
	}

	/* optional driver annotation for the pt */
	if (pt->parent->ops->print_pt) {
		seq_printf(s, ": ");
		pt->parent->ops->print_pt(s, pt);
	}

	seq_printf(s, "\n");
}
732
/* Dump one timeline and all of its child pts into the debugfs seq_file. */
static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
{
	struct list_head *pos;
	unsigned long flags;

	seq_printf(s, "%s %s", obj->name, obj->ops->driver_name);

	/* optional driver annotation for the timeline itself */
	if (obj->ops->print_obj) {
		seq_printf(s, ": ");
		obj->ops->print_obj(s, obj);
	}

	seq_printf(s, "\n");

	spin_lock_irqsave(&obj->child_list_lock, flags);
	list_for_each(pos, &obj->child_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, child_list);
		sync_print_pt(s, pt, false);
	}
	spin_unlock_irqrestore(&obj->child_list_lock, flags);
}
755
/* Dump one fence, its member pts, and any still-queued async waiters. */
static void sync_print_fence(struct seq_file *s, struct sync_fence *fence)
{
	struct list_head *pos;
	unsigned long flags;

	seq_printf(s, "%s: %s\n", fence->name, sync_status_str(fence->status));

	/*
	 * NOTE(review): pt_list is walked without a lock here, unlike the
	 * waiter list below -- presumably because pts are only added at
	 * fence creation and freed at release; confirm against callers.
	 */
	list_for_each(pos, &fence->pt_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, pt_list);
		sync_print_pt(s, pt, true);
	}

	spin_lock_irqsave(&fence->waiter_list_lock, flags);
	list_for_each(pos, &fence->waiter_list_head) {
		struct sync_fence_waiter *waiter =
			container_of(pos, struct sync_fence_waiter,
				     waiter_list);

		seq_printf(s, "waiter %pF\n", waiter->callback);
	}
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
}
779
/* debugfs "sync" file: dump every live timeline, then every live fence. */
static int sync_debugfs_show(struct seq_file *s, void *unused)
{
	unsigned long flags;
	struct list_head *pos;

	seq_printf(s, "objs:\n--------------\n");

	spin_lock_irqsave(&sync_timeline_list_lock, flags);
	list_for_each(pos, &sync_timeline_list_head) {
		struct sync_timeline *obj =
			container_of(pos, struct sync_timeline,
				     sync_timeline_list);

		sync_print_obj(s, obj);
		seq_printf(s, "\n");
	}
	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

	seq_printf(s, "fences:\n--------------\n");

	spin_lock_irqsave(&sync_fence_list_lock, flags);
	list_for_each(pos, &sync_fence_list_head) {
		struct sync_fence *fence =
			container_of(pos, struct sync_fence, sync_fence_list);

		sync_print_fence(s, fence);
		seq_printf(s, "\n");
	}
	spin_unlock_irqrestore(&sync_fence_list_lock, flags);
	return 0;
}
811
/* Bind the single-show seq_file helper to sync_debugfs_show(). */
static int sync_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, sync_debugfs_show, inode->i_private);
}
816
/* read-only seq_file plumbing for the debugfs "sync" entry */
static const struct file_operations sync_debugfs_fops = {
	.open = sync_debugfs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
823
/* Create the debugfs "sync" node; registered as a late_initcall. */
static __init int sync_debugfs_init(void)
{
	debugfs_create_file("sync", S_IRUGO, NULL, NULL, &sync_debugfs_fops);
	return 0;
}

late_initcall(sync_debugfs_init);
831
832#endif