/*
 * drivers/base/sync.c
 *
 * Copyright (C) 2012 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/sync.h>
#include <linux/uaccess.h>

#include <linux/anon_inodes.h>

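/*
 * Overview (summary of the objects implemented below): a sync_timeline is
 * a per-driver ordering primitive; sync_pts are points on a timeline that
 * signal once the timeline reaches them; sync_fences collect sync_pts,
 * possibly from several timelines, behind a file so userspace can wait on,
 * poll, and merge them via ioctl.
 */
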
static void sync_fence_signal_pt(struct sync_pt *pt);
static int _sync_pt_has_signaled(struct sync_pt *pt);

static LIST_HEAD(sync_timeline_list_head);
static DEFINE_SPINLOCK(sync_timeline_list_lock);

static LIST_HEAD(sync_fence_list_head);
static DEFINE_SPINLOCK(sync_fence_list_lock);

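/**
 * sync_timeline_create() - creates a sync object
 * @ops:	specifies the implementation ops for the object
 * @size:	size to allocate for this object
 * @name:	sync_timeline name
 *
 * Creates a new sync_timeline which will use the implementation specified by
 * @ops.  @size bytes will be allocated, allowing implementation-specific data
 * to be kept after the generic sync_timeline struct.
 */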
struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
					   int size, const char *name)
{
	struct sync_timeline *obj;
	unsigned long flags;

	if (size < sizeof(struct sync_timeline))
		return NULL;

	obj = kzalloc(size, GFP_KERNEL);
	if (obj == NULL)
		return NULL;

	obj->ops = ops;
	strlcpy(obj->name, name, sizeof(obj->name));

	INIT_LIST_HEAD(&obj->child_list_head);
	spin_lock_init(&obj->child_list_lock);

	INIT_LIST_HEAD(&obj->active_list_head);
	spin_lock_init(&obj->active_list_lock);

	spin_lock_irqsave(&sync_timeline_list_lock, flags);
	list_add_tail(&obj->sync_timeline_list, &sync_timeline_list_head);
	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

	return obj;
}
EXPORT_SYMBOL(sync_timeline_create);

static void sync_timeline_free(struct sync_timeline *obj)
{
	unsigned long flags;

	if (obj->ops->release_obj)
		obj->ops->release_obj(obj);

	spin_lock_irqsave(&sync_timeline_list_lock, flags);
	list_del(&obj->sync_timeline_list);
	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

	kfree(obj);
}

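/**
 * sync_timeline_destroy() - destroys a sync object
 * @obj:	sync_timeline to destroy
 *
 * A sync implementation should call this when @obj is going away (i.e. on
 * module unload).  @obj is not actually freed until all of its children
 * sync_pts are freed; until then, a final signal is propagated so waiters
 * see an error status.
 */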
void sync_timeline_destroy(struct sync_timeline *obj)
{
	unsigned long flags;
	bool needs_freeing;

	spin_lock_irqsave(&obj->child_list_lock, flags);
	obj->destroyed = true;
	needs_freeing = list_empty(&obj->child_list_head);
	spin_unlock_irqrestore(&obj->child_list_lock, flags);

	if (needs_freeing)
		sync_timeline_free(obj);
	else
		sync_timeline_signal(obj);
}
EXPORT_SYMBOL(sync_timeline_destroy);

static void sync_timeline_add_pt(struct sync_timeline *obj, struct sync_pt *pt)
{
	unsigned long flags;

	pt->parent = obj;

	spin_lock_irqsave(&obj->child_list_lock, flags);
	list_add_tail(&pt->child_list, &obj->child_list_head);
	spin_unlock_irqrestore(&obj->child_list_lock, flags);
}

static void sync_timeline_remove_pt(struct sync_pt *pt)
{
	struct sync_timeline *obj = pt->parent;
	unsigned long flags;
	bool needs_freeing;

	spin_lock_irqsave(&obj->active_list_lock, flags);
	if (!list_empty(&pt->active_list))
		list_del_init(&pt->active_list);
	spin_unlock_irqrestore(&obj->active_list_lock, flags);

	spin_lock_irqsave(&obj->child_list_lock, flags);
	list_del(&pt->child_list);
	needs_freeing = obj->destroyed && list_empty(&obj->child_list_head);
	spin_unlock_irqrestore(&obj->child_list_lock, flags);

	if (needs_freeing)
		sync_timeline_free(obj);
}

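/**
 * sync_timeline_signal() - signal a status change on a sync_timeline
 * @obj:	sync_timeline to signal
 *
 * A sync implementation should call this any time one of its sync_pts has
 * signaled or has an error condition.
 */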
void sync_timeline_signal(struct sync_timeline *obj)
{
	unsigned long flags;
	LIST_HEAD(signaled_pts);
	struct list_head *pos, *n;

	spin_lock_irqsave(&obj->active_list_lock, flags);

	list_for_each_safe(pos, n, &obj->active_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, active_list);

		if (_sync_pt_has_signaled(pt))
			list_move(pos, &signaled_pts);
	}

	spin_unlock_irqrestore(&obj->active_list_lock, flags);

	list_for_each_safe(pos, n, &signaled_pts) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, active_list);

		list_del_init(pos);
		sync_fence_signal_pt(pt);
	}
}
EXPORT_SYMBOL(sync_timeline_signal);

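/**
 * sync_pt_create() - creates a sync pt
 * @parent:	sync_pt's parent sync_timeline
 * @size:	size to allocate for this pt
 *
 * Creates a new sync_pt as a child of @parent.  @size bytes will be
 * allocated, allowing implementation-specific data to be kept after the
 * generic sync_pt struct.
 */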
struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size)
{
	struct sync_pt *pt;

	if (size < sizeof(struct sync_pt))
		return NULL;

	pt = kzalloc(size, GFP_KERNEL);
	if (pt == NULL)
		return NULL;

	INIT_LIST_HEAD(&pt->active_list);
	sync_timeline_add_pt(parent, pt);

	return pt;
}
EXPORT_SYMBOL(sync_pt_create);

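/**
 * sync_pt_free() - frees a sync pt
 * @pt:		sync_pt to free
 *
 * Calls the implementation's free_pt op, if any, and unlinks @pt from its
 * parent timeline (freeing the timeline if it was already destroyed and
 * @pt was its last child).
 */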
void sync_pt_free(struct sync_pt *pt)
{
	if (pt->parent->ops->free_pt)
		pt->parent->ops->free_pt(pt);

	sync_timeline_remove_pt(pt);

	kfree(pt);
}
EXPORT_SYMBOL(sync_pt_free);

/* call with pt->parent->active_list_lock held */
static int _sync_pt_has_signaled(struct sync_pt *pt)
{
	int old_status = pt->status;

	if (!pt->status)
		pt->status = pt->parent->ops->has_signaled(pt);

	if (!pt->status && pt->parent->destroyed)
		pt->status = -ENOENT;

	if (pt->status != old_status)
		pt->timestamp = ktime_get();

	return pt->status;
}

static struct sync_pt *sync_pt_dup(struct sync_pt *pt)
{
	return pt->parent->ops->dup(pt);
}

/* Adds a sync pt to the active queue.  Called when added to a fence */
static void sync_pt_activate(struct sync_pt *pt)
{
	struct sync_timeline *obj = pt->parent;
	unsigned long flags;
	int err;

	spin_lock_irqsave(&obj->active_list_lock, flags);

	err = _sync_pt_has_signaled(pt);
	if (err != 0)
		goto out;

	list_add_tail(&pt->active_list, &obj->active_list_head);

out:
	spin_unlock_irqrestore(&obj->active_list_lock, flags);
}

static int sync_fence_release(struct inode *inode, struct file *file);
static unsigned int sync_fence_poll(struct file *file, poll_table *wait);
static long sync_fence_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg);

static const struct file_operations sync_fence_fops = {
	.release = sync_fence_release,
	.poll = sync_fence_poll,
	.unlocked_ioctl = sync_fence_ioctl,
};

static struct sync_fence *sync_fence_alloc(const char *name)
{
	struct sync_fence *fence;
	unsigned long flags;

	fence = kzalloc(sizeof(struct sync_fence), GFP_KERNEL);
	if (fence == NULL)
		return NULL;

	fence->file = anon_inode_getfile("sync_fence", &sync_fence_fops,
					 fence, 0);
	/* anon_inode_getfile() returns an ERR_PTR, not NULL, on failure */
	if (IS_ERR(fence->file))
		goto err;

	strlcpy(fence->name, name, sizeof(fence->name));

	INIT_LIST_HEAD(&fence->pt_list_head);
	INIT_LIST_HEAD(&fence->waiter_list_head);
	spin_lock_init(&fence->waiter_list_lock);

	init_waitqueue_head(&fence->wq);

	spin_lock_irqsave(&sync_fence_list_lock, flags);
	list_add_tail(&fence->sync_fence_list, &sync_fence_list_head);
	spin_unlock_irqrestore(&sync_fence_list_lock, flags);

	return fence;

err:
	kfree(fence);
	return NULL;
}

/* TODO: implement a create which takes more than one sync_pt */
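/**
 * sync_fence_create() - creates a sync fence
 * @name:	name of fence to create
 * @pt:		sync_pt to add to the fence
 *
 * Creates a fence containing @pt.  Once this is called, the fence takes
 * ownership of @pt.
 */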
struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt)
{
	struct sync_fence *fence;

	if (pt->fence)
		return NULL;

	fence = sync_fence_alloc(name);
	if (fence == NULL)
		return NULL;

	pt->fence = fence;
	list_add(&pt->pt_list, &fence->pt_list_head);
	sync_pt_activate(pt);

	return fence;
}
EXPORT_SYMBOL(sync_fence_create);

static int sync_fence_copy_pts(struct sync_fence *dst, struct sync_fence *src)
{
	struct list_head *pos;

	list_for_each(pos, &src->pt_list_head) {
		struct sync_pt *orig_pt =
			container_of(pos, struct sync_pt, pt_list);
		struct sync_pt *new_pt = sync_pt_dup(orig_pt);

		if (new_pt == NULL)
			return -ENOMEM;

		new_pt->fence = dst;
		list_add(&new_pt->pt_list, &dst->pt_list_head);
		sync_pt_activate(new_pt);
	}

	return 0;
}

static void sync_fence_free_pts(struct sync_fence *fence)
{
	struct list_head *pos, *n;

	list_for_each_safe(pos, n, &fence->pt_list_head) {
		struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
		sync_pt_free(pt);
	}
}

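/**
 * sync_fence_fdget() - gets a fence from an fd
 * @fd:		fd referencing a fence
 *
 * Ensures @fd references a valid fence, increments the refcount of the
 * backing file, and returns the fence.  Returns NULL if @fd does not refer
 * to a sync fence.
 */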
struct sync_fence *sync_fence_fdget(int fd)
{
	struct file *file = fget(fd);

	if (file == NULL)
		return NULL;

	if (file->f_op != &sync_fence_fops)
		goto err;

	return file->private_data;

err:
	fput(file);
	return NULL;
}
EXPORT_SYMBOL(sync_fence_fdget);

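/**
 * sync_fence_put() - puts a reference on a sync fence
 * @fence:	fence to put
 *
 * Drops a reference on @fence's backing file; when the last reference is
 * dropped, the fence and all of its sync_pts are freed.
 */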
void sync_fence_put(struct sync_fence *fence)
{
	fput(fence->file);
}
EXPORT_SYMBOL(sync_fence_put);

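/**
 * sync_fence_install() - installs a fence into a file descriptor
 * @fence:	fence to install
 * @fd:		file descriptor in which to install the fence
 *
 * Installs @fence into @fd.  @fd should be acquired through get_unused_fd().
 */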
void sync_fence_install(struct sync_fence *fence, int fd)
{
	fd_install(fd, fence->file);
}
EXPORT_SYMBOL(sync_fence_install);

static int sync_fence_get_status(struct sync_fence *fence)
{
	struct list_head *pos;
	int status = 1;

	list_for_each(pos, &fence->pt_list_head) {
		struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
		int pt_status = pt->status;

		if (pt_status < 0) {
			status = pt_status;
			break;
		} else if (status == 1) {
			status = pt_status;
		}
	}

	return status;
}

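/**
 * sync_fence_merge() - merges two fences
 * @name:	name of the new fence
 * @a:		fence a
 * @b:		fence b
 *
 * Creates a new fence which contains copies of all the sync_pts in both
 * @a and @b.  @a and @b remain valid, independent fences.  Returns NULL
 * on allocation or dup failure.
 *
 * Illustrative in-kernel usage (fence_a and fence_b are hypothetical
 * fences already held by the caller):
 *
 *	struct sync_fence *merged;
 *
 *	merged = sync_fence_merge("merged", fence_a, fence_b);
 *	if (merged == NULL)
 *		return -ENOMEM;
 */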
struct sync_fence *sync_fence_merge(const char *name,
				    struct sync_fence *a, struct sync_fence *b)
{
	struct sync_fence *fence;
	int err;

	fence = sync_fence_alloc(name);
	if (fence == NULL)
		return NULL;

	err = sync_fence_copy_pts(fence, a);
	if (err < 0)
		goto err;

	err = sync_fence_copy_pts(fence, b);
	if (err < 0)
		goto err;

	fence->status = sync_fence_get_status(fence);

	return fence;
err:
	sync_fence_free_pts(fence);
	kfree(fence);
	return NULL;
}
EXPORT_SYMBOL(sync_fence_merge);

static void sync_fence_signal_pt(struct sync_pt *pt)
{
	LIST_HEAD(signaled_waiters);
	struct sync_fence *fence = pt->fence;
	struct list_head *pos;
	struct list_head *n;
	unsigned long flags;
	int status;

	status = sync_fence_get_status(fence);

	spin_lock_irqsave(&fence->waiter_list_lock, flags);
	/*
	 * This should protect against two threads racing on the signaled
	 * false -> true transition.
	 */
	if (status && !fence->status) {
		list_for_each_safe(pos, n, &fence->waiter_list_head)
			list_move(pos, &signaled_waiters);

		fence->status = status;
	} else {
		status = 0;
	}
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);

	if (status) {
		list_for_each_safe(pos, n, &signaled_waiters) {
			struct sync_fence_waiter *waiter =
				container_of(pos, struct sync_fence_waiter,
					     waiter_list);

			list_del(pos);
			waiter->callback(fence, waiter);
		}
		wake_up(&fence->wq);
	}
}

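/**
 * sync_fence_wait_async() - registers an async wait on the fence
 * @fence:	fence to wait on
 * @waiter:	waiter callback struct
 *
 * Registers a waiter to be called back when @fence signals or has an error.
 * Returns 0 if the waiter was queued, or @fence's current status if it has
 * already signaled.
 */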
int sync_fence_wait_async(struct sync_fence *fence,
			  struct sync_fence_waiter *waiter)
{
	unsigned long flags;
	int err = 0;

	spin_lock_irqsave(&fence->waiter_list_lock, flags);

	if (fence->status) {
		err = fence->status;
		goto out;
	}

	list_add_tail(&waiter->waiter_list, &fence->waiter_list_head);
out:
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);

	return err;
}
EXPORT_SYMBOL(sync_fence_wait_async);

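/**
 * sync_fence_cancel_async() - cancels an async wait
 * @fence:	fence upon which the waiter was registered
 * @waiter:	waiter callback struct
 *
 * Returns 0 if @waiter was removed from @fence's waiter list, or -ENOENT if
 * it was not found (e.g. because the callback already fired).
 */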
int sync_fence_cancel_async(struct sync_fence *fence,
			    struct sync_fence_waiter *waiter)
{
	struct list_head *pos;
	struct list_head *n;
	unsigned long flags;
	int ret = -ENOENT;

	spin_lock_irqsave(&fence->waiter_list_lock, flags);
	/*
	 * Make sure waiter is still in waiter_list because it is possible for
	 * the waiter to be removed from the list while the callback is still
	 * pending.
	 */
	list_for_each_safe(pos, n, &fence->waiter_list_head) {
		struct sync_fence_waiter *list_waiter =
			container_of(pos, struct sync_fence_waiter,
				     waiter_list);
		if (list_waiter == waiter) {
			list_del(pos);
			ret = 0;
			break;
		}
	}
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
	return ret;
}
EXPORT_SYMBOL(sync_fence_cancel_async);

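/**
 * sync_fence_wait() - waits on a fence
 * @fence:	fence to wait on
 * @timeout:	timeout in ms, or 0 to wait indefinitely
 *
 * Returns 0 once @fence signals, -ETIME if a nonzero @timeout expires
 * first, a negative error if the wait is interrupted, or the fence's own
 * (negative) error status.
 */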
int sync_fence_wait(struct sync_fence *fence, long timeout)
{
	int err;

	if (timeout) {
		timeout = msecs_to_jiffies(timeout);
		err = wait_event_interruptible_timeout(fence->wq,
						       fence->status != 0,
						       timeout);
	} else {
		err = wait_event_interruptible(fence->wq, fence->status != 0);
	}

	if (err < 0)
		return err;

	if (fence->status < 0)
		return fence->status;

	if (fence->status == 0)
		return -ETIME;

	return 0;
}
EXPORT_SYMBOL(sync_fence_wait);

static int sync_fence_release(struct inode *inode, struct file *file)
{
	struct sync_fence *fence = file->private_data;
	unsigned long flags;

	sync_fence_free_pts(fence);

	spin_lock_irqsave(&sync_fence_list_lock, flags);
	list_del(&fence->sync_fence_list);
	spin_unlock_irqrestore(&sync_fence_list_lock, flags);

	kfree(fence);

	return 0;
}

static unsigned int sync_fence_poll(struct file *file, poll_table *wait)
{
	struct sync_fence *fence = file->private_data;

	poll_wait(file, &fence->wq, wait);

	if (fence->status == 1)
		return POLLIN;
	else if (fence->status < 0)
		return POLLERR;
	else
		return 0;
}

static long sync_fence_ioctl_wait(struct sync_fence *fence, unsigned long arg)
{
	__u32 value;

	if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
		return -EFAULT;

	return sync_fence_wait(fence, value);
}

static long sync_fence_ioctl_merge(struct sync_fence *fence, unsigned long arg)
{
	int fd = get_unused_fd();
	int err;
	struct sync_fence *fence2, *fence3;
	struct sync_merge_data data;

	/* get_unused_fd() can fail; don't treat a negative value as an fd */
	if (fd < 0)
		return fd;

	if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
		err = -EFAULT;
		goto err_put_fd;
	}

	fence2 = sync_fence_fdget(data.fd2);
	if (fence2 == NULL) {
		err = -ENOENT;
		goto err_put_fd;
	}

	data.name[sizeof(data.name) - 1] = '\0';
	fence3 = sync_fence_merge(data.name, fence, fence2);
	if (fence3 == NULL) {
		err = -ENOMEM;
		goto err_put_fence2;
	}

	data.fence = fd;
	if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
		err = -EFAULT;
		goto err_put_fence3;
	}

	sync_fence_install(fence3, fd);
	sync_fence_put(fence2);
	return 0;

err_put_fence3:
	sync_fence_put(fence3);

err_put_fence2:
	sync_fence_put(fence2);

err_put_fd:
	put_unused_fd(fd);
	return err;
}

static int sync_fill_pt_info(struct sync_pt *pt, void *data, int size)
{
	struct sync_pt_info *info = data;
	int ret;

	if (size < sizeof(struct sync_pt_info))
		return -ENOMEM;

	info->len = sizeof(struct sync_pt_info);

	if (pt->parent->ops->fill_driver_data) {
		ret = pt->parent->ops->fill_driver_data(pt, info->driver_data,
							size - sizeof(*info));
		if (ret < 0)
			return ret;

		info->len += ret;
	}

	strlcpy(info->obj_name, pt->parent->name, sizeof(info->obj_name));
	strlcpy(info->driver_name, pt->parent->ops->driver_name,
		sizeof(info->driver_name));
	info->status = pt->status;
	info->timestamp_ns = ktime_to_ns(pt->timestamp);

	return info->len;
}

static long sync_fence_ioctl_fence_info(struct sync_fence *fence,
					unsigned long arg)
{
	struct sync_fence_info_data *data;
	struct list_head *pos;
	__u32 size;
	__u32 len = 0;
	int ret;

	if (copy_from_user(&size, (void __user *)arg, sizeof(size)))
		return -EFAULT;

	if (size < sizeof(struct sync_fence_info_data))
		return -EINVAL;

	if (size > 4096)
		size = 4096;

	data = kzalloc(size, GFP_KERNEL);
	if (data == NULL)
		return -ENOMEM;

	strlcpy(data->name, fence->name, sizeof(data->name));
	data->status = fence->status;
	len = sizeof(struct sync_fence_info_data);

	list_for_each(pos, &fence->pt_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, pt_list);

		ret = sync_fill_pt_info(pt, (u8 *)data + len, size - len);

		if (ret < 0)
			goto out;

		len += ret;
	}

	data->len = len;

	if (copy_to_user((void __user *)arg, data, len))
		ret = -EFAULT;
	else
		ret = 0;

out:
	kfree(data);

	return ret;
}

static long sync_fence_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	struct sync_fence *fence = file->private_data;

	switch (cmd) {
	case SYNC_IOC_WAIT:
		return sync_fence_ioctl_wait(fence, arg);

	case SYNC_IOC_MERGE:
		return sync_fence_ioctl_merge(fence, arg);

	case SYNC_IOC_FENCE_INFO:
		return sync_fence_ioctl_fence_info(fence, arg);

	default:
		return -ENOTTY;
	}
}

#ifdef CONFIG_DEBUG_FS
static const char *sync_status_str(int status)
{
	if (status > 0)
		return "signaled";
	else if (status == 0)
		return "active";
	else
		return "error";
}

static void sync_print_pt(struct seq_file *s, struct sync_pt *pt, bool fence)
{
	int status = pt->status;

	seq_printf(s, "  %s%spt %s",
		   fence ? pt->parent->name : "",
		   fence ? "_" : "",
		   sync_status_str(status));
	if (pt->status) {
		struct timeval tv = ktime_to_timeval(pt->timestamp);
		seq_printf(s, "@%ld.%06ld", tv.tv_sec, tv.tv_usec);
	}

	if (pt->parent->ops->print_pt) {
		seq_printf(s, ": ");
		pt->parent->ops->print_pt(s, pt);
	}

	seq_printf(s, "\n");
}

static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
{
	struct list_head *pos;
	unsigned long flags;

	seq_printf(s, "%s %s", obj->name, obj->ops->driver_name);

	if (obj->ops->print_obj) {
		seq_printf(s, ": ");
		obj->ops->print_obj(s, obj);
	}

	seq_printf(s, "\n");

	spin_lock_irqsave(&obj->child_list_lock, flags);
	list_for_each(pos, &obj->child_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, child_list);
		sync_print_pt(s, pt, false);
	}
	spin_unlock_irqrestore(&obj->child_list_lock, flags);
}

static void sync_print_fence(struct seq_file *s, struct sync_fence *fence)
{
	struct list_head *pos;
	unsigned long flags;

	seq_printf(s, "%s: %s\n", fence->name, sync_status_str(fence->status));

	list_for_each(pos, &fence->pt_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, pt_list);
		sync_print_pt(s, pt, true);
	}

	spin_lock_irqsave(&fence->waiter_list_lock, flags);
	list_for_each(pos, &fence->waiter_list_head) {
		struct sync_fence_waiter *waiter =
			container_of(pos, struct sync_fence_waiter,
				     waiter_list);

		seq_printf(s, "waiter %pF\n", waiter->callback);
	}
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
}

static int sync_debugfs_show(struct seq_file *s, void *unused)
{
	unsigned long flags;
	struct list_head *pos;

	seq_printf(s, "objs:\n--------------\n");

	spin_lock_irqsave(&sync_timeline_list_lock, flags);
	list_for_each(pos, &sync_timeline_list_head) {
		struct sync_timeline *obj =
			container_of(pos, struct sync_timeline,
				     sync_timeline_list);

		sync_print_obj(s, obj);
		seq_printf(s, "\n");
	}
	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

	seq_printf(s, "fences:\n--------------\n");

	spin_lock_irqsave(&sync_fence_list_lock, flags);
	list_for_each(pos, &sync_fence_list_head) {
		struct sync_fence *fence =
			container_of(pos, struct sync_fence, sync_fence_list);

		sync_print_fence(s, fence);
		seq_printf(s, "\n");
	}
	spin_unlock_irqrestore(&sync_fence_list_lock, flags);

	return 0;
}

static int sync_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, sync_debugfs_show, inode->i_private);
}

static const struct file_operations sync_debugfs_fops = {
	.open           = sync_debugfs_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = single_release,
};

static __init int sync_debugfs_init(void)
{
	debugfs_create_file("sync", S_IRUGO, NULL, NULL, &sync_debugfs_fops);
	return 0;
}

late_initcall(sync_debugfs_init);

#endif