/*
 * drivers/base/sync.c
 *
 * Copyright (C) 2012 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/anon_inodes.h>

#include "sync.h"

static void sync_fence_signal_pt(struct sync_pt *pt);
static int _sync_pt_has_signaled(struct sync_pt *pt);
static void sync_fence_free(struct kref *kref);
static void sync_dump(void);

static LIST_HEAD(sync_timeline_list_head);
static DEFINE_SPINLOCK(sync_timeline_list_lock);

static LIST_HEAD(sync_fence_list_head);
static DEFINE_SPINLOCK(sync_fence_list_lock);

struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
                                           int size, const char *name)
{
        struct sync_timeline *obj;
        unsigned long flags;

        if (size < sizeof(struct sync_timeline))
                return NULL;

        obj = kzalloc(size, GFP_KERNEL);
        if (obj == NULL)
                return NULL;

        kref_init(&obj->kref);
        obj->ops = ops;
        strlcpy(obj->name, name, sizeof(obj->name));

        INIT_LIST_HEAD(&obj->child_list_head);
        spin_lock_init(&obj->child_list_lock);

        INIT_LIST_HEAD(&obj->active_list_head);
        spin_lock_init(&obj->active_list_lock);

        spin_lock_irqsave(&sync_timeline_list_lock, flags);
        list_add_tail(&obj->sync_timeline_list, &sync_timeline_list_head);
        spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

        return obj;
}
EXPORT_SYMBOL(sync_timeline_create);
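
/*
 * Usage sketch (illustrative, not part of this driver): callers embed
 * struct sync_timeline at the start of their own timeline object and pass
 * the total size here, so the driver payload is allocated in one piece.
 * The names my_timeline and my_timeline_ops below are hypothetical.
 *
 *      struct my_timeline {
 *              struct sync_timeline obj;       <-- must come first
 *              u32 value;                      <-- driver-private state
 *      };
 *
 *      struct my_timeline *tl = (struct my_timeline *)
 *              sync_timeline_create(&my_timeline_ops,
 *                                   sizeof(struct my_timeline), "my_tl");
 */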

static void sync_timeline_free(struct kref *kref)
{
        struct sync_timeline *obj =
                container_of(kref, struct sync_timeline, kref);
        unsigned long flags;

        if (obj->ops->release_obj)
                obj->ops->release_obj(obj);

        spin_lock_irqsave(&sync_timeline_list_lock, flags);
        list_del(&obj->sync_timeline_list);
        spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

        kfree(obj);
}

void sync_timeline_destroy(struct sync_timeline *obj)
{
        obj->destroyed = true;

        /*
         * If this is not the last reference, signal any children
         * that their parent is going away.
         */

        if (!kref_put(&obj->kref, sync_timeline_free))
                sync_timeline_signal(obj);
}
EXPORT_SYMBOL(sync_timeline_destroy);

static void sync_timeline_add_pt(struct sync_timeline *obj, struct sync_pt *pt)
{
        unsigned long flags;

        pt->parent = obj;

        spin_lock_irqsave(&obj->child_list_lock, flags);
        list_add_tail(&pt->child_list, &obj->child_list_head);
        spin_unlock_irqrestore(&obj->child_list_lock, flags);
}

static void sync_timeline_remove_pt(struct sync_pt *pt)
{
        struct sync_timeline *obj = pt->parent;
        unsigned long flags;

        spin_lock_irqsave(&obj->active_list_lock, flags);
        if (!list_empty(&pt->active_list))
                list_del_init(&pt->active_list);
        spin_unlock_irqrestore(&obj->active_list_lock, flags);

        spin_lock_irqsave(&obj->child_list_lock, flags);
        if (!list_empty(&pt->child_list))
                list_del_init(&pt->child_list);
        spin_unlock_irqrestore(&obj->child_list_lock, flags);
}

void sync_timeline_signal(struct sync_timeline *obj)
{
        unsigned long flags;
        LIST_HEAD(signaled_pts);
        struct list_head *pos, *n;

        spin_lock_irqsave(&obj->active_list_lock, flags);

        list_for_each_safe(pos, n, &obj->active_list_head) {
                struct sync_pt *pt =
                        container_of(pos, struct sync_pt, active_list);

                if (_sync_pt_has_signaled(pt)) {
                        list_del_init(pos);
                        list_add(&pt->signaled_list, &signaled_pts);
                        kref_get(&pt->fence->kref);
                }
        }

        spin_unlock_irqrestore(&obj->active_list_lock, flags);

        list_for_each_safe(pos, n, &signaled_pts) {
                struct sync_pt *pt =
                        container_of(pos, struct sync_pt, signaled_list);

                list_del_init(pos);
                sync_fence_signal_pt(pt);
                kref_put(&pt->fence->kref, sync_fence_free);
        }
}
EXPORT_SYMBOL(sync_timeline_signal);
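
/*
 * Illustrative sketch (hypothetical driver code): the timeline owner
 * advances whatever counter its ops->has_signaled() consults and then
 * calls sync_timeline_signal() so pts that are now behind the counter
 * move to the signaled state and their waiters run.
 *
 *      static irqreturn_t my_irq_handler(int irq, void *data)
 *      {
 *              struct my_timeline *tl = data;
 *
 *              tl->value++;
 *              sync_timeline_signal(&tl->obj);
 *              return IRQ_HANDLED;
 *      }
 */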

struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size)
{
        struct sync_pt *pt;

        if (size < sizeof(struct sync_pt))
                return NULL;

        pt = kzalloc(size, GFP_KERNEL);
        if (pt == NULL)
                return NULL;

        INIT_LIST_HEAD(&pt->active_list);
        kref_get(&parent->kref);
        sync_timeline_add_pt(parent, pt);

        return pt;
}
EXPORT_SYMBOL(sync_pt_create);
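
/*
 * Usage sketch (illustrative; my_pt is a hypothetical driver type):
 * as with timelines, a driver pt embeds struct sync_pt first and passes
 * its full size, then fills in the driver-private fields.
 *
 *      struct my_pt {
 *              struct sync_pt pt;      <-- must come first
 *              u32 value;              <-- timeline value this pt fires at
 *      };
 *
 *      struct my_pt *p = (struct my_pt *)
 *              sync_pt_create(&tl->obj, sizeof(struct my_pt));
 *      if (p)
 *              p->value = target_value;
 */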

void sync_pt_free(struct sync_pt *pt)
{
        if (pt->parent->ops->free_pt)
                pt->parent->ops->free_pt(pt);

        sync_timeline_remove_pt(pt);

        kref_put(&pt->parent->kref, sync_timeline_free);

        kfree(pt);
}
EXPORT_SYMBOL(sync_pt_free);

/* call with pt->parent->active_list_lock held */
static int _sync_pt_has_signaled(struct sync_pt *pt)
{
        int old_status = pt->status;

        if (!pt->status)
                pt->status = pt->parent->ops->has_signaled(pt);

        if (!pt->status && pt->parent->destroyed)
                pt->status = -ENOENT;

        if (pt->status != old_status)
                pt->timestamp = ktime_get();

        return pt->status;
}

static struct sync_pt *sync_pt_dup(struct sync_pt *pt)
{
        return pt->parent->ops->dup(pt);
}

/* Adds a sync pt to the active queue.  Called when added to a fence */
static void sync_pt_activate(struct sync_pt *pt)
{
        struct sync_timeline *obj = pt->parent;
        unsigned long flags;
        int err;

        spin_lock_irqsave(&obj->active_list_lock, flags);

        err = _sync_pt_has_signaled(pt);
        if (err != 0)
                goto out;

        list_add_tail(&pt->active_list, &obj->active_list_head);

out:
        spin_unlock_irqrestore(&obj->active_list_lock, flags);
}

static int sync_fence_release(struct inode *inode, struct file *file);
static unsigned int sync_fence_poll(struct file *file, poll_table *wait);
static long sync_fence_ioctl(struct file *file, unsigned int cmd,
                             unsigned long arg);


static const struct file_operations sync_fence_fops = {
        .release = sync_fence_release,
        .poll = sync_fence_poll,
        .unlocked_ioctl = sync_fence_ioctl,
};

static struct sync_fence *sync_fence_alloc(const char *name)
{
        struct sync_fence *fence;
        unsigned long flags;

        fence = kzalloc(sizeof(struct sync_fence), GFP_KERNEL);
        if (fence == NULL)
                return NULL;

        fence->file = anon_inode_getfile("sync_fence", &sync_fence_fops,
                                         fence, 0);
        if (fence->file == NULL)
                goto err;

        kref_init(&fence->kref);
        strlcpy(fence->name, name, sizeof(fence->name));

        INIT_LIST_HEAD(&fence->pt_list_head);
        INIT_LIST_HEAD(&fence->waiter_list_head);
        spin_lock_init(&fence->waiter_list_lock);

        init_waitqueue_head(&fence->wq);

        spin_lock_irqsave(&sync_fence_list_lock, flags);
        list_add_tail(&fence->sync_fence_list, &sync_fence_list_head);
        spin_unlock_irqrestore(&sync_fence_list_lock, flags);

        return fence;

err:
        kfree(fence);
        return NULL;
}

/* TODO: implement a create which takes more than one sync_pt */
struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt)
{
        struct sync_fence *fence;

        if (pt->fence)
                return NULL;

        fence = sync_fence_alloc(name);
        if (fence == NULL)
                return NULL;

        pt->fence = fence;
        list_add(&pt->pt_list, &fence->pt_list_head);
        sync_pt_activate(pt);

        /*
         * signal the fence in case pt had already signaled by the time
         * sync_pt_activate(pt) was called
         */
        sync_fence_signal_pt(pt);

        return fence;
}
EXPORT_SYMBOL(sync_fence_create);
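
/*
 * Illustrative create-and-export sketch (my_pt_create is hypothetical;
 * error handling is elided).  Note that sync_fence_create() takes
 * ownership of pt on success; on failure the caller still owns pt and
 * must release it with sync_pt_free().
 *
 *      struct sync_pt *pt = my_pt_create(tl, value);
 *      struct sync_fence *fence = sync_fence_create("my_fence", pt);
 *      int fd = get_unused_fd();
 *
 *      sync_fence_install(fence, fd);  <-- fd now owns the fence's file
 */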

static int sync_fence_copy_pts(struct sync_fence *dst, struct sync_fence *src)
{
        struct list_head *pos;

        list_for_each(pos, &src->pt_list_head) {
                struct sync_pt *orig_pt =
                        container_of(pos, struct sync_pt, pt_list);
                struct sync_pt *new_pt = sync_pt_dup(orig_pt);

                if (new_pt == NULL)
                        return -ENOMEM;

                new_pt->fence = dst;
                list_add(&new_pt->pt_list, &dst->pt_list_head);
                sync_pt_activate(new_pt);
        }

        return 0;
}

static int sync_fence_merge_pts(struct sync_fence *dst, struct sync_fence *src)
{
        struct list_head *src_pos, *dst_pos, *n;

        list_for_each(src_pos, &src->pt_list_head) {
                struct sync_pt *src_pt =
                        container_of(src_pos, struct sync_pt, pt_list);
                bool collapsed = false;

                list_for_each_safe(dst_pos, n, &dst->pt_list_head) {
                        struct sync_pt *dst_pt =
                                container_of(dst_pos, struct sync_pt, pt_list);
                        /* collapse two sync_pts on the same timeline
                         * to a single sync_pt that will signal at
                         * the later of the two
                         */
                        if (dst_pt->parent == src_pt->parent) {
                                if (dst_pt->parent->ops->compare(dst_pt, src_pt)
                                    == -1) {
                                        struct sync_pt *new_pt =
                                                sync_pt_dup(src_pt);
                                        if (new_pt == NULL)
                                                return -ENOMEM;

                                        new_pt->fence = dst;
                                        list_replace(&dst_pt->pt_list,
                                                     &new_pt->pt_list);
                                        sync_pt_activate(new_pt);
                                        sync_pt_free(dst_pt);
                                }
                                collapsed = true;
                                break;
                        }
                }

                if (!collapsed) {
                        struct sync_pt *new_pt = sync_pt_dup(src_pt);

                        if (new_pt == NULL)
                                return -ENOMEM;

                        new_pt->fence = dst;
                        list_add(&new_pt->pt_list, &dst->pt_list_head);
                        sync_pt_activate(new_pt);
                }
        }

        return 0;
}

static void sync_fence_detach_pts(struct sync_fence *fence)
{
        struct list_head *pos, *n;

        list_for_each_safe(pos, n, &fence->pt_list_head) {
                struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
                sync_timeline_remove_pt(pt);
        }
}

static void sync_fence_free_pts(struct sync_fence *fence)
{
        struct list_head *pos, *n;

        list_for_each_safe(pos, n, &fence->pt_list_head) {
                struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
                sync_pt_free(pt);
        }
}

struct sync_fence *sync_fence_fdget(int fd)
{
        struct file *file = fget(fd);

        if (file == NULL)
                return NULL;

        if (file->f_op != &sync_fence_fops)
                goto err;

        return file->private_data;

err:
        fput(file);
        return NULL;
}
EXPORT_SYMBOL(sync_fence_fdget);

void sync_fence_put(struct sync_fence *fence)
{
        fput(fence->file);
}
EXPORT_SYMBOL(sync_fence_put);

void sync_fence_install(struct sync_fence *fence, int fd)
{
        fd_install(fd, fence->file);
}
EXPORT_SYMBOL(sync_fence_install);
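
/*
 * Lifetime rules, as a short sketch: sync_fence_fdget() takes a file
 * reference that must be balanced with sync_fence_put(), while
 * sync_fence_install() transfers the fence's file reference into the fd
 * table.  A typical in-kernel consumer looks like:
 *
 *      struct sync_fence *fence = sync_fence_fdget(fd);
 *
 *      if (fence == NULL)
 *              return -EINVAL;
 *      ...     use the fence   ...
 *      sync_fence_put(fence);
 */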

static int sync_fence_get_status(struct sync_fence *fence)
{
        struct list_head *pos;
        int status = 1;

        list_for_each(pos, &fence->pt_list_head) {
                struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
                int pt_status = pt->status;

                if (pt_status < 0) {
                        status = pt_status;
                        break;
                } else if (status == 1) {
                        status = pt_status;
                }
        }

        return status;
}

struct sync_fence *sync_fence_merge(const char *name,
                                    struct sync_fence *a, struct sync_fence *b)
{
        struct sync_fence *fence;
        int err;

        fence = sync_fence_alloc(name);
        if (fence == NULL)
                return NULL;

        err = sync_fence_copy_pts(fence, a);
        if (err < 0)
                goto err;

        err = sync_fence_merge_pts(fence, b);
        if (err < 0)
                goto err;

        /*
         * signal the fence in case one of its pts had already signaled
         * before it was activated
         */
        sync_fence_signal_pt(list_first_entry(&fence->pt_list_head,
                                              struct sync_pt,
                                              pt_list));

        return fence;
err:
        sync_fence_free_pts(fence);
        kfree(fence);
        return NULL;
}
EXPORT_SYMBOL(sync_fence_merge);
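
/*
 * Illustrative sketch: the merged fence signals only when both inputs
 * have signaled, with at most one pt per timeline thanks to
 * sync_fence_merge_pts().  The input fences remain valid and still owned
 * by the caller.
 *
 *      struct sync_fence *ab = sync_fence_merge("a+b", a, b);
 *
 *      if (ab == NULL)
 *              return -ENOMEM;
 */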

static void sync_fence_signal_pt(struct sync_pt *pt)
{
        LIST_HEAD(signaled_waiters);
        struct sync_fence *fence = pt->fence;
        struct list_head *pos;
        struct list_head *n;
        unsigned long flags;
        int status;

        status = sync_fence_get_status(fence);

        spin_lock_irqsave(&fence->waiter_list_lock, flags);
        /*
         * this should protect against two threads racing on the signaled
         * false -> true transition
         */
        if (status && !fence->status) {
                list_for_each_safe(pos, n, &fence->waiter_list_head)
                        list_move(pos, &signaled_waiters);

                fence->status = status;
        } else {
                status = 0;
        }
        spin_unlock_irqrestore(&fence->waiter_list_lock, flags);

        if (status) {
                list_for_each_safe(pos, n, &signaled_waiters) {
                        struct sync_fence_waiter *waiter =
                                container_of(pos, struct sync_fence_waiter,
                                             waiter_list);

                        list_del(pos);
                        waiter->callback(fence, waiter);
                }
                wake_up(&fence->wq);
        }
}

int sync_fence_wait_async(struct sync_fence *fence,
                          struct sync_fence_waiter *waiter)
{
        unsigned long flags;
        int err = 0;

        spin_lock_irqsave(&fence->waiter_list_lock, flags);

        if (fence->status) {
                err = fence->status;
                goto out;
        }

        list_add_tail(&waiter->waiter_list, &fence->waiter_list_head);
out:
        spin_unlock_irqrestore(&fence->waiter_list_lock, flags);

        return err;
}
EXPORT_SYMBOL(sync_fence_wait_async);

int sync_fence_cancel_async(struct sync_fence *fence,
                            struct sync_fence_waiter *waiter)
{
        struct list_head *pos;
        struct list_head *n;
        unsigned long flags;
        int ret = -ENOENT;

        spin_lock_irqsave(&fence->waiter_list_lock, flags);
        /*
         * Make sure waiter is still in waiter_list because it is possible for
         * the waiter to be removed from the list while the callback is still
         * pending.
         */
        list_for_each_safe(pos, n, &fence->waiter_list_head) {
                struct sync_fence_waiter *list_waiter =
                        container_of(pos, struct sync_fence_waiter,
                                     waiter_list);
                if (list_waiter == waiter) {
                        list_del(pos);
                        ret = 0;
                        break;
                }
        }
        spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
        return ret;
}
EXPORT_SYMBOL(sync_fence_cancel_async);
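
/*
 * Illustrative async-wait pattern (my_wake and ctx are hypothetical, and
 * sync_fence_waiter_init() is assumed to be the initializer sync.h
 * provides for setting waiter->callback):
 *
 *      static void my_wake(struct sync_fence *fence,
 *                          struct sync_fence_waiter *waiter)
 *      {
 *              ...     may run from the signaling context     ...
 *      }
 *
 *      sync_fence_waiter_init(&ctx->waiter, my_wake);
 *      err = sync_fence_wait_async(fence, &ctx->waiter);
 *
 * A nonzero return means the callback will never run: the fence had
 * already signaled (err > 0) or was in error (err < 0).  Before freeing
 * a registered waiter, call sync_fence_cancel_async() and treat a return
 * of 0 as "callback will not run".
 */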

static bool sync_fence_check(struct sync_fence *fence)
{
        /*
         * Make sure that reads to fence->status are ordered with the
         * wait queue event triggering
         */
        smp_rmb();
        return fence->status != 0;
}

int sync_fence_wait(struct sync_fence *fence, long timeout)
{
        int err = 0;

        if (timeout > 0) {
                timeout = msecs_to_jiffies(timeout);
                err = wait_event_interruptible_timeout(fence->wq,
                                                       sync_fence_check(fence),
                                                       timeout);
        } else if (timeout < 0) {
                err = wait_event_interruptible(fence->wq,
                                               sync_fence_check(fence));
        }

        if (err < 0)
                return err;

        if (fence->status < 0) {
                pr_info("fence error %d on [%p]\n", fence->status, fence);
                sync_dump();
                return fence->status;
        }

        if (fence->status == 0) {
                pr_info("fence timeout on [%p] after %dms\n", fence,
                        jiffies_to_msecs(timeout));
                sync_dump();
                return -ETIME;
        }

        return 0;
}
EXPORT_SYMBOL(sync_fence_wait);
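
/*
 * Sketch of the timeout convention above: timeout is in milliseconds,
 * timeout == 0 just polls the current status, and timeout < 0 waits
 * without bound.
 *
 *      err = sync_fence_wait(fence, 1000);     <-- wait up to one second
 *      if (err == -ETIME)
 *              ...     fence did not signal in time    ...
 *      else if (err < 0)
 *              ...     interrupted, or the fence is in error   ...
 */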
Erik Gilling7ad530b2013-02-28 16:42:57 -0800614
Erik Gilling01544172013-02-28 16:43:10 -0800615static void sync_fence_free(struct kref *kref)
616{
617 struct sync_fence *fence = container_of(kref, struct sync_fence, kref);
618
619 sync_fence_free_pts(fence);
620
621 kfree(fence);
622}
623
Erik Gilling7ad530b2013-02-28 16:42:57 -0800624static int sync_fence_release(struct inode *inode, struct file *file)
625{
626 struct sync_fence *fence = file->private_data;
Erik Gillingaf7582f2013-02-28 16:43:00 -0800627 unsigned long flags;
Erik Gilling7ad530b2013-02-28 16:42:57 -0800628
Erik Gilling01544172013-02-28 16:43:10 -0800629 /*
630 * We need to remove all ways to access this fence before droping
631 * our ref.
632 *
633 * start with its membership in the global fence list
634 */
Erik Gillingaf7582f2013-02-28 16:43:00 -0800635 spin_lock_irqsave(&sync_fence_list_lock, flags);
636 list_del(&fence->sync_fence_list);
637 spin_unlock_irqrestore(&sync_fence_list_lock, flags);
638
Erik Gilling01544172013-02-28 16:43:10 -0800639 /*
640 * remove its pts from their parents so that sync_timeline_signal()
641 * can't reference the fence.
642 */
643 sync_fence_detach_pts(fence);
Erik Gillingcc3c5cd2013-02-28 16:43:08 -0800644
Erik Gilling01544172013-02-28 16:43:10 -0800645 kref_put(&fence->kref, sync_fence_free);
Erik Gilling7ad530b2013-02-28 16:42:57 -0800646
647 return 0;
648}
649
Erik Gilling57b505b2013-02-28 16:43:04 -0800650static unsigned int sync_fence_poll(struct file *file, poll_table *wait)
651{
652 struct sync_fence *fence = file->private_data;
653
654 poll_wait(file, &fence->wq, wait);
655
Erik Gillingc6792122013-02-28 16:43:18 -0800656 /*
657 * Make sure that reads to fence->status are ordered with the
658 * wait queue event triggering
659 */
660 smp_rmb();
661
Erik Gilling57b505b2013-02-28 16:43:04 -0800662 if (fence->status == 1)
663 return POLLIN;
664 else if (fence->status < 0)
665 return POLLERR;
666 else
667 return 0;
668}
669
Erik Gilling7ad530b2013-02-28 16:42:57 -0800670static long sync_fence_ioctl_wait(struct sync_fence *fence, unsigned long arg)
671{
672 __s32 value;
673
674 if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
675 return -EFAULT;
676
677 return sync_fence_wait(fence, value);
678}
679
680static long sync_fence_ioctl_merge(struct sync_fence *fence, unsigned long arg)
681{
682 int fd = get_unused_fd();
683 int err;
684 struct sync_fence *fence2, *fence3;
685 struct sync_merge_data data;
686
Rebecca Schultz Zavin92ea915a2013-02-28 16:43:12 -0800687 if (fd < 0)
688 return fd;
689
690 if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
691 err = -EFAULT;
692 goto err_put_fd;
693 }
Erik Gilling7ad530b2013-02-28 16:42:57 -0800694
695 fence2 = sync_fence_fdget(data.fd2);
696 if (fence2 == NULL) {
697 err = -ENOENT;
698 goto err_put_fd;
699 }
700
701 data.name[sizeof(data.name) - 1] = '\0';
702 fence3 = sync_fence_merge(data.name, fence, fence2);
703 if (fence3 == NULL) {
704 err = -ENOMEM;
705 goto err_put_fence2;
706 }
707
708 data.fence = fd;
709 if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
710 err = -EFAULT;
711 goto err_put_fence3;
712 }
713
714 sync_fence_install(fence3, fd);
715 sync_fence_put(fence2);
716 return 0;
717
718err_put_fence3:
719 sync_fence_put(fence3);
720
721err_put_fence2:
722 sync_fence_put(fence2);
723
724err_put_fd:
725 put_unused_fd(fd);
726 return err;
727}
728
Erik Gilling79ba1522013-02-28 16:43:02 -0800729static int sync_fill_pt_info(struct sync_pt *pt, void *data, int size)
730{
731 struct sync_pt_info *info = data;
732 int ret;
733
734 if (size < sizeof(struct sync_pt_info))
735 return -ENOMEM;
736
737 info->len = sizeof(struct sync_pt_info);
738
739 if (pt->parent->ops->fill_driver_data) {
740 ret = pt->parent->ops->fill_driver_data(pt, info->driver_data,
741 size - sizeof(*info));
742 if (ret < 0)
743 return ret;
744
745 info->len += ret;
746 }
747
748 strlcpy(info->obj_name, pt->parent->name, sizeof(info->obj_name));
749 strlcpy(info->driver_name, pt->parent->ops->driver_name,
750 sizeof(info->driver_name));
751 info->status = pt->status;
752 info->timestamp_ns = ktime_to_ns(pt->timestamp);
753
754 return info->len;
755}
756
757static long sync_fence_ioctl_fence_info(struct sync_fence *fence,
758 unsigned long arg)
759{
760 struct sync_fence_info_data *data;
761 struct list_head *pos;
762 __u32 size;
763 __u32 len = 0;
764 int ret;
765
766 if (copy_from_user(&size, (void __user *)arg, sizeof(size)))
767 return -EFAULT;
768
769 if (size < sizeof(struct sync_fence_info_data))
770 return -EINVAL;
771
772 if (size > 4096)
773 size = 4096;
774
775 data = kzalloc(size, GFP_KERNEL);
776 if (data == NULL)
777 return -ENOMEM;
778
779 strlcpy(data->name, fence->name, sizeof(data->name));
780 data->status = fence->status;
781 len = sizeof(struct sync_fence_info_data);
782
783 list_for_each(pos, &fence->pt_list_head) {
784 struct sync_pt *pt =
785 container_of(pos, struct sync_pt, pt_list);
786
787 ret = sync_fill_pt_info(pt, (u8 *)data + len, size - len);
788
789 if (ret < 0)
790 goto out;
791
792 len += ret;
793 }
794
795 data->len = len;
796
797 if (copy_to_user((void __user *)arg, data, len))
798 ret = -EFAULT;
799 else
800 ret = 0;
801
802out:
803 kfree(data);
804
805 return ret;
806}
Erik Gilling7ad530b2013-02-28 16:42:57 -0800807
808static long sync_fence_ioctl(struct file *file, unsigned int cmd,
809 unsigned long arg)
810{
811 struct sync_fence *fence = file->private_data;
812 switch (cmd) {
813 case SYNC_IOC_WAIT:
814 return sync_fence_ioctl_wait(fence, arg);
815
816 case SYNC_IOC_MERGE:
817 return sync_fence_ioctl_merge(fence, arg);
Erik Gillingaf7582f2013-02-28 16:43:00 -0800818
Erik Gilling79ba1522013-02-28 16:43:02 -0800819 case SYNC_IOC_FENCE_INFO:
820 return sync_fence_ioctl_fence_info(fence, arg);
821
Erik Gilling7ad530b2013-02-28 16:42:57 -0800822 default:
823 return -ENOTTY;
824 }
825}
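
/*
 * Userspace view (illustrative, error handling elided): these ioctls sit
 * behind the libsync-style helpers.  A minimal merge of two fence fds
 * looks roughly like this, with struct sync_merge_data coming from the
 * uapi sync header:
 *
 *      struct sync_merge_data data;
 *
 *      strncpy(data.name, "merged", sizeof(data.name));
 *      data.fd2 = fd_b;
 *      ioctl(fd_a, SYNC_IOC_MERGE, &data);
 *      <-- the merged fence fd is returned in data.fence
 */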
826
Erik Gillingaf7582f2013-02-28 16:43:00 -0800827#ifdef CONFIG_DEBUG_FS
828static const char *sync_status_str(int status)
829{
830 if (status > 0)
831 return "signaled";
832 else if (status == 0)
833 return "active";
834 else
835 return "error";
836}
837
838static void sync_print_pt(struct seq_file *s, struct sync_pt *pt, bool fence)
839{
840 int status = pt->status;
841 seq_printf(s, " %s%spt %s",
842 fence ? pt->parent->name : "",
843 fence ? "_" : "",
844 sync_status_str(status));
845 if (pt->status) {
846 struct timeval tv = ktime_to_timeval(pt->timestamp);
847 seq_printf(s, "@%ld.%06ld", tv.tv_sec, tv.tv_usec);
848 }
849
Erik Gillingdbd52392013-02-28 16:43:21 -0800850 if (pt->parent->ops->timeline_value_str &&
851 pt->parent->ops->pt_value_str) {
852 char value[64];
853 pt->parent->ops->pt_value_str(pt, value, sizeof(value));
854 seq_printf(s, ": %s", value);
855 if (fence) {
856 pt->parent->ops->timeline_value_str(pt->parent, value,
857 sizeof(value));
858 seq_printf(s, " / %s", value);
859 }
860 } else if (pt->parent->ops->print_pt) {
Erik Gillingaf7582f2013-02-28 16:43:00 -0800861 seq_printf(s, ": ");
862 pt->parent->ops->print_pt(s, pt);
863 }
864
865 seq_printf(s, "\n");
866}
867
868static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
869{
870 struct list_head *pos;
871 unsigned long flags;
872
873 seq_printf(s, "%s %s", obj->name, obj->ops->driver_name);
874
Erik Gillingdbd52392013-02-28 16:43:21 -0800875 if (obj->ops->timeline_value_str) {
876 char value[64];
877 obj->ops->timeline_value_str(obj, value, sizeof(value));
878 seq_printf(s, ": %s", value);
879 } else if (obj->ops->print_obj) {
Erik Gillingaf7582f2013-02-28 16:43:00 -0800880 seq_printf(s, ": ");
881 obj->ops->print_obj(s, obj);
882 }
883
884 seq_printf(s, "\n");
885
886 spin_lock_irqsave(&obj->child_list_lock, flags);
887 list_for_each(pos, &obj->child_list_head) {
888 struct sync_pt *pt =
889 container_of(pos, struct sync_pt, child_list);
890 sync_print_pt(s, pt, false);
891 }
892 spin_unlock_irqrestore(&obj->child_list_lock, flags);
893}
894
895static void sync_print_fence(struct seq_file *s, struct sync_fence *fence)
896{
897 struct list_head *pos;
898 unsigned long flags;
899
Erik Gilling1d5db2c2013-02-28 16:43:16 -0800900 seq_printf(s, "[%p] %s: %s\n", fence, fence->name,
901 sync_status_str(fence->status));
Erik Gillingaf7582f2013-02-28 16:43:00 -0800902
903 list_for_each(pos, &fence->pt_list_head) {
904 struct sync_pt *pt =
905 container_of(pos, struct sync_pt, pt_list);
906 sync_print_pt(s, pt, true);
907 }
908
909 spin_lock_irqsave(&fence->waiter_list_lock, flags);
910 list_for_each(pos, &fence->waiter_list_head) {
911 struct sync_fence_waiter *waiter =
912 container_of(pos, struct sync_fence_waiter,
913 waiter_list);
914
Erik Gillingc0f61a42013-02-28 16:43:05 -0800915 seq_printf(s, "waiter %pF\n", waiter->callback);
Erik Gillingaf7582f2013-02-28 16:43:00 -0800916 }
917 spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
918}
919
920static int sync_debugfs_show(struct seq_file *s, void *unused)
921{
922 unsigned long flags;
923 struct list_head *pos;
924
925 seq_printf(s, "objs:\n--------------\n");
926
927 spin_lock_irqsave(&sync_timeline_list_lock, flags);
928 list_for_each(pos, &sync_timeline_list_head) {
929 struct sync_timeline *obj =
930 container_of(pos, struct sync_timeline,
931 sync_timeline_list);
932
933 sync_print_obj(s, obj);
934 seq_printf(s, "\n");
935 }
936 spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
937
938 seq_printf(s, "fences:\n--------------\n");
939
940 spin_lock_irqsave(&sync_fence_list_lock, flags);
941 list_for_each(pos, &sync_fence_list_head) {
942 struct sync_fence *fence =
943 container_of(pos, struct sync_fence, sync_fence_list);
944
945 sync_print_fence(s, fence);
946 seq_printf(s, "\n");
947 }
948 spin_unlock_irqrestore(&sync_fence_list_lock, flags);
949 return 0;
950}
951
952static int sync_debugfs_open(struct inode *inode, struct file *file)
953{
954 return single_open(file, sync_debugfs_show, inode->i_private);
955}
956
957static const struct file_operations sync_debugfs_fops = {
958 .open = sync_debugfs_open,
959 .read = seq_read,
960 .llseek = seq_lseek,
961 .release = single_release,
962};
963
964static __init int sync_debugfs_init(void)
965{
966 debugfs_create_file("sync", S_IRUGO, NULL, NULL, &sync_debugfs_fops);
967 return 0;
968}
Erik Gillingaf7582f2013-02-28 16:43:00 -0800969late_initcall(sync_debugfs_init);
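
/*
 * With debugfs mounted at its usual location, the state printed by
 * sync_debugfs_show() is readable at /sys/kernel/debug/sync.
 */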
970
Erik Gillingf56388f2013-02-28 16:43:15 -0800971#define DUMP_CHUNK 256
972static char sync_dump_buf[64 * 1024];
973void sync_dump(void)
974{
975 struct seq_file s = {
976 .buf = sync_dump_buf,
977 .size = sizeof(sync_dump_buf) - 1,
978 };
979 int i;
980
981 sync_debugfs_show(&s, NULL);
982
983 for (i = 0; i < s.count; i += DUMP_CHUNK) {
984 if ((s.count - i) > DUMP_CHUNK) {
985 char c = s.buf[i + DUMP_CHUNK];
986 s.buf[i + DUMP_CHUNK] = 0;
987 pr_cont("%s", s.buf + i);
988 s.buf[i + DUMP_CHUNK] = c;
989 } else {
990 s.buf[s.count] = 0;
991 pr_cont("%s", s.buf + i);
992 }
993 }
994}
995#else
996static void sync_dump(void)
997{
998}
Erik Gillingaf7582f2013-02-28 16:43:00 -0800999#endif