blob: 645a698a1baadcfa69428e90553de0d3f8540ce1 [file] [log] [blame]
Erik Gilling010accf2012-03-13 15:34:34 -07001/*
2 * drivers/base/sync.c
3 *
4 * Copyright (C) 2012 Google, Inc.
5 *
6 * This software is licensed under the terms of the GNU General Public
7 * License version 2, as published by the Free Software Foundation, and
8 * may be copied, distributed, and modified under those terms.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 */
16
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/sync.h>
#include <linux/uaccess.h>

#include <linux/anon_inodes.h>

#define CREATE_TRACE_POINTS
#include <trace/events/sync.h>
33
/* Forward declarations for helpers referenced before their definitions. */
static void sync_fence_signal_pt(struct sync_pt *pt);
static int _sync_pt_has_signaled(struct sync_pt *pt);
static void sync_fence_free(struct kref *kref);
static void sync_dump(struct sync_fence *fence);

/* Global registry of all timelines; used only by the debugfs dump. */
static LIST_HEAD(sync_timeline_list_head);
static DEFINE_SPINLOCK(sync_timeline_list_lock);

/* Global registry of all fences; used only by the debugfs dump. */
static LIST_HEAD(sync_fence_list_head);
static DEFINE_SPINLOCK(sync_fence_list_lock);
44
/*
 * sync_timeline_create() - allocate and register a new sync timeline
 * @ops:  implementation-specific callbacks for this timeline type
 * @size: bytes to allocate; must be >= sizeof(struct sync_timeline) so a
 *        driver can embed the timeline in a larger private struct
 * @name: human-readable name, copied (and possibly truncated) into the object
 *
 * Returns the new timeline with one kref held, or NULL on bad size or
 * allocation failure.  The timeline is linked onto the global list for
 * debugfs reporting.
 */
struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
					   int size, const char *name)
{
	struct sync_timeline *obj;
	unsigned long flags;

	if (size < sizeof(struct sync_timeline))
		return NULL;

	obj = kzalloc(size, GFP_KERNEL);
	if (obj == NULL)
		return NULL;

	kref_init(&obj->kref);
	obj->ops = ops;
	strlcpy(obj->name, name, sizeof(obj->name));

	INIT_LIST_HEAD(&obj->child_list_head);
	spin_lock_init(&obj->child_list_lock);

	INIT_LIST_HEAD(&obj->active_list_head);
	spin_lock_init(&obj->active_list_lock);

	spin_lock_irqsave(&sync_timeline_list_lock, flags);
	list_add_tail(&obj->sync_timeline_list, &sync_timeline_list_head);
	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

	return obj;
}
EXPORT_SYMBOL(sync_timeline_create);
Erik Gilling010accf2012-03-13 15:34:34 -070075
/*
 * kref release callback for a sync_timeline: invoked when the last
 * reference (timeline owner or attached sync_pt) is dropped.  Gives the
 * driver a chance to release its private state, unlinks the timeline from
 * the global debugfs list, and frees it.
 */
static void sync_timeline_free(struct kref *kref)
{
	struct sync_timeline *obj =
		container_of(kref, struct sync_timeline, kref);
	unsigned long flags;

	if (obj->ops->release_obj)
		obj->ops->release_obj(obj);

	spin_lock_irqsave(&sync_timeline_list_lock, flags);
	list_del(&obj->sync_timeline_list);
	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

	kfree(obj);
}
91
/*
 * sync_timeline_destroy() - tear down a timeline
 *
 * Marks the timeline destroyed, signals all remaining points (they report
 * -ENOENT via _sync_pt_has_signaled() once ->destroyed is set), and drops
 * the creator's reference.  The object itself survives until the last
 * sync_pt holding a kref releases it.
 */
void sync_timeline_destroy(struct sync_timeline *obj)
{
	obj->destroyed = true;
	/* publish ->destroyed before waking waiters that will read it */
	smp_wmb();

	/*
	 * signal any children that their parent is going away.
	 */
	sync_timeline_signal(obj);

	kref_put(&obj->kref, sync_timeline_free);
}
EXPORT_SYMBOL(sync_timeline_destroy);
Erik Gilling010accf2012-03-13 15:34:34 -0700105
/* Attach a newly created sync_pt to its parent timeline's child list. */
static void sync_timeline_add_pt(struct sync_timeline *obj, struct sync_pt *pt)
{
	unsigned long flags;

	pt->parent = obj;

	spin_lock_irqsave(&obj->child_list_lock, flags);
	list_add_tail(&pt->child_list, &obj->child_list_head);
	spin_unlock_irqrestore(&obj->child_list_lock, flags);
}
116
/*
 * Detach a sync_pt from its parent timeline: remove it from the active
 * list (if it was waiting to signal) and from the child list.  The
 * list_empty() checks make this safe to call more than once, since both
 * removals use list_del_init().
 */
static void sync_timeline_remove_pt(struct sync_pt *pt)
{
	struct sync_timeline *obj = pt->parent;
	unsigned long flags;

	spin_lock_irqsave(&obj->active_list_lock, flags);
	if (!list_empty(&pt->active_list))
		list_del_init(&pt->active_list);
	spin_unlock_irqrestore(&obj->active_list_lock, flags);

	spin_lock_irqsave(&obj->child_list_lock, flags);
	if (!list_empty(&pt->child_list)) {
		list_del_init(&pt->child_list);
	}
	spin_unlock_irqrestore(&obj->child_list_lock, flags);
}
133
/*
 * sync_timeline_signal() - check the timeline's active points and signal
 * any that have now passed.
 *
 * Two-phase design: first, under active_list_lock, move every point that
 * reports signaled onto a private list while taking a kref on its fence
 * (so the fence cannot be freed between the phases); then, outside the
 * lock, signal each fence and drop that kref.  Signaling outside the lock
 * is required because fence waiter callbacks run from sync_fence_signal_pt().
 */
void sync_timeline_signal(struct sync_timeline *obj)
{
	unsigned long flags;
	LIST_HEAD(signaled_pts);
	struct list_head *pos, *n;

	trace_sync_timeline(obj);

	spin_lock_irqsave(&obj->active_list_lock, flags);

	list_for_each_safe(pos, n, &obj->active_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, active_list);

		if (_sync_pt_has_signaled(pt)) {
			list_del_init(pos);
			list_add(&pt->signaled_list, &signaled_pts);
			/* keep the fence alive until we signal it below */
			kref_get(&pt->fence->kref);
		}
	}

	spin_unlock_irqrestore(&obj->active_list_lock, flags);

	list_for_each_safe(pos, n, &signaled_pts) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, signaled_list);

		list_del_init(pos);
		sync_fence_signal_pt(pt);
		kref_put(&pt->fence->kref, sync_fence_free);
	}
}
EXPORT_SYMBOL(sync_timeline_signal);
Erik Gilling010accf2012-03-13 15:34:34 -0700167
/*
 * sync_pt_create() - allocate a new sync point on @parent
 * @parent: timeline the point belongs to
 * @size:   bytes to allocate; must be >= sizeof(struct sync_pt) so a
 *          driver can embed the point in a larger private struct
 *
 * Takes a reference on the parent timeline for the lifetime of the point
 * (dropped in sync_pt_free()).  Returns NULL on bad size or allocation
 * failure.
 */
struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size)
{
	struct sync_pt *pt;

	if (size < sizeof(struct sync_pt))
		return NULL;

	pt = kzalloc(size, GFP_KERNEL);
	if (pt == NULL)
		return NULL;

	INIT_LIST_HEAD(&pt->active_list);
	kref_get(&parent->kref);
	sync_timeline_add_pt(parent, pt);

	return pt;
}
EXPORT_SYMBOL(sync_pt_create);
Erik Gilling010accf2012-03-13 15:34:34 -0700186
/*
 * sync_pt_free() - release a sync point
 *
 * Lets the driver free its private state, detaches the point from its
 * parent timeline, drops the timeline reference taken in sync_pt_create()
 * (possibly freeing the timeline), and frees the point.
 */
void sync_pt_free(struct sync_pt *pt)
{
	if (pt->parent->ops->free_pt)
		pt->parent->ops->free_pt(pt);

	sync_timeline_remove_pt(pt);

	kref_put(&pt->parent->kref, sync_timeline_free);

	kfree(pt);
}
EXPORT_SYMBOL(sync_pt_free);
Erik Gilling010accf2012-03-13 15:34:34 -0700199
/*
 * Query and cache a point's signal state.  Returns >0 when signaled,
 * 0 when still pending, <0 on error (-ENOENT once the parent timeline is
 * destroyed).  Records the signal timestamp on any status transition.
 *
 * call with pt->parent->active_list_lock held
 */
static int _sync_pt_has_signaled(struct sync_pt *pt)
{
	int old_status = pt->status;

	if (!pt->status)
		pt->status = pt->parent->ops->has_signaled(pt);

	if (!pt->status && pt->parent->destroyed)
		pt->status = -ENOENT;

	if (pt->status != old_status)
		pt->timestamp = ktime_get();

	return pt->status;
}
216
/* Clone a sync point via its driver's dup callback; returns NULL on failure. */
static struct sync_pt *sync_pt_dup(struct sync_pt *pt)
{
	return pt->parent->ops->dup(pt);
}
221
/*
 * Adds a sync pt to the active queue.  Called when added to a fence.
 * If the point has already signaled (or errored), it is deliberately NOT
 * queued — the caller is expected to follow up with sync_fence_signal_pt()
 * to propagate the already-signaled state.
 */
static void sync_pt_activate(struct sync_pt *pt)
{
	struct sync_timeline *obj = pt->parent;
	unsigned long flags;
	int err;

	spin_lock_irqsave(&obj->active_list_lock, flags);

	err = _sync_pt_has_signaled(pt);
	if (err != 0)
		goto out;

	list_add_tail(&pt->active_list, &obj->active_list_head);

out:
	spin_unlock_irqrestore(&obj->active_list_lock, flags);
}
240
/* File operations backing the anon-inode file that represents a fence. */
static int sync_fence_release(struct inode *inode, struct file *file);
static unsigned int sync_fence_poll(struct file *file, poll_table *wait);
static long sync_fence_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg);


static const struct file_operations sync_fence_fops = {
	.release = sync_fence_release,
	.poll = sync_fence_poll,
	.unlocked_ioctl = sync_fence_ioctl,
};
252
253static struct sync_fence *sync_fence_alloc(const char *name)
254{
255 struct sync_fence *fence;
Erik Gilling981c8a92012-03-14 19:49:15 -0700256 unsigned long flags;
Erik Gilling010accf2012-03-13 15:34:34 -0700257
258 fence = kzalloc(sizeof(struct sync_fence), GFP_KERNEL);
259 if (fence == NULL)
260 return NULL;
261
262 fence->file = anon_inode_getfile("sync_fence", &sync_fence_fops,
263 fence, 0);
264 if (fence->file == NULL)
265 goto err;
266
Erik Gilling220411f2012-07-23 16:43:05 -0700267 kref_init(&fence->kref);
Erik Gilling010accf2012-03-13 15:34:34 -0700268 strlcpy(fence->name, name, sizeof(fence->name));
269
270 INIT_LIST_HEAD(&fence->pt_list_head);
271 INIT_LIST_HEAD(&fence->waiter_list_head);
272 spin_lock_init(&fence->waiter_list_lock);
273
274 init_waitqueue_head(&fence->wq);
Erik Gilling981c8a92012-03-14 19:49:15 -0700275
276 spin_lock_irqsave(&sync_fence_list_lock, flags);
277 list_add_tail(&fence->sync_fence_list, &sync_fence_list_head);
278 spin_unlock_irqrestore(&sync_fence_list_lock, flags);
279
Erik Gilling010accf2012-03-13 15:34:34 -0700280 return fence;
281
282err:
283 kfree(fence);
284 return NULL;
285}
286
/* TODO: implement a create which takes more that one sync_pt */
/*
 * sync_fence_create() - wrap a single sync_pt in a new fence
 * @name: debug name for the fence
 * @pt:   unattached point; must not already belong to a fence
 *
 * Returns the new fence, or NULL if @pt is already owned or allocation
 * fails.  Ownership of @pt transfers to the fence.
 */
struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt)
{
	struct sync_fence *fence;

	if (pt->fence)
		return NULL;

	fence = sync_fence_alloc(name);
	if (fence == NULL)
		return NULL;

	pt->fence = fence;
	list_add(&pt->pt_list, &fence->pt_list_head);
	sync_pt_activate(pt);

	/*
	 * signal the fence in case pt was activated before
	 * sync_pt_activate(pt) was called
	 */
	sync_fence_signal_pt(pt);

	return fence;
}
EXPORT_SYMBOL(sync_fence_create);
Erik Gilling010accf2012-03-13 15:34:34 -0700312
313static int sync_fence_copy_pts(struct sync_fence *dst, struct sync_fence *src)
314{
315 struct list_head *pos;
316
317 list_for_each(pos, &src->pt_list_head) {
318 struct sync_pt *orig_pt =
319 container_of(pos, struct sync_pt, pt_list);
320 struct sync_pt *new_pt = sync_pt_dup(orig_pt);
321
322 if (new_pt == NULL)
323 return -ENOMEM;
324
325 new_pt->fence = dst;
326 list_add(&new_pt->pt_list, &dst->pt_list_head);
Erik Gilling010accf2012-03-13 15:34:34 -0700327 }
328
329 return 0;
330}
331
/*
 * Merge the points of @src into @dst.  For each source point: if @dst
 * already holds a point on the same timeline, keep only the later of the
 * two (per the driver's compare op); otherwise append a duplicate of the
 * source point.  On -ENOMEM, @dst may hold a partial merge — the caller
 * cleans up.
 */
static int sync_fence_merge_pts(struct sync_fence *dst, struct sync_fence *src)
{
	struct list_head *src_pos, *dst_pos, *n;

	list_for_each(src_pos, &src->pt_list_head) {
		struct sync_pt *src_pt =
			container_of(src_pos, struct sync_pt, pt_list);
		bool collapsed = false;

		list_for_each_safe(dst_pos, n, &dst->pt_list_head) {
			struct sync_pt *dst_pt =
				container_of(dst_pos, struct sync_pt, pt_list);
			/* collapse two sync_pts on the same timeline
			 * to a single sync_pt that will signal at
			 * the later of the two
			 */
			if (dst_pt->parent == src_pt->parent) {
				if (dst_pt->parent->ops->compare(dst_pt, src_pt) == -1) {
					/* src is later: replace dst's point */
					struct sync_pt *new_pt =
						sync_pt_dup(src_pt);
					if (new_pt == NULL)
						return -ENOMEM;

					new_pt->fence = dst;
					list_replace(&dst_pt->pt_list,
						     &new_pt->pt_list);
					sync_pt_free(dst_pt);
				}
				collapsed = true;
				break;
			}
		}

		if (!collapsed) {
			/* no point on this timeline yet: append a copy */
			struct sync_pt *new_pt = sync_pt_dup(src_pt);

			if (new_pt == NULL)
				return -ENOMEM;

			new_pt->fence = dst;
			list_add(&new_pt->pt_list, &dst->pt_list_head);
		}
	}

	return 0;
}
378
Erik Gilling220411f2012-07-23 16:43:05 -0700379static void sync_fence_detach_pts(struct sync_fence *fence)
380{
381 struct list_head *pos, *n;
382
383 list_for_each_safe(pos, n, &fence->pt_list_head) {
384 struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
385 sync_timeline_remove_pt(pt);
386 }
387}
388
Erik Gilling010accf2012-03-13 15:34:34 -0700389static void sync_fence_free_pts(struct sync_fence *fence)
390{
391 struct list_head *pos, *n;
392
393 list_for_each_safe(pos, n, &fence->pt_list_head) {
394 struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
395 sync_pt_free(pt);
396 }
397}
398
/*
 * sync_fence_fdget() - look up a fence from a file descriptor
 *
 * Takes a file reference (fget) that the caller must drop with
 * sync_fence_put().  Returns NULL if @fd is invalid or does not refer to
 * a sync fence (identified by its file_operations).
 */
struct sync_fence *sync_fence_fdget(int fd)
{
	struct file *file = fget(fd);

	if (file == NULL)
		return NULL;

	if (file->f_op != &sync_fence_fops)
		goto err;

	return file->private_data;

err:
	fput(file);
	return NULL;
}
EXPORT_SYMBOL(sync_fence_fdget);
Erik Gilling010accf2012-03-13 15:34:34 -0700416
/* Drop the file reference backing @fence (pairs with sync_fence_fdget()). */
void sync_fence_put(struct sync_fence *fence)
{
	fput(fence->file);
}
EXPORT_SYMBOL(sync_fence_put);

/* Publish @fence on fd @fd; transfers the file reference to the fd table. */
void sync_fence_install(struct sync_fence *fence, int fd)
{
	fd_install(fd, fence->file);
}
EXPORT_SYMBOL(sync_fence_install);
Erik Gilling010accf2012-03-13 15:34:34 -0700428
429static int sync_fence_get_status(struct sync_fence *fence)
430{
431 struct list_head *pos;
432 int status = 1;
433
434 list_for_each(pos, &fence->pt_list_head) {
435 struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
436 int pt_status = pt->status;
437
438 if (pt_status < 0) {
439 status = pt_status;
440 break;
441 } else if (status == 1) {
442 status = pt_status;
443 }
444 }
445
446 return status;
447}
448
449struct sync_fence *sync_fence_merge(const char *name,
450 struct sync_fence *a, struct sync_fence *b)
451{
452 struct sync_fence *fence;
Ørjan Eidea4384912012-12-05 16:38:08 +0100453 struct list_head *pos;
Erik Gilling010accf2012-03-13 15:34:34 -0700454 int err;
455
456 fence = sync_fence_alloc(name);
457 if (fence == NULL)
458 return NULL;
459
460 err = sync_fence_copy_pts(fence, a);
461 if (err < 0)
462 goto err;
463
Erik Gilling9d8ca8a2012-07-11 17:13:50 -0700464 err = sync_fence_merge_pts(fence, b);
Erik Gilling010accf2012-03-13 15:34:34 -0700465 if (err < 0)
466 goto err;
467
Ørjan Eidea4384912012-12-05 16:38:08 +0100468 list_for_each(pos, &fence->pt_list_head) {
469 struct sync_pt *pt =
470 container_of(pos, struct sync_pt, pt_list);
471 sync_pt_activate(pt);
472 }
473
Erik Gillingd8f388042012-10-15 17:51:01 -0700474 /*
475 * signal the fence in case one of it's pts were activated before
476 * they were activated
477 */
478 sync_fence_signal_pt(list_first_entry(&fence->pt_list_head,
479 struct sync_pt,
480 pt_list));
Erik Gilling010accf2012-03-13 15:34:34 -0700481
482 return fence;
483err:
484 sync_fence_free_pts(fence);
485 kfree(fence);
486 return NULL;
487}
Erik Gilling4fb837a2012-05-16 13:09:22 -0700488EXPORT_SYMBOL(sync_fence_merge);
Erik Gilling010accf2012-03-13 15:34:34 -0700489
/*
 * Propagate a point's signal to its fence.  Recomputes the aggregate
 * status; on the 0 -> nonzero transition (guarded under waiter_list_lock
 * so two racing signalers cannot both win), moves all async waiters to a
 * private list, then runs their callbacks and wakes sleeping waiters
 * outside the lock.
 */
static void sync_fence_signal_pt(struct sync_pt *pt)
{
	LIST_HEAD(signaled_waiters);
	struct sync_fence *fence = pt->fence;
	struct list_head *pos;
	struct list_head *n;
	unsigned long flags;
	int status;

	status = sync_fence_get_status(fence);

	spin_lock_irqsave(&fence->waiter_list_lock, flags);
	/*
	 * this should protect against two threads racing on the signaled
	 * false -> true transition
	 */
	if (status && !fence->status) {
		list_for_each_safe(pos, n, &fence->waiter_list_head)
			list_move(pos, &signaled_waiters);

		fence->status = status;
	} else {
		/* lost the race (or not yet signaled): nothing to deliver */
		status = 0;
	}
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);

	if (status) {
		list_for_each_safe(pos, n, &signaled_waiters) {
			struct sync_fence_waiter *waiter =
				container_of(pos, struct sync_fence_waiter,
					     waiter_list);

			list_del(pos);
			waiter->callback(fence, waiter);
		}
		wake_up(&fence->wq);
	}
}
528
/*
 * sync_fence_wait_async() - register @waiter's callback to run when
 * @fence signals.
 *
 * Returns 0 if the waiter was queued, or the fence's (nonzero) status if
 * it has already signaled — in which case the callback is NOT invoked and
 * the caller must handle completion itself.
 */
int sync_fence_wait_async(struct sync_fence *fence,
			  struct sync_fence_waiter *waiter)
{
	unsigned long flags;
	int err = 0;

	spin_lock_irqsave(&fence->waiter_list_lock, flags);

	if (fence->status) {
		err = fence->status;
		goto out;
	}

	list_add_tail(&waiter->waiter_list, &fence->waiter_list_head);
out:
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);

	return err;
}
EXPORT_SYMBOL(sync_fence_wait_async);
Erik Gilling010accf2012-03-13 15:34:34 -0700549
/*
 * sync_fence_cancel_async() - withdraw a waiter queued with
 * sync_fence_wait_async().
 *
 * Returns 0 if the waiter was dequeued before its callback ran, or
 * -ENOENT if it was no longer queued (callback already delivered or
 * never registered).
 */
int sync_fence_cancel_async(struct sync_fence *fence,
			    struct sync_fence_waiter *waiter)
{
	struct list_head *pos;
	struct list_head *n;
	unsigned long flags;
	int ret = -ENOENT;

	spin_lock_irqsave(&fence->waiter_list_lock, flags);
	/*
	 * Make sure waiter is still in waiter_list because it is possible for
	 * the waiter to be removed from the list while the callback is still
	 * pending.
	 */
	list_for_each_safe(pos, n, &fence->waiter_list_head) {
		struct sync_fence_waiter *list_waiter =
			container_of(pos, struct sync_fence_waiter,
				     waiter_list);
		if (list_waiter == waiter) {
			list_del(pos);
			ret = 0;
			break;
		}
	}
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
	return ret;
}
EXPORT_SYMBOL(sync_fence_cancel_async);
Erik Gillingc80114f2012-05-15 16:23:26 -0700578
/* Wait-queue condition helper: has the fence signaled (or errored)? */
static bool sync_fence_check(struct sync_fence *fence)
{
	/*
	 * Make sure that reads to fence->status are ordered with the
	 * wait queue event triggering
	 */
	smp_rmb();
	return fence->status != 0;
}
588
/*
 * sync_fence_wait() - block until @fence signals
 * @timeout: >0 wait up to @timeout ms; <0 wait indefinitely; 0 poll only
 *
 * Returns 0 on signal, -ETIME on timeout (or immediately when polling an
 * unsignaled fence), the fence's negative status on error, or
 * -ERESTARTSYS if interrupted.  Dumps fence state to the log on error or
 * on a real (timeout > 0) timeout.
 */
int sync_fence_wait(struct sync_fence *fence, long timeout)
{
	int err = 0;
	struct sync_pt *pt;

	trace_sync_wait(fence, 1);
	list_for_each_entry(pt, &fence->pt_list_head, pt_list)
		trace_sync_pt(pt);

	if (timeout > 0) {
		timeout = msecs_to_jiffies(timeout);
		/* err: 0 on timeout, <0 on signal, >0 jiffies remaining */
		err = wait_event_interruptible_timeout(fence->wq,
						       sync_fence_check(fence),
						       timeout);
	} else if (timeout < 0) {
		err = wait_event_interruptible(fence->wq,
					       sync_fence_check(fence));
	}
	trace_sync_wait(fence, 0);

	if (err < 0)
		return err;

	if (fence->status < 0) {
		pr_info("fence error %d on [%p]\n", fence->status, fence);
		sync_dump(fence);
		return fence->status;
	}

	if (fence->status == 0) {
		if (timeout > 0) {
			/* timeout was converted to jiffies above */
			pr_info("fence timeout on [%p] after %dms\n", fence,
				jiffies_to_msecs(timeout));
			sync_dump(fence);
		}
		return -ETIME;
	}

	return 0;
}
EXPORT_SYMBOL(sync_fence_wait);
Erik Gilling010accf2012-03-13 15:34:34 -0700630
/* kref release callback for a fence: free its points, then the fence. */
static void sync_fence_free(struct kref *kref)
{
	struct sync_fence *fence = container_of(kref, struct sync_fence, kref);

	sync_fence_free_pts(fence);

	kfree(fence);
}
639
/*
 * File release: runs when the last file reference to the fence's
 * anon-inode file is dropped.  Severs every external path to the fence
 * before dropping the final kref.
 */
static int sync_fence_release(struct inode *inode, struct file *file)
{
	struct sync_fence *fence = file->private_data;
	unsigned long flags;

	/*
	 * We need to remove all ways to access this fence before droping
	 * our ref.
	 *
	 * start with its membership in the global fence list
	 */
	spin_lock_irqsave(&sync_fence_list_lock, flags);
	list_del(&fence->sync_fence_list);
	spin_unlock_irqrestore(&sync_fence_list_lock, flags);

	/*
	 * remove its pts from their parents so that sync_timeline_signal()
	 * can't reference the fence.
	 */
	sync_fence_detach_pts(fence);

	kref_put(&fence->kref, sync_fence_free);

	return 0;
}
665
/*
 * poll()/select() support: POLLIN once signaled, POLLERR if the fence is
 * in an error state, 0 while still pending.
 */
static unsigned int sync_fence_poll(struct file *file, poll_table *wait)
{
	struct sync_fence *fence = file->private_data;

	poll_wait(file, &fence->wq, wait);

	/*
	 * Make sure that reads to fence->status are ordered with the
	 * wait queue event triggering
	 */
	smp_rmb();

	if (fence->status == 1)
		return POLLIN;
	else if (fence->status < 0)
		return POLLERR;
	else
		return 0;
}
685
/* SYNC_IOC_WAIT: copy the timeout (ms, signed) from userspace and wait. */
static long sync_fence_ioctl_wait(struct sync_fence *fence, unsigned long arg)
{
	__s32 value;

	if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
		return -EFAULT;

	return sync_fence_wait(fence, value);
}
695
/*
 * SYNC_IOC_MERGE: merge this fence with the fence named by data.fd2 into
 * a brand-new fence, installed on a fresh fd returned via data.fence.
 * The two input fences are left untouched (callers close them separately).
 */
static long sync_fence_ioctl_merge(struct sync_fence *fence, unsigned long arg)
{
	int fd = get_unused_fd();
	int err;
	struct sync_fence *fence2, *fence3;
	struct sync_merge_data data;

	if (fd < 0)
		return fd;

	if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
		err = -EFAULT;
		goto err_put_fd;
	}

	fence2 = sync_fence_fdget(data.fd2);
	if (fence2 == NULL) {
		err = -ENOENT;
		goto err_put_fd;
	}

	/* defensively terminate the user-supplied name */
	data.name[sizeof(data.name) - 1] = '\0';
	fence3 = sync_fence_merge(data.name, fence, fence2);
	if (fence3 == NULL) {
		err = -ENOMEM;
		goto err_put_fence2;
	}

	data.fence = fd;
	if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
		err = -EFAULT;
		goto err_put_fence3;
	}

	/* publish only after the result struct reached userspace */
	sync_fence_install(fence3, fd);
	sync_fence_put(fence2);
	return 0;

err_put_fence3:
	sync_fence_put(fence3);

err_put_fence2:
	sync_fence_put(fence2);

err_put_fd:
	put_unused_fd(fd);
	return err;
}
744
/*
 * Serialize one sync_pt into the userspace sync_pt_info record at @data.
 * Appends optional driver-specific payload after the fixed header.
 * Returns the total bytes written (info->len) or a negative errno
 * (-ENOMEM if @size cannot hold even the fixed header).
 */
static int sync_fill_pt_info(struct sync_pt *pt, void *data, int size)
{
	struct sync_pt_info *info = data;
	int ret;

	if (size < sizeof(struct sync_pt_info))
		return -ENOMEM;

	info->len = sizeof(struct sync_pt_info);

	if (pt->parent->ops->fill_driver_data) {
		ret = pt->parent->ops->fill_driver_data(pt, info->driver_data,
							size - sizeof(*info));
		if (ret < 0)
			return ret;

		info->len += ret;
	}

	strlcpy(info->obj_name, pt->parent->name, sizeof(info->obj_name));
	strlcpy(info->driver_name, pt->parent->ops->driver_name,
		sizeof(info->driver_name));
	info->status = pt->status;
	info->timestamp_ns = ktime_to_ns(pt->timestamp);

	return info->len;
}
772
/*
 * SYNC_IOC_FENCE_INFO: fill the user's sync_fence_info_data buffer with
 * the fence status plus one sync_pt_info record per point.  The user
 * passes the buffer size in the struct's leading __u32; it is clamped to
 * 4096 bytes.  On success the written length is reported in data->len.
 */
static long sync_fence_ioctl_fence_info(struct sync_fence *fence,
					unsigned long arg)
{
	struct sync_fence_info_data *data;
	struct list_head *pos;
	__u32 size;
	__u32 len = 0;
	int ret;

	if (copy_from_user(&size, (void __user *)arg, sizeof(size)))
		return -EFAULT;

	if (size < sizeof(struct sync_fence_info_data))
		return -EINVAL;

	/* cap the kernel-side scratch allocation */
	if (size > 4096)
		size = 4096;

	data = kzalloc(size, GFP_KERNEL);
	if (data == NULL)
		return -ENOMEM;

	strlcpy(data->name, fence->name, sizeof(data->name));
	data->status = fence->status;
	len = sizeof(struct sync_fence_info_data);

	list_for_each(pos, &fence->pt_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, pt_list);

		ret = sync_fill_pt_info(pt, (u8 *)data + len, size - len);

		if (ret < 0)
			goto out;

		len += ret;
	}

	data->len = len;

	if (copy_to_user((void __user *)arg, data, len))
		ret = -EFAULT;
	else
		ret = 0;

out:
	kfree(data);

	return ret;
}
Erik Gilling010accf2012-03-13 15:34:34 -0700823
/* ioctl dispatcher for the fence file. */
static long sync_fence_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	struct sync_fence *fence = file->private_data;
	switch (cmd) {
	case SYNC_IOC_WAIT:
		return sync_fence_ioctl_wait(fence, arg);

	case SYNC_IOC_MERGE:
		return sync_fence_ioctl_merge(fence, arg);

	case SYNC_IOC_FENCE_INFO:
		return sync_fence_ioctl_fence_info(fence, arg);

	default:
		return -ENOTTY;
	}
}
842
Erik Gilling981c8a92012-03-14 19:49:15 -0700843#ifdef CONFIG_DEBUG_FS
/* Map a tri-state status (>0 / 0 / <0) to its debugfs label. */
static const char *sync_status_str(int status)
{
	if (status > 0)
		return "signaled";
	return status == 0 ? "active" : "error";
}
853
/*
 * Print one sync_pt line into the debugfs seq_file.  @fence selects the
 * fence-centric format (prefix the point with its timeline name, and show
 * "pt-value / timeline-value"); timeline-centric output omits both.
 * Prefers the newer *_value_str ops, falling back to legacy print_pt.
 */
static void sync_print_pt(struct seq_file *s, struct sync_pt *pt, bool fence)
{
	int status = pt->status;
	seq_printf(s, "  %s%spt %s",
		   fence ? pt->parent->name : "",
		   fence ? "_" : "",
		   sync_status_str(status));
	if (pt->status) {
		/* only signaled/errored points carry a meaningful timestamp */
		struct timeval tv = ktime_to_timeval(pt->timestamp);
		seq_printf(s, "@%ld.%06ld", tv.tv_sec, tv.tv_usec);
	}

	if (pt->parent->ops->timeline_value_str &&
	    pt->parent->ops->pt_value_str) {
		char value[64];
		pt->parent->ops->pt_value_str(pt, value, sizeof(value));
		seq_printf(s, ": %s", value);
		if (fence) {
			pt->parent->ops->timeline_value_str(pt->parent, value,
						    sizeof(value));
			seq_printf(s, " / %s", value);
		}
	} else if (pt->parent->ops->print_pt) {
		seq_printf(s, ": ");
		pt->parent->ops->print_pt(s, pt);
	}

	seq_printf(s, "\n");
}
883
/* Print one timeline and all of its child points into the debugfs file. */
static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
{
	struct list_head *pos;
	unsigned long flags;

	seq_printf(s, "%s %s", obj->name, obj->ops->driver_name);

	if (obj->ops->timeline_value_str) {
		char value[64];
		obj->ops->timeline_value_str(obj, value, sizeof(value));
		seq_printf(s, ": %s", value);
	} else if (obj->ops->print_obj) {
		/* legacy fallback for drivers without timeline_value_str */
		seq_printf(s, ": ");
		obj->ops->print_obj(s, obj);
	}

	seq_printf(s, "\n");

	spin_lock_irqsave(&obj->child_list_lock, flags);
	list_for_each(pos, &obj->child_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, child_list);
		sync_print_pt(s, pt, false);
	}
	spin_unlock_irqrestore(&obj->child_list_lock, flags);
}
910
/* Print one fence, its points, and any pending async waiters. */
static void sync_print_fence(struct seq_file *s, struct sync_fence *fence)
{
	struct list_head *pos;
	unsigned long flags;

	seq_printf(s, "[%p] %s: %s\n", fence, fence->name,
		   sync_status_str(fence->status));

	list_for_each(pos, &fence->pt_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, pt_list);
		sync_print_pt(s, pt, true);
	}

	spin_lock_irqsave(&fence->waiter_list_lock, flags);
	list_for_each(pos, &fence->waiter_list_head) {
		struct sync_fence_waiter *waiter =
			container_of(pos, struct sync_fence_waiter,
				     waiter_list);

		/* %pF resolves the callback pointer to a symbol name */
		seq_printf(s, "waiter %pF\n", waiter->callback);
	}
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
}
935
/* debugfs "sync" file: dump every registered timeline, then every fence. */
static int sync_debugfs_show(struct seq_file *s, void *unused)
{
	unsigned long flags;
	struct list_head *pos;

	seq_printf(s, "objs:\n--------------\n");

	spin_lock_irqsave(&sync_timeline_list_lock, flags);
	list_for_each(pos, &sync_timeline_list_head) {
		struct sync_timeline *obj =
			container_of(pos, struct sync_timeline,
				     sync_timeline_list);

		sync_print_obj(s, obj);
		seq_printf(s, "\n");
	}
	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

	seq_printf(s, "fences:\n--------------\n");

	spin_lock_irqsave(&sync_fence_list_lock, flags);
	list_for_each(pos, &sync_fence_list_head) {
		struct sync_fence *fence =
			container_of(pos, struct sync_fence, sync_fence_list);

		sync_print_fence(s, fence);
		seq_printf(s, "\n");
	}
	spin_unlock_irqrestore(&sync_fence_list_lock, flags);
	return 0;
}
967
/* Standard single_open boilerplate for the debugfs "sync" file. */
static int sync_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, sync_debugfs_show, inode->i_private);
}

static const struct file_operations sync_debugfs_fops = {
	.open           = sync_debugfs_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = single_release,
};

/* Register /sys/kernel/debug/sync at late_initcall time. */
static __init int sync_debugfs_init(void)
{
	debugfs_create_file("sync", S_IRUGO, NULL, NULL, &sync_debugfs_fops);
	return 0;
}
late_initcall(sync_debugfs_init);
986
#define DUMP_CHUNK 256
static char sync_dump_buf[64 * 1024];
/*
 * Dump a fence's state to the kernel log (used on fence errors/timeouts).
 * Renders into the static buffer via a stack seq_file, then emits it with
 * pr_cont in DUMP_CHUNK-sized pieces by temporarily NUL-terminating each
 * chunk in place.
 *
 * NOTE(review): sync_dump_buf is shared with no locking — concurrent
 * dumps could interleave output; presumably acceptable for this
 * debug-only path, but confirm.
 */
static void sync_dump(struct sync_fence *fence)
{
	struct seq_file s = {
		.buf = sync_dump_buf,
		.size = sizeof(sync_dump_buf) - 1,
	};
	int i;

	seq_printf(&s, "fence:\n--------------\n");
	sync_print_fence(&s, fence);
	seq_printf(&s, "\n");

	for (i = 0; i < s.count; i += DUMP_CHUNK) {
		if ((s.count - i) > DUMP_CHUNK) {
			char c = s.buf[i + DUMP_CHUNK];
			s.buf[i + DUMP_CHUNK] = 0;
			pr_cont("%s", s.buf + i);
			s.buf[i + DUMP_CHUNK] = c;
		} else {
			s.buf[s.count] = 0;
			pr_cont("%s", s.buf + i);
		}
	}
}
#else
/* Without CONFIG_DEBUG_FS the dump helpers are absent; make this a no-op. */
static void sync_dump(struct sync_fence *fence)
{
}
#endif