/*
 * drivers/base/sync.c
 *
 * Copyright (C) 2012 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/err.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sync.h>
#include <linux/uaccess.h>

#include <linux/anon_inodes.h>

static void sync_fence_signal_pt(struct sync_pt *pt);
static int _sync_pt_has_signaled(struct sync_pt *pt);

struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
					   int size, const char *name)
{
	struct sync_timeline *obj;

	if (size < sizeof(struct sync_timeline))
		return NULL;

	obj = kzalloc(size, GFP_KERNEL);
	if (obj == NULL)
		return NULL;

	obj->ops = ops;
	strlcpy(obj->name, name, sizeof(obj->name));

	INIT_LIST_HEAD(&obj->child_list_head);
	spin_lock_init(&obj->child_list_lock);

	INIT_LIST_HEAD(&obj->active_list_head);
	spin_lock_init(&obj->active_list_lock);

	return obj;
}
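
/*
 * Example: how a driver might embed its own state in a timeline.  This is
 * an illustrative sketch only; the "foo" names are hypothetical and not
 * part of this file.
 *
 *	struct foo_timeline {
 *		struct sync_timeline obj;	// must be the first member
 *		u32 value;			// driver's hardware counter
 *	};
 *
 *	static struct foo_timeline *foo_timeline_create(void)
 *	{
 *		return (struct foo_timeline *)
 *			sync_timeline_create(&foo_timeline_ops,
 *					     sizeof(struct foo_timeline),
 *					     "foo");
 *	}
 *
 * sync_timeline_create() allocates 'size' bytes in one block, so the cast
 * is only valid when struct sync_timeline is the first member.
 */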

void sync_timeline_destroy(struct sync_timeline *obj)
{
	unsigned long flags;
	bool needs_freeing;

	spin_lock_irqsave(&obj->child_list_lock, flags);
	obj->destroyed = true;
	needs_freeing = list_empty(&obj->child_list_head);
	spin_unlock_irqrestore(&obj->child_list_lock, flags);

	if (needs_freeing)
		kfree(obj);
	else
		sync_timeline_signal(obj);
}

static void sync_timeline_add_pt(struct sync_timeline *obj, struct sync_pt *pt)
{
	unsigned long flags;

	pt->parent = obj;

	spin_lock_irqsave(&obj->child_list_lock, flags);
	list_add_tail(&pt->child_list, &obj->child_list_head);
	spin_unlock_irqrestore(&obj->child_list_lock, flags);
}

static void sync_timeline_remove_pt(struct sync_pt *pt)
{
	struct sync_timeline *obj = pt->parent;
	unsigned long flags;
	bool needs_freeing;

	spin_lock_irqsave(&obj->active_list_lock, flags);
	if (!list_empty(&pt->active_list))
		list_del_init(&pt->active_list);
	spin_unlock_irqrestore(&obj->active_list_lock, flags);

	spin_lock_irqsave(&obj->child_list_lock, flags);
	list_del(&pt->child_list);
	needs_freeing = obj->destroyed && list_empty(&obj->child_list_head);
	spin_unlock_irqrestore(&obj->child_list_lock, flags);

	if (needs_freeing)
		kfree(obj);
}

void sync_timeline_signal(struct sync_timeline *obj)
{
	unsigned long flags;
	LIST_HEAD(signaled_pts);
	struct list_head *pos, *n;

	spin_lock_irqsave(&obj->active_list_lock, flags);

	list_for_each_safe(pos, n, &obj->active_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, active_list);

		if (_sync_pt_has_signaled(pt))
			list_move(pos, &signaled_pts);
	}

	spin_unlock_irqrestore(&obj->active_list_lock, flags);

	list_for_each_safe(pos, n, &signaled_pts) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, active_list);

		list_del_init(pos);
		sync_fence_signal_pt(pt);
	}
}
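
/*
 * Example: a driver typically advances its counter and calls
 * sync_timeline_signal() from its completion interrupt.  Hypothetical
 * sketch (the "foo" names are illustrative):
 *
 *	static irqreturn_t foo_irq_handler(int irq, void *data)
 *	{
 *		struct foo_timeline *tl = data;
 *
 *		tl->value++;	// hardware finished one more operation
 *		sync_timeline_signal(&tl->obj);
 *		return IRQ_HANDLED;
 *	}
 *
 * sync_timeline_signal() takes active_list_lock with irqsave and invokes
 * waiter callbacks directly, so it is callable from hard irq context but
 * the callbacks must not sleep.
 */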

struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size)
{
	struct sync_pt *pt;

	if (size < sizeof(struct sync_pt))
		return NULL;

	pt = kzalloc(size, GFP_KERNEL);
	if (pt == NULL)
		return NULL;

	INIT_LIST_HEAD(&pt->active_list);
	sync_timeline_add_pt(parent, pt);

	return pt;
}
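
/*
 * Example: as with timelines, a driver embeds struct sync_pt at the start
 * of its own point type and passes the enclosing size here.  Hypothetical
 * sketch (illustrative "foo" names):
 *
 *	struct foo_pt {
 *		struct sync_pt pt;	// must be the first member
 *		u32 value;		// timeline value this point fires at
 *	};
 *
 *	static struct sync_pt *foo_pt_create(struct foo_timeline *tl, u32 value)
 *	{
 *		struct foo_pt *fpt = (struct foo_pt *)
 *			sync_pt_create(&tl->obj, sizeof(struct foo_pt));
 *
 *		if (fpt)
 *			fpt->value = value;
 *		return (struct sync_pt *)fpt;
 *	}
 *
 * The driver's has_signaled op would then compare fpt->value against the
 * timeline's current counter.
 */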

void sync_pt_free(struct sync_pt *pt)
{
	if (pt->parent->ops->free_pt)
		pt->parent->ops->free_pt(pt);

	sync_timeline_remove_pt(pt);

	kfree(pt);
}

/* call with pt->parent->active_list_lock held */
static int _sync_pt_has_signaled(struct sync_pt *pt)
{
	int old_status = pt->status;

	if (!pt->status)
		pt->status = pt->parent->ops->has_signaled(pt);

	if (!pt->status && pt->parent->destroyed)
		pt->status = -ENOENT;

	if (pt->status != old_status)
		pt->timestamp = ktime_get();

	return pt->status;
}

static struct sync_pt *sync_pt_dup(struct sync_pt *pt)
{
	return pt->parent->ops->dup(pt);
}

/* Adds a sync pt to the active queue.  Called when added to a fence */
static void sync_pt_activate(struct sync_pt *pt)
{
	struct sync_timeline *obj = pt->parent;
	unsigned long flags;
	int err;

	spin_lock_irqsave(&obj->active_list_lock, flags);

	err = _sync_pt_has_signaled(pt);
	if (err != 0)
		goto out;

	list_add_tail(&pt->active_list, &obj->active_list_head);

out:
	spin_unlock_irqrestore(&obj->active_list_lock, flags);
}

static int sync_fence_release(struct inode *inode, struct file *file);
static long sync_fence_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg);

static const struct file_operations sync_fence_fops = {
	.release = sync_fence_release,
	.unlocked_ioctl = sync_fence_ioctl,
};

static struct sync_fence *sync_fence_alloc(const char *name)
{
	struct sync_fence *fence;

	fence = kzalloc(sizeof(struct sync_fence), GFP_KERNEL);
	if (fence == NULL)
		return NULL;

	/* anon_inode_getfile() returns an ERR_PTR on failure, never NULL */
	fence->file = anon_inode_getfile("sync_fence", &sync_fence_fops,
					 fence, 0);
	if (IS_ERR(fence->file))
		goto err;

	strlcpy(fence->name, name, sizeof(fence->name));

	INIT_LIST_HEAD(&fence->pt_list_head);
	INIT_LIST_HEAD(&fence->waiter_list_head);
	spin_lock_init(&fence->waiter_list_lock);

	init_waitqueue_head(&fence->wq);
	return fence;

err:
	kfree(fence);
	return NULL;
}

/* TODO: implement a create which takes more than one sync_pt */
struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt)
{
	struct sync_fence *fence;

	if (pt->fence)
		return NULL;

	fence = sync_fence_alloc(name);
	if (fence == NULL)
		return NULL;

	pt->fence = fence;
	list_add(&pt->pt_list, &fence->pt_list_head);
	sync_pt_activate(pt);

	return fence;
}
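
/*
 * Example: typical flow for handing a fence back to userspace.  This is a
 * hypothetical sketch with abbreviated error handling; the "foo" names are
 * illustrative:
 *
 *	struct sync_pt *pt = foo_pt_create(tl, value);
 *	struct sync_fence *fence = sync_fence_create("foo_fence", pt);
 *	int fd = get_unused_fd();
 *
 *	if (fd >= 0)
 *		sync_fence_install(fence, fd);
 *	// return fd to userspace, e.g. in an ioctl result struct
 *
 * Once installed, the fence's lifetime follows the file's reference count;
 * sync_fence_release() frees the fence and its points on last fput().
 */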

static int sync_fence_copy_pts(struct sync_fence *dst, struct sync_fence *src)
{
	struct list_head *pos;

	list_for_each(pos, &src->pt_list_head) {
		struct sync_pt *orig_pt =
			container_of(pos, struct sync_pt, pt_list);
		struct sync_pt *new_pt = sync_pt_dup(orig_pt);

		if (new_pt == NULL)
			return -ENOMEM;

		new_pt->fence = dst;
		list_add(&new_pt->pt_list, &dst->pt_list_head);
		sync_pt_activate(new_pt);
	}

	return 0;
}

static void sync_fence_free_pts(struct sync_fence *fence)
{
	struct list_head *pos, *n;

	list_for_each_safe(pos, n, &fence->pt_list_head) {
		struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
		sync_pt_free(pt);
	}
}

struct sync_fence *sync_fence_fdget(int fd)
{
	struct file *file = fget(fd);

	if (file == NULL)
		return NULL;

	if (file->f_op != &sync_fence_fops)
		goto err;

	return file->private_data;

err:
	fput(file);
	return NULL;
}

void sync_fence_put(struct sync_fence *fence)
{
	fput(fence->file);
}

void sync_fence_install(struct sync_fence *fence, int fd)
{
	fd_install(fd, fence->file);
}

static int sync_fence_get_status(struct sync_fence *fence)
{
	struct list_head *pos;
	int status = 1;

	list_for_each(pos, &fence->pt_list_head) {
		struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
		int pt_status = pt->status;

		if (pt_status < 0) {
			status = pt_status;
			break;
		} else if (status == 1) {
			status = pt_status;
		}
	}

	return status;
}

struct sync_fence *sync_fence_merge(const char *name,
				    struct sync_fence *a, struct sync_fence *b)
{
	struct sync_fence *fence;
	int err;

	fence = sync_fence_alloc(name);
	if (fence == NULL)
		return NULL;

	err = sync_fence_copy_pts(fence, a);
	if (err < 0)
		goto err;

	err = sync_fence_copy_pts(fence, b);
	if (err < 0)
		goto err;

	fence->status = sync_fence_get_status(fence);

	return fence;
err:
	sync_fence_free_pts(fence);
	kfree(fence);
	return NULL;
}
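
/*
 * Example: merging lets a consumer wait on work from several producers
 * through a single fence.  Hypothetical in-kernel sketch (illustrative
 * names, reference handling abbreviated):
 *
 *	struct sync_fence *gpu_done = sync_fence_fdget(gpu_fd);
 *	struct sync_fence *disp_done = sync_fence_fdget(disp_fd);
 *	struct sync_fence *both = sync_fence_merge("gpu+disp",
 *						   gpu_done, disp_done);
 *
 * The merged fence duplicates every sync_pt from both sources, so it only
 * signals (status 1) once all underlying points have signaled, and reports
 * an error (status < 0) if any point fails.
 */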

static void sync_fence_signal_pt(struct sync_pt *pt)
{
	LIST_HEAD(signaled_waiters);
	struct sync_fence *fence = pt->fence;
	struct list_head *pos;
	struct list_head *n;
	unsigned long flags;
	int status;

	status = sync_fence_get_status(fence);

	spin_lock_irqsave(&fence->waiter_list_lock, flags);
	/*
	 * this should protect against two threads racing on the signaled
	 * false -> true transition
	 */
	if (status && !fence->status) {
		list_for_each_safe(pos, n, &fence->waiter_list_head)
			list_move(pos, &signaled_waiters);

		fence->status = status;
	} else {
		status = 0;
	}
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);

	if (status) {
		list_for_each_safe(pos, n, &signaled_waiters) {
			struct sync_fence_waiter *waiter =
				container_of(pos, struct sync_fence_waiter,
					     waiter_list);

			waiter->callback(fence, waiter->callback_data);
			list_del(pos);
			kfree(waiter);
		}
		wake_up(&fence->wq);
	}
}

int sync_fence_wait_async(struct sync_fence *fence,
			  void (*callback)(struct sync_fence *, void *data),
			  void *callback_data)
{
	struct sync_fence_waiter *waiter;
	unsigned long flags;
	int err = 0;

	waiter = kzalloc(sizeof(struct sync_fence_waiter), GFP_KERNEL);
	if (waiter == NULL)
		return -ENOMEM;

	waiter->callback = callback;
	waiter->callback_data = callback_data;

	spin_lock_irqsave(&fence->waiter_list_lock, flags);

	if (fence->status) {
		kfree(waiter);
		err = fence->status;
		goto out;
	}

	list_add_tail(&waiter->waiter_list, &fence->waiter_list_head);
out:
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);

	return err;
}
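
/*
 * Example: asynchronous waiters get a callback instead of blocking.  The
 * callback runs from whatever context called sync_timeline_signal()
 * (possibly hard irq), so it must not sleep.  Hypothetical sketch
 * (illustrative "foo" names):
 *
 *	static void foo_fence_cb(struct sync_fence *fence, void *data)
 *	{
 *		struct foo_job *job = data;
 *
 *		queue_work(foo_wq, &job->work);	// defer the real work
 *	}
 *
 *	err = sync_fence_wait_async(fence, foo_fence_cb, job);
 *	if (err != 0) {
 *		// no callback will run: err > 0 means the fence had
 *		// already signaled, err < 0 is its error status
 *		foo_complete_inline(job);
 *	}
 */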

int sync_fence_wait(struct sync_fence *fence, long timeout)
{
	int err;

	if (timeout) {
		timeout = msecs_to_jiffies(timeout);
		err = wait_event_interruptible_timeout(fence->wq,
						       fence->status != 0,
						       timeout);
	} else {
		err = wait_event_interruptible(fence->wq, fence->status != 0);
	}

	if (err < 0)
		return err;

	if (fence->status < 0)
		return fence->status;

	if (fence->status == 0)
		return -ETIME;

	return 0;
}
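
/*
 * Example: a blocking wait with a one second timeout.  Returns 0 on
 * signal, -ETIME on timeout, -ERESTARTSYS if interrupted, or the fence's
 * own error status; a timeout of 0 waits forever.
 *
 *	err = sync_fence_wait(fence, 1000);	// timeout in milliseconds
 *	if (err == -ETIME)
 *		pr_warn("fence %s timed out\n", fence->name);
 */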

static int sync_fence_release(struct inode *inode, struct file *file)
{
	struct sync_fence *fence = file->private_data;

	sync_fence_free_pts(fence);
	kfree(fence);

	return 0;
}

static long sync_fence_ioctl_wait(struct sync_fence *fence, unsigned long arg)
{
	__u32 value;

	if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
		return -EFAULT;

	return sync_fence_wait(fence, value);
}

static long sync_fence_ioctl_merge(struct sync_fence *fence, unsigned long arg)
{
	int fd = get_unused_fd();
	int err;
	struct sync_fence *fence2, *fence3;
	struct sync_merge_data data;

	if (fd < 0)
		return fd;

	/* release the reserved fd on every error path past this point */
	if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
		err = -EFAULT;
		goto err_put_fd;
	}

	fence2 = sync_fence_fdget(data.fd2);
	if (fence2 == NULL) {
		err = -ENOENT;
		goto err_put_fd;
	}

	data.name[sizeof(data.name) - 1] = '\0';
	fence3 = sync_fence_merge(data.name, fence, fence2);
	if (fence3 == NULL) {
		err = -ENOMEM;
		goto err_put_fence2;
	}

	data.fence = fd;
	if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
		err = -EFAULT;
		goto err_put_fence3;
	}

	sync_fence_install(fence3, fd);
	sync_fence_put(fence2);
	return 0;

err_put_fence3:
	sync_fence_put(fence3);

err_put_fence2:
	sync_fence_put(fence2);

err_put_fd:
	put_unused_fd(fd);
	return err;
}

static long sync_fence_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	struct sync_fence *fence = file->private_data;

	switch (cmd) {
	case SYNC_IOC_WAIT:
		return sync_fence_ioctl_wait(fence, arg);

	case SYNC_IOC_MERGE:
		return sync_fence_ioctl_merge(fence, arg);

	default:
		return -ENOTTY;
	}
}