blob: 4a9e63df83e98a32931527892c30f1aee29edd68 [file] [log] [blame]
Erik Gilling7ad530b2013-02-28 16:42:57 -08001/*
2 * drivers/base/sync.c
3 *
4 * Copyright (C) 2012 Google, Inc.
5 *
6 * This software is licensed under the terms of the GNU General Public
7 * License version 2, as published by the Free Software Foundation, and
8 * may be copied, distributed, and modified under those terms.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 */
16
#include <linux/anon_inodes.h>
#include <linux/err.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

#include "sync.h"
26
/* Forward declarations: both are defined later in this file. */
static void sync_fence_signal_pt(struct sync_pt *pt);
static int _sync_pt_has_signaled(struct sync_pt *pt);
29
30struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
31 int size, const char *name)
32{
33 struct sync_timeline *obj;
34
35 if (size < sizeof(struct sync_timeline))
36 return NULL;
37
38 obj = kzalloc(size, GFP_KERNEL);
39 if (obj == NULL)
40 return NULL;
41
42 obj->ops = ops;
43 strlcpy(obj->name, name, sizeof(obj->name));
44
45 INIT_LIST_HEAD(&obj->child_list_head);
46 spin_lock_init(&obj->child_list_lock);
47
48 INIT_LIST_HEAD(&obj->active_list_head);
49 spin_lock_init(&obj->active_list_lock);
50
51 return obj;
52}
53
/*
 * Flag a timeline as destroyed and release it.
 *
 * If no sync_pts are attached, the timeline is freed immediately.
 * Otherwise the actual kfree() is deferred to sync_timeline_remove_pt(),
 * which frees the timeline when its last child is removed; here we signal
 * the timeline so that still-pending points resolve (they report -ENOENT
 * via _sync_pt_has_signaled() once destroyed is set) instead of blocking
 * their waiters forever.
 *
 * NOTE(review): if the last child is removed concurrently between the
 * unlock below and sync_timeline_signal(), obj may already be freed —
 * verify callers serialize destroy against pt teardown.
 */
void sync_timeline_destroy(struct sync_timeline *obj)
{
	unsigned long flags;
	bool needs_freeing;

	spin_lock_irqsave(&obj->child_list_lock, flags);
	obj->destroyed = true;
	needs_freeing = list_empty(&obj->child_list_head);
	spin_unlock_irqrestore(&obj->child_list_lock, flags);

	if (needs_freeing)
		kfree(obj);
	else
		sync_timeline_signal(obj);
}
69
70static void sync_timeline_add_pt(struct sync_timeline *obj, struct sync_pt *pt)
71{
72 unsigned long flags;
73
74 pt->parent = obj;
75
76 spin_lock_irqsave(&obj->child_list_lock, flags);
77 list_add_tail(&pt->child_list, &obj->child_list_head);
78 spin_unlock_irqrestore(&obj->child_list_lock, flags);
79}
80
/*
 * Detach pt from its parent timeline's active and child lists.
 *
 * If the timeline was already marked destroyed (sync_timeline_destroy())
 * and pt was its last child, the timeline itself is freed here — this is
 * the deferred half of the destroy path.
 */
static void sync_timeline_remove_pt(struct sync_pt *pt)
{
	struct sync_timeline *obj = pt->parent;
	unsigned long flags;
	bool needs_freeing;

	/* Drop pt from the active list if it is still queued there. */
	spin_lock_irqsave(&obj->active_list_lock, flags);
	if (!list_empty(&pt->active_list))
		list_del_init(&pt->active_list);
	spin_unlock_irqrestore(&obj->active_list_lock, flags);

	spin_lock_irqsave(&obj->child_list_lock, flags);
	list_del(&pt->child_list);
	/* Last child of a destroyed timeline: we must free the timeline. */
	needs_freeing = obj->destroyed && list_empty(&obj->child_list_head);
	spin_unlock_irqrestore(&obj->child_list_lock, flags);

	if (needs_freeing)
		kfree(obj);
}
100
/*
 * Called by timeline drivers after advancing the timeline: collect every
 * active point that now reports signaled, then notify each point's fence.
 *
 * Signaled points are first moved onto a private list under
 * active_list_lock, then dispatched with the lock dropped, because
 * sync_fence_signal_pt() runs waiter callbacks and wake-ups that must not
 * execute under this spinlock.
 */
void sync_timeline_signal(struct sync_timeline *obj)
{
	unsigned long flags;
	LIST_HEAD(signaled_pts);
	struct list_head *pos, *n;

	spin_lock_irqsave(&obj->active_list_lock, flags);

	list_for_each_safe(pos, n, &obj->active_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, active_list);

		if (_sync_pt_has_signaled(pt))
			list_move(pos, &signaled_pts);
	}

	spin_unlock_irqrestore(&obj->active_list_lock, flags);

	list_for_each_safe(pos, n, &signaled_pts) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, active_list);

		/* Re-init so sync_timeline_remove_pt() sees it unqueued. */
		list_del_init(pos);
		sync_fence_signal_pt(pt);
	}
}
127
128struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size)
129{
130 struct sync_pt *pt;
131
132 if (size < sizeof(struct sync_pt))
133 return NULL;
134
135 pt = kzalloc(size, GFP_KERNEL);
136 if (pt == NULL)
137 return NULL;
138
139 INIT_LIST_HEAD(&pt->active_list);
140 sync_timeline_add_pt(parent, pt);
141
142 return pt;
143}
144
145void sync_pt_free(struct sync_pt *pt)
146{
147 if (pt->parent->ops->free_pt)
148 pt->parent->ops->free_pt(pt);
149
150 sync_timeline_remove_pt(pt);
151
152 kfree(pt);
153}
154
/* call with pt->parent->active_list_lock held */
static int _sync_pt_has_signaled(struct sync_pt *pt)
{
	/*
	 * Query the driver only while status is still 0; once non-zero the
	 * cached value is final (sync_fence_get_status() treats >0 as
	 * signaled and <0 as error).
	 */
	if (!pt->status)
		pt->status = pt->parent->ops->has_signaled(pt);

	/* A still-pending point on a destroyed timeline can never signal. */
	if (!pt->status && pt->parent->destroyed)
		pt->status = -ENOENT;

	return pt->status;
}
166
167static struct sync_pt *sync_pt_dup(struct sync_pt *pt)
168{
169 return pt->parent->ops->dup(pt);
170}
171
172/* Adds a sync pt to the active queue. Called when added to a fence */
173static void sync_pt_activate(struct sync_pt *pt)
174{
175 struct sync_timeline *obj = pt->parent;
176 unsigned long flags;
177 int err;
178
179 spin_lock_irqsave(&obj->active_list_lock, flags);
180
181 err = _sync_pt_has_signaled(pt);
182 if (err != 0)
183 goto out;
184
185 list_add_tail(&pt->active_list, &obj->active_list_head);
186
187out:
188 spin_unlock_irqrestore(&obj->active_list_lock, flags);
189}
190
/* File operations backing the anonymous inode created for each fence
 * by sync_fence_alloc(); the fence's lifetime is tied to this file. */
static int sync_fence_release(struct inode *inode, struct file *file);
static long sync_fence_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg);


static const struct file_operations sync_fence_fops = {
	.release = sync_fence_release,
	.unlocked_ioctl = sync_fence_ioctl,
};
200
201static struct sync_fence *sync_fence_alloc(const char *name)
202{
203 struct sync_fence *fence;
204
205 fence = kzalloc(sizeof(struct sync_fence), GFP_KERNEL);
206 if (fence == NULL)
207 return NULL;
208
209 fence->file = anon_inode_getfile("sync_fence", &sync_fence_fops,
210 fence, 0);
211 if (fence->file == NULL)
212 goto err;
213
214 strlcpy(fence->name, name, sizeof(fence->name));
215
216 INIT_LIST_HEAD(&fence->pt_list_head);
217 INIT_LIST_HEAD(&fence->waiter_list_head);
218 spin_lock_init(&fence->waiter_list_lock);
219
220 init_waitqueue_head(&fence->wq);
221 return fence;
222
223err:
224 kfree(fence);
225 return NULL;
226}
227
/* TODO: implement a create which takes more than one sync_pt */
229struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt)
230{
231 struct sync_fence *fence;
232
233 if (pt->fence)
234 return NULL;
235
236 fence = sync_fence_alloc(name);
237 if (fence == NULL)
238 return NULL;
239
240 pt->fence = fence;
241 list_add(&pt->pt_list, &fence->pt_list_head);
242 sync_pt_activate(pt);
243
244 return fence;
245}
246
247static int sync_fence_copy_pts(struct sync_fence *dst, struct sync_fence *src)
248{
249 struct list_head *pos;
250
251 list_for_each(pos, &src->pt_list_head) {
252 struct sync_pt *orig_pt =
253 container_of(pos, struct sync_pt, pt_list);
254 struct sync_pt *new_pt = sync_pt_dup(orig_pt);
255
256 if (new_pt == NULL)
257 return -ENOMEM;
258
259 new_pt->fence = dst;
260 list_add(&new_pt->pt_list, &dst->pt_list_head);
261 sync_pt_activate(new_pt);
262 }
263
264 return 0;
265}
266
267static void sync_fence_free_pts(struct sync_fence *fence)
268{
269 struct list_head *pos, *n;
270
271 list_for_each_safe(pos, n, &fence->pt_list_head) {
272 struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
273 sync_pt_free(pt);
274 }
275}
276
277struct sync_fence *sync_fence_fdget(int fd)
278{
279 struct file *file = fget(fd);
280
281 if (file == NULL)
282 return NULL;
283
284 if (file->f_op != &sync_fence_fops)
285 goto err;
286
287 return file->private_data;
288
289err:
290 fput(file);
291 return NULL;
292}
293
/* Drop the caller's reference; when the file's last reference goes away,
 * sync_fence_release() destroys the fence. */
void sync_fence_put(struct sync_fence *fence)
{
	fput(fence->file);
}
298
/* Bind the fence's file to an fd reserved with get_unused_fd(); this
 * transfers the fence's file reference to the fd table. */
void sync_fence_install(struct sync_fence *fence, int fd)
{
	fd_install(fd, fence->file);
}
303
304static int sync_fence_get_status(struct sync_fence *fence)
305{
306 struct list_head *pos;
307 int status = 1;
308
309 list_for_each(pos, &fence->pt_list_head) {
310 struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
311 int pt_status = pt->status;
312
313 if (pt_status < 0) {
314 status = pt_status;
315 break;
316 } else if (status == 1) {
317 status = pt_status;
318 }
319 }
320
321 return status;
322}
323
324struct sync_fence *sync_fence_merge(const char *name,
325 struct sync_fence *a, struct sync_fence *b)
326{
327 struct sync_fence *fence;
328 int err;
329
330 fence = sync_fence_alloc(name);
331 if (fence == NULL)
332 return NULL;
333
334 err = sync_fence_copy_pts(fence, a);
335 if (err < 0)
336 goto err;
337
338 err = sync_fence_copy_pts(fence, b);
339 if (err < 0)
340 goto err;
341
342 fence->status = sync_fence_get_status(fence);
343
344 return fence;
345err:
346 sync_fence_free_pts(fence);
347 kfree(fence);
348 return NULL;
349}
350
/*
 * Called (from sync_timeline_signal()) when a point of the fence has
 * signaled: recompute the fence's aggregate status and, on the
 * 0 -> non-zero transition, run and free all async waiters and wake
 * sleepers in sync_fence_wait().
 */
static void sync_fence_signal_pt(struct sync_pt *pt)
{
	LIST_HEAD(signaled_waiters);
	struct sync_fence *fence = pt->fence;
	struct list_head *pos;
	struct list_head *n;
	unsigned long flags;
	int status;

	status = sync_fence_get_status(fence);

	spin_lock_irqsave(&fence->waiter_list_lock, flags);
	/*
	 * this should protect against two threads racing on the signaled
	 * false -> true transition
	 */
	if (status && !fence->status) {
		/* We won the transition: steal the waiter list to dispatch. */
		list_for_each_safe(pos, n, &fence->waiter_list_head)
			list_move(pos, &signaled_waiters);

		fence->status = status;
	} else {
		/* Already signaled by someone else, or nothing changed. */
		status = 0;
	}
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);

	if (status) {
		/* Run callbacks with the lock dropped; waiters were
		 * allocated in sync_fence_wait_async() and are freed here. */
		list_for_each_safe(pos, n, &signaled_waiters) {
			struct sync_fence_waiter *waiter =
				container_of(pos, struct sync_fence_waiter,
					     waiter_list);

			waiter->callback(fence, waiter->callback_data);
			list_del(pos);
			kfree(waiter);
		}
		wake_up(&fence->wq);
	}
}
390
391int sync_fence_wait_async(struct sync_fence *fence,
392 void (*callback)(struct sync_fence *, void *data),
393 void *callback_data)
394{
395 struct sync_fence_waiter *waiter;
396 unsigned long flags;
397 int err = 0;
398
399 waiter = kzalloc(sizeof(struct sync_fence_waiter), GFP_KERNEL);
400 if (waiter == NULL)
401 return -ENOMEM;
402
403 waiter->callback = callback;
404 waiter->callback_data = callback_data;
405
406 spin_lock_irqsave(&fence->waiter_list_lock, flags);
407
408 if (fence->status) {
409 kfree(waiter);
410 err = fence->status;
411 goto out;
412 }
413
414 list_add_tail(&waiter->waiter_list, &fence->waiter_list_head);
415out:
416 spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
417
418 return err;
419}
420
/*
 * Sleep until the fence signals or @timeout milliseconds elapse.
 *
 * A timeout of 0 waits without a time limit.  Returns 0 once the fence
 * signaled, the fence's negative error status if a point errored (e.g.
 * -ENOENT after timeline destruction), -ETIME if the timeout expired,
 * or the negative error from an interrupted wait.
 */
int sync_fence_wait(struct sync_fence *fence, long timeout)
{
	int err;

	if (timeout) {
		timeout = msecs_to_jiffies(timeout);
		err = wait_event_interruptible_timeout(fence->wq,
						       fence->status != 0,
						       timeout);
	} else {
		err = wait_event_interruptible(fence->wq, fence->status != 0);
	}

	if (err < 0)
		return err;

	if (fence->status < 0)
		return fence->status;

	/* err == 0 from the timed wait means the timeout expired. */
	if (fence->status == 0)
		return -ETIME;

	return 0;
}
445
/* ->release for sync_fence_fops: runs when the last file reference is
 * dropped, freeing the fence's points and the fence itself. */
static int sync_fence_release(struct inode *inode, struct file *file)
{
	struct sync_fence *fence = file->private_data;

	sync_fence_free_pts(fence);
	kfree(fence);

	return 0;
}
455
456static long sync_fence_ioctl_wait(struct sync_fence *fence, unsigned long arg)
457{
458 __s32 value;
459
460 if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
461 return -EFAULT;
462
463 return sync_fence_wait(fence, value);
464}
465
466static long sync_fence_ioctl_merge(struct sync_fence *fence, unsigned long arg)
467{
468 int fd = get_unused_fd();
469 int err;
470 struct sync_fence *fence2, *fence3;
471 struct sync_merge_data data;
472
473 if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
474 return -EFAULT;
475
476 fence2 = sync_fence_fdget(data.fd2);
477 if (fence2 == NULL) {
478 err = -ENOENT;
479 goto err_put_fd;
480 }
481
482 data.name[sizeof(data.name) - 1] = '\0';
483 fence3 = sync_fence_merge(data.name, fence, fence2);
484 if (fence3 == NULL) {
485 err = -ENOMEM;
486 goto err_put_fence2;
487 }
488
489 data.fence = fd;
490 if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
491 err = -EFAULT;
492 goto err_put_fence3;
493 }
494
495 sync_fence_install(fence3, fd);
496 sync_fence_put(fence2);
497 return 0;
498
499err_put_fence3:
500 sync_fence_put(fence3);
501
502err_put_fence2:
503 sync_fence_put(fence2);
504
505err_put_fd:
506 put_unused_fd(fd);
507 return err;
508}
509
510
511static long sync_fence_ioctl(struct file *file, unsigned int cmd,
512 unsigned long arg)
513{
514 struct sync_fence *fence = file->private_data;
515 switch (cmd) {
516 case SYNC_IOC_WAIT:
517 return sync_fence_ioctl_wait(fence, arg);
518
519 case SYNC_IOC_MERGE:
520 return sync_fence_ioctl_merge(fence, arg);
521 default:
522 return -ENOTTY;
523 }
524}
525