blob: 9b733c510cbfc3030600ff33de96ee54b965b6e3 [file] [log] [blame]
Dave Airliee9083422017-04-04 13:26:24 +10001/*
2 * Copyright 2017 Red Hat
Dave Airlie5e60a102017-08-25 10:52:22 -07003 * Parts ported from amdgpu (fence wait code).
4 * Copyright 2016 Advanced Micro Devices, Inc.
Dave Airliee9083422017-04-04 13:26:24 +10005 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
22 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
23 * IN THE SOFTWARE.
24 *
25 * Authors:
26 *
27 */
28
29/**
30 * DOC: Overview
31 *
 * DRM synchronisation objects (syncobj) are persistent objects
 * that contain an optional fence. The fence can be updated with a new
 * fence, or be NULL.
35 *
Dave Airlie5e60a102017-08-25 10:52:22 -070036 * syncobj's can be waited upon, where it will wait for the underlying
37 * fence.
38 *
Dave Airliee9083422017-04-04 13:26:24 +100039 * syncobj's can be export to fd's and back, these fd's are opaque and
40 * have no other use case, except passing the syncobj between processes.
41 *
42 * Their primary use-case is to implement Vulkan fences and semaphores.
43 *
44 * syncobj have a kref reference count, but also have an optional file.
45 * The file is only created once the syncobj is exported.
46 * The file takes a reference on the kref.
47 */
48
49#include <drm/drmP.h>
50#include <linux/file.h>
51#include <linux/fs.h>
52#include <linux/anon_inodes.h>
Dave Airlie3ee45a32017-04-26 04:09:02 +010053#include <linux/sync_file.h>
Jason Ekstrande7aca5032017-08-25 10:52:24 -070054#include <linux/sched/signal.h>
Dave Airliee9083422017-04-04 13:26:24 +100055
56#include "drm_internal.h"
57#include <drm/drm_syncobj.h>
58
59/**
60 * drm_syncobj_find - lookup and reference a sync object.
61 * @file_private: drm file private pointer
62 * @handle: sync object handle to lookup.
63 *
64 * Returns a reference to the syncobj pointed to by handle or NULL.
65 */
66struct drm_syncobj *drm_syncobj_find(struct drm_file *file_private,
67 u32 handle)
68{
69 struct drm_syncobj *syncobj;
70
71 spin_lock(&file_private->syncobj_table_lock);
72
73 /* Check if we currently have a reference on the object */
74 syncobj = idr_find(&file_private->syncobj_idr, handle);
75 if (syncobj)
76 drm_syncobj_get(syncobj);
77
78 spin_unlock(&file_private->syncobj_table_lock);
79
80 return syncobj;
81}
82EXPORT_SYMBOL(drm_syncobj_find);
83
/* Append @cb (to be invoked as @func) to syncobj->cb_list.
 * Caller must hold syncobj->lock.
 */
static void drm_syncobj_add_callback_locked(struct drm_syncobj *syncobj,
					    struct drm_syncobj_cb *cb,
					    drm_syncobj_func_t func)
{
	cb->func = func;
	list_add_tail(&cb->node, &syncobj->cb_list);
}
91
/* Fetch the syncobj's current fence, or, if none is installed yet,
 * register @cb to run when one is.
 *
 * Returns 1 with *fence holding a new reference when a fence was present;
 * returns 0 with *fence == NULL after queuing the callback otherwise.
 */
static int drm_syncobj_fence_get_or_add_callback(struct drm_syncobj *syncobj,
						 struct dma_fence **fence,
						 struct drm_syncobj_cb *cb,
						 drm_syncobj_func_t func)
{
	int ret;

	/* Fast path: no locking needed if a fence is already installed. */
	*fence = drm_syncobj_fence_get(syncobj);
	if (*fence)
		return 1;

	spin_lock(&syncobj->lock);
	/* We've already tried once to get a fence and failed. Now that we
	 * have the lock, try one more time just to be sure we don't add a
	 * callback when a fence has already been set.
	 */
	if (syncobj->fence) {
		/* The lock protects syncobj->fence, so plain dereference is
		 * legal here; lockdep_is_held() documents that to RCU.
		 */
		*fence = dma_fence_get(rcu_dereference_protected(syncobj->fence,
								 lockdep_is_held(&syncobj->lock)));
		ret = 1;
	} else {
		*fence = NULL;
		drm_syncobj_add_callback_locked(syncobj, cb, func);
		ret = 0;
	}
	spin_unlock(&syncobj->lock);

	return ret;
}
121
/**
 * drm_syncobj_add_callback - adds a callback to syncobj::cb_list
 * @syncobj: Sync object to which to add the callback
 * @cb: Callback to add
 * @func: Func to use when initializing the drm_syncobj_cb struct
 *
 * This adds a callback to be called next time the fence is replaced
 */
void drm_syncobj_add_callback(struct drm_syncobj *syncobj,
			      struct drm_syncobj_cb *cb,
			      drm_syncobj_func_t func)
{
	/* Take the lock ourselves; the _locked variant does the work. */
	spin_lock(&syncobj->lock);
	drm_syncobj_add_callback_locked(syncobj, cb, func);
	spin_unlock(&syncobj->lock);
}
EXPORT_SYMBOL(drm_syncobj_add_callback);
139
/**
 * drm_syncobj_remove_callback - removes a callback from syncobj::cb_list
 * @syncobj: Sync object from which to remove the callback
 * @cb: Callback to remove
 */
void drm_syncobj_remove_callback(struct drm_syncobj *syncobj,
				 struct drm_syncobj_cb *cb)
{
	spin_lock(&syncobj->lock);
	/* list_del_init() so a later .func check sees the node as unused */
	list_del_init(&cb->node);
	spin_unlock(&syncobj->lock);
}
EXPORT_SYMBOL(drm_syncobj_remove_callback);
153
/**
 * drm_syncobj_replace_fence - replace fence in a sync object.
 * @syncobj: Sync object to replace fence in
 * @fence: fence to install in sync file.
 *
 * This replaces the fence on a sync object.  A NULL @fence clears the
 * syncobj.  Any callbacks queued on the syncobj are fired (with the
 * syncobj lock held) when the fence actually changes.
 */
void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
			       struct dma_fence *fence)
{
	struct dma_fence *old_fence;
	struct drm_syncobj_cb *cur, *tmp;

	/* Take our reference on the new fence before publishing it. */
	if (fence)
		dma_fence_get(fence);

	spin_lock(&syncobj->lock);

	old_fence = rcu_dereference_protected(syncobj->fence,
					      lockdep_is_held(&syncobj->lock));
	/* Publish the new fence for RCU readers. */
	rcu_assign_pointer(syncobj->fence, fence);

	/* Fire registered callbacks, but only on a real transition. */
	if (fence != old_fence) {
		list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node) {
			list_del_init(&cur->node);
			/* Callbacks run with syncobj->lock held. */
			cur->func(syncobj, cur);
		}
	}

	spin_unlock(&syncobj->lock);

	/* Drop the reference syncobj->fence held on the old fence. */
	dma_fence_put(old_fence);
}
EXPORT_SYMBOL(drm_syncobj_replace_fence);
188
/* Minimal stand-alone fence used to represent an already-signaled state
 * (DRM_SYNCOBJ_CREATE_SIGNALED / the SIGNAL ioctl).
 */
struct drm_syncobj_null_fence {
	struct dma_fence base;	/* must be first: cast target for dma_fence */
	spinlock_t lock;	/* lock handed to dma_fence_init() */
};
193
/* Driver and timeline name for the stub fence (same string for both). */
static const char *drm_syncobj_null_fence_get_name(struct dma_fence *fence)
{
	return "syncobjnull";
}
198
/* .enable_signaling hook for the stub fence.
 *
 * NOTE(review): the fence is signaled immediately after creation in
 * drm_syncobj_assign_null_handle(), so this path looks effectively dead;
 * the recursive-looking dma_fence_enable_sw_signaling() call on the fence
 * itself seems redundant here — verify against the dma_fence core before
 * relying on it.
 */
static bool drm_syncobj_null_fence_enable_signaling(struct dma_fence *fence)
{
	dma_fence_enable_sw_signaling(fence);
	return !dma_fence_is_signaled(fence);
}
204
/* Ops table for the always-signaled stub fence; default wait, no release
 * hook (the fence is freed through the dma_fence core's default path).
 */
static const struct dma_fence_ops drm_syncobj_null_fence_ops = {
	.get_driver_name = drm_syncobj_null_fence_get_name,
	.get_timeline_name = drm_syncobj_null_fence_get_name,
	.enable_signaling = drm_syncobj_null_fence_enable_signaling,
	.wait = dma_fence_default_wait,
	.release = NULL,
};
212
213static int drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj)
214{
215 struct drm_syncobj_null_fence *fence;
216 fence = kzalloc(sizeof(*fence), GFP_KERNEL);
217 if (fence == NULL)
218 return -ENOMEM;
219
220 spin_lock_init(&fence->lock);
221 dma_fence_init(&fence->base, &drm_syncobj_null_fence_ops,
222 &fence->lock, 0, 0);
223 dma_fence_signal(&fence->base);
224
225 drm_syncobj_replace_fence(syncobj, &fence->base);
226
227 dma_fence_put(&fence->base);
228
229 return 0;
230}
231
Jason Ekstrandafaf5922017-08-25 10:52:19 -0700232int drm_syncobj_find_fence(struct drm_file *file_private,
233 u32 handle,
234 struct dma_fence **fence)
Dave Airliee9083422017-04-04 13:26:24 +1000235{
236 struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
237 int ret = 0;
238
239 if (!syncobj)
240 return -ENOENT;
241
Jason Ekstrand309a5482017-08-25 10:52:20 -0700242 *fence = drm_syncobj_fence_get(syncobj);
Dave Airliee9083422017-04-04 13:26:24 +1000243 if (!*fence) {
244 ret = -EINVAL;
245 }
246 drm_syncobj_put(syncobj);
247 return ret;
248}
Jason Ekstrandafaf5922017-08-25 10:52:19 -0700249EXPORT_SYMBOL(drm_syncobj_find_fence);
Dave Airliee9083422017-04-04 13:26:24 +1000250
251/**
252 * drm_syncobj_free - free a sync object.
253 * @kref: kref to free.
254 *
255 * Only to be called from kref_put in drm_syncobj_put.
256 */
257void drm_syncobj_free(struct kref *kref)
258{
259 struct drm_syncobj *syncobj = container_of(kref,
260 struct drm_syncobj,
261 refcount);
Jason Ekstrand9c19fb12017-08-28 07:39:25 -0700262 drm_syncobj_replace_fence(syncobj, NULL);
Dave Airliee9083422017-04-04 13:26:24 +1000263 kfree(syncobj);
264}
265EXPORT_SYMBOL(drm_syncobj_free);
266
Marek Olšák1321fd22017-09-12 22:42:12 +0200267/**
268 * drm_syncobj_create - create a new syncobj
269 * @out_syncobj: returned syncobj
270 * @flags: DRM_SYNCOBJ_* flags
271 * @fence: if non-NULL, the syncobj will represent this fence
272 */
273int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags,
274 struct dma_fence *fence)
Dave Airliee9083422017-04-04 13:26:24 +1000275{
276 int ret;
277 struct drm_syncobj *syncobj;
278
279 syncobj = kzalloc(sizeof(struct drm_syncobj), GFP_KERNEL);
280 if (!syncobj)
281 return -ENOMEM;
282
283 kref_init(&syncobj->refcount);
Jason Ekstrand9c19fb12017-08-28 07:39:25 -0700284 INIT_LIST_HEAD(&syncobj->cb_list);
285 spin_lock_init(&syncobj->lock);
Dave Airliee9083422017-04-04 13:26:24 +1000286
Jason Ekstrand1fc08212017-08-25 10:52:25 -0700287 if (flags & DRM_SYNCOBJ_CREATE_SIGNALED) {
288 ret = drm_syncobj_assign_null_handle(syncobj);
289 if (ret < 0) {
290 drm_syncobj_put(syncobj);
291 return ret;
292 }
293 }
294
Marek Olšák1321fd22017-09-12 22:42:12 +0200295 if (fence)
296 drm_syncobj_replace_fence(syncobj, fence);
297
298 *out_syncobj = syncobj;
299 return 0;
300}
301EXPORT_SYMBOL(drm_syncobj_create);
302
/**
 * drm_syncobj_get_handle - get a handle from a syncobj
 * @file_private: drm file private pointer
 * @syncobj: sync object to install in the handle table
 * @handle: out parameter receiving the new handle
 *
 * Allocates an idr handle that owns a reference on @syncobj.
 * Returns 0 on success or a negative errno from idr_alloc().
 */
int drm_syncobj_get_handle(struct drm_file *file_private,
			   struct drm_syncobj *syncobj, u32 *handle)
{
	int ret;

	/* take a reference to put in the idr */
	drm_syncobj_get(syncobj);

	idr_preload(GFP_KERNEL);
	spin_lock(&file_private->syncobj_table_lock);
	/* handles start at 1; 0 is never a valid syncobj handle */
	ret = idr_alloc(&file_private->syncobj_idr, syncobj, 1, 0, GFP_NOWAIT);
	spin_unlock(&file_private->syncobj_table_lock);

	idr_preload_end();

	if (ret < 0) {
		/* drop the reference meant for the idr */
		drm_syncobj_put(syncobj);
		return ret;
	}

	*handle = ret;
	return 0;
}
EXPORT_SYMBOL(drm_syncobj_get_handle);
330
331static int drm_syncobj_create_as_handle(struct drm_file *file_private,
332 u32 *handle, uint32_t flags)
333{
334 int ret;
335 struct drm_syncobj *syncobj;
336
337 ret = drm_syncobj_create(&syncobj, flags, NULL);
338 if (ret)
339 return ret;
340
341 ret = drm_syncobj_get_handle(file_private, syncobj, handle);
342 drm_syncobj_put(syncobj);
343 return ret;
344}
Dave Airliee9083422017-04-04 13:26:24 +1000345
346static int drm_syncobj_destroy(struct drm_file *file_private,
347 u32 handle)
348{
349 struct drm_syncobj *syncobj;
350
351 spin_lock(&file_private->syncobj_table_lock);
352 syncobj = idr_remove(&file_private->syncobj_idr, handle);
353 spin_unlock(&file_private->syncobj_table_lock);
354
355 if (!syncobj)
356 return -EINVAL;
357
358 drm_syncobj_put(syncobj);
359 return 0;
360}
361
/* ->release for syncobj fds: drop the syncobj reference the file held. */
static int drm_syncobj_file_release(struct inode *inode, struct file *file)
{
	struct drm_syncobj *syncobj = file->private_data;

	drm_syncobj_put(syncobj);
	return 0;
}
369
/* Syncobj fds support no operations besides close; the f_op pointer also
 * identifies syncobj files in drm_syncobj_fdget().
 */
static const struct file_operations drm_syncobj_file_fops = {
	.release = drm_syncobj_file_release,
};
373
/* Lazily create the anon-inode file backing @syncobj for fd export.
 *
 * Concurrent callers race to install their file with cmpxchg(); the loser
 * fput()s its file, whose ->release then drops the syncobj reference taken
 * below, so refcounts stay balanced on both outcomes.
 */
static int drm_syncobj_alloc_file(struct drm_syncobj *syncobj)
{
	struct file *file = anon_inode_getfile("syncobj_file",
					       &drm_syncobj_file_fops,
					       syncobj, 0);
	if (IS_ERR(file))
		return PTR_ERR(file);

	/* reference owned by the file (dropped in ->release) */
	drm_syncobj_get(syncobj);
	if (cmpxchg(&syncobj->file, NULL, file)) {
		/* lost the race */
		fput(file);
	}

	return 0;
}
390
/* Install @syncobj's backing file in a new fd, creating the file on first
 * export.  On success *p_fd receives the fd; on failure the fd is recycled.
 *
 * NOTE(review): the unlocked !syncobj->file check is only a fast path —
 * drm_syncobj_alloc_file() resolves races via cmpxchg, so two concurrent
 * exporters still end up sharing one file.
 */
int drm_syncobj_get_fd(struct drm_syncobj *syncobj, int *p_fd)
{
	int ret;
	int fd;

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		return fd;

	if (!syncobj->file) {
		ret = drm_syncobj_alloc_file(syncobj);
		if (ret) {
			put_unused_fd(fd);
			return ret;
		}
	}
	/* fd_install consumes no extra file reference accounting here;
	 * the fd shares the file's existing reference semantics. */
	fd_install(fd, syncobj->file);
	*p_fd = fd;
	return 0;
}
EXPORT_SYMBOL(drm_syncobj_get_fd);
412
Dave Airliee9083422017-04-04 13:26:24 +1000413static int drm_syncobj_handle_to_fd(struct drm_file *file_private,
414 u32 handle, int *p_fd)
415{
416 struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
417 int ret;
Dave Airliee9083422017-04-04 13:26:24 +1000418
419 if (!syncobj)
420 return -EINVAL;
421
Marek Olšák684fd0a2017-09-12 22:42:13 +0200422 ret = drm_syncobj_get_fd(syncobj, p_fd);
Dave Airliee9083422017-04-04 13:26:24 +1000423 drm_syncobj_put(syncobj);
424 return ret;
425}
426
427static struct drm_syncobj *drm_syncobj_fdget(int fd)
428{
429 struct file *file = fget(fd);
430
431 if (!file)
432 return NULL;
433 if (file->f_op != &drm_syncobj_file_fops)
434 goto err;
435
436 return file->private_data;
437err:
438 fput(file);
439 return NULL;
440};
441
442static int drm_syncobj_fd_to_handle(struct drm_file *file_private,
443 int fd, u32 *handle)
444{
445 struct drm_syncobj *syncobj = drm_syncobj_fdget(fd);
446 int ret;
447
448 if (!syncobj)
449 return -EINVAL;
450
451 /* take a reference to put in the idr */
452 drm_syncobj_get(syncobj);
453
454 idr_preload(GFP_KERNEL);
455 spin_lock(&file_private->syncobj_table_lock);
456 ret = idr_alloc(&file_private->syncobj_idr, syncobj, 1, 0, GFP_NOWAIT);
457 spin_unlock(&file_private->syncobj_table_lock);
458 idr_preload_end();
459
460 if (ret < 0) {
461 fput(syncobj->file);
462 return ret;
463 }
464 *handle = ret;
465 return 0;
466}
467
Ville Syrjäläa32c94a2017-09-01 19:53:25 +0300468static int drm_syncobj_import_sync_file_fence(struct drm_file *file_private,
469 int fd, int handle)
Dave Airlie3ee45a32017-04-26 04:09:02 +0100470{
471 struct dma_fence *fence = sync_file_get_fence(fd);
472 struct drm_syncobj *syncobj;
473
474 if (!fence)
475 return -EINVAL;
476
477 syncobj = drm_syncobj_find(file_private, handle);
478 if (!syncobj) {
479 dma_fence_put(fence);
480 return -ENOENT;
481 }
482
Chris Wilson00fc2c22017-07-05 21:12:44 +0100483 drm_syncobj_replace_fence(syncobj, fence);
Dave Airlie3ee45a32017-04-26 04:09:02 +0100484 dma_fence_put(fence);
485 drm_syncobj_put(syncobj);
486 return 0;
487}
488
Ville Syrjäläa32c94a2017-09-01 19:53:25 +0300489static int drm_syncobj_export_sync_file(struct drm_file *file_private,
490 int handle, int *p_fd)
Dave Airlie3ee45a32017-04-26 04:09:02 +0100491{
492 int ret;
493 struct dma_fence *fence;
494 struct sync_file *sync_file;
495 int fd = get_unused_fd_flags(O_CLOEXEC);
496
497 if (fd < 0)
498 return fd;
499
Jason Ekstrandafaf5922017-08-25 10:52:19 -0700500 ret = drm_syncobj_find_fence(file_private, handle, &fence);
Dave Airlie3ee45a32017-04-26 04:09:02 +0100501 if (ret)
502 goto err_put_fd;
503
504 sync_file = sync_file_create(fence);
505
506 dma_fence_put(fence);
507
508 if (!sync_file) {
509 ret = -EINVAL;
510 goto err_put_fd;
511 }
512
513 fd_install(fd, sync_file->file);
514
515 *p_fd = fd;
516 return 0;
517err_put_fd:
518 put_unused_fd(fd);
519 return ret;
520}
/**
 * drm_syncobj_open - initializes syncobj file-private structures at devnode open time
 * @file_private: drm file-private structure to set up
 *
 * Called at device open time, sets up the structure for handling refcounting
 * of sync objects.
 */
void
drm_syncobj_open(struct drm_file *file_private)
{
	idr_init(&file_private->syncobj_idr);
	spin_lock_init(&file_private->syncobj_table_lock);
}
534
/* idr_for_each() callback: drop the reference held by each idr entry.
 * @id and @data are unused; always returns 0 to continue iteration.
 */
static int
drm_syncobj_release_handle(int id, void *ptr, void *data)
{
	struct drm_syncobj *syncobj = ptr;

	drm_syncobj_put(syncobj);
	return 0;
}
543
/**
 * drm_syncobj_release - release file-private sync object resources
 * @file_private: drm file-private structure to clean up
 *
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_syncobj_release(struct drm_file *file_private)
{
	idr_for_each(&file_private->syncobj_idr,
		     &drm_syncobj_release_handle, file_private);
	idr_destroy(&file_private->syncobj_idr);
}
559
/* DRM_IOCTL_SYNCOBJ_CREATE: create a syncobj and return its handle. */
int
drm_syncobj_create_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_private)
{
	struct drm_syncobj_create *args = data;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		return -ENODEV;

	/* reject unknown flags; only CREATE_SIGNALED is implemented */
	if (args->flags & ~DRM_SYNCOBJ_CREATE_SIGNALED)
		return -EINVAL;

	return drm_syncobj_create_as_handle(file_private,
					    &args->handle, args->flags);
}
576
/* DRM_IOCTL_SYNCOBJ_DESTROY: drop the handle (and its idr reference). */
int
drm_syncobj_destroy_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_private)
{
	struct drm_syncobj_destroy *args = data;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		return -ENODEV;

	/* make sure padding is empty */
	if (args->pad)
		return -EINVAL;
	return drm_syncobj_destroy(file_private, args->handle);
}
591
592int
593drm_syncobj_handle_to_fd_ioctl(struct drm_device *dev, void *data,
594 struct drm_file *file_private)
595{
596 struct drm_syncobj_handle *args = data;
597
598 if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
599 return -ENODEV;
600
Dave Airlie3ee45a32017-04-26 04:09:02 +0100601 if (args->pad)
Dave Airliee9083422017-04-04 13:26:24 +1000602 return -EINVAL;
603
Dave Airlie3ee45a32017-04-26 04:09:02 +0100604 if (args->flags != 0 &&
605 args->flags != DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE)
606 return -EINVAL;
607
608 if (args->flags & DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE)
609 return drm_syncobj_export_sync_file(file_private, args->handle,
610 &args->fd);
611
Dave Airliee9083422017-04-04 13:26:24 +1000612 return drm_syncobj_handle_to_fd(file_private, args->handle,
613 &args->fd);
614}
615
616int
617drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data,
618 struct drm_file *file_private)
619{
620 struct drm_syncobj_handle *args = data;
621
622 if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
623 return -ENODEV;
624
Dave Airlie3ee45a32017-04-26 04:09:02 +0100625 if (args->pad)
Dave Airliee9083422017-04-04 13:26:24 +1000626 return -EINVAL;
627
Dave Airlie3ee45a32017-04-26 04:09:02 +0100628 if (args->flags != 0 &&
629 args->flags != DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE)
630 return -EINVAL;
631
632 if (args->flags & DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE)
633 return drm_syncobj_import_sync_file_fence(file_private,
634 args->fd,
635 args->handle);
636
Dave Airliee9083422017-04-04 13:26:24 +1000637 return drm_syncobj_fd_to_handle(file_private, args->fd,
638 &args->handle);
639}
Dave Airlie5e60a102017-08-25 10:52:22 -0700640
/* Per-syncobj bookkeeping for one waiter in the wait ioctl. */
struct syncobj_wait_entry {
	struct task_struct *task;	/* waiter to wake */
	struct dma_fence *fence;	/* current fence (NULL until submit) */
	struct dma_fence_cb fence_cb;	/* signal notification, if armed */
	struct drm_syncobj_cb syncobj_cb; /* fence-replacement notification */
};
647
/* dma_fence callback: a waited-on fence signaled; wake the waiter.
 * @fence is unused — the entry is recovered from the embedded cb.
 */
static void syncobj_wait_fence_func(struct dma_fence *fence,
				    struct dma_fence_cb *cb)
{
	struct syncobj_wait_entry *wait =
		container_of(cb, struct syncobj_wait_entry, fence_cb);

	wake_up_process(wait->task);
}
656
/* syncobj callback: a fence was installed on a syncobj we are waiting on
 * with WAIT_FOR_SUBMIT; grab it and wake the waiter.
 */
static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj,
				      struct drm_syncobj_cb *cb)
{
	struct syncobj_wait_entry *wait =
		container_of(cb, struct syncobj_wait_entry, syncobj_cb);

	/* This happens inside the syncobj lock */
	wait->fence = dma_fence_get(rcu_dereference_protected(syncobj->fence,
							      lockdep_is_held(&syncobj->lock)));
	wake_up_process(wait->task);
}
668
/* Core wait loop for the wait ioctl.
 *
 * Waits on @count syncobjs for @timeout jiffies.  With WAIT_ALL, waits
 * until every fence signals; otherwise returns as soon as any one does,
 * reporting its index in *@idx.  With WAIT_FOR_SUBMIT, syncobjs without a
 * fence are waited on until a fence is installed; otherwise such a
 * syncobj yields -EINVAL.
 *
 * Returns remaining jiffies (>= 1) on success, 0 on timeout, or a
 * negative errno (-EINVAL, -ENOMEM, -ERESTARTSYS).
 */
static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
						  uint32_t count,
						  uint32_t flags,
						  signed long timeout,
						  uint32_t *idx)
{
	struct syncobj_wait_entry *entries;
	struct dma_fence *fence;
	signed long ret;
	uint32_t signaled_count, i;

	entries = kcalloc(count, sizeof(*entries), GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

	/* Walk the list of sync objects and initialize entries.  We do
	 * this up-front so that we can properly return -EINVAL if there is
	 * a syncobj with a missing fence and then never have the chance of
	 * returning -EINVAL again.
	 */
	signaled_count = 0;
	for (i = 0; i < count; ++i) {
		entries[i].task = current;
		entries[i].fence = drm_syncobj_fence_get(syncobjs[i]);
		if (!entries[i].fence) {
			if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
				continue;
			} else {
				ret = -EINVAL;
				goto cleanup_entries;
			}
		}

		if (dma_fence_is_signaled(entries[i].fence)) {
			if (signaled_count == 0 && idx)
				*idx = i;
			signaled_count++;
		}
	}

	/* Initialize ret to the max of timeout and 1.  That way, the
	 * default return value indicates a successful wait and not a
	 * timeout.
	 */
	ret = max_t(signed long, timeout, 1);

	/* Already satisfied?  (any-mode needs one; all-mode needs all) */
	if (signaled_count == count ||
	    (signaled_count > 0 &&
	     !(flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL)))
		goto cleanup_entries;

	/* There's a very annoying laxness in the dma_fence API here, in
	 * that backends are not required to automatically report when a
	 * fence is signaled prior to fence->ops->enable_signaling() being
	 * called.  So here if we fail to match signaled_count, we need to
	 * fallthough and try a 0 timeout wait!
	 */

	if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
		/* Re-fetch each fence and arm a replacement callback so a
		 * later submit wakes us up.
		 */
		for (i = 0; i < count; ++i) {
			drm_syncobj_fence_get_or_add_callback(syncobjs[i],
							      &entries[i].fence,
							      &entries[i].syncobj_cb,
							      syncobj_wait_syncobj_func);
		}
	}

	do {
		/* Set state before the final signaled check so a wakeup
		 * between check and schedule is not lost. */
		set_current_state(TASK_INTERRUPTIBLE);

		signaled_count = 0;
		for (i = 0; i < count; ++i) {
			fence = entries[i].fence;
			if (!fence)
				continue;

			/* dma_fence_add_callback() returning nonzero means
			 * the fence is already signaled. */
			if (dma_fence_is_signaled(fence) ||
			    (!entries[i].fence_cb.func &&
			     dma_fence_add_callback(fence,
						    &entries[i].fence_cb,
						    syncobj_wait_fence_func))) {
				/* The fence has been signaled */
				if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL) {
					signaled_count++;
				} else {
					if (idx)
						*idx = i;
					goto done_waiting;
				}
			}
		}

		if (signaled_count == count)
			goto done_waiting;

		if (timeout == 0) {
			/* If we are doing a 0 timeout wait and we got
			 * here, then we just timed out.
			 */
			ret = 0;
			goto done_waiting;
		}

		ret = schedule_timeout(ret);

		if (ret > 0 && signal_pending(current))
			ret = -ERESTARTSYS;
	} while (ret > 0);

done_waiting:
	__set_current_state(TASK_RUNNING);

cleanup_entries:
	/* Tear down whatever callbacks were armed and drop fence refs. */
	for (i = 0; i < count; ++i) {
		if (entries[i].syncobj_cb.func)
			drm_syncobj_remove_callback(syncobjs[i],
						    &entries[i].syncobj_cb);
		if (entries[i].fence_cb.func)
			dma_fence_remove_callback(entries[i].fence,
						  &entries[i].fence_cb);
		dma_fence_put(entries[i].fence);
	}
	kfree(entries);

	return ret;
}
795
Dave Airlie5e60a102017-08-25 10:52:22 -0700796/**
797 * drm_timeout_abs_to_jiffies - calculate jiffies timeout from absolute value
798 *
799 * @timeout_nsec: timeout nsec component in ns, 0 for poll
800 *
801 * Calculate the timeout in jiffies from an absolute time in sec/nsec.
802 */
803static signed long drm_timeout_abs_to_jiffies(int64_t timeout_nsec)
804{
805 ktime_t abs_timeout, now;
806 u64 timeout_ns, timeout_jiffies64;
807
808 /* make 0 timeout means poll - absolute 0 doesn't seem valid */
809 if (timeout_nsec == 0)
810 return 0;
811
812 abs_timeout = ns_to_ktime(timeout_nsec);
813 now = ktime_get();
814
815 if (!ktime_after(abs_timeout, now))
816 return 0;
817
818 timeout_ns = ktime_to_ns(ktime_sub(abs_timeout, now));
819
820 timeout_jiffies64 = nsecs_to_jiffies64(timeout_ns);
821 /* clamp timeout to avoid infinite timeout */
822 if (timeout_jiffies64 >= MAX_SCHEDULE_TIMEOUT - 1)
823 return MAX_SCHEDULE_TIMEOUT - 1;
824
825 return timeout_jiffies64 + 1;
826}
827
Jason Ekstrande7aca5032017-08-25 10:52:24 -0700828static int drm_syncobj_array_wait(struct drm_device *dev,
829 struct drm_file *file_private,
830 struct drm_syncobj_wait *wait,
831 struct drm_syncobj **syncobjs)
Dave Airlie5e60a102017-08-25 10:52:22 -0700832{
833 signed long timeout = drm_timeout_abs_to_jiffies(wait->timeout_nsec);
834 signed long ret = 0;
835 uint32_t first = ~0;
836
Jason Ekstrande7aca5032017-08-25 10:52:24 -0700837 ret = drm_syncobj_array_wait_timeout(syncobjs,
838 wait->count_handles,
839 wait->flags,
840 timeout, &first);
Dave Airlie5e60a102017-08-25 10:52:22 -0700841 if (ret < 0)
842 return ret;
843
844 wait->first_signaled = first;
845 if (ret == 0)
846 return -ETIME;
847 return 0;
848}
849
Jason Ekstrand3e6fb722017-08-25 10:52:26 -0700850static int drm_syncobj_array_find(struct drm_file *file_private,
Ville Syrjälä9e554462017-09-01 19:53:26 +0300851 void __user *user_handles,
852 uint32_t count_handles,
Jason Ekstrand3e6fb722017-08-25 10:52:26 -0700853 struct drm_syncobj ***syncobjs_out)
854{
855 uint32_t i, *handles;
856 struct drm_syncobj **syncobjs;
857 int ret;
858
859 handles = kmalloc_array(count_handles, sizeof(*handles), GFP_KERNEL);
860 if (handles == NULL)
861 return -ENOMEM;
862
863 if (copy_from_user(handles, user_handles,
864 sizeof(uint32_t) * count_handles)) {
865 ret = -EFAULT;
866 goto err_free_handles;
867 }
868
869 syncobjs = kmalloc_array(count_handles, sizeof(*syncobjs), GFP_KERNEL);
870 if (syncobjs == NULL) {
871 ret = -ENOMEM;
872 goto err_free_handles;
873 }
874
875 for (i = 0; i < count_handles; i++) {
876 syncobjs[i] = drm_syncobj_find(file_private, handles[i]);
877 if (!syncobjs[i]) {
878 ret = -ENOENT;
879 goto err_put_syncobjs;
880 }
881 }
882
883 kfree(handles);
884 *syncobjs_out = syncobjs;
885 return 0;
886
887err_put_syncobjs:
888 while (i-- > 0)
889 drm_syncobj_put(syncobjs[i]);
890 kfree(syncobjs);
891err_free_handles:
892 kfree(handles);
893
894 return ret;
895}
896
897static void drm_syncobj_array_free(struct drm_syncobj **syncobjs,
898 uint32_t count)
899{
900 uint32_t i;
901 for (i = 0; i < count; i++)
902 drm_syncobj_put(syncobjs[i]);
903 kfree(syncobjs);
904}
905
Dave Airlie5e60a102017-08-25 10:52:22 -0700906int
907drm_syncobj_wait_ioctl(struct drm_device *dev, void *data,
908 struct drm_file *file_private)
909{
910 struct drm_syncobj_wait *args = data;
Jason Ekstrande7aca5032017-08-25 10:52:24 -0700911 struct drm_syncobj **syncobjs;
Dave Airlie5e60a102017-08-25 10:52:22 -0700912 int ret = 0;
Dave Airlie5e60a102017-08-25 10:52:22 -0700913
914 if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
915 return -ENODEV;
916
Jason Ekstrande7aca5032017-08-25 10:52:24 -0700917 if (args->flags & ~(DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL |
918 DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT))
Dave Airlie5e60a102017-08-25 10:52:22 -0700919 return -EINVAL;
920
921 if (args->count_handles == 0)
922 return -EINVAL;
923
Jason Ekstrand3e6fb722017-08-25 10:52:26 -0700924 ret = drm_syncobj_array_find(file_private,
925 u64_to_user_ptr(args->handles),
926 args->count_handles,
927 &syncobjs);
928 if (ret < 0)
929 return ret;
Dave Airlie5e60a102017-08-25 10:52:22 -0700930
Jason Ekstrande7aca5032017-08-25 10:52:24 -0700931 ret = drm_syncobj_array_wait(dev, file_private,
932 args, syncobjs);
Dave Airlie5e60a102017-08-25 10:52:22 -0700933
Jason Ekstrand3e6fb722017-08-25 10:52:26 -0700934 drm_syncobj_array_free(syncobjs, args->count_handles);
Dave Airlie5e60a102017-08-25 10:52:22 -0700935
936 return ret;
937}
Jason Ekstrandaa4035d2017-08-28 14:10:27 -0700938
939int
940drm_syncobj_reset_ioctl(struct drm_device *dev, void *data,
941 struct drm_file *file_private)
942{
943 struct drm_syncobj_array *args = data;
944 struct drm_syncobj **syncobjs;
945 uint32_t i;
946 int ret;
947
948 if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
949 return -ENODEV;
950
951 if (args->pad != 0)
952 return -EINVAL;
953
954 if (args->count_handles == 0)
955 return -EINVAL;
956
957 ret = drm_syncobj_array_find(file_private,
958 u64_to_user_ptr(args->handles),
959 args->count_handles,
960 &syncobjs);
961 if (ret < 0)
962 return ret;
963
964 for (i = 0; i < args->count_handles; i++)
965 drm_syncobj_replace_fence(syncobjs[i], NULL);
966
967 drm_syncobj_array_free(syncobjs, args->count_handles);
968
969 return 0;
970}
Jason Ekstrandffa94432017-08-28 14:10:28 -0700971
972int
973drm_syncobj_signal_ioctl(struct drm_device *dev, void *data,
974 struct drm_file *file_private)
975{
976 struct drm_syncobj_array *args = data;
977 struct drm_syncobj **syncobjs;
978 uint32_t i;
979 int ret;
980
981 if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
982 return -ENODEV;
983
984 if (args->pad != 0)
985 return -EINVAL;
986
987 if (args->count_handles == 0)
988 return -EINVAL;
989
990 ret = drm_syncobj_array_find(file_private,
991 u64_to_user_ptr(args->handles),
992 args->count_handles,
993 &syncobjs);
994 if (ret < 0)
995 return ret;
996
997 for (i = 0; i < args->count_handles; i++) {
998 ret = drm_syncobj_assign_null_handle(syncobjs[i]);
999 if (ret < 0)
1000 break;
1001 }
1002
1003 drm_syncobj_array_free(syncobjs, args->count_handles);
1004
1005 return ret;
1006}