/*
 * Copyright 2017 Red Hat
 * Parts ported from amdgpu (fence wait code).
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *
 */

/**
 * DOC: Overview
 *
 * DRM synchronisation objects (syncobj) are persistent objects that contain
 * an optional fence. The fence can be replaced with a new fence, or set to
 * NULL.
 *
 * A syncobj can be waited upon, in which case the wait is performed on the
 * underlying fence.
 *
 * A syncobj can be exported to a file descriptor and imported back again.
 * These fds are opaque and have no use other than passing the syncobj
 * between processes.
 *
 * Their primary use-case is to implement Vulkan fences and semaphores.
 *
 * A syncobj has a kref reference count, and also has an optional file.
 * The file is only created once the syncobj is exported.
 * The file takes a reference on the kref.
 */

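/*
 * A minimal sketch (not taken from this file) of how a driver's submit path
 * could attach a just-created fence to a syncobj handle passed in by
 * userspace. "args->out_sync_handle" and "fence" are hypothetical names from
 * the driver's own ioctl; "file_private" is the driver's struct drm_file:
 *
 *        struct drm_syncobj *syncobj;
 *
 *        syncobj = drm_syncobj_find(file_private, args->out_sync_handle);
 *        if (!syncobj)
 *                return -ENOENT;
 *        drm_syncobj_replace_fence(syncobj, fence);
 *        drm_syncobj_put(syncobj);
 */
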
#include <drm/drmP.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/sync_file.h>

#include "drm_internal.h"
#include <drm/drm_syncobj.h>

/**
 * drm_syncobj_find - lookup and reference a sync object.
 * @file_private: drm file private pointer
 * @handle: sync object handle to lookup.
 *
 * Returns a reference to the syncobj pointed to by handle or NULL. The
 * reference must be released by calling drm_syncobj_put().
 */
struct drm_syncobj *drm_syncobj_find(struct drm_file *file_private,
                                     u32 handle)
{
        struct drm_syncobj *syncobj;

        spin_lock(&file_private->syncobj_table_lock);

        /* Check if we currently have a reference on the object */
        syncobj = idr_find(&file_private->syncobj_idr, handle);
        if (syncobj)
                drm_syncobj_get(syncobj);

        spin_unlock(&file_private->syncobj_table_lock);

        return syncobj;
}
EXPORT_SYMBOL(drm_syncobj_find);

/**
 * drm_syncobj_replace_fence - replace fence in a sync object.
 * @syncobj: Sync object to replace fence in
 * @fence: fence to install in the sync object, or NULL to clear it.
 *
 * This replaces the fence on a sync object.
 */
void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
                               struct dma_fence *fence)
{
        struct dma_fence *old_fence;

        if (fence)
                dma_fence_get(fence);
        old_fence = xchg(&syncobj->fence, fence);

        dma_fence_put(old_fence);
}
EXPORT_SYMBOL(drm_syncobj_replace_fence);

/**
 * drm_syncobj_find_fence - lookup and reference the fence in a sync object
 * @file_private: drm file private pointer
 * @handle: sync object handle to lookup.
 * @fence: out parameter for the fence
 *
 * Looks up the sync object identified by @handle, takes a reference on its
 * current fence and stores it in @fence. Returns -ENOENT if the handle is
 * invalid and -EINVAL if the sync object has no fence attached. On success
 * the caller must release the fence with dma_fence_put().
 */
int drm_syncobj_find_fence(struct drm_file *file_private,
                           u32 handle,
                           struct dma_fence **fence)
{
        struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
        int ret = 0;

        if (!syncobj)
                return -ENOENT;

        *fence = drm_syncobj_fence_get(syncobj);
        if (!*fence)
                ret = -EINVAL;

        drm_syncobj_put(syncobj);
        return ret;
}
EXPORT_SYMBOL(drm_syncobj_find_fence);

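/*
 * A minimal sketch (not taken from this file) of a driver resolving a
 * wait-semaphore handle into a fence it can wait on before starting GPU
 * work. "args->in_sync_handle" is a hypothetical field of the driver's own
 * submit args:
 *
 *        struct dma_fence *in_fence;
 *        int ret;
 *
 *        ret = drm_syncobj_find_fence(file_private, args->in_sync_handle,
 *                                     &in_fence);
 *        if (ret)
 *                return ret;
 *        ret = dma_fence_wait(in_fence, true);
 *        dma_fence_put(in_fence);
 *        if (ret)
 *                return ret;
 */
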
/**
 * drm_syncobj_free - free a sync object.
 * @kref: kref to free.
 *
 * Only to be called from kref_put in drm_syncobj_put.
 */
void drm_syncobj_free(struct kref *kref)
{
        struct drm_syncobj *syncobj = container_of(kref,
                                                   struct drm_syncobj,
                                                   refcount);
        dma_fence_put(syncobj->fence);
        kfree(syncobj);
}
EXPORT_SYMBOL(drm_syncobj_free);

static int drm_syncobj_create(struct drm_file *file_private,
                              u32 *handle)
{
        int ret;
        struct drm_syncobj *syncobj;

        syncobj = kzalloc(sizeof(struct drm_syncobj), GFP_KERNEL);
        if (!syncobj)
                return -ENOMEM;

        kref_init(&syncobj->refcount);

        idr_preload(GFP_KERNEL);
        spin_lock(&file_private->syncobj_table_lock);
        ret = idr_alloc(&file_private->syncobj_idr, syncobj, 1, 0, GFP_NOWAIT);
        spin_unlock(&file_private->syncobj_table_lock);

        idr_preload_end();

        if (ret < 0) {
                drm_syncobj_put(syncobj);
                return ret;
        }

        *handle = ret;
        return 0;
}

static int drm_syncobj_destroy(struct drm_file *file_private,
                               u32 handle)
{
        struct drm_syncobj *syncobj;

        spin_lock(&file_private->syncobj_table_lock);
        syncobj = idr_remove(&file_private->syncobj_idr, handle);
        spin_unlock(&file_private->syncobj_table_lock);

        if (!syncobj)
                return -EINVAL;

        drm_syncobj_put(syncobj);
        return 0;
}

static int drm_syncobj_file_release(struct inode *inode, struct file *file)
{
        struct drm_syncobj *syncobj = file->private_data;

        drm_syncobj_put(syncobj);
        return 0;
}

static const struct file_operations drm_syncobj_file_fops = {
        .release = drm_syncobj_file_release,
};

static int drm_syncobj_alloc_file(struct drm_syncobj *syncobj)
{
        struct file *file = anon_inode_getfile("syncobj_file",
                                               &drm_syncobj_file_fops,
                                               syncobj, 0);
        if (IS_ERR(file))
                return PTR_ERR(file);

        drm_syncobj_get(syncobj);
        if (cmpxchg(&syncobj->file, NULL, file)) {
                /* lost the race */
                fput(file);
        }

        return 0;
}

static int drm_syncobj_handle_to_fd(struct drm_file *file_private,
                                    u32 handle, int *p_fd)
{
        struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
        int ret;
        int fd;

        if (!syncobj)
                return -EINVAL;

        fd = get_unused_fd_flags(O_CLOEXEC);
        if (fd < 0) {
                drm_syncobj_put(syncobj);
                return fd;
        }

        if (!syncobj->file) {
                ret = drm_syncobj_alloc_file(syncobj);
                if (ret)
                        goto out_put_fd;
        }
        fd_install(fd, syncobj->file);
        drm_syncobj_put(syncobj);
        *p_fd = fd;
        return 0;
out_put_fd:
        put_unused_fd(fd);
        drm_syncobj_put(syncobj);
        return ret;
}

static struct drm_syncobj *drm_syncobj_fdget(int fd)
{
        struct file *file = fget(fd);

        if (!file)
                return NULL;
        if (file->f_op != &drm_syncobj_file_fops)
                goto err;

        return file->private_data;
err:
        fput(file);
        return NULL;
}

static int drm_syncobj_fd_to_handle(struct drm_file *file_private,
                                    int fd, u32 *handle)
{
        struct drm_syncobj *syncobj = drm_syncobj_fdget(fd);
        int ret;

        if (!syncobj)
                return -EINVAL;

        /* take a reference to put in the idr */
        drm_syncobj_get(syncobj);

        idr_preload(GFP_KERNEL);
        spin_lock(&file_private->syncobj_table_lock);
        ret = idr_alloc(&file_private->syncobj_idr, syncobj, 1, 0, GFP_NOWAIT);
        spin_unlock(&file_private->syncobj_table_lock);
        idr_preload_end();

        if (ret < 0) {
                fput(syncobj->file);
                return ret;
        }
        *handle = ret;
        return 0;
}

int drm_syncobj_import_sync_file_fence(struct drm_file *file_private,
                                       int fd, int handle)
{
        struct dma_fence *fence = sync_file_get_fence(fd);
        struct drm_syncobj *syncobj;

        if (!fence)
                return -EINVAL;

        syncobj = drm_syncobj_find(file_private, handle);
        if (!syncobj) {
                dma_fence_put(fence);
                return -ENOENT;
        }

        drm_syncobj_replace_fence(syncobj, fence);
        dma_fence_put(fence);
        drm_syncobj_put(syncobj);
        return 0;
}

int drm_syncobj_export_sync_file(struct drm_file *file_private,
                                 int handle, int *p_fd)
{
        int ret;
        struct dma_fence *fence;
        struct sync_file *sync_file;
        int fd = get_unused_fd_flags(O_CLOEXEC);

        if (fd < 0)
                return fd;

        ret = drm_syncobj_find_fence(file_private, handle, &fence);
        if (ret)
                goto err_put_fd;

        sync_file = sync_file_create(fence);

        dma_fence_put(fence);

        if (!sync_file) {
                ret = -EINVAL;
                goto err_put_fd;
        }

        fd_install(fd, sync_file->file);

        *p_fd = fd;
        return 0;
err_put_fd:
        put_unused_fd(fd);
        return ret;
}

/**
 * drm_syncobj_open - initializes syncobj file-private structures at devnode open time
 * @file_private: drm file-private structure to set up
 *
 * Called at device open time, sets up the structure for handling refcounting
 * of sync objects.
 */
void
drm_syncobj_open(struct drm_file *file_private)
{
        idr_init(&file_private->syncobj_idr);
        spin_lock_init(&file_private->syncobj_table_lock);
}

static int
drm_syncobj_release_handle(int id, void *ptr, void *data)
{
        struct drm_syncobj *syncobj = ptr;

        drm_syncobj_put(syncobj);
        return 0;
}

/**
 * drm_syncobj_release - release file-private sync object resources
 * @file_private: drm file-private structure to clean up
 *
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_syncobj_release(struct drm_file *file_private)
{
        idr_for_each(&file_private->syncobj_idr,
                     &drm_syncobj_release_handle, file_private);
        idr_destroy(&file_private->syncobj_idr);
}

int
drm_syncobj_create_ioctl(struct drm_device *dev, void *data,
                         struct drm_file *file_private)
{
        struct drm_syncobj_create *args = data;

        if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
                return -ENODEV;

        /* no valid flags yet */
        if (args->flags)
                return -EINVAL;

        return drm_syncobj_create(file_private,
                                  &args->handle);
}

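/*
 * A userspace-side sketch (not kernel code) of creating a syncobj through
 * the ioctl above and destroying it again. "drm_fd" is an assumed,
 * already-open DRM file descriptor and drmIoctl() is libdrm's ioctl wrapper:
 *
 *        struct drm_syncobj_create create = { .flags = 0 };
 *        struct drm_syncobj_destroy destroy = { 0 };
 *
 *        if (drmIoctl(drm_fd, DRM_IOCTL_SYNCOBJ_CREATE, &create))
 *                return -errno;
 *        destroy.handle = create.handle;
 *        drmIoctl(drm_fd, DRM_IOCTL_SYNCOBJ_DESTROY, &destroy);
 */
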
int
drm_syncobj_destroy_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *file_private)
{
        struct drm_syncobj_destroy *args = data;

        if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
                return -ENODEV;

        /* make sure padding is empty */
        if (args->pad)
                return -EINVAL;
        return drm_syncobj_destroy(file_private, args->handle);
}

int
drm_syncobj_handle_to_fd_ioctl(struct drm_device *dev, void *data,
                               struct drm_file *file_private)
{
        struct drm_syncobj_handle *args = data;

        if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
                return -ENODEV;

        if (args->pad)
                return -EINVAL;

        if (args->flags != 0 &&
            args->flags != DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE)
                return -EINVAL;

        if (args->flags & DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE)
                return drm_syncobj_export_sync_file(file_private, args->handle,
                                                    &args->fd);

        return drm_syncobj_handle_to_fd(file_private, args->handle,
                                        &args->fd);
}

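/*
 * A userspace-side sketch (not kernel code) of exporting a syncobj's current
 * fence as a sync_file fd via DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD above. "drm_fd"
 * and "handle" are assumed to exist already; drmIoctl() is libdrm's wrapper:
 *
 *        struct drm_syncobj_handle args = {
 *                .handle = handle,
 *                .flags = DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE,
 *        };
 *
 *        if (drmIoctl(drm_fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &args))
 *                return -errno;
 *        sync_file_fd = args.fd;
 */
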
int
drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data,
                               struct drm_file *file_private)
{
        struct drm_syncobj_handle *args = data;

        if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
                return -ENODEV;

        if (args->pad)
                return -EINVAL;

        if (args->flags != 0 &&
            args->flags != DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE)
                return -EINVAL;

        if (args->flags & DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE)
                return drm_syncobj_import_sync_file_fence(file_private,
                                                          args->fd,
                                                          args->handle);

        return drm_syncobj_fd_to_handle(file_private, args->fd,
                                        &args->handle);
}

/**
 * drm_timeout_abs_to_jiffies - calculate jiffies timeout from absolute value
 *
 * @timeout_nsec: absolute timeout in nanoseconds, 0 for poll
 *
 * Calculate the relative timeout in jiffies from an absolute timestamp in
 * nanoseconds.
 */
static signed long drm_timeout_abs_to_jiffies(int64_t timeout_nsec)
{
        ktime_t abs_timeout, now;
        u64 timeout_ns, timeout_jiffies64;

        /* a 0 timeout means poll - an absolute timeout of 0 doesn't seem valid anyway */
        if (timeout_nsec == 0)
                return 0;

        abs_timeout = ns_to_ktime(timeout_nsec);
        now = ktime_get();

        if (!ktime_after(abs_timeout, now))
                return 0;

        timeout_ns = ktime_to_ns(ktime_sub(abs_timeout, now));

        timeout_jiffies64 = nsecs_to_jiffies64(timeout_ns);
        /* clamp timeout to avoid an infinite timeout */
        if (timeout_jiffies64 >= MAX_SCHEDULE_TIMEOUT - 1)
                return MAX_SCHEDULE_TIMEOUT - 1;

        return timeout_jiffies64 + 1;
}

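/*
 * The wait ioctl below takes an absolute CLOCK_MONOTONIC timestamp (the
 * ktime_get() time domain). A userspace-side sketch (not kernel code) of
 * turning a relative timeout "rel_ns" into the expected absolute value:
 *
 *        struct timespec ts;
 *
 *        clock_gettime(CLOCK_MONOTONIC, &ts);
 *        wait.timeout_nsec = (int64_t)ts.tv_sec * 1000000000ll +
 *                            ts.tv_nsec + rel_ns;
 */
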
static int drm_syncobj_wait_fences(struct drm_device *dev,
                                   struct drm_file *file_private,
                                   struct drm_syncobj_wait *wait,
                                   struct dma_fence **fences)
{
        signed long timeout = drm_timeout_abs_to_jiffies(wait->timeout_nsec);
        signed long ret = 0;
        uint32_t first = ~0;

        if (wait->flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL) {
                uint32_t i;
                for (i = 0; i < wait->count_handles; i++) {
                        ret = dma_fence_wait_timeout(fences[i], true, timeout);

                        /* Various dma_fence wait callbacks will return
                         * ENOENT to indicate that the fence has already
                         * been signaled.  We need to sanitize this to 0 so
                         * we don't return early and the client doesn't see
                         * an unexpected error.
                         */
                        if (ret == -ENOENT)
                                ret = 0;

                        if (ret < 0)
                                return ret;
                        if (ret == 0)
                                break;
                        timeout = ret;
                }
                first = 0;
        } else {
                ret = dma_fence_wait_any_timeout(fences,
                                                 wait->count_handles,
                                                 true, timeout,
                                                 &first);
        }

        if (ret < 0)
                return ret;

        wait->first_signaled = first;
        if (ret == 0)
                return -ETIME;
        return 0;
}

int
drm_syncobj_wait_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *file_private)
{
        struct drm_syncobj_wait *args = data;
        uint32_t *handles;
        struct dma_fence **fences;
        int ret = 0;
        uint32_t i;

        if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
                return -ENODEV;

        if (args->flags != 0 && args->flags != DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL)
                return -EINVAL;

        if (args->count_handles == 0)
                return -EINVAL;

        /* Get the handles from userspace */
        handles = kmalloc_array(args->count_handles, sizeof(uint32_t),
                                GFP_KERNEL);
        if (handles == NULL)
                return -ENOMEM;

        if (copy_from_user(handles,
                           u64_to_user_ptr(args->handles),
                           sizeof(uint32_t) * args->count_handles)) {
                ret = -EFAULT;
                goto err_free_handles;
        }

        fences = kcalloc(args->count_handles,
                         sizeof(struct dma_fence *), GFP_KERNEL);
        if (!fences) {
                ret = -ENOMEM;
                goto err_free_handles;
        }

        for (i = 0; i < args->count_handles; i++) {
                ret = drm_syncobj_find_fence(file_private, handles[i],
                                             &fences[i]);
                if (ret)
                        goto err_free_fence_array;
        }

        ret = drm_syncobj_wait_fences(dev, file_private,
                                      args, fences);

err_free_fence_array:
        for (i = 0; i < args->count_handles; i++)
                dma_fence_put(fences[i]);
        kfree(fences);
err_free_handles:
        kfree(handles);

        return ret;
}
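
/*
 * A userspace-side sketch (not kernel code) of waiting for two syncobjs to
 * both signal before an absolute deadline "abs_ns", using the ioctl above.
 * "drm_fd" and the handle array "h" are assumed to exist already:
 *
 *        struct drm_syncobj_wait wait = {
 *                .handles = (uint64_t)(uintptr_t)h,
 *                .timeout_nsec = abs_ns,
 *                .count_handles = 2,
 *                .flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL,
 *        };
 *
 *        if (drmIoctl(drm_fd, DRM_IOCTL_SYNCOBJ_WAIT, &wait))
 *                return -errno;
 */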