blob: 8bdb4a3bd7bf130d7f2e8fc83dcf4c8eaeb8db74 [file] [log] [blame]
Dave Airliee9083422017-04-04 13:26:24 +10001/*
2 * Copyright 2017 Red Hat
Dave Airlie5e60a102017-08-25 10:52:22 -07003 * Parts ported from amdgpu (fence wait code).
4 * Copyright 2016 Advanced Micro Devices, Inc.
Dave Airliee9083422017-04-04 13:26:24 +10005 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
22 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
23 * IN THE SOFTWARE.
24 *
25 * Authors:
26 *
27 */
28
29/**
30 * DOC: Overview
31 *
Daniel Vetter924fe8d2017-12-14 21:30:52 +010032 * DRM synchronisation objects (syncobj, see struct &drm_syncobj) are
33 * persistent objects that contain an optional fence. The fence can be updated
34 * with a new fence, or be NULL.
Dave Airliee9083422017-04-04 13:26:24 +100035 *
Dave Airlie5e60a102017-08-25 10:52:22 -070036 * syncobj's can be waited upon, where it will wait for the underlying
37 * fence.
38 *
 * syncobj's can be exported to fd's and back; these fd's are opaque and
 * have no other use case, except passing the syncobj between processes.
41 *
42 * Their primary use-case is to implement Vulkan fences and semaphores.
43 *
44 * syncobj have a kref reference count, but also have an optional file.
45 * The file is only created once the syncobj is exported.
46 * The file takes a reference on the kref.
47 */
48
49#include <drm/drmP.h>
50#include <linux/file.h>
51#include <linux/fs.h>
52#include <linux/anon_inodes.h>
Dave Airlie3ee45a32017-04-26 04:09:02 +010053#include <linux/sync_file.h>
Jason Ekstrande7aca5032017-08-25 10:52:24 -070054#include <linux/sched/signal.h>
Dave Airliee9083422017-04-04 13:26:24 +100055
56#include "drm_internal.h"
57#include <drm/drm_syncobj.h>
58
/*
 * Per-task bookkeeping for one syncobj being waited on by
 * drm_syncobj_array_wait_timeout().
 */
struct syncobj_wait_entry {
	/* Linked into drm_syncobj.cb_list while waiting for a fence to be
	 * installed; list_del_init()'d once a fence arrives or the wait is
	 * torn down.
	 */
	struct list_head node;
	/* The waiting task, woken via wake_up_process(). */
	struct task_struct *task;
	/* Reference to the fence obtained from the syncobj, or NULL if the
	 * syncobj had no fence yet.
	 */
	struct dma_fence *fence;
	/* Callback registered on @fence to wake @task on signal. */
	struct dma_fence_cb fence_cb;
};

static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj,
				      struct syncobj_wait_entry *wait);
68
Dave Airliee9083422017-04-04 13:26:24 +100069/**
70 * drm_syncobj_find - lookup and reference a sync object.
71 * @file_private: drm file private pointer
72 * @handle: sync object handle to lookup.
73 *
Daniel Vetter924fe8d2017-12-14 21:30:52 +010074 * Returns a reference to the syncobj pointed to by handle or NULL. The
75 * reference must be released by calling drm_syncobj_put().
Dave Airliee9083422017-04-04 13:26:24 +100076 */
77struct drm_syncobj *drm_syncobj_find(struct drm_file *file_private,
78 u32 handle)
79{
80 struct drm_syncobj *syncobj;
81
82 spin_lock(&file_private->syncobj_table_lock);
83
84 /* Check if we currently have a reference on the object */
85 syncobj = idr_find(&file_private->syncobj_idr, handle);
86 if (syncobj)
87 drm_syncobj_get(syncobj);
88
89 spin_unlock(&file_private->syncobj_table_lock);
90
91 return syncobj;
92}
93EXPORT_SYMBOL(drm_syncobj_find);
94
/*
 * Attach @wait to @syncobj: if the syncobj already carries a fence, take a
 * reference to it in wait->fence; otherwise queue the entry on cb_list so
 * drm_syncobj_replace_fence() hands it the fence later.  No-op if the entry
 * already holds a fence.
 */
static void drm_syncobj_fence_add_wait(struct drm_syncobj *syncobj,
				       struct syncobj_wait_entry *wait)
{
	if (wait->fence)
		return;

	spin_lock(&syncobj->lock);
	/* We've already tried once to get a fence and failed.  Now that we
	 * have the lock, try one more time just to be sure we don't add a
	 * callback when a fence has already been set.
	 */
	if (syncobj->fence)
		wait->fence = dma_fence_get(
			rcu_dereference_protected(syncobj->fence, 1));
	else
		list_add_tail(&wait->node, &syncobj->cb_list);
	spin_unlock(&syncobj->lock);
}
Eric Anholt131280a2018-11-08 08:04:22 -0800113
/*
 * Detach @wait from @syncobj's callback list if it was ever queued.
 * A NULL node.next means the entry was never list_add()'ed (entries are
 * zero-allocated by the caller — presumably via kcalloc; see
 * drm_syncobj_array_wait_timeout), so there is nothing to remove.
 */
static void drm_syncobj_remove_wait(struct drm_syncobj *syncobj,
				    struct syncobj_wait_entry *wait)
{
	if (!wait->node.next)
		return;

	spin_lock(&syncobj->lock);
	list_del_init(&wait->node);
	spin_unlock(&syncobj->lock);
}
124
/**
 * drm_syncobj_replace_fence - replace fence in a sync object.
 * @syncobj: Sync object to replace fence in
 * @fence: fence to install in sync file.
 *
 * This replaces the fence on a sync object.  @fence may be NULL to clear
 * the syncobj.  Any waiters queued on the syncobj's cb_list are handed the
 * new fence and removed from the list.
 */
void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
			       struct dma_fence *fence)
{
	struct dma_fence *old_fence;
	struct syncobj_wait_entry *cur, *tmp;

	/* Take our reference before publishing the pointer. */
	if (fence)
		dma_fence_get(fence);

	spin_lock(&syncobj->lock);

	old_fence = rcu_dereference_protected(syncobj->fence,
					      lockdep_is_held(&syncobj->lock));
	rcu_assign_pointer(syncobj->fence, fence);

	if (fence != old_fence) {
		/* Wake every pending waiter; each gets its own reference to
		 * the new fence inside syncobj_wait_syncobj_func().
		 */
		list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node) {
			list_del_init(&cur->node);
			syncobj_wait_syncobj_func(syncobj, cur);
		}
	}

	spin_unlock(&syncobj->lock);

	/* Drop the reference the syncobj held on the previous fence. */
	dma_fence_put(old_fence);
}
EXPORT_SYMBOL(drm_syncobj_replace_fence);
159
/**
 * drm_syncobj_assign_null_handle - assign a stub fence to the sync object
 * @syncobj: sync object to assign the fence on
 *
 * Assign an already-signaled stub fence to the sync object.
 */
static void drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj)
{
	struct dma_fence *stub = dma_fence_get_stub();

	/* The syncobj takes its own reference; drop ours afterwards. */
	drm_syncobj_replace_fence(syncobj, stub);
	dma_fence_put(stub);
}
173
Daniel Vetter924fe8d2017-12-14 21:30:52 +0100174/**
175 * drm_syncobj_find_fence - lookup and reference the fence in a sync object
176 * @file_private: drm file private pointer
177 * @handle: sync object handle to lookup.
Chunming Zhou0a6730e2018-08-30 14:48:29 +0800178 * @point: timeline point
Chunming Zhou871edc92018-10-17 15:03:18 +0800179 * @flags: DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT or not
Daniel Vetter924fe8d2017-12-14 21:30:52 +0100180 * @fence: out parameter for the fence
181 *
182 * This is just a convenience function that combines drm_syncobj_find() and
Eric Anholt131280a2018-11-08 08:04:22 -0800183 * drm_syncobj_fence_get().
Daniel Vetter924fe8d2017-12-14 21:30:52 +0100184 *
185 * Returns 0 on success or a negative error value on failure. On success @fence
186 * contains a reference to the fence, which must be released by calling
187 * dma_fence_put().
188 */
Jason Ekstrandafaf5922017-08-25 10:52:19 -0700189int drm_syncobj_find_fence(struct drm_file *file_private,
Chunming Zhou649fdce2018-10-15 16:55:47 +0800190 u32 handle, u64 point, u64 flags,
Jason Ekstrandafaf5922017-08-25 10:52:19 -0700191 struct dma_fence **fence)
Dave Airliee9083422017-04-04 13:26:24 +1000192{
193 struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
Eric Anholt131280a2018-11-08 08:04:22 -0800194 int ret = 0;
Dave Airliee9083422017-04-04 13:26:24 +1000195
Eric Anholt131280a2018-11-08 08:04:22 -0800196 if (!syncobj)
197 return -ENOENT;
198
199 *fence = drm_syncobj_fence_get(syncobj);
200 if (!*fence) {
201 ret = -EINVAL;
202 }
203 drm_syncobj_put(syncobj);
Dave Airliee9083422017-04-04 13:26:24 +1000204 return ret;
205}
Jason Ekstrandafaf5922017-08-25 10:52:19 -0700206EXPORT_SYMBOL(drm_syncobj_find_fence);
Dave Airliee9083422017-04-04 13:26:24 +1000207
/**
 * drm_syncobj_free - free a sync object.
 * @kref: kref to free.
 *
 * Only to be called from kref_put in drm_syncobj_put.
 */
void drm_syncobj_free(struct kref *kref)
{
	struct drm_syncobj *syncobj = container_of(kref,
						   struct drm_syncobj,
						   refcount);
	/* Drop any remaining fence reference before freeing the object. */
	drm_syncobj_replace_fence(syncobj, NULL);
	kfree(syncobj);
}
EXPORT_SYMBOL(drm_syncobj_free);
223
/**
 * drm_syncobj_create - create a new syncobj
 * @out_syncobj: returned syncobj
 * @flags: DRM_SYNCOBJ_* flags
 * @fence: if non-NULL, the syncobj will represent this fence
 *
 * This is the first function to create a sync object. After creating, drivers
 * probably want to make it available to userspace, either through
 * drm_syncobj_get_handle() or drm_syncobj_get_fd().
 *
 * Returns 0 on success or a negative error value on failure.
 */
int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags,
		       struct dma_fence *fence)
{
	struct drm_syncobj *syncobj;

	syncobj = kzalloc(sizeof(struct drm_syncobj), GFP_KERNEL);
	if (!syncobj)
		return -ENOMEM;

	kref_init(&syncobj->refcount);
	INIT_LIST_HEAD(&syncobj->cb_list);
	spin_lock_init(&syncobj->lock);

	/* Start out signaled if requested ... */
	if (flags & DRM_SYNCOBJ_CREATE_SIGNALED)
		drm_syncobj_assign_null_handle(syncobj);

	/* ... but an explicit fence replaces the stub if both are given. */
	if (fence)
		drm_syncobj_replace_fence(syncobj, fence);

	*out_syncobj = syncobj;
	return 0;
}
EXPORT_SYMBOL(drm_syncobj_create);
259
260/**
261 * drm_syncobj_get_handle - get a handle from a syncobj
Daniel Vetter924fe8d2017-12-14 21:30:52 +0100262 * @file_private: drm file private pointer
263 * @syncobj: Sync object to export
264 * @handle: out parameter with the new handle
265 *
266 * Exports a sync object created with drm_syncobj_create() as a handle on
267 * @file_private to userspace.
268 *
269 * Returns 0 on success or a negative error value on failure.
Marek Olšák1321fd22017-09-12 22:42:12 +0200270 */
271int drm_syncobj_get_handle(struct drm_file *file_private,
272 struct drm_syncobj *syncobj, u32 *handle)
273{
274 int ret;
275
276 /* take a reference to put in the idr */
277 drm_syncobj_get(syncobj);
278
Dave Airliee9083422017-04-04 13:26:24 +1000279 idr_preload(GFP_KERNEL);
280 spin_lock(&file_private->syncobj_table_lock);
281 ret = idr_alloc(&file_private->syncobj_idr, syncobj, 1, 0, GFP_NOWAIT);
282 spin_unlock(&file_private->syncobj_table_lock);
283
284 idr_preload_end();
285
286 if (ret < 0) {
287 drm_syncobj_put(syncobj);
288 return ret;
289 }
290
291 *handle = ret;
292 return 0;
293}
Marek Olšák1321fd22017-09-12 22:42:12 +0200294EXPORT_SYMBOL(drm_syncobj_get_handle);
295
296static int drm_syncobj_create_as_handle(struct drm_file *file_private,
297 u32 *handle, uint32_t flags)
298{
299 int ret;
300 struct drm_syncobj *syncobj;
301
302 ret = drm_syncobj_create(&syncobj, flags, NULL);
303 if (ret)
304 return ret;
305
306 ret = drm_syncobj_get_handle(file_private, syncobj, handle);
307 drm_syncobj_put(syncobj);
308 return ret;
309}
Dave Airliee9083422017-04-04 13:26:24 +1000310
/*
 * Remove @handle from the file's idr and drop the reference the idr held.
 * Returns -EINVAL if the handle was not allocated.
 */
static int drm_syncobj_destroy(struct drm_file *file_private,
			       u32 handle)
{
	struct drm_syncobj *syncobj;

	spin_lock(&file_private->syncobj_table_lock);
	syncobj = idr_remove(&file_private->syncobj_idr, handle);
	spin_unlock(&file_private->syncobj_table_lock);

	if (!syncobj)
		return -EINVAL;

	/* Drop the reference the idr table held. */
	drm_syncobj_put(syncobj);
	return 0;
}
326
/*
 * Release hook for the anon-inode file wrapping an exported syncobj: drop
 * the reference the file took in drm_syncobj_get_fd().
 */
static int drm_syncobj_file_release(struct inode *inode, struct file *file)
{
	struct drm_syncobj *syncobj = file->private_data;

	drm_syncobj_put(syncobj);
	return 0;
}

/* File operations for syncobj fds; only release is needed. */
static const struct file_operations drm_syncobj_file_fops = {
	.release = drm_syncobj_file_release,
};
338
Daniel Vetter924fe8d2017-12-14 21:30:52 +0100339/**
340 * drm_syncobj_get_fd - get a file descriptor from a syncobj
341 * @syncobj: Sync object to export
342 * @p_fd: out parameter with the new file descriptor
343 *
344 * Exports a sync object created with drm_syncobj_create() as a file descriptor.
345 *
346 * Returns 0 on success or a negative error value on failure.
347 */
Marek Olšák684fd0a2017-09-12 22:42:13 +0200348int drm_syncobj_get_fd(struct drm_syncobj *syncobj, int *p_fd)
349{
Chris Wilsone7cdf5c2017-12-19 12:07:00 +0000350 struct file *file;
Marek Olšák684fd0a2017-09-12 22:42:13 +0200351 int fd;
352
353 fd = get_unused_fd_flags(O_CLOEXEC);
354 if (fd < 0)
355 return fd;
356
Chris Wilsone7cdf5c2017-12-19 12:07:00 +0000357 file = anon_inode_getfile("syncobj_file",
358 &drm_syncobj_file_fops,
359 syncobj, 0);
360 if (IS_ERR(file)) {
361 put_unused_fd(fd);
362 return PTR_ERR(file);
Marek Olšák684fd0a2017-09-12 22:42:13 +0200363 }
Chris Wilsone7cdf5c2017-12-19 12:07:00 +0000364
365 drm_syncobj_get(syncobj);
366 fd_install(fd, file);
367
Marek Olšák684fd0a2017-09-12 22:42:13 +0200368 *p_fd = fd;
369 return 0;
370}
371EXPORT_SYMBOL(drm_syncobj_get_fd);
372
Dave Airliee9083422017-04-04 13:26:24 +1000373static int drm_syncobj_handle_to_fd(struct drm_file *file_private,
374 u32 handle, int *p_fd)
375{
376 struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
377 int ret;
Dave Airliee9083422017-04-04 13:26:24 +1000378
379 if (!syncobj)
380 return -EINVAL;
381
Marek Olšák684fd0a2017-09-12 22:42:13 +0200382 ret = drm_syncobj_get_fd(syncobj, p_fd);
Dave Airliee9083422017-04-04 13:26:24 +1000383 drm_syncobj_put(syncobj);
384 return ret;
385}
386
/*
 * Import a syncobj fd (created by drm_syncobj_get_fd()) as a new handle on
 * @file_private.  Rejects fds that are not syncobj files.
 */
static int drm_syncobj_fd_to_handle(struct drm_file *file_private,
				    int fd, u32 *handle)
{
	struct drm_syncobj *syncobj;
	struct fd f = fdget(fd);
	int ret;

	if (!f.file)
		return -EINVAL;

	/* Only accept files created through drm_syncobj_get_fd(). */
	if (f.file->f_op != &drm_syncobj_file_fops) {
		fdput(f);
		return -EINVAL;
	}

	/* take a reference to put in the idr */
	syncobj = f.file->private_data;
	drm_syncobj_get(syncobj);

	idr_preload(GFP_KERNEL);
	spin_lock(&file_private->syncobj_table_lock);
	ret = idr_alloc(&file_private->syncobj_idr, syncobj, 1, 0, GFP_NOWAIT);
	spin_unlock(&file_private->syncobj_table_lock);
	idr_preload_end();

	/* idr_alloc() was started at 1, so any valid handle is > 0. */
	if (ret > 0) {
		*handle = ret;
		ret = 0;
	} else
		drm_syncobj_put(syncobj);

	fdput(f);
	return ret;
}
421
Ville Syrjäläa32c94a2017-09-01 19:53:25 +0300422static int drm_syncobj_import_sync_file_fence(struct drm_file *file_private,
423 int fd, int handle)
Dave Airlie3ee45a32017-04-26 04:09:02 +0100424{
425 struct dma_fence *fence = sync_file_get_fence(fd);
426 struct drm_syncobj *syncobj;
427
428 if (!fence)
429 return -EINVAL;
430
431 syncobj = drm_syncobj_find(file_private, handle);
432 if (!syncobj) {
433 dma_fence_put(fence);
434 return -ENOENT;
435 }
436
Christian König0b258ed2018-11-14 14:24:27 +0100437 drm_syncobj_replace_fence(syncobj, fence);
Dave Airlie3ee45a32017-04-26 04:09:02 +0100438 dma_fence_put(fence);
439 drm_syncobj_put(syncobj);
440 return 0;
441}
442
Ville Syrjäläa32c94a2017-09-01 19:53:25 +0300443static int drm_syncobj_export_sync_file(struct drm_file *file_private,
444 int handle, int *p_fd)
Dave Airlie3ee45a32017-04-26 04:09:02 +0100445{
446 int ret;
447 struct dma_fence *fence;
448 struct sync_file *sync_file;
449 int fd = get_unused_fd_flags(O_CLOEXEC);
450
451 if (fd < 0)
452 return fd;
453
Chunming Zhou649fdce2018-10-15 16:55:47 +0800454 ret = drm_syncobj_find_fence(file_private, handle, 0, 0, &fence);
Dave Airlie3ee45a32017-04-26 04:09:02 +0100455 if (ret)
456 goto err_put_fd;
457
458 sync_file = sync_file_create(fence);
459
460 dma_fence_put(fence);
461
462 if (!sync_file) {
463 ret = -EINVAL;
464 goto err_put_fd;
465 }
466
467 fd_install(fd, sync_file->file);
468
469 *p_fd = fd;
470 return 0;
471err_put_fd:
472 put_unused_fd(fd);
473 return ret;
474}
/**
 * drm_syncobj_open - initializes syncobj file-private structures at devnode open time
 * @file_private: drm file-private structure to set up
 *
 * Called at device open time, sets up the structure for handling refcounting
 * of sync objects.
 */
void
drm_syncobj_open(struct drm_file *file_private)
{
	/* Handles start at 1 so 0 can never be a valid syncobj handle. */
	idr_init_base(&file_private->syncobj_idr, 1);
	spin_lock_init(&file_private->syncobj_table_lock);
}
488
/* idr_for_each() callback: drop the idr's reference on one syncobj. */
static int
drm_syncobj_release_handle(int id, void *ptr, void *data)
{
	struct drm_syncobj *obj = ptr;

	drm_syncobj_put(obj);
	return 0;
}
497
/**
 * drm_syncobj_release - release file-private sync object resources
 * @file_private: drm file-private structure to clean up
 *
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_syncobj_release(struct drm_file *file_private)
{
	/* Drop the idr reference on every still-registered syncobj ... */
	idr_for_each(&file_private->syncobj_idr,
		     &drm_syncobj_release_handle, file_private);
	/* ... then tear down the idr itself. */
	idr_destroy(&file_private->syncobj_idr);
}
513
514int
515drm_syncobj_create_ioctl(struct drm_device *dev, void *data,
516 struct drm_file *file_private)
517{
518 struct drm_syncobj_create *args = data;
519
520 if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
Chris Wilson69fdf422018-09-13 20:20:50 +0100521 return -EOPNOTSUPP;
Dave Airliee9083422017-04-04 13:26:24 +1000522
523 /* no valid flags yet */
Eric Anholt131280a2018-11-08 08:04:22 -0800524 if (args->flags & ~DRM_SYNCOBJ_CREATE_SIGNALED)
Dave Airliee9083422017-04-04 13:26:24 +1000525 return -EINVAL;
526
Marek Olšák1321fd22017-09-12 22:42:12 +0200527 return drm_syncobj_create_as_handle(file_private,
528 &args->handle, args->flags);
Dave Airliee9083422017-04-04 13:26:24 +1000529}
530
531int
532drm_syncobj_destroy_ioctl(struct drm_device *dev, void *data,
533 struct drm_file *file_private)
534{
535 struct drm_syncobj_destroy *args = data;
536
537 if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
Chris Wilson69fdf422018-09-13 20:20:50 +0100538 return -EOPNOTSUPP;
Dave Airliee9083422017-04-04 13:26:24 +1000539
540 /* make sure padding is empty */
541 if (args->pad)
542 return -EINVAL;
543 return drm_syncobj_destroy(file_private, args->handle);
544}
545
546int
547drm_syncobj_handle_to_fd_ioctl(struct drm_device *dev, void *data,
548 struct drm_file *file_private)
549{
550 struct drm_syncobj_handle *args = data;
551
552 if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
Chris Wilson69fdf422018-09-13 20:20:50 +0100553 return -EOPNOTSUPP;
Dave Airliee9083422017-04-04 13:26:24 +1000554
Dave Airlie3ee45a32017-04-26 04:09:02 +0100555 if (args->pad)
Dave Airliee9083422017-04-04 13:26:24 +1000556 return -EINVAL;
557
Dave Airlie3ee45a32017-04-26 04:09:02 +0100558 if (args->flags != 0 &&
559 args->flags != DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE)
560 return -EINVAL;
561
562 if (args->flags & DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE)
563 return drm_syncobj_export_sync_file(file_private, args->handle,
564 &args->fd);
565
Dave Airliee9083422017-04-04 13:26:24 +1000566 return drm_syncobj_handle_to_fd(file_private, args->handle,
567 &args->fd);
568}
569
570int
571drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data,
572 struct drm_file *file_private)
573{
574 struct drm_syncobj_handle *args = data;
575
576 if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
Chris Wilson69fdf422018-09-13 20:20:50 +0100577 return -EOPNOTSUPP;
Dave Airliee9083422017-04-04 13:26:24 +1000578
Dave Airlie3ee45a32017-04-26 04:09:02 +0100579 if (args->pad)
Dave Airliee9083422017-04-04 13:26:24 +1000580 return -EINVAL;
581
Dave Airlie3ee45a32017-04-26 04:09:02 +0100582 if (args->flags != 0 &&
583 args->flags != DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE)
584 return -EINVAL;
585
586 if (args->flags & DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE)
587 return drm_syncobj_import_sync_file_fence(file_private,
588 args->fd,
589 args->handle);
590
Dave Airliee9083422017-04-04 13:26:24 +1000591 return drm_syncobj_fd_to_handle(file_private, args->fd,
592 &args->handle);
593}
Dave Airlie5e60a102017-08-25 10:52:22 -0700594
Jason Ekstrande7aca5032017-08-25 10:52:24 -0700595static void syncobj_wait_fence_func(struct dma_fence *fence,
596 struct dma_fence_cb *cb)
597{
598 struct syncobj_wait_entry *wait =
599 container_of(cb, struct syncobj_wait_entry, fence_cb);
600
601 wake_up_process(wait->task);
602}
603
/*
 * Called by drm_syncobj_replace_fence() for each queued waiter when a new
 * fence is installed: hand the waiter a reference to the fence and wake it.
 */
static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj,
				      struct syncobj_wait_entry *wait)
{
	/* This happens inside the syncobj lock */
	wait->fence = dma_fence_get(rcu_dereference_protected(syncobj->fence,
							      lockdep_is_held(&syncobj->lock)));
	wake_up_process(wait->task);
}
612
/*
 * Core wait loop for DRM_IOCTL_SYNCOBJ_WAIT.
 *
 * Waits for @count syncobjs, either for any one of them (default) or for
 * all of them (DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL) to signal, for at most
 * @timeout jiffies.  With WAIT_FOR_SUBMIT, syncobjs without a fence are
 * waited on until a fence is submitted; otherwise they are -EINVAL.
 *
 * On success returns the remaining timeout (>= 0) and, if @idx is non-NULL,
 * stores the index of the first signaled syncobj.  On failure returns
 * -EINVAL, -ETIME, -ERESTARTSYS or -ENOMEM.
 */
static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
						  uint32_t count,
						  uint32_t flags,
						  signed long timeout,
						  uint32_t *idx)
{
	struct syncobj_wait_entry *entries;
	struct dma_fence *fence;
	uint32_t signaled_count, i;

	/* kcalloc zeroes the entries; drm_syncobj_remove_wait() and the
	 * fence_cb.func checks below rely on that.
	 */
	entries = kcalloc(count, sizeof(*entries), GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

	/* Walk the list of sync objects and initialize entries.  We do
	 * this up-front so that we can properly return -EINVAL if there is
	 * a syncobj with a missing fence and then never have the chance of
	 * returning -EINVAL again.
	 */
	signaled_count = 0;
	for (i = 0; i < count; ++i) {
		entries[i].task = current;
		entries[i].fence = drm_syncobj_fence_get(syncobjs[i]);
		if (!entries[i].fence) {
			if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
				continue;
			} else {
				timeout = -EINVAL;
				goto cleanup_entries;
			}
		}

		if (dma_fence_is_signaled(entries[i].fence)) {
			if (signaled_count == 0 && idx)
				*idx = i;
			signaled_count++;
		}
	}

	/* Fast path: already satisfied without sleeping. */
	if (signaled_count == count ||
	    (signaled_count > 0 &&
	     !(flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL)))
		goto cleanup_entries;

	/* There's a very annoying laxness in the dma_fence API here, in
	 * that backends are not required to automatically report when a
	 * fence is signaled prior to fence->ops->enable_signaling() being
	 * called. So here if we fail to match signaled_count, we need to
	 * fallthrough and try a 0 timeout wait!
	 */

	if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
		for (i = 0; i < count; ++i)
			drm_syncobj_fence_add_wait(syncobjs[i], &entries[i]);
	}

	do {
		set_current_state(TASK_INTERRUPTIBLE);

		signaled_count = 0;
		for (i = 0; i < count; ++i) {
			fence = entries[i].fence;
			if (!fence)
				continue;

			/* dma_fence_add_callback() returning nonzero means
			 * the fence was already signaled, so both branches
			 * of the || indicate "signaled".
			 */
			if (dma_fence_is_signaled(fence) ||
			    (!entries[i].fence_cb.func &&
			     dma_fence_add_callback(fence,
						    &entries[i].fence_cb,
						    syncobj_wait_fence_func))) {
				/* The fence has been signaled */
				if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL) {
					signaled_count++;
				} else {
					if (idx)
						*idx = i;
					goto done_waiting;
				}
			}
		}

		if (signaled_count == count)
			goto done_waiting;

		if (timeout == 0) {
			timeout = -ETIME;
			goto done_waiting;
		}

		if (signal_pending(current)) {
			timeout = -ERESTARTSYS;
			goto done_waiting;
		}

		timeout = schedule_timeout(timeout);
	} while (1);

done_waiting:
	__set_current_state(TASK_RUNNING);

cleanup_entries:
	for (i = 0; i < count; ++i) {
		drm_syncobj_remove_wait(syncobjs[i], &entries[i]);
		if (entries[i].fence_cb.func)
			dma_fence_remove_callback(entries[i].fence,
						  &entries[i].fence_cb);
		dma_fence_put(entries[i].fence);
	}
	kfree(entries);

	return timeout;
}
725
Dave Airlie5e60a102017-08-25 10:52:22 -0700726/**
727 * drm_timeout_abs_to_jiffies - calculate jiffies timeout from absolute value
728 *
729 * @timeout_nsec: timeout nsec component in ns, 0 for poll
730 *
731 * Calculate the timeout in jiffies from an absolute time in sec/nsec.
732 */
733static signed long drm_timeout_abs_to_jiffies(int64_t timeout_nsec)
734{
735 ktime_t abs_timeout, now;
736 u64 timeout_ns, timeout_jiffies64;
737
738 /* make 0 timeout means poll - absolute 0 doesn't seem valid */
739 if (timeout_nsec == 0)
740 return 0;
741
742 abs_timeout = ns_to_ktime(timeout_nsec);
743 now = ktime_get();
744
745 if (!ktime_after(abs_timeout, now))
746 return 0;
747
748 timeout_ns = ktime_to_ns(ktime_sub(abs_timeout, now));
749
750 timeout_jiffies64 = nsecs_to_jiffies64(timeout_ns);
751 /* clamp timeout to avoid infinite timeout */
752 if (timeout_jiffies64 >= MAX_SCHEDULE_TIMEOUT - 1)
753 return MAX_SCHEDULE_TIMEOUT - 1;
754
755 return timeout_jiffies64 + 1;
756}
757
Jason Ekstrande7aca5032017-08-25 10:52:24 -0700758static int drm_syncobj_array_wait(struct drm_device *dev,
759 struct drm_file *file_private,
760 struct drm_syncobj_wait *wait,
761 struct drm_syncobj **syncobjs)
Dave Airlie5e60a102017-08-25 10:52:22 -0700762{
763 signed long timeout = drm_timeout_abs_to_jiffies(wait->timeout_nsec);
Dave Airlie5e60a102017-08-25 10:52:22 -0700764 uint32_t first = ~0;
765
Chris Wilson12fec622018-09-20 21:05:30 +0100766 timeout = drm_syncobj_array_wait_timeout(syncobjs,
767 wait->count_handles,
768 wait->flags,
769 timeout, &first);
770 if (timeout < 0)
771 return timeout;
Dave Airlie5e60a102017-08-25 10:52:22 -0700772
773 wait->first_signaled = first;
Dave Airlie5e60a102017-08-25 10:52:22 -0700774 return 0;
775}
776
/*
 * Copy an array of @count_handles u32 handles from userspace and resolve
 * each into a referenced syncobj.  On success *@syncobjs_out points to a
 * kmalloc'ed array the caller must release with drm_syncobj_array_free().
 * On failure all references taken so far are dropped.
 */
static int drm_syncobj_array_find(struct drm_file *file_private,
				  void __user *user_handles,
				  uint32_t count_handles,
				  struct drm_syncobj ***syncobjs_out)
{
	uint32_t i, *handles;
	struct drm_syncobj **syncobjs;
	int ret;

	handles = kmalloc_array(count_handles, sizeof(*handles), GFP_KERNEL);
	if (handles == NULL)
		return -ENOMEM;

	if (copy_from_user(handles, user_handles,
			   sizeof(uint32_t) * count_handles)) {
		ret = -EFAULT;
		goto err_free_handles;
	}

	syncobjs = kmalloc_array(count_handles, sizeof(*syncobjs), GFP_KERNEL);
	if (syncobjs == NULL) {
		ret = -ENOMEM;
		goto err_free_handles;
	}

	for (i = 0; i < count_handles; i++) {
		syncobjs[i] = drm_syncobj_find(file_private, handles[i]);
		if (!syncobjs[i]) {
			ret = -ENOENT;
			goto err_put_syncobjs;
		}
	}

	/* The temporary handle buffer is no longer needed. */
	kfree(handles);
	*syncobjs_out = syncobjs;
	return 0;

err_put_syncobjs:
	/* Unwind only the references taken before the failing lookup. */
	while (i-- > 0)
		drm_syncobj_put(syncobjs[i]);
	kfree(syncobjs);
err_free_handles:
	kfree(handles);

	return ret;
}
823
824static void drm_syncobj_array_free(struct drm_syncobj **syncobjs,
825 uint32_t count)
826{
827 uint32_t i;
828 for (i = 0; i < count; i++)
829 drm_syncobj_put(syncobjs[i]);
830 kfree(syncobjs);
831}
832
Dave Airlie5e60a102017-08-25 10:52:22 -0700833int
834drm_syncobj_wait_ioctl(struct drm_device *dev, void *data,
835 struct drm_file *file_private)
836{
837 struct drm_syncobj_wait *args = data;
Jason Ekstrande7aca5032017-08-25 10:52:24 -0700838 struct drm_syncobj **syncobjs;
Dave Airlie5e60a102017-08-25 10:52:22 -0700839 int ret = 0;
Dave Airlie5e60a102017-08-25 10:52:22 -0700840
841 if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
Chris Wilson69fdf422018-09-13 20:20:50 +0100842 return -EOPNOTSUPP;
Dave Airlie5e60a102017-08-25 10:52:22 -0700843
Jason Ekstrande7aca5032017-08-25 10:52:24 -0700844 if (args->flags & ~(DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL |
845 DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT))
Dave Airlie5e60a102017-08-25 10:52:22 -0700846 return -EINVAL;
847
848 if (args->count_handles == 0)
849 return -EINVAL;
850
Jason Ekstrand3e6fb722017-08-25 10:52:26 -0700851 ret = drm_syncobj_array_find(file_private,
852 u64_to_user_ptr(args->handles),
853 args->count_handles,
854 &syncobjs);
855 if (ret < 0)
856 return ret;
Dave Airlie5e60a102017-08-25 10:52:22 -0700857
Jason Ekstrande7aca5032017-08-25 10:52:24 -0700858 ret = drm_syncobj_array_wait(dev, file_private,
859 args, syncobjs);
Dave Airlie5e60a102017-08-25 10:52:22 -0700860
Jason Ekstrand3e6fb722017-08-25 10:52:26 -0700861 drm_syncobj_array_free(syncobjs, args->count_handles);
Dave Airlie5e60a102017-08-25 10:52:22 -0700862
863 return ret;
864}
Jason Ekstrandaa4035d2017-08-28 14:10:27 -0700865
866int
867drm_syncobj_reset_ioctl(struct drm_device *dev, void *data,
868 struct drm_file *file_private)
869{
870 struct drm_syncobj_array *args = data;
871 struct drm_syncobj **syncobjs;
872 uint32_t i;
873 int ret;
874
875 if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
Chris Wilson69fdf422018-09-13 20:20:50 +0100876 return -EOPNOTSUPP;
Jason Ekstrandaa4035d2017-08-28 14:10:27 -0700877
878 if (args->pad != 0)
879 return -EINVAL;
880
881 if (args->count_handles == 0)
882 return -EINVAL;
883
884 ret = drm_syncobj_array_find(file_private,
885 u64_to_user_ptr(args->handles),
886 args->count_handles,
887 &syncobjs);
888 if (ret < 0)
889 return ret;
890
Eric Anholt131280a2018-11-08 08:04:22 -0800891 for (i = 0; i < args->count_handles; i++)
Christian König0b258ed2018-11-14 14:24:27 +0100892 drm_syncobj_replace_fence(syncobjs[i], NULL);
Eric Anholt131280a2018-11-08 08:04:22 -0800893
Jason Ekstrandaa4035d2017-08-28 14:10:27 -0700894 drm_syncobj_array_free(syncobjs, args->count_handles);
895
Eric Anholt131280a2018-11-08 08:04:22 -0800896 return 0;
Jason Ekstrandaa4035d2017-08-28 14:10:27 -0700897}
Jason Ekstrandffa94432017-08-28 14:10:28 -0700898
899int
900drm_syncobj_signal_ioctl(struct drm_device *dev, void *data,
901 struct drm_file *file_private)
902{
903 struct drm_syncobj_array *args = data;
904 struct drm_syncobj **syncobjs;
905 uint32_t i;
906 int ret;
907
908 if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
Chris Wilson69fdf422018-09-13 20:20:50 +0100909 return -EOPNOTSUPP;
Jason Ekstrandffa94432017-08-28 14:10:28 -0700910
911 if (args->pad != 0)
912 return -EINVAL;
913
914 if (args->count_handles == 0)
915 return -EINVAL;
916
917 ret = drm_syncobj_array_find(file_private,
918 u64_to_user_ptr(args->handles),
919 args->count_handles,
920 &syncobjs);
921 if (ret < 0)
922 return ret;
923
Christian König86bbd892018-11-13 14:14:00 +0100924 for (i = 0; i < args->count_handles; i++)
925 drm_syncobj_assign_null_handle(syncobjs[i]);
Jason Ekstrandffa94432017-08-28 14:10:28 -0700926
927 drm_syncobj_array_free(syncobjs, args->count_handles);
928
929 return ret;
930}