blob: da8175d9c6ffcbaf4d7acb82f2d18da34f1acb6b [file] [log] [blame]
Dave Airliee9083422017-04-04 13:26:24 +10001/*
2 * Copyright 2017 Red Hat
Dave Airlie5e60a102017-08-25 10:52:22 -07003 * Parts ported from amdgpu (fence wait code).
4 * Copyright 2016 Advanced Micro Devices, Inc.
Dave Airliee9083422017-04-04 13:26:24 +10005 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
22 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
23 * IN THE SOFTWARE.
24 *
25 * Authors:
26 *
27 */
28
29/**
30 * DOC: Overview
31 *
Daniel Vetter924fe8d2017-12-14 21:30:52 +010032 * DRM synchronisation objects (syncobj, see struct &drm_syncobj) are
33 * persistent objects that contain an optional fence. The fence can be updated
34 * with a new fence, or be NULL.
Dave Airliee9083422017-04-04 13:26:24 +100035 *
Dave Airlie5e60a102017-08-25 10:52:22 -070036 * syncobj's can be waited upon, where it will wait for the underlying
37 * fence.
38 *
 * syncobj's can be exported to fd's and back; these fd's are opaque and
40 * have no other use case, except passing the syncobj between processes.
41 *
42 * Their primary use-case is to implement Vulkan fences and semaphores.
43 *
44 * syncobj have a kref reference count, but also have an optional file.
45 * The file is only created once the syncobj is exported.
46 * The file takes a reference on the kref.
47 */
48
49#include <drm/drmP.h>
50#include <linux/file.h>
51#include <linux/fs.h>
52#include <linux/anon_inodes.h>
Dave Airlie3ee45a32017-04-26 04:09:02 +010053#include <linux/sync_file.h>
Jason Ekstrande7aca5032017-08-25 10:52:24 -070054#include <linux/sched/signal.h>
Dave Airliee9083422017-04-04 13:26:24 +100055
56#include "drm_internal.h"
57#include <drm/drm_syncobj.h>
58
Chunming Zhou48197bc2018-10-18 14:18:36 +080059/* merge normal syncobj to timeline syncobj, the point interval is 1 */
60#define DRM_SYNCOBJ_BINARY_POINT 1
61
/*
 * Always-signaled placeholder fence.  Only the base fence and the lock
 * passed to dma_fence_init() are needed; the ops below just supply a
 * fixed name.
 */
struct drm_syncobj_stub_fence {
	struct dma_fence base;
	spinlock_t lock;	/* protects base; handed to dma_fence_init() */
};

/* Driver and timeline name reported for stub fences (both ops use it). */
static const char *drm_syncobj_stub_fence_get_name(struct dma_fence *fence)
{
	return "syncobjstub";
}

static const struct dma_fence_ops drm_syncobj_stub_fence_ops = {
	.get_driver_name = drm_syncobj_stub_fence_get_name,
	.get_timeline_name = drm_syncobj_stub_fence_get_name,
};

/*
 * One signal point on a syncobj's timeline: a fence array that signals when
 * point @value is reached.  Linked in ascending @value order into
 * drm_syncobj::signal_pt_list, protected by drm_syncobj::pt_lock.
 */
struct drm_syncobj_signal_pt {
	struct dma_fence_array *fence_array;
	u64 value;
	struct list_head list;
};
Chunming Zhoue28bd102018-08-30 14:48:28 +080082
/* Protects one-time initialization of the global signaled fence below. */
static DEFINE_SPINLOCK(signaled_fence_lock);
/* Global, lazily initialized, permanently signaled stub fence. */
static struct dma_fence signaled_fence;

/*
 * Return a reference to a fence that is already signaled, used for timeline
 * points at or below the syncobj's current timeline value.
 *
 * The fence lives in static storage and is initialized (and signaled)
 * exactly once; "not yet initialized" is detected by fence->ops == NULL.
 *
 * NOTE(review): drm_syncobj_stub_fence_ops has no .release, so if this
 * static fence's refcount ever hit zero the default release would try to
 * free static storage.  The initial reference from dma_fence_init() is
 * never dropped, which appears to prevent that -- worth confirming.
 */
static struct dma_fence *drm_syncobj_get_stub_fence(void)
{
	spin_lock(&signaled_fence_lock);
	if (!signaled_fence.ops) {
		dma_fence_init(&signaled_fence,
			       &drm_syncobj_stub_fence_ops,
			       &signaled_fence_lock,
			       0, 0);
		dma_fence_signal_locked(&signaled_fence);
	}
	spin_unlock(&signaled_fence_lock);

	return dma_fence_get(&signaled_fence);
}
Dave Airliee9083422017-04-04 13:26:24 +1000100/**
101 * drm_syncobj_find - lookup and reference a sync object.
102 * @file_private: drm file private pointer
103 * @handle: sync object handle to lookup.
104 *
Daniel Vetter924fe8d2017-12-14 21:30:52 +0100105 * Returns a reference to the syncobj pointed to by handle or NULL. The
106 * reference must be released by calling drm_syncobj_put().
Dave Airliee9083422017-04-04 13:26:24 +1000107 */
108struct drm_syncobj *drm_syncobj_find(struct drm_file *file_private,
109 u32 handle)
110{
111 struct drm_syncobj *syncobj;
112
113 spin_lock(&file_private->syncobj_table_lock);
114
115 /* Check if we currently have a reference on the object */
116 syncobj = idr_find(&file_private->syncobj_idr, handle);
117 if (syncobj)
118 drm_syncobj_get(syncobj);
119
120 spin_unlock(&file_private->syncobj_table_lock);
121
122 return syncobj;
123}
124EXPORT_SYMBOL(drm_syncobj_find);
125
/*
 * Return a reference to the fence backing @point, or NULL when no signal
 * point covers it yet.  Called with syncobj->pt_lock held by all callers.
 */
static struct dma_fence *
drm_syncobj_find_signal_pt_for_point(struct drm_syncobj *syncobj,
				     uint64_t point)
{
	struct drm_syncobj_signal_pt *signal_pt;

	/*
	 * Timeline points that have already been garbage-collected past are
	 * represented by the shared, permanently signaled stub fence.
	 */
	if ((syncobj->type == DRM_SYNCOBJ_TYPE_TIMELINE) &&
	    (point <= syncobj->timeline))
		return drm_syncobj_get_stub_fence();

	/* List is ordered by ascending ->value; first pt >= point covers it. */
	list_for_each_entry(signal_pt, &syncobj->signal_pt_list, list) {
		if (point > signal_pt->value)
			continue;
		/* Binary syncobjs need an exact match, not just a covering pt. */
		if ((syncobj->type == DRM_SYNCOBJ_TYPE_BINARY) &&
		    (point != signal_pt->value))
			continue;
		return dma_fence_get(&signal_pt->fence_array->base);
	}
	return NULL;
}
146
/* Append @cb to the syncobj's callback list; caller holds cb_mutex. */
static void drm_syncobj_add_callback_locked(struct drm_syncobj *syncobj,
					    struct drm_syncobj_cb *cb,
					    drm_syncobj_func_t func)
{
	cb->func = func;
	list_add_tail(&cb->node, &syncobj->cb_list);
}
154
/*
 * Fetch the fence the syncobj currently resolves to, or, if none exists
 * yet, register @cb to be invoked once a fence is installed.
 *
 * Lookup and registration happen under cb_mutex so they are atomic with
 * respect to drm_syncobj_replace_fence(), which takes the same mutex
 * before running queued callbacks.
 */
static void drm_syncobj_fence_get_or_add_callback(struct drm_syncobj *syncobj,
						  struct dma_fence **fence,
						  struct drm_syncobj_cb *cb,
						  drm_syncobj_func_t func)
{
	u64 pt_value = 0;

	/* Caller must pass in a cleared fence pointer. */
	WARN_ON(*fence);

	if (syncobj->type == DRM_SYNCOBJ_TYPE_BINARY) {
		/*BINARY syncobj always wait on last pt */
		pt_value = syncobj->signal_point;

		/* No point created yet: wait for the first one (interval 1). */
		if (pt_value == 0)
			pt_value += DRM_SYNCOBJ_BINARY_POINT;
	}

	mutex_lock(&syncobj->cb_mutex);
	spin_lock(&syncobj->pt_lock);
	*fence = drm_syncobj_find_signal_pt_for_point(syncobj, pt_value);
	spin_unlock(&syncobj->pt_lock);
	if (!*fence)
		drm_syncobj_add_callback_locked(syncobj, cb, func);
	mutex_unlock(&syncobj->cb_mutex);
}
180
/* Unlink a previously-registered callback, serialized by cb_mutex. */
static void drm_syncobj_remove_callback(struct drm_syncobj *syncobj,
					struct drm_syncobj_cb *cb)
{
	mutex_lock(&syncobj->cb_mutex);
	list_del_init(&cb->node);
	mutex_unlock(&syncobj->cb_mutex);
}
188
/* Reset a syncobj to pristine state: fresh fence context, empty timeline. */
static void drm_syncobj_init(struct drm_syncobj *syncobj)
{
	spin_lock(&syncobj->pt_lock);
	/* Each syncobj gets its own fence context for its signal points. */
	syncobj->timeline_context = dma_fence_context_alloc(1);
	syncobj->timeline = 0;		/* last garbage-collected point value */
	syncobj->signal_point = 0;	/* highest point a fence was attached to */
	init_waitqueue_head(&syncobj->wq);

	INIT_LIST_HEAD(&syncobj->signal_pt_list);
	spin_unlock(&syncobj->pt_lock);
}
200
201static void drm_syncobj_fini(struct drm_syncobj *syncobj)
202{
203 struct drm_syncobj_signal_pt *signal_pt = NULL, *tmp;
204
205 spin_lock(&syncobj->pt_lock);
206 list_for_each_entry_safe(signal_pt, tmp,
207 &syncobj->signal_pt_list, list) {
208 list_del(&signal_pt->list);
209 dma_fence_put(&signal_pt->fence_array->base);
210 kfree(signal_pt);
211 }
212 spin_unlock(&syncobj->pt_lock);
213}
214
Chunming Zhou48197bc2018-10-18 14:18:36 +0800215static int drm_syncobj_create_signal_pt(struct drm_syncobj *syncobj,
216 struct dma_fence *fence,
217 u64 point)
218{
219 struct drm_syncobj_signal_pt *signal_pt =
220 kzalloc(sizeof(struct drm_syncobj_signal_pt), GFP_KERNEL);
221 struct drm_syncobj_signal_pt *tail_pt;
222 struct dma_fence **fences;
223 int num_fences = 0;
224 int ret = 0, i;
225
226 if (!signal_pt)
227 return -ENOMEM;
228 if (!fence)
229 goto out;
230
231 fences = kmalloc_array(sizeof(void *), 2, GFP_KERNEL);
232 if (!fences) {
233 ret = -ENOMEM;
234 goto out;
235 }
236 fences[num_fences++] = dma_fence_get(fence);
237 /* timeline syncobj must take this dependency */
238 if (syncobj->type == DRM_SYNCOBJ_TYPE_TIMELINE) {
Chunming Zhou43cf1fc2018-10-23 17:37:45 +0800239 spin_lock(&syncobj->pt_lock);
Chunming Zhou48197bc2018-10-18 14:18:36 +0800240 if (!list_empty(&syncobj->signal_pt_list)) {
241 tail_pt = list_last_entry(&syncobj->signal_pt_list,
242 struct drm_syncobj_signal_pt, list);
243 fences[num_fences++] =
244 dma_fence_get(&tail_pt->fence_array->base);
245 }
Chunming Zhou43cf1fc2018-10-23 17:37:45 +0800246 spin_unlock(&syncobj->pt_lock);
Chunming Zhou48197bc2018-10-18 14:18:36 +0800247 }
248 signal_pt->fence_array = dma_fence_array_create(num_fences, fences,
249 syncobj->timeline_context,
250 point, false);
251 if (!signal_pt->fence_array) {
252 ret = -ENOMEM;
253 goto fail;
254 }
255
Chunming Zhou43cf1fc2018-10-23 17:37:45 +0800256 spin_lock(&syncobj->pt_lock);
Chunming Zhou48197bc2018-10-18 14:18:36 +0800257 if (syncobj->signal_point >= point) {
258 DRM_WARN("A later signal is ready!");
Chunming Zhou43cf1fc2018-10-23 17:37:45 +0800259 spin_unlock(&syncobj->pt_lock);
Chunming Zhou48197bc2018-10-18 14:18:36 +0800260 goto exist;
261 }
262 signal_pt->value = point;
263 list_add_tail(&signal_pt->list, &syncobj->signal_pt_list);
264 syncobj->signal_point = point;
Chunming Zhou43cf1fc2018-10-23 17:37:45 +0800265 spin_unlock(&syncobj->pt_lock);
Chunming Zhou48197bc2018-10-18 14:18:36 +0800266 wake_up_all(&syncobj->wq);
267
268 return 0;
269exist:
270 dma_fence_put(&signal_pt->fence_array->base);
271fail:
272 for (i = 0; i < num_fences; i++)
273 dma_fence_put(fences[i]);
274 kfree(fences);
275out:
276 kfree(signal_pt);
277 return ret;
278}
279
/*
 * Prune signaled points from the head of the signal point list, advancing
 * syncobj->timeline to the last pruned value.  Stops at the first
 * unsignaled point: the list is ordered ascending, so nothing after it can
 * be signaled either.  A binary syncobj always keeps its tail point, which
 * represents the current fence.
 *
 * NOTE(review): list_last_entry() is evaluated even when the list is
 * empty; the resulting pointer is then only compared against iterated
 * entries (the loop body never runs), never dereferenced -- confirm this
 * invariant holds if the tail handling ever changes.
 */
static void drm_syncobj_garbage_collection(struct drm_syncobj *syncobj)
{
	struct drm_syncobj_signal_pt *signal_pt, *tmp, *tail_pt;

	spin_lock(&syncobj->pt_lock);
	tail_pt = list_last_entry(&syncobj->signal_pt_list,
				  struct drm_syncobj_signal_pt,
				  list);
	list_for_each_entry_safe(signal_pt, tmp,
				 &syncobj->signal_pt_list, list) {
		if (syncobj->type == DRM_SYNCOBJ_TYPE_BINARY &&
		    signal_pt == tail_pt)
			continue;
		if (dma_fence_is_signaled(&signal_pt->fence_array->base)) {
			syncobj->timeline = signal_pt->value;
			list_del(&signal_pt->list);
			dma_fence_put(&signal_pt->fence_array->base);
			kfree(signal_pt);
		} else {
			/*signal_pt is in order in list, from small to big, so
			 * the later must not be signal either */
			break;
		}
	}

	spin_unlock(&syncobj->pt_lock);
}
Dave Airliee9083422017-04-04 13:26:24 +1000307/**
308 * drm_syncobj_replace_fence - replace fence in a sync object.
Dave Airliee9083422017-04-04 13:26:24 +1000309 * @syncobj: Sync object to replace fence in
Chunming Zhou9a09a422018-08-30 14:48:30 +0800310 * @point: timeline point
Dave Airliee9083422017-04-04 13:26:24 +1000311 * @fence: fence to install in sync file.
312 *
Chunming Zhou9a09a422018-08-30 14:48:30 +0800313 * This replaces the fence on a sync object, or a timeline point fence.
Dave Airliee9083422017-04-04 13:26:24 +1000314 */
Chris Wilson00fc2c22017-07-05 21:12:44 +0100315void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
Chunming Zhou9a09a422018-08-30 14:48:30 +0800316 u64 point,
Dave Airliee9083422017-04-04 13:26:24 +1000317 struct dma_fence *fence)
318{
Chunming Zhou48197bc2018-10-18 14:18:36 +0800319 u64 pt_value = point;
Dave Airliee9083422017-04-04 13:26:24 +1000320
Chunming Zhou48197bc2018-10-18 14:18:36 +0800321 drm_syncobj_garbage_collection(syncobj);
322 if (syncobj->type == DRM_SYNCOBJ_TYPE_BINARY) {
323 if (!fence) {
324 drm_syncobj_fini(syncobj);
325 drm_syncobj_init(syncobj);
326 return;
327 }
328 pt_value = syncobj->signal_point +
329 DRM_SYNCOBJ_BINARY_POINT;
330 }
331 drm_syncobj_create_signal_pt(syncobj, fence, pt_value);
332 if (fence) {
333 struct drm_syncobj_cb *cur, *tmp;
Chunming Zhou43cf1fc2018-10-23 17:37:45 +0800334 LIST_HEAD(cb_list);
Jason Ekstrand9c19fb12017-08-28 07:39:25 -0700335
Chunming Zhou43cf1fc2018-10-23 17:37:45 +0800336 mutex_lock(&syncobj->cb_mutex);
Jason Ekstrand9c19fb12017-08-28 07:39:25 -0700337 list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node) {
338 list_del_init(&cur->node);
339 cur->func(syncobj, cur);
340 }
Chunming Zhou43cf1fc2018-10-23 17:37:45 +0800341 mutex_unlock(&syncobj->cb_mutex);
Jason Ekstrand9c19fb12017-08-28 07:39:25 -0700342 }
Dave Airliee9083422017-04-04 13:26:24 +1000343}
344EXPORT_SYMBOL(drm_syncobj_replace_fence);
345
Jason Ekstrand1fc08212017-08-25 10:52:25 -0700346static int drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj)
347{
Chunming Zhoue28bd102018-08-30 14:48:28 +0800348 struct drm_syncobj_stub_fence *fence;
Jason Ekstrand1fc08212017-08-25 10:52:25 -0700349 fence = kzalloc(sizeof(*fence), GFP_KERNEL);
350 if (fence == NULL)
351 return -ENOMEM;
352
353 spin_lock_init(&fence->lock);
Chunming Zhoue28bd102018-08-30 14:48:28 +0800354 dma_fence_init(&fence->base, &drm_syncobj_stub_fence_ops,
Jason Ekstrand1fc08212017-08-25 10:52:25 -0700355 &fence->lock, 0, 0);
356 dma_fence_signal(&fence->base);
357
Chunming Zhou9a09a422018-08-30 14:48:30 +0800358 drm_syncobj_replace_fence(syncobj, 0, &fence->base);
Jason Ekstrand1fc08212017-08-25 10:52:25 -0700359
360 dma_fence_put(&fence->base);
361
362 return 0;
363}
364
/*
 * Fetch the fence for @point.  With WAIT_FOR_SUBMIT, sleep (interruptibly)
 * until a signal point at or above @point has been created; without it, a
 * missing point is an error (-EINVAL).  On success *@fence holds a
 * reference the caller must drop.
 */
static int
drm_syncobj_point_get(struct drm_syncobj *syncobj, u64 point, u64 flags,
		      struct dma_fence **fence)
{
	int ret = 0;

	if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
		/* Woken by wake_up_all() in drm_syncobj_create_signal_pt(). */
		ret = wait_event_interruptible(syncobj->wq,
					       point <= syncobj->signal_point);
		if (ret < 0)
			return ret;
	}
	spin_lock(&syncobj->pt_lock);
	*fence = drm_syncobj_find_signal_pt_for_point(syncobj, point);
	if (!*fence)
		ret = -EINVAL;
	spin_unlock(&syncobj->pt_lock);
	return ret;
}
384
385/**
386 * drm_syncobj_search_fence - lookup and reference the fence in a sync object or
387 * in a timeline point
388 * @syncobj: sync object pointer
389 * @point: timeline point
390 * @flags: DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT or not
391 * @fence: out parameter for the fence
392 *
393 * if flags is DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT, the function will block
 * here until the specified timeline point is reached.
395 * if not, you need a submit thread and block in userspace until all future
396 * timeline points have materialized, only then you can submit to the kernel,
397 * otherwise, function will fail to return fence.
398 *
399 * Returns 0 on success or a negative error value on failure. On success @fence
400 * contains a reference to the fence, which must be released by calling
401 * dma_fence_put().
402 */
403int drm_syncobj_search_fence(struct drm_syncobj *syncobj, u64 point,
404 u64 flags, struct dma_fence **fence)
405{
406 u64 pt_value = point;
407
408 if (!syncobj)
409 return -ENOENT;
410
411 drm_syncobj_garbage_collection(syncobj);
412 if (syncobj->type == DRM_SYNCOBJ_TYPE_BINARY) {
413 /*BINARY syncobj always wait on last pt */
414 pt_value = syncobj->signal_point;
415
416 if (pt_value == 0)
417 pt_value += DRM_SYNCOBJ_BINARY_POINT;
418 }
419 return drm_syncobj_point_get(syncobj, pt_value, flags, fence);
420}
421EXPORT_SYMBOL(drm_syncobj_search_fence);
422
Daniel Vetter924fe8d2017-12-14 21:30:52 +0100423/**
424 * drm_syncobj_find_fence - lookup and reference the fence in a sync object
425 * @file_private: drm file private pointer
426 * @handle: sync object handle to lookup.
Chunming Zhou0a6730e2018-08-30 14:48:29 +0800427 * @point: timeline point
Chunming Zhou871edc92018-10-17 15:03:18 +0800428 * @flags: DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT or not
Daniel Vetter924fe8d2017-12-14 21:30:52 +0100429 * @fence: out parameter for the fence
430 *
431 * This is just a convenience function that combines drm_syncobj_find() and
 * drm_syncobj_search_fence().
Daniel Vetter924fe8d2017-12-14 21:30:52 +0100433 *
434 * Returns 0 on success or a negative error value on failure. On success @fence
435 * contains a reference to the fence, which must be released by calling
436 * dma_fence_put().
437 */
Jason Ekstrandafaf5922017-08-25 10:52:19 -0700438int drm_syncobj_find_fence(struct drm_file *file_private,
Chunming Zhou649fdce2018-10-15 16:55:47 +0800439 u32 handle, u64 point, u64 flags,
Jason Ekstrandafaf5922017-08-25 10:52:19 -0700440 struct dma_fence **fence)
Dave Airliee9083422017-04-04 13:26:24 +1000441{
442 struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
Chunming Zhou48197bc2018-10-18 14:18:36 +0800443 int ret;
Dave Airliee9083422017-04-04 13:26:24 +1000444
Chunming Zhou48197bc2018-10-18 14:18:36 +0800445 ret = drm_syncobj_search_fence(syncobj, point, flags, fence);
Eric Anholtaecbde62018-11-05 15:01:10 -0800446 if (syncobj)
447 drm_syncobj_put(syncobj);
Dave Airliee9083422017-04-04 13:26:24 +1000448 return ret;
449}
Jason Ekstrandafaf5922017-08-25 10:52:19 -0700450EXPORT_SYMBOL(drm_syncobj_find_fence);
Dave Airliee9083422017-04-04 13:26:24 +1000451
452/**
453 * drm_syncobj_free - free a sync object.
454 * @kref: kref to free.
455 *
456 * Only to be called from kref_put in drm_syncobj_put.
457 */
void drm_syncobj_free(struct kref *kref)
{
	struct drm_syncobj *syncobj = container_of(kref,
						   struct drm_syncobj,
						   refcount);
	/* Release all remaining signal points before freeing the object. */
	drm_syncobj_fini(syncobj);
	kfree(syncobj);
}
EXPORT_SYMBOL(drm_syncobj_free);
467
Marek Olšák1321fd22017-09-12 22:42:12 +0200468/**
469 * drm_syncobj_create - create a new syncobj
470 * @out_syncobj: returned syncobj
471 * @flags: DRM_SYNCOBJ_* flags
472 * @fence: if non-NULL, the syncobj will represent this fence
Daniel Vetter924fe8d2017-12-14 21:30:52 +0100473 *
474 * This is the first function to create a sync object. After creating, drivers
475 * probably want to make it available to userspace, either through
476 * drm_syncobj_get_handle() or drm_syncobj_get_fd().
477 *
478 * Returns 0 on success or a negative error value on failure.
Marek Olšák1321fd22017-09-12 22:42:12 +0200479 */
int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags,
		       struct dma_fence *fence)
{
	int ret;
	struct drm_syncobj *syncobj;

	syncobj = kzalloc(sizeof(struct drm_syncobj), GFP_KERNEL);
	if (!syncobj)
		return -ENOMEM;

	kref_init(&syncobj->refcount);
	INIT_LIST_HEAD(&syncobj->cb_list);
	spin_lock_init(&syncobj->pt_lock);	/* protects signal_pt_list */
	mutex_init(&syncobj->cb_mutex);		/* protects cb_list */
	if (flags & DRM_SYNCOBJ_CREATE_TYPE_TIMELINE)
		syncobj->type = DRM_SYNCOBJ_TYPE_TIMELINE;
	else
		syncobj->type = DRM_SYNCOBJ_TYPE_BINARY;
	drm_syncobj_init(syncobj);

	/* Optionally start life already signaled, via a stub fence. */
	if (flags & DRM_SYNCOBJ_CREATE_SIGNALED) {
		ret = drm_syncobj_assign_null_handle(syncobj);
		if (ret < 0) {
			drm_syncobj_put(syncobj);
			return ret;
		}
	}

	/* Or with a caller-provided fence installed at point 0. */
	if (fence)
		drm_syncobj_replace_fence(syncobj, 0, fence);

	*out_syncobj = syncobj;
	return 0;
}
EXPORT_SYMBOL(drm_syncobj_create);
515
516/**
517 * drm_syncobj_get_handle - get a handle from a syncobj
Daniel Vetter924fe8d2017-12-14 21:30:52 +0100518 * @file_private: drm file private pointer
519 * @syncobj: Sync object to export
520 * @handle: out parameter with the new handle
521 *
522 * Exports a sync object created with drm_syncobj_create() as a handle on
523 * @file_private to userspace.
524 *
525 * Returns 0 on success or a negative error value on failure.
Marek Olšák1321fd22017-09-12 22:42:12 +0200526 */
527int drm_syncobj_get_handle(struct drm_file *file_private,
528 struct drm_syncobj *syncobj, u32 *handle)
529{
530 int ret;
531
532 /* take a reference to put in the idr */
533 drm_syncobj_get(syncobj);
534
Dave Airliee9083422017-04-04 13:26:24 +1000535 idr_preload(GFP_KERNEL);
536 spin_lock(&file_private->syncobj_table_lock);
537 ret = idr_alloc(&file_private->syncobj_idr, syncobj, 1, 0, GFP_NOWAIT);
538 spin_unlock(&file_private->syncobj_table_lock);
539
540 idr_preload_end();
541
542 if (ret < 0) {
543 drm_syncobj_put(syncobj);
544 return ret;
545 }
546
547 *handle = ret;
548 return 0;
549}
Marek Olšák1321fd22017-09-12 22:42:12 +0200550EXPORT_SYMBOL(drm_syncobj_get_handle);
551
552static int drm_syncobj_create_as_handle(struct drm_file *file_private,
553 u32 *handle, uint32_t flags)
554{
555 int ret;
556 struct drm_syncobj *syncobj;
557
558 ret = drm_syncobj_create(&syncobj, flags, NULL);
559 if (ret)
560 return ret;
561
562 ret = drm_syncobj_get_handle(file_private, syncobj, handle);
563 drm_syncobj_put(syncobj);
564 return ret;
565}
Dave Airliee9083422017-04-04 13:26:24 +1000566
567static int drm_syncobj_destroy(struct drm_file *file_private,
568 u32 handle)
569{
570 struct drm_syncobj *syncobj;
571
572 spin_lock(&file_private->syncobj_table_lock);
573 syncobj = idr_remove(&file_private->syncobj_idr, handle);
574 spin_unlock(&file_private->syncobj_table_lock);
575
576 if (!syncobj)
577 return -EINVAL;
578
579 drm_syncobj_put(syncobj);
580 return 0;
581}
582
/* ->release for syncobj fds: drop the reference held by the file. */
static int drm_syncobj_file_release(struct inode *inode, struct file *file)
{
	struct drm_syncobj *syncobj = file->private_data;

	drm_syncobj_put(syncobj);
	return 0;
}

/* Syncobj fds are anon inodes; only release needs handling.  The f_op
 * pointer doubles as a type check in drm_syncobj_fd_to_handle(). */
static const struct file_operations drm_syncobj_file_fops = {
	.release = drm_syncobj_file_release,
};
594
Daniel Vetter924fe8d2017-12-14 21:30:52 +0100595/**
596 * drm_syncobj_get_fd - get a file descriptor from a syncobj
597 * @syncobj: Sync object to export
598 * @p_fd: out parameter with the new file descriptor
599 *
600 * Exports a sync object created with drm_syncobj_create() as a file descriptor.
601 *
602 * Returns 0 on success or a negative error value on failure.
603 */
int drm_syncobj_get_fd(struct drm_syncobj *syncobj, int *p_fd)
{
	struct file *file;
	int fd;

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		return fd;

	file = anon_inode_getfile("syncobj_file",
				  &drm_syncobj_file_fops,
				  syncobj, 0);
	if (IS_ERR(file)) {
		/* fd was never installed, so it must be returned, not closed. */
		put_unused_fd(fd);
		return PTR_ERR(file);
	}

	/* Reference for the file; dropped in drm_syncobj_file_release(). */
	drm_syncobj_get(syncobj);
	fd_install(fd, file);

	*p_fd = fd;
	return 0;
}
EXPORT_SYMBOL(drm_syncobj_get_fd);
628
Dave Airliee9083422017-04-04 13:26:24 +1000629static int drm_syncobj_handle_to_fd(struct drm_file *file_private,
630 u32 handle, int *p_fd)
631{
632 struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
633 int ret;
Dave Airliee9083422017-04-04 13:26:24 +1000634
635 if (!syncobj)
636 return -EINVAL;
637
Marek Olšák684fd0a2017-09-12 22:42:13 +0200638 ret = drm_syncobj_get_fd(syncobj, p_fd);
Dave Airliee9083422017-04-04 13:26:24 +1000639 drm_syncobj_put(syncobj);
640 return ret;
641}
642
Dave Airliee9083422017-04-04 13:26:24 +1000643static int drm_syncobj_fd_to_handle(struct drm_file *file_private,
644 int fd, u32 *handle)
645{
Chris Wilsone7cdf5c2017-12-19 12:07:00 +0000646 struct drm_syncobj *syncobj;
647 struct file *file;
Dave Airliee9083422017-04-04 13:26:24 +1000648 int ret;
649
Chris Wilsone7cdf5c2017-12-19 12:07:00 +0000650 file = fget(fd);
651 if (!file)
Dave Airliee9083422017-04-04 13:26:24 +1000652 return -EINVAL;
653
Chris Wilsone7cdf5c2017-12-19 12:07:00 +0000654 if (file->f_op != &drm_syncobj_file_fops) {
655 fput(file);
656 return -EINVAL;
657 }
658
Dave Airliee9083422017-04-04 13:26:24 +1000659 /* take a reference to put in the idr */
Chris Wilsone7cdf5c2017-12-19 12:07:00 +0000660 syncobj = file->private_data;
Dave Airliee9083422017-04-04 13:26:24 +1000661 drm_syncobj_get(syncobj);
662
663 idr_preload(GFP_KERNEL);
664 spin_lock(&file_private->syncobj_table_lock);
665 ret = idr_alloc(&file_private->syncobj_idr, syncobj, 1, 0, GFP_NOWAIT);
666 spin_unlock(&file_private->syncobj_table_lock);
667 idr_preload_end();
668
Chris Wilsone7cdf5c2017-12-19 12:07:00 +0000669 if (ret > 0) {
670 *handle = ret;
671 ret = 0;
672 } else
673 drm_syncobj_put(syncobj);
674
675 fput(file);
676 return ret;
Dave Airliee9083422017-04-04 13:26:24 +1000677}
678
Ville Syrjäläa32c94a2017-09-01 19:53:25 +0300679static int drm_syncobj_import_sync_file_fence(struct drm_file *file_private,
680 int fd, int handle)
Dave Airlie3ee45a32017-04-26 04:09:02 +0100681{
682 struct dma_fence *fence = sync_file_get_fence(fd);
683 struct drm_syncobj *syncobj;
684
685 if (!fence)
686 return -EINVAL;
687
688 syncobj = drm_syncobj_find(file_private, handle);
689 if (!syncobj) {
690 dma_fence_put(fence);
691 return -ENOENT;
692 }
693
Chunming Zhou9a09a422018-08-30 14:48:30 +0800694 drm_syncobj_replace_fence(syncobj, 0, fence);
Dave Airlie3ee45a32017-04-26 04:09:02 +0100695 dma_fence_put(fence);
696 drm_syncobj_put(syncobj);
697 return 0;
698}
699
/* Export the current fence of syncobj @handle as a new sync_file fd. */
static int drm_syncobj_export_sync_file(struct drm_file *file_private,
					int handle, int *p_fd)
{
	int ret;
	struct dma_fence *fence;
	struct sync_file *sync_file;
	int fd = get_unused_fd_flags(O_CLOEXEC);

	if (fd < 0)
		return fd;

	/* point 0, no flags: fetch the fence without waiting for submit. */
	ret = drm_syncobj_find_fence(file_private, handle, 0, 0, &fence);
	if (ret)
		goto err_put_fd;

	sync_file = sync_file_create(fence);

	/* presumably sync_file holds its own reference -- ours dropped here */
	dma_fence_put(fence);

	if (!sync_file) {
		ret = -EINVAL;
		goto err_put_fd;
	}

	fd_install(fd, sync_file->file);

	*p_fd = fd;
	return 0;
err_put_fd:
	/* fd was never installed, so return it rather than close it. */
	put_unused_fd(fd);
	return ret;
}
Dave Airliee9083422017-04-04 13:26:24 +1000732/**
 * drm_syncobj_open - initializes syncobj file-private structures at devnode open time
Dave Airliee9083422017-04-04 13:26:24 +1000734 * @file_private: drm file-private structure to set up
735 *
736 * Called at device open time, sets up the structure for handling refcounting
737 * of sync objects.
738 */
void
drm_syncobj_open(struct drm_file *file_private)
{
	/* Handle 0 is never handed out; allocation starts at 1. */
	idr_init_base(&file_private->syncobj_idr, 1);
	spin_lock_init(&file_private->syncobj_table_lock);
}
745
/* idr_for_each() callback: drop the idr's reference on one syncobj. */
static int
drm_syncobj_release_handle(int id, void *ptr, void *data)
{
	struct drm_syncobj *syncobj = ptr;

	drm_syncobj_put(syncobj);
	return 0;
}
754
755/**
756 * drm_syncobj_release - release file-private sync object resources
Dave Airliee9083422017-04-04 13:26:24 +1000757 * @file_private: drm file-private structure to clean up
758 *
759 * Called at close time when the filp is going away.
760 *
761 * Releases any remaining references on objects by this filp.
762 */
void
drm_syncobj_release(struct drm_file *file_private)
{
	/* Drop the reference the idr held on every remaining handle. */
	idr_for_each(&file_private->syncobj_idr,
		     &drm_syncobj_release_handle, file_private);
	idr_destroy(&file_private->syncobj_idr);
}
770
771int
772drm_syncobj_create_ioctl(struct drm_device *dev, void *data,
773 struct drm_file *file_private)
774{
775 struct drm_syncobj_create *args = data;
776
777 if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
Chris Wilson69fdf422018-09-13 20:20:50 +0100778 return -EOPNOTSUPP;
Dave Airliee9083422017-04-04 13:26:24 +1000779
780 /* no valid flags yet */
Chunming Zhou48197bc2018-10-18 14:18:36 +0800781 if (args->flags & ~(DRM_SYNCOBJ_CREATE_SIGNALED |
782 DRM_SYNCOBJ_CREATE_TYPE_TIMELINE))
Dave Airliee9083422017-04-04 13:26:24 +1000783 return -EINVAL;
784
Marek Olšák1321fd22017-09-12 22:42:12 +0200785 return drm_syncobj_create_as_handle(file_private,
786 &args->handle, args->flags);
Dave Airliee9083422017-04-04 13:26:24 +1000787}
788
789int
790drm_syncobj_destroy_ioctl(struct drm_device *dev, void *data,
791 struct drm_file *file_private)
792{
793 struct drm_syncobj_destroy *args = data;
794
795 if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
Chris Wilson69fdf422018-09-13 20:20:50 +0100796 return -EOPNOTSUPP;
Dave Airliee9083422017-04-04 13:26:24 +1000797
798 /* make sure padding is empty */
799 if (args->pad)
800 return -EINVAL;
801 return drm_syncobj_destroy(file_private, args->handle);
802}
803
804int
805drm_syncobj_handle_to_fd_ioctl(struct drm_device *dev, void *data,
806 struct drm_file *file_private)
807{
808 struct drm_syncobj_handle *args = data;
809
810 if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
Chris Wilson69fdf422018-09-13 20:20:50 +0100811 return -EOPNOTSUPP;
Dave Airliee9083422017-04-04 13:26:24 +1000812
Dave Airlie3ee45a32017-04-26 04:09:02 +0100813 if (args->pad)
Dave Airliee9083422017-04-04 13:26:24 +1000814 return -EINVAL;
815
Dave Airlie3ee45a32017-04-26 04:09:02 +0100816 if (args->flags != 0 &&
817 args->flags != DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE)
818 return -EINVAL;
819
820 if (args->flags & DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE)
821 return drm_syncobj_export_sync_file(file_private, args->handle,
822 &args->fd);
823
Dave Airliee9083422017-04-04 13:26:24 +1000824 return drm_syncobj_handle_to_fd(file_private, args->handle,
825 &args->fd);
826}
827
828int
829drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data,
830 struct drm_file *file_private)
831{
832 struct drm_syncobj_handle *args = data;
833
834 if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
Chris Wilson69fdf422018-09-13 20:20:50 +0100835 return -EOPNOTSUPP;
Dave Airliee9083422017-04-04 13:26:24 +1000836
Dave Airlie3ee45a32017-04-26 04:09:02 +0100837 if (args->pad)
Dave Airliee9083422017-04-04 13:26:24 +1000838 return -EINVAL;
839
Dave Airlie3ee45a32017-04-26 04:09:02 +0100840 if (args->flags != 0 &&
841 args->flags != DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE)
842 return -EINVAL;
843
844 if (args->flags & DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE)
845 return drm_syncobj_import_sync_file_fence(file_private,
846 args->fd,
847 args->handle);
848
Dave Airliee9083422017-04-04 13:26:24 +1000849 return drm_syncobj_fd_to_handle(file_private, args->fd,
850 &args->handle);
851}
Dave Airlie5e60a102017-08-25 10:52:22 -0700852
/*
 * Per-syncobj bookkeeping for one waiter in the array-wait path.  Either
 * fence_cb (a fence already exists) or syncobj_cb (waiting for a fence to
 * be attached) wakes @task.
 */
struct syncobj_wait_entry {
	struct task_struct *task;
	struct dma_fence *fence;
	struct dma_fence_cb fence_cb;
	struct drm_syncobj_cb syncobj_cb;
};
859
/* dma_fence callback: the fence signaled, wake the waiting task. */
static void syncobj_wait_fence_func(struct dma_fence *fence,
				    struct dma_fence_cb *cb)
{
	struct syncobj_wait_entry *wait =
		container_of(cb, struct syncobj_wait_entry, fence_cb);

	wake_up_process(wait->task);
}
868
869static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj,
870 struct drm_syncobj_cb *cb)
871{
872 struct syncobj_wait_entry *wait =
873 container_of(cb, struct syncobj_wait_entry, syncobj_cb);
874
Chunming Zhou48197bc2018-10-18 14:18:36 +0800875 drm_syncobj_search_fence(syncobj, 0, 0, &wait->fence);
876
Jason Ekstrande7aca5032017-08-25 10:52:24 -0700877 wake_up_process(wait->task);
878}
879
/*
 * Wait on an array of syncobjs until one of them signals (or all of
 * them, with DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL), the timeout expires, or
 * a signal is delivered to the task.
 *
 * @syncobjs: array of syncobjs to wait on (references held by caller)
 * @count:    number of entries in @syncobjs
 * @flags:    DRM_SYNCOBJ_WAIT_FLAGS_* bits
 * @timeout:  timeout in jiffies; 0 means poll once
 * @idx:      optional out-param, set to the index of the first signaled
 *            syncobj
 *
 * Returns the remaining timeout in jiffies (>= 0) on success, or a
 * negative error: -ENOMEM on allocation failure, -EINVAL if a syncobj
 * has no fence and WAIT_FOR_SUBMIT was not set, -ETIME if the timeout
 * expired, -ERESTARTSYS if interrupted by a signal.
 */
static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
						  uint32_t count,
						  uint32_t flags,
						  signed long timeout,
						  uint32_t *idx)
{
	struct syncobj_wait_entry *entries;
	struct dma_fence *fence;
	uint32_t signaled_count, i;

	entries = kcalloc(count, sizeof(*entries), GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

	/* Walk the list of sync objects and initialize entries.  We do
	 * this up-front so that we can properly return -EINVAL if there is
	 * a syncobj with a missing fence and then never have the chance of
	 * returning -EINVAL again.
	 */
	signaled_count = 0;
	for (i = 0; i < count; ++i) {
		entries[i].task = current;
		drm_syncobj_search_fence(syncobjs[i], 0, 0,
					 &entries[i].fence);
		if (!entries[i].fence) {
			if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
				/* No fence yet; we'll register a syncobj
				 * callback below to catch its arrival. */
				continue;
			} else {
				timeout = -EINVAL;
				goto cleanup_entries;
			}
		}

		if (dma_fence_is_signaled(entries[i].fence)) {
			/* Record the first already-signaled index. */
			if (signaled_count == 0 && idx)
				*idx = i;
			signaled_count++;
		}
	}

	/* Fast path: enough fences are already signaled to satisfy the
	 * wait (all of them, or any one for a WAIT_ANY wait). */
	if (signaled_count == count ||
	    (signaled_count > 0 &&
	     !(flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL)))
		goto cleanup_entries;

	/* There's a very annoying laxness in the dma_fence API here, in
	 * that backends are not required to automatically report when a
	 * fence is signaled prior to fence->ops->enable_signaling() being
	 * called. So here if we fail to match signaled_count, we need to
	 * fallthough and try a 0 timeout wait!
	 */

	if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
		/* Arm syncobj callbacks on every entry that still has no
		 * fence, so a later submit wakes us. */
		for (i = 0; i < count; ++i) {
			if (entries[i].fence)
				continue;

			drm_syncobj_fence_get_or_add_callback(syncobjs[i],
							      &entries[i].fence,
							      &entries[i].syncobj_cb,
							      syncobj_wait_syncobj_func);
		}
	}

	/* Main wait loop: re-scan all fences, arming a fence callback the
	 * first time each one is seen, then sleep until woken or timed out.
	 * set_current_state() before the scan avoids losing wakeups that
	 * race with the checks below. */
	do {
		set_current_state(TASK_INTERRUPTIBLE);

		signaled_count = 0;
		for (i = 0; i < count; ++i) {
			fence = entries[i].fence;
			if (!fence)
				continue;

			/* dma_fence_add_callback() returning nonzero means
			 * the fence was already signaled. */
			if (dma_fence_is_signaled(fence) ||
			    (!entries[i].fence_cb.func &&
			     dma_fence_add_callback(fence,
						    &entries[i].fence_cb,
						    syncobj_wait_fence_func))) {
				/* The fence has been signaled */
				if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL) {
					signaled_count++;
				} else {
					if (idx)
						*idx = i;
					goto done_waiting;
				}
			}
		}

		if (signaled_count == count)
			goto done_waiting;

		if (timeout == 0) {
			timeout = -ETIME;
			goto done_waiting;
		}

		if (signal_pending(current)) {
			timeout = -ERESTARTSYS;
			goto done_waiting;
		}

		timeout = schedule_timeout(timeout);
	} while (1);

done_waiting:
	__set_current_state(TASK_RUNNING);

cleanup_entries:
	/* Tear down any callbacks we registered and drop fence refs. */
	for (i = 0; i < count; ++i) {
		if (entries[i].syncobj_cb.func)
			drm_syncobj_remove_callback(syncobjs[i],
						    &entries[i].syncobj_cb);
		if (entries[i].fence_cb.func)
			dma_fence_remove_callback(entries[i].fence,
						  &entries[i].fence_cb);
		dma_fence_put(entries[i].fence);
	}
	kfree(entries);

	return timeout;
}
1002
Dave Airlie5e60a102017-08-25 10:52:22 -07001003/**
1004 * drm_timeout_abs_to_jiffies - calculate jiffies timeout from absolute value
1005 *
1006 * @timeout_nsec: timeout nsec component in ns, 0 for poll
1007 *
1008 * Calculate the timeout in jiffies from an absolute time in sec/nsec.
1009 */
1010static signed long drm_timeout_abs_to_jiffies(int64_t timeout_nsec)
1011{
1012 ktime_t abs_timeout, now;
1013 u64 timeout_ns, timeout_jiffies64;
1014
1015 /* make 0 timeout means poll - absolute 0 doesn't seem valid */
1016 if (timeout_nsec == 0)
1017 return 0;
1018
1019 abs_timeout = ns_to_ktime(timeout_nsec);
1020 now = ktime_get();
1021
1022 if (!ktime_after(abs_timeout, now))
1023 return 0;
1024
1025 timeout_ns = ktime_to_ns(ktime_sub(abs_timeout, now));
1026
1027 timeout_jiffies64 = nsecs_to_jiffies64(timeout_ns);
1028 /* clamp timeout to avoid infinite timeout */
1029 if (timeout_jiffies64 >= MAX_SCHEDULE_TIMEOUT - 1)
1030 return MAX_SCHEDULE_TIMEOUT - 1;
1031
1032 return timeout_jiffies64 + 1;
1033}
1034
Jason Ekstrande7aca5032017-08-25 10:52:24 -07001035static int drm_syncobj_array_wait(struct drm_device *dev,
1036 struct drm_file *file_private,
1037 struct drm_syncobj_wait *wait,
1038 struct drm_syncobj **syncobjs)
Dave Airlie5e60a102017-08-25 10:52:22 -07001039{
1040 signed long timeout = drm_timeout_abs_to_jiffies(wait->timeout_nsec);
Dave Airlie5e60a102017-08-25 10:52:22 -07001041 uint32_t first = ~0;
1042
Chris Wilson12fec622018-09-20 21:05:30 +01001043 timeout = drm_syncobj_array_wait_timeout(syncobjs,
1044 wait->count_handles,
1045 wait->flags,
1046 timeout, &first);
1047 if (timeout < 0)
1048 return timeout;
Dave Airlie5e60a102017-08-25 10:52:22 -07001049
1050 wait->first_signaled = first;
Dave Airlie5e60a102017-08-25 10:52:22 -07001051 return 0;
1052}
1053
Jason Ekstrand3e6fb722017-08-25 10:52:26 -07001054static int drm_syncobj_array_find(struct drm_file *file_private,
Ville Syrjälä9e554462017-09-01 19:53:26 +03001055 void __user *user_handles,
1056 uint32_t count_handles,
Jason Ekstrand3e6fb722017-08-25 10:52:26 -07001057 struct drm_syncobj ***syncobjs_out)
1058{
1059 uint32_t i, *handles;
1060 struct drm_syncobj **syncobjs;
1061 int ret;
1062
1063 handles = kmalloc_array(count_handles, sizeof(*handles), GFP_KERNEL);
1064 if (handles == NULL)
1065 return -ENOMEM;
1066
1067 if (copy_from_user(handles, user_handles,
1068 sizeof(uint32_t) * count_handles)) {
1069 ret = -EFAULT;
1070 goto err_free_handles;
1071 }
1072
1073 syncobjs = kmalloc_array(count_handles, sizeof(*syncobjs), GFP_KERNEL);
1074 if (syncobjs == NULL) {
1075 ret = -ENOMEM;
1076 goto err_free_handles;
1077 }
1078
1079 for (i = 0; i < count_handles; i++) {
1080 syncobjs[i] = drm_syncobj_find(file_private, handles[i]);
1081 if (!syncobjs[i]) {
1082 ret = -ENOENT;
1083 goto err_put_syncobjs;
1084 }
1085 }
1086
1087 kfree(handles);
1088 *syncobjs_out = syncobjs;
1089 return 0;
1090
1091err_put_syncobjs:
1092 while (i-- > 0)
1093 drm_syncobj_put(syncobjs[i]);
1094 kfree(syncobjs);
1095err_free_handles:
1096 kfree(handles);
1097
1098 return ret;
1099}
1100
1101static void drm_syncobj_array_free(struct drm_syncobj **syncobjs,
1102 uint32_t count)
1103{
1104 uint32_t i;
1105 for (i = 0; i < count; i++)
1106 drm_syncobj_put(syncobjs[i]);
1107 kfree(syncobjs);
1108}
1109
Dave Airlie5e60a102017-08-25 10:52:22 -07001110int
1111drm_syncobj_wait_ioctl(struct drm_device *dev, void *data,
1112 struct drm_file *file_private)
1113{
1114 struct drm_syncobj_wait *args = data;
Jason Ekstrande7aca5032017-08-25 10:52:24 -07001115 struct drm_syncobj **syncobjs;
Dave Airlie5e60a102017-08-25 10:52:22 -07001116 int ret = 0;
Dave Airlie5e60a102017-08-25 10:52:22 -07001117
1118 if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
Chris Wilson69fdf422018-09-13 20:20:50 +01001119 return -EOPNOTSUPP;
Dave Airlie5e60a102017-08-25 10:52:22 -07001120
Jason Ekstrande7aca5032017-08-25 10:52:24 -07001121 if (args->flags & ~(DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL |
1122 DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT))
Dave Airlie5e60a102017-08-25 10:52:22 -07001123 return -EINVAL;
1124
1125 if (args->count_handles == 0)
1126 return -EINVAL;
1127
Jason Ekstrand3e6fb722017-08-25 10:52:26 -07001128 ret = drm_syncobj_array_find(file_private,
1129 u64_to_user_ptr(args->handles),
1130 args->count_handles,
1131 &syncobjs);
1132 if (ret < 0)
1133 return ret;
Dave Airlie5e60a102017-08-25 10:52:22 -07001134
Jason Ekstrande7aca5032017-08-25 10:52:24 -07001135 ret = drm_syncobj_array_wait(dev, file_private,
1136 args, syncobjs);
Dave Airlie5e60a102017-08-25 10:52:22 -07001137
Jason Ekstrand3e6fb722017-08-25 10:52:26 -07001138 drm_syncobj_array_free(syncobjs, args->count_handles);
Dave Airlie5e60a102017-08-25 10:52:22 -07001139
1140 return ret;
1141}
Jason Ekstrandaa4035d2017-08-28 14:10:27 -07001142
1143int
1144drm_syncobj_reset_ioctl(struct drm_device *dev, void *data,
1145 struct drm_file *file_private)
1146{
1147 struct drm_syncobj_array *args = data;
1148 struct drm_syncobj **syncobjs;
1149 uint32_t i;
1150 int ret;
1151
1152 if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
Chris Wilson69fdf422018-09-13 20:20:50 +01001153 return -EOPNOTSUPP;
Jason Ekstrandaa4035d2017-08-28 14:10:27 -07001154
1155 if (args->pad != 0)
1156 return -EINVAL;
1157
1158 if (args->count_handles == 0)
1159 return -EINVAL;
1160
1161 ret = drm_syncobj_array_find(file_private,
1162 u64_to_user_ptr(args->handles),
1163 args->count_handles,
1164 &syncobjs);
1165 if (ret < 0)
1166 return ret;
1167
Chunming Zhou48197bc2018-10-18 14:18:36 +08001168 for (i = 0; i < args->count_handles; i++) {
1169 drm_syncobj_fini(syncobjs[i]);
1170 drm_syncobj_init(syncobjs[i]);
1171 }
Jason Ekstrandaa4035d2017-08-28 14:10:27 -07001172 drm_syncobj_array_free(syncobjs, args->count_handles);
1173
Chunming Zhou48197bc2018-10-18 14:18:36 +08001174 return ret;
Jason Ekstrandaa4035d2017-08-28 14:10:27 -07001175}
Jason Ekstrandffa94432017-08-28 14:10:28 -07001176
1177int
1178drm_syncobj_signal_ioctl(struct drm_device *dev, void *data,
1179 struct drm_file *file_private)
1180{
1181 struct drm_syncobj_array *args = data;
1182 struct drm_syncobj **syncobjs;
1183 uint32_t i;
1184 int ret;
1185
1186 if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
Chris Wilson69fdf422018-09-13 20:20:50 +01001187 return -EOPNOTSUPP;
Jason Ekstrandffa94432017-08-28 14:10:28 -07001188
1189 if (args->pad != 0)
1190 return -EINVAL;
1191
1192 if (args->count_handles == 0)
1193 return -EINVAL;
1194
1195 ret = drm_syncobj_array_find(file_private,
1196 u64_to_user_ptr(args->handles),
1197 args->count_handles,
1198 &syncobjs);
1199 if (ret < 0)
1200 return ret;
1201
1202 for (i = 0; i < args->count_handles; i++) {
1203 ret = drm_syncobj_assign_null_handle(syncobjs[i]);
1204 if (ret < 0)
1205 break;
1206 }
1207
1208 drm_syncobj_array_free(syncobjs, args->count_handles);
1209
1210 return ret;
1211}