blob: cd6795cb0bcb7ad9ede73cafc0ffa1da9a1dc2ca [file] [log] [blame]
Sumukh Hallymysore Ravindrafdf150a2019-01-14 14:07:57 -08001// SPDX-License-Identifier: GPL-2.0-only
2/*
Sumukh Hallymysore Ravindra709d2122020-05-20 12:53:14 +05303 * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
Sumukh Hallymysore Ravindrafdf150a2019-01-14 14:07:57 -08004 */
5#define pr_fmt(fmt) "synx: " fmt
6
7#include <linux/fs.h>
8#include <linux/module.h>
9#include <linux/poll.h>
10#include <linux/slab.h>
11#include <linux/uaccess.h>
12
Sumukh Hallymysore Ravindrafdf150a2019-01-14 14:07:57 -080013#include "synx_api.h"
14#include "synx_util.h"
Carlos Hopkins81eb3412019-02-22 13:57:54 -080015#include "synx_debugfs.h"
Sumukh Hallymysore Ravindrafdf150a2019-01-14 14:07:57 -080016
17struct synx_device *synx_dev;
18
19void synx_external_callback(s32 sync_obj, int status, void *data)
20{
Sumukh Hallymysore Ravindrab28b8b82019-07-16 16:11:05 -070021 s32 synx_obj;
Sumukh Hallymysore Ravindrafdf150a2019-01-14 14:07:57 -080022 struct synx_table_row *row = NULL;
23 struct synx_external_data *bind_data = data;
24
25 if (bind_data) {
Sumukh Hallymysore Ravindrab28b8b82019-07-16 16:11:05 -070026 synx_obj = bind_data->synx_obj;
Sumukh Hallymysore Ravindrafdf150a2019-01-14 14:07:57 -080027 row = synx_from_key(bind_data->synx_obj, bind_data->secure_key);
28 kfree(bind_data);
29 }
30
31 if (row) {
Sumukh Hallymysore Ravindrabc18e0f2019-12-12 13:51:45 -080032 mutex_lock(&synx_dev->row_locks[row->index]);
Sumukh Hallymysore Ravindrafdf150a2019-01-14 14:07:57 -080033 row->signaling_id = sync_obj;
Sumukh Hallymysore Ravindrabc18e0f2019-12-12 13:51:45 -080034 mutex_unlock(&synx_dev->row_locks[row->index]);
Sumukh Hallymysore Ravindrafdf150a2019-01-14 14:07:57 -080035
36 pr_debug("signaling synx 0x%x from external callback %d\n",
Sumukh Hallymysore Ravindrab28b8b82019-07-16 16:11:05 -070037 synx_obj, sync_obj);
Sumukh Hallymysore Ravindraeb85bfd2019-02-14 12:01:18 -080038 synx_signal_core(row, status);
Sumukh Hallymysore Ravindrafdf150a2019-01-14 14:07:57 -080039 } else {
40 pr_err("invalid callback from sync external obj %d\n",
41 sync_obj);
42 }
43}
44
/* dma_fence_ops hook: signaling is always enabled for synx fences. */
bool synx_fence_enable_signaling(struct dma_fence *fence)
{
	return true;
}
49
/* dma_fence_ops hook: driver name for synx fences (also reused as the
 * timeline name in synx_fence_ops below).
 */
const char *synx_fence_driver_name(struct dma_fence *fence)
{
	return "Global Synx driver";
}
54
/*
 * dma_fence_ops release hook: tear down the synx metadata that backs
 * the fence once its last reference is dropped.
 */
void synx_fence_release(struct dma_fence *fence)
{
	struct synx_table_row *row;

	pr_debug("Enter %s\n", __func__);

	row = synx_from_fence(fence);
	/* metadata (row) will be cleared in the deinit function */
	if (row)
		synx_deinit_object(row);

	pr_debug("Exit %s\n", __func__);
}
69
/* dma-fence operations backing every synx object's fence */
static struct dma_fence_ops synx_fence_ops = {
	.wait = dma_fence_default_wait,
	.enable_signaling = synx_fence_enable_signaling,
	.get_driver_name = synx_fence_driver_name,
	/* the driver name doubles as the timeline name */
	.get_timeline_name = synx_fence_driver_name,
	.release = synx_fence_release,
};
77
78int synx_create(s32 *synx_obj, const char *name)
79{
80 int rc;
81 long idx;
82 bool bit;
83 s32 id;
84 struct synx_table_row *row = NULL;
85
86 pr_debug("Enter %s\n", __func__);
87
88 do {
89 idx = find_first_zero_bit(synx_dev->bitmap, SYNX_MAX_OBJS);
90 if (idx >= SYNX_MAX_OBJS)
91 return -ENOMEM;
92 pr_debug("index location available at idx: %ld\n", idx);
93 bit = test_and_set_bit(idx, synx_dev->bitmap);
94 } while (bit);
95
96 /* global synx id */
97 id = synx_create_handle(synx_dev->synx_table + idx);
Sumukh Hallymysore Ravindra29edeb72019-04-12 17:50:45 -070098 if (id < 0) {
99 pr_err("unable to allocate the synx handle\n");
100 clear_bit(idx, synx_dev->bitmap);
101 return -EINVAL;
102 }
103
Sumukh Hallymysore Ravindrafdf150a2019-01-14 14:07:57 -0800104 rc = synx_init_object(synx_dev->synx_table,
105 idx, id, name, &synx_fence_ops);
Sumukh Hallymysore Ravindra29edeb72019-04-12 17:50:45 -0700106 if (rc < 0) {
Sumukh Hallymysore Ravindrafdf150a2019-01-14 14:07:57 -0800107 pr_err("unable to init row at idx = %ld\n", idx);
108 clear_bit(idx, synx_dev->bitmap);
109 return -EINVAL;
110 }
111
112 row = synx_dev->synx_table + idx;
113 rc = synx_activate(row);
114 if (rc) {
115 pr_err("unable to activate row at idx = %ld\n", idx);
116 synx_deinit_object(row);
117 return -EINVAL;
118 }
119
Sumukh Hallymysore Ravindrab28b8b82019-07-16 16:11:05 -0700120 *synx_obj = id;
Sumukh Hallymysore Ravindrafdf150a2019-01-14 14:07:57 -0800121
Jordan Crouse1d790b72019-02-14 16:08:29 -0700122 pr_debug("row: synx id: 0x%x, index: %d\n",
Sumukh Hallymysore Ravindrab28b8b82019-07-16 16:11:05 -0700123 id, row->index);
Sumukh Hallymysore Ravindrafdf150a2019-01-14 14:07:57 -0800124 pr_debug("Exit %s\n", __func__);
125
126 return rc;
127}
128
/**
 * synx_register_callback - Register a callback on a synx object.
 * @synx_obj: handle of the synx object
 * @userdata: opaque pointer handed back to the callback
 * @cb_func:  function to invoke once the object signals
 *
 * Duplicate registrations (same function and userdata) are rejected.
 * If the object is already signaled, the callback is dispatched
 * immediately through the work queue instead of being queued on the
 * row's callback list.
 *
 * Returns 0 on success, -EINVAL on bad handle/callback, -EALREADY on
 * duplicate registration, -ENOMEM on allocation failure.
 */
int synx_register_callback(s32 synx_obj,
	void *userdata, synx_callback cb_func)
{
	u32 state = SYNX_STATE_INVALID;
	struct synx_callback_info *synx_cb;
	struct synx_callback_info *temp_cb_info;
	struct synx_table_row *row = NULL;

	row = synx_from_handle(synx_obj);
	if (!row || !cb_func)
		return -EINVAL;

	mutex_lock(&synx_dev->row_locks[row->index]);

	/* sample state under the lock so the signaled check below is
	 * consistent with the list we are about to modify
	 */
	state = synx_status(row);
	/* do not register if callback registered earlier */
	list_for_each_entry(temp_cb_info, &row->callback_list, list) {
		if (temp_cb_info->callback_func == cb_func &&
			temp_cb_info->cb_data == userdata) {
			pr_err("duplicate registration for synx 0x%x\n",
				synx_obj);
			mutex_unlock(&synx_dev->row_locks[row->index]);
			return -EALREADY;
		}
	}

	synx_cb = kzalloc(sizeof(*synx_cb), GFP_KERNEL);
	if (!synx_cb) {
		mutex_unlock(&synx_dev->row_locks[row->index]);
		return -ENOMEM;
	}

	synx_cb->callback_func = cb_func;
	synx_cb->cb_data = userdata;
	synx_cb->synx_obj = synx_obj;
	INIT_WORK(&synx_cb->cb_dispatch_work, synx_util_cb_dispatch);

	/* trigger callback if synx object is already in SIGNALED state */
	if (state == SYNX_STATE_SIGNALED_SUCCESS ||
		state == SYNX_STATE_SIGNALED_ERROR) {
		synx_cb->status = state;
		pr_debug("callback triggered for synx 0x%x\n",
			synx_cb->synx_obj);
		queue_work(synx_dev->work_queue,
			&synx_cb->cb_dispatch_work);
		mutex_unlock(&synx_dev->row_locks[row->index]);
		return 0;
	}

	list_add_tail(&synx_cb->list, &row->callback_list);
	mutex_unlock(&synx_dev->row_locks[row->index]);

	return 0;
}
183
184int synx_deregister_callback(s32 synx_obj,
185 synx_callback cb_func,
186 void *userdata,
187 synx_callback cancel_cb_func)
188{
189 u32 state = SYNX_STATE_INVALID;
190 struct synx_table_row *row = NULL;
191 struct synx_callback_info *synx_cb, *temp;
192
193 row = synx_from_handle(synx_obj);
194 if (!row) {
195 pr_err("invalid synx 0x%x\n", synx_obj);
196 return -EINVAL;
197 }
198
Sumukh Hallymysore Ravindrabc18e0f2019-12-12 13:51:45 -0800199 mutex_lock(&synx_dev->row_locks[row->index]);
Sumukh Hallymysore Ravindrafdf150a2019-01-14 14:07:57 -0800200
Sumukh Hallymysore Ravindrabc18e0f2019-12-12 13:51:45 -0800201 state = synx_status(row);
Sumukh Hallymysore Ravindrafdf150a2019-01-14 14:07:57 -0800202 pr_debug("de-registering callback for synx 0x%x\n",
Sumukh Hallymysore Ravindrab28b8b82019-07-16 16:11:05 -0700203 synx_obj);
Sumukh Hallymysore Ravindrafdf150a2019-01-14 14:07:57 -0800204 list_for_each_entry_safe(synx_cb, temp, &row->callback_list, list) {
205 if (synx_cb->callback_func == cb_func &&
206 synx_cb->cb_data == userdata) {
207 list_del_init(&synx_cb->list);
208 if (cancel_cb_func) {
209 synx_cb->status = SYNX_CALLBACK_RESULT_CANCELED;
210 synx_cb->callback_func = cancel_cb_func;
211 queue_work(synx_dev->work_queue,
212 &synx_cb->cb_dispatch_work);
213 } else {
214 kfree(synx_cb);
215 }
216 }
217 }
218
Sumukh Hallymysore Ravindrabc18e0f2019-12-12 13:51:45 -0800219 mutex_unlock(&synx_dev->row_locks[row->index]);
Sumukh Hallymysore Ravindrafdf150a2019-01-14 14:07:57 -0800220 return 0;
221}
222
/**
 * synx_signal_core - Signal a synx object with the given status.
 * @row:    table row of the synx object to signal
 * @status: SYNX_STATE_SIGNALED_SUCCESS or SYNX_STATE_SIGNALED_ERROR
 *
 * Signals the backing dma-fence, dispatches registered callbacks, then
 * propagates the signal to every bound external sync object except the
 * one (if any) that triggered this signal.
 *
 * Returns the result of dma_fence_signal(), or a negative errno for
 * invalid input, a composite object, or an already-signaled object.
 */
int synx_signal_core(struct synx_table_row *row, u32 status)
{
	int rc, ret;
	u32 i = 0;
	u32 idx = 0;
	s32 sync_id;
	struct synx_external_data *data = NULL;
	struct synx_bind_desc bind_descs[SYNX_MAX_NUM_BINDINGS];
	struct bind_operations *bind_ops = NULL;

	pr_debug("Enter %s\n", __func__);

	if (!row) {
		pr_err("invalid synx row\n");
		return -EINVAL;
	}

	if (status != SYNX_STATE_SIGNALED_SUCCESS &&
		status != SYNX_STATE_SIGNALED_ERROR) {
		pr_err("signaling with undefined status = %d\n",
			status);
		return -EINVAL;
	}

	/* composite (merged) objects cannot be signaled directly */
	if (is_merged_synx(row)) {
		pr_err("signaling a composite synx object at %d\n",
			row->index);
		return -EINVAL;
	}

	mutex_lock(&synx_dev->row_locks[row->index]);

	/* a zero index marks a row that has already been cleaned up */
	if (!row->index) {
		mutex_unlock(&synx_dev->row_locks[row->index]);
		pr_err("object already cleaned up at %d\n",
			row->index);
		return -EINVAL;
	}

	if (synx_status(row) != SYNX_STATE_ACTIVE) {
		mutex_unlock(&synx_dev->row_locks[row->index]);
		pr_err("object already signaled synx at %d\n",
			row->index);
		return -EALREADY;
	}

	/* set fence error to model {signal w/ error} */
	if (status == SYNX_STATE_SIGNALED_ERROR)
		dma_fence_set_error(row->fence, -EINVAL);

	rc = dma_fence_signal(row->fence);
	if (rc < 0) {
		pr_err("unable to signal synx at %d, err: %d\n",
			row->index, rc);
		/* downgrade to error state so waiters are not left stuck */
		if (status != SYNX_STATE_SIGNALED_ERROR) {
			dma_fence_set_error(row->fence, -EINVAL);
			status = SYNX_STATE_SIGNALED_ERROR;
		}
	}

	synx_callback_dispatch(row);

	/*
	 * signal the external bound sync obj/s even if fence signal fails,
	 * w/ error signal state (set above) to prevent deadlock
	 */
	if (row->num_bound_synxs > 0) {
		/* snapshot the bindings so they can be signaled after
		 * dropping the row lock
		 */
		memset(bind_descs, 0,
			sizeof(struct synx_bind_desc) * SYNX_MAX_NUM_BINDINGS);
		for (i = 0; i < row->num_bound_synxs; i++) {
			/* signal invoked by external sync obj */
			if (row->signaling_id ==
				row->bound_synxs[i].external_desc.id[0]) {
				pr_debug("signaling_bound_sync: %d, skipping\n",
					row->signaling_id);
				memset(&row->bound_synxs[i], 0,
					sizeof(struct synx_bind_desc));
				continue;
			}
			memcpy(&bind_descs[idx++],
				&row->bound_synxs[i],
				sizeof(struct synx_bind_desc));
			/* clear the memory, its been backed up above */
			memset(&row->bound_synxs[i], 0,
				sizeof(struct synx_bind_desc));
		}
		row->num_bound_synxs = 0;
	}
	mutex_unlock(&synx_dev->row_locks[row->index]);

	/* propagate to external objects outside the row lock */
	for (i = 0; i < idx; i++) {
		sync_id = bind_descs[i].external_desc.id[0];
		data = bind_descs[i].external_data;
		bind_ops = synx_get_bind_ops(
			bind_descs[i].external_desc.type);
		if (!bind_ops) {
			pr_err("invalid bind ops for %u\n",
				bind_descs[i].external_desc.type);
			kfree(data);
			continue;
		}
		/*
		 * we are already signaled, so don't want to
		 * recursively be signaled
		 */
		ret = bind_ops->deregister_callback(
			synx_external_callback, data, sync_id);
		if (ret < 0)
			pr_err("de-registration fail on sync: %d, err: %d\n",
				sync_id, ret);
		pr_debug("signaling external sync: %d, status: %u\n",
			sync_id, status);
		/* optional function to enable external signaling */
		if (bind_ops->enable_signaling) {
			ret = bind_ops->enable_signaling(sync_id);
			if (ret < 0) {
				pr_err("enable signaling fail on sync: %d, err: %d\n",
					sync_id, ret);
				continue;
			}
		}

		ret = bind_ops->signal(sync_id, status);
		if (ret < 0)
			pr_err("signaling fail on sync: %d, err: %d\n",
				sync_id, ret);

		/*
		 * release the memory allocated for external data.
		 * It is safe to release this memory as external cb
		 * has been already deregistered before this.
		 */
		kfree(data);
	}

	pr_debug("Exit %s\n", __func__);
	return rc;
}
361
Sumukh Hallymysore Ravindraeb85bfd2019-02-14 12:01:18 -0800362int synx_signal(s32 synx_obj, u32 status)
363{
364 struct synx_table_row *row = NULL;
365
366 row = synx_from_handle(synx_obj);
367 if (!row) {
368 pr_err("invalid synx: 0x%x\n", synx_obj);
369 return -EINVAL;
370 }
371
372 return synx_signal_core(row, status);
373}
374
Sumukh Hallymysore Ravindrafdf150a2019-01-14 14:07:57 -0800375int synx_merge(s32 *synx_objs, u32 num_objs, s32 *synx_merged)
376{
377 int rc;
378 long idx = 0;
379 bool bit;
380 s32 id;
381 u32 count = 0;
382 struct dma_fence **fences = NULL;
383 struct synx_table_row *row = NULL;
384
385 pr_debug("Enter %s\n", __func__);
386
387 if (!synx_objs || !synx_merged) {
388 pr_err("invalid pointer(s)\n");
389 return -EINVAL;
390 }
391
392 rc = synx_util_validate_merge(synx_objs, num_objs, &fences, &count);
393 if (rc < 0) {
394 pr_err("validation failed, merge not allowed\n");
395 rc = -EINVAL;
396 goto free;
397 }
398
399 do {
400 idx = find_first_zero_bit(synx_dev->bitmap, SYNX_MAX_OBJS);
401 if (idx >= SYNX_MAX_OBJS) {
402 rc = -ENOMEM;
403 goto free;
404 }
405 bit = test_and_set_bit(idx, synx_dev->bitmap);
406 } while (bit);
407
408 /* global synx id */
409 id = synx_create_handle(synx_dev->synx_table + idx);
410
411 rc = synx_init_group_object(synx_dev->synx_table,
412 idx, id, fences, count);
413 if (rc < 0) {
414 pr_err("unable to init row at idx = %ld\n", idx);
415 goto clear;
416 }
417
418 row = synx_dev->synx_table + idx;
419 rc = synx_activate(row);
420 if (rc) {
421 pr_err("unable to activate row at idx = %ld, synx 0x%x\n",
422 idx, id);
423 goto clear;
424 }
425
Sumukh Hallymysore Ravindrab28b8b82019-07-16 16:11:05 -0700426 *synx_merged = id;
Sumukh Hallymysore Ravindrafdf150a2019-01-14 14:07:57 -0800427
Jordan Crouse1d790b72019-02-14 16:08:29 -0700428 pr_debug("row (merged): synx 0x%x, index: %d\n",
Sumukh Hallymysore Ravindrab28b8b82019-07-16 16:11:05 -0700429 id, row->index);
Sumukh Hallymysore Ravindrafdf150a2019-01-14 14:07:57 -0800430 pr_debug("Exit %s\n", __func__);
431
432 return 0;
433
434clear:
435 clear_bit(idx, synx_dev->bitmap);
436free:
437 synx_merge_error(synx_objs, count);
438 if (num_objs <= count)
439 kfree(fences);
440 return rc;
441}
442
/**
 * synx_release_core - Drop one reference on a synx object.
 * @row: table row of the object
 *
 * Returns 0 on success, -EINVAL if the row was already cleaned up.
 */
static int synx_release_core(struct synx_table_row *row)
{
	s32 idx;
	struct dma_fence *fence = NULL;

	/*
	 * metadata might be cleared after invoking dma_fence_put
	 * (definitely for merged synx on invoking deinit)
	 * be careful while accessing the metadata
	 */
	mutex_lock(&synx_dev->row_locks[row->index]);
	/* cache fence and index: row may be invalid after deinit below */
	fence = row->fence;
	idx = row->index;
	if (!idx) {
		mutex_unlock(&synx_dev->row_locks[idx]);
		pr_err("object already cleaned up at %d\n", idx);
		return -EINVAL;
	}
	/*
	 * we need to clear the metadata for merged synx obj upon synx_release
	 * itself as it does not invoke the synx_fence_release function.
	 * See synx_export for more explanation.
	 */
	if (is_merged_synx(row))
		synx_deinit_object(row);

	/* do not reference fence and row in the function after this */
	dma_fence_put(fence);
	mutex_unlock(&synx_dev->row_locks[idx]);
	pr_debug("Exit %s\n", __func__);

	return 0;
}
476
Sumukh Hallymysore Ravindrab28b8b82019-07-16 16:11:05 -0700477int synx_release(s32 synx_obj)
478{
479 struct synx_table_row *row = NULL;
480
481 pr_debug("Enter %s\n", __func__);
482
483 row = synx_from_handle(synx_obj);
484 if (!row) {
485 pr_err("invalid synx: 0x%x\n", synx_obj);
486 return -EINVAL;
487 }
488
489 return synx_release_core(row);
490}
491
Sumukh Hallymysore Ravindrafdf150a2019-01-14 14:07:57 -0800492int synx_wait(s32 synx_obj, u64 timeout_ms)
493{
494 unsigned long timeleft;
495 struct synx_table_row *row = NULL;
496
497 pr_debug("Enter %s\n", __func__);
498
499 row = synx_from_handle(synx_obj);
500 if (!row) {
501 pr_err("invalid synx: 0x%x\n", synx_obj);
502 return -EINVAL;
503 }
504
Sumukh Hallymysore Ravindrabc18e0f2019-12-12 13:51:45 -0800505 mutex_lock(&synx_dev->row_locks[row->index]);
Sumukh Hallymysore Ravindrabd281fb2019-09-25 13:33:08 -0700506 if (!row->index) {
Sumukh Hallymysore Ravindrabc18e0f2019-12-12 13:51:45 -0800507 mutex_unlock(&synx_dev->row_locks[row->index]);
Sumukh Hallymysore Ravindrabd281fb2019-09-25 13:33:08 -0700508 pr_err("object already cleaned up at %d\n",
509 row->index);
510 return -EINVAL;
511 }
Sumukh Hallymysore Ravindrabc18e0f2019-12-12 13:51:45 -0800512 mutex_unlock(&synx_dev->row_locks[row->index]);
Sumukh Hallymysore Ravindrabd281fb2019-09-25 13:33:08 -0700513
Sumukh Hallymysore Ravindrafdf150a2019-01-14 14:07:57 -0800514 timeleft = dma_fence_wait_timeout(row->fence, (bool) 0,
515 msecs_to_jiffies(timeout_ms));
516 if (timeleft <= 0) {
517 pr_err("timed out for synx obj 0x%x\n", synx_obj);
518 return -ETIMEDOUT;
519 }
520
521 if (synx_status(row) != SYNX_STATE_SIGNALED_SUCCESS) {
522 pr_err("signaled error on synx obj 0x%x\n", synx_obj);
523 return -EINVAL;
524 }
525
526 pr_debug("Exit %s\n", __func__);
527
528 return 0;
529}
530
531int synx_bind(s32 synx_obj, struct synx_external_desc external_sync)
532{
533 int rc = 0;
534 u32 i = 0;
535 struct synx_table_row *row = NULL;
536 struct synx_external_data *data = NULL;
537 struct bind_operations *bind_ops = NULL;
538
539 pr_debug("Enter %s\n", __func__);
540
541 row = (struct synx_table_row *)synx_from_handle(synx_obj);
542 if (!row) {
543 pr_err("invalid synx: 0x%x\n", synx_obj);
544 return -EINVAL;
545 }
546
547 if (is_merged_synx(row)) {
548 pr_err("cannot bind to merged fence: 0x%x\n", synx_obj);
549 return -EINVAL;
550 }
551
Sumukh Hallymysore Ravindra6b7d0252019-06-12 14:05:18 -0700552 bind_ops = synx_get_bind_ops(external_sync.type);
553 if (!bind_ops) {
554 pr_err("invalid bind ops for %u\n",
555 external_sync.type);
Sumukh Hallymysore Ravindrafdf150a2019-01-14 14:07:57 -0800556 return -EINVAL;
557 }
558
559 data = kzalloc(sizeof(*data), GFP_KERNEL);
560 if (!data)
561 return -ENOMEM;
562
Sumukh Hallymysore Ravindrabc18e0f2019-12-12 13:51:45 -0800563 mutex_lock(&synx_dev->row_locks[row->index]);
564 if (synx_status(row) != SYNX_STATE_ACTIVE) {
Sumukh Hallymysore Ravindrafdf150a2019-01-14 14:07:57 -0800565 pr_err("bind to non-active synx is prohibited 0x%x\n",
566 synx_obj);
Sumukh Hallymysore Ravindrabc18e0f2019-12-12 13:51:45 -0800567 mutex_unlock(&synx_dev->row_locks[row->index]);
Sumukh Hallymysore Ravindrafdf150a2019-01-14 14:07:57 -0800568 kfree(data);
569 return -EINVAL;
570 }
571
572 if (row->num_bound_synxs >= SYNX_MAX_NUM_BINDINGS) {
573 pr_err("max number of bindings reached for synx_objs 0x%x\n",
574 synx_obj);
Sumukh Hallymysore Ravindrabc18e0f2019-12-12 13:51:45 -0800575 mutex_unlock(&synx_dev->row_locks[row->index]);
Sumukh Hallymysore Ravindrafdf150a2019-01-14 14:07:57 -0800576 kfree(data);
577 return -ENOMEM;
578 }
579
580 /* don't bind external sync obj is already done */
581 for (i = 0; i < row->num_bound_synxs; i++) {
582 if (external_sync.id[0] ==
583 row->bound_synxs[i].external_desc.id[0]) {
584 pr_err("duplicate binding for external sync %d\n",
585 external_sync.id[0]);
Sumukh Hallymysore Ravindrabc18e0f2019-12-12 13:51:45 -0800586 mutex_unlock(&synx_dev->row_locks[row->index]);
Sumukh Hallymysore Ravindrafdf150a2019-01-14 14:07:57 -0800587 kfree(data);
588 return -EALREADY;
589 }
590 }
591
Sumukh Hallymysore Ravindrafdf150a2019-01-14 14:07:57 -0800592 /* data passed to external callback */
Sumukh Hallymysore Ravindrab28b8b82019-07-16 16:11:05 -0700593 data->synx_obj = synx_obj;
Sumukh Hallymysore Ravindrafdf150a2019-01-14 14:07:57 -0800594 data->secure_key = synx_generate_secure_key(row);
595
596 rc = bind_ops->register_callback(synx_external_callback,
597 data, external_sync.id[0]);
598 if (rc < 0) {
599 pr_err("callback registration failed for %d\n",
600 external_sync.id[0]);
Sumukh Hallymysore Ravindrabc18e0f2019-12-12 13:51:45 -0800601 mutex_unlock(&synx_dev->row_locks[row->index]);
Sumukh Hallymysore Ravindrafdf150a2019-01-14 14:07:57 -0800602 kfree(data);
603 return rc;
604 }
605
606 memcpy(&row->bound_synxs[row->num_bound_synxs],
607 &external_sync, sizeof(struct synx_external_desc));
608 row->bound_synxs[row->num_bound_synxs].external_data = data;
609 row->num_bound_synxs = row->num_bound_synxs + 1;
Sumukh Hallymysore Ravindrabc18e0f2019-12-12 13:51:45 -0800610 mutex_unlock(&synx_dev->row_locks[row->index]);
Sumukh Hallymysore Ravindrafdf150a2019-01-14 14:07:57 -0800611
612 pr_debug("added external sync %d to bindings of 0x%x\n",
613 external_sync.id[0], synx_obj);
614
615 pr_debug("Exit %s\n", __func__);
616 return rc;
617}
618
619int synx_get_status(s32 synx_obj)
620{
621 struct synx_table_row *row = NULL;
622
623 pr_debug("getting the status for synx 0x%x\n", synx_obj);
624
625 row = (struct synx_table_row *)synx_from_handle(synx_obj);
626 if (!row) {
627 pr_err("invalid synx: 0x%x\n", synx_obj);
628 return SYNX_STATE_INVALID;
629 }
630
631 return synx_status(row);
632}
633
634int synx_addrefcount(s32 synx_obj, s32 count)
635{
636 struct synx_table_row *row = NULL;
637
638 row = synx_from_handle(synx_obj);
639 if (!row) {
640 pr_err("invalid synx: 0x%x\n", synx_obj);
641 return -EINVAL;
642 }
643
644 if ((count < 0) || (count > SYNX_MAX_REF_COUNTS)) {
645 pr_err("invalid count, consider reducing : 0x%x\n",
646 synx_obj);
647 return -EINVAL;
648 }
649
Sumukh Hallymysore Ravindrabc18e0f2019-12-12 13:51:45 -0800650 mutex_lock(&synx_dev->row_locks[row->index]);
Sumukh Hallymysore Ravindrafdf150a2019-01-14 14:07:57 -0800651 while (count--)
652 dma_fence_get(row->fence);
Sumukh Hallymysore Ravindrabc18e0f2019-12-12 13:51:45 -0800653 mutex_unlock(&synx_dev->row_locks[row->index]);
Sumukh Hallymysore Ravindrafdf150a2019-01-14 14:07:57 -0800654
655 return 0;
656}
657
Sumukh Hallymysore Ravindra9e0b6972019-06-24 14:36:34 -0700658int synx_import(s32 synx_obj, u32 import_key, s32 *new_synx_obj)
Sumukh Hallymysore Ravindrafdf150a2019-01-14 14:07:57 -0800659{
Sumukh Hallymysore Ravindrafdf150a2019-01-14 14:07:57 -0800660 s32 id;
Sumukh Hallymysore Ravindra9e0b6972019-06-24 14:36:34 -0700661 struct dma_fence *fence;
Sumukh Hallymysore Ravindrab28b8b82019-07-16 16:11:05 -0700662 struct synx_obj_node *obj_node;
Sumukh Hallymysore Ravindrafdf150a2019-01-14 14:07:57 -0800663 struct synx_table_row *row = NULL;
Sumukh Hallymysore Ravindrabc18e0f2019-12-12 13:51:45 -0800664 u32 index;
Sumukh Hallymysore Ravindrafdf150a2019-01-14 14:07:57 -0800665
666 pr_debug("Enter %s\n", __func__);
667
Sumukh Hallymysore Ravindra17d08162019-02-20 14:00:47 -0800668 if (!new_synx_obj)
Sumukh Hallymysore Ravindrafdf150a2019-01-14 14:07:57 -0800669 return -EINVAL;
670
Sumukh Hallymysore Ravindra9e0b6972019-06-24 14:36:34 -0700671 row = synx_from_import_key(synx_obj, import_key);
Sumukh Hallymysore Ravindrafdf150a2019-01-14 14:07:57 -0800672 if (!row)
673 return -EINVAL;
674
Sumukh Hallymysore Ravindrab28b8b82019-07-16 16:11:05 -0700675 obj_node = kzalloc(sizeof(*obj_node), GFP_KERNEL);
676 if (!obj_node)
677 return -ENOMEM;
678
Sumukh Hallymysore Ravindrabc18e0f2019-12-12 13:51:45 -0800679 mutex_lock(&synx_dev->row_locks[row->index]);
Sumukh Hallymysore Ravindrabd281fb2019-09-25 13:33:08 -0700680 if (!row->index) {
Sumukh Hallymysore Ravindrabc18e0f2019-12-12 13:51:45 -0800681 mutex_unlock(&synx_dev->row_locks[row->index]);
Sumukh Hallymysore Ravindrabd281fb2019-09-25 13:33:08 -0700682 pr_err("object already cleaned up at %d\n",
683 row->index);
684 kfree(obj_node);
685 return -EINVAL;
686 }
Sumukh Hallymysore Ravindrabc18e0f2019-12-12 13:51:45 -0800687
688 /* new global synx id */
689 id = synx_create_handle(row);
690 if (id < 0) {
691 fence = row->fence;
692 index = row->index;
693 if (is_merged_synx(row)) {
694 memset(row, 0, sizeof(*row));
695 clear_bit(index, synx_dev->bitmap);
696 mutex_unlock(&synx_dev->row_locks[index]);
697 }
698 /* release the reference obtained during export */
699 dma_fence_put(fence);
700 kfree(obj_node);
701 pr_err("error creating handle for import\n");
702 return -EINVAL;
703 }
704
Sumukh Hallymysore Ravindrab28b8b82019-07-16 16:11:05 -0700705 obj_node->synx_obj = id;
706 list_add(&obj_node->list, &row->synx_obj_list);
Sumukh Hallymysore Ravindrabc18e0f2019-12-12 13:51:45 -0800707 mutex_unlock(&synx_dev->row_locks[row->index]);
Sumukh Hallymysore Ravindrab28b8b82019-07-16 16:11:05 -0700708
Sumukh Hallymysore Ravindrafdf150a2019-01-14 14:07:57 -0800709 *new_synx_obj = id;
710 pr_debug("Exit %s\n", __func__);
711
712 return 0;
713}
714
Sumukh Hallymysore Ravindra9e0b6972019-06-24 14:36:34 -0700715int synx_export(s32 synx_obj, u32 *import_key)
Sumukh Hallymysore Ravindrafdf150a2019-01-14 14:07:57 -0800716{
Sumukh Hallymysore Ravindra9e0b6972019-06-24 14:36:34 -0700717 int rc;
Sumukh Hallymysore Ravindrafdf150a2019-01-14 14:07:57 -0800718 struct synx_table_row *row = NULL;
719
Sumukh Hallymysore Ravindra9e0b6972019-06-24 14:36:34 -0700720 pr_debug("Enter %s\n", __func__);
721
Sumukh Hallymysore Ravindrafdf150a2019-01-14 14:07:57 -0800722 row = synx_from_handle(synx_obj);
723 if (!row)
724 return -EINVAL;
725
Sumukh Hallymysore Ravindrab28b8b82019-07-16 16:11:05 -0700726 rc = synx_generate_import_key(row, synx_obj, import_key);
Sumukh Hallymysore Ravindra9e0b6972019-06-24 14:36:34 -0700727 if (rc < 0)
728 return rc;
Sumukh Hallymysore Ravindrafdf150a2019-01-14 14:07:57 -0800729
Sumukh Hallymysore Ravindrabc18e0f2019-12-12 13:51:45 -0800730 mutex_lock(&synx_dev->row_locks[row->index]);
Sumukh Hallymysore Ravindrafdf150a2019-01-14 14:07:57 -0800731 /*
732 * to make sure the synx is not lost if the process dies or
733 * synx is released before any other process gets a chance to
734 * import it. The assumption is that an import will match this
735 * and account for the extra reference. Otherwise, this will
736 * be a dangling reference and needs to be garbage collected.
737 */
738 dma_fence_get(row->fence);
Sumukh Hallymysore Ravindrabc18e0f2019-12-12 13:51:45 -0800739 mutex_unlock(&synx_dev->row_locks[row->index]);
Sumukh Hallymysore Ravindra9e0b6972019-06-24 14:36:34 -0700740 pr_debug("Exit %s\n", __func__);
Sumukh Hallymysore Ravindrafdf150a2019-01-14 14:07:57 -0800741
742 return 0;
743}
744
745
746static int synx_handle_create(struct synx_private_ioctl_arg *k_ioctl)
747{
748 struct synx_info synx_create_info;
749 int result;
750
751 if (k_ioctl->size != sizeof(synx_create_info))
752 return -EINVAL;
753
754 if (copy_from_user(&synx_create_info,
755 u64_to_user_ptr(k_ioctl->ioctl_ptr),
756 k_ioctl->size))
757 return -EFAULT;
758
759 result = synx_create(&synx_create_info.synx_obj,
760 synx_create_info.name);
761
762 if (!result)
763 if (copy_to_user(u64_to_user_ptr(k_ioctl->ioctl_ptr),
764 &synx_create_info,
765 k_ioctl->size))
766 return -EFAULT;
767
768 return result;
769}
770
771static int synx_handle_getstatus(struct synx_private_ioctl_arg *k_ioctl)
772{
773 struct synx_signal synx_status;
774
775 if (k_ioctl->size != sizeof(synx_status))
776 return -EINVAL;
777
778 if (copy_from_user(&synx_status,
779 u64_to_user_ptr(k_ioctl->ioctl_ptr),
780 k_ioctl->size))
781 return -EFAULT;
782
783 synx_status.synx_state = synx_get_status(synx_status.synx_obj);
784
785 if (copy_to_user(u64_to_user_ptr(k_ioctl->ioctl_ptr),
786 &synx_status,
787 k_ioctl->size))
788 return -EFAULT;
789
790 return 0;
791}
792
793static int synx_handle_import(struct synx_private_ioctl_arg *k_ioctl)
794{
795 struct synx_id_info id_info;
796
797 if (k_ioctl->size != sizeof(id_info))
798 return -EINVAL;
799
800 if (copy_from_user(&id_info,
801 u64_to_user_ptr(k_ioctl->ioctl_ptr),
802 k_ioctl->size))
803 return -EFAULT;
804
805 if (synx_import(id_info.synx_obj, id_info.secure_key,
806 &id_info.new_synx_obj))
807 return -EINVAL;
808
809 if (copy_to_user(u64_to_user_ptr(k_ioctl->ioctl_ptr),
810 &id_info,
811 k_ioctl->size))
812 return -EFAULT;
813
814 return 0;
815}
816
817static int synx_handle_export(struct synx_private_ioctl_arg *k_ioctl)
818{
819 struct synx_id_info id_info;
820
821 if (k_ioctl->size != sizeof(id_info))
822 return -EINVAL;
823
824 if (copy_from_user(&id_info,
825 u64_to_user_ptr(k_ioctl->ioctl_ptr),
826 k_ioctl->size))
827 return -EFAULT;
828
829 if (synx_export(id_info.synx_obj, &id_info.secure_key))
830 return -EINVAL;
831
832 if (copy_to_user(u64_to_user_ptr(k_ioctl->ioctl_ptr),
833 &id_info,
834 k_ioctl->size))
835 return -EFAULT;
836
837 return 0;
838}
839
840static int synx_handle_signal(struct synx_private_ioctl_arg *k_ioctl)
841{
842 struct synx_signal synx_signal_info;
843
844 if (k_ioctl->size != sizeof(synx_signal_info))
845 return -EINVAL;
846
847 if (copy_from_user(&synx_signal_info,
848 u64_to_user_ptr(k_ioctl->ioctl_ptr),
849 k_ioctl->size))
850 return -EFAULT;
851
852 return synx_signal(synx_signal_info.synx_obj,
853 synx_signal_info.synx_state);
854}
855
856static int synx_handle_merge(struct synx_private_ioctl_arg *k_ioctl)
857{
858 struct synx_merge synx_merge_info;
859 s32 *synx_objs;
860 u32 num_objs;
861 u32 size;
862 int result;
863
864 if (k_ioctl->size != sizeof(synx_merge_info))
865 return -EINVAL;
866
867 if (copy_from_user(&synx_merge_info,
868 u64_to_user_ptr(k_ioctl->ioctl_ptr),
869 k_ioctl->size))
870 return -EFAULT;
871
872 if (synx_merge_info.num_objs >= SYNX_MAX_OBJS)
873 return -EINVAL;
874
875 size = sizeof(u32) * synx_merge_info.num_objs;
876 synx_objs = kcalloc(synx_merge_info.num_objs,
877 sizeof(*synx_objs), GFP_KERNEL);
878 if (!synx_objs)
879 return -ENOMEM;
880
881 if (copy_from_user(synx_objs,
882 u64_to_user_ptr(synx_merge_info.synx_objs),
883 sizeof(u32) * synx_merge_info.num_objs)) {
884 kfree(synx_objs);
885 return -EFAULT;
886 }
887
888 num_objs = synx_merge_info.num_objs;
889
890 result = synx_merge(synx_objs,
891 num_objs,
892 &synx_merge_info.merged);
893
894 if (!result)
895 if (copy_to_user(u64_to_user_ptr(k_ioctl->ioctl_ptr),
896 &synx_merge_info,
897 k_ioctl->size)) {
898 kfree(synx_objs);
899 return -EFAULT;
900 }
901
902 kfree(synx_objs);
903
904 return result;
905}
906
907static int synx_handle_wait(struct synx_private_ioctl_arg *k_ioctl)
908{
909 struct synx_wait synx_wait_info;
910
911 if (k_ioctl->size != sizeof(synx_wait_info))
912 return -EINVAL;
913
914 if (copy_from_user(&synx_wait_info,
915 u64_to_user_ptr(k_ioctl->ioctl_ptr),
916 k_ioctl->size))
917 return -EFAULT;
918
919 k_ioctl->result = synx_wait(synx_wait_info.synx_obj,
920 synx_wait_info.timeout_ms);
921
922 return 0;
923}
924
/*
 * synx_handle_register_user_payload - register a user-space callback
 * payload against a synx object.
 *
 * If the object is already signaled, the payload is queued directly on
 * the client's event queue and the client's waitqueue is woken, so the
 * caller sees the completion immediately via read()/poll(). Otherwise
 * the payload is appended to the object's payload list and delivered
 * when the object is signaled.
 *
 * Duplicate registrations (same first two payload words) on the same
 * object are rejected with -EALREADY.
 *
 * @k_ioctl: ioctl argument carrying a struct synx_userpayload_info
 * @client:  session of the calling process
 *
 * Returns 0 on success or a negative error code.
 */
static int synx_handle_register_user_payload(
	struct synx_private_ioctl_arg *k_ioctl,
	struct synx_client *client)
{
	s32 synx_obj;
	u32 state = SYNX_STATE_INVALID;
	struct synx_userpayload_info userpayload_info;
	struct synx_cb_data *user_payload_kernel;
	struct synx_cb_data *user_payload_iter, *temp;
	struct synx_table_row *row = NULL;

	pr_debug("Enter %s\n", __func__);

	if (k_ioctl->size != sizeof(userpayload_info))
		return -EINVAL;

	if (copy_from_user(&userpayload_info,
		u64_to_user_ptr(k_ioctl->ioctl_ptr),
		k_ioctl->size))
		return -EFAULT;

	synx_obj = userpayload_info.synx_obj;
	row = synx_from_handle(synx_obj);
	if (!row) {
		pr_err("invalid synx: 0x%x\n", synx_obj);
		return -EINVAL;
	}

	if (!client) {
		pr_err("invalid client for process %d\n", current->pid);
		return -EINVAL;
	}

	/* allocate and fill the payload before taking the row lock */
	user_payload_kernel = kzalloc(sizeof(*user_payload_kernel), GFP_KERNEL);
	if (!user_payload_kernel)
		return -ENOMEM;

	user_payload_kernel->client = client;
	user_payload_kernel->data.synx_obj = synx_obj;
	memcpy(user_payload_kernel->data.payload_data,
		userpayload_info.payload,
		SYNX_PAYLOAD_WORDS * sizeof(__u64));

	/* lock order: row lock first, then client eventq lock */
	mutex_lock(&synx_dev->row_locks[row->index]);

	state = synx_status(row);
	if (state == SYNX_STATE_SIGNALED_SUCCESS ||
		state == SYNX_STATE_SIGNALED_ERROR) {
		/* already signaled: deliver the event right away */
		user_payload_kernel->data.status = state;
		mutex_lock(&client->eventq_lock);
		list_add_tail(&user_payload_kernel->list, &client->eventq);
		mutex_unlock(&client->eventq_lock);
		mutex_unlock(&synx_dev->row_locks[row->index]);
		wake_up_all(&client->wq);
		return 0;
	}

	/* reject duplicates keyed on the first two payload words */
	list_for_each_entry_safe(user_payload_iter,
		temp, &row->user_payload_list, list) {
		if (user_payload_iter->data.payload_data[0] ==
			user_payload_kernel->data.payload_data[0] &&
			user_payload_iter->data.payload_data[1] ==
			user_payload_kernel->data.payload_data[1]) {
			pr_err("callback already registered on 0x%x\n",
				synx_obj);
			mutex_unlock(&synx_dev->row_locks[row->index]);
			kfree(user_payload_kernel);
			return -EALREADY;
		}
	}

	/* still active: queue payload for delivery at signal time */
	list_add_tail(&user_payload_kernel->list, &row->user_payload_list);
	mutex_unlock(&synx_dev->row_locks[row->index]);

	pr_debug("Exit %s\n", __func__);
	return 0;
}
1002
/*
 * synx_handle_deregister_user_payload - remove a previously registered
 * user callback payload from a synx object.
 *
 * Matches on the first two payload words and unlinks/frees the entry if
 * found. If the third payload word is non-zero, a cancellation event is
 * queued on the client's event queue (status
 * SYNX_CALLBACK_RESULT_CANCELED) and the client is woken, letting user
 * space observe that its callback was cancelled rather than fired.
 *
 * @k_ioctl: ioctl argument carrying a struct synx_userpayload_info
 * @client:  session of the calling process
 *
 * Returns 0 on success or a negative error code. Note that no error is
 * returned when no matching payload exists.
 */
static int synx_handle_deregister_user_payload(
	struct synx_private_ioctl_arg *k_ioctl,
	struct synx_client *client)
{
	s32 synx_obj;
	u32 state = SYNX_STATE_INVALID;
	struct synx_userpayload_info userpayload_info;
	struct synx_cb_data *user_payload_kernel, *temp;
	struct synx_table_row *row = NULL;
	struct synx_user_payload *data = NULL;
	u32 match_found = 0;

	pr_debug("Enter %s\n", __func__);
	if (k_ioctl->size != sizeof(userpayload_info))
		return -EINVAL;

	if (copy_from_user(&userpayload_info,
		u64_to_user_ptr(k_ioctl->ioctl_ptr),
		k_ioctl->size))
		return -EFAULT;

	synx_obj = userpayload_info.synx_obj;
	row = synx_from_handle(synx_obj);
	if (!row) {
		pr_err("invalid synx: 0x%x\n", synx_obj);
		return -EINVAL;
	}

	if (!client) {
		pr_err("invalid client for process %d\n", current->pid);
		return -EINVAL;
	}

	mutex_lock(&synx_dev->row_locks[row->index]);

	/*
	 * NOTE(review): 'state' is assigned here but never consulted
	 * afterwards — looks vestigial; confirm synx_status_locked() has
	 * no required side effect before removing.
	 */
	state = synx_status_locked(row);
	/* unlink the entry matching the first two payload words */
	list_for_each_entry_safe(user_payload_kernel, temp,
		&row->user_payload_list, list) {
		if (user_payload_kernel->data.payload_data[0] ==
			userpayload_info.payload[0] &&
			user_payload_kernel->data.payload_data[1] ==
			userpayload_info.payload[1]) {
			list_del_init(&user_payload_kernel->list);
			match_found = 1;
			pr_debug("registered callback removed\n");
			break;
		}
	}

	mutex_unlock(&synx_dev->row_locks[row->index]);

	/* free outside the row lock */
	if (match_found)
		kfree(user_payload_kernel);

	/* registration of cancellation cb */
	if (userpayload_info.payload[2] != 0) {
		user_payload_kernel = kzalloc(sizeof(
			*user_payload_kernel),
			GFP_KERNEL);
		if (!user_payload_kernel)
			return -ENOMEM;

		data = &user_payload_kernel->data;
		memcpy(data->payload_data,
			userpayload_info.payload,
			SYNX_PAYLOAD_WORDS * sizeof(__u64));

		user_payload_kernel->client = client;
		data->synx_obj = synx_obj;
		data->status = SYNX_CALLBACK_RESULT_CANCELED;

		/* deliver the cancellation event to the client */
		mutex_lock(&client->eventq_lock);
		list_add_tail(&user_payload_kernel->list, &client->eventq);
		mutex_unlock(&client->eventq_lock);
		pr_debug("registered cancellation callback\n");
		wake_up_all(&client->wq);
	}

	pr_debug("Exit %s\n", __func__);
	return 0;
}
1084
1085static int synx_handle_bind(struct synx_private_ioctl_arg *k_ioctl)
1086{
1087 struct synx_bind synx_bind_info;
1088
1089 if (k_ioctl->size != sizeof(synx_bind_info))
1090 return -EINVAL;
1091
1092 if (copy_from_user(&synx_bind_info,
1093 u64_to_user_ptr(k_ioctl->ioctl_ptr),
1094 k_ioctl->size))
1095 return -EFAULT;
1096
Jordan Crouse1d790b72019-02-14 16:08:29 -07001097 pr_debug("calling synx_bind: 0x%x\n", synx_bind_info.synx_obj);
Sumukh Hallymysore Ravindrafdf150a2019-01-14 14:07:57 -08001098 k_ioctl->result = synx_bind(synx_bind_info.synx_obj,
1099 synx_bind_info.ext_sync_desc);
1100
1101 return k_ioctl->result;
1102}
1103
1104static int synx_handle_addrefcount(struct synx_private_ioctl_arg *k_ioctl)
1105{
1106 struct synx_addrefcount addrefcount_info;
1107
1108 if (k_ioctl->size != sizeof(addrefcount_info))
1109 return -EINVAL;
1110
1111 if (copy_from_user(&addrefcount_info,
1112 u64_to_user_ptr(k_ioctl->ioctl_ptr),
1113 k_ioctl->size))
1114 return -EFAULT;
1115
1116 pr_debug("calling synx_addrefcount: 0x%x, %d\n",
1117 addrefcount_info.synx_obj, addrefcount_info.count);
1118 k_ioctl->result = synx_addrefcount(addrefcount_info.synx_obj,
1119 addrefcount_info.count);
1120
1121 return k_ioctl->result;
1122}
1123
1124static int synx_handle_release(struct synx_private_ioctl_arg *k_ioctl)
1125{
1126 struct synx_info info;
1127
1128 if (k_ioctl->size != sizeof(info))
1129 return -EINVAL;
1130
1131 if (copy_from_user(&info,
1132 u64_to_user_ptr(k_ioctl->ioctl_ptr),
1133 k_ioctl->size))
1134 return -EFAULT;
1135
1136 return synx_release(info.synx_obj);
1137}
1138
1139static struct synx_device *get_synx_device(struct file *filep)
1140{
1141 struct synx_client *client = filep->private_data;
1142
1143 return client->device;
1144}
1145
/*
 * synx_ioctl - single entry point for all synx private ioctls.
 *
 * Validates the command, copies the common struct
 * synx_private_ioctl_arg header from user space and dispatches to the
 * per-command handler based on k_ioctl.id. Only SYNX_WAIT copies the
 * header back to user space, because its handler stores the wait
 * outcome in k_ioctl.result; the other handlers write their results
 * through the embedded ioctl_ptr payload instead.
 *
 * Returns the handler's result, -ENOIOCTLCMD for an unknown command,
 * -EFAULT on copy failures, or -EINVAL for a missing payload pointer
 * or unknown id.
 */
static long synx_ioctl(struct file *filep,
	unsigned int cmd,
	unsigned long arg)
{
	s32 rc = 0;
	/* local shadows the global of the same name on purpose */
	struct synx_device *synx_dev = NULL;
	struct synx_client *client;
	struct synx_private_ioctl_arg k_ioctl;

	pr_debug("Enter %s\n", __func__);

	synx_dev = get_synx_device(filep);
	client = filep->private_data;

	if (cmd != SYNX_PRIVATE_IOCTL_CMD) {
		pr_err("invalid ioctl cmd\n");
		return -ENOIOCTLCMD;
	}

	if (copy_from_user(&k_ioctl,
		(struct synx_private_ioctl_arg *)arg,
		sizeof(k_ioctl))) {
		pr_err("invalid ioctl args\n");
		return -EFAULT;
	}

	/* every command carries its payload through ioctl_ptr */
	if (!k_ioctl.ioctl_ptr)
		return -EINVAL;

	switch (k_ioctl.id) {
	case SYNX_CREATE:
		rc = synx_handle_create(&k_ioctl);
		break;
	case SYNX_RELEASE:
		rc = synx_handle_release(&k_ioctl);
		break;
	case SYNX_REGISTER_PAYLOAD:
		rc = synx_handle_register_user_payload(
			&k_ioctl, client);
		break;
	case SYNX_DEREGISTER_PAYLOAD:
		rc = synx_handle_deregister_user_payload(
			&k_ioctl, client);
		break;
	case SYNX_SIGNAL:
		rc = synx_handle_signal(&k_ioctl);
		break;
	case SYNX_MERGE:
		rc = synx_handle_merge(&k_ioctl);
		break;
	case SYNX_WAIT:
		rc = synx_handle_wait(&k_ioctl);
		/* wait result travels back in k_ioctl.result */
		if (copy_to_user((void *)arg,
			&k_ioctl,
			sizeof(k_ioctl))) {
			pr_err("invalid ioctl args\n");
			rc = -EFAULT;
		}
		break;
	case SYNX_BIND:
		rc = synx_handle_bind(&k_ioctl);
		break;
	case SYNX_ADDREFCOUNT:
		rc = synx_handle_addrefcount(&k_ioctl);
		break;
	case SYNX_GETSTATUS:
		rc = synx_handle_getstatus(&k_ioctl);
		break;
	case SYNX_IMPORT:
		rc = synx_handle_import(&k_ioctl);
		break;
	case SYNX_EXPORT:
		rc = synx_handle_export(&k_ioctl);
		break;
	default:
		rc = -EINVAL;
	}

	pr_debug("Exit %s\n", __func__);
	return rc;
}
1227
1228static ssize_t synx_read(struct file *filep,
1229 char __user *buf, size_t size, loff_t *f_pos)
1230{
1231 ssize_t rc = 0;
1232 struct synx_client *client = NULL;
1233 struct synx_cb_data *user_payload_kernel;
1234
1235 pr_debug("Enter %s\n", __func__);
1236
1237 client = filep->private_data;
1238
1239 if (size != sizeof(struct synx_user_payload)) {
1240 pr_err("invalid read size\n");
1241 return -EINVAL;
1242 }
1243
Sumukh Hallymysore Ravindrabc18e0f2019-12-12 13:51:45 -08001244 mutex_lock(&client->eventq_lock);
Sumukh Hallymysore Ravindrafdf150a2019-01-14 14:07:57 -08001245 user_payload_kernel = list_first_entry_or_null(
1246 &client->eventq,
1247 struct synx_cb_data,
1248 list);
1249 if (!user_payload_kernel) {
Sumukh Hallymysore Ravindrabc18e0f2019-12-12 13:51:45 -08001250 mutex_unlock(&client->eventq_lock);
Sumukh Hallymysore Ravindrafdf150a2019-01-14 14:07:57 -08001251 return 0;
1252 }
1253 list_del_init(&user_payload_kernel->list);
Sumukh Hallymysore Ravindrabc18e0f2019-12-12 13:51:45 -08001254 mutex_unlock(&client->eventq_lock);
Sumukh Hallymysore Ravindrafdf150a2019-01-14 14:07:57 -08001255
1256 rc = size;
1257 if (copy_to_user(buf,
1258 &user_payload_kernel->data,
1259 sizeof(struct synx_user_payload))) {
1260 pr_err("couldn't copy user callback data\n");
1261 rc = -EFAULT;
1262 }
1263 kfree(user_payload_kernel);
1264
1265 pr_debug("Exit %s\n", __func__);
1266 return rc;
1267}
1268
1269static unsigned int synx_poll(struct file *filep,
1270 struct poll_table_struct *poll_table)
1271{
1272 int rc = 0;
1273 struct synx_client *client = NULL;
1274
1275 pr_debug("Enter %s\n", __func__);
1276
1277 client = filep->private_data;
1278
1279 poll_wait(filep, &client->wq, poll_table);
Sumukh Hallymysore Ravindrabc18e0f2019-12-12 13:51:45 -08001280 mutex_lock(&client->eventq_lock);
Sumukh Hallymysore Ravindrafdf150a2019-01-14 14:07:57 -08001281 /* if list has pending cb events, notify */
1282 if (!list_empty(&client->eventq))
1283 rc = POLLPRI;
Sumukh Hallymysore Ravindrabc18e0f2019-12-12 13:51:45 -08001284 mutex_unlock(&client->eventq_lock);
Sumukh Hallymysore Ravindrafdf150a2019-01-14 14:07:57 -08001285
1286 pr_debug("Exit %s\n", __func__);
1287
1288 return rc;
1289}
1290
1291static int synx_open(struct inode *inode, struct file *filep)
1292{
1293 struct synx_device *synx_dev = NULL;
1294 struct synx_client *client = NULL;
1295
Sumukh Hallymysore Ravindra91c79a62019-09-18 12:50:20 -07001296 pr_debug("Enter %s from pid: %d\n", __func__, current->pid);
Sumukh Hallymysore Ravindrafdf150a2019-01-14 14:07:57 -08001297
1298 synx_dev = container_of(inode->i_cdev, struct synx_device, cdev);
1299
1300 client = kzalloc(sizeof(*client), GFP_KERNEL);
1301 if (!client)
1302 return -ENOMEM;
1303
1304 client->device = synx_dev;
Sumukh Hallymysore Ravindrafdf150a2019-01-14 14:07:57 -08001305 init_waitqueue_head(&client->wq);
1306 INIT_LIST_HEAD(&client->eventq);
Sumukh Hallymysore Ravindrabc18e0f2019-12-12 13:51:45 -08001307 mutex_init(&client->eventq_lock);
Sumukh Hallymysore Ravindrafdf150a2019-01-14 14:07:57 -08001308
1309 mutex_lock(&synx_dev->table_lock);
1310 list_add_tail(&client->list, &synx_dev->client_list);
1311 synx_dev->open_cnt++;
1312 mutex_unlock(&synx_dev->table_lock);
1313
1314 filep->private_data = client;
1315
1316 pr_debug("Exit %s\n", __func__);
1317
1318 return 0;
1319}
1320
/*
 * synx_object_cleanup - drop every user payload registered by @client.
 *
 * Walks all in-use synx table rows and unlinks/frees payload entries
 * owned by the departing client so no queued callback can reference a
 * freed session. Called from synx_close() during session teardown.
 */
static void synx_object_cleanup(struct synx_client *client)
{
	int i;
	struct synx_cb_data *payload_info, *temp_payload_info;

	/* index 0 is the reserved invalid handle; start at 1 */
	for (i = 1; i < SYNX_MAX_OBJS; i++) {
		struct synx_table_row *row =
			synx_dev->synx_table + i;

		mutex_lock(&synx_dev->row_locks[i]);
		/* non-zero index marks a row that is in use */
		if (row->index) {
			list_for_each_entry_safe(payload_info,
				temp_payload_info,
				&row->user_payload_list, list) {
				if (payload_info->client == client) {
					list_del_init(&payload_info->list);
					kfree(payload_info);
					pr_debug("cleaned up client payload\n");
				}
			}
		}
		mutex_unlock(&synx_dev->row_locks[i]);
	}
}
1345
/*
 * synx_table_cleanup - tear down the global synx table when the last
 * session closes.
 *
 * Decrements the device open count and, if it reaches zero, performs a
 * full shutdown in strict order:
 *   1. signal every still-ACTIVE non-merged object as ERROR;
 *   2. flush the work queue so pending signal callbacks finish;
 *   3. release merged objects first (dropping their references on the
 *      constituent objects), then release every remaining object until
 *      all of its references are gone;
 *   4. free any import data never claimed by synx_import().
 *
 * Callers (synx_close, synx_uninitialize) hold synx_dev->table_lock
 * around this call.
 */
static void synx_table_cleanup(void)
{
	int rc = 0;
	int i;
	struct synx_import_data *data, *tmp_data;

	synx_dev->open_cnt--;
	if (!synx_dev->open_cnt) {
		for (i = 1; i < SYNX_MAX_OBJS; i++) {
			struct synx_table_row *row =
				synx_dev->synx_table + i;
			/*
			 * signal all ACTIVE objects as ERR, but we don't care
			 * about the return status here apart from logging it.
			 */
			if (row->index && !is_merged_synx(row) &&
				(synx_status(row) == SYNX_STATE_ACTIVE)) {
				pr_debug("synx still active at shutdown at %d\n",
					row->index);
				rc = synx_signal_core(row,
					SYNX_STATE_SIGNALED_ERROR);
				if (rc < 0)
					pr_err("cleanup signal fail at %d\n",
						row->index);
			}
		}

		/*
		 * flush the work queue to wait for pending signal callbacks
		 * to finish
		 */
		flush_workqueue(synx_dev->work_queue);

		/*
		 * now that all objs have been signaled, destroy remaining
		 * synx objs.
		 * Start with merged synx objs, thereby releasing references
		 * owned by the merged obj on its constituing synx objs.
		 */
		for (i = 1; i < SYNX_MAX_OBJS; i++) {
			struct synx_table_row *row =
				synx_dev->synx_table + i;

			if (row->index && is_merged_synx(row)) {
				rc = synx_release_core(row);
				if (rc < 0)
					pr_err("cleanup destroy fail at %d\n",
						row->index);
			}
		}

		for (i = 1; i < SYNX_MAX_OBJS; i++) {
			struct synx_table_row *row =
				synx_dev->synx_table + i;
			/*
			 * iterate till all un-cleared reference/s for
			 * synx obj is released since synx_release_core
			 * removes only one reference per invocation.
			 */
			while (row->index) {
				rc = synx_release_core(row);
				if (rc < 0)
					pr_err("cleanup destroy fail at %d\n",
						row->index);
			}
		}

		/* clean remaining un-imported synx data */
		list_for_each_entry_safe(data, tmp_data,
			&synx_dev->import_list, list) {
			pr_debug("clearing import data 0x%x\n",
				data->synx_obj);
			list_del_init(&data->list);
			kfree(data);
		}
	}
}
Sumukh Hallymysore Ravindrafdf150a2019-01-14 14:07:57 -08001423
Sumukh Hallymysore Ravindra7df26222019-09-27 11:33:07 -07001424static int synx_close(struct inode *inode, struct file *filep)
1425{
1426 struct synx_device *synx_dev = NULL;
1427 struct synx_client *client;
1428
1429 pr_debug("Enter %s from pid: %d\n", __func__, current->pid);
1430
1431 synx_dev = get_synx_device(filep);
1432 client = filep->private_data;
1433
1434 mutex_lock(&synx_dev->table_lock);
Sumukh Hallymysore Ravindra38f7bb32019-12-04 15:31:50 -08001435 synx_object_cleanup(client);
Sumukh Hallymysore Ravindra7df26222019-09-27 11:33:07 -07001436 synx_table_cleanup();
Sumukh Hallymysore Ravindra91c79a62019-09-18 12:50:20 -07001437 list_del_init(&client->list);
1438 kfree(client);
Sumukh Hallymysore Ravindrafdf150a2019-01-14 14:07:57 -08001439 mutex_unlock(&synx_dev->table_lock);
1440
1441 pr_debug("Exit %s\n", __func__);
1442
1443 return 0;
1444}
1445
/* Character-device interface exposed to user-space synx clients. */
static const struct file_operations synx_fops = {
	.owner = THIS_MODULE,
	.open = synx_open,
	.read = synx_read,
	.release = synx_close,
	.poll = synx_poll,
	.unlocked_ioctl = synx_ioctl,
#ifdef CONFIG_COMPAT
	/* ioctl structs are layout-compatible for 32-bit callers */
	.compat_ioctl = synx_ioctl,
#endif
};
1457
/*
 * synx_initialize - open an in-kernel synx client session.
 *
 * Counterpart of synx_open() for kernel clients: only bumps the device
 * open count (balanced by synx_uninitialize()). @params is optional
 * and used solely for the debug log. Always returns 0.
 */
int synx_initialize(struct synx_initialization_params *params)
{
	pr_debug("Enter %s from pid: %d\n", __func__, current->pid);

	mutex_lock(&synx_dev->table_lock);
	synx_dev->open_cnt++;
	mutex_unlock(&synx_dev->table_lock);

	if (params)
		pr_debug("synx client session initialized for %s\n",
			params->name);
	return 0;
}
1471
/*
 * synx_uninitialize - close an in-kernel synx client session.
 *
 * Drops the open count via synx_table_cleanup(), which performs the
 * full table teardown when this was the last session. Always returns 0.
 */
int synx_uninitialize(void)
{
	pr_debug("Enter %s from pid: %d\n",
		__func__, current->pid);

	mutex_lock(&synx_dev->table_lock);
	synx_table_cleanup();
	mutex_unlock(&synx_dev->table_lock);

	return 0;
}
1483
Sumukh Hallymysore Ravindra6b7d0252019-06-12 14:05:18 -07001484int synx_register_ops(const struct synx_register_params *params)
Sumukh Hallymysore Ravindrafdf150a2019-01-14 14:07:57 -08001485{
Sumukh Hallymysore Ravindra6b7d0252019-06-12 14:05:18 -07001486 s32 rc;
1487 struct synx_registered_ops *client_ops;
Sumukh Hallymysore Ravindrafdf150a2019-01-14 14:07:57 -08001488
Sumukh Hallymysore Ravindra6b7d0252019-06-12 14:05:18 -07001489 if (!params || !params->name ||
1490 !is_valid_type(params->type) ||
1491 !params->ops.register_callback ||
1492 !params->ops.deregister_callback ||
1493 !params->ops.signal) {
1494 pr_err("invalid register params\n");
1495 return -EINVAL;
Sumukh Hallymysore Ravindrafdf150a2019-01-14 14:07:57 -08001496 }
Sumukh Hallymysore Ravindra6b7d0252019-06-12 14:05:18 -07001497
Sumukh Hallymysore Ravindra25871fc2019-10-09 11:16:22 -07001498 mutex_lock(&synx_dev->vtbl_lock);
Sumukh Hallymysore Ravindra6b7d0252019-06-12 14:05:18 -07001499 client_ops = &synx_dev->bind_vtbl[params->type];
1500 if (!client_ops->valid) {
1501 client_ops->valid = true;
1502 memcpy(&client_ops->ops, &params->ops,
1503 sizeof(client_ops->ops));
1504 strlcpy(client_ops->name, params->name,
1505 sizeof(client_ops->name));
1506 client_ops->type = params->type;
1507 pr_info("registered bind ops for %s\n",
1508 params->name);
1509 rc = 0;
1510 } else {
1511 pr_info("client already registered by %s\n",
1512 client_ops->name);
1513 rc = -EINVAL;
1514 }
Sumukh Hallymysore Ravindra25871fc2019-10-09 11:16:22 -07001515 mutex_unlock(&synx_dev->vtbl_lock);
Sumukh Hallymysore Ravindra6b7d0252019-06-12 14:05:18 -07001516
1517 return rc;
1518}
1519
1520int synx_deregister_ops(const struct synx_register_params *params)
1521{
1522 struct synx_registered_ops *client_ops;
1523
1524 if (!params || !params->name ||
1525 !is_valid_type(params->type)) {
1526 pr_err("invalid params\n");
1527 return -EINVAL;
1528 }
1529
Sumukh Hallymysore Ravindra25871fc2019-10-09 11:16:22 -07001530 mutex_lock(&synx_dev->vtbl_lock);
Sumukh Hallymysore Ravindra6b7d0252019-06-12 14:05:18 -07001531 client_ops = &synx_dev->bind_vtbl[params->type];
1532 memset(client_ops, 0, sizeof(*client_ops));
1533 pr_info("deregistered bind ops for %s\n",
1534 params->name);
Sumukh Hallymysore Ravindra25871fc2019-10-09 11:16:22 -07001535 mutex_unlock(&synx_dev->vtbl_lock);
Sumukh Hallymysore Ravindra6b7d0252019-06-12 14:05:18 -07001536
1537 return 0;
Sumukh Hallymysore Ravindrafdf150a2019-01-14 14:07:57 -08001538}
1539
1540static int __init synx_init(void)
1541{
1542 int rc;
1543 int idx;
1544
1545 pr_info("synx device init start\n");
1546
1547 synx_dev = kzalloc(sizeof(*synx_dev), GFP_KERNEL);
1548 if (!synx_dev)
1549 return -ENOMEM;
1550
1551 mutex_init(&synx_dev->table_lock);
Sumukh Hallymysore Ravindra25871fc2019-10-09 11:16:22 -07001552 mutex_init(&synx_dev->vtbl_lock);
Sumukh Hallymysore Ravindrafdf150a2019-01-14 14:07:57 -08001553
1554 for (idx = 0; idx < SYNX_MAX_OBJS; idx++)
Sumukh Hallymysore Ravindrabc18e0f2019-12-12 13:51:45 -08001555 mutex_init(&synx_dev->row_locks[idx]);
Sumukh Hallymysore Ravindrafdf150a2019-01-14 14:07:57 -08001556
1557 idr_init(&synx_dev->synx_ids);
Sumukh Hallymysore Ravindra29edeb72019-04-12 17:50:45 -07001558 spin_lock_init(&synx_dev->idr_lock);
Sumukh Hallymysore Ravindrafdf150a2019-01-14 14:07:57 -08001559
1560 rc = alloc_chrdev_region(&synx_dev->dev, 0, 1, SYNX_DEVICE_NAME);
1561 if (rc < 0) {
1562 pr_err("region allocation failed\n");
1563 goto alloc_fail;
1564 }
1565
1566 cdev_init(&synx_dev->cdev, &synx_fops);
1567 synx_dev->cdev.owner = THIS_MODULE;
1568 rc = cdev_add(&synx_dev->cdev, synx_dev->dev, 1);
1569 if (rc < 0) {
1570 pr_err("device registation failed\n");
1571 goto reg_fail;
1572 }
1573
1574 synx_dev->class = class_create(THIS_MODULE, SYNX_DEVICE_NAME);
1575 device_create(synx_dev->class, NULL, synx_dev->dev,
1576 NULL, SYNX_DEVICE_NAME);
1577
1578 /*
1579 * we treat zero as invalid handle, so we will keep the 0th bit set
1580 * always
1581 */
1582 set_bit(0, synx_dev->bitmap);
1583
1584 synx_dev->work_queue = alloc_workqueue(SYNX_WORKQUEUE_NAME,
1585 WQ_HIGHPRI | WQ_UNBOUND, 1);
1586 if (!synx_dev->work_queue) {
1587 pr_err("high priority work queue creation failed\n");
1588 rc = -EINVAL;
1589 goto fail;
1590 }
1591
1592 INIT_LIST_HEAD(&synx_dev->client_list);
Sumukh Hallymysore Ravindra9e0b6972019-06-24 14:36:34 -07001593 INIT_LIST_HEAD(&synx_dev->import_list);
Sumukh Hallymysore Ravindrafdf150a2019-01-14 14:07:57 -08001594 synx_dev->dma_context = dma_fence_context_alloc(1);
1595
Carlos Hopkins81eb3412019-02-22 13:57:54 -08001596 synx_dev->debugfs_root = init_synx_debug_dir(synx_dev);
Sumukh Hallymysore Ravindrafdf150a2019-01-14 14:07:57 -08001597 pr_info("synx device init success\n");
1598
1599 return 0;
1600
1601fail:
1602 device_destroy(synx_dev->class, synx_dev->dev);
1603 class_destroy(synx_dev->class);
1604reg_fail:
1605 unregister_chrdev_region(synx_dev->dev, 1);
1606alloc_fail:
1607 mutex_destroy(&synx_dev->table_lock);
1608 idr_destroy(&synx_dev->synx_ids);
1609 kfree(synx_dev);
1610 return rc;
1611}
1612
1613device_initcall(synx_init);
1614
1615MODULE_DESCRIPTION("Global Synx Driver");
1616MODULE_LICENSE("GPL v2");