/* Copyright (c) 2016-2019,2021, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

/*
 * KGSL drawobj management
 * A drawobj is a single submission from userland. The drawobj
 * encapsulates everything about the submission: command buffers, flags and
 * sync points.
 *
 * Sync points are events that need to expire before the
 * drawobj can be queued to the hardware. All syncpoints are contained in an
 * array of kgsl_drawobj_sync_event structs in the drawobj. There can be
 * multiple types of events, both internal ones (GPU events) and external
 * triggers. As the events expire, bits are cleared in a pending bitmap stored
 * in the drawobj. The GPU will submit the command as soon as the bitmap
 * goes to zero, indicating no more pending events.
 */

#include <linux/uaccess.h>
#include <linux/list.h>
#include <linux/compat.h>

#include "kgsl.h"
#include "kgsl_device.h"
#include "kgsl_drawobj.h"
#include "kgsl_sync.h"
#include "kgsl_trace.h"
#include "kgsl_compat.h"

/*
 * Define a kmem cache for the memobj and sparseobj structures since we
 * allocate and free them so frequently
 */
static struct kmem_cache *memobjs_cache;
static struct kmem_cache *sparseobjs_cache;

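/*
 * free_fence_names() - Free the fence name info attached to fence syncpoints
 * @syncobj: Sync obj whose synclist is being torn down
 *
 * Walk the synclist and free the fence info array that was allocated for
 * each fence type sync event.
 */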
static void free_fence_names(struct kgsl_drawobj_sync *syncobj)
{
	unsigned int i;

	for (i = 0; i < syncobj->numsyncs; i++) {
		struct kgsl_drawobj_sync_event *event = &syncobj->synclist[i];

		if (event->type == KGSL_CMD_SYNCPOINT_TYPE_FENCE)
			kfree(event->info.fences);
	}
}

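/**
 * kgsl_drawobj_destroy_object() - Final destruction of a drawobj
 * @kref: Pointer to the refcount kref embedded in the drawobj
 *
 * Called when the last reference to a drawobj is released: drop the context
 * reference and free the type-specific object.
 */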
void kgsl_drawobj_destroy_object(struct kref *kref)
{
	struct kgsl_drawobj *drawobj = container_of(kref,
		struct kgsl_drawobj, refcount);
	struct kgsl_drawobj_sync *syncobj;

	kgsl_context_put(drawobj->context);

	switch (drawobj->type) {
	case SYNCOBJ_TYPE:
		syncobj = SYNCOBJ(drawobj);
		free_fence_names(syncobj);
		kfree(syncobj->synclist);
		kfree(syncobj);
		break;
	case CMDOBJ_TYPE:
	case MARKEROBJ_TYPE:
		kfree(CMDOBJ(drawobj));
		break;
	case SPARSEOBJ_TYPE:
		kfree(SPARSEOBJ(drawobj));
		break;
	}
}

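/**
 * kgsl_dump_syncpoints() - Print the pending sync points for a sync obj
 * @device: KGSL device that owns the sync obj
 * @syncobj: Sync obj to dump
 *
 * Log every sync point that is still pending, along with its timestamp or
 * fence details, to help debug stuck submissions.
 */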
void kgsl_dump_syncpoints(struct kgsl_device *device,
	struct kgsl_drawobj_sync *syncobj)
{
	struct kgsl_drawobj_sync_event *event;
	unsigned int i;

	for (i = 0; i < syncobj->numsyncs; i++) {
		event = &syncobj->synclist[i];

		if (!kgsl_drawobj_event_pending(syncobj, i))
			continue;

		switch (event->type) {
		case KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP: {
			unsigned int retired;

			kgsl_readtimestamp(event->device,
				event->context, KGSL_TIMESTAMP_RETIRED,
				&retired);

			dev_err(device->dev,
				" [timestamp] context %d timestamp %d (retired %d)\n",
				event->context->id, event->timestamp,
				retired);
			break;
		}
		case KGSL_CMD_SYNCPOINT_TYPE_FENCE: {
			int j;
			struct event_fence_info *info = &event->info;

			for (j = 0; j < info->num_fences; j++)
				dev_err(device->dev, "[%d] fence: %s\n",
					i, info->fences[j].name);
			break;
		}
		}
	}
}

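/*
 * syncobj_timer() - Debug timer callback for a sync obj
 * @data: Sync obj that has been waiting on its sync points
 *
 * Fires when a sync obj has been pending for too long and dumps the context
 * and the list of still-pending events to help diagnose a possible
 * syncpoint deadlock.
 */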
static void syncobj_timer(unsigned long data)
{
	struct kgsl_device *device;
	struct kgsl_drawobj_sync *syncobj = (struct kgsl_drawobj_sync *) data;
	struct kgsl_drawobj *drawobj;
	struct kgsl_drawobj_sync_event *event;
	unsigned int i;

	if (syncobj == NULL)
		return;

	drawobj = DRAWOBJ(syncobj);

	if (!kref_get_unless_zero(&drawobj->refcount))
		return;

	if (drawobj->context == NULL) {
		kgsl_drawobj_put(drawobj);
		return;
	}

	device = drawobj->context->device;

	dev_err(device->dev,
		"kgsl: possible gpu syncpoint deadlock for context %d timestamp %d\n",
		drawobj->context->id, drawobj->timestamp);

	set_bit(ADRENO_CONTEXT_FENCE_LOG, &drawobj->context->priv);
	kgsl_context_dump(drawobj->context);
	clear_bit(ADRENO_CONTEXT_FENCE_LOG, &drawobj->context->priv);

	dev_err(device->dev, " pending events:\n");

	for (i = 0; i < syncobj->numsyncs; i++) {
		event = &syncobj->synclist[i];

		if (!kgsl_drawobj_event_pending(syncobj, i))
			continue;

		switch (event->type) {
		case KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP:
			dev_err(device->dev, " [%d] TIMESTAMP %d:%d\n",
				i, event->context->id, event->timestamp);
			break;
		case KGSL_CMD_SYNCPOINT_TYPE_FENCE: {
			int j;
			struct event_fence_info *info = &event->info;

			for (j = 0; j < info->num_fences; j++)
				dev_err(device->dev, " [%d] FENCE %s\n",
					i, info->fences[j].name);
			break;
		}
		}
	}

	kgsl_drawobj_put(drawobj);
	dev_err(device->dev, "--gpu syncpoint deadlock print end--\n");
}

/*
 * A generic function to retire a pending sync event and (possibly) kick the
 * dispatcher. Returns false if the event was already marked for cancellation
 * in another thread; returns true if this thread is responsible for freeing
 * up the memory and the event will not be cancelled.
 */
static bool drawobj_sync_expire(struct kgsl_device *device,
	struct kgsl_drawobj_sync_event *event)
{
	struct kgsl_drawobj_sync *syncobj = event->syncobj;
	/*
	 * Clear the event from the pending mask - if it is already clear, then
	 * leave without doing anything useful
	 */
	if (!test_and_clear_bit(event->id, &syncobj->pending))
		return false;

	/*
	 * If no more pending events, delete the timer and schedule the command
	 * for dispatch
	 */
	if (!kgsl_drawobj_events_pending(event->syncobj)) {
		del_timer_sync(&syncobj->timer);

		if (device->ftbl->drawctxt_sched)
			device->ftbl->drawctxt_sched(device,
				event->syncobj->base.context);
	}
	return true;
}

/*
 * This function is called by the GPU event when the sync event timestamp
 * expires
 */
static void drawobj_sync_func(struct kgsl_device *device,
	struct kgsl_event_group *group, void *priv, int result)
{
	struct kgsl_drawobj_sync_event *event = priv;

	trace_syncpoint_timestamp_expire(event->syncobj,
		event->context, event->timestamp);

	/*
	 * Put down the context ref count only if
	 * this thread successfully clears the pending bit mask.
	 */
	if (drawobj_sync_expire(device, event))
		kgsl_context_put(event->context);

	kgsl_drawobj_put(&event->syncobj->base);
}

static inline void memobj_list_free(struct list_head *list)
{
	struct kgsl_memobj_node *mem, *tmpmem;

	/* Free the cmd mem here */
	list_for_each_entry_safe(mem, tmpmem, list, node) {
		list_del_init(&mem->node);
		kmem_cache_free(memobjs_cache, mem);
	}
}

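/*
 * drawobj_destroy_sparse() - Free the sparse binding list of a sparse obj
 * @drawobj: Draw obj of SPARSEOBJ_TYPE that is being destroyed
 */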
static void drawobj_destroy_sparse(struct kgsl_drawobj *drawobj)
{
	struct kgsl_sparseobj_node *mem, *tmpmem;
	struct list_head *list = &SPARSEOBJ(drawobj)->sparselist;

	/* Free the sparse mem here */
	list_for_each_entry_safe(mem, tmpmem, list, node) {
		list_del_init(&mem->node);
		kmem_cache_free(sparseobjs_cache, mem);
	}
}

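/*
 * drawobj_destroy_sync() - Cancel the pending sync points on a sync obj
 * @drawobj: Draw obj of SYNCOBJ_TYPE that is being destroyed
 *
 * Stop the debug timer, cancel any sync events that have not yet expired
 * and kick the dispatcher if needed.
 */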
static void drawobj_destroy_sync(struct kgsl_drawobj *drawobj)
{
	struct kgsl_drawobj_sync *syncobj = SYNCOBJ(drawobj);
	unsigned int i;

	/* Zap the canary timer */
	del_timer_sync(&syncobj->timer);

	/*
	 * Clear all pending events - this will render any subsequent async
	 * callbacks harmless
	 */
	for (i = 0; i < syncobj->numsyncs; i++) {
		struct kgsl_drawobj_sync_event *event = &syncobj->synclist[i];

		/*
		 * Don't do anything if the event has already expired.
		 * If this thread clears the pending bit mask then it is
		 * responsible for doing the context put.
		 */
		if (!test_and_clear_bit(i, &syncobj->pending))
			continue;

		switch (event->type) {
		case KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP:
			kgsl_cancel_event(drawobj->device,
				&event->context->events, event->timestamp,
				drawobj_sync_func, event);
			/*
			 * Do the context put here to make sure the context
			 * stays alive until this thread cancels the kgsl
			 * event.
			 */
			kgsl_context_put(event->context);
			break;
		case KGSL_CMD_SYNCPOINT_TYPE_FENCE:
			kgsl_sync_fence_async_cancel(event->handle);
			kgsl_drawobj_put(drawobj);
			break;
		}
	}

	/*
	 * If we cancelled an event, there's a good chance that the context is
	 * on a dispatcher queue, so schedule to get it removed.
	 */
	if (!bitmap_empty(&syncobj->pending, KGSL_MAX_SYNCPOINTS) &&
		drawobj->device->ftbl->drawctxt_sched)
		drawobj->device->ftbl->drawctxt_sched(drawobj->device,
			drawobj->context);
}

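/*
 * drawobj_destroy_cmd() - Release the resources held by a command obj
 * @drawobj: Draw obj of CMDOBJ_TYPE or MARKEROBJ_TYPE that is being destroyed
 *
 * Drop the profiling buffer reference (if any) and free the command and
 * memory lists built up for the submission.
 */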
static void drawobj_destroy_cmd(struct kgsl_drawobj *drawobj)
{
	struct kgsl_drawobj_cmd *cmdobj = CMDOBJ(drawobj);

	/*
	 * Release the refcount on the mem entry associated with the
	 * ib profiling buffer
	 */
	if (cmdobj->base.flags & KGSL_DRAWOBJ_PROFILING)
		kgsl_mem_entry_put(cmdobj->profiling_buf_entry);

	/* Destroy the cmdlist we created */
	memobj_list_free(&cmdobj->cmdlist);

	/* Destroy the memlist we created */
	memobj_list_free(&cmdobj->memlist);
}

/**
 * kgsl_drawobj_destroy() - Destroy a kgsl drawobj structure
 * @drawobj: Pointer to the drawobj to destroy
 *
 * Start the process of destroying a command batch. Cancel any pending events
 * and decrement the refcount. Asynchronous events can still signal after
 * kgsl_drawobj_destroy has returned.
 */
void kgsl_drawobj_destroy(struct kgsl_drawobj *drawobj)
{
	if (!drawobj)
		return;

	if (drawobj->type & SYNCOBJ_TYPE)
		drawobj_destroy_sync(drawobj);
	else if (drawobj->type & (CMDOBJ_TYPE | MARKEROBJ_TYPE))
		drawobj_destroy_cmd(drawobj);
	else if (drawobj->type == SPARSEOBJ_TYPE)
		drawobj_destroy_sparse(drawobj);
	else
		return;

	kgsl_drawobj_put(drawobj);
}
EXPORT_SYMBOL(kgsl_drawobj_destroy);

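/*
 * drawobj_sync_fence_func() - Callback run when a fence sync point signals
 * @priv: Sync event attached to the fence
 *
 * Returns true if this thread retired the event (and dropped the drawobj
 * reference), false if the event was already cancelled in another thread.
 */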
static bool drawobj_sync_fence_func(void *priv)
{
	struct kgsl_drawobj_sync_event *event = priv;
	int i;

	for (i = 0; i < event->info.num_fences; i++)
		trace_syncpoint_fence_expire(event->syncobj,
			event->info.fences[i].name);

	/*
	 * Only call kgsl_drawobj_put() if it's not marked for cancellation
	 * in another thread.
	 */
	if (drawobj_sync_expire(event->device, event)) {
		kgsl_drawobj_put(&event->syncobj->base);
		return true;
	}
	return false;
}

/* drawobj_add_sync_fence() - Add a new sync fence syncpoint
 * @device: KGSL device
 * @syncobj: KGSL sync obj to add the sync point to
 * @priv: Private structure passed by the user
 *
 * Add a new fence syncpoint to the sync obj.
 */
static int drawobj_add_sync_fence(struct kgsl_device *device,
	struct kgsl_drawobj_sync *syncobj, void *priv)
{
	struct kgsl_cmd_syncpoint_fence *sync = priv;
	struct kgsl_drawobj *drawobj = DRAWOBJ(syncobj);
	struct kgsl_drawobj_sync_event *event;
	unsigned int id, i;

	kref_get(&drawobj->refcount);

	id = syncobj->numsyncs++;

	event = &syncobj->synclist[id];

	event->id = id;
	event->type = KGSL_CMD_SYNCPOINT_TYPE_FENCE;
	event->syncobj = syncobj;
	event->device = device;
	event->context = NULL;

	set_bit(event->id, &syncobj->pending);

	event->handle = kgsl_sync_fence_async_wait(sync->fd,
		drawobj_sync_fence_func, event,
		&event->info);

	if (IS_ERR_OR_NULL(event->handle)) {
		int ret = PTR_ERR(event->handle);

		clear_bit(event->id, &syncobj->pending);
		event->handle = NULL;

		kgsl_drawobj_put(drawobj);

		/*
		 * If ret == 0 the fence was already signaled - print a trace
		 * message so we can track that
		 */
		if (ret == 0)
			trace_syncpoint_fence_expire(syncobj, "signaled");

		return ret;
	}

	for (i = 0; i < event->info.num_fences; i++)
		trace_syncpoint_fence(syncobj, event->info.fences[i].name);

	return 0;
}

/* drawobj_add_sync_timestamp() - Add a new sync point for a sync obj
 * @device: KGSL device
 * @syncobj: KGSL sync obj to add the sync point to
 * @priv: Private structure passed by the user
 *
 * Add a new sync point timestamp event to the sync obj.
 */
static int drawobj_add_sync_timestamp(struct kgsl_device *device,
	struct kgsl_drawobj_sync *syncobj, void *priv)
{
	struct kgsl_cmd_syncpoint_timestamp *sync = priv;
	struct kgsl_drawobj *drawobj = DRAWOBJ(syncobj);
	struct kgsl_context *context = kgsl_context_get(device,
		sync->context_id);
	struct kgsl_drawobj_sync_event *event;
	int ret = -EINVAL;
	unsigned int id;

	if (context == NULL)
		return -EINVAL;

	/*
	 * We allow somebody to create a sync point on their own context.
	 * This has the effect of delaying a command from submitting until the
	 * dependent command has cleared. That said we obviously can't let them
	 * create a sync point on a future timestamp.
	 */

	if (context == drawobj->context) {
		unsigned int queued;

		kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_QUEUED,
			&queued);

		if (timestamp_cmp(sync->timestamp, queued) > 0) {
			KGSL_DRV_ERR(device,
				"Cannot create syncpoint for future timestamp %d (current %d)\n",
				sync->timestamp, queued);
			goto done;
		}
	}

	kref_get(&drawobj->refcount);

	id = syncobj->numsyncs++;

	event = &syncobj->synclist[id];
	event->id = id;

	event->type = KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP;
	event->syncobj = syncobj;
	event->context = context;
	event->timestamp = sync->timestamp;
	event->device = device;

	set_bit(event->id, &syncobj->pending);

	ret = kgsl_add_event(device, &context->events, sync->timestamp,
		drawobj_sync_func, event);

	if (ret) {
		clear_bit(event->id, &syncobj->pending);
		kgsl_drawobj_put(drawobj);
	} else {
		trace_syncpoint_timestamp(syncobj, context, sync->timestamp);
	}

done:
	if (ret)
		kgsl_context_put(context);

	return ret;
}

/**
 * kgsl_drawobj_sync_add_sync() - Add a sync point to a sync obj
 * @device: Pointer to the KGSL device struct for the GPU
 * @syncobj: Pointer to the sync obj
 * @sync: Pointer to the user-specified struct defining the syncpoint
 *
 * Create a new sync point in the sync obj based on the
 * user-specified parameters
 */
int kgsl_drawobj_sync_add_sync(struct kgsl_device *device,
	struct kgsl_drawobj_sync *syncobj,
	struct kgsl_cmd_syncpoint *sync)
{
	void *priv;
	int ret, psize;
	struct kgsl_drawobj *drawobj = DRAWOBJ(syncobj);
	int (*func)(struct kgsl_device *device,
		struct kgsl_drawobj_sync *syncobj,
		void *priv);

	switch (sync->type) {
	case KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP:
		psize = sizeof(struct kgsl_cmd_syncpoint_timestamp);
		func = drawobj_add_sync_timestamp;
		break;
	case KGSL_CMD_SYNCPOINT_TYPE_FENCE:
		psize = sizeof(struct kgsl_cmd_syncpoint_fence);
		func = drawobj_add_sync_fence;
		break;
	default:
		KGSL_DRV_ERR(device,
			"bad syncpoint type ctxt %d type 0x%x size %zu\n",
			drawobj->context->id, sync->type, sync->size);
		return -EINVAL;
	}

	if (sync->size != psize) {
		KGSL_DRV_ERR(device,
			"bad syncpoint size ctxt %d type 0x%x size %zu\n",
			drawobj->context->id, sync->type, sync->size);
		return -EINVAL;
	}

	priv = kzalloc(sync->size, GFP_KERNEL);
	if (priv == NULL)
		return -ENOMEM;

	if (copy_from_user(priv, sync->priv, sync->size)) {
		kfree(priv);
		return -EFAULT;
	}

	ret = func(device, syncobj, priv);
	kfree(priv);

	return ret;
}

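/*
 * add_profiling_buffer() - Attach the user profiling buffer to a command obj
 * @device: KGSL device
 * @cmdobj: Command obj being built
 * @gpuaddr: GPU address of the profiling buffer
 * @size: Size of the profiling buffer
 * @id: Memory entry id (0 if the buffer is specified by GPU address)
 * @offset: Offset of the buffer within the memory entry
 *
 * Look up the memory entry backing the profiling buffer, verify that it is
 * large enough to hold a kgsl_drawobj_profiling_buffer and remember it on
 * the command obj. Only the first profiling buffer seen is used.
 */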
static void add_profiling_buffer(struct kgsl_device *device,
	struct kgsl_drawobj_cmd *cmdobj,
	uint64_t gpuaddr, uint64_t size,
	unsigned int id, uint64_t offset)
{
	struct kgsl_mem_entry *entry;
	struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj);
	u64 start;

	if (!(drawobj->flags & KGSL_DRAWOBJ_PROFILING))
		return;

	/* Only the first buffer entry counts - ignore the rest */
	if (cmdobj->profiling_buf_entry != NULL)
		return;

	if (id != 0)
		entry = kgsl_sharedmem_find_id(drawobj->context->proc_priv,
			id);
	else
		entry = kgsl_sharedmem_find(drawobj->context->proc_priv,
			gpuaddr);

	if (entry != NULL) {
		start = id ? (entry->memdesc.gpuaddr + offset) : gpuaddr;
		/*
		 * Make sure there is enough room in the object to store the
		 * entire profiling buffer object
		 */
		if (!kgsl_gpuaddr_in_memdesc(&entry->memdesc, gpuaddr, size) ||
			!kgsl_gpuaddr_in_memdesc(&entry->memdesc, start,
				sizeof(struct kgsl_drawobj_profiling_buffer))) {
			kgsl_mem_entry_put(entry);
			entry = NULL;
		}
	}

	if (entry == NULL) {
		KGSL_DRV_ERR(device,
			"ignore bad profile buffer ctxt %d id %d offset %lld gpuaddr %llx size %lld\n",
			drawobj->context->id, id, offset, gpuaddr, size);
		return;
	}

	cmdobj->profiling_buffer_gpuaddr = start;
	cmdobj->profiling_buf_entry = entry;
}

/**
 * kgsl_drawobj_cmd_add_ibdesc() - Add a legacy ibdesc to a command
 * batch
 * @device: Pointer to the KGSL device struct
 * @cmdobj: Pointer to the command obj
 * @ibdesc: Pointer to the user-specified struct defining the memory or IB
 *
 * Create a new memory entry in the command obj based on the
 * user-specified parameters
 */
int kgsl_drawobj_cmd_add_ibdesc(struct kgsl_device *device,
	struct kgsl_drawobj_cmd *cmdobj, struct kgsl_ibdesc *ibdesc)
{
	uint64_t gpuaddr = (uint64_t) ibdesc->gpuaddr;
	uint64_t size = (uint64_t) ibdesc->sizedwords << 2;
	struct kgsl_memobj_node *mem;
	struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj);

	/* sanitize the ibdesc ctrl flags */
	ibdesc->ctrl &= KGSL_IBDESC_MEMLIST | KGSL_IBDESC_PROFILING_BUFFER;

	if (drawobj->flags & KGSL_DRAWOBJ_MEMLIST &&
		ibdesc->ctrl & KGSL_IBDESC_MEMLIST) {
		if (ibdesc->ctrl & KGSL_IBDESC_PROFILING_BUFFER) {
			add_profiling_buffer(device, cmdobj,
				gpuaddr, size, 0, 0);
			return 0;
		}
	}

	/* Ignore if SYNC or MARKER is specified */
	if (drawobj->type & (SYNCOBJ_TYPE | MARKEROBJ_TYPE))
		return 0;

	mem = kmem_cache_alloc(memobjs_cache, GFP_KERNEL);
	if (mem == NULL)
		return -ENOMEM;

	mem->gpuaddr = gpuaddr;
	mem->size = size;
	mem->priv = 0;
	mem->id = 0;
	mem->offset = 0;
	mem->flags = 0;

	if (drawobj->flags & KGSL_DRAWOBJ_MEMLIST &&
		ibdesc->ctrl & KGSL_IBDESC_MEMLIST)
		/* add to the memlist */
		list_add_tail(&mem->node, &cmdobj->memlist);
	else {
		/* set the preamble flag if directed to */
		if (drawobj->context->flags & KGSL_CONTEXT_PREAMBLE &&
			list_empty(&cmdobj->cmdlist))
			mem->flags = KGSL_CMDLIST_CTXTSWITCH_PREAMBLE;

		/* add to the cmd list */
		list_add_tail(&mem->node, &cmdobj->cmdlist);
	}

	return 0;
}

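/*
 * _drawobj_create() - Common allocation path for all drawobj types
 * @device: KGSL device owning the object
 * @context: Context the object is being created against
 * @size: Size of the type-specific structure to allocate
 * @type: Drawobj type (SYNCOBJ/CMDOBJ/MARKEROBJ/SPARSEOBJ)
 *
 * Allocate the object, take a reference on the context and initialize the
 * embedded kgsl_drawobj. Returns an ERR_PTR on failure.
 */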
static void *_drawobj_create(struct kgsl_device *device,
	struct kgsl_context *context, unsigned int size,
	unsigned int type)
{
	void *obj = kzalloc(size, GFP_KERNEL);
	struct kgsl_drawobj *drawobj;

	if (obj == NULL)
		return ERR_PTR(-ENOMEM);

	/*
	 * Increase the reference count on the context so it doesn't disappear
	 * during the lifetime of this object
	 */
	if (!_kgsl_context_get(context)) {
		kfree(obj);
		return ERR_PTR(-ENOENT);
	}

	drawobj = obj;

	kref_init(&drawobj->refcount);

	drawobj->device = device;
	drawobj->context = context;
	drawobj->type = type;

	return obj;
}

/**
 * kgsl_drawobj_sparse_create() - Create a new sparse obj structure
 * @device: Pointer to a KGSL device struct
 * @context: Pointer to a KGSL context struct
 * @flags: Flags for the sparse obj
 *
 * Allocate a new kgsl_drawobj_sparse structure
 */
struct kgsl_drawobj_sparse *kgsl_drawobj_sparse_create(
	struct kgsl_device *device,
	struct kgsl_context *context, unsigned int flags)
{
	struct kgsl_drawobj_sparse *sparseobj = _drawobj_create(device,
		context, sizeof(*sparseobj), SPARSEOBJ_TYPE);

	if (!IS_ERR(sparseobj))
		INIT_LIST_HEAD(&sparseobj->sparselist);

	return sparseobj;
}

/**
 * kgsl_drawobj_sync_create() - Create a new sync obj
 * structure
 * @device: Pointer to a KGSL device struct
 * @context: Pointer to a KGSL context struct
 *
 * Allocate a new kgsl_drawobj_sync structure
 */
struct kgsl_drawobj_sync *kgsl_drawobj_sync_create(struct kgsl_device *device,
	struct kgsl_context *context)
{
	struct kgsl_drawobj_sync *syncobj = _drawobj_create(device,
		context, sizeof(*syncobj), SYNCOBJ_TYPE);

	/* Add a timer to help debug sync deadlocks */
	if (!IS_ERR(syncobj))
		setup_timer(&syncobj->timer, syncobj_timer,
			(unsigned long) syncobj);

	return syncobj;
}

/**
 * kgsl_drawobj_cmd_create() - Create a new command obj
 * structure
 * @device: Pointer to a KGSL device struct
 * @context: Pointer to a KGSL context struct
 * @flags: Flags for the command obj
 * @type: Type of cmdobj (MARKER or CMD)
 *
 * Allocate a new kgsl_drawobj_cmd structure
 */
struct kgsl_drawobj_cmd *kgsl_drawobj_cmd_create(struct kgsl_device *device,
	struct kgsl_context *context, unsigned int flags,
	unsigned int type)
{
	struct kgsl_drawobj_cmd *cmdobj = _drawobj_create(device,
		context, sizeof(*cmdobj),
		(type & (CMDOBJ_TYPE | MARKEROBJ_TYPE)));

	if (!IS_ERR(cmdobj)) {
		/* sanitize our flags for drawobjs */
		cmdobj->base.flags = flags & (KGSL_DRAWOBJ_CTX_SWITCH
			| KGSL_DRAWOBJ_MARKER
			| KGSL_DRAWOBJ_END_OF_FRAME
			| KGSL_DRAWOBJ_PWR_CONSTRAINT
			| KGSL_DRAWOBJ_MEMLIST
			| KGSL_DRAWOBJ_PROFILING
			| KGSL_DRAWOBJ_PROFILING_KTIME);

		INIT_LIST_HEAD(&cmdobj->cmdlist);
		INIT_LIST_HEAD(&cmdobj->memlist);
	}

	return cmdobj;
}

#ifdef CONFIG_COMPAT
static int add_ibdesc_list_compat(struct kgsl_device *device,
	struct kgsl_drawobj_cmd *cmdobj, void __user *ptr, int count)
{
	int i, ret = 0;
	struct kgsl_ibdesc_compat ibdesc32;
	struct kgsl_ibdesc ibdesc;

	for (i = 0; i < count; i++) {
		memset(&ibdesc32, 0, sizeof(ibdesc32));

		if (copy_from_user(&ibdesc32, ptr, sizeof(ibdesc32))) {
			ret = -EFAULT;
			break;
		}

		ibdesc.gpuaddr = (unsigned long) ibdesc32.gpuaddr;
		ibdesc.sizedwords = (size_t) ibdesc32.sizedwords;
		ibdesc.ctrl = (unsigned int) ibdesc32.ctrl;

		ret = kgsl_drawobj_cmd_add_ibdesc(device, cmdobj, &ibdesc);
		if (ret)
			break;

		ptr += sizeof(ibdesc32);
	}

	return ret;
}

static int add_syncpoints_compat(struct kgsl_device *device,
	struct kgsl_drawobj_sync *syncobj, void __user *ptr, int count)
{
	struct kgsl_cmd_syncpoint_compat sync32;
	struct kgsl_cmd_syncpoint sync;
	int i, ret = 0;

	for (i = 0; i < count; i++) {
		memset(&sync32, 0, sizeof(sync32));

		if (copy_from_user(&sync32, ptr, sizeof(sync32))) {
			ret = -EFAULT;
			break;
		}

		sync.type = sync32.type;
		sync.priv = compat_ptr(sync32.priv);
		sync.size = (size_t) sync32.size;

		ret = kgsl_drawobj_sync_add_sync(device, syncobj, &sync);
		if (ret)
			break;

		ptr += sizeof(sync32);
	}

	return ret;
}
#else
static int add_ibdesc_list_compat(struct kgsl_device *device,
	struct kgsl_drawobj_cmd *cmdobj, void __user *ptr, int count)
{
	return -EINVAL;
}

static int add_syncpoints_compat(struct kgsl_device *device,
	struct kgsl_drawobj_sync *syncobj, void __user *ptr, int count)
{
	return -EINVAL;
}
#endif

/* Returns:
 * -EINVAL: Bad data
 * 0: All data fields are empty (nothing to do)
 * 1: All list information is valid
 */
static int _verify_input_list(unsigned int count, void __user *ptr,
	unsigned int size)
{
	/* Return early if nothing going on */
	if (count == 0 && ptr == NULL && size == 0)
		return 0;

	/* Sanity check inputs */
	if (count == 0 || ptr == NULL || size == 0)
		return -EINVAL;

	return 1;
}

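/**
 * kgsl_drawobj_cmd_add_ibdesc_list() - Add a list of legacy ibdescs to a
 * command obj
 * @device: Pointer to the KGSL device struct
 * @cmdobj: Command obj to add the IBs to
 * @ptr: User pointer to an array of kgsl_ibdesc structs
 * @count: Number of entries in the array
 *
 * Copy each ibdesc from userspace and add it to the command obj, taking the
 * compat path for 32-bit callers.
 */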
int kgsl_drawobj_cmd_add_ibdesc_list(struct kgsl_device *device,
	struct kgsl_drawobj_cmd *cmdobj, void __user *ptr, int count)
{
	struct kgsl_ibdesc ibdesc;
	struct kgsl_drawobj *baseobj = DRAWOBJ(cmdobj);
	int i, ret;

	/* Ignore everything if this is a MARKER */
	if (baseobj->type & MARKEROBJ_TYPE)
		return 0;

	ret = _verify_input_list(count, ptr, sizeof(ibdesc));
	if (ret <= 0)
		return -EINVAL;

	if (is_compat_task())
		return add_ibdesc_list_compat(device, cmdobj, ptr, count);

	for (i = 0; i < count; i++) {
		memset(&ibdesc, 0, sizeof(ibdesc));

		if (copy_from_user(&ibdesc, ptr, sizeof(ibdesc)))
			return -EFAULT;

		ret = kgsl_drawobj_cmd_add_ibdesc(device, cmdobj, &ibdesc);
		if (ret)
			return ret;

		ptr += sizeof(ibdesc);
	}

	return 0;
}

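/**
 * kgsl_drawobj_sync_add_syncpoints() - Add a list of legacy sync points to a
 * sync obj
 * @device: Pointer to the KGSL device struct
 * @syncobj: Sync obj to add the sync points to
 * @ptr: User pointer to an array of kgsl_cmd_syncpoint structs
 * @count: Number of entries in the array
 *
 * Allocate the synclist and populate it from the user-supplied array, taking
 * the compat path for 32-bit callers.
 */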
int kgsl_drawobj_sync_add_syncpoints(struct kgsl_device *device,
	struct kgsl_drawobj_sync *syncobj, void __user *ptr, int count)
{
	struct kgsl_cmd_syncpoint sync;
	int i, ret;

	if (count == 0)
		return 0;

	syncobj->synclist = kcalloc(count,
		sizeof(struct kgsl_drawobj_sync_event), GFP_KERNEL);

	if (syncobj->synclist == NULL)
		return -ENOMEM;

	if (is_compat_task())
		return add_syncpoints_compat(device, syncobj, ptr, count);

	for (i = 0; i < count; i++) {
		memset(&sync, 0, sizeof(sync));

		if (copy_from_user(&sync, ptr, sizeof(sync)))
			return -EFAULT;

		ret = kgsl_drawobj_sync_add_sync(device, syncobj, &sync);
		if (ret)
			return ret;

		ptr += sizeof(sync);
	}

	return 0;
}

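/*
 * kgsl_drawobj_add_memobject() - Copy a user command object onto a list
 * @head: List (cmdlist or memlist) to add the object to
 * @obj: User-supplied object descriptor to copy
 */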
static int kgsl_drawobj_add_memobject(struct list_head *head,
	struct kgsl_command_object *obj)
{
	struct kgsl_memobj_node *mem;

	mem = kmem_cache_alloc(memobjs_cache, GFP_KERNEL);
	if (mem == NULL)
		return -ENOMEM;

	mem->gpuaddr = obj->gpuaddr;
	mem->size = obj->size;
	mem->id = obj->id;
	mem->offset = obj->offset;
	mem->flags = obj->flags;
	mem->priv = 0;

	list_add_tail(&mem->node, head);
	return 0;
}

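/*
 * kgsl_drawobj_add_sparseobject() - Copy a sparse binding object onto a list
 * @head: Sparse list to add the object to
 * @obj: User-supplied sparse binding descriptor to copy
 * @virt_id: Virtual memory entry id for the binding
 */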
static int kgsl_drawobj_add_sparseobject(struct list_head *head,
	struct kgsl_sparse_binding_object *obj, unsigned int virt_id)
{
	struct kgsl_sparseobj_node *mem;

	mem = kmem_cache_alloc(sparseobjs_cache, GFP_KERNEL);
	if (mem == NULL)
		return -ENOMEM;

	mem->virt_id = virt_id;
	mem->obj.id = obj->id;
	mem->obj.virtoffset = obj->virtoffset;
	mem->obj.physoffset = obj->physoffset;
	mem->obj.size = obj->size;
	mem->obj.flags = obj->flags;

	list_add_tail(&mem->node, head);
	return 0;
}

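/**
 * kgsl_drawobj_sparse_add_sparselist() - Add the user bind/unbind list to a
 * sparse obj
 * @device: Pointer to the KGSL device struct
 * @sparseobj: Sparse obj to add the bindings to
 * @id: Virtual memory entry id for the bindings
 * @ptr: User pointer to an array of kgsl_sparse_binding_object structs
 * @size: Size of each entry in the array
 * @count: Number of entries in the array
 *
 * Copy each binding from userspace, reject entries that are neither bind
 * nor unbind operations and queue the rest on the sparse list.
 */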
int kgsl_drawobj_sparse_add_sparselist(struct kgsl_device *device,
	struct kgsl_drawobj_sparse *sparseobj, unsigned int id,
	void __user *ptr, unsigned int size, unsigned int count)
{
	struct kgsl_sparse_binding_object obj;
	int i, ret = 0;

	ret = _verify_input_list(count, ptr, size);
	if (ret <= 0)
		return ret;

	for (i = 0; i < count; i++) {
		memset(&obj, 0, sizeof(obj));

		ret = _copy_from_user(&obj, ptr, sizeof(obj), size);
		if (ret)
			return ret;

		if (!(obj.flags & (KGSL_SPARSE_BIND | KGSL_SPARSE_UNBIND)))
			return -EINVAL;

		ret = kgsl_drawobj_add_sparseobject(&sparseobj->sparselist,
			&obj, id);
		if (ret)
			return ret;

		ptr += sizeof(obj);
	}

	sparseobj->size = size;
	sparseobj->count = count;

	return 0;
}

#define CMDLIST_FLAGS \
	(KGSL_CMDLIST_IB | \
	 KGSL_CMDLIST_CTXTSWITCH_PREAMBLE | \
	 KGSL_CMDLIST_IB_PREAMBLE)

/* This can only accept MARKEROBJ_TYPE and CMDOBJ_TYPE */
int kgsl_drawobj_cmd_add_cmdlist(struct kgsl_device *device,
	struct kgsl_drawobj_cmd *cmdobj, void __user *ptr,
	unsigned int size, unsigned int count)
{
	struct kgsl_command_object obj;
	struct kgsl_drawobj *baseobj = DRAWOBJ(cmdobj);
	int i, ret;

	/* Ignore everything if this is a MARKER */
	if (baseobj->type & MARKEROBJ_TYPE)
		return 0;

	ret = _verify_input_list(count, ptr, size);
	if (ret <= 0)
		return ret;

	for (i = 0; i < count; i++) {
		memset(&obj, 0, sizeof(obj));

		ret = _copy_from_user(&obj, ptr, sizeof(obj), size);
		if (ret)
			return ret;

		/* Sanity check the flags */
		if (!(obj.flags & CMDLIST_FLAGS)) {
			KGSL_DRV_ERR(device,
				"invalid cmdobj ctxt %d flags %d id %d offset %lld addr %lld size %lld\n",
				baseobj->context->id, obj.flags, obj.id,
				obj.offset, obj.gpuaddr, obj.size);
			return -EINVAL;
		}

		ret = kgsl_drawobj_add_memobject(&cmdobj->cmdlist, &obj);
		if (ret)
			return ret;

		ptr += sizeof(obj);
	}

	return 0;
}

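/**
 * kgsl_drawobj_cmd_add_memlist() - Add the user memory object list to a
 * command obj
 * @device: Pointer to the KGSL device struct
 * @cmdobj: Command obj to add the memory objects to
 * @ptr: User pointer to an array of kgsl_command_object structs
 * @size: Size of each entry in the array
 * @count: Number of entries in the array
 *
 * Copy each object from userspace; profiling buffers are handed to
 * add_profiling_buffer() and everything else is added to the memlist.
 */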
int kgsl_drawobj_cmd_add_memlist(struct kgsl_device *device,
	struct kgsl_drawobj_cmd *cmdobj, void __user *ptr,
	unsigned int size, unsigned int count)
{
	struct kgsl_command_object obj;
	struct kgsl_drawobj *baseobj = DRAWOBJ(cmdobj);
	int i, ret;

	/* Ignore everything if this is a MARKER */
	if (baseobj->type & MARKEROBJ_TYPE)
		return 0;

	ret = _verify_input_list(count, ptr, size);
	if (ret <= 0)
		return ret;

	for (i = 0; i < count; i++) {
		memset(&obj, 0, sizeof(obj));

		ret = _copy_from_user(&obj, ptr, sizeof(obj), size);
		if (ret)
			return ret;

		if (!(obj.flags & KGSL_OBJLIST_MEMOBJ)) {
			KGSL_DRV_ERR(device,
				"invalid memobj ctxt %d flags %d id %d offset %lld addr %lld size %lld\n",
				DRAWOBJ(cmdobj)->context->id, obj.flags,
				obj.id, obj.offset, obj.gpuaddr, obj.size);
			return -EINVAL;
		}

		if (obj.flags & KGSL_OBJLIST_PROFILE)
			add_profiling_buffer(device, cmdobj, obj.gpuaddr,
				obj.size, obj.id, obj.offset);
		else {
			ret = kgsl_drawobj_add_memobject(&cmdobj->memlist,
				&obj);
			if (ret)
				return ret;
		}

		ptr += sizeof(obj);
	}

	return 0;
}

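/**
 * kgsl_drawobj_sync_add_synclist() - Add the user syncpoint list to a sync obj
 * @device: Pointer to the KGSL device struct
 * @syncobj: Sync obj to add the sync points to
 * @ptr: User pointer to an array of kgsl_command_syncpoint structs
 * @size: Size of each entry in the array
 * @count: Number of entries in the array
 *
 * Allocate the synclist and create a sync point for each entry copied from
 * userspace.
 */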
int kgsl_drawobj_sync_add_synclist(struct kgsl_device *device,
	struct kgsl_drawobj_sync *syncobj, void __user *ptr,
	unsigned int size, unsigned int count)
{
	struct kgsl_command_syncpoint syncpoint;
	struct kgsl_cmd_syncpoint sync;
	int i, ret;

	/* If creating a sync and the data is not there or wrong then error */
	ret = _verify_input_list(count, ptr, size);
	if (ret <= 0)
		return -EINVAL;

	syncobj->synclist = kcalloc(count,
		sizeof(struct kgsl_drawobj_sync_event), GFP_KERNEL);

	if (syncobj->synclist == NULL)
		return -ENOMEM;

	for (i = 0; i < count; i++) {
		memset(&syncpoint, 0, sizeof(syncpoint));

		ret = _copy_from_user(&syncpoint, ptr, sizeof(syncpoint), size);
		if (ret)
			return ret;

		sync.type = syncpoint.type;
		sync.priv = to_user_ptr(syncpoint.priv);
		sync.size = syncpoint.size;

		ret = kgsl_drawobj_sync_add_sync(device, syncobj, &sync);
		if (ret)
			return ret;

		ptr += sizeof(syncpoint);
	}

	return 0;
}

void kgsl_drawobjs_cache_exit(void)
{
	kmem_cache_destroy(memobjs_cache);
	kmem_cache_destroy(sparseobjs_cache);
}

int kgsl_drawobjs_cache_init(void)
{
	memobjs_cache = KMEM_CACHE(kgsl_memobj_node, 0);
	sparseobjs_cache = KMEM_CACHE(kgsl_sparseobj_node, 0);

	if (!memobjs_cache || !sparseobjs_cache)
		return -ENOMEM;

	return 0;
}