/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

/*
 * KGSL drawobj management
 * A drawobj is a single submission from userland. The drawobj
 * encapsulates everything about the submission: command buffers, flags and
 * sync points.
 *
 * Sync points are events that need to expire before the
 * drawobj can be queued to the hardware. All sync points are contained in an
 * array of kgsl_drawobj_sync_event structs in the drawobj. There can be
 * multiple types of events, both internal ones (GPU events) and external
 * triggers. As the events expire, bits are cleared in a pending bitmap stored
 * in the drawobj. The command is submitted to the hardware as soon as the
 * bitmap goes to zero, indicating that there are no more pending events.
 */
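
/*
 * Note that the pending bitmap is a single unsigned long, so the number of
 * sync points on any one sync obj is bounded by KGSL_MAX_SYNCPOINTS.
 */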

#include <linux/uaccess.h>
#include <linux/list.h>
#include <linux/compat.h>

#include "kgsl.h"
#include "kgsl_device.h"
#include "kgsl_drawobj.h"
#include "kgsl_sync.h"
#include "kgsl_trace.h"
#include "kgsl_compat.h"

/*
 * Define a kmem cache for the memobj & sparseobj structures since we
 * allocate and free them so frequently
 */
static struct kmem_cache *memobjs_cache;
static struct kmem_cache *sparseobjs_cache;

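/*
 * kgsl_drawobj_destroy_object() - Final kref release handler for a drawobj
 * @kref: Pointer to the refcount embedded in the drawobj being released
 *
 * Called when the last reference to a drawobj is dropped. Releases the
 * context reference and frees the type-specific object (and the synclist
 * for sync objects).
 */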
void kgsl_drawobj_destroy_object(struct kref *kref)
{
	struct kgsl_drawobj *drawobj = container_of(kref,
		struct kgsl_drawobj, refcount);
	struct kgsl_drawobj_sync *syncobj;

	kgsl_context_put(drawobj->context);

	switch (drawobj->type) {
	case SYNCOBJ_TYPE:
		syncobj = SYNCOBJ(drawobj);
		kfree(syncobj->synclist);
		kfree(syncobj);
		break;
	case CMDOBJ_TYPE:
	case MARKEROBJ_TYPE:
		kfree(CMDOBJ(drawobj));
		break;
	case SPARSEOBJ_TYPE:
		kfree(SPARSEOBJ(drawobj));
		break;
	}
}

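/*
 * kgsl_dump_syncpoints() - Print every still-pending sync point on a sync obj
 * @device: KGSL device that owns the sync obj
 * @syncobj: Sync obj whose unexpired sync points should be dumped
 */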
void kgsl_dump_syncpoints(struct kgsl_device *device,
	struct kgsl_drawobj_sync *syncobj)
{
	struct kgsl_drawobj_sync_event *event;
	unsigned int i;

	for (i = 0; i < syncobj->numsyncs; i++) {
		event = &syncobj->synclist[i];

		if (!kgsl_drawobj_event_pending(syncobj, i))
			continue;

		switch (event->type) {
		case KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP: {
			unsigned int retired;

			kgsl_readtimestamp(event->device,
				event->context, KGSL_TIMESTAMP_RETIRED,
				&retired);

			dev_err(device->dev,
				" [timestamp] context %d timestamp %d (retired %d)\n",
				event->context->id, event->timestamp,
				retired);
			break;
		}
		case KGSL_CMD_SYNCPOINT_TYPE_FENCE:
			dev_err(device->dev, " fence: %s\n",
				event->fence_name);
			break;
		}
	}
}

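/*
 * syncobj_timer() - Debug timer that fires when a sync obj has been waiting
 * on its sync points for too long
 * @data: Pointer to the sync obj being watched, cast to an unsigned long
 *
 * Dumps the context state and every sync point that is still pending so a
 * possible syncpoint deadlock can be diagnosed from the kernel log.
 */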
static void syncobj_timer(unsigned long data)
{
	struct kgsl_device *device;
	struct kgsl_drawobj_sync *syncobj = (struct kgsl_drawobj_sync *) data;
	struct kgsl_drawobj *drawobj;
	struct kgsl_drawobj_sync_event *event;
	unsigned int i;

	if (syncobj == NULL)
		return;

	drawobj = DRAWOBJ(syncobj);

	if (!kref_get_unless_zero(&drawobj->refcount))
		return;

	if (drawobj->context == NULL) {
		kgsl_drawobj_put(drawobj);
		return;
	}

	device = drawobj->context->device;

	dev_err(device->dev,
		"kgsl: possible gpu syncpoint deadlock for context %d timestamp %d\n",
		drawobj->context->id, drawobj->timestamp);

	set_bit(ADRENO_CONTEXT_FENCE_LOG, &drawobj->context->priv);
	kgsl_context_dump(drawobj->context);
	clear_bit(ADRENO_CONTEXT_FENCE_LOG, &drawobj->context->priv);

	dev_err(device->dev, " pending events:\n");

	for (i = 0; i < syncobj->numsyncs; i++) {
		event = &syncobj->synclist[i];

		if (!kgsl_drawobj_event_pending(syncobj, i))
			continue;

		switch (event->type) {
		case KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP:
			dev_err(device->dev, " [%d] TIMESTAMP %d:%d\n",
				i, event->context->id, event->timestamp);
			break;
		case KGSL_CMD_SYNCPOINT_TYPE_FENCE:
			dev_err(device->dev, " [%d] FENCE %s\n",
				i, event->fence_name);
			break;
		}
	}

	kgsl_drawobj_put(drawobj);
	dev_err(device->dev, "--gpu syncpoint deadlock print end--\n");
}

/*
 * A generic function to retire a pending sync event and (possibly) kick the
 * dispatcher.
 * Returns false if the event was already marked for cancellation in another
 * thread. Returns true if this thread is responsible for freeing up the
 * memory and the event will not be cancelled.
 */
static bool drawobj_sync_expire(struct kgsl_device *device,
	struct kgsl_drawobj_sync_event *event)
{
	struct kgsl_drawobj_sync *syncobj = event->syncobj;
	/*
	 * Clear the event from the pending mask - if it is already clear, then
	 * leave without doing anything useful
	 */
	if (!test_and_clear_bit(event->id, &syncobj->pending))
		return false;

	/*
	 * If no more pending events, delete the timer and schedule the command
	 * for dispatch
	 */
	if (!kgsl_drawobj_events_pending(event->syncobj)) {
		del_timer_sync(&syncobj->timer);

		if (device->ftbl->drawctxt_sched)
			device->ftbl->drawctxt_sched(device,
				event->syncobj->base.context);
	}
	return true;
}

/*
 * This function is called by the GPU event when the sync event timestamp
 * expires
 */
static void drawobj_sync_func(struct kgsl_device *device,
		struct kgsl_event_group *group, void *priv, int result)
{
	struct kgsl_drawobj_sync_event *event = priv;

	trace_syncpoint_timestamp_expire(event->syncobj,
		event->context, event->timestamp);

	drawobj_sync_expire(device, event);
	kgsl_context_put(event->context);
	kgsl_drawobj_put(&event->syncobj->base);
}

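/*
 * memobj_list_free() - Free every kgsl_memobj_node on a list, returning the
 * nodes to the memobjs kmem cache
 * @list: List of memory objects to free
 */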
static inline void memobj_list_free(struct list_head *list)
{
	struct kgsl_memobj_node *mem, *tmpmem;

	/* Free the cmd mem here */
	list_for_each_entry_safe(mem, tmpmem, list, node) {
		list_del_init(&mem->node);
		kmem_cache_free(memobjs_cache, mem);
	}
}

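/*
 * drawobj_destroy_sparse() - Release the resources owned by a sparse obj by
 * freeing every node on its sparse binding list
 * @drawobj: Draw obj of SPARSEOBJ_TYPE being destroyed
 */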
static void drawobj_destroy_sparse(struct kgsl_drawobj *drawobj)
{
	struct kgsl_sparseobj_node *mem, *tmpmem;
	struct list_head *list = &SPARSEOBJ(drawobj)->sparselist;

	/* Free the sparse mem here */
	list_for_each_entry_safe(mem, tmpmem, list, node) {
		list_del_init(&mem->node);
		kmem_cache_free(sparseobjs_cache, mem);
	}
}

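/*
 * drawobj_destroy_sync() - Tear down a sync obj: stop the debug timer and
 * cancel every sync point that has not yet expired
 * @drawobj: Draw obj of SYNCOBJ_TYPE being destroyed
 */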
static void drawobj_destroy_sync(struct kgsl_drawobj *drawobj)
{
	struct kgsl_drawobj_sync *syncobj = SYNCOBJ(drawobj);
	unsigned long pending = 0;
	unsigned int i;

	/* Zap the canary timer */
	del_timer_sync(&syncobj->timer);

	/*
	 * Copy off the pending list and clear each pending event atomically -
	 * this will render any subsequent asynchronous callback harmless.
	 * This marks each event for deletion. If any pending fence callbacks
	 * run between now and the actual cancel, the associated structures
	 * are kfreed only in the cancel call.
	 */
	for_each_set_bit(i, &syncobj->pending, KGSL_MAX_SYNCPOINTS) {
		if (test_and_clear_bit(i, &syncobj->pending))
			__set_bit(i, &pending);
	}

	/*
	 * Cancel every event that was still pending when the local copy was
	 * taken above - events that already expired are skipped
	 */
	for (i = 0; i < syncobj->numsyncs; i++) {
		struct kgsl_drawobj_sync_event *event = &syncobj->synclist[i];

		/* Don't do anything if the event has already expired */
		if (!test_bit(i, &pending))
			continue;

		switch (event->type) {
		case KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP:
			kgsl_cancel_event(drawobj->device,
				&event->context->events, event->timestamp,
				drawobj_sync_func, event);
			break;
		case KGSL_CMD_SYNCPOINT_TYPE_FENCE:
			kgsl_sync_fence_async_cancel(event->handle);
			kgsl_drawobj_put(drawobj);
			break;
		}
	}

	/*
	 * If we cancelled an event, there's a good chance that the context is
	 * on a dispatcher queue, so schedule to get it removed.
	 */
	if (!bitmap_empty(&pending, KGSL_MAX_SYNCPOINTS) &&
		drawobj->device->ftbl->drawctxt_sched)
		drawobj->device->ftbl->drawctxt_sched(drawobj->device,
			drawobj->context);
}

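/*
 * drawobj_destroy_cmd() - Release the resources owned by a command obj: the
 * profiling buffer reference and the command and memory lists
 * @drawobj: Draw obj of CMDOBJ_TYPE or MARKEROBJ_TYPE being destroyed
 */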
static void drawobj_destroy_cmd(struct kgsl_drawobj *drawobj)
{
	struct kgsl_drawobj_cmd *cmdobj = CMDOBJ(drawobj);

	/*
	 * Release the refcount on the mem entry associated with the
	 * ib profiling buffer
	 */
	if (cmdobj->base.flags & KGSL_DRAWOBJ_PROFILING)
		kgsl_mem_entry_put(cmdobj->profiling_buf_entry);

	/* Destroy the cmdlist we created */
	memobj_list_free(&cmdobj->cmdlist);

	/* Destroy the memlist we created */
	memobj_list_free(&cmdobj->memlist);
}

/**
 * kgsl_drawobj_destroy() - Destroy a kgsl drawobj structure
 * @drawobj: Pointer to the drawobj to destroy
 *
 * Start the process of destroying a command batch. Cancel any pending events
 * and decrement the refcount. Asynchronous events can still signal after
 * kgsl_drawobj_destroy has returned.
 */
void kgsl_drawobj_destroy(struct kgsl_drawobj *drawobj)
{
	if (!drawobj)
		return;

	if (drawobj->type & SYNCOBJ_TYPE)
		drawobj_destroy_sync(drawobj);
	else if (drawobj->type & (CMDOBJ_TYPE | MARKEROBJ_TYPE))
		drawobj_destroy_cmd(drawobj);
	else if (drawobj->type == SPARSEOBJ_TYPE)
		drawobj_destroy_sparse(drawobj);
	else
		return;

	kgsl_drawobj_put(drawobj);
}
EXPORT_SYMBOL(kgsl_drawobj_destroy);

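/*
 * drawobj_sync_fence_func() - Callback run when a fence sync point signals
 * @priv: Pointer to the kgsl_drawobj_sync_event for the fence
 *
 * Expire the sync point and, if this thread won the race against
 * cancellation, drop the drawobj reference that was taken when the sync
 * point was added.
 */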
static bool drawobj_sync_fence_func(void *priv)
{
	struct kgsl_drawobj_sync_event *event = priv;

	trace_syncpoint_fence_expire(event->syncobj, event->fence_name);

	/*
	 * Only call kgsl_drawobj_put() if it's not marked for cancellation
	 * in another thread.
	 */
	if (drawobj_sync_expire(event->device, event)) {
		kgsl_drawobj_put(&event->syncobj->base);
		return true;
	}
	return false;
}

/* drawobj_add_sync_fence() - Add a new sync fence syncpoint
 * @device: KGSL device
 * @syncobj: KGSL sync obj to add the sync point to
 * @priv: Private structure passed by the user
 *
 * Add a new fence sync syncpoint to the sync obj.
 */
static int drawobj_add_sync_fence(struct kgsl_device *device,
		struct kgsl_drawobj_sync *syncobj, void *priv)
{
	struct kgsl_cmd_syncpoint_fence *sync = priv;
	struct kgsl_drawobj *drawobj = DRAWOBJ(syncobj);
	struct kgsl_drawobj_sync_event *event;
	unsigned int id;

	kref_get(&drawobj->refcount);

	id = syncobj->numsyncs++;

	event = &syncobj->synclist[id];

	event->id = id;
	event->type = KGSL_CMD_SYNCPOINT_TYPE_FENCE;
	event->syncobj = syncobj;
	event->device = device;
	event->context = NULL;

	set_bit(event->id, &syncobj->pending);

	event->handle = kgsl_sync_fence_async_wait(sync->fd,
		drawobj_sync_fence_func, event,
		event->fence_name, sizeof(event->fence_name));

	if (IS_ERR_OR_NULL(event->handle)) {
		int ret = PTR_ERR(event->handle);

		clear_bit(event->id, &syncobj->pending);
		event->handle = NULL;

		kgsl_drawobj_put(drawobj);

		/*
		 * If ret == 0 the fence was already signaled - print a trace
		 * message so we can track that
		 */
		if (ret == 0)
			trace_syncpoint_fence_expire(syncobj, "signaled");

		return ret;
	}

	trace_syncpoint_fence(syncobj, event->fence_name);

	return 0;
}

/* drawobj_add_sync_timestamp() - Add a new sync point for a sync obj
 * @device: KGSL device
 * @syncobj: KGSL sync obj to add the sync point to
 * @priv: Private structure passed by the user
 *
 * Add a new sync point timestamp event to the sync obj.
 */
static int drawobj_add_sync_timestamp(struct kgsl_device *device,
		struct kgsl_drawobj_sync *syncobj, void *priv)
{
	struct kgsl_cmd_syncpoint_timestamp *sync = priv;
	struct kgsl_drawobj *drawobj = DRAWOBJ(syncobj);
	struct kgsl_context *context = kgsl_context_get(device,
		sync->context_id);
	struct kgsl_drawobj_sync_event *event;
	int ret = -EINVAL;
	unsigned int id;

	if (context == NULL)
		return -EINVAL;

	/*
	 * We allow somebody to create a sync point on their own context.
	 * This has the effect of delaying a command from submitting until the
	 * dependent command has cleared. That said we obviously can't let them
	 * create a sync point on a future timestamp.
	 */

	if (context == drawobj->context) {
		unsigned int queued;

		kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_QUEUED,
			&queued);

		if (timestamp_cmp(sync->timestamp, queued) > 0) {
			KGSL_DRV_ERR(device,
				"Cannot create syncpoint for future timestamp %d (current %d)\n",
				sync->timestamp, queued);
			goto done;
		}
	}

	kref_get(&drawobj->refcount);

	id = syncobj->numsyncs++;

	event = &syncobj->synclist[id];
	event->id = id;

	event->type = KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP;
	event->syncobj = syncobj;
	event->context = context;
	event->timestamp = sync->timestamp;
	event->device = device;

	set_bit(event->id, &syncobj->pending);

	ret = kgsl_add_event(device, &context->events, sync->timestamp,
		drawobj_sync_func, event);

	if (ret) {
		clear_bit(event->id, &syncobj->pending);
		kgsl_drawobj_put(drawobj);
	} else {
		trace_syncpoint_timestamp(syncobj, context, sync->timestamp);
	}

done:
	if (ret)
		kgsl_context_put(context);

	return ret;
}

/**
 * kgsl_drawobj_sync_add_sync() - Add a sync point to a command batch
 * @device: Pointer to the KGSL device struct for the GPU
 * @syncobj: Pointer to the sync obj
 * @sync: Pointer to the user-specified struct defining the syncpoint
 *
 * Create a new sync point in the sync obj based on the
 * user specified parameters
 */
int kgsl_drawobj_sync_add_sync(struct kgsl_device *device,
	struct kgsl_drawobj_sync *syncobj,
	struct kgsl_cmd_syncpoint *sync)
{
	void *priv;
	int ret, psize;
	struct kgsl_drawobj *drawobj = DRAWOBJ(syncobj);
	int (*func)(struct kgsl_device *device,
			struct kgsl_drawobj_sync *syncobj,
			void *priv);

	switch (sync->type) {
	case KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP:
		psize = sizeof(struct kgsl_cmd_syncpoint_timestamp);
		func = drawobj_add_sync_timestamp;
		break;
	case KGSL_CMD_SYNCPOINT_TYPE_FENCE:
		psize = sizeof(struct kgsl_cmd_syncpoint_fence);
		func = drawobj_add_sync_fence;
		break;
	default:
		KGSL_DRV_ERR(device,
			"bad syncpoint type ctxt %d type 0x%x size %zu\n",
			drawobj->context->id, sync->type, sync->size);
		return -EINVAL;
	}

	if (sync->size != psize) {
		KGSL_DRV_ERR(device,
			"bad syncpoint size ctxt %d type 0x%x size %zu\n",
			drawobj->context->id, sync->type, sync->size);
		return -EINVAL;
	}

	priv = kzalloc(sync->size, GFP_KERNEL);
	if (priv == NULL)
		return -ENOMEM;

	if (copy_from_user(priv, sync->priv, sync->size)) {
		kfree(priv);
		return -EFAULT;
	}

	ret = func(device, syncobj, priv);
	kfree(priv);

	return ret;
}

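/*
 * add_profiling_buffer() - Attach the user-supplied profiling buffer to a
 * command obj so GPU timestamps can be written into it
 * @device: KGSL device that owns the command obj
 * @cmdobj: Command obj to attach the profiling buffer to
 * @gpuaddr: GPU address of the buffer (used when no mem entry id is given)
 * @size: Size of the profiling buffer
 * @id: Optional mem entry id identifying the buffer
 * @offset: Offset of the profiling sample inside the mem entry
 *
 * Only the first successfully validated buffer is attached; any later
 * profiling entries are ignored.
 */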
static void add_profiling_buffer(struct kgsl_device *device,
		struct kgsl_drawobj_cmd *cmdobj,
		uint64_t gpuaddr, uint64_t size,
		unsigned int id, uint64_t offset)
{
	struct kgsl_mem_entry *entry;
	struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj);

	if (!(drawobj->flags & KGSL_DRAWOBJ_PROFILING))
		return;

	/* Only the first buffer entry counts - ignore the rest */
	if (cmdobj->profiling_buf_entry != NULL)
		return;

	if (id != 0)
		entry = kgsl_sharedmem_find_id(drawobj->context->proc_priv,
			id);
	else
		entry = kgsl_sharedmem_find(drawobj->context->proc_priv,
			gpuaddr);

	if (entry != NULL) {
		if (!kgsl_gpuaddr_in_memdesc(&entry->memdesc, gpuaddr, size)) {
			kgsl_mem_entry_put(entry);
			entry = NULL;
		}
	}

	if (entry == NULL) {
		KGSL_DRV_ERR(device,
			"ignore bad profile buffer ctxt %d id %d offset %lld gpuaddr %llx size %lld\n",
			drawobj->context->id, id, offset, gpuaddr, size);
		return;
	}

	cmdobj->profiling_buf_entry = entry;

	if (id != 0)
		cmdobj->profiling_buffer_gpuaddr =
			entry->memdesc.gpuaddr + offset;
	else
		cmdobj->profiling_buffer_gpuaddr = gpuaddr;
}

/**
 * kgsl_drawobj_cmd_add_ibdesc() - Add a legacy ibdesc to a command batch
 * @device: Pointer to the KGSL device struct for the GPU
 * @cmdobj: Pointer to the command obj
 * @ibdesc: Pointer to the user-specified struct defining the memory or IB
 *
 * Create a new memory entry in the command obj based on the
 * user specified parameters
 */
int kgsl_drawobj_cmd_add_ibdesc(struct kgsl_device *device,
	struct kgsl_drawobj_cmd *cmdobj, struct kgsl_ibdesc *ibdesc)
{
	uint64_t gpuaddr = (uint64_t) ibdesc->gpuaddr;
	uint64_t size = (uint64_t) ibdesc->sizedwords << 2;
	struct kgsl_memobj_node *mem;
	struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj);

	/* sanitize the ibdesc ctrl flags */
	ibdesc->ctrl &= KGSL_IBDESC_MEMLIST | KGSL_IBDESC_PROFILING_BUFFER;

	if (drawobj->flags & KGSL_DRAWOBJ_MEMLIST &&
			ibdesc->ctrl & KGSL_IBDESC_MEMLIST) {
		if (ibdesc->ctrl & KGSL_IBDESC_PROFILING_BUFFER) {
			add_profiling_buffer(device, cmdobj,
				gpuaddr, size, 0, 0);
			return 0;
		}
	}

	/* Ignore if SYNC or MARKER is specified */
	if (drawobj->type & (SYNCOBJ_TYPE | MARKEROBJ_TYPE))
		return 0;

	mem = kmem_cache_alloc(memobjs_cache, GFP_KERNEL);
	if (mem == NULL)
		return -ENOMEM;

	mem->gpuaddr = gpuaddr;
	mem->size = size;
	mem->priv = 0;
	mem->id = 0;
	mem->offset = 0;
	mem->flags = 0;

	if (drawobj->flags & KGSL_DRAWOBJ_MEMLIST &&
			ibdesc->ctrl & KGSL_IBDESC_MEMLIST)
		/* add to the memlist */
		list_add_tail(&mem->node, &cmdobj->memlist);
	else {
		/* set the preamble flag if directed to */
		if (drawobj->context->flags & KGSL_CONTEXT_PREAMBLE &&
			list_empty(&cmdobj->cmdlist))
			mem->flags = KGSL_CMDLIST_CTXTSWITCH_PREAMBLE;

		/* add to the cmd list */
		list_add_tail(&mem->node, &cmdobj->cmdlist);
	}

	return 0;
}

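/*
 * _drawobj_create() - Common allocator for all drawobj types
 * @device: KGSL device that will own the object
 * @context: KGSL context the object is being created against
 * @size: Size of the type-specific structure to allocate
 * @type: Draw obj type (CMDOBJ/MARKEROBJ/SYNCOBJ/SPARSEOBJ)
 *
 * Allocate and initialize the common kgsl_drawobj header and take a
 * reference on the context so it can't go away while the object lives.
 * Returns an ERR_PTR() on failure.
 */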
static void *_drawobj_create(struct kgsl_device *device,
	struct kgsl_context *context, unsigned int size,
	unsigned int type)
{
	void *obj = kzalloc(size, GFP_KERNEL);
	struct kgsl_drawobj *drawobj;

	if (obj == NULL)
		return ERR_PTR(-ENOMEM);

	/*
	 * Increase the reference count on the context so it doesn't disappear
	 * during the lifetime of this object
	 */
	if (!_kgsl_context_get(context)) {
		kfree(obj);
		return ERR_PTR(-ENOENT);
	}

	drawobj = obj;

	kref_init(&drawobj->refcount);

	drawobj->device = device;
	drawobj->context = context;
	drawobj->type = type;

	return obj;
}

/**
 * kgsl_drawobj_sparse_create() - Create a new sparse obj structure
 * @device: Pointer to a KGSL device struct
 * @context: Pointer to a KGSL context struct
 * @flags: Flags for the sparse obj
 *
 * Allocate a new kgsl_drawobj_sparse structure
 */
struct kgsl_drawobj_sparse *kgsl_drawobj_sparse_create(
		struct kgsl_device *device,
		struct kgsl_context *context, unsigned int flags)
{
	struct kgsl_drawobj_sparse *sparseobj = _drawobj_create(device,
		context, sizeof(*sparseobj), SPARSEOBJ_TYPE);

	if (!IS_ERR(sparseobj))
		INIT_LIST_HEAD(&sparseobj->sparselist);

	return sparseobj;
}

/**
 * kgsl_drawobj_sync_create() - Create a new sync obj structure
 * @device: Pointer to a KGSL device struct
 * @context: Pointer to a KGSL context struct
 *
 * Allocate a new kgsl_drawobj_sync structure
 */
struct kgsl_drawobj_sync *kgsl_drawobj_sync_create(struct kgsl_device *device,
		struct kgsl_context *context)
{
	struct kgsl_drawobj_sync *syncobj = _drawobj_create(device,
		context, sizeof(*syncobj), SYNCOBJ_TYPE);

	/* Add a timer to help debug sync deadlocks */
	if (!IS_ERR(syncobj))
		setup_timer(&syncobj->timer, syncobj_timer,
				(unsigned long) syncobj);

	return syncobj;
}

/**
 * kgsl_drawobj_cmd_create() - Create a new command obj structure
 * @device: Pointer to a KGSL device struct
 * @context: Pointer to a KGSL context struct
 * @flags: Flags for the command obj
 * @type: type of cmdobj MARKER/CMD
 *
 * Allocate a new kgsl_drawobj_cmd structure
 */
struct kgsl_drawobj_cmd *kgsl_drawobj_cmd_create(struct kgsl_device *device,
		struct kgsl_context *context, unsigned int flags,
		unsigned int type)
{
	struct kgsl_drawobj_cmd *cmdobj = _drawobj_create(device,
		context, sizeof(*cmdobj),
		(type & (CMDOBJ_TYPE | MARKEROBJ_TYPE)));

	if (!IS_ERR(cmdobj)) {
		/* sanitize our flags for drawobj's */
		cmdobj->base.flags = flags & (KGSL_DRAWOBJ_CTX_SWITCH
			| KGSL_DRAWOBJ_MARKER
			| KGSL_DRAWOBJ_END_OF_FRAME
			| KGSL_DRAWOBJ_PWR_CONSTRAINT
			| KGSL_DRAWOBJ_MEMLIST
			| KGSL_DRAWOBJ_PROFILING
			| KGSL_DRAWOBJ_PROFILING_KTIME);

		INIT_LIST_HEAD(&cmdobj->cmdlist);
		INIT_LIST_HEAD(&cmdobj->memlist);
	}

	return cmdobj;
}

#ifdef CONFIG_COMPAT
static int add_ibdesc_list_compat(struct kgsl_device *device,
		struct kgsl_drawobj_cmd *cmdobj, void __user *ptr, int count)
{
	int i, ret = 0;
	struct kgsl_ibdesc_compat ibdesc32;
	struct kgsl_ibdesc ibdesc;

	for (i = 0; i < count; i++) {
		memset(&ibdesc32, 0, sizeof(ibdesc32));

		if (copy_from_user(&ibdesc32, ptr, sizeof(ibdesc32))) {
			ret = -EFAULT;
			break;
		}

		ibdesc.gpuaddr = (unsigned long) ibdesc32.gpuaddr;
		ibdesc.sizedwords = (size_t) ibdesc32.sizedwords;
		ibdesc.ctrl = (unsigned int) ibdesc32.ctrl;

		ret = kgsl_drawobj_cmd_add_ibdesc(device, cmdobj, &ibdesc);
		if (ret)
			break;

		ptr += sizeof(ibdesc32);
	}

	return ret;
}

static int add_syncpoints_compat(struct kgsl_device *device,
		struct kgsl_drawobj_sync *syncobj, void __user *ptr, int count)
{
	struct kgsl_cmd_syncpoint_compat sync32;
	struct kgsl_cmd_syncpoint sync;
	int i, ret = 0;

	for (i = 0; i < count; i++) {
		memset(&sync32, 0, sizeof(sync32));

		if (copy_from_user(&sync32, ptr, sizeof(sync32))) {
			ret = -EFAULT;
			break;
		}

		sync.type = sync32.type;
		sync.priv = compat_ptr(sync32.priv);
		sync.size = (size_t) sync32.size;

		ret = kgsl_drawobj_sync_add_sync(device, syncobj, &sync);
		if (ret)
			break;

		ptr += sizeof(sync32);
	}

	return ret;
}
#else
static int add_ibdesc_list_compat(struct kgsl_device *device,
		struct kgsl_drawobj_cmd *cmdobj, void __user *ptr, int count)
{
	return -EINVAL;
}

static int add_syncpoints_compat(struct kgsl_device *device,
		struct kgsl_drawobj_sync *syncobj, void __user *ptr, int count)
{
	return -EINVAL;
}
#endif

/* Returns:
 *   -EINVAL: Bad data
 *    0: All data fields are empty (nothing to do)
 *    1: All list information is valid
 */
static int _verify_input_list(unsigned int count, void __user *ptr,
		unsigned int size)
{
	/* Return early if nothing going on */
	if (count == 0 && ptr == NULL && size == 0)
		return 0;

	/* Sanity check inputs */
	if (count == 0 || ptr == NULL || size == 0)
		return -EINVAL;

	return 1;
}

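/*
 * kgsl_drawobj_cmd_add_ibdesc_list() - Add a list of legacy ibdescs from
 * user space to a command obj
 * @device: KGSL device that owns the command obj
 * @cmdobj: Command obj to add the indirect buffers to
 * @ptr: User pointer to an array of struct kgsl_ibdesc
 * @count: Number of entries in the array
 *
 * MARKER objects carry no indirect buffers, so the list is ignored for them.
 */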
int kgsl_drawobj_cmd_add_ibdesc_list(struct kgsl_device *device,
		struct kgsl_drawobj_cmd *cmdobj, void __user *ptr, int count)
{
	struct kgsl_ibdesc ibdesc;
	struct kgsl_drawobj *baseobj = DRAWOBJ(cmdobj);
	int i, ret;

	/* Ignore everything if this is a MARKER */
	if (baseobj->type & MARKEROBJ_TYPE)
		return 0;

	ret = _verify_input_list(count, ptr, sizeof(ibdesc));
	if (ret <= 0)
		return -EINVAL;

	if (is_compat_task())
		return add_ibdesc_list_compat(device, cmdobj, ptr, count);

	for (i = 0; i < count; i++) {
		memset(&ibdesc, 0, sizeof(ibdesc));

		if (copy_from_user(&ibdesc, ptr, sizeof(ibdesc)))
			return -EFAULT;

		ret = kgsl_drawobj_cmd_add_ibdesc(device, cmdobj, &ibdesc);
		if (ret)
			return ret;

		ptr += sizeof(ibdesc);
	}

	return 0;
}

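/*
 * kgsl_drawobj_sync_add_syncpoints() - Add legacy sync points from user
 * space to a sync obj
 * @device: KGSL device that owns the sync obj
 * @syncobj: Sync obj to add the sync points to
 * @ptr: User pointer to an array of struct kgsl_cmd_syncpoint
 * @count: Number of entries in the array
 *
 * Allocate the synclist for the sync obj and then add each user-supplied
 * sync point to it.
 */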
int kgsl_drawobj_sync_add_syncpoints(struct kgsl_device *device,
		struct kgsl_drawobj_sync *syncobj, void __user *ptr, int count)
{
	struct kgsl_cmd_syncpoint sync;
	int i, ret;

	if (count == 0)
		return 0;

	syncobj->synclist = kcalloc(count,
		sizeof(struct kgsl_drawobj_sync_event), GFP_KERNEL);

	if (syncobj->synclist == NULL)
		return -ENOMEM;

	if (is_compat_task())
		return add_syncpoints_compat(device, syncobj, ptr, count);

	for (i = 0; i < count; i++) {
		memset(&sync, 0, sizeof(sync));

		if (copy_from_user(&sync, ptr, sizeof(sync)))
			return -EFAULT;

		ret = kgsl_drawobj_sync_add_sync(device, syncobj, &sync);
		if (ret)
			return ret;

		ptr += sizeof(sync);
	}

	return 0;
}

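/*
 * kgsl_drawobj_add_memobject() - Allocate a memobj node from the kmem cache,
 * fill it from a copied-in user command object and add it to the given list
 * @head: List to add the memory object to (cmdlist or memlist)
 * @obj: Copied-in user description of the memory object
 */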
static int kgsl_drawobj_add_memobject(struct list_head *head,
		struct kgsl_command_object *obj)
{
	struct kgsl_memobj_node *mem;

	mem = kmem_cache_alloc(memobjs_cache, GFP_KERNEL);
	if (mem == NULL)
		return -ENOMEM;

	mem->gpuaddr = obj->gpuaddr;
	mem->size = obj->size;
	mem->id = obj->id;
	mem->offset = obj->offset;
	mem->flags = obj->flags;
	mem->priv = 0;

	list_add_tail(&mem->node, head);
	return 0;
}

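/*
 * kgsl_drawobj_add_sparseobject() - Allocate a sparseobj node from the kmem
 * cache, fill it from a copied-in user binding object and add it to the list
 * @head: Sparse list to add the binding object to
 * @obj: Copied-in user description of the sparse binding
 * @virt_id: Virtual memory entry id the binding applies to
 */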
static int kgsl_drawobj_add_sparseobject(struct list_head *head,
		struct kgsl_sparse_binding_object *obj, unsigned int virt_id)
{
	struct kgsl_sparseobj_node *mem;

	mem = kmem_cache_alloc(sparseobjs_cache, GFP_KERNEL);
	if (mem == NULL)
		return -ENOMEM;

	mem->virt_id = virt_id;
	mem->obj.id = obj->id;
	mem->obj.virtoffset = obj->virtoffset;
	mem->obj.physoffset = obj->physoffset;
	mem->obj.size = obj->size;
	mem->obj.flags = obj->flags;

	list_add_tail(&mem->node, head);
	return 0;
}

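/*
 * kgsl_drawobj_sparse_add_sparselist() - Copy a list of sparse bindings from
 * user space into a sparse obj
 * @device: KGSL device that owns the sparse obj
 * @sparseobj: Sparse obj to add the bindings to
 * @id: Virtual memory entry id the bindings apply to
 * @ptr: User pointer to an array of struct kgsl_sparse_binding_object
 * @size: Size of each user entry
 * @count: Number of entries in the array
 *
 * Each entry must request either a bind or an unbind operation.
 */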
int kgsl_drawobj_sparse_add_sparselist(struct kgsl_device *device,
		struct kgsl_drawobj_sparse *sparseobj, unsigned int id,
		void __user *ptr, unsigned int size, unsigned int count)
{
	struct kgsl_sparse_binding_object obj;
	int i, ret = 0;

	ret = _verify_input_list(count, ptr, size);
	if (ret <= 0)
		return ret;

	for (i = 0; i < count; i++) {
		memset(&obj, 0, sizeof(obj));

		ret = _copy_from_user(&obj, ptr, sizeof(obj), size);
		if (ret)
			return ret;

		if (!(obj.flags & (KGSL_SPARSE_BIND | KGSL_SPARSE_UNBIND)))
			return -EINVAL;

		ret = kgsl_drawobj_add_sparseobject(&sparseobj->sparselist,
			&obj, id);
		if (ret)
			return ret;

		ptr += sizeof(obj);
	}

	sparseobj->size = size;
	sparseobj->count = count;

	return 0;
}

#define CMDLIST_FLAGS \
	(KGSL_CMDLIST_IB | \
	KGSL_CMDLIST_CTXTSWITCH_PREAMBLE | \
	KGSL_CMDLIST_IB_PREAMBLE)

/* This can only accept MARKEROBJ_TYPE and CMDOBJ_TYPE */
int kgsl_drawobj_cmd_add_cmdlist(struct kgsl_device *device,
		struct kgsl_drawobj_cmd *cmdobj, void __user *ptr,
		unsigned int size, unsigned int count)
{
	struct kgsl_command_object obj;
	struct kgsl_drawobj *baseobj = DRAWOBJ(cmdobj);
	int i, ret;

	/* Ignore everything if this is a MARKER */
	if (baseobj->type & MARKEROBJ_TYPE)
		return 0;

	ret = _verify_input_list(count, ptr, size);
	if (ret <= 0)
		return ret;

	for (i = 0; i < count; i++) {
		memset(&obj, 0, sizeof(obj));

		ret = _copy_from_user(&obj, ptr, sizeof(obj), size);
		if (ret)
			return ret;

		/* Sanity check the flags */
		if (!(obj.flags & CMDLIST_FLAGS)) {
			KGSL_DRV_ERR(device,
				"invalid cmdobj ctxt %d flags %d id %d offset %lld addr %lld size %lld\n",
				baseobj->context->id, obj.flags, obj.id,
				obj.offset, obj.gpuaddr, obj.size);
			return -EINVAL;
		}

		ret = kgsl_drawobj_add_memobject(&cmdobj->cmdlist, &obj);
		if (ret)
			return ret;

		ptr += sizeof(obj);
	}

	return 0;
}

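/*
 * kgsl_drawobj_cmd_add_memlist() - Copy a list of memory objects from user
 * space into the memlist of a command obj
 * @device: KGSL device that owns the command obj
 * @cmdobj: Command obj to add the memory objects to
 * @ptr: User pointer to an array of struct kgsl_command_object
 * @size: Size of each user entry
 * @count: Number of entries in the array
 *
 * Entries flagged with KGSL_OBJLIST_PROFILE are treated as the profiling
 * buffer instead of being added to the memlist.
 */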
int kgsl_drawobj_cmd_add_memlist(struct kgsl_device *device,
		struct kgsl_drawobj_cmd *cmdobj, void __user *ptr,
		unsigned int size, unsigned int count)
{
	struct kgsl_command_object obj;
	struct kgsl_drawobj *baseobj = DRAWOBJ(cmdobj);
	int i, ret;

	/* Ignore everything if this is a MARKER */
	if (baseobj->type & MARKEROBJ_TYPE)
		return 0;

	ret = _verify_input_list(count, ptr, size);
	if (ret <= 0)
		return ret;

	for (i = 0; i < count; i++) {
		memset(&obj, 0, sizeof(obj));

		ret = _copy_from_user(&obj, ptr, sizeof(obj), size);
		if (ret)
			return ret;

		if (!(obj.flags & KGSL_OBJLIST_MEMOBJ)) {
			KGSL_DRV_ERR(device,
				"invalid memobj ctxt %d flags %d id %d offset %lld addr %lld size %lld\n",
				DRAWOBJ(cmdobj)->context->id, obj.flags,
				obj.id, obj.offset, obj.gpuaddr, obj.size);
			return -EINVAL;
		}

		if (obj.flags & KGSL_OBJLIST_PROFILE)
			add_profiling_buffer(device, cmdobj, obj.gpuaddr,
				obj.size, obj.id, obj.offset);
		else {
			ret = kgsl_drawobj_add_memobject(&cmdobj->memlist,
				&obj);
			if (ret)
				return ret;
		}

		ptr += sizeof(obj);
	}

	return 0;
}

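/*
 * kgsl_drawobj_sync_add_synclist() - Copy a list of sync points from user
 * space into a sync obj
 * @device: KGSL device that owns the sync obj
 * @syncobj: Sync obj to add the sync points to
 * @ptr: User pointer to an array of struct kgsl_command_syncpoint
 * @size: Size of each user entry
 * @count: Number of entries in the array
 *
 * Unlike the legacy syncpoint path, an empty or malformed list is an error
 * here.
 */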
int kgsl_drawobj_sync_add_synclist(struct kgsl_device *device,
		struct kgsl_drawobj_sync *syncobj, void __user *ptr,
		unsigned int size, unsigned int count)
{
	struct kgsl_command_syncpoint syncpoint;
	struct kgsl_cmd_syncpoint sync;
	int i, ret;

	/* If creating a sync and the data is not there or wrong then error */
	ret = _verify_input_list(count, ptr, size);
	if (ret <= 0)
		return -EINVAL;

	syncobj->synclist = kcalloc(count,
		sizeof(struct kgsl_drawobj_sync_event), GFP_KERNEL);

	if (syncobj->synclist == NULL)
		return -ENOMEM;

	for (i = 0; i < count; i++) {
		memset(&syncpoint, 0, sizeof(syncpoint));

		ret = _copy_from_user(&syncpoint, ptr, sizeof(syncpoint), size);
		if (ret)
			return ret;

		sync.type = syncpoint.type;
		sync.priv = to_user_ptr(syncpoint.priv);
		sync.size = syncpoint.size;

		ret = kgsl_drawobj_sync_add_sync(device, syncobj, &sync);
		if (ret)
			return ret;

		ptr += sizeof(syncpoint);
	}

	return 0;
}

void kgsl_drawobjs_cache_exit(void)
{
	kmem_cache_destroy(memobjs_cache);
	kmem_cache_destroy(sparseobjs_cache);
}

int kgsl_drawobjs_cache_init(void)
{
	memobjs_cache = KMEM_CACHE(kgsl_memobj_node, 0);
	sparseobjs_cache = KMEM_CACHE(kgsl_sparseobj_node, 0);

	if (!memobjs_cache || !sparseobjs_cache)
		return -ENOMEM;

	return 0;
}