/* Copyright (c) 2002,2007-2020, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/slab.h>
#include <linux/msm_kgsl.h>
#include <linux/sched.h>
#include <linux/debugfs.h>

#include "kgsl.h"
#include "kgsl_sharedmem.h"
#include "adreno.h"
#include "adreno_trace.h"

static void wait_callback(struct kgsl_device *device,
		struct kgsl_event_group *group, void *priv, int result)
{
	struct adreno_context *drawctxt = priv;

	wake_up_all(&drawctxt->waiting);
}

static int _check_context_timestamp(struct kgsl_device *device,
		struct kgsl_context *context, unsigned int timestamp)
{
	/* Bail if the drawctxt has been invalidated or destroyed */
	if (kgsl_context_detached(context) || kgsl_context_invalid(context))
		return 1;

	return kgsl_check_timestamp(device, context, timestamp);
}

/**
 * adreno_drawctxt_dump() - dump information about a draw context
 * @device: KGSL device that owns the context
 * @context: KGSL context to dump information about
 *
 * Dump specific information about the context to the kernel log. Used for
 * fence timeout callbacks.
 */
void adreno_drawctxt_dump(struct kgsl_device *device,
		struct kgsl_context *context)
{
	unsigned int queue, start, retire;
	struct adreno_context *drawctxt = ADRENO_CONTEXT(context);
	int index, pos;
	char buf[120];

	kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_QUEUED, &queue);
	kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_CONSUMED, &start);
	kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_RETIRED, &retire);

	/*
	 * The kgsl sync obj timer may be running and it uses the same lock,
	 * so take the lock with software interrupts disabled (bh) to avoid
	 * spinlock recursion.
	 *
	 * Use spin_trylock because the dispatcher can acquire drawctxt->lock
	 * if the context is pending and the fence it is waiting on just got
	 * signalled. The dispatcher acquires drawctxt->lock and tries to
	 * delete the sync obj timer using del_timer_sync().
	 * del_timer_sync() waits until the timer and its pending handlers
	 * are deleted. But if the timer expires at the same time, the
	 * timer handler could be waiting on drawctxt->lock, leading to a
	 * deadlock. To prevent this, use spin_trylock_bh.
	 */
	if (!spin_trylock_bh(&drawctxt->lock)) {
		dev_err(device->dev, " context[%d]: could not get lock\n",
			context->id);
		return;
	}

	dev_err(device->dev,
		" context[%d]: queue=%d, submit=%d, start=%d, retire=%d\n",
		context->id, queue, drawctxt->submitted_timestamp,
		start, retire);

	if (drawctxt->drawqueue_head != drawctxt->drawqueue_tail) {
		struct kgsl_drawobj *drawobj =
			drawctxt->drawqueue[drawctxt->drawqueue_head];

		if (test_bit(ADRENO_CONTEXT_FENCE_LOG, &context->priv)) {
			dev_err(device->dev,
				" possible deadlock. Context %d might be blocked for itself\n",
				context->id);
			goto stats;
		}

		if (!kref_get_unless_zero(&drawobj->refcount))
			goto stats;

		if (drawobj->type == SYNCOBJ_TYPE) {
			struct kgsl_drawobj_sync *syncobj = SYNCOBJ(drawobj);

			if (kgsl_drawobj_events_pending(syncobj)) {
				dev_err(device->dev,
					" context[%d] (ts=%d) Active sync points:\n",
					context->id, drawobj->timestamp);

				kgsl_dump_syncpoints(device, syncobj);
			}
		}

		kgsl_drawobj_put(drawobj);
	}

stats:
	memset(buf, 0, sizeof(buf));

	pos = 0;

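	/*
	 * Build a "msec.usec" string for each sample. The stored ticks
	 * appear to come from the 19.2 MHz always-on counter, so
	 * ticks * 10 / 192 yields microseconds; the second do_div() then
	 * splits that into milliseconds and the microsecond remainder.
	 */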
	for (index = 0; index < SUBMIT_RETIRE_TICKS_SIZE; index++) {
		uint64_t msecs;
		unsigned int usecs;

		if (!drawctxt->submit_retire_ticks[index])
			continue;
		msecs = drawctxt->submit_retire_ticks[index] * 10;
		usecs = do_div(msecs, 192);
		usecs = do_div(msecs, 1000);
		pos += snprintf(buf + pos, sizeof(buf) - pos, "%d.%0d ",
				(unsigned int)msecs, usecs);
	}
	dev_err(device->dev, " context[%d]: submit times: %s\n",
		context->id, buf);

	spin_unlock_bh(&drawctxt->lock);
}

/**
 * adreno_drawctxt_wait() - sleep until a timestamp expires
 * @adreno_dev: pointer to the adreno_device struct
 * @context: Pointer to the KGSL context to wait on
 * @timestamp: Timestamp to wait on
 * @timeout: Number of jiffies to wait (0 for infinite)
 *
 * Register an event to wait for a timestamp on a context and sleep until it
 * has passed. Returns < 0 on error, -ETIMEDOUT if the timeout expires or 0
 * on success.
 */
int adreno_drawctxt_wait(struct adreno_device *adreno_dev,
		struct kgsl_context *context,
		uint32_t timestamp, unsigned int timeout)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct adreno_context *drawctxt = ADRENO_CONTEXT(context);
	int ret;
	long ret_temp;

	if (kgsl_context_detached(context))
		return -ENOENT;

	if (kgsl_context_invalid(context))
		return -EDEADLK;

	trace_adreno_drawctxt_wait_start(-1, context->id, timestamp);

	ret = kgsl_add_event(device, &context->events, timestamp,
		wait_callback, (void *) drawctxt);
	if (ret)
		goto done;

	/*
	 * If timeout is 0, wait forever. msecs_to_jiffies will force
	 * values larger than INT_MAX to an infinite timeout.
	 */
	if (timeout == 0)
		timeout = UINT_MAX;

	ret_temp = wait_event_interruptible_timeout(drawctxt->waiting,
		_check_context_timestamp(device, context, timestamp),
		msecs_to_jiffies(timeout));

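	/*
	 * wait_event_interruptible_timeout() returns 0 on timeout, a
	 * negative value if the wait was interrupted by a signal, and a
	 * positive value if the condition became true in time.
	 */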
	if (ret_temp == 0) {
		ret = -ETIMEDOUT;
		goto done;
	} else if (ret_temp < 0) {
		ret = (int) ret_temp;
		goto done;
	}
	ret = 0;

	/* Return -EDEADLK if the context was invalidated while we were waiting */
	if (kgsl_context_invalid(context))
		ret = -EDEADLK;

	/* Return -ENOENT if the context was detached while we were waiting */
	if (kgsl_context_detached(context))
		ret = -ENOENT;

done:
	trace_adreno_drawctxt_wait_done(-1, context->id, timestamp, ret);
	return ret;
}

/**
 * adreno_drawctxt_wait_rb() - Wait for the last RB timestamp at which this
 * context submitted a command to the corresponding RB
 * @adreno_dev: The device on which the timestamp is active
 * @context: The context which submitted commands to the RB
 * @timestamp: The RB timestamp of the last command submitted to the RB
 * @timeout: Timeout value for the wait
 *
 * The caller must hold the device mutex.
 */
static int adreno_drawctxt_wait_rb(struct adreno_device *adreno_dev,
		struct kgsl_context *context,
		uint32_t timestamp, unsigned int timeout)
{
	struct adreno_context *drawctxt = ADRENO_CONTEXT(context);
	int ret = 0;

	/*
	 * If the context is invalid or has not submitted any commands to
	 * the GPU, return immediately - we may end up waiting for a
	 * timestamp that will never come.
	 */
	if (kgsl_context_invalid(context) ||
			!test_bit(KGSL_CONTEXT_PRIV_SUBMITTED, &context->priv))
		goto done;

	trace_adreno_drawctxt_wait_start(drawctxt->rb->id, context->id,
		timestamp);

	ret = adreno_ringbuffer_waittimestamp(drawctxt->rb, timestamp, timeout);
done:
	trace_adreno_drawctxt_wait_done(drawctxt->rb->id, context->id,
		timestamp, ret);
	return ret;
}

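/*
 * Drain the context's drawqueue into @list and return the number of entries
 * collected. The caller must hold drawctxt->lock.
 */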
static int drawctxt_detach_drawobjs(struct adreno_context *drawctxt,
		struct kgsl_drawobj **list)
{
	int count = 0;

	while (drawctxt->drawqueue_head != drawctxt->drawqueue_tail) {
		struct kgsl_drawobj *drawobj =
			drawctxt->drawqueue[drawctxt->drawqueue_head];

		drawctxt->drawqueue_head = (drawctxt->drawqueue_head + 1) %
			ADRENO_CONTEXT_DRAWQUEUE_SIZE;

		list[count++] = drawobj;
	}

	return count;
}

/**
 * adreno_drawctxt_invalidate() - Invalidate an adreno draw context
 * @device: Pointer to the KGSL device structure for the GPU
 * @context: Pointer to the KGSL context structure
 *
 * Invalidate the context, remove all queued commands, and cancel any pending
 * waiters.
 */
void adreno_drawctxt_invalidate(struct kgsl_device *device,
		struct kgsl_context *context)
{
	struct adreno_context *drawctxt = ADRENO_CONTEXT(context);
	struct kgsl_drawobj *list[ADRENO_CONTEXT_DRAWQUEUE_SIZE];
	int i, count;

	trace_adreno_drawctxt_invalidate(drawctxt);

	spin_lock(&drawctxt->lock);
	set_bit(KGSL_CONTEXT_PRIV_INVALID, &context->priv);

	/*
	 * Set the timestamp to the last value since the context is invalidated
	 * and we want the pending events for this context to go away.
	 */
	kgsl_sharedmem_writel(device, &device->memstore,
		KGSL_MEMSTORE_OFFSET(context->id, soptimestamp),
		drawctxt->timestamp);

	kgsl_sharedmem_writel(device, &device->memstore,
		KGSL_MEMSTORE_OFFSET(context->id, eoptimestamp),
		drawctxt->timestamp);

	/* Get rid of commands still waiting in the queue */
	count = drawctxt_detach_drawobjs(drawctxt, list);
	spin_unlock(&drawctxt->lock);

	for (i = 0; i < count; i++) {
		kgsl_cancel_events_timestamp(device, &context->events,
			list[i]->timestamp);
		kgsl_drawobj_destroy(list[i]);
	}

	/* Make sure all pending events are processed or cancelled */
	kgsl_flush_event_group(device, &context->events);

	/* Give the bad news to everybody waiting around */
	wake_up_all(&drawctxt->waiting);
	wake_up_all(&drawctxt->wq);
	wake_up_all(&drawctxt->timeout);
}

/*
 * Set the priority of the context based on the flags passed into context
 * create. If the priority is not set in the flags, then the kernel can
 * assign any priority it desires for the context.
 */
#define KGSL_CONTEXT_PRIORITY_MED 0x8
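/*
 * 0x8 is a mid-range default within KGSL_CONTEXT_PRIORITY_MASK; the resulting
 * priority is also used as the plist key when queueing the context below.
 */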

static inline void _set_context_priority(struct adreno_context *drawctxt)
{
	/* If the priority is not set by the user, set it for them */
	if ((drawctxt->base.flags & KGSL_CONTEXT_PRIORITY_MASK) ==
			KGSL_CONTEXT_PRIORITY_UNDEF)
		drawctxt->base.flags |= (KGSL_CONTEXT_PRIORITY_MED <<
			KGSL_CONTEXT_PRIORITY_SHIFT);

	/* Store the context priority */
	drawctxt->base.priority =
		(drawctxt->base.flags & KGSL_CONTEXT_PRIORITY_MASK) >>
		KGSL_CONTEXT_PRIORITY_SHIFT;
}

/**
 * adreno_drawctxt_create() - create a new adreno draw context
 * @dev_priv: the owner of the context
 * @flags: flags for the context (passed from user space)
 *
 * Create and return a new draw context for the 3D core.
 */
struct kgsl_context *
adreno_drawctxt_create(struct kgsl_device_private *dev_priv,
		uint32_t *flags)
{
	struct adreno_context *drawctxt;
	struct kgsl_device *device = dev_priv->device;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	int ret;
	unsigned int local;

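	/* Keep only the user-supplied flags that this driver understands */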
	local = *flags & (KGSL_CONTEXT_PREAMBLE |
		KGSL_CONTEXT_NO_GMEM_ALLOC |
		KGSL_CONTEXT_PER_CONTEXT_TS |
		KGSL_CONTEXT_USER_GENERATED_TS |
		KGSL_CONTEXT_NO_FAULT_TOLERANCE |
		KGSL_CONTEXT_INVALIDATE_ON_FAULT |
		KGSL_CONTEXT_CTX_SWITCH |
		KGSL_CONTEXT_PRIORITY_MASK |
		KGSL_CONTEXT_TYPE_MASK |
		KGSL_CONTEXT_PWR_CONSTRAINT |
		KGSL_CONTEXT_IFH_NOP |
		KGSL_CONTEXT_SECURE |
		KGSL_CONTEXT_PREEMPT_STYLE_MASK |
		KGSL_CONTEXT_NO_SNAPSHOT |
		KGSL_CONTEXT_SPARSE);

	/* Check for errors before trying to initialize */

	/* If preemption is not supported, ignore the preemption request */
	if (!test_bit(ADRENO_DEVICE_PREEMPTION, &adreno_dev->priv))
		local &= ~KGSL_CONTEXT_PREEMPT_STYLE_MASK;

	/* We no longer support legacy context switching */
	if ((local & KGSL_CONTEXT_PREAMBLE) == 0 ||
		(local & KGSL_CONTEXT_NO_GMEM_ALLOC) == 0) {
		KGSL_DEV_ERR_ONCE(device,
			"legacy context switch not supported\n");
		return ERR_PTR(-EINVAL);
	}

	/* Make sure that our target can support secure contexts if requested */
	if (!kgsl_mmu_is_secured(&dev_priv->device->mmu) &&
		(local & KGSL_CONTEXT_SECURE)) {
		KGSL_DEV_ERR_ONCE(device, "Secure context not supported\n");
		return ERR_PTR(-EOPNOTSUPP);
	}

	drawctxt = kzalloc(sizeof(struct adreno_context), GFP_KERNEL);

	if (drawctxt == NULL)
		return ERR_PTR(-ENOMEM);

	drawctxt->timestamp = 0;

	drawctxt->base.flags = local;

	/* Always enable per-context timestamps */
	drawctxt->base.flags |= KGSL_CONTEXT_PER_CONTEXT_TS;
	drawctxt->type = (drawctxt->base.flags & KGSL_CONTEXT_TYPE_MASK)
		>> KGSL_CONTEXT_TYPE_SHIFT;
	spin_lock_init(&drawctxt->lock);
	init_waitqueue_head(&drawctxt->wq);
	init_waitqueue_head(&drawctxt->waiting);
	init_waitqueue_head(&drawctxt->timeout);

	/* Set the context priority */
	_set_context_priority(drawctxt);
	/* Set the context ringbuffer */
	drawctxt->rb = adreno_ctx_get_rb(adreno_dev, drawctxt);

	/*
	 * Set up the plist node for the dispatcher. Insert the node into the
	 * drawctxt pending list based on priority.
	 */
	plist_node_init(&drawctxt->pending, drawctxt->base.priority);

	/*
	 * Now initialize the common part of the context. This allocates the
	 * context id, after which another thread could look it up, so all
	 * of the initialization that doesn't require the context id must be
	 * done before this call.
	 */
	ret = kgsl_context_init(dev_priv, &drawctxt->base);
	if (ret != 0) {
		kfree(drawctxt);
		return ERR_PTR(ret);
	}

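	/* Zero the per-context start/end-of-pipeline timestamps in memstore */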
	kgsl_sharedmem_writel(device, &device->memstore,
		KGSL_MEMSTORE_OFFSET(drawctxt->base.id, soptimestamp),
		0);
	kgsl_sharedmem_writel(device, &device->memstore,
		KGSL_MEMSTORE_OFFSET(drawctxt->base.id, eoptimestamp),
		0);

	adreno_context_debugfs_init(ADRENO_DEVICE(device), drawctxt);

	INIT_LIST_HEAD(&drawctxt->active_node);

	if (gpudev->preemption_context_init) {
		ret = gpudev->preemption_context_init(&drawctxt->base);
		if (ret != 0) {
			kgsl_context_detach(&drawctxt->base);
			return ERR_PTR(ret);
		}
	}

	/* Copy back whatever flags we decided were valid */
	*flags = drawctxt->base.flags;
	return &drawctxt->base;
}

/**
 * adreno_drawctxt_sched() - Schedule a previously blocked context
 * @device: pointer to a KGSL device
 * @context: the context to reschedule
 *
 * This function is called by the core when it knows that a previously blocked
 * context has been unblocked. The default adreno response is to reschedule the
 * context on the dispatcher.
 */
void adreno_drawctxt_sched(struct kgsl_device *device,
		struct kgsl_context *context)
{
	adreno_dispatcher_queue_context(device, ADRENO_CONTEXT(context));
}

/**
 * adreno_drawctxt_detach() - detach a context from the GPU
 * @context: Generic KGSL context container for the context
 */
void adreno_drawctxt_detach(struct kgsl_context *context)
{
	struct kgsl_device *device;
	struct adreno_device *adreno_dev;
	struct adreno_gpudev *gpudev;
	struct adreno_context *drawctxt;
	struct adreno_ringbuffer *rb;
	int ret, count, i;
	struct kgsl_drawobj *list[ADRENO_CONTEXT_DRAWQUEUE_SIZE];

	if (context == NULL)
		return;

	device = context->device;
	adreno_dev = ADRENO_DEVICE(device);
	gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	drawctxt = ADRENO_CONTEXT(context);
	rb = drawctxt->rb;

	spin_lock(&drawctxt->lock);

	spin_lock(&adreno_dev->active_list_lock);
	list_del_init(&drawctxt->active_node);
	spin_unlock(&adreno_dev->active_list_lock);

	count = drawctxt_detach_drawobjs(drawctxt, list);
	spin_unlock(&drawctxt->lock);

	for (i = 0; i < count; i++) {
		/*
		 * If the context is detached while we are waiting for
		 * the next command in GFT SKIP CMD, print the context
		 * detached status here.
		 */
		adreno_fault_skipcmd_detached(adreno_dev, drawctxt, list[i]);
		kgsl_drawobj_destroy(list[i]);
	}

	debugfs_remove_recursive(drawctxt->debug_root);

	/*
	 * internal_timestamp is set in adreno_ringbuffer_addcmds,
	 * which holds the device mutex.
	 */
	mutex_lock(&device->mutex);

	/*
	 * Wait for the last global timestamp to pass before continuing.
	 * The maximum wait time is 30s: some large IBs can take longer
	 * than 10s, and if a hang happens the time for the context's
	 * commands to retire will be greater than 10s, so 30s should be
	 * sufficient time to wait for the commands even if a hang happens.
	 */
	ret = adreno_drawctxt_wait_rb(adreno_dev, context,
		drawctxt->internal_timestamp, 30 * 1000);

	/*
	 * If the wait for the global timestamp fails due to a timeout, mark
	 * it as a context detach timeout fault and schedule the dispatcher
	 * to kick in GPU recovery. For an ADRENO_CTX_DETATCH_TIMEOUT_FAULT
	 * we clear the policy and invalidate the context. If -EAGAIN is
	 * returned, recovery will kick in and there will be no more commands
	 * in the RB pipe from this context, which is what we are waiting
	 * for, so ignore the -EAGAIN error.
	 */
	if (ret && ret != -EAGAIN) {
		KGSL_DRV_ERR(device,
			"Wait for global ctx=%d ts=%d type=%d error=%d\n",
			drawctxt->base.id, drawctxt->internal_timestamp,
			drawctxt->type, ret);

		adreno_set_gpu_fault(adreno_dev,
			ADRENO_CTX_DETATCH_TIMEOUT_FAULT);
		mutex_unlock(&device->mutex);

		/* Schedule the dispatcher to kick in recovery */
		adreno_dispatcher_schedule(device);

		/* Wait for the context to be invalidated, then release it */
		wait_event_interruptible_timeout(drawctxt->timeout,
			kgsl_context_invalid(&drawctxt->base),
			msecs_to_jiffies(5000));
		return;
	}

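	/*
	 * As in adreno_drawctxt_invalidate(), write the last timestamp to
	 * memstore so pending per-context events can retire.
	 */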
	kgsl_sharedmem_writel(device, &device->memstore,
		KGSL_MEMSTORE_OFFSET(context->id, soptimestamp),
		drawctxt->timestamp);

	kgsl_sharedmem_writel(device, &device->memstore,
		KGSL_MEMSTORE_OFFSET(context->id, eoptimestamp),
		drawctxt->timestamp);

	adreno_profile_process_results(adreno_dev);

	mutex_unlock(&device->mutex);

	if (gpudev->preemption_context_destroy)
		gpudev->preemption_context_destroy(context);

	/* Wake threads waiting to submit commands from this context */
	wake_up_all(&drawctxt->waiting);
	wake_up_all(&drawctxt->wq);
}

void adreno_drawctxt_destroy(struct kgsl_context *context)
{
	struct adreno_context *drawctxt;

	if (context == NULL)
		return;

	drawctxt = ADRENO_CONTEXT(context);
	kfree(drawctxt);
}

static void _drawctxt_switch_wait_callback(struct kgsl_device *device,
		struct kgsl_event_group *group,
		void *priv, int result)
{
	struct adreno_context *drawctxt = (struct adreno_context *) priv;

	kgsl_context_put(&drawctxt->base);
}

/**
 * adreno_drawctxt_switch() - switch the current draw context in a given RB
 * @adreno_dev: The 3D device that owns the context
 * @rb: The ringbuffer on which the current context is being changed
 * @drawctxt: The 3D context to switch to
 * @flags: Control flags for the switch
 *
 * Switch the current draw context in the given RB.
 */
int adreno_drawctxt_switch(struct adreno_device *adreno_dev,
		struct adreno_ringbuffer *rb,
		struct adreno_context *drawctxt,
		unsigned int flags)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct kgsl_pagetable *new_pt;
	int ret = 0;

	/* We always expect a valid rb */
	if (!rb)
		return -EINVAL;

	/* Already current? */
	if (rb->drawctxt_active == drawctxt)
		return ret;

	/*
	 * Submitting pt switch commands from a detached context can
	 * lead to a race condition where the pt is destroyed before
	 * the pt switch commands get executed by the GPU, leading to
	 * pagefaults.
	 */
	if (drawctxt != NULL && kgsl_context_detached(&drawctxt->base))
		return -ENOENT;

	trace_adreno_drawctxt_switch(rb, drawctxt);

	/* Get a refcount to the new instance */
	if (drawctxt) {
		if (!_kgsl_context_get(&drawctxt->base))
			return -ENOENT;

		new_pt = drawctxt->base.proc_priv->pagetable;
	} else {
		/* No context - set the default pagetable and that's it */
		new_pt = device->mmu.defaultpagetable;
	}
	ret = adreno_ringbuffer_set_pt_ctx(rb, new_pt, drawctxt, flags);
	if (ret)
		return ret;

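	/*
	 * Keep the reference on the outgoing context until the current
	 * ringbuffer timestamp retires; the event callback drops it. If the
	 * event cannot be registered, drop the reference immediately.
	 */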
	if (rb->drawctxt_active) {
		/* Wait for the timestamp to expire */
		if (kgsl_add_event(device, &rb->events, rb->timestamp,
			_drawctxt_switch_wait_callback,
			rb->drawctxt_active)) {
			kgsl_context_put(&rb->drawctxt_active->base);
		}
	}

	rb->drawctxt_active = drawctxt;
	return 0;
}