/* Copyright (c) 2002,2007-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/slab.h>
#include <linux/msm_kgsl.h>
#include <linux/sched.h>
#include <linux/debugfs.h>

#include "kgsl.h"
#include "kgsl_sharedmem.h"
#include "adreno.h"
#include "adreno_trace.h"

static void wait_callback(struct kgsl_device *device,
		struct kgsl_event_group *group, void *priv, int result)
{
	struct adreno_context *drawctxt = priv;

	wake_up_all(&drawctxt->waiting);
}

static int _check_context_timestamp(struct kgsl_device *device,
		struct kgsl_context *context, unsigned int timestamp)
{
	/* Bail if the drawctxt has been invalidated or destroyed */
	if (kgsl_context_detached(context) || kgsl_context_invalid(context))
		return 1;

	return kgsl_check_timestamp(device, context, timestamp);
}

/**
 * adreno_drawctxt_dump() - dump information about a draw context
 * @device: KGSL device that owns the context
 * @context: KGSL context to dump information about
 *
 * Dump specific information about the context to the kernel log. Used for
 * fence timeout callbacks
 */
void adreno_drawctxt_dump(struct kgsl_device *device,
		struct kgsl_context *context)
{
	unsigned int queue, start, retire;
	struct adreno_context *drawctxt = ADRENO_CONTEXT(context);
	int index, pos;
	char buf[120];

	kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_QUEUED, &queue);
	kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_CONSUMED, &start);
	kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_RETIRED, &retire);

	/*
	 * The kgsl sync obj timer may be running and it uses the same lock,
	 * so take the lock with software interrupts disabled (bh) to avoid
	 * spinlock recursion.
	 *
	 * Use spin_trylock because the dispatcher can acquire drawctxt->lock
	 * if the context is pending and the fence it is waiting on just got
	 * signalled. The dispatcher acquires drawctxt->lock and tries to
	 * delete the sync obj timer using del_timer_sync().
	 * del_timer_sync() waits until the timer and its pending handlers
	 * are deleted. But if the timer expires at the same time, the timer
	 * handler could be waiting on drawctxt->lock, leading to a deadlock.
	 * To prevent this, use spin_trylock_bh.
	 */
	if (!spin_trylock_bh(&drawctxt->lock)) {
		dev_err(device->dev, " context[%d]: could not get lock\n",
			context->id);
		return;
	}

	dev_err(device->dev,
		" context[%d]: queue=%d, submit=%d, start=%d, retire=%d\n",
		context->id, queue, drawctxt->submitted_timestamp,
		start, retire);

	if (drawctxt->drawqueue_head != drawctxt->drawqueue_tail) {
		struct kgsl_drawobj *drawobj =
			drawctxt->drawqueue[drawctxt->drawqueue_head];

		if (test_bit(ADRENO_CONTEXT_FENCE_LOG, &context->priv)) {
			dev_err(device->dev,
				" possible deadlock. Context %d might be blocked for itself\n",
				context->id);
			goto stats;
		}

		if (drawobj->type == SYNCOBJ_TYPE) {
			struct kgsl_drawobj_sync *syncobj = SYNCOBJ(drawobj);

			if (kgsl_drawobj_events_pending(syncobj)) {
				dev_err(device->dev,
					" context[%d] (ts=%d) Active sync points:\n",
					context->id, drawobj->timestamp);

				kgsl_dump_syncpoints(device, syncobj);
			}
		}
	}

stats:
	memset(buf, 0, sizeof(buf));

	pos = 0;

	for (index = 0; index < SUBMIT_RETIRE_TICKS_SIZE; index++) {
		uint64_t msecs;
		unsigned int usecs;

		if (!drawctxt->submit_retire_ticks[index])
			continue;
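		/*
		 * submit_retire_ticks appear to be counts of a 19.2 MHz
		 * clock: ticks * 10 / 192 yields whole microseconds, and the
		 * second do_div() splits that into milliseconds (quotient
		 * left in msecs) and the leftover microseconds (remainder
		 * returned into usecs).
		 */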
		msecs = drawctxt->submit_retire_ticks[index] * 10;
		usecs = do_div(msecs, 192);
		usecs = do_div(msecs, 1000);
		pos += snprintf(buf + pos, sizeof(buf) - pos, "%d.%0d ",
			(unsigned int)msecs, usecs);
	}
	dev_err(device->dev, " context[%d]: submit times: %s\n",
		context->id, buf);

	spin_unlock_bh(&drawctxt->lock);
}

/**
 * adreno_drawctxt_wait() - sleep until a timestamp expires
 * @adreno_dev: pointer to the adreno_device struct
 * @context: Pointer to the KGSL context to sleep for
 * @timestamp: Timestamp to wait on
 * @timeout: Number of milliseconds to wait (0 for infinite)
 *
 * Register an event to wait for a timestamp on a context and sleep until it
 * has passed. Returns < 0 on error, -ETIMEDOUT if the timeout expires or 0
 * on success
 */
int adreno_drawctxt_wait(struct adreno_device *adreno_dev,
		struct kgsl_context *context,
		uint32_t timestamp, unsigned int timeout)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct adreno_context *drawctxt = ADRENO_CONTEXT(context);
	int ret;
	long ret_temp;

	if (kgsl_context_detached(context))
		return -ENOENT;

	if (kgsl_context_invalid(context))
		return -EDEADLK;

	trace_adreno_drawctxt_wait_start(-1, context->id, timestamp);

	ret = kgsl_add_event(device, &context->events, timestamp,
		wait_callback, (void *) drawctxt);
	if (ret)
		goto done;

	/*
	 * If timeout is 0, wait forever. msecs_to_jiffies will force
	 * values larger than INT_MAX to an infinite timeout.
	 */
	if (timeout == 0)
		timeout = UINT_MAX;

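	/*
	 * wait_event_interruptible_timeout() returns 0 if the timeout
	 * elapsed, a negative value if the wait was interrupted by a signal
	 * and the remaining jiffies (> 0) if the condition became true.
	 */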
	ret_temp = wait_event_interruptible_timeout(drawctxt->waiting,
		_check_context_timestamp(device, context, timestamp),
		msecs_to_jiffies(timeout));

	if (ret_temp == 0) {
		ret = -ETIMEDOUT;
		goto done;
	} else if (ret_temp < 0) {
		ret = (int) ret_temp;
		goto done;
	}
	ret = 0;

	/* Return -EDEADLK if the context was invalidated while we were waiting */
	if (kgsl_context_invalid(context))
		ret = -EDEADLK;

	/* Return -ENOENT if the context was detached while we were waiting */
	if (kgsl_context_detached(context))
		ret = -ENOENT;

done:
	trace_adreno_drawctxt_wait_done(-1, context->id, timestamp, ret);
	return ret;
}

/**
 * adreno_drawctxt_wait_rb() - Wait for the last RB timestamp at which this
 * context submitted a command to the corresponding RB
 * @adreno_dev: The device on which the timestamp is active
 * @context: The context which submitted commands to the RB
 * @timestamp: The RB timestamp of the last command submitted to the RB by the
 * context
 * @timeout: Timeout value for the wait
 * Caller must hold the device mutex
 */
static int adreno_drawctxt_wait_rb(struct adreno_device *adreno_dev,
		struct kgsl_context *context,
		uint32_t timestamp, unsigned int timeout)
{
	struct adreno_context *drawctxt = ADRENO_CONTEXT(context);
	int ret = 0;

	/*
	 * If the context is invalid or has not submitted commands to the GPU,
	 * then return immediately - we may end up waiting for a timestamp
	 * that will never come
	 */
	if (kgsl_context_invalid(context) ||
		!test_bit(KGSL_CONTEXT_PRIV_SUBMITTED, &context->priv))
		goto done;

	trace_adreno_drawctxt_wait_start(drawctxt->rb->id, context->id,
		timestamp);

	ret = adreno_ringbuffer_waittimestamp(drawctxt->rb, timestamp, timeout);
done:
	trace_adreno_drawctxt_wait_done(drawctxt->rb->id, context->id,
		timestamp, ret);
	return ret;
}

static int drawctxt_detach_drawobjs(struct adreno_context *drawctxt,
		struct kgsl_drawobj **list)
{
	int count = 0;

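	/* Drain the drawqueue ring; the drawobjs now belong to the caller */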
	while (drawctxt->drawqueue_head != drawctxt->drawqueue_tail) {
		struct kgsl_drawobj *drawobj =
			drawctxt->drawqueue[drawctxt->drawqueue_head];

		drawctxt->drawqueue_head = (drawctxt->drawqueue_head + 1) %
			ADRENO_CONTEXT_DRAWQUEUE_SIZE;

		list[count++] = drawobj;
	}

	return count;
}

/**
 * adreno_drawctxt_invalidate() - Invalidate an adreno draw context
 * @device: Pointer to the KGSL device structure for the GPU
 * @context: Pointer to the KGSL context structure
 *
 * Invalidate the context, remove all queued commands and cancel any pending
 * waiters
 */
void adreno_drawctxt_invalidate(struct kgsl_device *device,
		struct kgsl_context *context)
{
	struct adreno_context *drawctxt = ADRENO_CONTEXT(context);
	struct kgsl_drawobj *list[ADRENO_CONTEXT_DRAWQUEUE_SIZE];
	int i, count;

	trace_adreno_drawctxt_invalidate(drawctxt);

	spin_lock(&drawctxt->lock);
	set_bit(KGSL_CONTEXT_PRIV_INVALID, &context->priv);

	/*
	 * Set the timestamp to the last value since the context is invalidated
	 * and we want the pending events for this context to go away
	 */
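	/*
	 * soptimestamp/eoptimestamp are the start/end-of-pipeline timestamps
	 * in the shared memstore that timestamp checks compare against.
	 */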
	kgsl_sharedmem_writel(device, &device->memstore,
		KGSL_MEMSTORE_OFFSET(context->id, soptimestamp),
		drawctxt->timestamp);

	kgsl_sharedmem_writel(device, &device->memstore,
		KGSL_MEMSTORE_OFFSET(context->id, eoptimestamp),
		drawctxt->timestamp);

	/* Get rid of commands still waiting in the queue */
	count = drawctxt_detach_drawobjs(drawctxt, list);
	spin_unlock(&drawctxt->lock);

	for (i = 0; i < count; i++) {
		kgsl_cancel_events_timestamp(device, &context->events,
			list[i]->timestamp);
		kgsl_drawobj_destroy(list[i]);
	}

	/* Make sure all pending events are processed or cancelled */
	kgsl_flush_event_group(device, &context->events);

	/* Give the bad news to everybody waiting around */
	wake_up_all(&drawctxt->waiting);
	wake_up_all(&drawctxt->wq);
}

/*
 * Set the priority of the context based on the flags passed into context
 * create. If the priority is not set in the flags, then the kernel can
 * assign any priority it desires for the context.
 */
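/*
 * Default applied when userspace leaves the priority bits at
 * KGSL_CONTEXT_PRIORITY_UNDEF; a medium value within the field covered by
 * KGSL_CONTEXT_PRIORITY_MASK.
 */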
#define KGSL_CONTEXT_PRIORITY_MED	0x8

static inline void _set_context_priority(struct adreno_context *drawctxt)
{
	/* If the priority is not set by user, set it for them */
	if ((drawctxt->base.flags & KGSL_CONTEXT_PRIORITY_MASK) ==
			KGSL_CONTEXT_PRIORITY_UNDEF)
		drawctxt->base.flags |= (KGSL_CONTEXT_PRIORITY_MED <<
			KGSL_CONTEXT_PRIORITY_SHIFT);

	/* Store the context priority */
	drawctxt->base.priority =
		(drawctxt->base.flags & KGSL_CONTEXT_PRIORITY_MASK) >>
		KGSL_CONTEXT_PRIORITY_SHIFT;
}

/**
 * adreno_drawctxt_create() - create a new adreno draw context
 * @dev_priv: the owner of the context
 * @flags: flags for the context (passed from user space)
 *
 * Create and return a new draw context for the 3D core.
 */
struct kgsl_context *
adreno_drawctxt_create(struct kgsl_device_private *dev_priv,
			uint32_t *flags)
{
	struct adreno_context *drawctxt;
	struct kgsl_device *device = dev_priv->device;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	int ret;
	unsigned long local;

	local = *flags & (KGSL_CONTEXT_PREAMBLE |
		KGSL_CONTEXT_NO_GMEM_ALLOC |
		KGSL_CONTEXT_PER_CONTEXT_TS |
		KGSL_CONTEXT_USER_GENERATED_TS |
		KGSL_CONTEXT_NO_FAULT_TOLERANCE |
		KGSL_CONTEXT_CTX_SWITCH |
		KGSL_CONTEXT_PRIORITY_MASK |
		KGSL_CONTEXT_TYPE_MASK |
		KGSL_CONTEXT_PWR_CONSTRAINT |
		KGSL_CONTEXT_IFH_NOP |
		KGSL_CONTEXT_SECURE |
		KGSL_CONTEXT_PREEMPT_STYLE_MASK |
		KGSL_CONTEXT_NO_SNAPSHOT |
		KGSL_CONTEXT_SPARSE);

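	/*
	 * "local" now holds only the creation flags this driver recognizes;
	 * any other bits passed in from userspace are dropped here and the
	 * sanitized set is copied back to *flags on success.
	 */
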
	/* Check for errors before trying to initialize */

	/* If preemption is not supported, ignore preemption request */
	if (!test_bit(ADRENO_DEVICE_PREEMPTION, &adreno_dev->priv))
		local &= ~KGSL_CONTEXT_PREEMPT_STYLE_MASK;

	/* We no longer support legacy context switching */
	if ((local & KGSL_CONTEXT_PREAMBLE) == 0 ||
		(local & KGSL_CONTEXT_NO_GMEM_ALLOC) == 0) {
		KGSL_DEV_ERR_ONCE(device,
			"legacy context switch not supported\n");
		return ERR_PTR(-EINVAL);
	}

	/* Make sure that our target can support secure contexts if requested */
	if (!kgsl_mmu_is_secured(&dev_priv->device->mmu) &&
			(local & KGSL_CONTEXT_SECURE)) {
		KGSL_DEV_ERR_ONCE(device, "Secure context not supported\n");
		return ERR_PTR(-EOPNOTSUPP);
	}

	drawctxt = kzalloc(sizeof(struct adreno_context), GFP_KERNEL);

	if (drawctxt == NULL)
		return ERR_PTR(-ENOMEM);

	drawctxt->timestamp = 0;

	drawctxt->base.flags = local;

	/* Always enable per-context timestamps */
	drawctxt->base.flags |= KGSL_CONTEXT_PER_CONTEXT_TS;
	drawctxt->type = (drawctxt->base.flags & KGSL_CONTEXT_TYPE_MASK)
		>> KGSL_CONTEXT_TYPE_SHIFT;
	spin_lock_init(&drawctxt->lock);
	init_waitqueue_head(&drawctxt->wq);
	init_waitqueue_head(&drawctxt->waiting);

	/* Set the context priority */
	_set_context_priority(drawctxt);
	/* set the context ringbuffer */
	drawctxt->rb = adreno_ctx_get_rb(adreno_dev, drawctxt);

	/*
	 * Set up the plist node for the dispatcher. Insert the node into the
	 * drawctxt pending list based on priority.
	 */
	plist_node_init(&drawctxt->pending, drawctxt->base.priority);

	/*
	 * Now initialize the common part of the context. This allocates the
	 * context id, and then possibly another thread could look it up.
	 * So we want all of our initialization that doesn't require the
	 * context id to be done before this call.
	 */
	ret = kgsl_context_init(dev_priv, &drawctxt->base);
	if (ret != 0) {
		kfree(drawctxt);
		return ERR_PTR(ret);
	}

	kgsl_sharedmem_writel(device, &device->memstore,
		KGSL_MEMSTORE_OFFSET(drawctxt->base.id, soptimestamp),
		0);
	kgsl_sharedmem_writel(device, &device->memstore,
		KGSL_MEMSTORE_OFFSET(drawctxt->base.id, eoptimestamp),
		0);

	adreno_context_debugfs_init(ADRENO_DEVICE(device), drawctxt);

	INIT_LIST_HEAD(&drawctxt->active_node);

	/* Copy back whatever flags we decided were valid */
	*flags = drawctxt->base.flags;
	return &drawctxt->base;
}

/**
 * adreno_drawctxt_sched() - Schedule a previously blocked context
 * @device: pointer to a KGSL device
 * @context: the KGSL context to reschedule
 *
 * This function is called by the core when it knows that a previously blocked
 * context has been unblocked. The default adreno response is to reschedule the
 * context on the dispatcher.
 */
void adreno_drawctxt_sched(struct kgsl_device *device,
		struct kgsl_context *context)
{
	adreno_dispatcher_queue_context(device, ADRENO_CONTEXT(context));
}

/**
 * adreno_drawctxt_detach() - detach a context from the GPU
 * @context: Generic KGSL context container for the context
 */
void adreno_drawctxt_detach(struct kgsl_context *context)
{
	struct kgsl_device *device;
	struct adreno_device *adreno_dev;
	struct adreno_context *drawctxt;
	struct adreno_ringbuffer *rb;
	int ret, count, i;
	struct kgsl_drawobj *list[ADRENO_CONTEXT_DRAWQUEUE_SIZE];

	if (context == NULL)
		return;

	device = context->device;
	adreno_dev = ADRENO_DEVICE(device);
	drawctxt = ADRENO_CONTEXT(context);
	rb = drawctxt->rb;

	spin_lock(&adreno_dev->active_list_lock);
	list_del_init(&drawctxt->active_node);
	spin_unlock(&adreno_dev->active_list_lock);

	spin_lock(&drawctxt->lock);
	count = drawctxt_detach_drawobjs(drawctxt, list);
	spin_unlock(&drawctxt->lock);

	for (i = 0; i < count; i++) {
		/*
		 * If the context is detached while we are waiting for
		 * the next command in GFT SKIP CMD, print the context
		 * detached status here.
		 */
		adreno_fault_skipcmd_detached(adreno_dev, drawctxt, list[i]);
		kgsl_drawobj_destroy(list[i]);
	}

	/*
	 * internal_timestamp is set in adreno_ringbuffer_addcmds,
	 * which holds the device mutex.
	 */
	mutex_lock(&device->mutex);

	/*
	 * Wait for the last global timestamp to pass before continuing.
	 * The maximum wait time is 30s; some large IBs can take longer
	 * than 10s, and if a hang happens then the time for the context's
	 * commands to retire will be greater than 10s. 30s should be
	 * sufficient time to wait for the commands even if a hang happens.
	 */
	ret = adreno_drawctxt_wait_rb(adreno_dev, context,
		drawctxt->internal_timestamp, 30 * 1000);

	/*
	 * If the wait for the global timestamp fails due to a timeout then
	 * nothing after this point is likely to work very well - get a GPU
	 * snapshot and BUG_ON() so we can take advantage of the debug tools
	 * to figure out what the h - e - double hockey sticks happened. If
	 * -EAGAIN is returned then recovery will kick in and there will be
	 * no more commands in the RB pipe from this context, which is what
	 * we are waiting for, so ignore the -EAGAIN error.
	 */
	if (ret && ret != -EAGAIN) {
		KGSL_DRV_ERR(device, "Wait for global ts=%d type=%d error=%d\n",
			drawctxt->internal_timestamp,
			drawctxt->type, ret);
		device->force_panic = 1;
		kgsl_device_snapshot(device, context);
	}

	kgsl_sharedmem_writel(device, &device->memstore,
		KGSL_MEMSTORE_OFFSET(context->id, soptimestamp),
		drawctxt->timestamp);

	kgsl_sharedmem_writel(device, &device->memstore,
		KGSL_MEMSTORE_OFFSET(context->id, eoptimestamp),
		drawctxt->timestamp);

	adreno_profile_process_results(adreno_dev);

	mutex_unlock(&device->mutex);

	/* wake threads waiting to submit commands from this context */
	wake_up_all(&drawctxt->waiting);
	wake_up_all(&drawctxt->wq);
}

void adreno_drawctxt_destroy(struct kgsl_context *context)
{
	struct adreno_context *drawctxt;

	if (context == NULL)
		return;

	drawctxt = ADRENO_CONTEXT(context);
	debugfs_remove_recursive(drawctxt->debug_root);
	kfree(drawctxt);
}

static void _drawctxt_switch_wait_callback(struct kgsl_device *device,
		struct kgsl_event_group *group,
		void *priv, int result)
{
	struct adreno_context *drawctxt = (struct adreno_context *) priv;

	kgsl_context_put(&drawctxt->base);
}

/**
 * adreno_drawctxt_switch() - switch the current draw context in a given RB
 * @adreno_dev: The 3D device that owns the context
 * @rb: The ringbuffer on which the current context is being changed
 * @drawctxt: The 3D context to switch to
 * @flags: Control flags for the switch
 *
 * Switch the current draw context in the given RB
 */
int adreno_drawctxt_switch(struct adreno_device *adreno_dev,
		struct adreno_ringbuffer *rb,
		struct adreno_context *drawctxt,
		unsigned int flags)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct kgsl_pagetable *new_pt;
	int ret = 0;

	/* We always expect a valid rb */
	if (!rb)
		return -EINVAL;

	/* already current? */
	if (rb->drawctxt_active == drawctxt)
		return ret;

	/*
	 * Submitting pt switch commands from a detached context can
	 * lead to a race condition where the pt is destroyed before
	 * the pt switch commands get executed by the GPU, leading to
	 * pagefaults.
	 */
	if (drawctxt != NULL && kgsl_context_detached(&drawctxt->base))
		return -ENOENT;

	trace_adreno_drawctxt_switch(rb, drawctxt);

	/* Get a refcount to the new instance */
	if (drawctxt) {
		if (!_kgsl_context_get(&drawctxt->base))
			return -ENOENT;

		new_pt = drawctxt->base.proc_priv->pagetable;
	} else {
		/* No context - set the default pagetable and that's it */
		new_pt = device->mmu.defaultpagetable;
	}
	ret = adreno_ringbuffer_set_pt_ctx(rb, new_pt, drawctxt, flags);
	if (ret)
		return ret;

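	/*
	 * The outgoing context keeps its reference until the ringbuffer
	 * timestamp that is current at switch time retires;
	 * _drawctxt_switch_wait_callback() drops it then. If the event
	 * cannot be added, drop the reference immediately.
	 */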
	if (rb->drawctxt_active) {
		/* Wait for the timestamp to expire */
		if (kgsl_add_event(device, &rb->events, rb->timestamp,
			_drawctxt_switch_wait_callback,
			rb->drawctxt_active)) {
			kgsl_context_put(&rb->drawctxt_active->base);
		}
	}

	rb->drawctxt_active = drawctxt;
	return 0;
}