/* Copyright (c) 2002,2007-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/slab.h>
#include <linux/msm_kgsl.h>
#include <linux/sched.h>
#include <linux/debugfs.h>

#include "kgsl.h"
#include "kgsl_sharedmem.h"
#include "adreno.h"
#include "adreno_trace.h"

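/*
 * Event callback used by adreno_drawctxt_wait(): wake up any threads
 * sleeping on drawctxt->waiting once the timestamp event fires.
 */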
static void wait_callback(struct kgsl_device *device,
		struct kgsl_event_group *group, void *priv, int result)
{
	struct adreno_context *drawctxt = priv;

	wake_up_all(&drawctxt->waiting);
}

static int _check_context_timestamp(struct kgsl_device *device,
		struct kgsl_context *context, unsigned int timestamp)
{
	/* Bail if the drawctxt has been invalidated or destroyed */
	if (kgsl_context_detached(context) || kgsl_context_invalid(context))
		return 1;

	return kgsl_check_timestamp(device, context, timestamp);
}

/**
 * adreno_drawctxt_dump() - dump information about a draw context
 * @device: KGSL device that owns the context
 * @context: KGSL context to dump information about
 *
 * Dump specific information about the context to the kernel log. Used for
 * fence timeout callbacks.
 */
void adreno_drawctxt_dump(struct kgsl_device *device,
		struct kgsl_context *context)
{
	unsigned int queue, start, retire;
	struct adreno_context *drawctxt = ADRENO_CONTEXT(context);
	int index, pos;
	char buf[120];

	kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_QUEUED, &queue);
	kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_CONSUMED, &start);
	kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_RETIRED, &retire);

	/*
	 * The kgsl sync obj timer may be running and it uses the same lock,
	 * so take the lock with bottom halves disabled (bh) to avoid
	 * spinlock recursion.
	 *
	 * Use spin_trylock because the dispatcher can acquire drawctxt->lock
	 * if the context is pending and the fence it is waiting on has just
	 * been signalled. The dispatcher takes drawctxt->lock and tries to
	 * delete the sync obj timer with del_timer_sync(), which waits until
	 * the timer and its pending handlers have finished. If the timer
	 * expires at the same time, the timer handler could be spinning on
	 * drawctxt->lock, leading to a deadlock. spin_trylock_bh() avoids
	 * this.
	 */
	if (!spin_trylock_bh(&drawctxt->lock)) {
		dev_err(device->dev, " context[%d]: could not get lock\n",
			context->id);
		return;
	}

	dev_err(device->dev,
		" context[%d]: queue=%d, submit=%d, start=%d, retire=%d\n",
		context->id, queue, drawctxt->submitted_timestamp,
		start, retire);

	if (drawctxt->drawqueue_head != drawctxt->drawqueue_tail) {
		struct kgsl_drawobj *drawobj =
			drawctxt->drawqueue[drawctxt->drawqueue_head];

		if (test_bit(ADRENO_CONTEXT_FENCE_LOG, &context->priv)) {
			dev_err(device->dev,
				" possible deadlock. Context %d might be blocked for itself\n",
				context->id);
			goto stats;
		}

		if (!kref_get_unless_zero(&drawobj->refcount))
			goto stats;

		if (drawobj->type == SYNCOBJ_TYPE) {
			struct kgsl_drawobj_sync *syncobj = SYNCOBJ(drawobj);

			if (kgsl_drawobj_events_pending(syncobj)) {
				dev_err(device->dev,
					" context[%d] (ts=%d) Active sync points:\n",
					context->id, drawobj->timestamp);

				kgsl_dump_syncpoints(device, syncobj);
			}
		}

		kgsl_drawobj_put(drawobj);
	}

stats:
	memset(buf, 0, sizeof(buf));

	pos = 0;

	for (index = 0; index < SUBMIT_RETIRE_TICKS_SIZE; index++) {
		uint64_t msecs;
		unsigned int usecs;

		if (!drawctxt->submit_retire_ticks[index])
			continue;
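		/*
		 * Convert ticks to a "msecs.usecs" pair. The ticks are
		 * assumed to come from the 19.2 MHz always-on counter, so
		 * ticks * 10 / 192 gives microseconds; the second do_div()
		 * then splits that into whole milliseconds plus the
		 * microsecond remainder. Example: 1920000 ticks ->
		 * 100000 us -> printed as "100.0".
		 */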
		msecs = drawctxt->submit_retire_ticks[index] * 10;
		usecs = do_div(msecs, 192);
		usecs = do_div(msecs, 1000);
		pos += snprintf(buf + pos, sizeof(buf) - pos, "%d.%0d ",
			(unsigned int)msecs, usecs);
	}
	dev_err(device->dev, " context[%d]: submit times: %s\n",
		context->id, buf);

	spin_unlock_bh(&drawctxt->lock);
}

/**
 * adreno_drawctxt_wait() - sleep until a timestamp expires
 * @adreno_dev: pointer to the adreno_device struct
 * @context: Pointer to the KGSL context to sleep for
 * @timestamp: Timestamp to wait on
 * @timeout: Number of milliseconds to wait (0 for infinite)
 *
 * Register an event to wait for a timestamp on a context and sleep until it
 * has passed. Returns < 0 on error, -ETIMEDOUT if the timeout expires or 0
 * on success.
 */
int adreno_drawctxt_wait(struct adreno_device *adreno_dev,
		struct kgsl_context *context,
		uint32_t timestamp, unsigned int timeout)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct adreno_context *drawctxt = ADRENO_CONTEXT(context);
	int ret;
	long ret_temp;

	if (kgsl_context_detached(context))
		return -ENOENT;

	if (kgsl_context_invalid(context))
		return -EDEADLK;

	trace_adreno_drawctxt_wait_start(-1, context->id, timestamp);

	ret = kgsl_add_event(device, &context->events, timestamp,
		wait_callback, (void *) drawctxt);
	if (ret)
		goto done;

	/*
	 * If timeout is 0, wait forever. msecs_to_jiffies will force
	 * values larger than INT_MAX to an infinite timeout.
	 */
	if (timeout == 0)
		timeout = UINT_MAX;

	ret_temp = wait_event_interruptible_timeout(drawctxt->waiting,
		_check_context_timestamp(device, context, timestamp),
		msecs_to_jiffies(timeout));

	if (ret_temp == 0) {
		ret = -ETIMEDOUT;
		goto done;
	} else if (ret_temp < 0) {
		ret = (int) ret_temp;
		goto done;
	}
	ret = 0;

	/* Return -EDEADLK if the context was invalidated while we were waiting */
	if (kgsl_context_invalid(context))
		ret = -EDEADLK;

	/* Return -ENOENT if the context was detached while we were waiting */
	if (kgsl_context_detached(context))
		ret = -ENOENT;

done:
	trace_adreno_drawctxt_wait_done(-1, context->id, timestamp, ret);
	return ret;
}

/**
 * adreno_drawctxt_wait_rb() - Wait for the last RB timestamp at which this
 * context submitted a command to the corresponding RB
 * @adreno_dev: The device on which the timestamp is active
 * @context: The context which submitted the command to the RB
 * @timestamp: The RB timestamp of the last command submitted to the RB by
 * the context
 * @timeout: Timeout value for the wait
 *
 * Caller must hold the device mutex
 */
static int adreno_drawctxt_wait_rb(struct adreno_device *adreno_dev,
		struct kgsl_context *context,
		uint32_t timestamp, unsigned int timeout)
{
	struct adreno_context *drawctxt = ADRENO_CONTEXT(context);
	int ret = 0;

	/*
	 * If the context is invalid or has not submitted any commands to the
	 * GPU, return immediately - otherwise we may end up waiting for a
	 * timestamp that will never come.
	 */
	if (kgsl_context_invalid(context) ||
			!test_bit(KGSL_CONTEXT_PRIV_SUBMITTED, &context->priv))
		goto done;

	trace_adreno_drawctxt_wait_start(drawctxt->rb->id, context->id,
		timestamp);

	ret = adreno_ringbuffer_waittimestamp(drawctxt->rb, timestamp, timeout);
done:
	trace_adreno_drawctxt_wait_done(drawctxt->rb->id, context->id,
		timestamp, ret);
	return ret;
}

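/*
 * Drain the context's drawqueue: pop every queued drawobj into @list and
 * return how many were removed. Callers hold drawctxt->lock while calling
 * this and are responsible for destroying the returned objects afterwards.
 */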
static int drawctxt_detach_drawobjs(struct adreno_context *drawctxt,
		struct kgsl_drawobj **list)
{
	int count = 0;

	while (drawctxt->drawqueue_head != drawctxt->drawqueue_tail) {
		struct kgsl_drawobj *drawobj =
			drawctxt->drawqueue[drawctxt->drawqueue_head];

		drawctxt->drawqueue_head = (drawctxt->drawqueue_head + 1) %
			ADRENO_CONTEXT_DRAWQUEUE_SIZE;

		list[count++] = drawobj;
	}

	return count;
}

/**
 * adreno_drawctxt_invalidate() - Invalidate an adreno draw context
 * @device: Pointer to the KGSL device structure for the GPU
 * @context: Pointer to the KGSL context structure
 *
 * Invalidate the context, remove all queued commands and cancel any pending
 * waiters
 */
void adreno_drawctxt_invalidate(struct kgsl_device *device,
		struct kgsl_context *context)
{
	struct adreno_context *drawctxt = ADRENO_CONTEXT(context);
	struct kgsl_drawobj *list[ADRENO_CONTEXT_DRAWQUEUE_SIZE];
	int i, count;

	trace_adreno_drawctxt_invalidate(drawctxt);

	spin_lock(&drawctxt->lock);
	set_bit(KGSL_CONTEXT_PRIV_INVALID, &context->priv);

	/*
	 * set the timestamp to the last value since the context is invalidated
	 * and we want the pending events for this context to go away
	 */
	kgsl_sharedmem_writel(device, &device->memstore,
			KGSL_MEMSTORE_OFFSET(context->id, soptimestamp),
			drawctxt->timestamp);

	kgsl_sharedmem_writel(device, &device->memstore,
			KGSL_MEMSTORE_OFFSET(context->id, eoptimestamp),
			drawctxt->timestamp);

	/* Get rid of commands still waiting in the queue */
	count = drawctxt_detach_drawobjs(drawctxt, list);
	spin_unlock(&drawctxt->lock);

	for (i = 0; i < count; i++) {
		kgsl_cancel_events_timestamp(device, &context->events,
			list[i]->timestamp);
		kgsl_drawobj_destroy(list[i]);
	}

	/* Make sure all pending events are processed or cancelled */
	kgsl_flush_event_group(device, &context->events);

	/* Give the bad news to everybody waiting around */
	wake_up_all(&drawctxt->waiting);
	wake_up_all(&drawctxt->wq);
}

/*
 * Set the priority of the context based on the flags passed into context
 * create. If the priority is not set in the flags, then the kernel can
 * assign any priority it desires for the context.
 */
#define KGSL_CONTEXT_PRIORITY_MED 0x8

static inline void _set_context_priority(struct adreno_context *drawctxt)
{
	/* If the priority is not set by user, set it for them */
	if ((drawctxt->base.flags & KGSL_CONTEXT_PRIORITY_MASK) ==
			KGSL_CONTEXT_PRIORITY_UNDEF)
		drawctxt->base.flags |= (KGSL_CONTEXT_PRIORITY_MED <<
				KGSL_CONTEXT_PRIORITY_SHIFT);

	/* Store the context priority */
	drawctxt->base.priority =
		(drawctxt->base.flags & KGSL_CONTEXT_PRIORITY_MASK) >>
		KGSL_CONTEXT_PRIORITY_SHIFT;
}

/**
 * adreno_drawctxt_create - create a new adreno draw context
 * @dev_priv: the owner of the context
 * @flags: flags for the context (passed from user space)
 *
 * Create and return a new draw context for the 3D core.
 */
struct kgsl_context *
adreno_drawctxt_create(struct kgsl_device_private *dev_priv,
			uint32_t *flags)
{
	struct adreno_context *drawctxt;
	struct kgsl_device *device = dev_priv->device;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	int ret;
	unsigned int local;

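	/*
	 * Mask the user-supplied flags down to the set this driver actually
	 * understands; anything else is silently dropped and the accepted
	 * flags are copied back to the caller at the end of this function.
	 */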
	local = *flags & (KGSL_CONTEXT_PREAMBLE |
		KGSL_CONTEXT_NO_GMEM_ALLOC |
		KGSL_CONTEXT_PER_CONTEXT_TS |
		KGSL_CONTEXT_USER_GENERATED_TS |
		KGSL_CONTEXT_NO_FAULT_TOLERANCE |
		KGSL_CONTEXT_INVALIDATE_ON_FAULT |
		KGSL_CONTEXT_CTX_SWITCH |
		KGSL_CONTEXT_PRIORITY_MASK |
		KGSL_CONTEXT_TYPE_MASK |
		KGSL_CONTEXT_PWR_CONSTRAINT |
		KGSL_CONTEXT_IFH_NOP |
		KGSL_CONTEXT_SECURE |
		KGSL_CONTEXT_PREEMPT_STYLE_MASK |
		KGSL_CONTEXT_NO_SNAPSHOT |
		KGSL_CONTEXT_SPARSE);

	/* Check for errors before trying to initialize */

	/* If preemption is not supported, ignore preemption request */
	if (!test_bit(ADRENO_DEVICE_PREEMPTION, &adreno_dev->priv))
		local &= ~KGSL_CONTEXT_PREEMPT_STYLE_MASK;

	/* We no longer support legacy context switching */
	if ((local & KGSL_CONTEXT_PREAMBLE) == 0 ||
		(local & KGSL_CONTEXT_NO_GMEM_ALLOC) == 0) {
		KGSL_DEV_ERR_ONCE(device,
			"legacy context switch not supported\n");
		return ERR_PTR(-EINVAL);
	}

	/* Make sure that our target can support secure contexts if requested */
	if (!kgsl_mmu_is_secured(&dev_priv->device->mmu) &&
			(local & KGSL_CONTEXT_SECURE)) {
		KGSL_DEV_ERR_ONCE(device, "Secure context not supported\n");
		return ERR_PTR(-EOPNOTSUPP);
	}

	drawctxt = kzalloc(sizeof(struct adreno_context), GFP_KERNEL);

	if (drawctxt == NULL)
		return ERR_PTR(-ENOMEM);

	drawctxt->timestamp = 0;

	drawctxt->base.flags = local;

	/* Always enable per-context timestamps */
	drawctxt->base.flags |= KGSL_CONTEXT_PER_CONTEXT_TS;
	drawctxt->type = (drawctxt->base.flags & KGSL_CONTEXT_TYPE_MASK)
		>> KGSL_CONTEXT_TYPE_SHIFT;
	spin_lock_init(&drawctxt->lock);
	init_waitqueue_head(&drawctxt->wq);
	init_waitqueue_head(&drawctxt->waiting);

	/* Set the context priority */
	_set_context_priority(drawctxt);
	/* Set the context ringbuffer */
	drawctxt->rb = adreno_ctx_get_rb(adreno_dev, drawctxt);

	/*
	 * Set up the plist node for the dispatcher. Insert the node into the
	 * drawctxt pending list based on priority.
	 */
	plist_node_init(&drawctxt->pending, drawctxt->base.priority);

	/*
	 * Now initialize the common part of the context. This allocates the
	 * context id, after which another thread could look the context up,
	 * so any initialization that does not require the context id must be
	 * done before this call.
	 */
	ret = kgsl_context_init(dev_priv, &drawctxt->base);
	if (ret != 0) {
		kfree(drawctxt);
		return ERR_PTR(ret);
	}

	kgsl_sharedmem_writel(device, &device->memstore,
			KGSL_MEMSTORE_OFFSET(drawctxt->base.id, soptimestamp),
			0);
	kgsl_sharedmem_writel(device, &device->memstore,
			KGSL_MEMSTORE_OFFSET(drawctxt->base.id, eoptimestamp),
			0);

	adreno_context_debugfs_init(ADRENO_DEVICE(device), drawctxt);

	INIT_LIST_HEAD(&drawctxt->active_node);

	/* Copy back whatever flags we decided were valid */
	*flags = drawctxt->base.flags;
	return &drawctxt->base;
}

/**
 * adreno_drawctxt_sched() - Schedule a previously blocked context
 * @device: pointer to a KGSL device
 * @context: KGSL context to reschedule
 *
 * This function is called by the core when it knows that a previously blocked
 * context has been unblocked. The default adreno response is to reschedule the
 * context on the dispatcher.
 */
void adreno_drawctxt_sched(struct kgsl_device *device,
		struct kgsl_context *context)
{
	adreno_dispatcher_queue_context(device, ADRENO_CONTEXT(context));
}

/**
 * adreno_drawctxt_detach() - detach a context from the GPU
 * @context: Generic KGSL context container for the context
 */
void adreno_drawctxt_detach(struct kgsl_context *context)
{
	struct kgsl_device *device;
	struct adreno_device *adreno_dev;
	struct adreno_context *drawctxt;
	struct adreno_ringbuffer *rb;
	int ret, count, i;
	struct kgsl_drawobj *list[ADRENO_CONTEXT_DRAWQUEUE_SIZE];

	if (context == NULL)
		return;

	device = context->device;
	adreno_dev = ADRENO_DEVICE(device);
	drawctxt = ADRENO_CONTEXT(context);
	rb = drawctxt->rb;

	spin_lock(&adreno_dev->active_list_lock);
	list_del_init(&drawctxt->active_node);
	spin_unlock(&adreno_dev->active_list_lock);

	spin_lock(&drawctxt->lock);
	count = drawctxt_detach_drawobjs(drawctxt, list);
	spin_unlock(&drawctxt->lock);

	for (i = 0; i < count; i++) {
		/*
		 * If the context is detached while we are waiting for
		 * the next command in GFT SKIP CMD, print the context
		 * detached status here.
		 */
		adreno_fault_skipcmd_detached(adreno_dev, drawctxt, list[i]);
		kgsl_drawobj_destroy(list[i]);
	}

	/*
	 * internal_timestamp is set in adreno_ringbuffer_addcmds,
	 * which holds the device mutex.
	 */
	mutex_lock(&device->mutex);

	/*
	 * Wait for the last global timestamp to pass before continuing.
	 * The maximum wait time is 30s; some large IBs can take longer
	 * than 10s, and if a hang happens the context's commands can take
	 * more than 10s to retire. 30s should be enough time to wait for
	 * the commands even if a hang happens.
	 */
	ret = adreno_drawctxt_wait_rb(adreno_dev, context,
		drawctxt->internal_timestamp, 30 * 1000);

	/*
	 * If the wait for the global timestamp fails due to a timeout then
	 * nothing after this point is likely to work very well - get a GPU
	 * snapshot and BUG_ON() so we can take advantage of the debug tools
	 * to figure out what the h - e - double hockey sticks happened. If
	 * -EAGAIN is returned then recovery will kick in and there will be
	 * no more commands in the RB pipe from this context, which is what
	 * we are waiting for, so ignore -EAGAIN.
	 */
	if (ret && ret != -EAGAIN) {
		KGSL_DRV_ERR(device, "Wait for global ts=%d type=%d error=%d\n",
			drawctxt->internal_timestamp,
			drawctxt->type, ret);
		device->force_panic = 1;
		kgsl_device_snapshot(device, context);
	}

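	/*
	 * As in adreno_drawctxt_invalidate(), push the last queued timestamp
	 * out to the memstore so nothing is left waiting on a timestamp from
	 * this context.
	 */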
	kgsl_sharedmem_writel(device, &device->memstore,
			KGSL_MEMSTORE_OFFSET(context->id, soptimestamp),
			drawctxt->timestamp);

	kgsl_sharedmem_writel(device, &device->memstore,
			KGSL_MEMSTORE_OFFSET(context->id, eoptimestamp),
			drawctxt->timestamp);

	adreno_profile_process_results(adreno_dev);

	mutex_unlock(&device->mutex);

	/* wake threads waiting to submit commands from this context */
	wake_up_all(&drawctxt->waiting);
	wake_up_all(&drawctxt->wq);
}

void adreno_drawctxt_destroy(struct kgsl_context *context)
{
	struct adreno_context *drawctxt;

	if (context == NULL)
		return;

	drawctxt = ADRENO_CONTEXT(context);
	debugfs_remove_recursive(drawctxt->debug_root);
	kfree(drawctxt);
}

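/*
 * Called when the ringbuffer timestamp registered in adreno_drawctxt_switch()
 * retires: drop the reference that kept the previously active context alive
 * while its commands were still in flight.
 */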
static void _drawctxt_switch_wait_callback(struct kgsl_device *device,
		struct kgsl_event_group *group,
		void *priv, int result)
{
	struct adreno_context *drawctxt = (struct adreno_context *) priv;

	kgsl_context_put(&drawctxt->base);
}

/**
 * adreno_drawctxt_switch - switch the current draw context in a given RB
 * @adreno_dev: The 3D device that owns the context
 * @rb: The ringbuffer on which the current context is being changed
 * @drawctxt: The 3D context to switch to
 * @flags: Control flags for the switch
 *
 * Switch the current draw context in the given RB
 */

int adreno_drawctxt_switch(struct adreno_device *adreno_dev,
		struct adreno_ringbuffer *rb,
		struct adreno_context *drawctxt,
		unsigned int flags)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct kgsl_pagetable *new_pt;
	int ret = 0;

	/* We always expect a valid rb */
	if (!rb)
		return -EINVAL;

	/* already current? */
	if (rb->drawctxt_active == drawctxt)
		return ret;

	/*
	 * Submitting pt switch commands from a detached context can
	 * lead to a race condition where the pt is destroyed before
	 * the pt switch commands get executed by the GPU, leading to
	 * pagefaults.
	 */
	if (drawctxt != NULL && kgsl_context_detached(&drawctxt->base))
		return -ENOENT;

	trace_adreno_drawctxt_switch(rb, drawctxt);

	/* Get a refcount to the new instance */
	if (drawctxt) {
		if (!_kgsl_context_get(&drawctxt->base))
			return -ENOENT;

		new_pt = drawctxt->base.proc_priv->pagetable;
	} else {
		/* No context - set the default pagetable and that's it. */
		new_pt = device->mmu.defaultpagetable;
	}
	ret = adreno_ringbuffer_set_pt_ctx(rb, new_pt, drawctxt, flags);
	if (ret)
		return ret;

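	/*
	 * The outgoing context still holds the reference taken when it became
	 * active. Keep that reference until the current ringbuffer timestamp
	 * retires (it is dropped in _drawctxt_switch_wait_callback); if the
	 * event cannot be registered, drop it immediately.
	 */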
	if (rb->drawctxt_active) {
		/* Wait for the timestamp to expire */
		if (kgsl_add_event(device, &rb->events, rb->timestamp,
			_drawctxt_switch_wait_callback,
			rb->drawctxt_active)) {
			kgsl_context_put(&rb->drawctxt_active->base);
		}
	}

	rb->drawctxt_active = drawctxt;
	return 0;
}