/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: monk liu <monk.liu@amd.com>
 */

#include <drm/drmP.h>
#include "amdgpu.h"

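/*
 * amdgpu_ctx_init - initialize a context object
 *
 * Allocates the per-ring fence history, and when the GPU scheduler is
 * enabled, creates a scheduler entity on every ring using the run queue
 * selected by @pri.  On failure, any entities created so far are torn
 * down again and the fence array is freed.
 */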
int amdgpu_ctx_init(struct amdgpu_device *adev, enum amd_sched_priority pri,
		    struct amdgpu_ctx *ctx)
{
	unsigned i, j;
	int r;

	memset(ctx, 0, sizeof(*ctx));
	ctx->adev = adev;
	kref_init(&ctx->refcount);
	spin_lock_init(&ctx->ring_lock);
	ctx->fences = kzalloc(sizeof(struct fence *) * amdgpu_sched_jobs *
			      AMDGPU_MAX_RINGS, GFP_KERNEL);
	if (!ctx->fences)
		return -ENOMEM;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		ctx->rings[i].sequence = 1;
		ctx->rings[i].fences = (void *)ctx->fences + sizeof(struct fence *) *
			amdgpu_sched_jobs * i;
	}
	if (amdgpu_enable_scheduler) {
		/* create context entity for each ring */
		for (i = 0; i < adev->num_rings; i++) {
			struct amd_sched_rq *rq;

			if (pri >= AMD_SCHED_MAX_PRIORITY) {
				kfree(ctx->fences);
				return -EINVAL;
			}
			rq = &adev->rings[i]->sched.sched_rq[pri];
			r = amd_sched_entity_init(&adev->rings[i]->sched,
						  &ctx->rings[i].entity,
						  rq, amdgpu_sched_jobs);
			if (r)
				break;
		}

		if (i < adev->num_rings) {
			for (j = 0; j < i; j++)
				amd_sched_entity_fini(&adev->rings[j]->sched,
						      &ctx->rings[j].entity);
			kfree(ctx->fences);
			return r;
		}
	}
	return 0;
}

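/*
 * amdgpu_ctx_fini - tear down a context object
 *
 * Drops the references held on all stored fences, frees the fence array
 * and destroys the scheduler entities created in amdgpu_ctx_init().
 */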
void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
{
	struct amdgpu_device *adev = ctx->adev;
	unsigned i, j;

	if (!adev)
		return;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		for (j = 0; j < amdgpu_sched_jobs; ++j)
			fence_put(ctx->rings[i].fences[j]);
	kfree(ctx->fences);

	if (amdgpu_enable_scheduler) {
		for (i = 0; i < adev->num_rings; i++)
			amd_sched_entity_fini(&adev->rings[i]->sched,
					      &ctx->rings[i].entity);
	}
}

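/*
 * amdgpu_ctx_alloc - create a context and publish it in the file's IDR
 *
 * Reserves a handle in the per-file context manager first, then
 * initializes the context with normal priority.  The handle is written
 * to @id on success and released again if initialization fails.
 */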
static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv,
			    uint32_t *id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;
	int r;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	mutex_lock(&mgr->lock);
	r = idr_alloc(&mgr->ctx_handles, ctx, 1, 0, GFP_KERNEL);
	if (r < 0) {
		mutex_unlock(&mgr->lock);
		kfree(ctx);
		return r;
	}
	*id = (uint32_t)r;
	r = amdgpu_ctx_init(adev, AMD_SCHED_PRIORITY_NORMAL, ctx);
	if (r) {
		idr_remove(&mgr->ctx_handles, *id);
		*id = 0;
		kfree(ctx);
	}
	mutex_unlock(&mgr->lock);
	return r;
}

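/*
 * amdgpu_ctx_do_release - kref release callback
 *
 * Called once the last reference to a context is dropped; finalizes the
 * context and frees its memory.
 */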
static void amdgpu_ctx_do_release(struct kref *ref)
{
	struct amdgpu_ctx *ctx;

	ctx = container_of(ref, struct amdgpu_ctx, refcount);

	amdgpu_ctx_fini(ctx);

	kfree(ctx);
}

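/*
 * amdgpu_ctx_free - remove a context handle and drop the IDR's reference
 */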
static int amdgpu_ctx_free(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;

	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (ctx) {
		idr_remove(&mgr->ctx_handles, id);
		kref_put(&ctx->refcount, amdgpu_ctx_do_release);
		mutex_unlock(&mgr->lock);
		return 0;
	}
	mutex_unlock(&mgr->lock);
	return -EINVAL;
}

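/*
 * amdgpu_ctx_query - report context state to userspace
 *
 * Compares the context's saved reset counter with the device counter to
 * tell the caller whether a GPU reset happened since the last query.
 */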
static int amdgpu_ctx_query(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv, uint32_t id,
			    union drm_amdgpu_ctx_out *out)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;
	unsigned reset_counter;

	if (!fpriv)
		return -EINVAL;

	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	/* TODO: these two are always zero */
	out->state.flags = 0x0;
	out->state.hangs = 0x0;

	/* determine if a GPU reset has occurred since the last call */
	reset_counter = atomic_read(&adev->gpu_reset_counter);
	/* TODO: this should ideally return NO, GUILTY, or INNOCENT. */
	if (ctx->reset_counter == reset_counter)
		out->state.reset_status = AMDGPU_CTX_NO_RESET;
	else
		out->state.reset_status = AMDGPU_CTX_UNKNOWN_RESET;
	ctx->reset_counter = reset_counter;

	mutex_unlock(&mgr->lock);
	return 0;
}

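/*
 * amdgpu_ctx_ioctl - DRM_AMDGPU_CTX ioctl entry point
 *
 * Dispatches the alloc, free and query-state operations of the context
 * ioctl.
 */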
int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *filp)
{
	int r;
	uint32_t id;

	union drm_amdgpu_ctx *args = data;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;

	r = 0;
	id = args->in.ctx_id;

	switch (args->in.op) {
	case AMDGPU_CTX_OP_ALLOC_CTX:
		r = amdgpu_ctx_alloc(adev, fpriv, &id);
		args->out.alloc.ctx_id = id;
		break;
	case AMDGPU_CTX_OP_FREE_CTX:
		r = amdgpu_ctx_free(fpriv, id);
		break;
	case AMDGPU_CTX_OP_QUERY_STATE:
		r = amdgpu_ctx_query(adev, fpriv, id, &args->out);
		break;
	default:
		return -EINVAL;
	}

	return r;
}

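/*
 * amdgpu_ctx_get - look up a context by handle and take a reference
 *
 * Returns NULL if the handle is unknown; otherwise the caller must
 * balance this with amdgpu_ctx_put().
 */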
struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;

	if (!fpriv)
		return NULL;

	mgr = &fpriv->ctx_mgr;

	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (ctx)
		kref_get(&ctx->refcount);
	mutex_unlock(&mgr->lock);
	return ctx;
}

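/*
 * amdgpu_ctx_put - drop a reference obtained with amdgpu_ctx_get()
 */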
int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
{
	if (ctx == NULL)
		return -EINVAL;

	kref_put(&ctx->refcount, amdgpu_ctx_do_release);
	return 0;
}

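/*
 * amdgpu_ctx_add_fence - store a fence in the context's ring history
 *
 * The fence takes the slot indexed by the low bits of the ring's
 * sequence number; the fence previously stored there is waited on and
 * released so the window of remembered submissions stays bounded.
 * Returns the sequence number assigned to @fence.
 */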
uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
			      struct fence *fence)
{
	struct amdgpu_ctx_ring *cring = &ctx->rings[ring->idx];
	uint64_t seq = cring->sequence;
	unsigned idx = 0;
	struct fence *other = NULL;

	idx = seq & (amdgpu_sched_jobs - 1);
	other = cring->fences[idx];
	if (other) {
		signed long r;
		r = fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT);
		if (r < 0)
			DRM_ERROR("Error (%ld) waiting for fence!\n", r);
	}

	fence_get(fence);

	spin_lock(&ctx->ring_lock);
	cring->fences[idx] = fence;
	cring->sequence++;
	spin_unlock(&ctx->ring_lock);

	fence_put(other);

	return seq;
}

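/*
 * amdgpu_ctx_get_fence - look up a previously added fence by sequence number
 *
 * Returns ERR_PTR(-EINVAL) for sequence numbers that have not been
 * emitted yet, NULL when the requested fence has already left the
 * history window (it was waited on before being overwritten), or a new
 * reference to the stored fence.
 */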
struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
				   struct amdgpu_ring *ring, uint64_t seq)
{
	struct amdgpu_ctx_ring *cring = &ctx->rings[ring->idx];
	struct fence *fence;

	spin_lock(&ctx->ring_lock);

	if (seq >= cring->sequence) {
		spin_unlock(&ctx->ring_lock);
		return ERR_PTR(-EINVAL);
	}

	if (seq + amdgpu_sched_jobs < cring->sequence) {
		spin_unlock(&ctx->ring_lock);
		return NULL;
	}

	fence = fence_get(cring->fences[seq & (amdgpu_sched_jobs - 1)]);
	spin_unlock(&ctx->ring_lock);

	return fence;
}

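/*
 * amdgpu_ctx_mgr_init - initialize the per-file context manager
 */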
void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
{
	mutex_init(&mgr->lock);
	idr_init(&mgr->ctx_handles);
}

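/*
 * amdgpu_ctx_mgr_fini - release all remaining contexts of a file
 *
 * Drops the manager's reference on every context still in the IDR and
 * warns if something else is still holding one.
 */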
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
{
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id;

	idp = &mgr->ctx_handles;

	idr_for_each_entry(idp, ctx, id) {
		if (kref_put(&ctx->refcount, amdgpu_ctx_do_release) != 1)
			DRM_ERROR("ctx %p is still alive\n", ctx);
	}

	idr_destroy(&mgr->ctx_handles);
	mutex_destroy(&mgr->lock);
}