/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: monk liu <monk.liu@amd.com>
 */

#include <drm/drmP.h>
#include <drm/drm_auth.h>
#include "amdgpu.h"

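/**
 * amdgpu_ctx_priority_permit - check if @filp may create a context at @priority
 * @filp: DRM file the request came from
 * @priority: requested scheduler priority
 *
 * Contexts at AMD_SCHED_PRIORITY_NORMAL and below may be created by anyone;
 * anything higher requires CAP_SYS_NICE or DRM master status.
 *
 * Returns 0 if the priority is allowed, -EACCES otherwise.
 */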
static int amdgpu_ctx_priority_permit(struct drm_file *filp,
				      enum amd_sched_priority priority)
{
	/* NORMAL and below are accessible by everyone */
	if (priority <= AMD_SCHED_PRIORITY_NORMAL)
		return 0;

	if (capable(CAP_SYS_NICE))
		return 0;

	if (drm_is_current_master(filp))
		return 0;

	return -EACCES;
}

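/**
 * amdgpu_ctx_init - set up a freshly allocated context
 * @adev: amdgpu device
 * @priority: initial scheduler priority, already mapped from the UAPI value
 * @filp: DRM file, used for the priority permission check
 * @ctx: context to initialize
 *
 * Allocates a circular buffer of amdgpu_sched_jobs fence slots per hardware
 * ring and creates one scheduler entity per ring in the run queue matching
 * @priority. The KIQ ring is skipped since it never serves user submissions.
 */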
static int amdgpu_ctx_init(struct amdgpu_device *adev,
			   enum amd_sched_priority priority,
			   struct drm_file *filp,
			   struct amdgpu_ctx *ctx)
{
	unsigned i, j;
	int r;

	if (priority < 0 || priority >= AMD_SCHED_PRIORITY_MAX)
		return -EINVAL;

	r = amdgpu_ctx_priority_permit(filp, priority);
	if (r)
		return r;

	memset(ctx, 0, sizeof(*ctx));
	ctx->adev = adev;
	kref_init(&ctx->refcount);
	spin_lock_init(&ctx->ring_lock);
	ctx->fences = kcalloc(amdgpu_sched_jobs * AMDGPU_MAX_RINGS,
			      sizeof(struct dma_fence *), GFP_KERNEL);
	if (!ctx->fences)
		return -ENOMEM;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		ctx->rings[i].sequence = 1;
		ctx->rings[i].fences = &ctx->fences[amdgpu_sched_jobs * i];
	}

	ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);
	ctx->init_priority = priority;
	ctx->override_priority = AMD_SCHED_PRIORITY_UNSET;

	/* create context entity for each ring */
	for (i = 0; i < adev->num_rings; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		struct amd_sched_rq *rq;

		rq = &ring->sched.sched_rq[priority];

		if (ring == &adev->gfx.kiq.ring)
			continue;

		r = amd_sched_entity_init(&ring->sched, &ctx->rings[i].entity,
					  rq, amdgpu_sched_jobs);
		if (r)
			goto failed;
	}

	r = amdgpu_queue_mgr_init(adev, &ctx->queue_mgr);
	if (r)
		goto failed;

	return 0;

failed:
	for (j = 0; j < i; j++)
		amd_sched_entity_fini(&adev->rings[j]->sched,
				      &ctx->rings[j].entity);
	kfree(ctx->fences);
	ctx->fences = NULL;
	return r;
}

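/*
 * Tear down a context: drop every fence still cached in the per-ring
 * circular buffers, free the fence storage, and destroy the scheduler
 * entities and the queue manager.
 */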
static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
{
	struct amdgpu_device *adev = ctx->adev;
	unsigned i, j;

	if (!adev)
		return;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		for (j = 0; j < amdgpu_sched_jobs; ++j)
			dma_fence_put(ctx->rings[i].fences[j]);
	kfree(ctx->fences);
	ctx->fences = NULL;

	for (i = 0; i < adev->num_rings; i++)
		amd_sched_entity_fini(&adev->rings[i]->sched,
				      &ctx->rings[i].entity);

	amdgpu_queue_mgr_fini(adev, &ctx->queue_mgr);
}

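/**
 * amdgpu_ctx_alloc - create a context and publish it in the per-file IDR
 * @adev: amdgpu device
 * @fpriv: file private the context will belong to
 * @filp: DRM file, used for the priority permission check
 * @priority: initial scheduler priority
 * @id: returns the new context handle
 *
 * The handle is allocated before amdgpu_ctx_init() runs and removed again
 * on failure; both steps happen under mgr->lock.
 */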
static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv,
			    struct drm_file *filp,
			    enum amd_sched_priority priority,
			    uint32_t *id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;
	int r;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	mutex_lock(&mgr->lock);
	r = idr_alloc(&mgr->ctx_handles, ctx, 1, 0, GFP_KERNEL);
	if (r < 0) {
		mutex_unlock(&mgr->lock);
		kfree(ctx);
		return r;
	}

	*id = (uint32_t)r;
	r = amdgpu_ctx_init(adev, priority, filp, ctx);
	if (r) {
		idr_remove(&mgr->ctx_handles, *id);
		*id = 0;
		kfree(ctx);
	}
	mutex_unlock(&mgr->lock);
	return r;
}

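/*
 * kref release callback: runs once the last reference to a context is
 * dropped, tears the context down and frees it.
 */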
static void amdgpu_ctx_do_release(struct kref *ref)
{
	struct amdgpu_ctx *ctx;

	ctx = container_of(ref, struct amdgpu_ctx, refcount);

	amdgpu_ctx_fini(ctx);

	kfree(ctx);
}

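/**
 * amdgpu_ctx_free - drop the file's reference on a context handle
 * @fpriv: file private the handle belongs to
 * @id: context handle
 *
 * Returns 0 if the handle existed, -EINVAL otherwise. The context itself
 * is only destroyed once the last kref is gone.
 */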
static int amdgpu_ctx_free(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;

	mutex_lock(&mgr->lock);
	ctx = idr_remove(&mgr->ctx_handles, id);
	if (ctx)
		kref_put(&ctx->refcount, amdgpu_ctx_do_release);
	mutex_unlock(&mgr->lock);
	return ctx ? 0 : -EINVAL;
}

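/**
 * amdgpu_ctx_query - report context state to userspace
 * @adev: amdgpu device
 * @fpriv: file private the handle belongs to
 * @id: context handle
 * @out: state returned to userspace
 *
 * Compares the per-context snapshot of adev->gpu_reset_counter against the
 * current value to tell userspace whether a GPU reset happened since the
 * last query, then takes a new snapshot.
 */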
static int amdgpu_ctx_query(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv, uint32_t id,
			    union drm_amdgpu_ctx_out *out)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;
	unsigned reset_counter;

	if (!fpriv)
		return -EINVAL;

	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	/* TODO: these two are always zero */
	out->state.flags = 0x0;
	out->state.hangs = 0x0;

	/* determine if a GPU reset has occurred since the last call */
	reset_counter = atomic_read(&adev->gpu_reset_counter);
	/* TODO: this should ideally return NO, GUILTY, or INNOCENT. */
	if (ctx->reset_counter == reset_counter)
		out->state.reset_status = AMDGPU_CTX_NO_RESET;
	else
		out->state.reset_status = AMDGPU_CTX_UNKNOWN_RESET;
	ctx->reset_counter = reset_counter;

	mutex_unlock(&mgr->lock);
	return 0;
}

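/**
 * amdgpu_to_sched_priority - map a UAPI context priority to a scheduler priority
 * @amdgpu_priority: AMDGPU_CTX_PRIORITY_* value from userspace
 *
 * Out-of-range values trigger a WARN and map to AMD_SCHED_PRIORITY_INVALID,
 * which the caller is expected to handle.
 */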
static enum amd_sched_priority amdgpu_to_sched_priority(int amdgpu_priority)
{
	switch (amdgpu_priority) {
	case AMDGPU_CTX_PRIORITY_HIGH_HW:
		return AMD_SCHED_PRIORITY_HIGH_HW;
	case AMDGPU_CTX_PRIORITY_HIGH_SW:
		return AMD_SCHED_PRIORITY_HIGH_SW;
	case AMDGPU_CTX_PRIORITY_NORMAL:
		return AMD_SCHED_PRIORITY_NORMAL;
	case AMDGPU_CTX_PRIORITY_LOW_SW:
	case AMDGPU_CTX_PRIORITY_LOW_HW:
		return AMD_SCHED_PRIORITY_LOW;
	case AMDGPU_CTX_PRIORITY_UNSET:
		return AMD_SCHED_PRIORITY_UNSET;
	default:
		WARN(1, "Invalid context priority %d\n", amdgpu_priority);
		return AMD_SCHED_PRIORITY_INVALID;
	}
}

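/**
 * amdgpu_ctx_ioctl - dispatch the DRM_AMDGPU_CTX ioctl
 * @dev: DRM device
 * @data: union drm_amdgpu_ctx argument from userspace
 * @filp: DRM file the ioctl arrived on
 *
 * Handles AMDGPU_CTX_OP_ALLOC_CTX, AMDGPU_CTX_OP_FREE_CTX and
 * AMDGPU_CTX_OP_QUERY_STATE. A minimal userspace sketch of context
 * creation (illustrative only, error handling omitted):
 *
 *	union drm_amdgpu_ctx args = {};
 *	args.in.op = AMDGPU_CTX_OP_ALLOC_CTX;
 *	args.in.priority = AMDGPU_CTX_PRIORITY_NORMAL;
 *	ioctl(fd, DRM_IOCTL_AMDGPU_CTX, &args);
 *	... args.out.alloc.ctx_id now names the new context ...
 */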
int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *filp)
{
	int r;
	uint32_t id;
	enum amd_sched_priority priority;

	union drm_amdgpu_ctx *args = data;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;

	r = 0;
	id = args->in.ctx_id;
	priority = amdgpu_to_sched_priority(args->in.priority);

	/* For backwards compatibility reasons, we need to accept
	 * ioctls with garbage in the priority field */
	if (priority == AMD_SCHED_PRIORITY_INVALID)
		priority = AMD_SCHED_PRIORITY_NORMAL;

	switch (args->in.op) {
	case AMDGPU_CTX_OP_ALLOC_CTX:
		r = amdgpu_ctx_alloc(adev, fpriv, filp, priority, &id);
		args->out.alloc.ctx_id = id;
		break;
	case AMDGPU_CTX_OP_FREE_CTX:
		r = amdgpu_ctx_free(fpriv, id);
		break;
	case AMDGPU_CTX_OP_QUERY_STATE:
		r = amdgpu_ctx_query(adev, fpriv, id, &args->out);
		break;
	default:
		return -EINVAL;
	}

	return r;
}

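/**
 * amdgpu_ctx_get - look up a context handle and take a reference
 * @fpriv: file private the handle belongs to
 * @id: context handle
 *
 * Returns the context with an elevated refcount, or NULL if the handle is
 * unknown. The caller must balance with amdgpu_ctx_put().
 */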
struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;

	if (!fpriv)
		return NULL;

	mgr = &fpriv->ctx_mgr;

	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (ctx)
		kref_get(&ctx->refcount);
	mutex_unlock(&mgr->lock);
	return ctx;
}

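/*
 * Drop a reference taken with amdgpu_ctx_get(); may release the context if
 * this was the last one.
 */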
int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
{
	if (ctx == NULL)
		return -EINVAL;

	kref_put(&ctx->refcount, amdgpu_ctx_do_release);
	return 0;
}

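/**
 * amdgpu_ctx_add_fence - remember the fence of the latest submission on @ring
 * @ctx: context the submission belongs to
 * @ring: ring the job was submitted to
 * @fence: fence of the job
 * @handler: optionally returns the sequence number that names @fence
 *
 * Each context ring keeps the last amdgpu_sched_jobs fences in a circular
 * buffer indexed by sequence number; the masking below relies on
 * amdgpu_sched_jobs being a power of two (the module parameter is assumed
 * to be validated at init). If the slot about to be reused still holds a
 * fence from amdgpu_sched_jobs submissions ago, that fence is waited on
 * first, which throttles how far a context can run ahead.
 *
 * Returns 0 on success, or a negative error code if waiting on the old
 * fence was interrupted.
 */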
int amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
			 struct dma_fence *fence, uint64_t *handler)
{
	struct amdgpu_ctx_ring *cring = &ctx->rings[ring->idx];
	uint64_t seq = cring->sequence;
	unsigned idx = 0;
	struct dma_fence *other = NULL;

	idx = seq & (amdgpu_sched_jobs - 1);
	other = cring->fences[idx];
	if (other) {
		signed long r;
		r = dma_fence_wait_timeout(other, true, MAX_SCHEDULE_TIMEOUT);
		if (r < 0)
			return r;
	}

	dma_fence_get(fence);

	spin_lock(&ctx->ring_lock);
	cring->fences[idx] = fence;
	cring->sequence++;
	spin_unlock(&ctx->ring_lock);

	dma_fence_put(other);
	if (handler)
		*handler = seq;

	return 0;
}

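/**
 * amdgpu_ctx_get_fence - look up a fence by context sequence number
 * @ctx: context to look in
 * @ring: ring the sequence number belongs to
 * @seq: sequence number, or ~0ull for the most recent submission
 *
 * Returns a referenced fence, NULL if the fence has already left the
 * circular buffer (and thus signaled), or ERR_PTR(-EINVAL) for a sequence
 * number that was never submitted.
 */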
struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
				       struct amdgpu_ring *ring, uint64_t seq)
{
	struct amdgpu_ctx_ring *cring = &ctx->rings[ring->idx];
	struct dma_fence *fence;

	spin_lock(&ctx->ring_lock);

	if (seq == ~0ull)
		seq = ctx->rings[ring->idx].sequence - 1;

	if (seq >= cring->sequence) {
		spin_unlock(&ctx->ring_lock);
		return ERR_PTR(-EINVAL);
	}

	if (seq + amdgpu_sched_jobs < cring->sequence) {
		spin_unlock(&ctx->ring_lock);
		return NULL;
	}

	fence = dma_fence_get(cring->fences[seq & (amdgpu_sched_jobs - 1)]);
	spin_unlock(&ctx->ring_lock);

	return fence;
}

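/**
 * amdgpu_ctx_priority_override - dynamically change a context's priority
 * @ctx: context to adjust
 * @priority: new override priority, or AMD_SCHED_PRIORITY_UNSET to fall
 *	back to the priority the context was created with
 *
 * Moves every ring entity of the context to the scheduler run queue that
 * matches the effective priority. The KIQ ring is skipped; its entity is
 * never initialized for user contexts.
 */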
void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
				  enum amd_sched_priority priority)
{
	int i;
	struct amdgpu_device *adev = ctx->adev;
	struct amd_sched_rq *rq;
	struct amd_sched_entity *entity;
	struct amdgpu_ring *ring;
	enum amd_sched_priority ctx_prio;

	ctx->override_priority = priority;

	ctx_prio = (ctx->override_priority == AMD_SCHED_PRIORITY_UNSET) ?
			ctx->init_priority : ctx->override_priority;

	for (i = 0; i < adev->num_rings; i++) {
		ring = adev->rings[i];
		entity = &ctx->rings[i].entity;
		rq = &ring->sched.sched_rq[ctx_prio];

		if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
			continue;

		amd_sched_entity_set_rq(entity, rq);
	}
}

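/*
 * Per-file context manager: a mutex-protected IDR mapping handles to
 * contexts. amdgpu_ctx_mgr_fini() runs at file release and force-drops
 * anything userspace leaked, warning if a context is still referenced
 * elsewhere.
 */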
void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
{
	mutex_init(&mgr->lock);
	idr_init(&mgr->ctx_handles);
}

void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
{
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id;

	idp = &mgr->ctx_handles;

	idr_for_each_entry(idp, ctx, id) {
		if (kref_put(&ctx->refcount, amdgpu_ctx_do_release) != 1)
			DRM_ERROR("ctx %p is still alive\n", ctx);
	}

	idr_destroy(&mgr->ctx_handles);
	mutex_destroy(&mgr->lock);
}