Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1 | /* |
| 2 | * Copyright 2015 Advanced Micro Devices, Inc. |
| 3 | * |
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a |
| 5 | * copy of this software and associated documentation files (the "Software"), |
| 6 | * to deal in the Software without restriction, including without limitation |
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
| 8 | * and/or sell copies of the Software, and to permit persons to whom the |
| 9 | * Software is furnished to do so, subject to the following conditions: |
| 10 | * |
| 11 | * The above copyright notice and this permission notice shall be included in |
| 12 | * all copies or substantial portions of the Software. |
| 13 | * |
| 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
| 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
| 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
| 17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
| 18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
| 19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
| 20 | * OTHER DEALINGS IN THE SOFTWARE. |
| 21 | * |
| 22 | * Authors: monk liu <monk.liu@amd.com> |
| 23 | */ |
| 24 | |
| 25 | #include <drm/drmP.h> |
Andres Rodriguez | c2636dc | 2016-12-22 17:06:50 -0500 | [diff] [blame] | 26 | #include <drm/drm_auth.h> |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 27 | #include "amdgpu.h" |
Andres Rodriguez | 52c6a62 | 2017-06-26 16:17:13 -0400 | [diff] [blame] | 28 | #include "amdgpu_sched.h" |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 29 | |
/* Convert an embedded drm_sched_entity pointer back to its enclosing
 * amdgpu_ctx_entity. */
#define to_amdgpu_ctx_entity(e)	\
	container_of((e), struct amdgpu_ctx_entity, entity)

/* Number of scheduler entities (userspace-visible "rings") created per
 * hardware IP type for each context.  Indexed by AMDGPU_HW_IP_*. */
const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = {
	[AMDGPU_HW_IP_GFX] = 1,
	[AMDGPU_HW_IP_COMPUTE] = 4,
	[AMDGPU_HW_IP_DMA] = 2,
	[AMDGPU_HW_IP_UVD] = 1,
	[AMDGPU_HW_IP_VCE] = 1,
	[AMDGPU_HW_IP_UVD_ENC] = 1,
	[AMDGPU_HW_IP_VCN_DEC] = 1,
	[AMDGPU_HW_IP_VCN_ENC] = 1,
	[AMDGPU_HW_IP_VCN_JPEG] = 1,
};
| 44 | |
| 45 | static int amdgput_ctx_total_num_entities(void) |
| 46 | { |
| 47 | unsigned i, num_entities = 0; |
| 48 | |
| 49 | for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) |
| 50 | num_entities += amdgpu_ctx_num_entities[i]; |
| 51 | |
| 52 | return num_entities; |
| 53 | } |
Christian König | 0d346a1 | 2018-07-19 14:22:25 +0200 | [diff] [blame] | 54 | |
Andres Rodriguez | c2636dc | 2016-12-22 17:06:50 -0500 | [diff] [blame] | 55 | static int amdgpu_ctx_priority_permit(struct drm_file *filp, |
Lucas Stach | 1b1f42d | 2017-12-06 17:49:39 +0100 | [diff] [blame] | 56 | enum drm_sched_priority priority) |
Andres Rodriguez | c2636dc | 2016-12-22 17:06:50 -0500 | [diff] [blame] | 57 | { |
| 58 | /* NORMAL and below are accessible by everyone */ |
Lucas Stach | 1b1f42d | 2017-12-06 17:49:39 +0100 | [diff] [blame] | 59 | if (priority <= DRM_SCHED_PRIORITY_NORMAL) |
Andres Rodriguez | c2636dc | 2016-12-22 17:06:50 -0500 | [diff] [blame] | 60 | return 0; |
| 61 | |
| 62 | if (capable(CAP_SYS_NICE)) |
| 63 | return 0; |
| 64 | |
| 65 | if (drm_is_current_master(filp)) |
| 66 | return 0; |
| 67 | |
| 68 | return -EACCES; |
| 69 | } |
| 70 | |
| 71 | static int amdgpu_ctx_init(struct amdgpu_device *adev, |
Lucas Stach | 1b1f42d | 2017-12-06 17:49:39 +0100 | [diff] [blame] | 72 | enum drm_sched_priority priority, |
Andres Rodriguez | c2636dc | 2016-12-22 17:06:50 -0500 | [diff] [blame] | 73 | struct drm_file *filp, |
| 74 | struct amdgpu_ctx *ctx) |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 75 | { |
Christian König | 1b1f2fe | 2018-08-01 16:00:52 +0200 | [diff] [blame] | 76 | unsigned num_entities = amdgput_ctx_total_num_entities(); |
| 77 | unsigned i, j; |
Christian König | 47f3850 | 2015-08-04 17:51:05 +0200 | [diff] [blame] | 78 | int r; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 79 | |
Lucas Stach | 1b1f42d | 2017-12-06 17:49:39 +0100 | [diff] [blame] | 80 | if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX) |
Andres Rodriguez | c2636dc | 2016-12-22 17:06:50 -0500 | [diff] [blame] | 81 | return -EINVAL; |
| 82 | |
| 83 | r = amdgpu_ctx_priority_permit(filp, priority); |
| 84 | if (r) |
| 85 | return r; |
| 86 | |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 87 | memset(ctx, 0, sizeof(*ctx)); |
Chunming Zhou | 9cb7e5a | 2015-07-21 13:17:19 +0800 | [diff] [blame] | 88 | ctx->adev = adev; |
Christian König | 1b1f2fe | 2018-08-01 16:00:52 +0200 | [diff] [blame] | 89 | |
| 90 | ctx->fences = kcalloc(amdgpu_sched_jobs * num_entities, |
Chris Wilson | f54d186 | 2016-10-25 13:00:45 +0100 | [diff] [blame] | 91 | sizeof(struct dma_fence*), GFP_KERNEL); |
Chunming Zhou | 37cd0ca | 2015-12-10 15:45:11 +0800 | [diff] [blame] | 92 | if (!ctx->fences) |
| 93 | return -ENOMEM; |
Chunming Zhou | 23ca0e4 | 2015-07-06 13:42:58 +0800 | [diff] [blame] | 94 | |
Christian König | 1b1f2fe | 2018-08-01 16:00:52 +0200 | [diff] [blame] | 95 | ctx->entities[0] = kcalloc(num_entities, |
| 96 | sizeof(struct amdgpu_ctx_entity), |
| 97 | GFP_KERNEL); |
| 98 | if (!ctx->entities[0]) { |
| 99 | r = -ENOMEM; |
| 100 | goto error_free_fences; |
Chunming Zhou | 37cd0ca | 2015-12-10 15:45:11 +0800 | [diff] [blame] | 101 | } |
Nicolai Hähnle | ce199ad | 2016-10-04 09:43:30 +0200 | [diff] [blame] | 102 | |
Christian König | 1b1f2fe | 2018-08-01 16:00:52 +0200 | [diff] [blame] | 103 | for (i = 0; i < num_entities; ++i) { |
| 104 | struct amdgpu_ctx_entity *entity = &ctx->entities[0][i]; |
| 105 | |
| 106 | entity->sequence = 1; |
| 107 | entity->fences = &ctx->fences[amdgpu_sched_jobs * i]; |
| 108 | } |
| 109 | for (i = 1; i < AMDGPU_HW_IP_NUM; ++i) |
| 110 | ctx->entities[i] = ctx->entities[i - 1] + |
| 111 | amdgpu_ctx_num_entities[i - 1]; |
| 112 | |
| 113 | kref_init(&ctx->refcount); |
| 114 | spin_lock_init(&ctx->ring_lock); |
| 115 | mutex_init(&ctx->lock); |
| 116 | |
Nicolai Hähnle | ce199ad | 2016-10-04 09:43:30 +0200 | [diff] [blame] | 117 | ctx->reset_counter = atomic_read(&adev->gpu_reset_counter); |
Monk Liu | 668ca1b | 2017-10-17 14:39:23 +0800 | [diff] [blame] | 118 | ctx->reset_counter_query = ctx->reset_counter; |
Christian König | e55f2b6 | 2017-10-09 15:18:43 +0200 | [diff] [blame] | 119 | ctx->vram_lost_counter = atomic_read(&adev->vram_lost_counter); |
Andres Rodriguez | c23be4a | 2017-06-06 20:20:38 -0400 | [diff] [blame] | 120 | ctx->init_priority = priority; |
Lucas Stach | 1b1f42d | 2017-12-06 17:49:39 +0100 | [diff] [blame] | 121 | ctx->override_priority = DRM_SCHED_PRIORITY_UNSET; |
Nicolai Hähnle | ce199ad | 2016-10-04 09:43:30 +0200 | [diff] [blame] | 122 | |
Christian König | 1b1f2fe | 2018-08-01 16:00:52 +0200 | [diff] [blame] | 123 | for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) { |
| 124 | struct amdgpu_ring *rings[AMDGPU_MAX_RINGS]; |
| 125 | struct drm_sched_rq *rqs[AMDGPU_MAX_RINGS]; |
| 126 | unsigned num_rings; |
Christian König | 2087417 | 2016-02-11 09:56:44 +0100 | [diff] [blame] | 127 | |
Christian König | 1b1f2fe | 2018-08-01 16:00:52 +0200 | [diff] [blame] | 128 | switch (i) { |
| 129 | case AMDGPU_HW_IP_GFX: |
| 130 | rings[0] = &adev->gfx.gfx_ring[0]; |
| 131 | num_rings = 1; |
| 132 | break; |
| 133 | case AMDGPU_HW_IP_COMPUTE: |
| 134 | for (j = 0; j < adev->gfx.num_compute_rings; ++j) |
| 135 | rings[j] = &adev->gfx.compute_ring[j]; |
| 136 | num_rings = adev->gfx.num_compute_rings; |
| 137 | break; |
| 138 | case AMDGPU_HW_IP_DMA: |
| 139 | for (j = 0; j < adev->sdma.num_instances; ++j) |
| 140 | rings[j] = &adev->sdma.instance[j].ring; |
| 141 | num_rings = adev->sdma.num_instances; |
| 142 | break; |
| 143 | case AMDGPU_HW_IP_UVD: |
| 144 | rings[0] = &adev->uvd.inst[0].ring; |
| 145 | num_rings = 1; |
| 146 | break; |
| 147 | case AMDGPU_HW_IP_VCE: |
| 148 | rings[0] = &adev->vce.ring[0]; |
| 149 | num_rings = 1; |
| 150 | break; |
| 151 | case AMDGPU_HW_IP_UVD_ENC: |
| 152 | rings[0] = &adev->uvd.inst[0].ring_enc[0]; |
| 153 | num_rings = 1; |
| 154 | break; |
| 155 | case AMDGPU_HW_IP_VCN_DEC: |
| 156 | rings[0] = &adev->vcn.ring_dec; |
| 157 | num_rings = 1; |
| 158 | break; |
| 159 | case AMDGPU_HW_IP_VCN_ENC: |
| 160 | rings[0] = &adev->vcn.ring_enc[0]; |
| 161 | num_rings = 1; |
| 162 | break; |
| 163 | case AMDGPU_HW_IP_VCN_JPEG: |
| 164 | rings[0] = &adev->vcn.ring_jpeg; |
| 165 | num_rings = 1; |
| 166 | break; |
Christian König | 845e6fd | 2018-07-13 09:12:44 +0200 | [diff] [blame] | 167 | } |
Christian König | 1b1f2fe | 2018-08-01 16:00:52 +0200 | [diff] [blame] | 168 | |
| 169 | for (j = 0; j < num_rings; ++j) |
| 170 | rqs[j] = &rings[j]->sched.sched_rq[priority]; |
| 171 | |
| 172 | for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) |
| 173 | r = drm_sched_entity_init(&ctx->entities[i][j].entity, |
| 174 | rqs, num_rings, &ctx->guilty); |
Chunming Zhou | cadf97b | 2016-01-15 11:25:00 +0800 | [diff] [blame] | 175 | if (r) |
Christian König | 1b1f2fe | 2018-08-01 16:00:52 +0200 | [diff] [blame] | 176 | goto error_cleanup_entities; |
Chunming Zhou | cadf97b | 2016-01-15 11:25:00 +0800 | [diff] [blame] | 177 | } |
| 178 | |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 179 | return 0; |
Huang Rui | 8ed8147 | 2016-10-26 17:07:03 +0800 | [diff] [blame] | 180 | |
Christian König | 1b1f2fe | 2018-08-01 16:00:52 +0200 | [diff] [blame] | 181 | error_cleanup_entities: |
| 182 | for (i = 0; i < num_entities; ++i) |
| 183 | drm_sched_entity_destroy(&ctx->entities[0][i].entity); |
| 184 | kfree(ctx->entities[0]); |
| 185 | |
| 186 | error_free_fences: |
Huang Rui | 8ed8147 | 2016-10-26 17:07:03 +0800 | [diff] [blame] | 187 | kfree(ctx->fences); |
| 188 | ctx->fences = NULL; |
| 189 | return r; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 190 | } |
| 191 | |
/* Final kref release: drop every stored fence, free the fence and entity
 * allocations, and free the context itself.  Called via kref_put() from
 * amdgpu_ctx_do_release(), which has already destroyed the scheduler
 * entities. */
static void amdgpu_ctx_fini(struct kref *ref)
{
	struct amdgpu_ctx *ctx = container_of(ref, struct amdgpu_ctx, refcount);
	unsigned num_entities = amdgput_ctx_total_num_entities();
	struct amdgpu_device *adev = ctx->adev;
	unsigned i, j;

	/* adev is cleared elsewhere on teardown; nothing to release then.
	 * NOTE(review): returning here leaks ctx itself — presumably
	 * intentional during device teardown; confirm. */
	if (!adev)
		return;

	/* Each entity owns amdgpu_sched_jobs fence slots in the flat array. */
	for (i = 0; i < num_entities; ++i)
		for (j = 0; j < amdgpu_sched_jobs; ++j)
			dma_fence_put(ctx->entities[0][i].fences[j]);
	kfree(ctx->fences);
	kfree(ctx->entities[0]);

	mutex_destroy(&ctx->lock);

	kfree(ctx);
}
| 212 | |
/* Look up the scheduler entity for (@hw_ip, @instance, @ring) in @ctx.
 * On success stores the entity in *@entity and returns 0; returns
 * -EINVAL for an unknown IP type, a nonzero instance, or an
 * out-of-range ring index. */
int amdgpu_ctx_get_entity(struct amdgpu_ctx *ctx, u32 hw_ip, u32 instance,
			  u32 ring, struct drm_sched_entity **entity)
{
	if (hw_ip >= AMDGPU_HW_IP_NUM) {
		DRM_ERROR("unknown HW IP type: %d\n", hw_ip);
		return -EINVAL;
	}

	/* Right now all IPs have only one instance - multiple rings. */
	if (instance != 0) {
		DRM_DEBUG("invalid ip instance: %d\n", instance);
		return -EINVAL;
	}

	if (ring >= amdgpu_ctx_num_entities[hw_ip]) {
		DRM_DEBUG("invalid ring: %d %d\n", hw_ip, ring);
		return -EINVAL;
	}

	*entity = &ctx->entities[hw_ip][ring].entity;
	return 0;
}
| 235 | |
Christian König | 47f3850 | 2015-08-04 17:51:05 +0200 | [diff] [blame] | 236 | static int amdgpu_ctx_alloc(struct amdgpu_device *adev, |
| 237 | struct amdgpu_fpriv *fpriv, |
Andres Rodriguez | c2636dc | 2016-12-22 17:06:50 -0500 | [diff] [blame] | 238 | struct drm_file *filp, |
Lucas Stach | 1b1f42d | 2017-12-06 17:49:39 +0100 | [diff] [blame] | 239 | enum drm_sched_priority priority, |
Christian König | 47f3850 | 2015-08-04 17:51:05 +0200 | [diff] [blame] | 240 | uint32_t *id) |
| 241 | { |
| 242 | struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr; |
| 243 | struct amdgpu_ctx *ctx; |
| 244 | int r; |
| 245 | |
| 246 | ctx = kmalloc(sizeof(*ctx), GFP_KERNEL); |
| 247 | if (!ctx) |
| 248 | return -ENOMEM; |
| 249 | |
| 250 | mutex_lock(&mgr->lock); |
| 251 | r = idr_alloc(&mgr->ctx_handles, ctx, 1, 0, GFP_KERNEL); |
| 252 | if (r < 0) { |
| 253 | mutex_unlock(&mgr->lock); |
| 254 | kfree(ctx); |
| 255 | return r; |
| 256 | } |
Andres Rodriguez | c2636dc | 2016-12-22 17:06:50 -0500 | [diff] [blame] | 257 | |
Christian König | 47f3850 | 2015-08-04 17:51:05 +0200 | [diff] [blame] | 258 | *id = (uint32_t)r; |
Andres Rodriguez | c2636dc | 2016-12-22 17:06:50 -0500 | [diff] [blame] | 259 | r = amdgpu_ctx_init(adev, priority, filp, ctx); |
Chunming Zhou | c648ed7 | 2015-12-10 15:50:02 +0800 | [diff] [blame] | 260 | if (r) { |
| 261 | idr_remove(&mgr->ctx_handles, *id); |
| 262 | *id = 0; |
| 263 | kfree(ctx); |
| 264 | } |
Christian König | 47f3850 | 2015-08-04 17:51:05 +0200 | [diff] [blame] | 265 | mutex_unlock(&mgr->lock); |
Christian König | 47f3850 | 2015-08-04 17:51:05 +0200 | [diff] [blame] | 266 | return r; |
| 267 | } |
| 268 | |
| 269 | static void amdgpu_ctx_do_release(struct kref *ref) |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 270 | { |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 271 | struct amdgpu_ctx *ctx; |
Christian König | 1b1f2fe | 2018-08-01 16:00:52 +0200 | [diff] [blame] | 272 | unsigned num_entities; |
Emily Deng | 8ee3a52 | 2018-04-16 10:07:02 +0800 | [diff] [blame] | 273 | u32 i; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 274 | |
Christian König | 47f3850 | 2015-08-04 17:51:05 +0200 | [diff] [blame] | 275 | ctx = container_of(ref, struct amdgpu_ctx, refcount); |
| 276 | |
Christian König | 1b1f2fe | 2018-08-01 16:00:52 +0200 | [diff] [blame] | 277 | num_entities = 0; |
| 278 | for (i = 0; i < AMDGPU_HW_IP_NUM; i++) |
| 279 | num_entities += amdgpu_ctx_num_entities[i]; |
Andrey Grodzovsky | 20b6b78 | 2018-05-15 14:12:21 -0400 | [diff] [blame] | 280 | |
Christian König | 1b1f2fe | 2018-08-01 16:00:52 +0200 | [diff] [blame] | 281 | for (i = 0; i < num_entities; i++) |
| 282 | drm_sched_entity_destroy(&ctx->entities[0][i].entity); |
Christian König | 47f3850 | 2015-08-04 17:51:05 +0200 | [diff] [blame] | 283 | |
Emily Deng | 8ee3a52 | 2018-04-16 10:07:02 +0800 | [diff] [blame] | 284 | amdgpu_ctx_fini(ref); |
Christian König | 47f3850 | 2015-08-04 17:51:05 +0200 | [diff] [blame] | 285 | } |
| 286 | |
| 287 | static int amdgpu_ctx_free(struct amdgpu_fpriv *fpriv, uint32_t id) |
| 288 | { |
| 289 | struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr; |
| 290 | struct amdgpu_ctx *ctx; |
| 291 | |
| 292 | mutex_lock(&mgr->lock); |
Matthew Wilcox | d3e709e | 2016-12-22 13:30:22 -0500 | [diff] [blame] | 293 | ctx = idr_remove(&mgr->ctx_handles, id); |
| 294 | if (ctx) |
Chunming Zhou | 23ca0e4 | 2015-07-06 13:42:58 +0800 | [diff] [blame] | 295 | kref_put(&ctx->refcount, amdgpu_ctx_do_release); |
Christian König | 47f3850 | 2015-08-04 17:51:05 +0200 | [diff] [blame] | 296 | mutex_unlock(&mgr->lock); |
Matthew Wilcox | d3e709e | 2016-12-22 13:30:22 -0500 | [diff] [blame] | 297 | return ctx ? 0 : -EINVAL; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 298 | } |
| 299 | |
/* Legacy AMDGPU_CTX_OP_QUERY_STATE handler: report whether a GPU reset
 * has happened since this context was last queried.  Returns 0 on
 * success, -EINVAL for a missing fpriv or unknown handle. */
static int amdgpu_ctx_query(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv, uint32_t id,
			    union drm_amdgpu_ctx_out *out)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;
	unsigned reset_counter;

	if (!fpriv)
		return -EINVAL;

	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	/* TODO: these two are always zero */
	out->state.flags = 0x0;
	out->state.hangs = 0x0;

	/* determine if a GPU reset has occurred since the last call */
	reset_counter = atomic_read(&adev->gpu_reset_counter);
	/* TODO: this should ideally return NO, GUILTY, or INNOCENT. */
	if (ctx->reset_counter_query == reset_counter)
		out->state.reset_status = AMDGPU_CTX_NO_RESET;
	else
		out->state.reset_status = AMDGPU_CTX_UNKNOWN_RESET;
	/* Remember the counter so the next query only reports NEW resets. */
	ctx->reset_counter_query = reset_counter;

	mutex_unlock(&mgr->lock);
	return 0;
}
| 335 | |
Monk Liu | bc1b1bf | 2017-10-17 14:58:01 +0800 | [diff] [blame] | 336 | static int amdgpu_ctx_query2(struct amdgpu_device *adev, |
| 337 | struct amdgpu_fpriv *fpriv, uint32_t id, |
| 338 | union drm_amdgpu_ctx_out *out) |
| 339 | { |
| 340 | struct amdgpu_ctx *ctx; |
| 341 | struct amdgpu_ctx_mgr *mgr; |
| 342 | |
| 343 | if (!fpriv) |
| 344 | return -EINVAL; |
| 345 | |
| 346 | mgr = &fpriv->ctx_mgr; |
| 347 | mutex_lock(&mgr->lock); |
| 348 | ctx = idr_find(&mgr->ctx_handles, id); |
| 349 | if (!ctx) { |
| 350 | mutex_unlock(&mgr->lock); |
| 351 | return -EINVAL; |
| 352 | } |
| 353 | |
| 354 | out->state.flags = 0x0; |
| 355 | out->state.hangs = 0x0; |
| 356 | |
| 357 | if (ctx->reset_counter != atomic_read(&adev->gpu_reset_counter)) |
| 358 | out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RESET; |
| 359 | |
| 360 | if (ctx->vram_lost_counter != atomic_read(&adev->vram_lost_counter)) |
| 361 | out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_VRAMLOST; |
| 362 | |
| 363 | if (atomic_read(&ctx->guilty)) |
| 364 | out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_GUILTY; |
| 365 | |
| 366 | mutex_unlock(&mgr->lock); |
| 367 | return 0; |
| 368 | } |
| 369 | |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 370 | int amdgpu_ctx_ioctl(struct drm_device *dev, void *data, |
Marek Olšák | d94aed5 | 2015-05-05 21:13:49 +0200 | [diff] [blame] | 371 | struct drm_file *filp) |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 372 | { |
| 373 | int r; |
| 374 | uint32_t id; |
Lucas Stach | 1b1f42d | 2017-12-06 17:49:39 +0100 | [diff] [blame] | 375 | enum drm_sched_priority priority; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 376 | |
| 377 | union drm_amdgpu_ctx *args = data; |
| 378 | struct amdgpu_device *adev = dev->dev_private; |
| 379 | struct amdgpu_fpriv *fpriv = filp->driver_priv; |
| 380 | |
| 381 | r = 0; |
| 382 | id = args->in.ctx_id; |
Andres Rodriguez | c2636dc | 2016-12-22 17:06:50 -0500 | [diff] [blame] | 383 | priority = amdgpu_to_sched_priority(args->in.priority); |
| 384 | |
Andres Rodriguez | b6d8a43 | 2017-05-24 17:00:10 -0400 | [diff] [blame] | 385 | /* For backwards compatibility reasons, we need to accept |
| 386 | * ioctls with garbage in the priority field */ |
Lucas Stach | 1b1f42d | 2017-12-06 17:49:39 +0100 | [diff] [blame] | 387 | if (priority == DRM_SCHED_PRIORITY_INVALID) |
| 388 | priority = DRM_SCHED_PRIORITY_NORMAL; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 389 | |
| 390 | switch (args->in.op) { |
Christian König | a750b47 | 2016-02-11 10:20:53 +0100 | [diff] [blame] | 391 | case AMDGPU_CTX_OP_ALLOC_CTX: |
Andres Rodriguez | c2636dc | 2016-12-22 17:06:50 -0500 | [diff] [blame] | 392 | r = amdgpu_ctx_alloc(adev, fpriv, filp, priority, &id); |
Christian König | a750b47 | 2016-02-11 10:20:53 +0100 | [diff] [blame] | 393 | args->out.alloc.ctx_id = id; |
| 394 | break; |
| 395 | case AMDGPU_CTX_OP_FREE_CTX: |
| 396 | r = amdgpu_ctx_free(fpriv, id); |
| 397 | break; |
| 398 | case AMDGPU_CTX_OP_QUERY_STATE: |
| 399 | r = amdgpu_ctx_query(adev, fpriv, id, &args->out); |
| 400 | break; |
Monk Liu | bc1b1bf | 2017-10-17 14:58:01 +0800 | [diff] [blame] | 401 | case AMDGPU_CTX_OP_QUERY_STATE2: |
| 402 | r = amdgpu_ctx_query2(adev, fpriv, id, &args->out); |
| 403 | break; |
Christian König | a750b47 | 2016-02-11 10:20:53 +0100 | [diff] [blame] | 404 | default: |
| 405 | return -EINVAL; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 406 | } |
| 407 | |
| 408 | return r; |
| 409 | } |
Jammy Zhou | 66b3cf2 | 2015-05-08 17:29:40 +0800 | [diff] [blame] | 410 | |
| 411 | struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id) |
| 412 | { |
| 413 | struct amdgpu_ctx *ctx; |
Chunming Zhou | 23ca0e4 | 2015-07-06 13:42:58 +0800 | [diff] [blame] | 414 | struct amdgpu_ctx_mgr *mgr; |
| 415 | |
| 416 | if (!fpriv) |
| 417 | return NULL; |
| 418 | |
| 419 | mgr = &fpriv->ctx_mgr; |
Jammy Zhou | 66b3cf2 | 2015-05-08 17:29:40 +0800 | [diff] [blame] | 420 | |
| 421 | mutex_lock(&mgr->lock); |
| 422 | ctx = idr_find(&mgr->ctx_handles, id); |
| 423 | if (ctx) |
| 424 | kref_get(&ctx->refcount); |
| 425 | mutex_unlock(&mgr->lock); |
| 426 | return ctx; |
| 427 | } |
| 428 | |
| 429 | int amdgpu_ctx_put(struct amdgpu_ctx *ctx) |
| 430 | { |
Jammy Zhou | 66b3cf2 | 2015-05-08 17:29:40 +0800 | [diff] [blame] | 431 | if (ctx == NULL) |
| 432 | return -EINVAL; |
| 433 | |
Jammy Zhou | 66b3cf2 | 2015-05-08 17:29:40 +0800 | [diff] [blame] | 434 | kref_put(&ctx->refcount, amdgpu_ctx_do_release); |
Jammy Zhou | 66b3cf2 | 2015-05-08 17:29:40 +0800 | [diff] [blame] | 435 | return 0; |
| 436 | } |
Christian König | 21c16bf | 2015-07-07 17:24:49 +0200 | [diff] [blame] | 437 | |
/* Store @fence in @entity's fence ring at the current sequence slot and
 * advance the sequence.  Optionally returns the sequence number assigned
 * to the fence through *@handle. */
void amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx,
			  struct drm_sched_entity *entity,
			  struct dma_fence *fence, uint64_t* handle)
{
	struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
	/* NOTE(review): sequence and the old slot are read before taking
	 * ring_lock — presumably safe because only the submission path
	 * advances this entity's sequence; confirm. */
	uint64_t seq = centity->sequence;
	struct dma_fence *other = NULL;
	unsigned idx = 0;

	/* amdgpu_sched_jobs is a power of two, so masking == modulo. */
	idx = seq & (amdgpu_sched_jobs - 1);
	other = centity->fences[idx];
	/* The slot being reused must already be signaled, otherwise the
	 * ring has wrapped onto an in-flight job. */
	if (other)
		BUG_ON(!dma_fence_is_signaled(other));

	/* Take a reference for the slot before publishing it. */
	dma_fence_get(fence);

	spin_lock(&ctx->ring_lock);
	centity->fences[idx] = fence;
	centity->sequence++;
	spin_unlock(&ctx->ring_lock);

	/* Drop the reference held by the slot we just overwrote. */
	dma_fence_put(other);
	if (handle)
		*handle = seq;
}
| 463 | |
/* Look up the fence with sequence number @seq on @entity.
 *
 * @seq == ~0ull means "the most recently submitted fence".
 * Returns a referenced fence, NULL if the sequence is so old that its
 * slot has been recycled (i.e. it signaled long ago), or
 * ERR_PTR(-EINVAL) if @seq has not been submitted yet. */
struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
				       struct drm_sched_entity *entity,
				       uint64_t seq)
{
	struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
	struct dma_fence *fence;

	spin_lock(&ctx->ring_lock);

	/* ~0ull selects the last fence actually stored (sequence points
	 * at the NEXT free slot). */
	if (seq == ~0ull)
		seq = centity->sequence - 1;

	/* Sequence from the future: not submitted yet. */
	if (seq >= centity->sequence) {
		spin_unlock(&ctx->ring_lock);
		return ERR_PTR(-EINVAL);
	}


	/* Older than the ring window: slot was recycled, fence long done. */
	if (seq + amdgpu_sched_jobs < centity->sequence) {
		spin_unlock(&ctx->ring_lock);
		return NULL;
	}

	fence = dma_fence_get(centity->fences[seq & (amdgpu_sched_jobs - 1)]);
	spin_unlock(&ctx->ring_lock);

	return fence;
}
Christian König | efd4ccb | 2015-08-04 16:20:31 +0200 | [diff] [blame] | 492 | |
Andres Rodriguez | c23be4a | 2017-06-06 20:20:38 -0400 | [diff] [blame] | 493 | void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx, |
Lucas Stach | 1b1f42d | 2017-12-06 17:49:39 +0100 | [diff] [blame] | 494 | enum drm_sched_priority priority) |
Andres Rodriguez | c23be4a | 2017-06-06 20:20:38 -0400 | [diff] [blame] | 495 | { |
Christian König | 1b1f2fe | 2018-08-01 16:00:52 +0200 | [diff] [blame] | 496 | unsigned num_entities = amdgput_ctx_total_num_entities(); |
Lucas Stach | 1b1f42d | 2017-12-06 17:49:39 +0100 | [diff] [blame] | 497 | enum drm_sched_priority ctx_prio; |
Christian König | 1b1f2fe | 2018-08-01 16:00:52 +0200 | [diff] [blame] | 498 | unsigned i; |
Andres Rodriguez | c23be4a | 2017-06-06 20:20:38 -0400 | [diff] [blame] | 499 | |
| 500 | ctx->override_priority = priority; |
| 501 | |
Lucas Stach | 1b1f42d | 2017-12-06 17:49:39 +0100 | [diff] [blame] | 502 | ctx_prio = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ? |
Andres Rodriguez | c23be4a | 2017-06-06 20:20:38 -0400 | [diff] [blame] | 503 | ctx->init_priority : ctx->override_priority; |
| 504 | |
Christian König | 1b1f2fe | 2018-08-01 16:00:52 +0200 | [diff] [blame] | 505 | for (i = 0; i < num_entities; i++) { |
| 506 | struct drm_sched_entity *entity = &ctx->entities[0][i].entity; |
Andres Rodriguez | c23be4a | 2017-06-06 20:20:38 -0400 | [diff] [blame] | 507 | |
Christian König | 7febe4b | 2018-08-01 16:22:39 +0200 | [diff] [blame] | 508 | drm_sched_entity_set_priority(entity, ctx_prio); |
Andres Rodriguez | c23be4a | 2017-06-06 20:20:38 -0400 | [diff] [blame] | 509 | } |
| 510 | } |
| 511 | |
/* Throttle submissions: wait (interruptibly) for the fence occupying the
 * ring slot the next submission will reuse.  Returns 0 when the slot is
 * free or its fence has signaled, or a negative error from the wait
 * (e.g. -ERESTARTSYS on signal). */
int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx,
			       struct drm_sched_entity *entity)
{
	struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
	/* Slot that the NEXT fence will be stored in. */
	unsigned idx = centity->sequence & (amdgpu_sched_jobs - 1);
	struct dma_fence *other = centity->fences[idx];

	if (other) {
		signed long r;
		r = dma_fence_wait(other, true);
		if (r < 0) {
			/* -ERESTARTSYS is routine (signal delivery); only
			 * log genuine failures. */
			if (r != -ERESTARTSYS)
				DRM_ERROR("Error (%ld) waiting for fence!\n", r);

			return r;
		}
	}

	return 0;
}
| 532 | |
Christian König | efd4ccb | 2015-08-04 16:20:31 +0200 | [diff] [blame] | 533 | void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr) |
| 534 | { |
| 535 | mutex_init(&mgr->lock); |
| 536 | idr_init(&mgr->ctx_handles); |
| 537 | } |
| 538 | |
/* Flush all scheduler entities of every context owned by @mgr, giving
 * each a shrinking share of MAX_WAIT_SCHED_ENTITY_Q_EMPTY to drain its
 * job queue.  Called on file close before the entities are destroyed. */
void amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr)
{
	unsigned num_entities = amdgput_ctx_total_num_entities();
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id, i;
	long max_wait = MAX_WAIT_SCHED_ENTITY_Q_EMPTY;

	idp = &mgr->ctx_handles;

	mutex_lock(&mgr->lock);
	idr_for_each_entry(idp, ctx, id) {

		/* adev cleared means the device is being torn down; bail
		 * out (remember to drop the lock). */
		if (!ctx->adev) {
			mutex_unlock(&mgr->lock);
			return;
		}

		for (i = 0; i < num_entities; i++) {
			struct drm_sched_entity *entity;

			entity = &ctx->entities[0][i].entity;
			/* drm_sched_entity_flush returns the remaining
			 * timeout, so later entities wait less. */
			max_wait = drm_sched_entity_flush(entity, max_wait);
		}
	}
	mutex_unlock(&mgr->lock);
}
| 566 | |
Andrey Grodzovsky | c49d828 | 2018-06-05 12:56:26 -0400 | [diff] [blame] | 567 | void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr) |
Emily Deng | 8ee3a52 | 2018-04-16 10:07:02 +0800 | [diff] [blame] | 568 | { |
Christian König | 1b1f2fe | 2018-08-01 16:00:52 +0200 | [diff] [blame] | 569 | unsigned num_entities = amdgput_ctx_total_num_entities(); |
Emily Deng | 8ee3a52 | 2018-04-16 10:07:02 +0800 | [diff] [blame] | 570 | struct amdgpu_ctx *ctx; |
| 571 | struct idr *idp; |
| 572 | uint32_t id, i; |
| 573 | |
| 574 | idp = &mgr->ctx_handles; |
| 575 | |
| 576 | idr_for_each_entry(idp, ctx, id) { |
| 577 | |
| 578 | if (!ctx->adev) |
| 579 | return; |
| 580 | |
Christian König | 1b1f2fe | 2018-08-01 16:00:52 +0200 | [diff] [blame] | 581 | if (kref_read(&ctx->refcount) != 1) { |
| 582 | DRM_ERROR("ctx %p is still alive\n", ctx); |
| 583 | continue; |
Andrey Grodzovsky | 20b6b78 | 2018-05-15 14:12:21 -0400 | [diff] [blame] | 584 | } |
Christian König | 1b1f2fe | 2018-08-01 16:00:52 +0200 | [diff] [blame] | 585 | |
| 586 | for (i = 0; i < num_entities; i++) |
| 587 | drm_sched_entity_fini(&ctx->entities[0][i].entity); |
Emily Deng | 8ee3a52 | 2018-04-16 10:07:02 +0800 | [diff] [blame] | 588 | } |
| 589 | } |
| 590 | |
Christian König | efd4ccb | 2015-08-04 16:20:31 +0200 | [diff] [blame] | 591 | void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr) |
| 592 | { |
| 593 | struct amdgpu_ctx *ctx; |
| 594 | struct idr *idp; |
| 595 | uint32_t id; |
| 596 | |
Andrey Grodzovsky | c49d828 | 2018-06-05 12:56:26 -0400 | [diff] [blame] | 597 | amdgpu_ctx_mgr_entity_fini(mgr); |
Emily Deng | 8ee3a52 | 2018-04-16 10:07:02 +0800 | [diff] [blame] | 598 | |
Christian König | efd4ccb | 2015-08-04 16:20:31 +0200 | [diff] [blame] | 599 | idp = &mgr->ctx_handles; |
| 600 | |
| 601 | idr_for_each_entry(idp, ctx, id) { |
Emily Deng | 8ee3a52 | 2018-04-16 10:07:02 +0800 | [diff] [blame] | 602 | if (kref_put(&ctx->refcount, amdgpu_ctx_fini) != 1) |
Christian König | efd4ccb | 2015-08-04 16:20:31 +0200 | [diff] [blame] | 603 | DRM_ERROR("ctx %p is still alive\n", ctx); |
| 604 | } |
| 605 | |
| 606 | idr_destroy(&mgr->ctx_handles); |
| 607 | mutex_destroy(&mgr->lock); |
| 608 | } |