Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1 | /* |
| 2 | * Copyright 2015 Advanced Micro Devices, Inc. |
| 3 | * |
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a |
| 5 | * copy of this software and associated documentation files (the "Software"), |
| 6 | * to deal in the Software without restriction, including without limitation |
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
| 8 | * and/or sell copies of the Software, and to permit persons to whom the |
| 9 | * Software is furnished to do so, subject to the following conditions: |
| 10 | * |
| 11 | * The above copyright notice and this permission notice shall be included in |
| 12 | * all copies or substantial portions of the Software. |
| 13 | * |
| 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
| 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
| 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
| 17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
| 18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
| 19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
| 20 | * OTHER DEALINGS IN THE SOFTWARE. |
| 21 | * |
| 22 | * Authors: monk liu <monk.liu@amd.com> |
| 23 | */ |
| 24 | |
| 25 | #include <drm/drmP.h> |
Andres Rodriguez | c2636dc | 2016-12-22 17:06:50 -0500 | [diff] [blame] | 26 | #include <drm/drm_auth.h> |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 27 | #include "amdgpu.h" |
Andres Rodriguez | 52c6a62 | 2017-06-26 16:17:13 -0400 | [diff] [blame] | 28 | #include "amdgpu_sched.h" |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 29 | |
/* Map an embedded drm_sched_entity back to its amdgpu_ctx_entity container. */
#define to_amdgpu_ctx_entity(e)	\
	container_of((e), struct amdgpu_ctx_entity, entity)
| 32 | |
/* Number of scheduler entities (userspace-visible rings) each context
 * exposes per hardware IP type.  amdgpu_ctx_init() allocates the per-IP
 * entity arrays from these counts. */
const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = {
	[AMDGPU_HW_IP_GFX]	=	1,
	[AMDGPU_HW_IP_COMPUTE]	=	4,
	[AMDGPU_HW_IP_DMA]	=	2,
	[AMDGPU_HW_IP_UVD]	=	1,
	[AMDGPU_HW_IP_VCE]	=	1,
	[AMDGPU_HW_IP_UVD_ENC]	=	1,
	[AMDGPU_HW_IP_VCN_DEC]	=	1,
	[AMDGPU_HW_IP_VCN_ENC]	=	1,
};
| 43 | |
| 44 | static int amdgput_ctx_total_num_entities(void) |
| 45 | { |
| 46 | unsigned i, num_entities = 0; |
| 47 | |
| 48 | for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) |
| 49 | num_entities += amdgpu_ctx_num_entities[i]; |
| 50 | |
| 51 | return num_entities; |
| 52 | } |
Christian König | 0d346a1 | 2018-07-19 14:22:25 +0200 | [diff] [blame] | 53 | |
Andres Rodriguez | c2636dc | 2016-12-22 17:06:50 -0500 | [diff] [blame] | 54 | static int amdgpu_ctx_priority_permit(struct drm_file *filp, |
Lucas Stach | 1b1f42d | 2017-12-06 17:49:39 +0100 | [diff] [blame] | 55 | enum drm_sched_priority priority) |
Andres Rodriguez | c2636dc | 2016-12-22 17:06:50 -0500 | [diff] [blame] | 56 | { |
| 57 | /* NORMAL and below are accessible by everyone */ |
Lucas Stach | 1b1f42d | 2017-12-06 17:49:39 +0100 | [diff] [blame] | 58 | if (priority <= DRM_SCHED_PRIORITY_NORMAL) |
Andres Rodriguez | c2636dc | 2016-12-22 17:06:50 -0500 | [diff] [blame] | 59 | return 0; |
| 60 | |
| 61 | if (capable(CAP_SYS_NICE)) |
| 62 | return 0; |
| 63 | |
| 64 | if (drm_is_current_master(filp)) |
| 65 | return 0; |
| 66 | |
| 67 | return -EACCES; |
| 68 | } |
| 69 | |
| 70 | static int amdgpu_ctx_init(struct amdgpu_device *adev, |
Lucas Stach | 1b1f42d | 2017-12-06 17:49:39 +0100 | [diff] [blame] | 71 | enum drm_sched_priority priority, |
Andres Rodriguez | c2636dc | 2016-12-22 17:06:50 -0500 | [diff] [blame] | 72 | struct drm_file *filp, |
| 73 | struct amdgpu_ctx *ctx) |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 74 | { |
Christian König | 1b1f2fe | 2018-08-01 16:00:52 +0200 | [diff] [blame^] | 75 | unsigned num_entities = amdgput_ctx_total_num_entities(); |
| 76 | unsigned i, j; |
Christian König | 47f3850 | 2015-08-04 17:51:05 +0200 | [diff] [blame] | 77 | int r; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 78 | |
Lucas Stach | 1b1f42d | 2017-12-06 17:49:39 +0100 | [diff] [blame] | 79 | if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX) |
Andres Rodriguez | c2636dc | 2016-12-22 17:06:50 -0500 | [diff] [blame] | 80 | return -EINVAL; |
| 81 | |
| 82 | r = amdgpu_ctx_priority_permit(filp, priority); |
| 83 | if (r) |
| 84 | return r; |
| 85 | |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 86 | memset(ctx, 0, sizeof(*ctx)); |
Chunming Zhou | 9cb7e5a | 2015-07-21 13:17:19 +0800 | [diff] [blame] | 87 | ctx->adev = adev; |
Christian König | 1b1f2fe | 2018-08-01 16:00:52 +0200 | [diff] [blame^] | 88 | |
| 89 | ctx->fences = kcalloc(amdgpu_sched_jobs * num_entities, |
Chris Wilson | f54d186 | 2016-10-25 13:00:45 +0100 | [diff] [blame] | 90 | sizeof(struct dma_fence*), GFP_KERNEL); |
Chunming Zhou | 37cd0ca | 2015-12-10 15:45:11 +0800 | [diff] [blame] | 91 | if (!ctx->fences) |
| 92 | return -ENOMEM; |
Chunming Zhou | 23ca0e4 | 2015-07-06 13:42:58 +0800 | [diff] [blame] | 93 | |
Christian König | 1b1f2fe | 2018-08-01 16:00:52 +0200 | [diff] [blame^] | 94 | ctx->entities[0] = kcalloc(num_entities, |
| 95 | sizeof(struct amdgpu_ctx_entity), |
| 96 | GFP_KERNEL); |
| 97 | if (!ctx->entities[0]) { |
| 98 | r = -ENOMEM; |
| 99 | goto error_free_fences; |
Chunming Zhou | 37cd0ca | 2015-12-10 15:45:11 +0800 | [diff] [blame] | 100 | } |
Nicolai Hähnle | ce199ad | 2016-10-04 09:43:30 +0200 | [diff] [blame] | 101 | |
Christian König | 1b1f2fe | 2018-08-01 16:00:52 +0200 | [diff] [blame^] | 102 | for (i = 0; i < num_entities; ++i) { |
| 103 | struct amdgpu_ctx_entity *entity = &ctx->entities[0][i]; |
| 104 | |
| 105 | entity->sequence = 1; |
| 106 | entity->fences = &ctx->fences[amdgpu_sched_jobs * i]; |
| 107 | } |
| 108 | for (i = 1; i < AMDGPU_HW_IP_NUM; ++i) |
| 109 | ctx->entities[i] = ctx->entities[i - 1] + |
| 110 | amdgpu_ctx_num_entities[i - 1]; |
| 111 | |
| 112 | kref_init(&ctx->refcount); |
| 113 | spin_lock_init(&ctx->ring_lock); |
| 114 | mutex_init(&ctx->lock); |
| 115 | |
Nicolai Hähnle | ce199ad | 2016-10-04 09:43:30 +0200 | [diff] [blame] | 116 | ctx->reset_counter = atomic_read(&adev->gpu_reset_counter); |
Monk Liu | 668ca1b | 2017-10-17 14:39:23 +0800 | [diff] [blame] | 117 | ctx->reset_counter_query = ctx->reset_counter; |
Christian König | e55f2b6 | 2017-10-09 15:18:43 +0200 | [diff] [blame] | 118 | ctx->vram_lost_counter = atomic_read(&adev->vram_lost_counter); |
Andres Rodriguez | c23be4a | 2017-06-06 20:20:38 -0400 | [diff] [blame] | 119 | ctx->init_priority = priority; |
Lucas Stach | 1b1f42d | 2017-12-06 17:49:39 +0100 | [diff] [blame] | 120 | ctx->override_priority = DRM_SCHED_PRIORITY_UNSET; |
Nicolai Hähnle | ce199ad | 2016-10-04 09:43:30 +0200 | [diff] [blame] | 121 | |
Christian König | 1b1f2fe | 2018-08-01 16:00:52 +0200 | [diff] [blame^] | 122 | for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) { |
| 123 | struct amdgpu_ring *rings[AMDGPU_MAX_RINGS]; |
| 124 | struct drm_sched_rq *rqs[AMDGPU_MAX_RINGS]; |
| 125 | unsigned num_rings; |
Christian König | 2087417 | 2016-02-11 09:56:44 +0100 | [diff] [blame] | 126 | |
Christian König | 1b1f2fe | 2018-08-01 16:00:52 +0200 | [diff] [blame^] | 127 | switch (i) { |
| 128 | case AMDGPU_HW_IP_GFX: |
| 129 | rings[0] = &adev->gfx.gfx_ring[0]; |
| 130 | num_rings = 1; |
| 131 | break; |
| 132 | case AMDGPU_HW_IP_COMPUTE: |
| 133 | for (j = 0; j < adev->gfx.num_compute_rings; ++j) |
| 134 | rings[j] = &adev->gfx.compute_ring[j]; |
| 135 | num_rings = adev->gfx.num_compute_rings; |
| 136 | break; |
| 137 | case AMDGPU_HW_IP_DMA: |
| 138 | for (j = 0; j < adev->sdma.num_instances; ++j) |
| 139 | rings[j] = &adev->sdma.instance[j].ring; |
| 140 | num_rings = adev->sdma.num_instances; |
| 141 | break; |
| 142 | case AMDGPU_HW_IP_UVD: |
| 143 | rings[0] = &adev->uvd.inst[0].ring; |
| 144 | num_rings = 1; |
| 145 | break; |
| 146 | case AMDGPU_HW_IP_VCE: |
| 147 | rings[0] = &adev->vce.ring[0]; |
| 148 | num_rings = 1; |
| 149 | break; |
| 150 | case AMDGPU_HW_IP_UVD_ENC: |
| 151 | rings[0] = &adev->uvd.inst[0].ring_enc[0]; |
| 152 | num_rings = 1; |
| 153 | break; |
| 154 | case AMDGPU_HW_IP_VCN_DEC: |
| 155 | rings[0] = &adev->vcn.ring_dec; |
| 156 | num_rings = 1; |
| 157 | break; |
| 158 | case AMDGPU_HW_IP_VCN_ENC: |
| 159 | rings[0] = &adev->vcn.ring_enc[0]; |
| 160 | num_rings = 1; |
| 161 | break; |
| 162 | case AMDGPU_HW_IP_VCN_JPEG: |
| 163 | rings[0] = &adev->vcn.ring_jpeg; |
| 164 | num_rings = 1; |
| 165 | break; |
Christian König | 845e6fd | 2018-07-13 09:12:44 +0200 | [diff] [blame] | 166 | } |
Christian König | 1b1f2fe | 2018-08-01 16:00:52 +0200 | [diff] [blame^] | 167 | |
| 168 | for (j = 0; j < num_rings; ++j) |
| 169 | rqs[j] = &rings[j]->sched.sched_rq[priority]; |
| 170 | |
| 171 | for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) |
| 172 | r = drm_sched_entity_init(&ctx->entities[i][j].entity, |
| 173 | rqs, num_rings, &ctx->guilty); |
Chunming Zhou | cadf97b | 2016-01-15 11:25:00 +0800 | [diff] [blame] | 174 | if (r) |
Christian König | 1b1f2fe | 2018-08-01 16:00:52 +0200 | [diff] [blame^] | 175 | goto error_cleanup_entities; |
Chunming Zhou | cadf97b | 2016-01-15 11:25:00 +0800 | [diff] [blame] | 176 | } |
| 177 | |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 178 | return 0; |
Huang Rui | 8ed8147 | 2016-10-26 17:07:03 +0800 | [diff] [blame] | 179 | |
Christian König | 1b1f2fe | 2018-08-01 16:00:52 +0200 | [diff] [blame^] | 180 | error_cleanup_entities: |
| 181 | for (i = 0; i < num_entities; ++i) |
| 182 | drm_sched_entity_destroy(&ctx->entities[0][i].entity); |
| 183 | kfree(ctx->entities[0]); |
| 184 | |
| 185 | error_free_fences: |
Huang Rui | 8ed8147 | 2016-10-26 17:07:03 +0800 | [diff] [blame] | 186 | kfree(ctx->fences); |
| 187 | ctx->fences = NULL; |
| 188 | return r; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 189 | } |
| 190 | |
/**
 * amdgpu_ctx_fini - final teardown of a context's memory
 * @ref: the refcount embedded in the amdgpu_ctx
 *
 * Drops every fence reference still held in the per-entity fence rings,
 * frees the flat fence and entity arrays allocated in amdgpu_ctx_init()
 * and finally frees the context itself.  The scheduler entities must
 * already have been destroyed by the caller (amdgpu_ctx_do_release()).
 */
static void amdgpu_ctx_fini(struct kref *ref)
{
	struct amdgpu_ctx *ctx = container_of(ref, struct amdgpu_ctx, refcount);
	unsigned num_entities = amdgput_ctx_total_num_entities();
	struct amdgpu_device *adev = ctx->adev;
	unsigned i, j;

	/* NOTE(review): returning here leaves ctx itself unfreed —
	 * presumably only reachable before init completed; confirm. */
	if (!adev)
		return;

	/* entities[0] is one flat array covering all IPs; each entity owns
	 * amdgpu_sched_jobs fence slots inside ctx->fences. */
	for (i = 0; i < num_entities; ++i)
		for (j = 0; j < amdgpu_sched_jobs; ++j)
			dma_fence_put(ctx->entities[0][i].fences[j]);
	kfree(ctx->fences);
	kfree(ctx->entities[0]);

	mutex_destroy(&ctx->lock);

	kfree(ctx);
}
| 211 | |
Christian König | 0d346a1 | 2018-07-19 14:22:25 +0200 | [diff] [blame] | 212 | int amdgpu_ctx_get_entity(struct amdgpu_ctx *ctx, u32 hw_ip, u32 instance, |
| 213 | u32 ring, struct drm_sched_entity **entity) |
Christian König | 869a53d | 2018-07-16 15:19:20 +0200 | [diff] [blame] | 214 | { |
Christian König | 1b1f2fe | 2018-08-01 16:00:52 +0200 | [diff] [blame^] | 215 | if (hw_ip >= AMDGPU_HW_IP_NUM) { |
| 216 | DRM_ERROR("unknown HW IP type: %d\n", hw_ip); |
| 217 | return -EINVAL; |
| 218 | } |
Christian König | 869a53d | 2018-07-16 15:19:20 +0200 | [diff] [blame] | 219 | |
| 220 | /* Right now all IPs have only one instance - multiple rings. */ |
| 221 | if (instance != 0) { |
| 222 | DRM_DEBUG("invalid ip instance: %d\n", instance); |
| 223 | return -EINVAL; |
| 224 | } |
| 225 | |
Christian König | 1b1f2fe | 2018-08-01 16:00:52 +0200 | [diff] [blame^] | 226 | if (ring >= amdgpu_ctx_num_entities[hw_ip]) { |
| 227 | DRM_DEBUG("invalid ring: %d %d\n", hw_ip, ring); |
Christian König | 869a53d | 2018-07-16 15:19:20 +0200 | [diff] [blame] | 228 | return -EINVAL; |
| 229 | } |
| 230 | |
Christian König | 1b1f2fe | 2018-08-01 16:00:52 +0200 | [diff] [blame^] | 231 | *entity = &ctx->entities[hw_ip][ring].entity; |
Christian König | 869a53d | 2018-07-16 15:19:20 +0200 | [diff] [blame] | 232 | return 0; |
| 233 | } |
| 234 | |
Christian König | 47f3850 | 2015-08-04 17:51:05 +0200 | [diff] [blame] | 235 | static int amdgpu_ctx_alloc(struct amdgpu_device *adev, |
| 236 | struct amdgpu_fpriv *fpriv, |
Andres Rodriguez | c2636dc | 2016-12-22 17:06:50 -0500 | [diff] [blame] | 237 | struct drm_file *filp, |
Lucas Stach | 1b1f42d | 2017-12-06 17:49:39 +0100 | [diff] [blame] | 238 | enum drm_sched_priority priority, |
Christian König | 47f3850 | 2015-08-04 17:51:05 +0200 | [diff] [blame] | 239 | uint32_t *id) |
| 240 | { |
| 241 | struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr; |
| 242 | struct amdgpu_ctx *ctx; |
| 243 | int r; |
| 244 | |
| 245 | ctx = kmalloc(sizeof(*ctx), GFP_KERNEL); |
| 246 | if (!ctx) |
| 247 | return -ENOMEM; |
| 248 | |
| 249 | mutex_lock(&mgr->lock); |
| 250 | r = idr_alloc(&mgr->ctx_handles, ctx, 1, 0, GFP_KERNEL); |
| 251 | if (r < 0) { |
| 252 | mutex_unlock(&mgr->lock); |
| 253 | kfree(ctx); |
| 254 | return r; |
| 255 | } |
Andres Rodriguez | c2636dc | 2016-12-22 17:06:50 -0500 | [diff] [blame] | 256 | |
Christian König | 47f3850 | 2015-08-04 17:51:05 +0200 | [diff] [blame] | 257 | *id = (uint32_t)r; |
Andres Rodriguez | c2636dc | 2016-12-22 17:06:50 -0500 | [diff] [blame] | 258 | r = amdgpu_ctx_init(adev, priority, filp, ctx); |
Chunming Zhou | c648ed7 | 2015-12-10 15:50:02 +0800 | [diff] [blame] | 259 | if (r) { |
| 260 | idr_remove(&mgr->ctx_handles, *id); |
| 261 | *id = 0; |
| 262 | kfree(ctx); |
| 263 | } |
Christian König | 47f3850 | 2015-08-04 17:51:05 +0200 | [diff] [blame] | 264 | mutex_unlock(&mgr->lock); |
Christian König | 47f3850 | 2015-08-04 17:51:05 +0200 | [diff] [blame] | 265 | return r; |
| 266 | } |
| 267 | |
| 268 | static void amdgpu_ctx_do_release(struct kref *ref) |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 269 | { |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 270 | struct amdgpu_ctx *ctx; |
Christian König | 1b1f2fe | 2018-08-01 16:00:52 +0200 | [diff] [blame^] | 271 | unsigned num_entities; |
Emily Deng | 8ee3a52 | 2018-04-16 10:07:02 +0800 | [diff] [blame] | 272 | u32 i; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 273 | |
Christian König | 47f3850 | 2015-08-04 17:51:05 +0200 | [diff] [blame] | 274 | ctx = container_of(ref, struct amdgpu_ctx, refcount); |
| 275 | |
Christian König | 1b1f2fe | 2018-08-01 16:00:52 +0200 | [diff] [blame^] | 276 | num_entities = 0; |
| 277 | for (i = 0; i < AMDGPU_HW_IP_NUM; i++) |
| 278 | num_entities += amdgpu_ctx_num_entities[i]; |
Andrey Grodzovsky | 20b6b78 | 2018-05-15 14:12:21 -0400 | [diff] [blame] | 279 | |
Christian König | 1b1f2fe | 2018-08-01 16:00:52 +0200 | [diff] [blame^] | 280 | for (i = 0; i < num_entities; i++) |
| 281 | drm_sched_entity_destroy(&ctx->entities[0][i].entity); |
Christian König | 47f3850 | 2015-08-04 17:51:05 +0200 | [diff] [blame] | 282 | |
Emily Deng | 8ee3a52 | 2018-04-16 10:07:02 +0800 | [diff] [blame] | 283 | amdgpu_ctx_fini(ref); |
Christian König | 47f3850 | 2015-08-04 17:51:05 +0200 | [diff] [blame] | 284 | } |
| 285 | |
| 286 | static int amdgpu_ctx_free(struct amdgpu_fpriv *fpriv, uint32_t id) |
| 287 | { |
| 288 | struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr; |
| 289 | struct amdgpu_ctx *ctx; |
| 290 | |
| 291 | mutex_lock(&mgr->lock); |
Matthew Wilcox | d3e709e | 2016-12-22 13:30:22 -0500 | [diff] [blame] | 292 | ctx = idr_remove(&mgr->ctx_handles, id); |
| 293 | if (ctx) |
Chunming Zhou | 23ca0e4 | 2015-07-06 13:42:58 +0800 | [diff] [blame] | 294 | kref_put(&ctx->refcount, amdgpu_ctx_do_release); |
Christian König | 47f3850 | 2015-08-04 17:51:05 +0200 | [diff] [blame] | 295 | mutex_unlock(&mgr->lock); |
Matthew Wilcox | d3e709e | 2016-12-22 13:30:22 -0500 | [diff] [blame] | 296 | return ctx ? 0 : -EINVAL; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 297 | } |
| 298 | |
Marek Olšák | d94aed5 | 2015-05-05 21:13:49 +0200 | [diff] [blame] | 299 | static int amdgpu_ctx_query(struct amdgpu_device *adev, |
| 300 | struct amdgpu_fpriv *fpriv, uint32_t id, |
| 301 | union drm_amdgpu_ctx_out *out) |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 302 | { |
| 303 | struct amdgpu_ctx *ctx; |
Chunming Zhou | 23ca0e4 | 2015-07-06 13:42:58 +0800 | [diff] [blame] | 304 | struct amdgpu_ctx_mgr *mgr; |
Marek Olšák | d94aed5 | 2015-05-05 21:13:49 +0200 | [diff] [blame] | 305 | unsigned reset_counter; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 306 | |
Chunming Zhou | 23ca0e4 | 2015-07-06 13:42:58 +0800 | [diff] [blame] | 307 | if (!fpriv) |
| 308 | return -EINVAL; |
| 309 | |
| 310 | mgr = &fpriv->ctx_mgr; |
Marek Olšák | 0147ee0 | 2015-05-05 20:52:00 +0200 | [diff] [blame] | 311 | mutex_lock(&mgr->lock); |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 312 | ctx = idr_find(&mgr->ctx_handles, id); |
Marek Olšák | d94aed5 | 2015-05-05 21:13:49 +0200 | [diff] [blame] | 313 | if (!ctx) { |
Marek Olšák | 0147ee0 | 2015-05-05 20:52:00 +0200 | [diff] [blame] | 314 | mutex_unlock(&mgr->lock); |
Marek Olšák | d94aed5 | 2015-05-05 21:13:49 +0200 | [diff] [blame] | 315 | return -EINVAL; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 316 | } |
Marek Olšák | d94aed5 | 2015-05-05 21:13:49 +0200 | [diff] [blame] | 317 | |
| 318 | /* TODO: these two are always zero */ |
Alex Deucher | 0b492a4 | 2015-08-16 22:48:26 -0400 | [diff] [blame] | 319 | out->state.flags = 0x0; |
| 320 | out->state.hangs = 0x0; |
Marek Olšák | d94aed5 | 2015-05-05 21:13:49 +0200 | [diff] [blame] | 321 | |
| 322 | /* determine if a GPU reset has occured since the last call */ |
| 323 | reset_counter = atomic_read(&adev->gpu_reset_counter); |
| 324 | /* TODO: this should ideally return NO, GUILTY, or INNOCENT. */ |
Monk Liu | 668ca1b | 2017-10-17 14:39:23 +0800 | [diff] [blame] | 325 | if (ctx->reset_counter_query == reset_counter) |
Marek Olšák | d94aed5 | 2015-05-05 21:13:49 +0200 | [diff] [blame] | 326 | out->state.reset_status = AMDGPU_CTX_NO_RESET; |
| 327 | else |
| 328 | out->state.reset_status = AMDGPU_CTX_UNKNOWN_RESET; |
Monk Liu | 668ca1b | 2017-10-17 14:39:23 +0800 | [diff] [blame] | 329 | ctx->reset_counter_query = reset_counter; |
Marek Olšák | d94aed5 | 2015-05-05 21:13:49 +0200 | [diff] [blame] | 330 | |
Marek Olšák | 0147ee0 | 2015-05-05 20:52:00 +0200 | [diff] [blame] | 331 | mutex_unlock(&mgr->lock); |
Marek Olšák | d94aed5 | 2015-05-05 21:13:49 +0200 | [diff] [blame] | 332 | return 0; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 333 | } |
| 334 | |
Monk Liu | bc1b1bf | 2017-10-17 14:58:01 +0800 | [diff] [blame] | 335 | static int amdgpu_ctx_query2(struct amdgpu_device *adev, |
| 336 | struct amdgpu_fpriv *fpriv, uint32_t id, |
| 337 | union drm_amdgpu_ctx_out *out) |
| 338 | { |
| 339 | struct amdgpu_ctx *ctx; |
| 340 | struct amdgpu_ctx_mgr *mgr; |
| 341 | |
| 342 | if (!fpriv) |
| 343 | return -EINVAL; |
| 344 | |
| 345 | mgr = &fpriv->ctx_mgr; |
| 346 | mutex_lock(&mgr->lock); |
| 347 | ctx = idr_find(&mgr->ctx_handles, id); |
| 348 | if (!ctx) { |
| 349 | mutex_unlock(&mgr->lock); |
| 350 | return -EINVAL; |
| 351 | } |
| 352 | |
| 353 | out->state.flags = 0x0; |
| 354 | out->state.hangs = 0x0; |
| 355 | |
| 356 | if (ctx->reset_counter != atomic_read(&adev->gpu_reset_counter)) |
| 357 | out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RESET; |
| 358 | |
| 359 | if (ctx->vram_lost_counter != atomic_read(&adev->vram_lost_counter)) |
| 360 | out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_VRAMLOST; |
| 361 | |
| 362 | if (atomic_read(&ctx->guilty)) |
| 363 | out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_GUILTY; |
| 364 | |
| 365 | mutex_unlock(&mgr->lock); |
| 366 | return 0; |
| 367 | } |
| 368 | |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 369 | int amdgpu_ctx_ioctl(struct drm_device *dev, void *data, |
Marek Olšák | d94aed5 | 2015-05-05 21:13:49 +0200 | [diff] [blame] | 370 | struct drm_file *filp) |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 371 | { |
| 372 | int r; |
| 373 | uint32_t id; |
Lucas Stach | 1b1f42d | 2017-12-06 17:49:39 +0100 | [diff] [blame] | 374 | enum drm_sched_priority priority; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 375 | |
| 376 | union drm_amdgpu_ctx *args = data; |
| 377 | struct amdgpu_device *adev = dev->dev_private; |
| 378 | struct amdgpu_fpriv *fpriv = filp->driver_priv; |
| 379 | |
| 380 | r = 0; |
| 381 | id = args->in.ctx_id; |
Andres Rodriguez | c2636dc | 2016-12-22 17:06:50 -0500 | [diff] [blame] | 382 | priority = amdgpu_to_sched_priority(args->in.priority); |
| 383 | |
Andres Rodriguez | b6d8a43 | 2017-05-24 17:00:10 -0400 | [diff] [blame] | 384 | /* For backwards compatibility reasons, we need to accept |
| 385 | * ioctls with garbage in the priority field */ |
Lucas Stach | 1b1f42d | 2017-12-06 17:49:39 +0100 | [diff] [blame] | 386 | if (priority == DRM_SCHED_PRIORITY_INVALID) |
| 387 | priority = DRM_SCHED_PRIORITY_NORMAL; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 388 | |
| 389 | switch (args->in.op) { |
Christian König | a750b47 | 2016-02-11 10:20:53 +0100 | [diff] [blame] | 390 | case AMDGPU_CTX_OP_ALLOC_CTX: |
Andres Rodriguez | c2636dc | 2016-12-22 17:06:50 -0500 | [diff] [blame] | 391 | r = amdgpu_ctx_alloc(adev, fpriv, filp, priority, &id); |
Christian König | a750b47 | 2016-02-11 10:20:53 +0100 | [diff] [blame] | 392 | args->out.alloc.ctx_id = id; |
| 393 | break; |
| 394 | case AMDGPU_CTX_OP_FREE_CTX: |
| 395 | r = amdgpu_ctx_free(fpriv, id); |
| 396 | break; |
| 397 | case AMDGPU_CTX_OP_QUERY_STATE: |
| 398 | r = amdgpu_ctx_query(adev, fpriv, id, &args->out); |
| 399 | break; |
Monk Liu | bc1b1bf | 2017-10-17 14:58:01 +0800 | [diff] [blame] | 400 | case AMDGPU_CTX_OP_QUERY_STATE2: |
| 401 | r = amdgpu_ctx_query2(adev, fpriv, id, &args->out); |
| 402 | break; |
Christian König | a750b47 | 2016-02-11 10:20:53 +0100 | [diff] [blame] | 403 | default: |
| 404 | return -EINVAL; |
Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 405 | } |
| 406 | |
| 407 | return r; |
| 408 | } |
Jammy Zhou | 66b3cf2 | 2015-05-08 17:29:40 +0800 | [diff] [blame] | 409 | |
| 410 | struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id) |
| 411 | { |
| 412 | struct amdgpu_ctx *ctx; |
Chunming Zhou | 23ca0e4 | 2015-07-06 13:42:58 +0800 | [diff] [blame] | 413 | struct amdgpu_ctx_mgr *mgr; |
| 414 | |
| 415 | if (!fpriv) |
| 416 | return NULL; |
| 417 | |
| 418 | mgr = &fpriv->ctx_mgr; |
Jammy Zhou | 66b3cf2 | 2015-05-08 17:29:40 +0800 | [diff] [blame] | 419 | |
| 420 | mutex_lock(&mgr->lock); |
| 421 | ctx = idr_find(&mgr->ctx_handles, id); |
| 422 | if (ctx) |
| 423 | kref_get(&ctx->refcount); |
| 424 | mutex_unlock(&mgr->lock); |
| 425 | return ctx; |
| 426 | } |
| 427 | |
| 428 | int amdgpu_ctx_put(struct amdgpu_ctx *ctx) |
| 429 | { |
Jammy Zhou | 66b3cf2 | 2015-05-08 17:29:40 +0800 | [diff] [blame] | 430 | if (ctx == NULL) |
| 431 | return -EINVAL; |
| 432 | |
Jammy Zhou | 66b3cf2 | 2015-05-08 17:29:40 +0800 | [diff] [blame] | 433 | kref_put(&ctx->refcount, amdgpu_ctx_do_release); |
Jammy Zhou | 66b3cf2 | 2015-05-08 17:29:40 +0800 | [diff] [blame] | 434 | return 0; |
| 435 | } |
Christian König | 21c16bf | 2015-07-07 17:24:49 +0200 | [diff] [blame] | 436 | |
/**
 * amdgpu_ctx_add_fence - store a job's fence in the entity's fence ring
 * @ctx: context owning the entity
 * @entity: scheduler entity the job was submitted on
 * @fence: fence of the submitted job
 * @handle: optional out parameter, receives the assigned sequence number
 *
 * Takes a reference on @fence, installs it in the entity's fixed-size
 * fence ring and advances the sequence number.  The reference of the
 * fence previously occupying the slot is dropped after ring_lock is
 * released.  Always returns 0.
 */
int amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx,
			 struct drm_sched_entity *entity,
			 struct dma_fence *fence, uint64_t* handle)
{
	struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
	/* NOTE(review): sequence is read before taking ring_lock — this
	 * presumably relies on one submitter per entity; confirm. */
	uint64_t seq = centity->sequence;
	struct dma_fence *other = NULL;
	unsigned idx = 0;

	/* amdgpu_sched_jobs is a power of two, so this masks into the ring. */
	idx = seq & (amdgpu_sched_jobs - 1);
	other = centity->fences[idx];
	/* The slot being overwritten must already be signaled; see
	 * amdgpu_ctx_wait_prev_fence(). */
	if (other)
		BUG_ON(!dma_fence_is_signaled(other));

	dma_fence_get(fence);

	spin_lock(&ctx->ring_lock);
	centity->fences[idx] = fence;
	centity->sequence++;
	spin_unlock(&ctx->ring_lock);

	/* Drop the displaced fence's reference outside the spinlock. */
	dma_fence_put(other);
	if (handle)
		*handle = seq;

	return 0;
}
| 464 | |
/**
 * amdgpu_ctx_get_fence - look up a submission fence by sequence number
 * @ctx: context owning the entity
 * @entity: scheduler entity the job was submitted on
 * @seq: sequence number, or ~0ull for the most recent submission
 *
 * Returns a new reference to the fence for @seq, NULL when the entry has
 * aged out of the ring (and is therefore long signaled), or
 * ERR_PTR(-EINVAL) when @seq has not been submitted yet.
 */
struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
				       struct drm_sched_entity *entity,
				       uint64_t seq)
{
	struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
	struct dma_fence *fence;

	spin_lock(&ctx->ring_lock);

	/* ~0ull selects the last submitted job. */
	if (seq == ~0ull)
		seq = centity->sequence - 1;

	/* Sequence numbers that were never handed out are invalid. */
	if (seq >= centity->sequence) {
		spin_unlock(&ctx->ring_lock);
		return ERR_PTR(-EINVAL);
	}


	/* Entries more than one ring size old have been overwritten;
	 * their jobs completed, so report "no fence to wait on". */
	if (seq + amdgpu_sched_jobs < centity->sequence) {
		spin_unlock(&ctx->ring_lock);
		return NULL;
	}

	fence = dma_fence_get(centity->fences[seq & (amdgpu_sched_jobs - 1)]);
	spin_unlock(&ctx->ring_lock);

	return fence;
}
Christian König | efd4ccb | 2015-08-04 16:20:31 +0200 | [diff] [blame] | 493 | |
Andres Rodriguez | c23be4a | 2017-06-06 20:20:38 -0400 | [diff] [blame] | 494 | void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx, |
Lucas Stach | 1b1f42d | 2017-12-06 17:49:39 +0100 | [diff] [blame] | 495 | enum drm_sched_priority priority) |
Andres Rodriguez | c23be4a | 2017-06-06 20:20:38 -0400 | [diff] [blame] | 496 | { |
Christian König | 1b1f2fe | 2018-08-01 16:00:52 +0200 | [diff] [blame^] | 497 | unsigned num_entities = amdgput_ctx_total_num_entities(); |
Lucas Stach | 1b1f42d | 2017-12-06 17:49:39 +0100 | [diff] [blame] | 498 | enum drm_sched_priority ctx_prio; |
Christian König | 1b1f2fe | 2018-08-01 16:00:52 +0200 | [diff] [blame^] | 499 | unsigned i; |
Andres Rodriguez | c23be4a | 2017-06-06 20:20:38 -0400 | [diff] [blame] | 500 | |
| 501 | ctx->override_priority = priority; |
| 502 | |
Lucas Stach | 1b1f42d | 2017-12-06 17:49:39 +0100 | [diff] [blame] | 503 | ctx_prio = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ? |
Andres Rodriguez | c23be4a | 2017-06-06 20:20:38 -0400 | [diff] [blame] | 504 | ctx->init_priority : ctx->override_priority; |
| 505 | |
Christian König | 1b1f2fe | 2018-08-01 16:00:52 +0200 | [diff] [blame^] | 506 | for (i = 0; i < num_entities; i++) { |
| 507 | struct drm_sched_entity *entity = &ctx->entities[0][i].entity; |
Andres Rodriguez | c23be4a | 2017-06-06 20:20:38 -0400 | [diff] [blame] | 508 | |
Christian König | 7febe4b | 2018-08-01 16:22:39 +0200 | [diff] [blame] | 509 | drm_sched_entity_set_priority(entity, ctx_prio); |
Andres Rodriguez | c23be4a | 2017-06-06 20:20:38 -0400 | [diff] [blame] | 510 | } |
| 511 | } |
| 512 | |
/**
 * amdgpu_ctx_wait_prev_fence - wait for the fence slot about to be reused
 * @ctx: context owning the entity
 * @entity: scheduler entity being submitted on
 *
 * Before a new submission overwrites the oldest slot of the entity's
 * fence ring, interruptibly wait for that fence to signal.  This is what
 * makes the BUG_ON in amdgpu_ctx_add_fence() hold.
 *
 * Returns 0 on success or the (negative) error from dma_fence_wait(),
 * notably -ERESTARTSYS when interrupted by a signal.
 */
int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx,
			       struct drm_sched_entity *entity)
{
	struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
	/* NOTE(review): sequence/fences are read without ring_lock here —
	 * presumably safe because only one submitter per entity; confirm. */
	unsigned idx = centity->sequence & (amdgpu_sched_jobs - 1);
	struct dma_fence *other = centity->fences[idx];

	if (other) {
		signed long r;
		/* Interruptible wait so a signal can abort the submission. */
		r = dma_fence_wait(other, true);
		if (r < 0) {
			if (r != -ERESTARTSYS)
				DRM_ERROR("Error (%ld) waiting for fence!\n", r);

			return r;
		}
	}

	return 0;
}
| 533 | |
Christian König | efd4ccb | 2015-08-04 16:20:31 +0200 | [diff] [blame] | 534 | void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr) |
| 535 | { |
| 536 | mutex_init(&mgr->lock); |
| 537 | idr_init(&mgr->ctx_handles); |
| 538 | } |
| 539 | |
/**
 * amdgpu_ctx_mgr_entity_flush - flush all entities of all contexts
 * @mgr: per-file context manager being torn down
 *
 * Walks every context still registered in the manager and flushes each
 * of its scheduler entities, so no further jobs can be pushed.  The wait
 * budget (max_wait) is shared across all entities and shrinks as it is
 * consumed.
 */
void amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr)
{
	unsigned num_entities = amdgput_ctx_total_num_entities();
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id, i;
	long max_wait = MAX_WAIT_SCHED_ENTITY_Q_EMPTY;

	idp = &mgr->ctx_handles;

	mutex_lock(&mgr->lock);
	idr_for_each_entry(idp, ctx, id) {

		/* A context without a device was never fully initialized;
		 * nothing left to flush. */
		if (!ctx->adev) {
			mutex_unlock(&mgr->lock);
			return;
		}

		for (i = 0; i < num_entities; i++) {
			struct drm_sched_entity *entity;

			entity = &ctx->entities[0][i].entity;
			max_wait = drm_sched_entity_flush(entity, max_wait);
		}
	}
	mutex_unlock(&mgr->lock);
}
| 567 | |
Andrey Grodzovsky | c49d828 | 2018-06-05 12:56:26 -0400 | [diff] [blame] | 568 | void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr) |
Emily Deng | 8ee3a52 | 2018-04-16 10:07:02 +0800 | [diff] [blame] | 569 | { |
Christian König | 1b1f2fe | 2018-08-01 16:00:52 +0200 | [diff] [blame^] | 570 | unsigned num_entities = amdgput_ctx_total_num_entities(); |
Emily Deng | 8ee3a52 | 2018-04-16 10:07:02 +0800 | [diff] [blame] | 571 | struct amdgpu_ctx *ctx; |
| 572 | struct idr *idp; |
| 573 | uint32_t id, i; |
| 574 | |
| 575 | idp = &mgr->ctx_handles; |
| 576 | |
| 577 | idr_for_each_entry(idp, ctx, id) { |
| 578 | |
| 579 | if (!ctx->adev) |
| 580 | return; |
| 581 | |
Christian König | 1b1f2fe | 2018-08-01 16:00:52 +0200 | [diff] [blame^] | 582 | if (kref_read(&ctx->refcount) != 1) { |
| 583 | DRM_ERROR("ctx %p is still alive\n", ctx); |
| 584 | continue; |
Andrey Grodzovsky | 20b6b78 | 2018-05-15 14:12:21 -0400 | [diff] [blame] | 585 | } |
Christian König | 1b1f2fe | 2018-08-01 16:00:52 +0200 | [diff] [blame^] | 586 | |
| 587 | for (i = 0; i < num_entities; i++) |
| 588 | drm_sched_entity_fini(&ctx->entities[0][i].entity); |
Emily Deng | 8ee3a52 | 2018-04-16 10:07:02 +0800 | [diff] [blame] | 589 | } |
| 590 | } |
| 591 | |
Christian König | efd4ccb | 2015-08-04 16:20:31 +0200 | [diff] [blame] | 592 | void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr) |
| 593 | { |
| 594 | struct amdgpu_ctx *ctx; |
| 595 | struct idr *idp; |
| 596 | uint32_t id; |
| 597 | |
Andrey Grodzovsky | c49d828 | 2018-06-05 12:56:26 -0400 | [diff] [blame] | 598 | amdgpu_ctx_mgr_entity_fini(mgr); |
Emily Deng | 8ee3a52 | 2018-04-16 10:07:02 +0800 | [diff] [blame] | 599 | |
Christian König | efd4ccb | 2015-08-04 16:20:31 +0200 | [diff] [blame] | 600 | idp = &mgr->ctx_handles; |
| 601 | |
| 602 | idr_for_each_entry(idp, ctx, id) { |
Emily Deng | 8ee3a52 | 2018-04-16 10:07:02 +0800 | [diff] [blame] | 603 | if (kref_put(&ctx->refcount, amdgpu_ctx_fini) != 1) |
Christian König | efd4ccb | 2015-08-04 16:20:31 +0200 | [diff] [blame] | 604 | DRM_ERROR("ctx %p is still alive\n", ctx); |
| 605 | } |
| 606 | |
| 607 | idr_destroy(&mgr->ctx_handles); |
| 608 | mutex_destroy(&mgr->lock); |
| 609 | } |