/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: monk liu <monk.liu@amd.com>
 */

#include <drm/drmP.h>
#include <drm/drm_auth.h>
#include "amdgpu.h"
#include "amdgpu_sched.h"

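/*
 * Check whether the caller may create a context at the requested scheduler
 * priority: NORMAL and below are open to everyone, higher priorities require
 * CAP_SYS_NICE or DRM master status.
 */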
static int amdgpu_ctx_priority_permit(struct drm_file *filp,
                                      enum drm_sched_priority priority)
{
        /* NORMAL and below are accessible by everyone */
        if (priority <= DRM_SCHED_PRIORITY_NORMAL)
                return 0;

        if (capable(CAP_SYS_NICE))
                return 0;

        if (drm_is_current_master(filp))
                return 0;

        return -EACCES;
}

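/*
 * Initialize an amdgpu_ctx: validate the requested priority and the caller's
 * permission to use it, allocate the per-ring fence slots, snapshot the GPU
 * reset and VRAM lost counters, and create one scheduler entity per ring
 * (the KIQ ring is skipped).
 */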
static int amdgpu_ctx_init(struct amdgpu_device *adev,
                           enum drm_sched_priority priority,
                           struct drm_file *filp,
                           struct amdgpu_ctx *ctx)
{
        unsigned i, j;
        int r;

        if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX)
                return -EINVAL;

        r = amdgpu_ctx_priority_permit(filp, priority);
        if (r)
                return r;

        memset(ctx, 0, sizeof(*ctx));
        ctx->adev = adev;
        kref_init(&ctx->refcount);
        spin_lock_init(&ctx->ring_lock);
        ctx->fences = kcalloc(amdgpu_sched_jobs * AMDGPU_MAX_RINGS,
                              sizeof(struct dma_fence*), GFP_KERNEL);
        if (!ctx->fences)
                return -ENOMEM;

        mutex_init(&ctx->lock);

        for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                ctx->rings[i].sequence = 1;
                ctx->rings[i].fences = &ctx->fences[amdgpu_sched_jobs * i];
        }

        ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);
        ctx->reset_counter_query = ctx->reset_counter;
        ctx->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
        ctx->init_priority = priority;
        ctx->override_priority = DRM_SCHED_PRIORITY_UNSET;

        /* create context entity for each ring */
        for (i = 0; i < adev->num_rings; i++) {
                struct amdgpu_ring *ring = adev->rings[i];
                struct drm_sched_rq *rq;

                rq = &ring->sched.sched_rq[priority];

                if (ring == &adev->gfx.kiq.ring)
                        continue;

                r = drm_sched_entity_init(&ctx->rings[i].entity,
                                          &rq, 1, &ctx->guilty);
                if (r)
                        goto failed;
        }

        r = amdgpu_queue_mgr_init(adev, &ctx->queue_mgr);
        if (r)
                goto failed;

        return 0;

failed:
        for (j = 0; j < i; j++)
                drm_sched_entity_destroy(&ctx->rings[j].entity);
        kfree(ctx->fences);
        ctx->fences = NULL;
        return r;
}

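/*
 * Final kref release: drop every fence still referenced in the per-ring
 * fence slots, free the fence array, tear down the queue manager and the
 * context lock, then free the context itself.
 */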
static void amdgpu_ctx_fini(struct kref *ref)
{
        struct amdgpu_ctx *ctx = container_of(ref, struct amdgpu_ctx, refcount);
        struct amdgpu_device *adev = ctx->adev;
        unsigned i, j;

        if (!adev)
                return;

        for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
                for (j = 0; j < amdgpu_sched_jobs; ++j)
                        dma_fence_put(ctx->rings[i].fences[j]);
        kfree(ctx->fences);
        ctx->fences = NULL;

        amdgpu_queue_mgr_fini(adev, &ctx->queue_mgr);

        mutex_destroy(&ctx->lock);

        kfree(ctx);
}

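/*
 * Allocate a new context on behalf of userspace: reserve an IDR handle under
 * the manager lock, initialize the context and return the handle through *id.
 * On failure the handle is released again and the context is freed.
 */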
static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
                            struct amdgpu_fpriv *fpriv,
                            struct drm_file *filp,
                            enum drm_sched_priority priority,
                            uint32_t *id)
{
        struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
        struct amdgpu_ctx *ctx;
        int r;

        ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;

        mutex_lock(&mgr->lock);
        r = idr_alloc(&mgr->ctx_handles, ctx, 1, 0, GFP_KERNEL);
        if (r < 0) {
                mutex_unlock(&mgr->lock);
                kfree(ctx);
                return r;
        }

        *id = (uint32_t)r;
        r = amdgpu_ctx_init(adev, priority, filp, ctx);
        if (r) {
                idr_remove(&mgr->ctx_handles, *id);
                *id = 0;
                kfree(ctx);
        }
        mutex_unlock(&mgr->lock);
        return r;
}

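/*
 * kref release used for userspace frees: destroy the scheduler entities of
 * all rings except KIQ, then hand over to amdgpu_ctx_fini() for the rest of
 * the teardown.
 */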
static void amdgpu_ctx_do_release(struct kref *ref)
{
        struct amdgpu_ctx *ctx;
        u32 i;

        ctx = container_of(ref, struct amdgpu_ctx, refcount);

        for (i = 0; i < ctx->adev->num_rings; i++) {

                if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
                        continue;

                drm_sched_entity_destroy(&ctx->rings[i].entity);
        }

        amdgpu_ctx_fini(ref);
}

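/*
 * Remove the handle from the per-file IDR and drop the reference it held;
 * returns -EINVAL if the handle does not exist.
 */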
static int amdgpu_ctx_free(struct amdgpu_fpriv *fpriv, uint32_t id)
{
        struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
        struct amdgpu_ctx *ctx;

        mutex_lock(&mgr->lock);
        ctx = idr_remove(&mgr->ctx_handles, id);
        if (ctx)
                kref_put(&ctx->refcount, amdgpu_ctx_do_release);
        mutex_unlock(&mgr->lock);
        return ctx ? 0 : -EINVAL;
}

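/*
 * Legacy AMDGPU_CTX_OP_QUERY_STATE: report whether a GPU reset happened since
 * the last query on this context by comparing the per-context
 * reset_counter_query snapshot with the device reset counter.
 */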
static int amdgpu_ctx_query(struct amdgpu_device *adev,
                            struct amdgpu_fpriv *fpriv, uint32_t id,
                            union drm_amdgpu_ctx_out *out)
{
        struct amdgpu_ctx *ctx;
        struct amdgpu_ctx_mgr *mgr;
        unsigned reset_counter;

        if (!fpriv)
                return -EINVAL;

        mgr = &fpriv->ctx_mgr;
        mutex_lock(&mgr->lock);
        ctx = idr_find(&mgr->ctx_handles, id);
        if (!ctx) {
                mutex_unlock(&mgr->lock);
                return -EINVAL;
        }

        /* TODO: these two are always zero */
        out->state.flags = 0x0;
        out->state.hangs = 0x0;

        /* determine if a GPU reset has occurred since the last call */
        reset_counter = atomic_read(&adev->gpu_reset_counter);
        /* TODO: this should ideally return NO, GUILTY, or INNOCENT. */
        if (ctx->reset_counter_query == reset_counter)
                out->state.reset_status = AMDGPU_CTX_NO_RESET;
        else
                out->state.reset_status = AMDGPU_CTX_UNKNOWN_RESET;
        ctx->reset_counter_query = reset_counter;

        mutex_unlock(&mgr->lock);
        return 0;
}

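/*
 * AMDGPU_CTX_OP_QUERY_STATE2: report reset, VRAM-lost and guilty status as
 * individual flags so userspace can tell whether its buffers survived and
 * whether this context was marked guilty for a hang.
 */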
static int amdgpu_ctx_query2(struct amdgpu_device *adev,
        struct amdgpu_fpriv *fpriv, uint32_t id,
        union drm_amdgpu_ctx_out *out)
{
        struct amdgpu_ctx *ctx;
        struct amdgpu_ctx_mgr *mgr;

        if (!fpriv)
                return -EINVAL;

        mgr = &fpriv->ctx_mgr;
        mutex_lock(&mgr->lock);
        ctx = idr_find(&mgr->ctx_handles, id);
        if (!ctx) {
                mutex_unlock(&mgr->lock);
                return -EINVAL;
        }

        out->state.flags = 0x0;
        out->state.hangs = 0x0;

        if (ctx->reset_counter != atomic_read(&adev->gpu_reset_counter))
                out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RESET;

        if (ctx->vram_lost_counter != atomic_read(&adev->vram_lost_counter))
                out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_VRAMLOST;

        if (atomic_read(&ctx->guilty))
                out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_GUILTY;

        mutex_unlock(&mgr->lock);
        return 0;
}

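/*
 * DRM_AMDGPU_CTX ioctl entry point: translate the requested priority (falling
 * back to NORMAL for invalid values to stay backwards compatible) and dispatch
 * to the alloc, free and query operations.
 */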
int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
                     struct drm_file *filp)
{
        int r;
        uint32_t id;
        enum drm_sched_priority priority;

        union drm_amdgpu_ctx *args = data;
        struct amdgpu_device *adev = dev->dev_private;
        struct amdgpu_fpriv *fpriv = filp->driver_priv;

        r = 0;
        id = args->in.ctx_id;
        priority = amdgpu_to_sched_priority(args->in.priority);

        /* For backwards compatibility reasons, we need to accept
         * ioctls with garbage in the priority field */
        if (priority == DRM_SCHED_PRIORITY_INVALID)
                priority = DRM_SCHED_PRIORITY_NORMAL;

        switch (args->in.op) {
        case AMDGPU_CTX_OP_ALLOC_CTX:
                r = amdgpu_ctx_alloc(adev, fpriv, filp, priority, &id);
                args->out.alloc.ctx_id = id;
                break;
        case AMDGPU_CTX_OP_FREE_CTX:
                r = amdgpu_ctx_free(fpriv, id);
                break;
        case AMDGPU_CTX_OP_QUERY_STATE:
                r = amdgpu_ctx_query(adev, fpriv, id, &args->out);
                break;
        case AMDGPU_CTX_OP_QUERY_STATE2:
                r = amdgpu_ctx_query2(adev, fpriv, id, &args->out);
                break;
        default:
                return -EINVAL;
        }

        return r;
}

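/*
 * Look up a context by handle and take a reference on it; the caller must
 * balance this with amdgpu_ctx_put().
 */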
struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id)
{
        struct amdgpu_ctx *ctx;
        struct amdgpu_ctx_mgr *mgr;

        if (!fpriv)
                return NULL;

        mgr = &fpriv->ctx_mgr;

        mutex_lock(&mgr->lock);
        ctx = idr_find(&mgr->ctx_handles, id);
        if (ctx)
                kref_get(&ctx->refcount);
        mutex_unlock(&mgr->lock);
        return ctx;
}

int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
{
        if (ctx == NULL)
                return -EINVAL;

        kref_put(&ctx->refcount, amdgpu_ctx_do_release);
        return 0;
}

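/*
 * Publish a fence for a submission on a ring: store it in the slot selected
 * by the current sequence number (dropping the old, already signaled fence
 * held there), advance the sequence and optionally return the sequence
 * number through *handler.
 */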
int amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
                         struct dma_fence *fence, uint64_t *handler)
{
        struct amdgpu_ctx_ring *cring = &ctx->rings[ring->idx];
        uint64_t seq = cring->sequence;
        unsigned idx = 0;
        struct dma_fence *other = NULL;

        idx = seq & (amdgpu_sched_jobs - 1);
        other = cring->fences[idx];
        if (other)
                BUG_ON(!dma_fence_is_signaled(other));

        dma_fence_get(fence);

        spin_lock(&ctx->ring_lock);
        cring->fences[idx] = fence;
        cring->sequence++;
        spin_unlock(&ctx->ring_lock);

        dma_fence_put(other);
        if (handler)
                *handler = seq;

        return 0;
}

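/*
 * Return a reference to the fence of a previous submission, identified by its
 * sequence number.  A seq of ~0ull means "the most recent submission".
 * Returns -EINVAL for sequence numbers that have not been emitted yet and
 * NULL for fences old enough to have left the ring of fence slots.
 */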
struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
                                       struct amdgpu_ring *ring, uint64_t seq)
{
        struct amdgpu_ctx_ring *cring = &ctx->rings[ring->idx];
        struct dma_fence *fence;

        spin_lock(&ctx->ring_lock);

        if (seq == ~0ull)
                seq = ctx->rings[ring->idx].sequence - 1;

        if (seq >= cring->sequence) {
                spin_unlock(&ctx->ring_lock);
                return ERR_PTR(-EINVAL);
        }

        if (seq + amdgpu_sched_jobs < cring->sequence) {
                spin_unlock(&ctx->ring_lock);
                return NULL;
        }

        fence = dma_fence_get(cring->fences[seq & (amdgpu_sched_jobs - 1)]);
        spin_unlock(&ctx->ring_lock);

        return fence;
}

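/*
 * Apply a priority override to the context: the effective priority is the
 * override if one is set, otherwise the priority the context was created
 * with, and it is pushed down to every scheduler entity except KIQ.
 */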
void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
                                  enum drm_sched_priority priority)
{
        int i;
        struct amdgpu_device *adev = ctx->adev;
        struct drm_sched_entity *entity;
        struct amdgpu_ring *ring;
        enum drm_sched_priority ctx_prio;

        ctx->override_priority = priority;

        ctx_prio = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ?
                        ctx->init_priority : ctx->override_priority;

        for (i = 0; i < adev->num_rings; i++) {
                ring = adev->rings[i];
                entity = &ctx->rings[i].entity;

                if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
                        continue;

                drm_sched_entity_set_priority(entity, ctx_prio);
        }
}

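/*
 * Before reusing a fence slot, wait for the fence that currently occupies it,
 * which bounds the in-flight submissions per ring to amdgpu_sched_jobs for
 * this context.  An interrupted wait (-ERESTARTSYS) is returned without
 * logging an error.
 */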
int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx, unsigned ring_id)
{
        struct amdgpu_ctx_ring *cring = &ctx->rings[ring_id];
        unsigned idx = cring->sequence & (amdgpu_sched_jobs - 1);
        struct dma_fence *other = cring->fences[idx];

        if (other) {
                signed long r;
                r = dma_fence_wait(other, true);
                if (r < 0) {
                        if (r != -ERESTARTSYS)
                                DRM_ERROR("Error (%ld) waiting for fence!\n", r);

                        return r;
                }
        }

        return 0;
}

void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
{
        mutex_init(&mgr->lock);
        idr_init(&mgr->ctx_handles);
}

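/*
 * Flush the scheduler entities of every context owned by this manager so
 * their queued jobs are pushed out before teardown; the remaining wait budget
 * is shared across all entities, starting at MAX_WAIT_SCHED_ENTITY_Q_EMPTY.
 */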
void amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr)
{
        struct amdgpu_ctx *ctx;
        struct idr *idp;
        uint32_t id, i;
        long max_wait = MAX_WAIT_SCHED_ENTITY_Q_EMPTY;

        idp = &mgr->ctx_handles;

        mutex_lock(&mgr->lock);
        idr_for_each_entry(idp, ctx, id) {

                if (!ctx->adev) {
                        mutex_unlock(&mgr->lock);
                        return;
                }

                for (i = 0; i < ctx->adev->num_rings; i++) {

                        if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
                                continue;

                        max_wait = drm_sched_entity_flush(&ctx->rings[i].entity,
                                                          max_wait);
                }
        }
        mutex_unlock(&mgr->lock);
}

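/*
 * Finalize the scheduler entities of every context still tracked by the
 * manager.  At this point each context is expected to hold only its final
 * reference; anything else is reported as a leaked context.
 */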
void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
{
        struct amdgpu_ctx *ctx;
        struct idr *idp;
        uint32_t id, i;

        idp = &mgr->ctx_handles;

        idr_for_each_entry(idp, ctx, id) {

                if (!ctx->adev)
                        return;

                for (i = 0; i < ctx->adev->num_rings; i++) {

                        if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
                                continue;

                        if (kref_read(&ctx->refcount) == 1)
                                drm_sched_entity_fini(&ctx->rings[i].entity);
                        else
                                DRM_ERROR("ctx %p is still alive\n", ctx);
                }
        }
}

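/*
 * Tear down the context manager: finalize all entities, drop the last
 * reference of every remaining context (which frees it through
 * amdgpu_ctx_fini) and destroy the IDR and the lock.
 */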
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
{
        struct amdgpu_ctx *ctx;
        struct idr *idp;
        uint32_t id;

        amdgpu_ctx_mgr_entity_fini(mgr);

        idp = &mgr->ctx_handles;

        idr_for_each_entry(idp, ctx, id) {
                if (kref_put(&ctx->refcount, amdgpu_ctx_fini) != 1)
                        DRM_ERROR("ctx %p is still alive\n", ctx);
        }

        idr_destroy(&mgr->ctx_handles);
        mutex_destroy(&mgr->lock);
}