/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: monk liu <monk.liu@amd.com>
 */

#include <drm/drmP.h>
#include <drm/drm_auth.h>
#include "amdgpu.h"
#include "amdgpu_sched.h"

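/*
 * amdgpu_ctx_priority_permit - check if @filp may request @priority
 *
 * Priorities above NORMAL are privileged: the caller must either hold
 * CAP_SYS_NICE or be the current DRM master.  Everything at or below
 * NORMAL is accessible to everyone.
 */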
static int amdgpu_ctx_priority_permit(struct drm_file *filp,
				      enum drm_sched_priority priority)
{
	/* NORMAL and below are accessible by everyone */
	if (priority <= DRM_SCHED_PRIORITY_NORMAL)
		return 0;

	if (capable(CAP_SYS_NICE))
		return 0;

	if (drm_is_current_master(filp))
		return 0;

	return -EACCES;
}

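/*
 * amdgpu_ctx_init - set up a freshly allocated context
 *
 * Validates and permission-checks @priority, allocates the per-ring
 * fence buffers (amdgpu_sched_jobs entries per ring) and creates one
 * scheduler entity per ring in the run queue matching @priority.  The
 * KIQ ring is skipped since userspace never submits to it.
 */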
static int amdgpu_ctx_init(struct amdgpu_device *adev,
			   enum drm_sched_priority priority,
			   struct drm_file *filp,
			   struct amdgpu_ctx *ctx)
{
	unsigned i, j;
	int r;

	if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX)
		return -EINVAL;

	r = amdgpu_ctx_priority_permit(filp, priority);
	if (r)
		return r;

	memset(ctx, 0, sizeof(*ctx));
	ctx->adev = adev;
	kref_init(&ctx->refcount);
	spin_lock_init(&ctx->ring_lock);
	ctx->fences = kcalloc(amdgpu_sched_jobs * AMDGPU_MAX_RINGS,
			      sizeof(struct dma_fence*), GFP_KERNEL);
	if (!ctx->fences)
		return -ENOMEM;

	mutex_init(&ctx->lock);

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		ctx->rings[i].sequence = 1;
		ctx->rings[i].fences = &ctx->fences[amdgpu_sched_jobs * i];
	}

	ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);
	ctx->reset_counter_query = ctx->reset_counter;
	ctx->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
	ctx->init_priority = priority;
	ctx->override_priority = DRM_SCHED_PRIORITY_UNSET;

	/* create context entity for each ring */
	for (i = 0; i < adev->num_rings; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		struct drm_sched_rq *rq;

		rq = &ring->sched.sched_rq[priority];

		if (ring == &adev->gfx.kiq.ring)
			continue;

		r = drm_sched_entity_init(&ring->sched, &ctx->rings[i].entity,
					  rq, amdgpu_sched_jobs, &ctx->guilty);
		if (r)
			goto failed;
	}

	r = amdgpu_queue_mgr_init(adev, &ctx->queue_mgr);
	if (r)
		goto failed;

	return 0;

failed:
	for (j = 0; j < i; j++)
		drm_sched_entity_fini(&adev->rings[j]->sched,
				      &ctx->rings[j].entity);
	kfree(ctx->fences);
	ctx->fences = NULL;
	return r;
}

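/*
 * amdgpu_ctx_fini - final kref release for a context
 *
 * Drops every fence still cached in the per-ring fence buffers, frees
 * the buffers and the context itself.  The scheduler entities must
 * already have been torn down by the caller (see amdgpu_ctx_do_release
 * and amdgpu_ctx_mgr_fini).
 */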
static void amdgpu_ctx_fini(struct kref *ref)
{
	struct amdgpu_ctx *ctx = container_of(ref, struct amdgpu_ctx, refcount);
	struct amdgpu_device *adev = ctx->adev;
	unsigned i, j;

	if (!adev)
		return;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		for (j = 0; j < amdgpu_sched_jobs; ++j)
			dma_fence_put(ctx->rings[i].fences[j]);
	kfree(ctx->fences);
	ctx->fences = NULL;

	amdgpu_queue_mgr_fini(adev, &ctx->queue_mgr);

	mutex_destroy(&ctx->lock);

	kfree(ctx);
}

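/*
 * amdgpu_ctx_alloc - create a context and publish it in the fpriv IDR
 *
 * Handles start at 1 so that 0 can serve as "no context".  On failure
 * the IDR slot is released again and *id is reset to 0.
 */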
static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv,
			    struct drm_file *filp,
			    enum drm_sched_priority priority,
			    uint32_t *id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;
	int r;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	mutex_lock(&mgr->lock);
	r = idr_alloc(&mgr->ctx_handles, ctx, 1, 0, GFP_KERNEL);
	if (r < 0) {
		mutex_unlock(&mgr->lock);
		kfree(ctx);
		return r;
	}

	*id = (uint32_t)r;
	r = amdgpu_ctx_init(adev, priority, filp, ctx);
	if (r) {
		idr_remove(&mgr->ctx_handles, *id);
		*id = 0;
		kfree(ctx);
	}
	mutex_unlock(&mgr->lock);
	return r;
}

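/*
 * amdgpu_ctx_do_release - kref release used for userspace-driven frees
 *
 * Tears down the scheduler entities first and then hands the final
 * cleanup over to amdgpu_ctx_fini().
 */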
static void amdgpu_ctx_do_release(struct kref *ref)
{
	struct amdgpu_ctx *ctx;
	u32 i;

	ctx = container_of(ref, struct amdgpu_ctx, refcount);

	for (i = 0; i < ctx->adev->num_rings; i++)
		drm_sched_entity_fini(&ctx->adev->rings[i]->sched,
				      &ctx->rings[i].entity);

	amdgpu_ctx_fini(ref);
}

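/*
 * amdgpu_ctx_free - drop the handle's reference on a context
 *
 * The context is removed from the IDR immediately, but it is only
 * destroyed once the last kref (e.g. one held by an in-flight command
 * submission via amdgpu_ctx_get()) is dropped.
 */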
static int amdgpu_ctx_free(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;

	mutex_lock(&mgr->lock);
	ctx = idr_remove(&mgr->ctx_handles, id);
	if (ctx)
		kref_put(&ctx->refcount, amdgpu_ctx_do_release);
	mutex_unlock(&mgr->lock);
	return ctx ? 0 : -EINVAL;
}

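/*
 * amdgpu_ctx_query - legacy AMDGPU_CTX_OP_QUERY_STATE implementation
 *
 * Reports whether a GPU reset happened since the previous query by
 * comparing the device's gpu_reset_counter against the value cached at
 * the last call.  The check is stateful: reading the status re-arms it.
 */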
static int amdgpu_ctx_query(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv, uint32_t id,
			    union drm_amdgpu_ctx_out *out)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;
	unsigned reset_counter;

	if (!fpriv)
		return -EINVAL;

	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	/* TODO: these two are always zero */
	out->state.flags = 0x0;
	out->state.hangs = 0x0;

	/* determine if a GPU reset has occurred since the last call */
	reset_counter = atomic_read(&adev->gpu_reset_counter);
	/* TODO: this should ideally return NO, GUILTY, or INNOCENT. */
	if (ctx->reset_counter_query == reset_counter)
		out->state.reset_status = AMDGPU_CTX_NO_RESET;
	else
		out->state.reset_status = AMDGPU_CTX_UNKNOWN_RESET;
	ctx->reset_counter_query = reset_counter;

	mutex_unlock(&mgr->lock);
	return 0;
}

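/*
 * amdgpu_ctx_query2 - AMDGPU_CTX_OP_QUERY_STATE2 implementation
 *
 * Extended variant of amdgpu_ctx_query(): reports, relative to the
 * context's creation, whether a GPU reset occurred, whether VRAM
 * content was lost, and whether this context was found guilty of a
 * hang.  Unlike the legacy query it does not re-arm any counters.
 */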
static int amdgpu_ctx_query2(struct amdgpu_device *adev,
			     struct amdgpu_fpriv *fpriv, uint32_t id,
			     union drm_amdgpu_ctx_out *out)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;

	if (!fpriv)
		return -EINVAL;

	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	out->state.flags = 0x0;
	out->state.hangs = 0x0;

	if (ctx->reset_counter != atomic_read(&adev->gpu_reset_counter))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RESET;

	if (ctx->vram_lost_counter != atomic_read(&adev->vram_lost_counter))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_VRAMLOST;

	if (atomic_read(&ctx->guilty))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_GUILTY;

	mutex_unlock(&mgr->lock);
	return 0;
}

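/*
 * amdgpu_ctx_ioctl - entry point for the DRM_AMDGPU_CTX ioctl
 *
 * Dispatches context allocation, destruction and the two state
 * queries.  A rough sketch of the userspace side, assuming a
 * libdrm-style wrapper (illustrative only, not part of this file):
 *
 *	union drm_amdgpu_ctx args = {};
 *
 *	args.in.op = AMDGPU_CTX_OP_ALLOC_CTX;
 *	args.in.priority = AMDGPU_CTX_PRIORITY_NORMAL;
 *	r = drmCommandWriteRead(fd, DRM_AMDGPU_CTX, &args, sizeof(args));
 *	if (!r)
 *		ctx_id = args.out.alloc.ctx_id;
 */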
int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *filp)
{
	int r;
	uint32_t id;
	enum drm_sched_priority priority;

	union drm_amdgpu_ctx *args = data;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;

	r = 0;
	id = args->in.ctx_id;
	priority = amdgpu_to_sched_priority(args->in.priority);

	/* For backwards compatibility reasons, we need to accept
	 * ioctls with garbage in the priority field */
	if (priority == DRM_SCHED_PRIORITY_INVALID)
		priority = DRM_SCHED_PRIORITY_NORMAL;

	switch (args->in.op) {
	case AMDGPU_CTX_OP_ALLOC_CTX:
		r = amdgpu_ctx_alloc(adev, fpriv, filp, priority, &id);
		args->out.alloc.ctx_id = id;
		break;
	case AMDGPU_CTX_OP_FREE_CTX:
		r = amdgpu_ctx_free(fpriv, id);
		break;
	case AMDGPU_CTX_OP_QUERY_STATE:
		r = amdgpu_ctx_query(adev, fpriv, id, &args->out);
		break;
	case AMDGPU_CTX_OP_QUERY_STATE2:
		r = amdgpu_ctx_query2(adev, fpriv, id, &args->out);
		break;
	default:
		return -EINVAL;
	}

	return r;
}

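/*
 * amdgpu_ctx_get - look up a context by handle and take a reference
 *
 * Returns NULL if the handle is unknown.  Callers must balance this
 * with amdgpu_ctx_put().
 */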
struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;

	if (!fpriv)
		return NULL;

	mgr = &fpriv->ctx_mgr;

	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (ctx)
		kref_get(&ctx->refcount);
	mutex_unlock(&mgr->lock);
	return ctx;
}

int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
{
	if (ctx == NULL)
		return -EINVAL;

	kref_put(&ctx->refcount, amdgpu_ctx_do_release);
	return 0;
}

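/*
 * amdgpu_ctx_add_fence - remember the fence of a new submission
 *
 * Stores @fence in the ring's circular fence buffer, indexed by the
 * low bits of the sequence number (amdgpu_sched_jobs is enforced to be
 * a power of two).  The fence being evicted from the slot must already
 * have signaled; amdgpu_ctx_wait_prev_fence() guarantees that.  The
 * sequence number handed back through @handler is what is later passed
 * to amdgpu_ctx_get_fence().
 */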
int amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
			 struct dma_fence *fence, uint64_t *handler)
{
	struct amdgpu_ctx_ring *cring = &ctx->rings[ring->idx];
	uint64_t seq = cring->sequence;
	unsigned idx = 0;
	struct dma_fence *other = NULL;

	idx = seq & (amdgpu_sched_jobs - 1);
	other = cring->fences[idx];
	if (other)
		BUG_ON(!dma_fence_is_signaled(other));

	dma_fence_get(fence);

	spin_lock(&ctx->ring_lock);
	cring->fences[idx] = fence;
	cring->sequence++;
	spin_unlock(&ctx->ring_lock);

	dma_fence_put(other);
	if (handler)
		*handler = seq;

	return 0;
}

struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
				       struct amdgpu_ring *ring, uint64_t seq)
{
	struct amdgpu_ctx_ring *cring = &ctx->rings[ring->idx];
	struct dma_fence *fence;

	spin_lock(&ctx->ring_lock);

	if (seq == ~0ull)
		seq = ctx->rings[ring->idx].sequence - 1;

	if (seq >= cring->sequence) {
		spin_unlock(&ctx->ring_lock);
		return ERR_PTR(-EINVAL);
	}

	if (seq + amdgpu_sched_jobs < cring->sequence) {
		spin_unlock(&ctx->ring_lock);
		return NULL;
	}

	fence = dma_fence_get(cring->fences[seq & (amdgpu_sched_jobs - 1)]);
	spin_unlock(&ctx->ring_lock);

	return fence;
}

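/*
 * amdgpu_ctx_priority_override - move a context to another priority
 *
 * Used e.g. by the scheduler ioctl (see amdgpu_sched.c) to boost or
 * restore a context at runtime.  DRM_SCHED_PRIORITY_UNSET clears the
 * override and falls back to the priority the context was created
 * with.  Each entity is re-queued onto the run queue of the effective
 * priority.
 */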
void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
				  enum drm_sched_priority priority)
{
	int i;
	struct amdgpu_device *adev = ctx->adev;
	struct drm_sched_rq *rq;
	struct drm_sched_entity *entity;
	struct amdgpu_ring *ring;
	enum drm_sched_priority ctx_prio;

	ctx->override_priority = priority;

	ctx_prio = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ?
			ctx->init_priority : ctx->override_priority;

	for (i = 0; i < adev->num_rings; i++) {
		ring = adev->rings[i];
		entity = &ctx->rings[i].entity;
		rq = &ring->sched.sched_rq[ctx_prio];

		if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
			continue;

		drm_sched_entity_set_rq(entity, rq);
	}
}

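/*
 * amdgpu_ctx_wait_prev_fence - wait for the slot the next job reuses
 *
 * Interruptibly waits for the fence submitted amdgpu_sched_jobs
 * submissions ago, i.e. the one occupying the buffer slot the next
 * amdgpu_ctx_add_fence() call will overwrite.  This throttles a
 * context to at most amdgpu_sched_jobs outstanding submissions per
 * ring and upholds the BUG_ON() in amdgpu_ctx_add_fence().
 */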
int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx, unsigned ring_id)
{
	struct amdgpu_ctx_ring *cring = &ctx->rings[ring_id];
	unsigned idx = cring->sequence & (amdgpu_sched_jobs - 1);
	struct dma_fence *other = cring->fences[idx];

	if (other) {
		signed long r;
		r = dma_fence_wait(other, true);
		if (r < 0) {
			if (r != -ERESTARTSYS)
				DRM_ERROR("Error (%ld) waiting for fence!\n", r);

			return r;
		}
	}

	return 0;
}

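/*
 * amdgpu_ctx_mgr_init - set up the per-file context manager
 *
 * Called when a client opens the device; the manager (embedded in
 * struct amdgpu_fpriv) tracks all contexts created on that file
 * descriptor.
 */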
void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
{
	mutex_init(&mgr->lock);
	idr_init(&mgr->ctx_handles);
}

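/*
 * amdgpu_ctx_mgr_entity_fini - first stage of file-close teardown
 *
 * Asks every entity of every remaining context to stop accepting new
 * jobs (drm_sched_entity_do_release).  The second stage,
 * amdgpu_ctx_mgr_entity_cleanup(), then waits for the entities to
 * drain before the contexts themselves are freed in
 * amdgpu_ctx_mgr_fini().
 */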
void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
{
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id, i;

	idp = &mgr->ctx_handles;

	idr_for_each_entry(idp, ctx, id) {

		if (!ctx->adev)
			return;

		for (i = 0; i < ctx->adev->num_rings; i++)
			if (kref_read(&ctx->refcount) == 1)
				drm_sched_entity_do_release(&ctx->adev->rings[i]->sched,
							    &ctx->rings[i].entity);
			else
				DRM_ERROR("ctx %p is still alive\n", ctx);
	}
}

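/*
 * amdgpu_ctx_mgr_entity_cleanup - second stage of file-close teardown
 *
 * Waits for the entities released in amdgpu_ctx_mgr_entity_fini() to
 * finish their remaining work and frees their scheduler resources.
 */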
void amdgpu_ctx_mgr_entity_cleanup(struct amdgpu_ctx_mgr *mgr)
{
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id, i;

	idp = &mgr->ctx_handles;

	idr_for_each_entry(idp, ctx, id) {

		if (!ctx->adev)
			return;

		for (i = 0; i < ctx->adev->num_rings; i++)
			if (kref_read(&ctx->refcount) == 1)
				drm_sched_entity_cleanup(&ctx->adev->rings[i]->sched,
							 &ctx->rings[i].entity);
			else
				DRM_ERROR("ctx %p is still alive\n", ctx);
	}
}

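/*
 * amdgpu_ctx_mgr_fini - destroy the context manager on file close
 *
 * Drops the file's reference on every context still in the IDR; a
 * context that is still referenced elsewhere at this point is a
 * refcount leak and gets reported.
 */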
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
{
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id;

	amdgpu_ctx_mgr_entity_cleanup(mgr);

	idp = &mgr->ctx_handles;

	idr_for_each_entry(idp, ctx, id) {
		if (kref_put(&ctx->refcount, amdgpu_ctx_fini) != 1)
			DRM_ERROR("ctx %p is still alive\n", ctx);
	}

	idr_destroy(&mgr->ctx_handles);
	mutex_destroy(&mgr->lock);
}