/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: monk liu <monk.liu@amd.com>
 */

#include <drm/drmP.h>
#include <drm/drm_auth.h>
#include "amdgpu.h"

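/*
 * amdgpu_ctx_priority_permit - check whether @filp may use @priority
 *
 * Priorities of NORMAL and below are available to everyone; anything
 * higher requires CAP_SYS_NICE or DRM master status on the file.
 */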
static int amdgpu_ctx_priority_permit(struct drm_file *filp,
				      enum amd_sched_priority priority)
{
	/* NORMAL and below are accessible by everyone */
	if (priority <= AMD_SCHED_PRIORITY_NORMAL)
		return 0;

	if (capable(CAP_SYS_NICE))
		return 0;

	if (drm_is_current_master(filp))
		return 0;

	return -EACCES;
}

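/*
 * amdgpu_ctx_init - initialize a context
 *
 * Validates and permission-checks @priority, allocates the per-ring fence
 * slots (amdgpu_sched_jobs entries per ring), snapshots the current GPU
 * reset counter and creates one scheduler entity per ring in the requested
 * priority run queue.  The driver-internal KIQ ring is skipped.  On
 * failure, all entities created so far are torn down again.
 */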
static int amdgpu_ctx_init(struct amdgpu_device *adev,
			   enum amd_sched_priority priority,
			   struct drm_file *filp,
			   struct amdgpu_ctx *ctx)
{
	unsigned i, j;
	int r;

	if (priority < 0 || priority >= AMD_SCHED_PRIORITY_MAX)
		return -EINVAL;

	r = amdgpu_ctx_priority_permit(filp, priority);
	if (r)
		return r;

	memset(ctx, 0, sizeof(*ctx));
	ctx->adev = adev;
	kref_init(&ctx->refcount);
	spin_lock_init(&ctx->ring_lock);
	ctx->fences = kcalloc(amdgpu_sched_jobs * AMDGPU_MAX_RINGS,
			      sizeof(struct dma_fence *), GFP_KERNEL);
	if (!ctx->fences)
		return -ENOMEM;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		ctx->rings[i].sequence = 1;
		ctx->rings[i].fences = &ctx->fences[amdgpu_sched_jobs * i];
	}

	ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);

	/* create context entity for each ring */
	for (i = 0; i < adev->num_rings; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		struct amd_sched_rq *rq;

		rq = &ring->sched.sched_rq[priority];

		if (ring == &adev->gfx.kiq.ring)
			continue;

		r = amd_sched_entity_init(&ring->sched, &ctx->rings[i].entity,
					  rq, amdgpu_sched_jobs);
		if (r)
			goto failed;
	}

	r = amdgpu_queue_mgr_init(adev, &ctx->queue_mgr);
	if (r)
		goto failed;

	return 0;

failed:
	for (j = 0; j < i; j++)
		amd_sched_entity_fini(&adev->rings[j]->sched,
				      &ctx->rings[j].entity);
	kfree(ctx->fences);
	ctx->fences = NULL;
	return r;
}

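/*
 * amdgpu_ctx_fini - tear down a context
 *
 * Drops every fence still referenced in the per-ring slots, frees the
 * fence array, and destroys the scheduler entities and the queue manager.
 */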
static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
{
	struct amdgpu_device *adev = ctx->adev;
	unsigned i, j;

	if (!adev)
		return;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		for (j = 0; j < amdgpu_sched_jobs; ++j)
			dma_fence_put(ctx->rings[i].fences[j]);
	kfree(ctx->fences);
	ctx->fences = NULL;

	for (i = 0; i < adev->num_rings; i++)
		amd_sched_entity_fini(&adev->rings[i]->sched,
				      &ctx->rings[i].entity);

	amdgpu_queue_mgr_fini(adev, &ctx->queue_mgr);
}

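/*
 * amdgpu_ctx_alloc - allocate a context and publish it in the handle IDR
 *
 * The IDR slot is reserved first so the new handle can be returned
 * through @id; if amdgpu_ctx_init() fails afterwards, the handle is
 * removed again under the same lock.
 */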
static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv,
			    struct drm_file *filp,
			    enum amd_sched_priority priority,
			    uint32_t *id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;
	int r;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	mutex_lock(&mgr->lock);
	r = idr_alloc(&mgr->ctx_handles, ctx, 1, 0, GFP_KERNEL);
	if (r < 0) {
		mutex_unlock(&mgr->lock);
		kfree(ctx);
		return r;
	}

	*id = (uint32_t)r;
	r = amdgpu_ctx_init(adev, priority, filp, ctx);
	if (r) {
		idr_remove(&mgr->ctx_handles, *id);
		*id = 0;
		kfree(ctx);
	}
	mutex_unlock(&mgr->lock);
	return r;
}

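/* kref release callback, invoked once the last context reference drops */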
static void amdgpu_ctx_do_release(struct kref *ref)
{
	struct amdgpu_ctx *ctx;

	ctx = container_of(ref, struct amdgpu_ctx, refcount);

	amdgpu_ctx_fini(ctx);

	kfree(ctx);
}

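/*
 * amdgpu_ctx_free - remove a context handle and drop its reference
 *
 * Returns -EINVAL if @id does not name a live context of this file.
 */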
static int amdgpu_ctx_free(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;

	mutex_lock(&mgr->lock);
	ctx = idr_remove(&mgr->ctx_handles, id);
	if (ctx)
		kref_put(&ctx->refcount, amdgpu_ctx_do_release);
	mutex_unlock(&mgr->lock);
	return ctx ? 0 : -EINVAL;
}

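/*
 * amdgpu_ctx_query - report context state to user space
 *
 * Compares the per-context reset counter against the device-wide one to
 * tell the caller whether a GPU reset has occurred since the last query.
 */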
static int amdgpu_ctx_query(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv, uint32_t id,
			    union drm_amdgpu_ctx_out *out)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;
	unsigned reset_counter;

	if (!fpriv)
		return -EINVAL;

	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	/* TODO: these two are always zero */
	out->state.flags = 0x0;
	out->state.hangs = 0x0;

	/* determine if a GPU reset has occurred since the last call */
	reset_counter = atomic_read(&adev->gpu_reset_counter);
	/* TODO: this should ideally return NO, GUILTY, or INNOCENT. */
	if (ctx->reset_counter == reset_counter)
		out->state.reset_status = AMDGPU_CTX_NO_RESET;
	else
		out->state.reset_status = AMDGPU_CTX_UNKNOWN_RESET;
	ctx->reset_counter = reset_counter;

	mutex_unlock(&mgr->lock);
	return 0;
}

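/* map the AMDGPU_CTX_PRIORITY_* ioctl values onto scheduler priorities */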
static enum amd_sched_priority amdgpu_to_sched_priority(int amdgpu_priority)
{
	switch (amdgpu_priority) {
	case AMDGPU_CTX_PRIORITY_HIGH_HW:
		return AMD_SCHED_PRIORITY_HIGH_HW;
	case AMDGPU_CTX_PRIORITY_HIGH_SW:
		return AMD_SCHED_PRIORITY_HIGH_SW;
	case AMDGPU_CTX_PRIORITY_NORMAL:
		return AMD_SCHED_PRIORITY_NORMAL;
	case AMDGPU_CTX_PRIORITY_LOW_SW:
	case AMDGPU_CTX_PRIORITY_LOW_HW:
		return AMD_SCHED_PRIORITY_LOW;
	default:
		WARN(1, "Invalid context priority %d\n", amdgpu_priority);
		return AMD_SCHED_PRIORITY_NORMAL;
	}
}

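/*
 * amdgpu_ctx_ioctl - handler for the DRM_AMDGPU_CTX ioctl
 *
 * Translates the requested priority and dispatches the ALLOC_CTX,
 * FREE_CTX and QUERY_STATE operations.
 */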
int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *filp)
{
	int r;
	uint32_t id;
	enum amd_sched_priority priority;

	union drm_amdgpu_ctx *args = data;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;

	r = 0;
	id = args->in.ctx_id;
	priority = amdgpu_to_sched_priority(args->in.priority);

	if (priority >= AMD_SCHED_PRIORITY_MAX)
		return -EINVAL;

	switch (args->in.op) {
	case AMDGPU_CTX_OP_ALLOC_CTX:
		r = amdgpu_ctx_alloc(adev, fpriv, filp, priority, &id);
		args->out.alloc.ctx_id = id;
		break;
	case AMDGPU_CTX_OP_FREE_CTX:
		r = amdgpu_ctx_free(fpriv, id);
		break;
	case AMDGPU_CTX_OP_QUERY_STATE:
		r = amdgpu_ctx_query(adev, fpriv, id, &args->out);
		break;
	default:
		return -EINVAL;
	}

	return r;
}

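/* look up a context by handle and take a reference; NULL if not found */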
struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;

	if (!fpriv)
		return NULL;

	mgr = &fpriv->ctx_mgr;

	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (ctx)
		kref_get(&ctx->refcount);
	mutex_unlock(&mgr->lock);
	return ctx;
}

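/* drop a reference obtained with amdgpu_ctx_get() */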
int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
{
	if (ctx == NULL)
		return -EINVAL;

	kref_put(&ctx->refcount, amdgpu_ctx_do_release);
	return 0;
}

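/*
 * amdgpu_ctx_add_fence - remember the fence of a submission
 *
 * The per-ring fence slots form a ring buffer indexed by the low bits of
 * the sequence number.  Before a slot is reused, the old fence stored in
 * it is waited on, which also caps the number of outstanding jobs per
 * ring at amdgpu_sched_jobs.  The sequence number assigned to @fence is
 * returned through @handler.
 */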
int amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
			 struct dma_fence *fence, uint64_t *handler)
{
	struct amdgpu_ctx_ring *cring = &ctx->rings[ring->idx];
	uint64_t seq = cring->sequence;
	unsigned idx = 0;
	struct dma_fence *other = NULL;

	idx = seq & (amdgpu_sched_jobs - 1);
	other = cring->fences[idx];
	if (other) {
		signed long r;

		/* the slot is being reused: wait for the old fence first */
		r = dma_fence_wait_timeout(other, true, MAX_SCHEDULE_TIMEOUT);
		if (r < 0)
			return r;
	}

	dma_fence_get(fence);

	spin_lock(&ctx->ring_lock);
	cring->fences[idx] = fence;
	cring->sequence++;
	spin_unlock(&ctx->ring_lock);

	dma_fence_put(other);
	if (handler)
		*handler = seq;

	return 0;
}

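/*
 * amdgpu_ctx_get_fence - look up the fence for a sequence number
 *
 * A @seq of ~0ull means the most recently submitted fence.  Returns
 * ERR_PTR(-EINVAL) for sequence numbers that have not been submitted
 * yet, NULL for ones so old that their slot has already been reused
 * (and therefore signaled), or a new reference to the stored fence.
 */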
struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
				       struct amdgpu_ring *ring, uint64_t seq)
{
	struct amdgpu_ctx_ring *cring = &ctx->rings[ring->idx];
	struct dma_fence *fence;

	spin_lock(&ctx->ring_lock);

	if (seq == ~0ull)
		seq = ctx->rings[ring->idx].sequence - 1;

	if (seq >= cring->sequence) {
		spin_unlock(&ctx->ring_lock);
		return ERR_PTR(-EINVAL);
	}

	if (seq + amdgpu_sched_jobs < cring->sequence) {
		spin_unlock(&ctx->ring_lock);
		return NULL;
	}

	fence = dma_fence_get(cring->fences[seq & (amdgpu_sched_jobs - 1)]);
	spin_unlock(&ctx->ring_lock);

	return fence;
}

void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
{
	mutex_init(&mgr->lock);
	idr_init(&mgr->ctx_handles);
}

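/*
 * amdgpu_ctx_mgr_fini - destroy all remaining contexts at file close
 *
 * Releases every context still present in the IDR; if the kref_put()
 * here is not the final reference, something else still holds the
 * context and an error is logged.
 */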
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
{
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id;

	idp = &mgr->ctx_handles;

	idr_for_each_entry(idp, ctx, id) {
		if (kref_put(&ctx->refcount, amdgpu_ctx_do_release) != 1)
			DRM_ERROR("ctx %p is still alive\n", ctx);
	}

	idr_destroy(&mgr->ctx_handles);
	mutex_destroy(&mgr->lock);
}