/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: monk liu <monk.liu@amd.com>
 */

#include <drm/drmP.h>
#include "amdgpu.h"

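/*
 * Final kref release handler: drop every fence reference still tracked
 * in the per-ring fence slots, tear down the scheduler entities when the
 * GPU scheduler is enabled, then free the context itself.
 */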
static void amdgpu_ctx_do_release(struct kref *ref)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_device *adev;
	unsigned i, j;

	ctx = container_of(ref, struct amdgpu_ctx, refcount);
	adev = ctx->adev;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		for (j = 0; j < AMDGPU_CTX_MAX_CS_PENDING; ++j)
			fence_put(ctx->rings[i].fences[j]);

	if (amdgpu_enable_scheduler) {
		for (i = 0; i < adev->num_rings; i++)
			amd_context_entity_fini(adev->rings[i]->scheduler,
						&ctx->rings[i].c_entity);
	}

	kfree(ctx);
}

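/*
 * Allocate a new context and publish it in the per-file handle IDR; the
 * resulting id is returned to userspace by the ALLOC_CTX ioctl. With the
 * GPU scheduler enabled, one context entity is created per ring; a NULL
 * fpriv selects the kernel run queue for the entities (although the
 * handle manager is always taken from fpriv here).
 */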
int amdgpu_ctx_alloc(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv,
		     uint32_t *id)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	int i, j, r;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	mutex_lock(&mgr->lock);
	r = idr_alloc(&mgr->ctx_handles, ctx, 0, 0, GFP_KERNEL);
	if (r < 0) {
		mutex_unlock(&mgr->lock);
		kfree(ctx);
		return r;
	}
	*id = (uint32_t)r;

	ctx->adev = adev;
	kref_init(&ctx->refcount);
	spin_lock_init(&ctx->ring_lock);
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		ctx->rings[i].sequence = 1;
	mutex_unlock(&mgr->lock);

	if (amdgpu_enable_scheduler) {
		/* create a context entity for each ring */
		for (i = 0; i < adev->num_rings; i++) {
			struct amd_run_queue *rq;

			if (fpriv)
				rq = &adev->rings[i]->scheduler->sched_rq;
			else
				rq = &adev->rings[i]->scheduler->kernel_rq;
			r = amd_context_entity_init(adev->rings[i]->scheduler,
						    &ctx->rings[i].c_entity,
						    NULL, rq, *id);
			if (r)
				break;
		}

		if (i < adev->num_rings) {
			for (j = 0; j < i; j++)
				amd_context_entity_fini(adev->rings[j]->scheduler,
							&ctx->rings[j].c_entity);
			/* unpublish the handle again, or a later lookup
			 * would hit freed memory
			 */
			mutex_lock(&mgr->lock);
			idr_remove(&mgr->ctx_handles, *id);
			mutex_unlock(&mgr->lock);
			kfree(ctx);
			return r;
		}
	}

	return 0;
}

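/*
 * FREE_CTX: remove the handle from the IDR and drop the reference the
 * IDR held. The context stays alive until all amdgpu_ctx_get() users
 * have dropped their references as well.
 */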
int amdgpu_ctx_free(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;

	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (ctx) {
		idr_remove(&mgr->ctx_handles, id);
		kref_put(&ctx->refcount, amdgpu_ctx_do_release);
		mutex_unlock(&mgr->lock);
		return 0;
	}
	mutex_unlock(&mgr->lock);
	return -EINVAL;
}

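/*
 * QUERY_STATE: report whether a GPU reset has happened since the last
 * query of this context by comparing the device's reset counter against
 * the snapshot stored in the context.
 */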
static int amdgpu_ctx_query(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv, uint32_t id,
			    union drm_amdgpu_ctx_out *out)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	unsigned reset_counter;

	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	/* TODO: these two are always zero */
	out->state.flags = 0x0;
	out->state.hangs = 0x0;

	/* determine if a GPU reset has occurred since the last call */
	reset_counter = atomic_read(&adev->gpu_reset_counter);
	/* TODO: this should ideally return NO, GUILTY, or INNOCENT. */
	if (ctx->reset_counter == reset_counter)
		out->state.reset_status = AMDGPU_CTX_NO_RESET;
	else
		out->state.reset_status = AMDGPU_CTX_UNKNOWN_RESET;
	ctx->reset_counter = reset_counter;

	mutex_unlock(&mgr->lock);
	return 0;
}

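/*
 * Called on file close: drop the IDR's reference on every context still
 * registered, warn about contexts that are still referenced elsewhere,
 * then destroy the handle manager itself.
 */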
void amdgpu_ctx_fini(struct amdgpu_fpriv *fpriv)
{
	struct idr *idp;
	struct amdgpu_ctx *ctx;
	uint32_t id;
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;

	idp = &mgr->ctx_handles;

	idr_for_each_entry(idp, ctx, id) {
		if (kref_put(&ctx->refcount, amdgpu_ctx_do_release) != 1)
			DRM_ERROR("ctx %p is still alive\n", ctx);
	}

	idr_destroy(&mgr->ctx_handles);
	mutex_destroy(&mgr->lock);
}

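/*
 * DRM_AMDGPU_CTX ioctl entry point: dispatch the alloc/free/query
 * sub-operation encoded in args->in.op.
 */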
int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *filp)
{
	int r;
	uint32_t id;

	union drm_amdgpu_ctx *args = data;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;

	r = 0;
	id = args->in.ctx_id;

	switch (args->in.op) {
	case AMDGPU_CTX_OP_ALLOC_CTX:
		r = amdgpu_ctx_alloc(adev, fpriv, &id);
		args->out.alloc.ctx_id = id;
		break;
	case AMDGPU_CTX_OP_FREE_CTX:
		r = amdgpu_ctx_free(adev, fpriv, id);
		break;
	case AMDGPU_CTX_OP_QUERY_STATE:
		r = amdgpu_ctx_query(adev, fpriv, id, &args->out);
		break;
	default:
		return -EINVAL;
	}

	return r;
}

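/*
 * Look up a context by handle and take a reference on it; the caller
 * must balance this with amdgpu_ctx_put().
 */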
struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;

	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (ctx)
		kref_get(&ctx->refcount);
	mutex_unlock(&mgr->lock);
	return ctx;
}

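/*
 * Drop a reference obtained with amdgpu_ctx_get(); the context is freed
 * via amdgpu_ctx_do_release() once the last reference is gone.
 */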
int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
{
	if (ctx == NULL)
		return -EINVAL;

	kref_put(&ctx->refcount, amdgpu_ctx_do_release);
	return 0;
}

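/*
 * Remember a command submission fence in the context's fixed-size
 * per-ring fence array and return the sequence number under which it
 * can later be looked up. At most AMDGPU_CTX_MAX_CS_PENDING fences are
 * tracked per ring; when a slot is reused, the old fence is waited on
 * first so that aged-out sequence numbers are known to be signaled.
 */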
uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
			      struct fence *fence)
{
	struct amdgpu_ctx_ring *cring = &ctx->rings[ring->idx];
	uint64_t seq = cring->sequence;
	unsigned idx = seq % AMDGPU_CTX_MAX_CS_PENDING;
	struct fence *other = cring->fences[idx];

	if (other) {
		signed long r;

		/* the slot is being reused, wait for the oldest fence first */
		r = fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT);
		if (r < 0)
			DRM_ERROR("Error (%ld) waiting for fence!\n", r);
	}

	fence_get(fence);

	spin_lock(&ctx->ring_lock);
	cring->fences[idx] = fence;
	cring->sequence++;
	spin_unlock(&ctx->ring_lock);

	fence_put(other);

	return seq;
}

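/*
 * Look up a previously added fence by sequence number. Returns
 * ERR_PTR(-EINVAL) for sequence numbers that were never emitted, NULL
 * for fences that already aged out of the fence array (and are thus
 * signaled), or a new fence reference the caller must put.
 */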
struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
				   struct amdgpu_ring *ring, uint64_t seq)
{
	struct amdgpu_ctx_ring *cring = &ctx->rings[ring->idx];
	struct fence *fence;

	spin_lock(&ctx->ring_lock);
	if (seq >= cring->sequence) {
		spin_unlock(&ctx->ring_lock);
		return ERR_PTR(-EINVAL);
	}

	if (seq + AMDGPU_CTX_MAX_CS_PENDING < cring->sequence) {
		spin_unlock(&ctx->ring_lock);
		return NULL;
	}

	fence = fence_get(cring->fences[seq % AMDGPU_CTX_MAX_CS_PENDING]);
	spin_unlock(&ctx->ring_lock);

	return fence;
}