/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: monk liu <monk.liu@amd.com>
 */

#include <drm/drmP.h>
#include "amdgpu.h"

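/*
 * Final kref release callback: drops every fence still tracked on each
 * ring, tears down the per-ring scheduler entities when the scheduler is
 * enabled, and frees the context.
 */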
static void amdgpu_ctx_do_release(struct kref *ref)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_device *adev;
	unsigned i, j;

	ctx = container_of(ref, struct amdgpu_ctx, refcount);
	adev = ctx->adev;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		for (j = 0; j < AMDGPU_CTX_MAX_CS_PENDING; ++j)
			fence_put(ctx->rings[i].fences[j]);

	if (amdgpu_enable_scheduler) {
		for (i = 0; i < adev->num_rings; i++)
			amd_context_entity_fini(adev->rings[i]->scheduler,
						&ctx->rings[i].c_entity);
	}

	kfree(ctx);
}

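/*
 * Common initialization for a freshly allocated context: zero the
 * structure, take the initial reference and start every ring's
 * sequence numbering at 1.
 */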
static void amdgpu_ctx_init(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv,
			    struct amdgpu_ctx *ctx,
			    uint32_t id)
{
	int i;

	memset(ctx, 0, sizeof(*ctx));
	ctx->adev = adev;
	kref_init(&ctx->refcount);
	spin_lock_init(&ctx->ring_lock);
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		ctx->rings[i].sequence = 1;
}

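/*
 * Allocate a new context.  For a userspace client (fpriv != NULL) the
 * context is registered in the file-private IDR and the handle is
 * returned through *id; otherwise the single per-device kernel context
 * is created under the fixed AMD_KERNEL_CONTEXT_ID.  With the scheduler
 * enabled, a context entity is also set up on every ring.
 */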
int amdgpu_ctx_alloc(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv,
		     uint32_t *id)
{
	struct amdgpu_ctx *ctx;
	int i, j, r;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	if (fpriv) {
		struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;

		mutex_lock(&mgr->lock);
		r = idr_alloc(&mgr->ctx_handles, ctx, 1, 0, GFP_KERNEL);
		if (r < 0) {
			mutex_unlock(&mgr->lock);
			kfree(ctx);
			return r;
		}
		*id = (uint32_t)r;
		amdgpu_ctx_init(adev, fpriv, ctx, *id);
		mutex_unlock(&mgr->lock);
	} else {
		if (adev->kernel_ctx) {
			DRM_ERROR("kernel context has already been created.\n");
			kfree(ctx);
			return 0;
		}
		*id = AMD_KERNEL_CONTEXT_ID;
		amdgpu_ctx_init(adev, fpriv, ctx, *id);

		adev->kernel_ctx = ctx;
	}

	if (amdgpu_enable_scheduler) {
		/* create a context entity for each ring */
		for (i = 0; i < adev->num_rings; i++) {
			struct amd_run_queue *rq;

			if (fpriv)
				rq = &adev->rings[i]->scheduler->sched_rq;
			else
				rq = &adev->rings[i]->scheduler->kernel_rq;
			r = amd_context_entity_init(adev->rings[i]->scheduler,
						    &ctx->rings[i].c_entity,
						    NULL, rq, *id);
			if (r)
				break;
		}

		if (i < adev->num_rings) {
			for (j = 0; j < i; j++)
				amd_context_entity_fini(adev->rings[j]->scheduler,
							&ctx->rings[j].c_entity);
			kfree(ctx);
			return -EINVAL;
		}
	}

	return 0;
}

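/*
 * Drop the reference held by a context handle: userspace contexts are
 * removed from the IDR first; the kernel context (fpriv == NULL) is
 * simply released.
 */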
int amdgpu_ctx_free(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx *ctx;

	if (fpriv) {
		struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;

		mutex_lock(&mgr->lock);
		ctx = idr_find(&mgr->ctx_handles, id);
		if (ctx) {
			idr_remove(&mgr->ctx_handles, id);
			kref_put(&ctx->refcount, amdgpu_ctx_do_release);
			mutex_unlock(&mgr->lock);
			return 0;
		}
		mutex_unlock(&mgr->lock);
	} else {
		ctx = adev->kernel_ctx;
		kref_put(&ctx->refcount, amdgpu_ctx_do_release);
		return 0;
	}
	return -EINVAL;
}

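/*
 * Fill in context state for userspace, in particular whether a GPU
 * reset occurred since the last query of this context.
 */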
static int amdgpu_ctx_query(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv, uint32_t id,
			    union drm_amdgpu_ctx_out *out)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;
	unsigned reset_counter;

	if (!fpriv)
		return -EINVAL;

	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	/* TODO: these two are always zero */
	out->state.flags = 0x0;
	out->state.hangs = 0x0;

	/* determine if a GPU reset has occurred since the last call */
	reset_counter = atomic_read(&adev->gpu_reset_counter);
	/* TODO: this should ideally return NO, GUILTY, or INNOCENT. */
	if (ctx->reset_counter == reset_counter)
		out->state.reset_status = AMDGPU_CTX_NO_RESET;
	else
		out->state.reset_status = AMDGPU_CTX_UNKNOWN_RESET;
	ctx->reset_counter = reset_counter;

	mutex_unlock(&mgr->lock);
	return 0;
}

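/*
 * Tear down a client's context manager on file close: release every
 * context still registered, then destroy the IDR and its lock.
 */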
void amdgpu_ctx_fini(struct amdgpu_fpriv *fpriv)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct idr *idp = &mgr->ctx_handles;
	struct amdgpu_ctx *ctx;
	uint32_t id;

	idr_for_each_entry(idp, ctx, id) {
		if (kref_put(&ctx->refcount, amdgpu_ctx_do_release) != 1)
			DRM_ERROR("ctx %p is still alive\n", ctx);
	}

	idr_destroy(&mgr->ctx_handles);
	mutex_destroy(&mgr->lock);
}

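/* ioctl entry point dispatching the alloc/free/query context operations */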
int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *filp)
{
	int r;
	uint32_t id;

	union drm_amdgpu_ctx *args = data;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;

	r = 0;
	id = args->in.ctx_id;

	switch (args->in.op) {
	case AMDGPU_CTX_OP_ALLOC_CTX:
		r = amdgpu_ctx_alloc(adev, fpriv, &id);
		args->out.alloc.ctx_id = id;
		break;
	case AMDGPU_CTX_OP_FREE_CTX:
		r = amdgpu_ctx_free(adev, fpriv, id);
		break;
	case AMDGPU_CTX_OP_QUERY_STATE:
		r = amdgpu_ctx_query(adev, fpriv, id, &args->out);
		break;
	default:
		return -EINVAL;
	}

	return r;
}

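/*
 * Look up a context by handle and take an extra reference on it;
 * callers must balance this with amdgpu_ctx_put().
 */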
struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;

	if (!fpriv)
		return NULL;

	mgr = &fpriv->ctx_mgr;

	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (ctx)
		kref_get(&ctx->refcount);
	mutex_unlock(&mgr->lock);
	return ctx;
}

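/* Release a reference previously taken with amdgpu_ctx_get() */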
int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
{
	if (ctx == NULL)
		return -EINVAL;

	kref_put(&ctx->refcount, amdgpu_ctx_do_release);
	return 0;
}

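/*
 * Record the fence for a new submission on this ring.  Only the last
 * AMDGPU_CTX_MAX_CS_PENDING fences per ring are tracked, so the fence
 * that previously occupied the slot being recycled is waited on and
 * released before it is replaced.
 */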
uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
			      struct fence *fence)
{
	struct amdgpu_ctx_ring *cring = &ctx->rings[ring->idx];
	uint64_t seq = 0;
	unsigned idx = 0;
	struct fence *other = NULL;

	if (amdgpu_enable_scheduler)
		seq = atomic64_read(&cring->c_entity.last_queued_v_seq);
	else
		seq = cring->sequence;
	idx = seq % AMDGPU_CTX_MAX_CS_PENDING;
	other = cring->fences[idx];
	if (other) {
		signed long r;

		r = fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT);
		if (r < 0)
			DRM_ERROR("Error (%ld) waiting for fence!\n", r);
	}

	fence_get(fence);

	spin_lock(&ctx->ring_lock);
	cring->fences[idx] = fence;
	if (!amdgpu_enable_scheduler)
		cring->sequence++;
	spin_unlock(&ctx->ring_lock);

	fence_put(other);

	return seq;
}

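/*
 * Return a reference to the fence of a previous submission, ERR_PTR(-EINVAL)
 * for a sequence number that was never submitted, or NULL when the fence has
 * already aged out of the AMDGPU_CTX_MAX_CS_PENDING tracking window.
 */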
struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
				   struct amdgpu_ring *ring, uint64_t seq)
{
	struct amdgpu_ctx_ring *cring = &ctx->rings[ring->idx];
	struct fence *fence;
	uint64_t queued_seq;
	int r;

	if (amdgpu_enable_scheduler) {
		r = amd_sched_wait_emit(&cring->c_entity,
					seq,
					true,
					AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS);
		if (r)
			return NULL;
	}

	spin_lock(&ctx->ring_lock);
	if (amdgpu_enable_scheduler)
		queued_seq = atomic64_read(&cring->c_entity.last_queued_v_seq) + 1;
	else
		queued_seq = cring->sequence;

	if (seq >= queued_seq) {
		spin_unlock(&ctx->ring_lock);
		return ERR_PTR(-EINVAL);
	}

	if (seq + AMDGPU_CTX_MAX_CS_PENDING < queued_seq) {
		spin_unlock(&ctx->ring_lock);
		return NULL;
	}

	fence = fence_get(cring->fences[seq % AMDGPU_CTX_MAX_CS_PENDING]);
	spin_unlock(&ctx->ring_lock);

	return fence;
}