/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: monk liu <monk.liu@amd.com>
 */

#include <drm/drmP.h>
#include "amdgpu.h"

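/*
 * Final kref release callback: drop the context from the owning file's
 * handle idr and free it.
 */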
static void amdgpu_ctx_do_release(struct kref *ref)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;

	ctx = container_of(ref, struct amdgpu_ctx, refcount);
	mgr = &ctx->fpriv->ctx_mgr;

	idr_remove(&mgr->ctx_handles, ctx->id);
	kfree(ctx);
}

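/*
 * Allocate a new context, register it in the per-file handle idr and
 * return the new handle through *id.  The flags argument is not used yet.
 */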
int amdgpu_ctx_alloc(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv,
		     uint32_t *id, uint32_t flags)
{
	int r;
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	mutex_lock(&mgr->lock);
	r = idr_alloc(&mgr->ctx_handles, ctx, 0, 0, GFP_KERNEL);
	if (r < 0) {
		mutex_unlock(&mgr->lock);
		kfree(ctx);
		return r;
	}
	*id = (uint32_t)r;

	memset(ctx, 0, sizeof(*ctx));
	ctx->id = *id;
	ctx->fpriv = fpriv;
	kref_init(&ctx->refcount);
	mutex_unlock(&mgr->lock);

	return 0;
}

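/*
 * Release the handle's reference on a context; the context is destroyed
 * once its last reference is dropped.  Returns -EINVAL if no context
 * with the given id exists for this file.
 */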
int amdgpu_ctx_free(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv,
		    uint32_t id)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;

	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (ctx) {
		kref_put(&ctx->refcount, amdgpu_ctx_do_release);
		mutex_unlock(&mgr->lock);
		return 0;
	}
	mutex_unlock(&mgr->lock);
	return -EINVAL;
}

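/*
 * Report the context state to userspace: the stored flags/hangs values
 * and whether a GPU reset has happened since the previous query.
 */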
static int amdgpu_ctx_query(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv, uint32_t id,
			    union drm_amdgpu_ctx_out *out)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	unsigned reset_counter;

	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	/* TODO: these two are always zero */
	out->state.flags = ctx->state.flags;
	out->state.hangs = ctx->state.hangs;

	/* determine if a GPU reset has occurred since the last call */
	reset_counter = atomic_read(&adev->gpu_reset_counter);
	/* TODO: this should ideally return NO, GUILTY, or INNOCENT. */
	if (ctx->reset_counter == reset_counter)
		out->state.reset_status = AMDGPU_CTX_NO_RESET;
	else
		out->state.reset_status = AMDGPU_CTX_UNKNOWN_RESET;
	ctx->reset_counter = reset_counter;

	mutex_unlock(&mgr->lock);
	return 0;
}

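/*
 * Release every context still registered in the file's context manager
 * and destroy its lock; complains if a context still has extra references.
 */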
void amdgpu_ctx_fini(struct amdgpu_fpriv *fpriv)
{
	struct idr *idp;
	struct amdgpu_ctx *ctx;
	uint32_t id;
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;

	idp = &mgr->ctx_handles;

	idr_for_each_entry(idp, ctx, id) {
		if (kref_put(&ctx->refcount, amdgpu_ctx_do_release) != 1)
			DRM_ERROR("ctx (id=%u) is still alive\n", ctx->id);
	}

	mutex_destroy(&mgr->lock);
}

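/*
 * Handler for the amdgpu context ioctl: dispatches the ALLOC_CTX,
 * FREE_CTX and QUERY_STATE operations requested by userspace.
 */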
int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *filp)
{
	int r;
	uint32_t id;
	uint32_t flags;

	union drm_amdgpu_ctx *args = data;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;

	r = 0;
	id = args->in.ctx_id;
	flags = args->in.flags;

	switch (args->in.op) {
	case AMDGPU_CTX_OP_ALLOC_CTX:
		r = amdgpu_ctx_alloc(adev, fpriv, &id, flags);
		args->out.alloc.ctx_id = id;
		break;
	case AMDGPU_CTX_OP_FREE_CTX:
		r = amdgpu_ctx_free(adev, fpriv, id);
		break;
	case AMDGPU_CTX_OP_QUERY_STATE:
		r = amdgpu_ctx_query(adev, fpriv, id, &args->out);
		break;
	default:
		return -EINVAL;
	}

	return r;
}

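/*
 * Look up a context by handle and take an extra reference on it.
 * Returns NULL if the handle is not valid for this file; the caller
 * must balance a successful lookup with amdgpu_ctx_put().
 */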
struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;

	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (ctx)
		kref_get(&ctx->refcount);
	mutex_unlock(&mgr->lock);
	return ctx;
}

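/*
 * Drop a reference taken with amdgpu_ctx_get().  Returns 0 on success
 * or -EINVAL if ctx is NULL.
 */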
int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
{
	struct amdgpu_fpriv *fpriv;
	struct amdgpu_ctx_mgr *mgr;

	if (ctx == NULL)
		return -EINVAL;

	fpriv = ctx->fpriv;
	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	kref_put(&ctx->refcount, amdgpu_ctx_do_release);
	mutex_unlock(&mgr->lock);

	return 0;
}