/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/seq_file.h>
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"

int radeon_debugfs_ib_init(struct radeon_device *rdev);

/*
 * IB.
 */
int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib)
{
	struct radeon_fence *fence;
	struct radeon_ib *nib;
	int r = 0, i, c;

	*ib = NULL;
	r = radeon_fence_create(rdev, &fence);
	if (r) {
		dev_err(rdev->dev, "failed to create fence for new IB\n");
		return r;
	}
	mutex_lock(&rdev->ib_pool.mutex);
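	/*
	 * Scan the pool round-robin starting at head_id.  Because
	 * RADEON_IB_POOL_SIZE is a power of two, masking the index with
	 * (RADEON_IB_POOL_SIZE - 1) wraps it cheaply: for illustration,
	 * with a 16-entry pool and head_id == 14 the scan visits
	 * 14, 15, 0, 1, ... until a free IB is found or every entry has
	 * been checked.
	 */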
	for (i = rdev->ib_pool.head_id, c = 0, nib = NULL; c < RADEON_IB_POOL_SIZE; c++, i++) {
		i &= (RADEON_IB_POOL_SIZE - 1);
		if (rdev->ib_pool.ibs[i].free) {
			nib = &rdev->ib_pool.ibs[i];
			break;
		}
	}
	if (nib == NULL) {
		/* This should never happen: it means we have allocated all
		 * IBs and haven't scheduled one yet.  Return EBUSY to
		 * userspace in the hope that a retry of the ioctl has
		 * better luck.
		 */
		dev_err(rdev->dev, "no free indirect buffer!\n");
		mutex_unlock(&rdev->ib_pool.mutex);
		radeon_fence_unref(&fence);
		return -EBUSY;
	}
	rdev->ib_pool.head_id = (nib->idx + 1) & (RADEON_IB_POOL_SIZE - 1);
	nib->free = false;
	if (nib->fence) {
		mutex_unlock(&rdev->ib_pool.mutex);
		r = radeon_fence_wait(nib->fence, false);
		if (r) {
			dev_err(rdev->dev, "error waiting for fence of IB(%u:0x%016lX:%u)\n",
				nib->idx, (unsigned long)nib->gpu_addr, nib->length_dw);
			mutex_lock(&rdev->ib_pool.mutex);
			nib->free = true;
			mutex_unlock(&rdev->ib_pool.mutex);
			radeon_fence_unref(&fence);
			return r;
		}
		mutex_lock(&rdev->ib_pool.mutex);
	}
	radeon_fence_unref(&nib->fence);
	nib->fence = fence;
	nib->length_dw = 0;
	mutex_unlock(&rdev->ib_pool.mutex);
	*ib = nib;
	return 0;
}

void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
{
	struct radeon_ib *tmp = *ib;

	*ib = NULL;
	if (tmp == NULL) {
		return;
	}
	if (!tmp->fence->emited)
		radeon_fence_unref(&tmp->fence);
	mutex_lock(&rdev->ib_pool.mutex);
	tmp->free = true;
	mutex_unlock(&rdev->ib_pool.mutex);
}

int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
{
	int r = 0;

	if (!ib->length_dw || !rdev->cp.ready) {
		/* TODO: Nothing in the IB; we should report this. */
		DRM_ERROR("radeon: couldn't schedule IB(%u).\n", ib->idx);
		return -EINVAL;
	}

	/* 64 dwords should be enough for fence too */
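	/*
	 * The ring space reserved below only needs to cover the IB execute
	 * request plus the dwords emitted for the fence.  Both are assumed
	 * here to be small, so 64 is a generous upper bound rather than an
	 * exact count.
	 */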
	r = radeon_ring_lock(rdev, 64);
	if (r) {
		DRM_ERROR("radeon: scheduling IB failed (%d).\n", r);
		return r;
	}
	radeon_ring_ib_execute(rdev, ib);
	radeon_fence_emit(rdev, ib->fence);
	mutex_lock(&rdev->ib_pool.mutex);
	/* once scheduled, an IB is considered free and protected by the fence */
	ib->free = true;
	mutex_unlock(&rdev->ib_pool.mutex);
	radeon_ring_unlock_commit(rdev);
	return 0;
}

int radeon_ib_pool_init(struct radeon_device *rdev)
{
	void *ptr;
	uint64_t gpu_addr;
	int i;
	int r = 0;

	if (rdev->ib_pool.robj)
		return 0;
	/* Allocate 1M object buffer */
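	/*
	 * The pool is a single buffer object carved into fixed 64 KiB
	 * slots, one per IB.  The 1M figure in the comment above assumes
	 * a 16-entry RADEON_IB_POOL_SIZE: 16 * 64 KiB = 1 MiB.
	 */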
	r = radeon_bo_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024,
			     true, RADEON_GEM_DOMAIN_GTT,
			     &rdev->ib_pool.robj);
	if (r) {
		DRM_ERROR("radeon: failed to create ib pool (%d).\n", r);
		return r;
	}
	r = radeon_bo_reserve(rdev->ib_pool.robj, false);
	if (unlikely(r != 0))
		return r;
	r = radeon_bo_pin(rdev->ib_pool.robj, RADEON_GEM_DOMAIN_GTT, &gpu_addr);
	if (r) {
		radeon_bo_unreserve(rdev->ib_pool.robj);
		DRM_ERROR("radeon: failed to pin ib pool (%d).\n", r);
		return r;
	}
	r = radeon_bo_kmap(rdev->ib_pool.robj, &ptr);
	radeon_bo_unreserve(rdev->ib_pool.robj);
	if (r) {
		DRM_ERROR("radeon: failed to map ib pool (%d).\n", r);
		return r;
	}
	for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
		unsigned offset;

		offset = i * 64 * 1024;
		rdev->ib_pool.ibs[i].gpu_addr = gpu_addr + offset;
		rdev->ib_pool.ibs[i].ptr = ptr + offset;
		rdev->ib_pool.ibs[i].idx = i;
		rdev->ib_pool.ibs[i].length_dw = 0;
		rdev->ib_pool.ibs[i].free = true;
	}
	rdev->ib_pool.head_id = 0;
	rdev->ib_pool.ready = true;
	DRM_INFO("radeon: ib pool ready.\n");
	if (radeon_debugfs_ib_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for IB!\n");
	}
	return r;
}

void radeon_ib_pool_fini(struct radeon_device *rdev)
{
	int r;

	if (!rdev->ib_pool.ready) {
		return;
	}
	mutex_lock(&rdev->ib_pool.mutex);
	if (rdev->ib_pool.robj) {
		r = radeon_bo_reserve(rdev->ib_pool.robj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(rdev->ib_pool.robj);
			radeon_bo_unpin(rdev->ib_pool.robj);
			radeon_bo_unreserve(rdev->ib_pool.robj);
		}
		radeon_bo_unref(&rdev->ib_pool.robj);
		rdev->ib_pool.robj = NULL;
	}
	mutex_unlock(&rdev->ib_pool.mutex);
}


/*
 * Ring.
 */
void radeon_ring_free_size(struct radeon_device *rdev)
{
	if (rdev->family >= CHIP_R600)
		rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
	else
		rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
	/* This works because ring_size is a power of 2 */
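	/*
	 * ring_size is in bytes, so ring_size / 4 is the ring size in
	 * dwords.  With hypothetical numbers, a 4096-dword ring with
	 * rptr == 100 and wptr == 300 gives
	 * (100 + 4096 - 300) & 4095 == 3896 free dwords.  A result of 0
	 * is ambiguous (full or empty); the check below treats it as a
	 * completely free ring.
	 */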
	rdev->cp.ring_free_dw = (rdev->cp.rptr + (rdev->cp.ring_size / 4));
	rdev->cp.ring_free_dw -= rdev->cp.wptr;
	rdev->cp.ring_free_dw &= rdev->cp.ptr_mask;
	if (!rdev->cp.ring_free_dw) {
		rdev->cp.ring_free_dw = rdev->cp.ring_size / 4;
	}
}

int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw)
{
	int r;

	/* Align requested size with padding so unlock_commit can
	 * pad safely */
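	/*
	 * (ndw + align_mask) & ~align_mask rounds ndw up to the next
	 * multiple of the fetch alignment.  With hypothetical values,
	 * align_mask == 15 turns a request for 70 dwords into 80, so the
	 * commit path can always pad the write pointer to an aligned
	 * boundary without exceeding the space reserved here.
	 */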
	ndw = (ndw + rdev->cp.align_mask) & ~rdev->cp.align_mask;
	mutex_lock(&rdev->cp.mutex);
	while (ndw > (rdev->cp.ring_free_dw - 1)) {
		radeon_ring_free_size(rdev);
		if (ndw < rdev->cp.ring_free_dw) {
			break;
		}
		r = radeon_fence_wait_next(rdev);
		if (r) {
			mutex_unlock(&rdev->cp.mutex);
			return r;
		}
	}
	rdev->cp.count_dw = ndw;
	rdev->cp.wptr_old = rdev->cp.wptr;
	return 0;
}

void radeon_ring_unlock_commit(struct radeon_device *rdev)
{
	unsigned count_dw_pad;
	unsigned i;

	/* We pad to match fetch size */
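	/*
	 * Pad the write pointer up to the next fetch boundary with the
	 * type-2 filler packets (2 << 30) written below.  With
	 * hypothetical values, align_mask == 15 and wptr == 70 give
	 * (15 + 1) - (70 & 15) == 10 pad dwords, bringing wptr to 80.
	 */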
	count_dw_pad = (rdev->cp.align_mask + 1) -
		       (rdev->cp.wptr & rdev->cp.align_mask);
	for (i = 0; i < count_dw_pad; i++) {
		radeon_ring_write(rdev, 2 << 30);
	}
	DRM_MEMORYBARRIER();
	radeon_cp_commit(rdev);
	mutex_unlock(&rdev->cp.mutex);
}

void radeon_ring_unlock_undo(struct radeon_device *rdev)
{
	rdev->cp.wptr = rdev->cp.wptr_old;
	mutex_unlock(&rdev->cp.mutex);
}

int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size)
{
	int r;

	rdev->cp.ring_size = ring_size;
	/* Allocate ring buffer */
	if (rdev->cp.ring_obj == NULL) {
		r = radeon_bo_create(rdev, NULL, rdev->cp.ring_size, true,
				     RADEON_GEM_DOMAIN_GTT,
				     &rdev->cp.ring_obj);
		if (r) {
			dev_err(rdev->dev, "(%d) ring create failed\n", r);
			return r;
		}
		r = radeon_bo_reserve(rdev->cp.ring_obj, false);
		if (unlikely(r != 0))
			return r;
		r = radeon_bo_pin(rdev->cp.ring_obj, RADEON_GEM_DOMAIN_GTT,
				  &rdev->cp.gpu_addr);
		if (r) {
			radeon_bo_unreserve(rdev->cp.ring_obj);
			dev_err(rdev->dev, "(%d) ring pin failed\n", r);
			return r;
		}
		r = radeon_bo_kmap(rdev->cp.ring_obj,
				   (void **)&rdev->cp.ring);
		radeon_bo_unreserve(rdev->cp.ring_obj);
		if (r) {
			dev_err(rdev->dev, "(%d) ring map failed\n", r);
			return r;
		}
	}
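	/*
	 * ring_size is in bytes and is expected to be a power of two, so
	 * (ring_size / 4) - 1 is an all-ones mask over the dword index;
	 * radeon_ring_free_size() relies on this for cheap wrap-around.
	 */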
	rdev->cp.ptr_mask = (rdev->cp.ring_size / 4) - 1;
	rdev->cp.ring_free_dw = rdev->cp.ring_size / 4;
	return 0;
}

void radeon_ring_fini(struct radeon_device *rdev)
{
	int r;

	mutex_lock(&rdev->cp.mutex);
	if (rdev->cp.ring_obj) {
		r = radeon_bo_reserve(rdev->cp.ring_obj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(rdev->cp.ring_obj);
			radeon_bo_unpin(rdev->cp.ring_obj);
			radeon_bo_unreserve(rdev->cp.ring_obj);
		}
		radeon_bo_unref(&rdev->cp.ring_obj);
		rdev->cp.ring = NULL;
		rdev->cp.ring_obj = NULL;
	}
	mutex_unlock(&rdev->cp.mutex);
}


/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_ib_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct radeon_ib *ib = node->info_ent->data;
	unsigned i;

	if (ib == NULL) {
		return 0;
	}
	seq_printf(m, "IB %04u\n", ib->idx);
	seq_printf(m, "IB fence %p\n", ib->fence);
	seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
	for (i = 0; i < ib->length_dw; i++) {
		seq_printf(m, "[%05u]=0x%08X\n", i, ib->ptr[i]);
	}
	return 0;
}

static struct drm_info_list radeon_debugfs_ib_list[RADEON_IB_POOL_SIZE];
static char radeon_debugfs_ib_names[RADEON_IB_POOL_SIZE][32];
#endif

int radeon_debugfs_ib_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned i;

	for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
		sprintf(radeon_debugfs_ib_names[i], "radeon_ib_%04u", i);
		radeon_debugfs_ib_list[i].name = radeon_debugfs_ib_names[i];
		radeon_debugfs_ib_list[i].show = &radeon_debugfs_ib_info;
		radeon_debugfs_ib_list[i].driver_features = 0;
		radeon_debugfs_ib_list[i].data = &rdev->ib_pool.ibs[i];
	}
	return radeon_debugfs_add_files(rdev, radeon_debugfs_ib_list,
					RADEON_IB_POOL_SIZE);
#else
	return 0;
#endif
}