/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"

int radeon_debugfs_ib_init(struct radeon_device *rdev);
int radeon_debugfs_ring_init(struct radeon_device *rdev);

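/* Fetch the dword at index @idx from the parser's IB chunk. The chunk is
 * accessed through two cached kernel page mappings (kpage[0]/kpage[1]);
 * if the needed page is not mapped yet, radeon_cs_update_pages() maps it.
 * On mapping failure, p->parser_error is set and 0 is returned.
 */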
u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
{
	struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
	u32 pg_idx, pg_offset;
	u32 idx_value = 0;
	int new_page;

	pg_idx = (idx * 4) / PAGE_SIZE;
	pg_offset = (idx * 4) % PAGE_SIZE;

	if (ibc->kpage_idx[0] == pg_idx)
		return ibc->kpage[0][pg_offset/4];
	if (ibc->kpage_idx[1] == pg_idx)
		return ibc->kpage[1][pg_offset/4];

	new_page = radeon_cs_update_pages(p, pg_idx);
	if (new_page < 0) {
		p->parser_error = new_page;
		return 0;
	}

	idx_value = ibc->kpage[new_page][pg_offset/4];
	return idx_value;
}

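/* Copy one dword into the ring buffer at the current write pointer and
 * advance wptr (wrapping via ptr_mask). The caller must have reserved
 * space with radeon_ring_lock()/radeon_ring_alloc() beforehand.
 */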
void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
{
#if DRM_DEBUG_CODE
	if (ring->count_dw <= 0) {
		DRM_ERROR("radeon: writing more dwords to the ring than expected!\n");
	}
#endif
	ring->ring[ring->wptr++] = v;
	ring->wptr &= ring->ptr_mask;
	ring->count_dw--;
	ring->ring_free_dw--;
}

/*
 * IB.
 */
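/* Release the fence and SA backing of an IB, but only if its fence has
 * been emitted and has already signaled. Returns true if the IB was freed.
 */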
bool radeon_ib_try_free(struct radeon_device *rdev, struct radeon_ib *ib)
{
	bool done = false;

	/* only free IBs which have been emitted */
	if (ib->fence && ib->fence->emitted) {
		if (radeon_fence_signaled(ib->fence)) {
			radeon_fence_unref(&ib->fence);
			radeon_sa_bo_free(rdev, &ib->sa_bo);
			done = true;
		}
	}
	return done;
}

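/* Allocate an IB from the pool for the given ring. A fence is created up
 * front; the pool is then scanned (starting at head_id) for a slot whose
 * previous fence has signaled and whose SA allocation of @size bytes
 * (aligned to 256) succeeds. If every slot is still busy, wait for the
 * oldest emitted fence and retry, giving up after 5 attempts.
 */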
int radeon_ib_get(struct radeon_device *rdev, int ring,
		  struct radeon_ib **ib, unsigned size)
{
	struct radeon_fence *fence;
	unsigned cretry = 0;
	int r = 0, i, idx;

	*ib = NULL;
	/* align size on 256 bytes */
	size = ALIGN(size, 256);

	r = radeon_fence_create(rdev, &fence, ring);
	if (r) {
		dev_err(rdev->dev, "failed to create fence for new IB\n");
		return r;
	}

	radeon_mutex_lock(&rdev->ib_pool.mutex);
	idx = rdev->ib_pool.head_id;
retry:
	if (cretry > 5) {
		dev_err(rdev->dev, "failed to get an ib after 5 retries\n");
		radeon_mutex_unlock(&rdev->ib_pool.mutex);
		radeon_fence_unref(&fence);
		return -ENOMEM;
	}
	cretry++;
	for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
		radeon_ib_try_free(rdev, &rdev->ib_pool.ibs[idx]);
		if (rdev->ib_pool.ibs[idx].fence == NULL) {
			r = radeon_sa_bo_new(rdev, &rdev->ib_pool.sa_manager,
					     &rdev->ib_pool.ibs[idx].sa_bo,
					     size, 256);
			if (!r) {
				*ib = &rdev->ib_pool.ibs[idx];
				(*ib)->ptr = rdev->ib_pool.sa_manager.cpu_ptr;
				(*ib)->ptr += ((*ib)->sa_bo.offset >> 2);
				(*ib)->gpu_addr = rdev->ib_pool.sa_manager.gpu_addr;
				(*ib)->gpu_addr += (*ib)->sa_bo.offset;
				(*ib)->fence = fence;
				(*ib)->vm_id = 0;
				(*ib)->is_const_ib = false;
				/* IBs are most likely to be allocated in a ring
				 * fashion, thus rdev->ib_pool.head_id should be
				 * the id of the oldest IB
				 */
				rdev->ib_pool.head_id = (1 + idx);
				rdev->ib_pool.head_id &= (RADEON_IB_POOL_SIZE - 1);
				radeon_mutex_unlock(&rdev->ib_pool.mutex);
				return 0;
			}
		}
		idx = (idx + 1) & (RADEON_IB_POOL_SIZE - 1);
	}
	/* this should be a rare event, i.e. all IBs are scheduled and none
	 * has signaled yet.
	 */
	for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
		if (rdev->ib_pool.ibs[idx].fence && rdev->ib_pool.ibs[idx].fence->emitted) {
			r = radeon_fence_wait(rdev->ib_pool.ibs[idx].fence, false);
			if (!r) {
				goto retry;
			}
			/* an error happened */
			break;
		}
		idx = (idx + 1) & (RADEON_IB_POOL_SIZE - 1);
	}
	radeon_mutex_unlock(&rdev->ib_pool.mutex);
	radeon_fence_unref(&fence);
	return r;
}

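/* Return an IB to the pool. If its fence was never emitted, the SA
 * allocation and the fence are released immediately; otherwise the slot
 * is reclaimed later by radeon_ib_try_free() once the fence signals.
 */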
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
{
	struct radeon_ib *tmp = *ib;

	*ib = NULL;
	if (tmp == NULL) {
		return;
	}
	radeon_mutex_lock(&rdev->ib_pool.mutex);
	if (tmp->fence && !tmp->fence->emitted) {
		radeon_sa_bo_free(rdev, &tmp->sa_bo);
		radeon_fence_unref(&tmp->fence);
	}
	radeon_mutex_unlock(&rdev->ib_pool.mutex);
}

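/* Submit an IB to its ring: reserve space on the ring (64 dwords is
 * enough for the IB dispatch plus the fence), have the ASIC-specific
 * code emit the IB packet, emit the fence and commit the ring.
 */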
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->fence->ring];
	int r = 0;

	if (!ib->length_dw || !ring->ready) {
		/* TODO: Nothing is in the IB; we should report this. */
		DRM_ERROR("radeon: couldn't schedule IB(%u).\n", ib->idx);
		return -EINVAL;
	}

	/* 64 dwords should be enough for the fence too */
	r = radeon_ring_lock(rdev, ring, 64);
	if (r) {
		DRM_ERROR("radeon: scheduling IB failed (%d).\n", r);
		return r;
	}
	radeon_ring_ib_execute(rdev, ib->fence->ring, ib);
	radeon_fence_emit(rdev, ib->fence);
	radeon_ring_unlock_commit(rdev, ring);
	return 0;
}

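/* One-time initialization of the IB pool: create the suballocator
 * (RADEON_IB_POOL_SIZE * 64KB of GTT memory) that backs all IBs, reset
 * every pool slot, then register the debugfs files.
 */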
int radeon_ib_pool_init(struct radeon_device *rdev)
{
	struct radeon_sa_manager tmp;
	int i, r;

	r = radeon_sa_bo_manager_init(rdev, &tmp,
				      RADEON_IB_POOL_SIZE*64*1024,
				      RADEON_GEM_DOMAIN_GTT);
	if (r) {
		return r;
	}

	radeon_mutex_lock(&rdev->ib_pool.mutex);
	if (rdev->ib_pool.ready) {
		radeon_mutex_unlock(&rdev->ib_pool.mutex);
		radeon_sa_bo_manager_fini(rdev, &tmp);
		return 0;
	}

	rdev->ib_pool.sa_manager = tmp;
	INIT_LIST_HEAD(&rdev->ib_pool.sa_manager.sa_bo);
	for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
		rdev->ib_pool.ibs[i].fence = NULL;
		rdev->ib_pool.ibs[i].idx = i;
		rdev->ib_pool.ibs[i].length_dw = 0;
		INIT_LIST_HEAD(&rdev->ib_pool.ibs[i].sa_bo.list);
	}
	rdev->ib_pool.head_id = 0;
	rdev->ib_pool.ready = true;
	DRM_INFO("radeon: ib pool ready.\n");

	if (radeon_debugfs_ib_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for IB!\n");
	}
	if (radeon_debugfs_ring_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for rings!\n");
	}
	radeon_mutex_unlock(&rdev->ib_pool.mutex);
	return 0;
}

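/* Tear down the IB pool: drop every slot's SA allocation and fence, then
 * destroy the suballocator backing the pool.
 */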
void radeon_ib_pool_fini(struct radeon_device *rdev)
{
	unsigned i;

	radeon_mutex_lock(&rdev->ib_pool.mutex);
	if (rdev->ib_pool.ready) {
		for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
			radeon_sa_bo_free(rdev, &rdev->ib_pool.ibs[i].sa_bo);
			radeon_fence_unref(&rdev->ib_pool.ibs[i].fence);
		}
		radeon_sa_bo_manager_fini(rdev, &rdev->ib_pool.sa_manager);
		rdev->ib_pool.ready = false;
	}
	radeon_mutex_unlock(&rdev->ib_pool.mutex);
}

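/* Start and suspend simply forward to the SA manager that owns the IB
 * pool's backing buffer object.
 */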
int radeon_ib_pool_start(struct radeon_device *rdev)
{
	return radeon_sa_bo_manager_start(rdev, &rdev->ib_pool.sa_manager);
}

int radeon_ib_pool_suspend(struct radeon_device *rdev)
{
	return radeon_sa_bo_manager_suspend(rdev, &rdev->ib_pool.sa_manager);
}

/*
 * Ring.
 */
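/* Map a ring pointer back to its index. Pre-R600 parts only have the GFX
 * CP ring; Cayman and later add two more CP rings (CP1 and CP2).
 */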
int radeon_ring_index(struct radeon_device *rdev, struct radeon_ring *ring)
{
	/* r1xx-r5xx only has CP ring */
	if (rdev->family < CHIP_R600)
		return RADEON_RING_TYPE_GFX_INDEX;

	if (rdev->family >= CHIP_CAYMAN) {
		if (ring == &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX])
			return CAYMAN_RING_TYPE_CP1_INDEX;
		else if (ring == &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX])
			return CAYMAN_RING_TYPE_CP2_INDEX;
	}
	return RADEON_RING_TYPE_GFX_INDEX;
}

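/* Refresh ring->ring_free_dw from the current hardware (or writeback)
 * read pointer. An empty ring reports ring_size / 4 free dwords.
 */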
void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *ring)
{
	u32 rptr;

	if (rdev->wb.enabled)
		rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
	else
		rptr = RREG32(ring->rptr_reg);
	ring->rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
	/* This works because ring_size is a power of 2 */
	ring->ring_free_dw = (ring->rptr + (ring->ring_size / 4));
	ring->ring_free_dw -= ring->wptr;
	ring->ring_free_dw &= ring->ptr_mask;
	if (!ring->ring_free_dw) {
		ring->ring_free_dw = ring->ring_size / 4;
	}
}

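/* Reserve @ndw dwords of ring space (rounded up to the ring's alignment
 * so unlock_commit can pad safely). If there is not enough room, wait for
 * the next fence on this ring to signal and re-check the free size.
 */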
int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
{
	int r;

	/* Align requested size with padding so unlock_commit can
	 * pad safely */
	ndw = (ndw + ring->align_mask) & ~ring->align_mask;
	while (ndw > (ring->ring_free_dw - 1)) {
		radeon_ring_free_size(rdev, ring);
		if (ndw < ring->ring_free_dw) {
			break;
		}
		r = radeon_fence_wait_next(rdev, radeon_ring_index(rdev, ring));
		if (r)
			return r;
	}
	ring->count_dw = ndw;
	ring->wptr_old = ring->wptr;
	return 0;
}

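/* Same as radeon_ring_alloc(), but takes the ring mutex. Pair with
 * radeon_ring_unlock_commit() or radeon_ring_unlock_undo().
 */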
int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
{
	int r;

	mutex_lock(&ring->mutex);
	r = radeon_ring_alloc(rdev, ring, ndw);
	if (r) {
		mutex_unlock(&ring->mutex);
		return r;
	}
	return 0;
}

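/* Pad the ring with NOPs up to the fetch-size alignment, then publish the
 * new write pointer to the hardware (the read-back flushes the write).
 */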
void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring)
{
	unsigned count_dw_pad;
	unsigned i;

	/* We pad to match fetch size */
	count_dw_pad = (ring->align_mask + 1) -
		       (ring->wptr & ring->align_mask);
	for (i = 0; i < count_dw_pad; i++) {
		radeon_ring_write(ring, ring->nop);
	}
	DRM_MEMORYBARRIER();
	WREG32(ring->wptr_reg, (ring->wptr << ring->ptr_reg_shift) & ring->ptr_reg_mask);
	(void)RREG32(ring->wptr_reg);
}

void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *ring)
{
	radeon_ring_commit(rdev, ring);
	mutex_unlock(&ring->mutex);
}

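/* Discard whatever was written since radeon_ring_lock() by restoring the
 * saved write pointer, then drop the ring mutex.
 */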
void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *ring)
{
	ring->wptr = ring->wptr_old;
	mutex_unlock(&ring->mutex);
}

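/* Set up a ring: record the register layout passed in by the ASIC code,
 * then allocate, pin and map the ring buffer object in GTT memory on
 * first use.
 */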
int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size,
		     unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg,
		     u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop)
{
	int r;

	ring->ring_size = ring_size;
	ring->rptr_offs = rptr_offs;
	ring->rptr_reg = rptr_reg;
	ring->wptr_reg = wptr_reg;
	ring->ptr_reg_shift = ptr_reg_shift;
	ring->ptr_reg_mask = ptr_reg_mask;
	ring->nop = nop;
	/* Allocate ring buffer */
	if (ring->ring_obj == NULL) {
		r = radeon_bo_create(rdev, ring->ring_size, PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT,
				     &ring->ring_obj);
		if (r) {
			dev_err(rdev->dev, "(%d) ring create failed\n", r);
			return r;
		}
		r = radeon_bo_reserve(ring->ring_obj, false);
		if (unlikely(r != 0))
			return r;
		r = radeon_bo_pin(ring->ring_obj, RADEON_GEM_DOMAIN_GTT,
				  &ring->gpu_addr);
		if (r) {
			radeon_bo_unreserve(ring->ring_obj);
			dev_err(rdev->dev, "(%d) ring pin failed\n", r);
			return r;
		}
		r = radeon_bo_kmap(ring->ring_obj,
				   (void **)&ring->ring);
		radeon_bo_unreserve(ring->ring_obj);
		if (r) {
			dev_err(rdev->dev, "(%d) ring map failed\n", r);
			return r;
		}
	}
	ring->ptr_mask = (ring->ring_size / 4) - 1;
	ring->ring_free_dw = ring->ring_size / 4;
	return 0;
}

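/* Destroy a ring: detach the BO under the ring mutex, then unmap, unpin
 * and drop the last reference outside of it.
 */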
void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *ring)
{
	int r;
	struct radeon_bo *ring_obj;

	mutex_lock(&ring->mutex);
	ring_obj = ring->ring_obj;
	ring->ring = NULL;
	ring->ring_obj = NULL;
	mutex_unlock(&ring->mutex);

	if (ring_obj) {
		r = radeon_bo_reserve(ring_obj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(ring_obj);
			radeon_bo_unpin(ring_obj);
			radeon_bo_unreserve(ring_obj);
		}
		radeon_bo_unref(&ring_obj);
	}
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

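/* Dump the state of one ring to debugfs: hardware and driver copies of
 * the pointers, the free space, and the pending dwords between rptr and
 * wptr.
 */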
static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	int ridx = *(int*)node->info_ent->data;
	struct radeon_ring *ring = &rdev->ring[ridx];
	unsigned count, i, j;

	radeon_ring_free_size(rdev, ring);
	count = (ring->ring_size / 4) - ring->ring_free_dw;
	seq_printf(m, "wptr(0x%04x): 0x%08x\n", ring->wptr_reg, RREG32(ring->wptr_reg));
	seq_printf(m, "rptr(0x%04x): 0x%08x\n", ring->rptr_reg, RREG32(ring->rptr_reg));
	seq_printf(m, "driver's copy of the wptr: 0x%08x\n", ring->wptr);
	seq_printf(m, "driver's copy of the rptr: 0x%08x\n", ring->rptr);
	seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
	seq_printf(m, "%u dwords in ring\n", count);
	i = ring->rptr;
	for (j = 0; j <= count; j++) {
		seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]);
		i = (i + 1) & ring->ptr_mask;
	}
	return 0;
}

static int radeon_ring_type_gfx_index = RADEON_RING_TYPE_GFX_INDEX;
static int cayman_ring_type_cp1_index = CAYMAN_RING_TYPE_CP1_INDEX;
static int cayman_ring_type_cp2_index = CAYMAN_RING_TYPE_CP2_INDEX;

static struct drm_info_list radeon_debugfs_ring_info_list[] = {
	{"radeon_ring_gfx", radeon_debugfs_ring_info, 0, &radeon_ring_type_gfx_index},
	{"radeon_ring_cp1", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp1_index},
	{"radeon_ring_cp2", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp2_index},
};

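/* Dump one IB pool slot to debugfs: its fence pointer, its size in dwords
 * and its full contents.
 */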
static int radeon_debugfs_ib_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_ib *ib = &rdev->ib_pool.ibs[*((unsigned*)node->info_ent->data)];
	unsigned i;

	if (ib == NULL) {
		return 0;
	}
	seq_printf(m, "IB %04u\n", ib->idx);
	seq_printf(m, "IB fence %p\n", ib->fence);
	seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
	for (i = 0; i < ib->length_dw; i++) {
		seq_printf(m, "[%05u]=0x%08X\n", i, ib->ptr[i]);
	}
	return 0;
}

static struct drm_info_list radeon_debugfs_ib_list[RADEON_IB_POOL_SIZE];
static char radeon_debugfs_ib_names[RADEON_IB_POOL_SIZE][32];
static unsigned radeon_debugfs_ib_idx[RADEON_IB_POOL_SIZE];
#endif

int radeon_debugfs_ring_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	if (rdev->family >= CHIP_CAYMAN)
		return radeon_debugfs_add_files(rdev, radeon_debugfs_ring_info_list,
						ARRAY_SIZE(radeon_debugfs_ring_info_list));
	else
		return radeon_debugfs_add_files(rdev, radeon_debugfs_ring_info_list, 1);
#else
	return 0;
#endif
}

int radeon_debugfs_ib_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned i;

	for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
		sprintf(radeon_debugfs_ib_names[i], "radeon_ib_%04u", i);
		radeon_debugfs_ib_idx[i] = i;
		radeon_debugfs_ib_list[i].name = radeon_debugfs_ib_names[i];
		radeon_debugfs_ib_list[i].show = &radeon_debugfs_ib_info;
		radeon_debugfs_ib_list[i].driver_features = 0;
		radeon_debugfs_ib_list[i].data = &radeon_debugfs_ib_idx[i];
	}
	return radeon_debugfs_add_files(rdev, radeon_debugfs_ib_list,
					RADEON_IB_POOL_SIZE);
#else
	return 0;
#endif
}