/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"

int radeon_debugfs_ib_init(struct radeon_device *rdev);
int radeon_debugfs_ring_init(struct radeon_device *rdev);

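/*
 * radeon_get_ib_value - fetch one dword of an IB chunk for the CS parser.
 *
 * The chunk data is accessed through a two-entry page cache
 * (kpage[0]/kpage[1]).  On a cache miss, the page containing @idx is
 * mapped in via radeon_cs_update_pages(); on failure, p->parser_error
 * is set and 0 is returned.
 */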
u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
{
	struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
	u32 pg_idx, pg_offset;
	u32 idx_value = 0;
	int new_page;

	pg_idx = (idx * 4) / PAGE_SIZE;
	pg_offset = (idx * 4) % PAGE_SIZE;

	if (ibc->kpage_idx[0] == pg_idx)
		return ibc->kpage[0][pg_offset/4];
	if (ibc->kpage_idx[1] == pg_idx)
		return ibc->kpage[1][pg_offset/4];

	new_page = radeon_cs_update_pages(p, pg_idx);
	if (new_page < 0) {
		p->parser_error = new_page;
		return 0;
	}

	idx_value = ibc->kpage[new_page][pg_offset/4];
	return idx_value;
}

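/*
 * radeon_ring_write - copy one dword into the ring buffer at the current
 * write pointer and advance it, wrapping with ptr_mask.  With
 * DRM_DEBUG_CODE enabled, writing past the space reserved by
 * radeon_ring_alloc()/radeon_ring_lock() triggers an error message.
 */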
void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
{
#if DRM_DEBUG_CODE
	if (ring->count_dw <= 0) {
		DRM_ERROR("radeon: writing more dwords to the ring than expected!\n");
	}
#endif
	ring->ring[ring->wptr++] = v;
	ring->wptr &= ring->ptr_mask;
	ring->count_dw--;
	ring->ring_free_dw--;
}

/*
 * IB.
 */
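/*
 * radeon_ib_try_free - release an IB's suballocation and fence once the
 * fence has been emitted and has signaled.  Returns true when the IB was
 * actually freed.
 */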
bool radeon_ib_try_free(struct radeon_device *rdev, struct radeon_ib *ib)
{
	bool done = false;

	/* only free IBs which have already been emitted */
	if (ib->fence && ib->fence->emitted) {
		if (radeon_fence_signaled(ib->fence)) {
			radeon_fence_unref(&ib->fence);
			radeon_sa_bo_free(rdev, &ib->sa_bo);
			done = true;
		}
	}
	return done;
}

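/*
 * radeon_ib_get - allocate an IB of @size bytes (rounded up to 256) for
 * the given @ring out of the suballocator-backed pool.
 *
 * The pool is scanned starting at head_id, reclaiming IBs whose fences
 * have signaled along the way.  If no slot is free, the function waits
 * for the next emitted fence and retries, giving up with -ENOMEM after
 * 5 attempts.
 */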
int radeon_ib_get(struct radeon_device *rdev, int ring,
		  struct radeon_ib **ib, unsigned size)
{
	struct radeon_fence *fence;
	unsigned cretry = 0;
	int r = 0, i, idx;

	*ib = NULL;
	/* align size to 256 bytes */
	size = ALIGN(size, 256);

	r = radeon_fence_create(rdev, &fence, ring);
	if (r) {
		dev_err(rdev->dev, "failed to create fence for new IB\n");
		return r;
	}

	mutex_lock(&rdev->ib_pool.mutex);
	idx = rdev->ib_pool.head_id;
retry:
	if (cretry > 5) {
		dev_err(rdev->dev, "failed to get an IB after 5 retries\n");
		mutex_unlock(&rdev->ib_pool.mutex);
		radeon_fence_unref(&fence);
		return -ENOMEM;
	}
	cretry++;
	for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
		radeon_ib_try_free(rdev, &rdev->ib_pool.ibs[idx]);
		if (rdev->ib_pool.ibs[idx].fence == NULL) {
			r = radeon_sa_bo_new(rdev, &rdev->ib_pool.sa_manager,
					     &rdev->ib_pool.ibs[idx].sa_bo,
					     size, 256);
			if (!r) {
				*ib = &rdev->ib_pool.ibs[idx];
				(*ib)->ptr = rdev->ib_pool.sa_manager.cpu_ptr;
				(*ib)->ptr += ((*ib)->sa_bo.offset >> 2);
				(*ib)->gpu_addr = rdev->ib_pool.sa_manager.gpu_addr;
				(*ib)->gpu_addr += (*ib)->sa_bo.offset;
				(*ib)->fence = fence;
				/* IBs are most likely allocated in a ring fashion,
				 * thus rdev->ib_pool.head_id should be the id of
				 * the oldest IB
				 */
				rdev->ib_pool.head_id = (1 + idx);
				rdev->ib_pool.head_id &= (RADEON_IB_POOL_SIZE - 1);
				mutex_unlock(&rdev->ib_pool.mutex);
				return 0;
			}
		}
		idx = (idx + 1) & (RADEON_IB_POOL_SIZE - 1);
	}
	/* This should be a rare event, i.e. all IBs are scheduled but
	 * none have signaled yet.
	 */
	for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
		if (rdev->ib_pool.ibs[idx].fence && rdev->ib_pool.ibs[idx].fence->emitted) {
			r = radeon_fence_wait(rdev->ib_pool.ibs[idx].fence, false);
			if (!r) {
				goto retry;
			}
			/* an error happened */
			break;
		}
		idx = (idx + 1) & (RADEON_IB_POOL_SIZE - 1);
	}
	mutex_unlock(&rdev->ib_pool.mutex);
	radeon_fence_unref(&fence);
	return r;
}

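/*
 * radeon_ib_free - return an IB to the pool.  If its fence was never
 * emitted, the suballocation and fence are released immediately;
 * otherwise reclamation is deferred until radeon_ib_try_free() sees the
 * fence signal.
 */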
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
{
	struct radeon_ib *tmp = *ib;

	*ib = NULL;
	if (tmp == NULL) {
		return;
	}
	mutex_lock(&rdev->ib_pool.mutex);
	if (tmp->fence && !tmp->fence->emitted) {
		radeon_sa_bo_free(rdev, &tmp->sa_bo);
		radeon_fence_unref(&tmp->fence);
	}
	mutex_unlock(&rdev->ib_pool.mutex);
}

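/*
 * radeon_ib_schedule - submit an IB to its ring: reserve ring space,
 * have the ASIC-specific code emit the IB execute packet, emit the IB's
 * fence and commit the ring.
 */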
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->fence->ring];
	int r = 0;

	if (!ib->length_dw || !ring->ready) {
		/* TODO: nothing is in the IB; we should report this properly. */
		DRM_ERROR("radeon: couldn't schedule IB(%u).\n", ib->idx);
		return -EINVAL;
	}

	/* 64 dwords should be enough for the fence too */
	r = radeon_ring_lock(rdev, ring, 64);
	if (r) {
		DRM_ERROR("radeon: scheduling IB failed (%d).\n", r);
		return r;
	}
	radeon_ring_ib_execute(rdev, ib->fence->ring, ib);
	radeon_fence_emit(rdev, ib->fence);
	radeon_ring_unlock_commit(rdev, ring);
	return 0;
}

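/*
 * radeon_ib_pool_init - one-time setup of the IB pool: create the
 * suballocator backing store in GTT (RADEON_IB_POOL_SIZE * 64KB), reset
 * every pool entry and register the debugfs files.
 */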
int radeon_ib_pool_init(struct radeon_device *rdev)
{
	int i, r;

	mutex_lock(&rdev->ib_pool.mutex);
	if (rdev->ib_pool.ready) {
		mutex_unlock(&rdev->ib_pool.mutex);
		return 0;
	}

	r = radeon_sa_bo_manager_init(rdev, &rdev->ib_pool.sa_manager,
				      RADEON_IB_POOL_SIZE*64*1024,
				      RADEON_GEM_DOMAIN_GTT);
	if (r) {
		mutex_unlock(&rdev->ib_pool.mutex);
		return r;
	}

	for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
		rdev->ib_pool.ibs[i].fence = NULL;
		rdev->ib_pool.ibs[i].idx = i;
		rdev->ib_pool.ibs[i].length_dw = 0;
		INIT_LIST_HEAD(&rdev->ib_pool.ibs[i].sa_bo.list);
	}
	rdev->ib_pool.head_id = 0;
	rdev->ib_pool.ready = true;
	DRM_INFO("radeon: ib pool ready.\n");

	if (radeon_debugfs_ib_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for IB!\n");
	}
	if (radeon_debugfs_ring_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for rings!\n");
	}
	mutex_unlock(&rdev->ib_pool.mutex);
	return 0;
}

void radeon_ib_pool_fini(struct radeon_device *rdev)
{
	unsigned i;

	mutex_lock(&rdev->ib_pool.mutex);
	if (rdev->ib_pool.ready) {
		for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
			radeon_sa_bo_free(rdev, &rdev->ib_pool.ibs[i].sa_bo);
			radeon_fence_unref(&rdev->ib_pool.ibs[i].fence);
		}
		radeon_sa_bo_manager_fini(rdev, &rdev->ib_pool.sa_manager);
		rdev->ib_pool.ready = false;
	}
	mutex_unlock(&rdev->ib_pool.mutex);
}

int radeon_ib_pool_start(struct radeon_device *rdev)
{
	return radeon_sa_bo_manager_start(rdev, &rdev->ib_pool.sa_manager);
}

int radeon_ib_pool_suspend(struct radeon_device *rdev)
{
	return radeon_sa_bo_manager_suspend(rdev, &rdev->ib_pool.sa_manager);
}

/*
 * Ring.
 */
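/*
 * radeon_ring_index - map a ring pointer back to its index in rdev->ring.
 * Pre-R600 ASICs only have the GFX CP ring; Cayman and newer also expose
 * the two additional CP rings (CP1/CP2).
 */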
int radeon_ring_index(struct radeon_device *rdev, struct radeon_ring *ring)
{
	/* r1xx-r5xx only have the GFX CP ring */
	if (rdev->family < CHIP_R600)
		return RADEON_RING_TYPE_GFX_INDEX;

	if (rdev->family >= CHIP_CAYMAN) {
		if (ring == &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX])
			return CAYMAN_RING_TYPE_CP1_INDEX;
		else if (ring == &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX])
			return CAYMAN_RING_TYPE_CP2_INDEX;
	}
	return RADEON_RING_TYPE_GFX_INDEX;
}

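/*
 * radeon_ring_free_size - refresh ring->ring_free_dw from the current
 * hardware read pointer (taken from the writeback page when enabled,
 * otherwise from the rptr register).  The modular arithmetic relies on
 * the ring size being a power of two.
 */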
void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *ring)
{
	u32 rptr;

	if (rdev->wb.enabled)
		rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
	else
		rptr = RREG32(ring->rptr_reg);
	ring->rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
	/* This works because ring_size is a power of 2 */
	ring->ring_free_dw = (ring->rptr + (ring->ring_size / 4));
	ring->ring_free_dw -= ring->wptr;
	ring->ring_free_dw &= ring->ptr_mask;
	if (!ring->ring_free_dw) {
		ring->ring_free_dw = ring->ring_size / 4;
	}
}

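/*
 * radeon_ring_alloc - reserve @ndw dwords of ring space (rounded up to
 * the ring's alignment).  If the ring is too full, wait for the next
 * fence on that ring to signal and re-evaluate until the request fits.
 */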
int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
{
	int r;

	/* Align requested size with padding so unlock_commit can
	 * pad safely */
	ndw = (ndw + ring->align_mask) & ~ring->align_mask;
	while (ndw > (ring->ring_free_dw - 1)) {
		radeon_ring_free_size(rdev, ring);
		if (ndw < ring->ring_free_dw) {
			break;
		}
		r = radeon_fence_wait_next(rdev, radeon_ring_index(rdev, ring));
		if (r)
			return r;
	}
	ring->count_dw = ndw;
	ring->wptr_old = ring->wptr;
	return 0;
}

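/*
 * radeon_ring_lock - take the ring mutex and reserve @ndw dwords.
 *
 * A minimal sketch of the usual call pattern (the dword values are
 * placeholders):
 *
 *	r = radeon_ring_lock(rdev, ring, 2);
 *	if (r)
 *		return r;
 *	radeon_ring_write(ring, header_dword);
 *	radeon_ring_write(ring, payload_dword);
 *	radeon_ring_unlock_commit(rdev, ring);
 *
 * On a failure after locking, radeon_ring_unlock_undo() restores the
 * old write pointer instead of committing.
 */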
int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
{
	int r;

	mutex_lock(&ring->mutex);
	r = radeon_ring_alloc(rdev, ring, ndw);
	if (r) {
		mutex_unlock(&ring->mutex);
		return r;
	}
	return 0;
}

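/*
 * radeon_ring_commit - pad the ring with NOPs up to the fetch-size
 * alignment, then make the new write pointer visible to the GPU
 * (memory barrier, wptr register write, readback to flush).
 */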
void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring)
{
	unsigned count_dw_pad;
	unsigned i;

	/* We pad to match fetch size */
	count_dw_pad = (ring->align_mask + 1) -
		       (ring->wptr & ring->align_mask);
	for (i = 0; i < count_dw_pad; i++) {
		radeon_ring_write(ring, ring->nop);
	}
	DRM_MEMORYBARRIER();
	WREG32(ring->wptr_reg, (ring->wptr << ring->ptr_reg_shift) & ring->ptr_reg_mask);
	(void)RREG32(ring->wptr_reg);
}

void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *ring)
{
	radeon_ring_commit(rdev, ring);
	mutex_unlock(&ring->mutex);
}

void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *ring)
{
	ring->wptr = ring->wptr_old;
	mutex_unlock(&ring->mutex);
}

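/*
 * radeon_ring_init - record the ring's register layout and allocate,
 * pin and map its buffer object in GTT (only on first use; a ring that
 * already has a buffer object keeps it across re-initialization).
 */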
int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size,
		     unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg,
		     u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop)
{
	int r;

	ring->ring_size = ring_size;
	ring->rptr_offs = rptr_offs;
	ring->rptr_reg = rptr_reg;
	ring->wptr_reg = wptr_reg;
	ring->ptr_reg_shift = ptr_reg_shift;
	ring->ptr_reg_mask = ptr_reg_mask;
	ring->nop = nop;
	/* Allocate ring buffer */
	if (ring->ring_obj == NULL) {
		r = radeon_bo_create(rdev, ring->ring_size, PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT,
				     &ring->ring_obj);
		if (r) {
			dev_err(rdev->dev, "(%d) ring create failed\n", r);
			return r;
		}
		r = radeon_bo_reserve(ring->ring_obj, false);
		if (unlikely(r != 0))
			return r;
		r = radeon_bo_pin(ring->ring_obj, RADEON_GEM_DOMAIN_GTT,
				  &ring->gpu_addr);
		if (r) {
			radeon_bo_unreserve(ring->ring_obj);
			dev_err(rdev->dev, "(%d) ring pin failed\n", r);
			return r;
		}
		r = radeon_bo_kmap(ring->ring_obj,
				   (void **)&ring->ring);
		radeon_bo_unreserve(ring->ring_obj);
		if (r) {
			dev_err(rdev->dev, "(%d) ring map failed\n", r);
			return r;
		}
	}
	ring->ptr_mask = (ring->ring_size / 4) - 1;
	ring->ring_free_dw = ring->ring_size / 4;
	return 0;
}

void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *ring)
{
	int r;
	struct radeon_bo *ring_obj;

	mutex_lock(&ring->mutex);
	ring_obj = ring->ring_obj;
	ring->ring = NULL;
	ring->ring_obj = NULL;
	mutex_unlock(&ring->mutex);

	if (ring_obj) {
		r = radeon_bo_reserve(ring_obj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(ring_obj);
			radeon_bo_unpin(ring_obj);
			radeon_bo_unreserve(ring_obj);
		}
		radeon_bo_unref(&ring_obj);
	}
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

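/*
 * Debugfs callback: dump a ring's hardware and driver read/write
 * pointers, the free-dword count and the pending dwords between rptr
 * and wptr.
 */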
static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	int ridx = *(int*)node->info_ent->data;
	struct radeon_ring *ring = &rdev->ring[ridx];
	unsigned count, i, j;

	radeon_ring_free_size(rdev, ring);
	count = (ring->ring_size / 4) - ring->ring_free_dw;
	seq_printf(m, "wptr(0x%04x): 0x%08x\n", ring->wptr_reg, RREG32(ring->wptr_reg));
	seq_printf(m, "rptr(0x%04x): 0x%08x\n", ring->rptr_reg, RREG32(ring->rptr_reg));
	seq_printf(m, "driver's copy of the wptr: 0x%08x\n", ring->wptr);
	seq_printf(m, "driver's copy of the rptr: 0x%08x\n", ring->rptr);
	seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
	seq_printf(m, "%u dwords in ring\n", count);
	i = ring->rptr;
	for (j = 0; j <= count; j++) {
		seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]);
		i = (i + 1) & ring->ptr_mask;
	}
	return 0;
}

static int radeon_ring_type_gfx_index = RADEON_RING_TYPE_GFX_INDEX;
static int cayman_ring_type_cp1_index = CAYMAN_RING_TYPE_CP1_INDEX;
static int cayman_ring_type_cp2_index = CAYMAN_RING_TYPE_CP2_INDEX;

static struct drm_info_list radeon_debugfs_ring_info_list[] = {
	{"radeon_ring_gfx", radeon_debugfs_ring_info, 0, &radeon_ring_type_gfx_index},
	{"radeon_ring_cp1", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp1_index},
	{"radeon_ring_cp2", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp2_index},
};

static int radeon_debugfs_ib_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct radeon_ib *ib = node->info_ent->data;
	unsigned i;

	if (ib == NULL) {
		return 0;
	}
	seq_printf(m, "IB %04u\n", ib->idx);
	seq_printf(m, "IB fence %p\n", ib->fence);
	seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
	for (i = 0; i < ib->length_dw; i++) {
		seq_printf(m, "[%05u]=0x%08X\n", i, ib->ptr[i]);
	}
	return 0;
}

static struct drm_info_list radeon_debugfs_ib_list[RADEON_IB_POOL_SIZE];
static char radeon_debugfs_ib_names[RADEON_IB_POOL_SIZE][32];
#endif

int radeon_debugfs_ring_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_ring_info_list,
					ARRAY_SIZE(radeon_debugfs_ring_info_list));
#else
	return 0;
#endif
}

int radeon_debugfs_ib_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned i;

	for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
		sprintf(radeon_debugfs_ib_names[i], "radeon_ib_%04u", i);
		radeon_debugfs_ib_list[i].name = radeon_debugfs_ib_names[i];
		radeon_debugfs_ib_list[i].show = &radeon_debugfs_ib_info;
		radeon_debugfs_ib_list[i].driver_features = 0;
		radeon_debugfs_ib_list[i].data = &rdev->ib_pool.ibs[i];
	}
	return radeon_debugfs_add_files(rdev, radeon_debugfs_ib_list,
					RADEON_IB_POOL_SIZE);
#else
	return 0;
#endif
}