/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"

int radeon_debugfs_ib_init(struct radeon_device *rdev);

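/*
 * Release every IB snapshot still sitting on the bogus list, freeing
 * both the copied command buffer and the list entry itself.
 */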
void radeon_ib_bogus_cleanup(struct radeon_device *rdev)
{
        struct radeon_ib *ib, *n;

        list_for_each_entry_safe(ib, n, &rdev->ib_pool.bogus_ib, list) {
                list_del(&ib->list);
                vfree(ib->ptr);
                kfree(ib);
        }
}

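/*
 * Take a snapshot of a faulty IB: duplicate its contents into a freshly
 * allocated buffer and queue the copy on the bogus list so it can be
 * dumped later through debugfs. Allocation failures are silently
 * ignored since this is purely a debugging aid.
 */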
void radeon_ib_bogus_add(struct radeon_device *rdev, struct radeon_ib *ib)
{
        struct radeon_ib *bib;

        bib = kmalloc(sizeof(*bib), GFP_KERNEL);
        if (bib == NULL)
                return;
        bib->ptr = vmalloc(ib->length_dw * 4);
        if (bib->ptr == NULL) {
                kfree(bib);
                return;
        }
        memcpy(bib->ptr, ib->ptr, ib->length_dw * 4);
        bib->length_dw = ib->length_dw;
        mutex_lock(&rdev->ib_pool.mutex);
        list_add_tail(&bib->list, &rdev->ib_pool.bogus_ib);
        mutex_unlock(&rdev->ib_pool.mutex);
}

/*
 * IB.
 */
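/*
 * Hand out a free IB from the pool. The pool is scanned as a circular
 * buffer starting at head_id; if the chosen IB still carries a fence
 * from a previous use we wait on it before reusing the buffer. A new
 * fence is created up front and attached to the IB on success.
 */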
int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib)
{
        struct radeon_fence *fence;
        struct radeon_ib *nib;
        int r = 0, i, c;

        *ib = NULL;
        r = radeon_fence_create(rdev, &fence);
        if (r) {
                dev_err(rdev->dev, "failed to create fence for new IB\n");
                return r;
        }
        mutex_lock(&rdev->ib_pool.mutex);
        for (i = rdev->ib_pool.head_id, c = 0, nib = NULL; c < RADEON_IB_POOL_SIZE; c++, i++) {
                i &= (RADEON_IB_POOL_SIZE - 1);
                if (rdev->ib_pool.ibs[i].free) {
                        nib = &rdev->ib_pool.ibs[i];
                        break;
                }
        }
        if (nib == NULL) {
                /* This should never happen: it means all IBs are
                 * allocated and none has been scheduled yet. Return
                 * EBUSY so userspace can retry the ioctl and hopefully
                 * have better luck next time.
                 */
                dev_err(rdev->dev, "no free indirect buffer!\n");
                mutex_unlock(&rdev->ib_pool.mutex);
                radeon_fence_unref(&fence);
                return -EBUSY;
        }
        rdev->ib_pool.head_id = (nib->idx + 1) & (RADEON_IB_POOL_SIZE - 1);
        nib->free = false;
        if (nib->fence) {
                mutex_unlock(&rdev->ib_pool.mutex);
                r = radeon_fence_wait(nib->fence, false);
                if (r) {
                        dev_err(rdev->dev, "error waiting on fence of IB(%u:0x%016lX:%u)\n",
                                nib->idx, (unsigned long)nib->gpu_addr, nib->length_dw);
                        mutex_lock(&rdev->ib_pool.mutex);
                        nib->free = true;
                        mutex_unlock(&rdev->ib_pool.mutex);
                        radeon_fence_unref(&fence);
                        return r;
                }
                mutex_lock(&rdev->ib_pool.mutex);
        }
        radeon_fence_unref(&nib->fence);
        nib->fence = fence;
        nib->length_dw = 0;
        mutex_unlock(&rdev->ib_pool.mutex);
        *ib = nib;
        return 0;
}

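/*
 * Return an IB to the pool. If the fence attached to the IB was never
 * emitted, drop our reference to it here; otherwise the fence is what
 * protects the IB contents until the GPU is done with them.
 */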
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
{
        struct radeon_ib *tmp = *ib;

        *ib = NULL;
        if (tmp == NULL) {
                return;
        }
        if (!tmp->fence->emited)
                radeon_fence_unref(&tmp->fence);
        mutex_lock(&rdev->ib_pool.mutex);
        tmp->free = true;
        mutex_unlock(&rdev->ib_pool.mutex);
}

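/*
 * Submit an IB to the hardware: reserve space on the ring, ask the CP
 * to execute the IB and emit its fence right behind it. Once the fence
 * is on the ring the IB slot can be marked free again, since the fence
 * now protects its contents from premature reuse.
 */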
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
{
        int r = 0;

        if (!ib->length_dw || !rdev->cp.ready) {
                /* TODO: nothing in the IB; we should report this. */
                DRM_ERROR("radeon: couldn't schedule IB(%u).\n", ib->idx);
                return -EINVAL;
        }

        /* 64 dwords should be enough for the fence too */
        r = radeon_ring_lock(rdev, 64);
        if (r) {
                DRM_ERROR("radeon: scheduling IB failed (%d).\n", r);
                return r;
        }
        radeon_ring_ib_execute(rdev, ib);
        radeon_fence_emit(rdev, ib->fence);
        mutex_lock(&rdev->ib_pool.mutex);
        /* once scheduled, an IB is considered free and protected by the fence */
        ib->free = true;
        mutex_unlock(&rdev->ib_pool.mutex);
        radeon_ring_unlock_commit(rdev);
        return 0;
}

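/*
 * Set up the IB pool: allocate a single pinned GTT buffer object large
 * enough for RADEON_IB_POOL_SIZE IBs of 64KB each, map it, and carve it
 * into fixed 64KB slots that share the mapping and the GPU address.
 */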
int radeon_ib_pool_init(struct radeon_device *rdev)
{
        void *ptr;
        uint64_t gpu_addr;
        int i;
        int r = 0;

        if (rdev->ib_pool.robj)
                return 0;
        INIT_LIST_HEAD(&rdev->ib_pool.bogus_ib);
        /* Allocate 1M object buffer */
        r = radeon_bo_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024,
                             true, RADEON_GEM_DOMAIN_GTT,
                             &rdev->ib_pool.robj);
        if (r) {
                DRM_ERROR("radeon: failed to create ib pool (%d).\n", r);
                return r;
        }
        r = radeon_bo_reserve(rdev->ib_pool.robj, false);
        if (unlikely(r != 0))
                return r;
        r = radeon_bo_pin(rdev->ib_pool.robj, RADEON_GEM_DOMAIN_GTT, &gpu_addr);
        if (r) {
                radeon_bo_unreserve(rdev->ib_pool.robj);
                DRM_ERROR("radeon: failed to pin ib pool (%d).\n", r);
                return r;
        }
        r = radeon_bo_kmap(rdev->ib_pool.robj, &ptr);
        radeon_bo_unreserve(rdev->ib_pool.robj);
        if (r) {
                DRM_ERROR("radeon: failed to map ib pool (%d).\n", r);
                return r;
        }
        for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
                unsigned offset;

                offset = i * 64 * 1024;
                rdev->ib_pool.ibs[i].gpu_addr = gpu_addr + offset;
                rdev->ib_pool.ibs[i].ptr = ptr + offset;
                rdev->ib_pool.ibs[i].idx = i;
                rdev->ib_pool.ibs[i].length_dw = 0;
                rdev->ib_pool.ibs[i].free = true;
        }
        rdev->ib_pool.head_id = 0;
        rdev->ib_pool.ready = true;
        DRM_INFO("radeon: ib pool ready.\n");
        if (radeon_debugfs_ib_init(rdev)) {
                DRM_ERROR("Failed to register debugfs file for IB!\n");
        }
        return r;
}

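/*
 * Tear down the IB pool. The buffer object is detached from the pool
 * under the mutex and then unmapped, unpinned and unreferenced outside
 * of it, so the BO reservation is never taken with the pool mutex held.
 */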
void radeon_ib_pool_fini(struct radeon_device *rdev)
{
        int r;
        struct radeon_bo *robj;

        if (!rdev->ib_pool.ready) {
                return;
        }
        mutex_lock(&rdev->ib_pool.mutex);
        radeon_ib_bogus_cleanup(rdev);
        robj = rdev->ib_pool.robj;
        rdev->ib_pool.robj = NULL;
        mutex_unlock(&rdev->ib_pool.mutex);

        if (robj) {
                r = radeon_bo_reserve(robj, false);
                if (likely(r == 0)) {
                        radeon_bo_kunmap(robj);
                        radeon_bo_unpin(robj);
                        radeon_bo_unreserve(robj);
                }
                radeon_bo_unref(&robj);
        }
}

/*
 * Ring.
 */
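/*
 * Refresh the cached read pointer from the hardware and recompute how
 * many dwords are free between writer and reader. Because the ring
 * size is a power of two, masking with ptr_mask makes the subtraction
 * wrap correctly; e.g. with a 1024-dword ring, rptr = 16 and
 * wptr = 1008 leave (16 + 1024 - 1008) & 1023 = 32 free dwords.
 */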
void radeon_ring_free_size(struct radeon_device *rdev)
{
        if (rdev->family >= CHIP_R600)
                rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
        else
                rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
        /* This works because ring_size is a power of 2 */
        rdev->cp.ring_free_dw = (rdev->cp.rptr + (rdev->cp.ring_size / 4));
        rdev->cp.ring_free_dw -= rdev->cp.wptr;
        rdev->cp.ring_free_dw &= rdev->cp.ptr_mask;
        if (!rdev->cp.ring_free_dw) {
                rdev->cp.ring_free_dw = rdev->cp.ring_size / 4;
        }
}

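/*
 * Reserve ndw dwords on the ring. The request is rounded up to the
 * fetch alignment so that commit can pad safely, and while the ring is
 * too full we block on the next fence until the GPU has consumed
 * enough of it.
 */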
int radeon_ring_alloc(struct radeon_device *rdev, unsigned ndw)
{
        int r;

        /* Align requested size with padding so unlock_commit can
         * pad safely */
        ndw = (ndw + rdev->cp.align_mask) & ~rdev->cp.align_mask;
        while (ndw > (rdev->cp.ring_free_dw - 1)) {
                radeon_ring_free_size(rdev);
                if (ndw < rdev->cp.ring_free_dw) {
                        break;
                }
                r = radeon_fence_wait_next(rdev);
                if (r)
                        return r;
        }
        rdev->cp.count_dw = ndw;
        rdev->cp.wptr_old = rdev->cp.wptr;
        return 0;
}

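/*
 * Same as radeon_ring_alloc() but takes the ring mutex first; the
 * caller ends the transaction with radeon_ring_unlock_commit() or
 * radeon_ring_unlock_undo().
 */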
int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw)
{
        int r;

        mutex_lock(&rdev->cp.mutex);
        r = radeon_ring_alloc(rdev, ndw);
        if (r) {
                mutex_unlock(&rdev->cp.mutex);
                return r;
        }
        return 0;
}

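/*
 * Pad the ring up to the CP fetch alignment with filler dwords
 * (2 << 30 encodes a type-2 CP packet, which the CP treats as a NOP),
 * make sure the writes are visible, then hand the new write pointer to
 * the CP.
 */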
void radeon_ring_commit(struct radeon_device *rdev)
{
        unsigned count_dw_pad;
        unsigned i;

        /* We pad to match fetch size */
        count_dw_pad = (rdev->cp.align_mask + 1) -
                       (rdev->cp.wptr & rdev->cp.align_mask);
        for (i = 0; i < count_dw_pad; i++) {
                radeon_ring_write(rdev, 2 << 30);
        }
        DRM_MEMORYBARRIER();
        radeon_cp_commit(rdev);
}

void radeon_ring_unlock_commit(struct radeon_device *rdev)
{
        radeon_ring_commit(rdev);
        mutex_unlock(&rdev->cp.mutex);
}

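/*
 * Abort a ring transaction: restore the write pointer saved by
 * radeon_ring_alloc() so the dwords written since then are discarded.
 */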
void radeon_ring_unlock_undo(struct radeon_device *rdev)
{
        rdev->cp.wptr = rdev->cp.wptr_old;
        mutex_unlock(&rdev->cp.mutex);
}

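/*
 * Allocate the ring buffer as a pinned GTT buffer object, map it for
 * CPU access and initialize the bookkeeping (ptr_mask relies on the
 * ring size in dwords being a power of two).
 */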
int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size)
{
        int r;

        rdev->cp.ring_size = ring_size;
        /* Allocate ring buffer */
        if (rdev->cp.ring_obj == NULL) {
                r = radeon_bo_create(rdev, NULL, rdev->cp.ring_size, true,
                                     RADEON_GEM_DOMAIN_GTT,
                                     &rdev->cp.ring_obj);
                if (r) {
                        dev_err(rdev->dev, "(%d) ring create failed\n", r);
                        return r;
                }
                r = radeon_bo_reserve(rdev->cp.ring_obj, false);
                if (unlikely(r != 0))
                        return r;
                r = radeon_bo_pin(rdev->cp.ring_obj, RADEON_GEM_DOMAIN_GTT,
                                  &rdev->cp.gpu_addr);
                if (r) {
                        radeon_bo_unreserve(rdev->cp.ring_obj);
                        dev_err(rdev->dev, "(%d) ring pin failed\n", r);
                        return r;
                }
                r = radeon_bo_kmap(rdev->cp.ring_obj,
                                   (void **)&rdev->cp.ring);
                radeon_bo_unreserve(rdev->cp.ring_obj);
                if (r) {
                        dev_err(rdev->dev, "(%d) ring map failed\n", r);
                        return r;
                }
        }
        rdev->cp.ptr_mask = (rdev->cp.ring_size / 4) - 1;
        rdev->cp.ring_free_dw = rdev->cp.ring_size / 4;
        return 0;
}

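/*
 * Tear down the ring buffer. As in radeon_ib_pool_fini(), the object
 * is detached under the mutex and released outside of it.
 */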
void radeon_ring_fini(struct radeon_device *rdev)
{
        int r;
        struct radeon_bo *ring_obj;

        mutex_lock(&rdev->cp.mutex);
        ring_obj = rdev->cp.ring_obj;
        rdev->cp.ring = NULL;
        rdev->cp.ring_obj = NULL;
        mutex_unlock(&rdev->cp.mutex);

        if (ring_obj) {
                r = radeon_bo_reserve(ring_obj, false);
                if (likely(r == 0)) {
                        radeon_bo_kunmap(ring_obj);
                        radeon_bo_unpin(ring_obj);
                        radeon_bo_unreserve(ring_obj);
                }
                radeon_bo_unref(&ring_obj);
        }
}


/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)
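/*
 * Dump one pool IB: its index, fence pointer, size and raw dwords.
 */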
static int radeon_debugfs_ib_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct radeon_ib *ib = node->info_ent->data;
        unsigned i;

        if (ib == NULL) {
                return 0;
        }
        seq_printf(m, "IB %04u\n", ib->idx);
        seq_printf(m, "IB fence %p\n", ib->fence);
        seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
        for (i = 0; i < ib->length_dw; i++) {
                seq_printf(m, "[%05u]=0x%08X\n", i, ib->ptr[i]);
        }
        return 0;
}

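/*
 * Dump the oldest recorded bogus IB. Note that reading this file is
 * destructive: the snapshot is removed from the list and freed once it
 * has been printed.
 */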
static int radeon_debugfs_ib_bogus_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct radeon_device *rdev = node->info_ent->data;
        struct radeon_ib *ib;
        unsigned i;

        mutex_lock(&rdev->ib_pool.mutex);
        if (list_empty(&rdev->ib_pool.bogus_ib)) {
                mutex_unlock(&rdev->ib_pool.mutex);
                seq_printf(m, "no bogus IB recorded\n");
                return 0;
        }
        ib = list_first_entry(&rdev->ib_pool.bogus_ib, struct radeon_ib, list);
        list_del_init(&ib->list);
        mutex_unlock(&rdev->ib_pool.mutex);
        seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
        for (i = 0; i < ib->length_dw; i++) {
                seq_printf(m, "[%05u]=0x%08X\n", i, ib->ptr[i]);
        }
        vfree(ib->ptr);
        kfree(ib);
        return 0;
}

static struct drm_info_list radeon_debugfs_ib_list[RADEON_IB_POOL_SIZE];
static char radeon_debugfs_ib_names[RADEON_IB_POOL_SIZE][32];

static struct drm_info_list radeon_debugfs_ib_bogus_info_list[] = {
        {"radeon_ib_bogus", radeon_debugfs_ib_bogus_info, 0, NULL},
};
#endif

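/*
 * Register the debugfs files: one entry for the bogus IB dump and one
 * per IB slot in the pool. Compiles to a no-op without CONFIG_DEBUG_FS.
 */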
int radeon_debugfs_ib_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
        unsigned i;
        int r;

        radeon_debugfs_ib_bogus_info_list[0].data = rdev;
        r = radeon_debugfs_add_files(rdev, radeon_debugfs_ib_bogus_info_list, 1);
        if (r)
                return r;
        for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
                sprintf(radeon_debugfs_ib_names[i], "radeon_ib_%04u", i);
                radeon_debugfs_ib_list[i].name = radeon_debugfs_ib_names[i];
                radeon_debugfs_ib_list[i].show = &radeon_debugfs_ib_info;
                radeon_debugfs_ib_list[i].driver_features = 0;
                radeon_debugfs_ib_list[i].data = &rdev->ib_pool.ibs[i];
        }
        return radeon_debugfs_add_files(rdev, radeon_debugfs_ib_list,
                                        RADEON_IB_POOL_SIZE);
#else
        return 0;
#endif
}