/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"

int radeon_debugfs_ib_init(struct radeon_device *rdev);
int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring);

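/* Fetch dword @idx of the command-stream IB chunk. Two kmapped pages of
 * the chunk are cached (kpage[0]/kpage[1]); on a miss one of them is
 * refilled via radeon_cs_update_pages(). */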
u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
{
	struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
	u32 pg_idx, pg_offset;
	u32 idx_value = 0;
	int new_page;

	pg_idx = (idx * 4) / PAGE_SIZE;
	pg_offset = (idx * 4) % PAGE_SIZE;

	if (ibc->kpage_idx[0] == pg_idx)
		return ibc->kpage[0][pg_offset/4];
	if (ibc->kpage_idx[1] == pg_idx)
		return ibc->kpage[1][pg_offset/4];

	new_page = radeon_cs_update_pages(p, pg_idx);
	if (new_page < 0) {
		p->parser_error = new_page;
		return 0;
	}

	idx_value = ibc->kpage[new_page][pg_offset/4];
	return idx_value;
}

void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
{
#if DRM_DEBUG_CODE
	if (ring->count_dw <= 0) {
		DRM_ERROR("radeon: writing more dwords to the ring than expected!\n");
	}
#endif
	ring->ring[ring->wptr++] = v;
	ring->wptr &= ring->ptr_mask;
	ring->count_dw--;
	ring->ring_free_dw--;
}

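/*
 * Minimal usage sketch (the register/value pair is illustrative only):
 * callers reserve space, emit dwords, then commit:
 *
 *	r = radeon_ring_lock(rdev, ring, 2);
 *	if (r)
 *		return r;
 *	radeon_ring_write(ring, PACKET0(reg, 0));
 *	radeon_ring_write(ring, val);
 *	radeon_ring_unlock_commit(rdev, ring);
 */
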
/*
 * IB.
 */
bool radeon_ib_try_free(struct radeon_device *rdev, struct radeon_ib *ib)
{
	bool done = false;

	/* only free IBs which have been emitted */
	if (ib->fence && ib->fence->seq < RADEON_FENCE_NOTEMITED_SEQ) {
		if (radeon_fence_signaled(ib->fence)) {
			radeon_fence_unref(&ib->fence);
			radeon_sa_bo_free(rdev, &ib->sa_bo, NULL);
			done = true;
		}
	}
	return done;
}

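/* Grab a free IB out of the pool. The pool is scanned round-robin
 * starting at head_id, reclaiming signaled IBs along the way; if every
 * slot is busy we wait on the oldest emitted fence and retry (at most
 * 5 times). */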
int radeon_ib_get(struct radeon_device *rdev, int ring,
		  struct radeon_ib **ib, unsigned size)
{
	struct radeon_fence *fence;
	unsigned cretry = 0;
	int r = 0, i, idx;

	*ib = NULL;
	/* align size to 256 bytes */
	size = ALIGN(size, 256);

	r = radeon_fence_create(rdev, &fence, ring);
	if (r) {
		dev_err(rdev->dev, "failed to create fence for new IB\n");
		return r;
	}

	radeon_mutex_lock(&rdev->ib_pool.mutex);
	idx = rdev->ib_pool.head_id;
retry:
	if (cretry > 5) {
		dev_err(rdev->dev, "failed to get an IB after 5 retries\n");
		radeon_mutex_unlock(&rdev->ib_pool.mutex);
		radeon_fence_unref(&fence);
		return -ENOMEM;
	}
	cretry++;
	for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
		radeon_ib_try_free(rdev, &rdev->ib_pool.ibs[idx]);
		if (rdev->ib_pool.ibs[idx].fence == NULL) {
			r = radeon_sa_bo_new(rdev, &rdev->ib_pool.sa_manager,
					     &rdev->ib_pool.ibs[idx].sa_bo,
					     size, 256, false);
			if (!r) {
				*ib = &rdev->ib_pool.ibs[idx];
				(*ib)->ptr = radeon_sa_bo_cpu_addr((*ib)->sa_bo);
				(*ib)->gpu_addr = radeon_sa_bo_gpu_addr((*ib)->sa_bo);
				(*ib)->fence = fence;
				(*ib)->vm_id = 0;
				(*ib)->is_const_ib = false;
				/* IBs are most likely allocated in a ring
				 * fashion, so rdev->ib_pool.head_id should be
				 * the id of the oldest IB
				 */
				rdev->ib_pool.head_id = (1 + idx);
				rdev->ib_pool.head_id &= (RADEON_IB_POOL_SIZE - 1);
				radeon_mutex_unlock(&rdev->ib_pool.mutex);
				return 0;
			}
		}
		idx = (idx + 1) & (RADEON_IB_POOL_SIZE - 1);
	}
	/* This should be a rare event, i.e. all IBs scheduled but none
	 * signaled yet.
	 */
	for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
		struct radeon_fence *fence = rdev->ib_pool.ibs[idx].fence;
		if (fence && fence->seq < RADEON_FENCE_NOTEMITED_SEQ) {
			r = radeon_fence_wait(fence, false);
			if (!r) {
				goto retry;
			}
			/* an error happened */
			break;
		}
		idx = (idx + 1) & (RADEON_IB_POOL_SIZE - 1);
	}
	radeon_mutex_unlock(&rdev->ib_pool.mutex);
	radeon_fence_unref(&fence);
	return r;
}

void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
{
	struct radeon_ib *tmp = *ib;

	*ib = NULL;
	if (tmp == NULL) {
		return;
	}
	radeon_mutex_lock(&rdev->ib_pool.mutex);
	if (tmp->fence && tmp->fence->seq == RADEON_FENCE_NOTEMITED_SEQ) {
		radeon_sa_bo_free(rdev, &tmp->sa_bo, NULL);
		radeon_fence_unref(&tmp->fence);
	}
	radeon_mutex_unlock(&rdev->ib_pool.mutex);
}

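/* Schedule an IB on the ring its fence was created for: reserve ring
 * space, have the ASIC-specific hook emit the indirect-buffer packet,
 * then emit the IB's fence and commit. */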
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->fence->ring];
	int r = 0;

	if (!ib->length_dw || !ring->ready) {
		/* TODO: Nothing in the IB to schedule; report it. */
		DRM_ERROR("radeon: couldn't schedule IB(%u).\n", ib->idx);
		return -EINVAL;
	}

	/* 64 dwords should be enough for fence too */
	r = radeon_ring_lock(rdev, ring, 64);
	if (r) {
		DRM_ERROR("radeon: scheduling IB failed (%d).\n", r);
		return r;
	}
	radeon_ring_ib_execute(rdev, ib->fence->ring, ib);
	radeon_fence_emit(rdev, ib->fence);
	radeon_ring_unlock_commit(rdev, ring);
	return 0;
}

int radeon_ib_pool_init(struct radeon_device *rdev)
{
	struct radeon_sa_manager tmp;
	int i, r;

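	/* Build the SA manager in a temporary first: the allocation can
	 * then happen without holding the IB pool mutex, and the result
	 * is committed only if the pool isn't already initialized. */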
	r = radeon_sa_bo_manager_init(rdev, &tmp,
				      RADEON_IB_POOL_SIZE*64*1024,
				      RADEON_GEM_DOMAIN_GTT);
	if (r) {
		return r;
	}

	radeon_mutex_lock(&rdev->ib_pool.mutex);
	if (rdev->ib_pool.ready) {
		radeon_mutex_unlock(&rdev->ib_pool.mutex);
		radeon_sa_bo_manager_fini(rdev, &tmp);
		return 0;
	}

	rdev->ib_pool.sa_manager = tmp;
	INIT_LIST_HEAD(&rdev->ib_pool.sa_manager.sa_bo);
	for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
		rdev->ib_pool.ibs[i].fence = NULL;
		rdev->ib_pool.ibs[i].idx = i;
		rdev->ib_pool.ibs[i].length_dw = 0;
		rdev->ib_pool.ibs[i].sa_bo = NULL;
	}
	rdev->ib_pool.head_id = 0;
	rdev->ib_pool.ready = true;
	DRM_INFO("radeon: ib pool ready.\n");

	if (radeon_debugfs_ib_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for IB!\n");
	}
	radeon_mutex_unlock(&rdev->ib_pool.mutex);
	return 0;
}

void radeon_ib_pool_fini(struct radeon_device *rdev)
{
	unsigned i;

	radeon_mutex_lock(&rdev->ib_pool.mutex);
	if (rdev->ib_pool.ready) {
		for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
			radeon_sa_bo_free(rdev, &rdev->ib_pool.ibs[i].sa_bo, NULL);
			radeon_fence_unref(&rdev->ib_pool.ibs[i].fence);
		}
		radeon_sa_bo_manager_fini(rdev, &rdev->ib_pool.sa_manager);
		rdev->ib_pool.ready = false;
	}
	radeon_mutex_unlock(&rdev->ib_pool.mutex);
}

int radeon_ib_pool_start(struct radeon_device *rdev)
{
	return radeon_sa_bo_manager_start(rdev, &rdev->ib_pool.sa_manager);
}

int radeon_ib_pool_suspend(struct radeon_device *rdev)
{
	return radeon_sa_bo_manager_suspend(rdev, &rdev->ib_pool.sa_manager);
}

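/* Run the IB test on every ring that claims to be ready. A failure on
 * the GFX ring is fatal and disables acceleration; a failure on any
 * other ring only marks that ring as not ready. */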
int radeon_ib_ring_tests(struct radeon_device *rdev)
{
	unsigned i;
	int r;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		struct radeon_ring *ring = &rdev->ring[i];

		if (!ring->ready)
			continue;

		r = radeon_ib_test(rdev, i, ring);
		if (r) {
			ring->ready = false;

			if (i == RADEON_RING_TYPE_GFX_INDEX) {
				/* oh, oh, that's really bad */
				DRM_ERROR("radeon: failed testing IB on GFX ring (%d).\n", r);
				rdev->accel_working = false;
				return r;

			} else {
				/* still not good, but we can live with it */
				DRM_ERROR("radeon: failed testing IB on ring %d (%d).\n", i, r);
			}
		}
	}
	return 0;
}

/*
 * Ring.
 */
int radeon_ring_index(struct radeon_device *rdev, struct radeon_ring *ring)
{
	/* r1xx-r5xx only have a CP ring */
	if (rdev->family < CHIP_R600)
		return RADEON_RING_TYPE_GFX_INDEX;

	if (rdev->family >= CHIP_CAYMAN) {
		if (ring == &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX])
			return CAYMAN_RING_TYPE_CP1_INDEX;
		else if (ring == &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX])
			return CAYMAN_RING_TYPE_CP2_INDEX;
	}
	return RADEON_RING_TYPE_GFX_INDEX;
}

void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *ring)
{
	u32 rptr;

	if (rdev->wb.enabled)
		rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
	else
		rptr = RREG32(ring->rptr_reg);
	ring->rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
	/* This works because ring_size is a power of 2 */
	ring->ring_free_dw = (ring->rptr + (ring->ring_size / 4));
	ring->ring_free_dw -= ring->wptr;
	ring->ring_free_dw &= ring->ptr_mask;
	if (!ring->ring_free_dw) {
		ring->ring_free_dw = ring->ring_size / 4;
	}
}
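
/*
 * Worked example for the free-size math above, assuming a 64 byte ring
 * (16 dwords, ptr_mask = 15): with rptr = 4 and wptr = 12,
 * free = (4 + 16 - 12) & 15 = 8 dwords. When rptr == wptr the mask
 * yields 0, which the code corrects to a full ring: equal pointers mean
 * an empty ring, not a full one.
 */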

int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
{
	int r;

	/* Align requested size with padding so unlock_commit can
	 * pad safely */
	ndw = (ndw + ring->align_mask) & ~ring->align_mask;
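	/* Keep one dword of slack so the ring can never fill up
	 * completely; a full ring would make wptr == rptr look like an
	 * empty one. */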
	while (ndw > (ring->ring_free_dw - 1)) {
		radeon_ring_free_size(rdev, ring);
		if (ndw < ring->ring_free_dw) {
			break;
		}
		r = radeon_fence_wait_next_locked(rdev, radeon_ring_index(rdev, ring));
		if (r)
			return r;
	}
	ring->count_dw = ndw;
	ring->wptr_old = ring->wptr;
	return 0;
}

int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
{
	int r;

	mutex_lock(&rdev->ring_lock);
	r = radeon_ring_alloc(rdev, ring, ndw);
	if (r) {
		mutex_unlock(&rdev->ring_lock);
		return r;
	}
	return 0;
}

void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring)
{
	unsigned count_dw_pad;
	unsigned i;

	/* We pad to match fetch size */
	count_dw_pad = (ring->align_mask + 1) -
		       (ring->wptr & ring->align_mask);
	for (i = 0; i < count_dw_pad; i++) {
		radeon_ring_write(ring, ring->nop);
	}
	DRM_MEMORYBARRIER();
	WREG32(ring->wptr_reg, (ring->wptr << ring->ptr_reg_shift) & ring->ptr_reg_mask);
	(void)RREG32(ring->wptr_reg);
}

void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *ring)
{
	radeon_ring_commit(rdev, ring);
	mutex_unlock(&rdev->ring_lock);
}

void radeon_ring_undo(struct radeon_ring *ring)
{
	ring->wptr = ring->wptr_old;
}

void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *ring)
{
	radeon_ring_undo(ring);
	mutex_unlock(&rdev->ring_lock);
}
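
/*
 * radeon_ring_undo() throws away everything written since the matching
 * radeon_ring_alloc()/radeon_ring_lock() call by rewinding wptr to
 * wptr_old; nothing reaches the GPU until radeon_ring_commit() writes
 * the new wptr to wptr_reg.
 */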
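
/* Nudge an idle ring with a single nop so that rptr keeps moving and a
 * subsequent radeon_ring_test_lockup() does not mistake an idle CP for
 * a locked up one. */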
void radeon_ring_force_activity(struct radeon_device *rdev, struct radeon_ring *ring)
{
	int r;

	radeon_ring_free_size(rdev, ring);
	if (ring->rptr == ring->wptr) {
		r = radeon_ring_alloc(rdev, ring, 1);
		if (!r) {
			radeon_ring_write(ring, ring->nop);
			radeon_ring_commit(rdev, ring);
		}
	}
}

void radeon_ring_lockup_update(struct radeon_ring *ring)
{
	ring->last_rptr = ring->rptr;
	ring->last_activity = jiffies;
}

/**
 * radeon_ring_test_lockup() - check if the ring is locked up
 * @rdev: radeon device structure
 * @ring: radeon_ring structure holding ring information
 *
 * The lockup tracking information doesn't need explicit initialization:
 * either the CP rptr will differ from the recorded value, or a jiffies
 * wraparound will force the tracking information to be updated.
 *
 * A lockup is only reported when the rptr has not moved for more than
 * radeon_lockup_timeout milliseconds since the last recorded activity.
 * The caller should write to the ring first (see
 * radeon_ring_force_activity()) to force the CP to do something;
 * otherwise an idle ring is indistinguishable from a locked up one and
 * we would report a false positive.
 */
bool radeon_ring_test_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
	unsigned long cjiffies, elapsed;
	uint32_t rptr;

	cjiffies = jiffies;
	if (!time_after(cjiffies, ring->last_activity)) {
		/* likely a wrap around */
		radeon_ring_lockup_update(ring);
		return false;
	}
	rptr = RREG32(ring->rptr_reg);
	ring->rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
	if (ring->rptr != ring->last_rptr) {
		/* CP is still working, no lockup */
		radeon_ring_lockup_update(ring);
		return false;
	}
	elapsed = jiffies_to_msecs(cjiffies - ring->last_activity);
	if (radeon_lockup_timeout && elapsed >= radeon_lockup_timeout) {
		dev_err(rdev->dev, "GPU lockup CP stall for more than %lumsec\n", elapsed);
		return true;
	}
	/* give a chance to the GPU ... */
	return false;
}
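
/*
 * Sketch of the expected caller pattern (an assumption based on the
 * comment above, not code from this file):
 *
 *	radeon_ring_force_activity(rdev, ring);
 *	if (radeon_ring_test_lockup(rdev, ring)) {
 *		... ask for a GPU reset ...
 *	}
 */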
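
/* Allocate, pin and kmap the ring buffer object in GTT (first call
 * only), then register the per-ring debugfs file. ring_size is in
 * bytes and must be a power of two: both ptr_mask and the free-size
 * math rely on that. */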
int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size,
		     unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg,
		     u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop)
{
	int r;

	ring->ring_size = ring_size;
	ring->rptr_offs = rptr_offs;
	ring->rptr_reg = rptr_reg;
	ring->wptr_reg = wptr_reg;
	ring->ptr_reg_shift = ptr_reg_shift;
	ring->ptr_reg_mask = ptr_reg_mask;
	ring->nop = nop;
	/* Allocate ring buffer */
	if (ring->ring_obj == NULL) {
		r = radeon_bo_create(rdev, ring->ring_size, PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT,
				     &ring->ring_obj);
		if (r) {
			dev_err(rdev->dev, "(%d) ring create failed\n", r);
			return r;
		}
		r = radeon_bo_reserve(ring->ring_obj, false);
		if (unlikely(r != 0))
			return r;
		r = radeon_bo_pin(ring->ring_obj, RADEON_GEM_DOMAIN_GTT,
				  &ring->gpu_addr);
		if (r) {
			radeon_bo_unreserve(ring->ring_obj);
			dev_err(rdev->dev, "(%d) ring pin failed\n", r);
			return r;
		}
		r = radeon_bo_kmap(ring->ring_obj,
				   (void **)&ring->ring);
		radeon_bo_unreserve(ring->ring_obj);
		if (r) {
			dev_err(rdev->dev, "(%d) ring map failed\n", r);
			return r;
		}
	}
	ring->ptr_mask = (ring->ring_size / 4) - 1;
	ring->ring_free_dw = ring->ring_size / 4;
	if (radeon_debugfs_ring_init(rdev, ring)) {
		DRM_ERROR("Failed to register debugfs file for rings!\n");
	}
	return 0;
}

void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *ring)
{
	int r;
	struct radeon_bo *ring_obj;

	mutex_lock(&rdev->ring_lock);
	ring_obj = ring->ring_obj;
	ring->ready = false;
	ring->ring = NULL;
	ring->ring_obj = NULL;
	mutex_unlock(&rdev->ring_lock);

	if (ring_obj) {
		r = radeon_bo_reserve(ring_obj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(ring_obj);
			radeon_bo_unpin(ring_obj);
			radeon_bo_unreserve(ring_obj);
		}
		radeon_bo_unref(&ring_obj);
	}
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	int ridx = *(int*)node->info_ent->data;
	struct radeon_ring *ring = &rdev->ring[ridx];
	unsigned count, i, j;

	radeon_ring_free_size(rdev, ring);
	count = (ring->ring_size / 4) - ring->ring_free_dw;
	seq_printf(m, "wptr(0x%04x): 0x%08x\n", ring->wptr_reg, RREG32(ring->wptr_reg));
	seq_printf(m, "rptr(0x%04x): 0x%08x\n", ring->rptr_reg, RREG32(ring->rptr_reg));
	seq_printf(m, "driver's copy of the wptr: 0x%08x\n", ring->wptr);
	seq_printf(m, "driver's copy of the rptr: 0x%08x\n", ring->rptr);
	seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
	seq_printf(m, "%u dwords in ring\n", count);
	i = ring->rptr;
	for (j = 0; j <= count; j++) {
		seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]);
		i = (i + 1) & ring->ptr_mask;
	}
	return 0;
}

static int radeon_ring_type_gfx_index = RADEON_RING_TYPE_GFX_INDEX;
static int cayman_ring_type_cp1_index = CAYMAN_RING_TYPE_CP1_INDEX;
static int cayman_ring_type_cp2_index = CAYMAN_RING_TYPE_CP2_INDEX;

static struct drm_info_list radeon_debugfs_ring_info_list[] = {
	{"radeon_ring_gfx", radeon_debugfs_ring_info, 0, &radeon_ring_type_gfx_index},
	{"radeon_ring_cp1", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp1_index},
	{"radeon_ring_cp2", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp2_index},
};

static int radeon_debugfs_ib_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_ib *ib = &rdev->ib_pool.ibs[*((unsigned*)node->info_ent->data)];
	unsigned i;

	if (ib == NULL) {
		return 0;
	}
	seq_printf(m, "IB %04u\n", ib->idx);
	seq_printf(m, "IB fence %p\n", ib->fence);
	seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
	for (i = 0; i < ib->length_dw; i++) {
		seq_printf(m, "[%05u]=0x%08X\n", i, ib->ptr[i]);
	}
	return 0;
}

static struct drm_info_list radeon_debugfs_ib_list[RADEON_IB_POOL_SIZE];
static char radeon_debugfs_ib_names[RADEON_IB_POOL_SIZE][32];
static unsigned radeon_debugfs_ib_idx[RADEON_IB_POOL_SIZE];

static int radeon_debugfs_sa_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;

	radeon_sa_bo_dump_debug_info(&rdev->ib_pool.sa_manager, m);

	return 0;
}

static struct drm_info_list radeon_debugfs_sa_list[] = {
	{"radeon_sa_info", &radeon_debugfs_sa_info, 0, NULL},
};

#endif

int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned i;
	for (i = 0; i < ARRAY_SIZE(radeon_debugfs_ring_info_list); ++i) {
		struct drm_info_list *info = &radeon_debugfs_ring_info_list[i];
		int ridx = *(int*)radeon_debugfs_ring_info_list[i].data;
		unsigned r;

		if (&rdev->ring[ridx] != ring)
			continue;

		r = radeon_debugfs_add_files(rdev, info, 1);
		if (r)
			return r;
	}
#endif
	return 0;
}

int radeon_debugfs_ib_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned i;
	int r;

	r = radeon_debugfs_add_files(rdev, radeon_debugfs_sa_list, 1);
	if (r)
		return r;

	for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
		sprintf(radeon_debugfs_ib_names[i], "radeon_ib_%04u", i);
		radeon_debugfs_ib_idx[i] = i;
		radeon_debugfs_ib_list[i].name = radeon_debugfs_ib_names[i];
		radeon_debugfs_ib_list[i].show = &radeon_debugfs_ib_info;
		radeon_debugfs_ib_list[i].driver_features = 0;
		radeon_debugfs_ib_list[i].data = &radeon_debugfs_ib_idx[i];
	}
	return radeon_debugfs_add_files(rdev, radeon_debugfs_ib_list,
					RADEON_IB_POOL_SIZE);
#else
	return 0;
#endif
}