/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 *          Christian König
 */
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"

/*
 * IB
 * IBs (Indirect Buffers) are areas of GPU-accessible memory where
 * commands are stored.  You can put a pointer to the IB in the
 * command ring and the hw will fetch the commands from the IB
 * and execute them.  Generally userspace acceleration drivers
 * produce command buffers which are sent to the kernel and
 * put in IBs for execution by the requested ring.
 */
static int radeon_debugfs_sa_init(struct radeon_device *rdev);

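/*
 * A minimal usage sketch (illustrative only, never called): request an
 * IB, fill it with packets, schedule it on a ring and release it.  The
 * dword written below is a placeholder; real callers emit asic-specific
 * command packets and set length_dw accordingly.
 */
static int __maybe_unused radeon_ib_example(struct radeon_device *rdev)
{
	struct radeon_ib ib;
	int r;

	/* 256 bytes from the suballocator, no VM, on the GFX ring */
	r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX, &ib, NULL, 256);
	if (r)
		return r;

	ib.ptr[0] = 0;		/* placeholder packet */
	ib.length_dw = 1;

	r = radeon_ib_schedule(rdev, &ib, NULL);
	/* freeing is safe right away; the suballocation waits on ib.fence */
	radeon_ib_free(rdev, &ib);
	return r;
}
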
/**
 * radeon_ib_get - request an IB (Indirect Buffer)
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the IB is associated with
 * @ib: IB object returned
 * @vm: VM the IB is associated with, NULL if none
 * @size: requested IB size
 *
 * Request an IB (all asics).  IBs are allocated using the
 * suballocator.
 * Returns 0 on success, error on failure.
 */
int radeon_ib_get(struct radeon_device *rdev, int ring,
		  struct radeon_ib *ib, struct radeon_vm *vm,
		  unsigned size)
{
	int i, r;

	r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &ib->sa_bo, size, 256, true);
	if (r) {
		dev_err(rdev->dev, "failed to get a new IB (%d)\n", r);
		return r;
	}

	r = radeon_semaphore_create(rdev, &ib->semaphore);
	if (r) {
		return r;
	}

	ib->ring = ring;
	ib->fence = NULL;
	ib->ptr = radeon_sa_bo_cpu_addr(ib->sa_bo);
	ib->vm = vm;
	if (vm) {
		/* ib pool is bound at RADEON_VA_IB_OFFSET in virtual address
		 * space and soffset is the offset inside the pool bo
		 */
		ib->gpu_addr = ib->sa_bo->soffset + RADEON_VA_IB_OFFSET;
	} else {
		ib->gpu_addr = radeon_sa_bo_gpu_addr(ib->sa_bo);
	}
	ib->is_const_ib = false;
	for (i = 0; i < RADEON_NUM_RINGS; ++i)
		ib->sync_to[i] = NULL;

	return 0;
}

/**
 * radeon_ib_free - free an IB (Indirect Buffer)
 *
 * @rdev: radeon_device pointer
 * @ib: IB object to free
 *
 * Free an IB (all asics).
 */
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib)
{
	radeon_semaphore_free(rdev, &ib->semaphore, ib->fence);
	radeon_sa_bo_free(rdev, &ib->sa_bo, ib->fence);
	radeon_fence_unref(&ib->fence);
}

/**
 * radeon_ib_sync_to - sync to fence before executing the IB
 *
 * @ib: IB object to add fence to
 * @fence: fence to sync to
 *
 * Sync to the fence before executing the IB
 */
void radeon_ib_sync_to(struct radeon_ib *ib, struct radeon_fence *fence)
{
	struct radeon_fence *other;

	if (!fence)
		return;

	other = ib->sync_to[fence->ring];
	ib->sync_to[fence->ring] = radeon_fence_later(fence, other);
}

/**
 * radeon_ib_schedule - schedule an IB (Indirect Buffer) on the ring
 *
 * @rdev: radeon_device pointer
 * @ib: IB object to schedule
 * @const_ib: Const IB to schedule (SI only)
 *
 * Schedule an IB on the associated ring (all asics).
 * Returns 0 on success, error on failure.
 *
 * On SI, there are two parallel engines fed from the primary ring,
 * the CE (Constant Engine) and the DE (Drawing Engine).  Since
 * resource descriptors have moved to memory, the CE allows you to
 * prime the caches while the DE is updating register state so that
 * the resource descriptors will be already in cache when the draw is
 * processed.  To accomplish this, the userspace driver submits two
 * IBs, one for the CE and one for the DE.  If there is a CE IB (called
 * a CONST_IB), it will be put on the ring prior to the DE IB.  Prior
 * to SI there was just a DE IB.
 */
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
		       struct radeon_ib *const_ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->ring];
	bool need_sync = false;
	int i, r = 0;

	if (!ib->length_dw || !ring->ready) {
		/* TODO: Nothing in the ib to report to the caller here. */
		dev_err(rdev->dev, "couldn't schedule ib\n");
		return -EINVAL;
	}

	/* 64 dwords should be enough for fence too */
	r = radeon_ring_lock(rdev, ring, 64 + RADEON_NUM_RINGS * 8);
	if (r) {
		dev_err(rdev->dev, "scheduling IB failed (%d).\n", r);
		return r;
	}
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		struct radeon_fence *fence = ib->sync_to[i];
		if (radeon_fence_need_sync(fence, ib->ring)) {
			need_sync = true;
			radeon_semaphore_sync_rings(rdev, ib->semaphore,
						    fence->ring, ib->ring);
			radeon_fence_note_sync(fence, ib->ring);
		}
	}
	/* immediately free semaphore when we don't need to sync */
	if (!need_sync) {
		radeon_semaphore_free(rdev, &ib->semaphore, NULL);
	}
	/* if we can't remember our last VM flush then flush now! */
	/* XXX figure out why we have to flush for every IB */
	if (ib->vm /*&& !ib->vm->last_flush*/) {
		radeon_ring_vm_flush(rdev, ib->ring, ib->vm);
	}
	if (const_ib) {
		radeon_ring_ib_execute(rdev, const_ib->ring, const_ib);
		radeon_semaphore_free(rdev, &const_ib->semaphore, NULL);
	}
	radeon_ring_ib_execute(rdev, ib->ring, ib);
	r = radeon_fence_emit(rdev, &ib->fence, ib->ring);
	if (r) {
		dev_err(rdev->dev, "failed to emit fence for new IB (%d)\n", r);
		radeon_ring_unlock_undo(rdev, ring);
		return r;
	}
	if (const_ib) {
		const_ib->fence = radeon_fence_ref(ib->fence);
	}
	/* we just flushed the VM, remember that */
	if (ib->vm && !ib->vm->last_flush) {
		ib->vm->last_flush = radeon_fence_ref(ib->fence);
	}
	radeon_ring_unlock_commit(rdev, ring);
	return 0;
}

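/*
 * Illustrative sketch of the SI submission described above (simplified
 * from the CS ioctl path; the function and parameter names here are
 * hypothetical): the CE IB is passed as const_ib and ends up on the ring
 * ahead of the DE IB.  Both IBs must have been requested on the same ring.
 */
static int __maybe_unused radeon_ib_example_si(struct radeon_device *rdev,
					       struct radeon_ib *de_ib,
					       struct radeon_ib *ce_ib)
{
	/* normally set when the CS parser creates the CE IB */
	ce_ib->is_const_ib = true;
	return radeon_ib_schedule(rdev, de_ib, ce_ib);
}
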
/**
 * radeon_ib_pool_init - Init the IB (Indirect Buffer) pool
 *
 * @rdev: radeon_device pointer
 *
 * Initialize the suballocator to manage a pool of memory
 * for use as IBs (all asics).
 * Returns 0 on success, error on failure.
 */
int radeon_ib_pool_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->ib_pool_ready) {
		return 0;
	}
	r = radeon_sa_bo_manager_init(rdev, &rdev->ring_tmp_bo,
				      RADEON_IB_POOL_SIZE*64*1024,
				      RADEON_GEM_DOMAIN_GTT);
	if (r) {
		return r;
	}

	r = radeon_sa_bo_manager_start(rdev, &rdev->ring_tmp_bo);
	if (r) {
		return r;
	}

	rdev->ib_pool_ready = true;
	if (radeon_debugfs_sa_init(rdev)) {
		dev_err(rdev->dev, "failed to register debugfs file for SA\n");
	}
	return 0;
}

/**
 * radeon_ib_pool_fini - Free the IB (Indirect Buffer) pool
 *
 * @rdev: radeon_device pointer
 *
 * Tear down the suballocator managing the pool of memory
 * for use as IBs (all asics).
 */
void radeon_ib_pool_fini(struct radeon_device *rdev)
{
	if (rdev->ib_pool_ready) {
		radeon_sa_bo_manager_suspend(rdev, &rdev->ring_tmp_bo);
		radeon_sa_bo_manager_fini(rdev, &rdev->ring_tmp_bo);
		rdev->ib_pool_ready = false;
	}
}

/**
 * radeon_ib_ring_tests - test IBs on the rings
 *
 * @rdev: radeon_device pointer
 *
 * Test an IB (Indirect Buffer) on each ring.
 * If the test fails, disable the ring.
 * Returns 0 on success, error if the primary GFX ring
 * IB test fails.
 */
int radeon_ib_ring_tests(struct radeon_device *rdev)
{
	unsigned i;
	int r;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		struct radeon_ring *ring = &rdev->ring[i];

		if (!ring->ready)
			continue;

		r = radeon_ib_test(rdev, i, ring);
		if (r) {
			ring->ready = false;

			if (i == RADEON_RING_TYPE_GFX_INDEX) {
				/* oh, oh, that's really bad */
				DRM_ERROR("radeon: failed testing IB on GFX ring (%d).\n", r);
				rdev->accel_working = false;
				return r;

			} else {
				/* still not good, but we can live with it */
				DRM_ERROR("radeon: failed testing IB on ring %d (%d).\n", i, r);
			}
		}
	}
	return 0;
}

/*
 * Rings
 * Most engines on the GPU are fed via ring buffers.  Ring
 * buffers are areas of GPU accessible memory that the host
 * writes commands into and the GPU reads commands out of.
 * There is a rptr (read pointer) that determines where the
 * GPU is currently reading, and a wptr (write pointer)
 * which determines where the host has written.  When the
 * pointers are equal, the ring is idle.  When the host
 * writes commands to the ring buffer, it increments the
 * wptr.  The GPU then starts fetching commands and executes
 * them until the pointers are equal again.
 */
static int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring);

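/*
 * A minimal sketch (illustrative only, never called) of feeding a ring
 * directly: lock it, reserve space, write dwords and commit.  The dwords
 * written are the ring's nop packet as placeholders; real callers emit
 * packets such as fence or semaphore commands.
 */
static int __maybe_unused radeon_ring_example(struct radeon_device *rdev,
					      struct radeon_ring *ring)
{
	int r;

	r = radeon_ring_lock(rdev, ring, 2);	/* reserve 2 dwords */
	if (r)
		return r;
	radeon_ring_write(ring, ring->nop);	/* placeholder packet */
	radeon_ring_write(ring, ring->nop);
	radeon_ring_unlock_commit(rdev, ring);	/* bump wptr, GPU fetches */
	return 0;
}
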
/**
 * radeon_ring_write - write a value to the ring
 *
 * @ring: radeon_ring structure holding ring information
 * @v: dword (dw) value to write
 *
 * Write a value to the requested ring buffer (all asics).
 */
void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
{
#if DRM_DEBUG_CODE
	if (ring->count_dw <= 0) {
		DRM_ERROR("radeon: writing more dwords to the ring than expected!\n");
	}
#endif
	ring->ring[ring->wptr++] = v;
	ring->wptr &= ring->ptr_mask;
	ring->count_dw--;
	ring->ring_free_dw--;
}

/**
 * radeon_ring_supports_scratch_reg - check if the ring supports
 * writing to scratch registers
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Check if a specific ring supports writing to scratch registers (all asics).
 * Returns true if the ring supports writing to scratch regs, false if not.
 */
bool radeon_ring_supports_scratch_reg(struct radeon_device *rdev,
				      struct radeon_ring *ring)
{
	switch (ring->idx) {
	case RADEON_RING_TYPE_GFX_INDEX:
	case CAYMAN_RING_TYPE_CP1_INDEX:
	case CAYMAN_RING_TYPE_CP2_INDEX:
		return true;
	default:
		return false;
	}
}

/**
 * radeon_ring_free_size - update the free size
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Update the free dw slots in the ring buffer (all asics).
 */
void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *ring)
{
	u32 rptr;

	if (rdev->wb.enabled && ring != &rdev->ring[R600_RING_TYPE_UVD_INDEX])
		rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
	else
		rptr = RREG32(ring->rptr_reg);
	ring->rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
	/* This works because ring_size is a power of 2 */
	ring->ring_free_dw = (ring->rptr + (ring->ring_size / 4));
	ring->ring_free_dw -= ring->wptr;
	ring->ring_free_dw &= ring->ptr_mask;
	if (!ring->ring_free_dw) {
		ring->ring_free_dw = ring->ring_size / 4;
	}
}

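/*
 * Worked example of the wrap-around math above (assuming a hypothetical
 * 16 KB ring, i.e. 4096 dwords, so ptr_mask = 4095): with rptr = 100 and
 * wptr = 4000 the free space is (100 + 4096 - 4000) & 4095 = 196 dwords,
 * which is exactly the 96 slots up to the wrap plus the 100 up to rptr.
 */
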
/**
 * radeon_ring_alloc - allocate space on the ring buffer
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 * @ndw: number of dwords to allocate in the ring buffer
 *
 * Allocate @ndw dwords in the ring buffer (all asics).
 * Returns 0 on success, error on failure.
 */
int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
{
	int r;

	/* make sure we aren't trying to allocate more space than there is on the ring */
	if (ndw > (ring->ring_size / 4))
		return -ENOMEM;
	/* Align requested size with padding so unlock_commit can
	 * pad safely */
	ndw = (ndw + ring->align_mask) & ~ring->align_mask;
	while (ndw > (ring->ring_free_dw - 1)) {
		radeon_ring_free_size(rdev, ring);
		if (ndw < ring->ring_free_dw) {
			break;
		}
		r = radeon_fence_wait_next_locked(rdev, ring->idx);
		if (r)
			return r;
	}
	ring->count_dw = ndw;
	ring->wptr_old = ring->wptr;
	return 0;
}

/**
 * radeon_ring_lock - lock the ring and allocate space on it
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 * @ndw: number of dwords to allocate in the ring buffer
 *
 * Lock the ring and allocate @ndw dwords in the ring buffer
 * (all asics).
 * Returns 0 on success, error on failure.
 */
int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
{
	int r;

	mutex_lock(&rdev->ring_lock);
	r = radeon_ring_alloc(rdev, ring, ndw);
	if (r) {
		mutex_unlock(&rdev->ring_lock);
		return r;
	}
	return 0;
}

/**
 * radeon_ring_commit - tell the GPU to execute the new
 * commands on the ring buffer
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Update the wptr (write pointer) to tell the GPU to
 * execute new commands on the ring buffer (all asics).
 */
void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring)
{
	/* We pad to match fetch size */
	while (ring->wptr & ring->align_mask) {
		radeon_ring_write(ring, ring->nop);
	}
	DRM_MEMORYBARRIER();
	WREG32(ring->wptr_reg, (ring->wptr << ring->ptr_reg_shift) & ring->ptr_reg_mask);
	(void)RREG32(ring->wptr_reg);
}

/**
 * radeon_ring_unlock_commit - tell the GPU to execute the new
 * commands on the ring buffer and unlock it
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Call radeon_ring_commit() then unlock the ring (all asics).
 */
void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *ring)
{
	radeon_ring_commit(rdev, ring);
	mutex_unlock(&rdev->ring_lock);
}

/**
 * radeon_ring_undo - reset the wptr
 *
 * @ring: radeon_ring structure holding ring information
 *
 * Reset the driver's copy of the wptr (all asics).
 */
void radeon_ring_undo(struct radeon_ring *ring)
{
	ring->wptr = ring->wptr_old;
}

/**
 * radeon_ring_unlock_undo - reset the wptr and unlock the ring
 *
 * @ring: radeon_ring structure holding ring information
 *
 * Call radeon_ring_undo() then unlock the ring (all asics).
 */
void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *ring)
{
	radeon_ring_undo(ring);
	mutex_unlock(&rdev->ring_lock);
}

/**
 * radeon_ring_force_activity - add some nop packets to the ring
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Add some nop packets to the ring to force activity (all asics).
 * Used for lockup detection to see if the rptr is advancing.
 */
void radeon_ring_force_activity(struct radeon_device *rdev, struct radeon_ring *ring)
{
	int r;

	radeon_ring_free_size(rdev, ring);
	if (ring->rptr == ring->wptr) {
		r = radeon_ring_alloc(rdev, ring, 1);
		if (!r) {
			radeon_ring_write(ring, ring->nop);
			radeon_ring_commit(rdev, ring);
		}
	}
}

/**
 * radeon_ring_lockup_update - update lockup variables
 *
 * @ring: radeon_ring structure holding ring information
 *
 * Update the last rptr value and timestamp (all asics).
 */
void radeon_ring_lockup_update(struct radeon_ring *ring)
{
	ring->last_rptr = ring->rptr;
	ring->last_activity = jiffies;
}

/**
 * radeon_ring_test_lockup() - check if a ring is locked up
 * @rdev: radeon device structure
 * @ring: radeon_ring structure holding ring information
 *
 * We don't need to initialize the lockup tracking information explicitly:
 * either the CP rptr will have moved or jiffies will have wrapped around,
 * and both cases force an update of the lockup tracking information.
 *
 * A possible false positive is a call after a long pause where last_rptr
 * happens to equal the current CP rptr; unlikely, but it can happen.  To
 * avoid this, if the elapsed time since the last call exceeds the lockup
 * timeout we return false and update the tracking information.  As a
 * consequence the caller must call radeon_ring_test_lockup() several times
 * within the timeout window for a lockup to be reported; the fencing code
 * should be cautious about that.
 *
 * The caller should write to the ring to force the CP to do something, so
 * we don't get a false positive when the CP simply has nothing to do.
 **/
bool radeon_ring_test_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
	unsigned long cjiffies, elapsed;
	uint32_t rptr;

	cjiffies = jiffies;
	if (!time_after(cjiffies, ring->last_activity)) {
		/* likely a wrap around */
		radeon_ring_lockup_update(ring);
		return false;
	}
	rptr = RREG32(ring->rptr_reg);
	ring->rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
	if (ring->rptr != ring->last_rptr) {
		/* CP is still working, no lockup */
		radeon_ring_lockup_update(ring);
		return false;
	}
	elapsed = jiffies_to_msecs(cjiffies - ring->last_activity);
	if (radeon_lockup_timeout && elapsed >= radeon_lockup_timeout) {
		dev_err(rdev->dev, "GPU lockup CP stall for more than %lumsec\n", elapsed);
		return true;
	}
	/* give a chance to the GPU ... */
	return false;
}

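/*
 * Illustrative sketch (not a real callback) of how the per-asic
 * gpu_is_lockup() implementations typically combine the two helpers
 * above; the engine-idle register check real asics do first is omitted.
 */
static bool __maybe_unused radeon_example_gpu_is_lockup(struct radeon_device *rdev,
							struct radeon_ring *ring)
{
	/* keep the CP busy, so a stale rptr really means a stall */
	radeon_ring_force_activity(rdev, ring);
	return radeon_ring_test_lockup(rdev, ring);
}
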
/**
 * radeon_ring_backup - Back up the content of a ring
 *
 * @rdev: radeon_device pointer
 * @ring: the ring we want to back up
 * @data: pointer that receives the buffer of saved dwords
 *
 * Saves all unprocessed commands from a ring, returns the number of dwords saved.
 */
unsigned radeon_ring_backup(struct radeon_device *rdev, struct radeon_ring *ring,
			    uint32_t **data)
{
	unsigned size, ptr, i;

	/* just in case lock the ring */
	mutex_lock(&rdev->ring_lock);
	*data = NULL;

	if (ring->ring_obj == NULL) {
		mutex_unlock(&rdev->ring_lock);
		return 0;
	}

	/* it doesn't make sense to save anything if all fences are signaled */
	if (!radeon_fence_count_emitted(rdev, ring->idx)) {
		mutex_unlock(&rdev->ring_lock);
		return 0;
	}

	/* calculate the number of dw on the ring */
	if (ring->rptr_save_reg)
		ptr = RREG32(ring->rptr_save_reg);
	else if (rdev->wb.enabled)
		ptr = le32_to_cpu(*ring->next_rptr_cpu_addr);
	else {
		/* no way to read back the next rptr */
		mutex_unlock(&rdev->ring_lock);
		return 0;
	}

	size = ring->wptr + (ring->ring_size / 4);
	size -= ptr;
	size &= ring->ptr_mask;
	if (size == 0) {
		mutex_unlock(&rdev->ring_lock);
		return 0;
	}

	/* and then save the content of the ring */
	*data = kmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
	if (!*data) {
		mutex_unlock(&rdev->ring_lock);
		return 0;
	}
	for (i = 0; i < size; ++i) {
		(*data)[i] = ring->ring[ptr++];
		ptr &= ring->ptr_mask;
	}

	mutex_unlock(&rdev->ring_lock);
	return size;
}

/**
 * radeon_ring_restore - append saved commands to the ring again
 *
 * @rdev: radeon_device pointer
 * @ring: ring to append commands to
 * @size: number of dwords we want to write
 * @data: saved commands
 *
 * Allocates space on the ring and restores the previously saved commands.
 * The @data buffer is freed once it has been written back.
 */
int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring,
			unsigned size, uint32_t *data)
{
	int i, r;

	if (!size || !data)
		return 0;

	/* restore the saved ring content */
	r = radeon_ring_lock(rdev, ring, size);
	if (r)
		return r;

	for (i = 0; i < size; ++i) {
		radeon_ring_write(ring, data[i]);
	}

	radeon_ring_unlock_commit(rdev, ring);
	kfree(data);
	return 0;
}

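/*
 * Sketch of how backup and restore pair up around a GPU reset
 * (simplified and illustrative; error handling and the actual asic
 * reset are omitted):
 */
static void __maybe_unused radeon_example_ring_reset(struct radeon_device *rdev,
						     struct radeon_ring *ring)
{
	unsigned size;
	uint32_t *data;

	size = radeon_ring_backup(rdev, ring, &data);	/* save pending dwords */

	/* ... reset the asic and bring the ring back up here ... */

	radeon_ring_restore(rdev, ring, size, data);	/* replays and frees data */
}
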
/**
 * radeon_ring_init - init driver ring struct.
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 * @ring_size: size of the ring
 * @rptr_offs: offset of the rptr writeback location in the WB buffer
 * @rptr_reg: MMIO offset of the rptr register
 * @wptr_reg: MMIO offset of the wptr register
 * @ptr_reg_shift: bit offset of the rptr/wptr values
 * @ptr_reg_mask: bit mask of the rptr/wptr values
 * @nop: nop packet for this ring
 *
 * Initialize the driver information for the selected ring (all asics).
 * Returns 0 on success, error on failure.
 */
int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size,
		     unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg,
		     u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop)
{
	int r;

	ring->ring_size = ring_size;
	ring->rptr_offs = rptr_offs;
	ring->rptr_reg = rptr_reg;
	ring->wptr_reg = wptr_reg;
	ring->ptr_reg_shift = ptr_reg_shift;
	ring->ptr_reg_mask = ptr_reg_mask;
	ring->nop = nop;
	/* Allocate ring buffer */
	if (ring->ring_obj == NULL) {
		r = radeon_bo_create(rdev, ring->ring_size, PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT,
				     NULL, &ring->ring_obj);
		if (r) {
			dev_err(rdev->dev, "(%d) ring create failed\n", r);
			return r;
		}
		r = radeon_bo_reserve(ring->ring_obj, false);
		if (unlikely(r != 0))
			return r;
		r = radeon_bo_pin(ring->ring_obj, RADEON_GEM_DOMAIN_GTT,
				  &ring->gpu_addr);
		if (r) {
			radeon_bo_unreserve(ring->ring_obj);
			dev_err(rdev->dev, "(%d) ring pin failed\n", r);
			return r;
		}
		r = radeon_bo_kmap(ring->ring_obj,
				   (void **)&ring->ring);
		radeon_bo_unreserve(ring->ring_obj);
		if (r) {
			dev_err(rdev->dev, "(%d) ring map failed\n", r);
			return r;
		}
	}
	ring->ptr_mask = (ring->ring_size / 4) - 1;
	ring->ring_free_dw = ring->ring_size / 4;
	if (rdev->wb.enabled) {
		u32 index = RADEON_WB_RING0_NEXT_RPTR + (ring->idx * 4);
		ring->next_rptr_gpu_addr = rdev->wb.gpu_addr + index;
		ring->next_rptr_cpu_addr = &rdev->wb.wb[index/4];
	}
	if (radeon_debugfs_ring_init(rdev, ring)) {
		DRM_ERROR("Failed to register debugfs file for rings !\n");
	}
	radeon_ring_lockup_update(ring);
	return 0;
}

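/*
 * For reference, a typical r100-era caller looks roughly like the
 * following; the exact registers, mask and nop packet are per asic and
 * the values here are illustrative, not authoritative:
 *
 *	r = radeon_ring_init(rdev, ring, 1024 * 1024,
 *			     RADEON_WB_CP_RPTR_OFFSET,
 *			     RADEON_CP_RB_RPTR, RADEON_CP_RB_WPTR,
 *			     0, 0x7fffff, RADEON_CP_PACKET2);
 */
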
/**
 * radeon_ring_fini - tear down the driver ring struct.
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Tear down the driver information for the selected ring (all asics).
 */
void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *ring)
{
	int r;
	struct radeon_bo *ring_obj;

	mutex_lock(&rdev->ring_lock);
	ring_obj = ring->ring_obj;
	ring->ready = false;
	ring->ring = NULL;
	ring->ring_obj = NULL;
	mutex_unlock(&rdev->ring_lock);

	if (ring_obj) {
		r = radeon_bo_reserve(ring_obj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(ring_obj);
			radeon_bo_unpin(ring_obj);
			radeon_bo_unreserve(ring_obj);
		}
		radeon_bo_unref(&ring_obj);
	}
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	int ridx = *(int*)node->info_ent->data;
	struct radeon_ring *ring = &rdev->ring[ridx];
	unsigned count, i, j;
	u32 tmp;

	radeon_ring_free_size(rdev, ring);
	count = (ring->ring_size / 4) - ring->ring_free_dw;
	tmp = RREG32(ring->wptr_reg) >> ring->ptr_reg_shift;
	seq_printf(m, "wptr(0x%04x): 0x%08x [%5d]\n", ring->wptr_reg, tmp, tmp);
	tmp = RREG32(ring->rptr_reg) >> ring->ptr_reg_shift;
	seq_printf(m, "rptr(0x%04x): 0x%08x [%5d]\n", ring->rptr_reg, tmp, tmp);
	if (ring->rptr_save_reg) {
		seq_printf(m, "rptr next(0x%04x): 0x%08x\n", ring->rptr_save_reg,
			   RREG32(ring->rptr_save_reg));
	}
	seq_printf(m, "driver's copy of the wptr: 0x%08x [%5d]\n", ring->wptr, ring->wptr);
	seq_printf(m, "driver's copy of the rptr: 0x%08x [%5d]\n", ring->rptr, ring->rptr);
	seq_printf(m, "last semaphore signal addr : 0x%016llx\n", ring->last_semaphore_signal_addr);
	seq_printf(m, "last semaphore wait addr   : 0x%016llx\n", ring->last_semaphore_wait_addr);
	seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
	seq_printf(m, "%u dwords in ring\n", count);
	/* print 32 dw before current rptr as often it's the last executed
	 * packet that is the root issue
	 */
	i = (ring->rptr + ring->ptr_mask + 1 - 32) & ring->ptr_mask;
	for (j = 0; j <= (count + 32); j++) {
		seq_printf(m, "r[%5d]=0x%08x\n", i, ring->ring[i]);
		i = (i + 1) & ring->ptr_mask;
	}
	return 0;
}

static int radeon_gfx_index = RADEON_RING_TYPE_GFX_INDEX;
static int cayman_cp1_index = CAYMAN_RING_TYPE_CP1_INDEX;
static int cayman_cp2_index = CAYMAN_RING_TYPE_CP2_INDEX;
static int radeon_dma1_index = R600_RING_TYPE_DMA_INDEX;
static int radeon_dma2_index = CAYMAN_RING_TYPE_DMA1_INDEX;
static int r600_uvd_index = R600_RING_TYPE_UVD_INDEX;

static struct drm_info_list radeon_debugfs_ring_info_list[] = {
	{"radeon_ring_gfx", radeon_debugfs_ring_info, 0, &radeon_gfx_index},
	{"radeon_ring_cp1", radeon_debugfs_ring_info, 0, &cayman_cp1_index},
	{"radeon_ring_cp2", radeon_debugfs_ring_info, 0, &cayman_cp2_index},
	{"radeon_ring_dma1", radeon_debugfs_ring_info, 0, &radeon_dma1_index},
	{"radeon_ring_dma2", radeon_debugfs_ring_info, 0, &radeon_dma2_index},
	{"radeon_ring_uvd", radeon_debugfs_ring_info, 0, &r600_uvd_index},
};

static int radeon_debugfs_sa_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;

	radeon_sa_bo_dump_debug_info(&rdev->ring_tmp_bo, m);

	return 0;
}

static struct drm_info_list radeon_debugfs_sa_list[] = {
	{"radeon_sa_info", &radeon_debugfs_sa_info, 0, NULL},
};

#endif

static int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned i;
	for (i = 0; i < ARRAY_SIZE(radeon_debugfs_ring_info_list); ++i) {
		struct drm_info_list *info = &radeon_debugfs_ring_info_list[i];
		int ridx = *(int*)radeon_debugfs_ring_info_list[i].data;
		unsigned r;

		if (&rdev->ring[ridx] != ring)
			continue;

		r = radeon_debugfs_add_files(rdev, info, 1);
		if (r)
			return r;
	}
#endif
	return 0;
}

static int radeon_debugfs_sa_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_sa_list, 1);
#else
	return 0;
#endif
}