/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Dave Airlie
 */
#include <linux/seq_file.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_trace.h"

/*
 * Fences
 * Fences mark an event in the GPU's pipeline and are used
 * for GPU/CPU synchronization. When the fence is written,
 * it is expected that all buffers associated with that fence
 * are no longer in use by the associated ring on the GPU and
 * that the relevant GPU caches have been flushed. Whether
 * we use a scratch register or memory location depends on the asic
 * and whether writeback is enabled.
 */

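/*
 * Typical usage, as a sketch (error handling omitted): emit a fence
 * after submitting commands to a ring, wait for it on the CPU, then
 * drop the reference:
 *
 *	struct radeon_fence *fence;
 *
 *	r = radeon_fence_emit(rdev, &fence, ring);
 *	...
 *	r = radeon_fence_wait(fence, false);
 *	radeon_fence_unref(&fence);
 */
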
/**
 * radeon_fence_write - write a fence value
 *
 * @rdev: radeon_device pointer
 * @seq: sequence number to write
 * @ring: ring index the fence is associated with
 *
 * Writes a fence value to memory or a scratch register (all asics).
 */
static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)
{
	struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
	if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
		if (drv->cpu_addr) {
			*drv->cpu_addr = cpu_to_le32(seq);
		}
	} else {
		WREG32(drv->scratch_reg, seq);
	}
}

/**
 * radeon_fence_read - read a fence value
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the fence is associated with
 *
 * Reads a fence value from memory or a scratch register (all asics).
 * Returns the value of the fence read from memory or register.
 */
static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
{
	struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
	u32 seq = 0;

	if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
		if (drv->cpu_addr) {
			seq = le32_to_cpu(*drv->cpu_addr);
		} else {
			seq = lower_32_bits(atomic64_read(&drv->last_seq));
		}
	} else {
		seq = RREG32(drv->scratch_reg);
	}
	return seq;
}

/**
 * radeon_fence_schedule_check - schedule lockup check
 *
 * @rdev: radeon_device pointer
 * @ring: ring index we should work with
 *
 * Queues a delayed work item to check for lockups.
 */
static void radeon_fence_schedule_check(struct radeon_device *rdev, int ring)
{
	/*
	 * Do not reset the timer here with mod_delayed_work,
	 * this can livelock in an interaction with TTM delayed destroy.
	 */
	queue_delayed_work(system_power_efficient_wq,
			   &rdev->fence_drv[ring].lockup_work,
			   RADEON_FENCE_JIFFIES_TIMEOUT);
}

/**
 * radeon_fence_emit - emit a fence on the requested ring
 *
 * @rdev: radeon_device pointer
 * @fence: radeon fence object
 * @ring: ring index the fence is associated with
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, -ENOMEM on failure.
 */
int radeon_fence_emit(struct radeon_device *rdev,
		      struct radeon_fence **fence,
		      int ring)
{
	u64 seq = ++rdev->fence_drv[ring].sync_seq[ring];

	/* we are protected by the ring emission mutex */
	*fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL);
	if ((*fence) == NULL) {
		return -ENOMEM;
	}
	(*fence)->rdev = rdev;
	(*fence)->seq = seq;
	(*fence)->ring = ring;
	fence_init(&(*fence)->base, &radeon_fence_ops,
		   &rdev->fence_queue.lock, rdev->fence_context + ring, seq);
	radeon_fence_ring_emit(rdev, ring, *fence);
	trace_radeon_fence_emit(rdev->ddev, ring, (*fence)->seq);
	radeon_fence_schedule_check(rdev, ring);
	return 0;
}

/**
 * radeon_fence_check_signaled - callback from fence_queue
 *
 * This function is called with the fence_queue lock held, which is also
 * used for the fence locking itself, so unlocked variants are used for
 * fence_signal and remove_wait_queue.
 */
static int radeon_fence_check_signaled(wait_queue_t *wait, unsigned mode, int flags, void *key)
{
	struct radeon_fence *fence;
	u64 seq;

	fence = container_of(wait, struct radeon_fence, fence_wake);

	/*
	 * We cannot use radeon_fence_process here because we're already
	 * in the waitqueue, in a call from wake_up_all.
	 */
	seq = atomic64_read(&fence->rdev->fence_drv[fence->ring].last_seq);
	if (seq >= fence->seq) {
		int ret = fence_signal_locked(&fence->base);

		if (!ret)
			FENCE_TRACE(&fence->base, "signaled from irq context\n");
		else
			FENCE_TRACE(&fence->base, "was already signaled\n");

		radeon_irq_kms_sw_irq_put(fence->rdev, fence->ring);
		__remove_wait_queue(&fence->rdev->fence_queue, &fence->fence_wake);
		fence_put(&fence->base);
	} else
		FENCE_TRACE(&fence->base, "pending\n");
	return 0;
}

/**
 * radeon_fence_activity - check for fence activity
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the fence is associated with
 *
 * Checks the current fence value and calculates the last
 * signaled fence value. Returns true if activity occurred
 * on the ring, and the fence_queue should be woken up.
 */
static bool radeon_fence_activity(struct radeon_device *rdev, int ring)
{
	uint64_t seq, last_seq, last_emitted;
	unsigned count_loop = 0;
	bool wake = false;

	/* Note there is a scenario here for an infinite loop but it's
	 * very unlikely to happen. For it to happen, the current polling
	 * process needs to be interrupted by another process and another
	 * process needs to update the last_seq between the atomic read and
	 * xchg of the current process.
	 *
	 * Moreover, for this to go into an infinite loop there needs to be
	 * a continuously new fence signaled, i.e. radeon_fence_read needs
	 * to return a different value each time for both the currently
	 * polling process and the other process that xchg the last_seq
	 * between the atomic read and xchg of the current process. And the
	 * value the other process set as last seq must be higher than
	 * the seq value we just read. Which means that the current process
	 * needs to be interrupted after radeon_fence_read and before the
	 * atomic xchg.
	 *
	 * To be even more safe we count the number of times we loop and
	 * we bail after 10 loops, just accepting the fact that we might
	 * have temporarily set the last_seq not to the true real last
	 * seq but to an older one.
	 */
	last_seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
	do {
		last_emitted = rdev->fence_drv[ring].sync_seq[ring];
		seq = radeon_fence_read(rdev, ring);
		seq |= last_seq & 0xffffffff00000000LL;
		if (seq < last_seq) {
			seq &= 0xffffffff;
			seq |= last_emitted & 0xffffffff00000000LL;
		}

		if (seq <= last_seq || seq > last_emitted) {
			break;
		}
		/* If we loop over we don't want to return without
		 * checking if a fence is signaled, as it means that the
		 * seq we just read is different from the previous one.
		 */
		wake = true;
		last_seq = seq;
		if ((count_loop++) > 10) {
			/* We looped over too many times, leave with the
			 * fact that we might have set an older fence
			 * seq as signaled than the current real last seq
			 * signaled by the hw.
			 */
			break;
		}
	} while (atomic64_xchg(&rdev->fence_drv[ring].last_seq, seq) > seq);

	if (seq < last_emitted)
		radeon_fence_schedule_check(rdev, ring);

	return wake;
}

/**
 * radeon_fence_check_lockup - check for hardware lockup
 *
 * @work: delayed work item
 *
 * Checks for fence activity and if there is none probe
 * the hardware if a lockup occurred.
 */
static void radeon_fence_check_lockup(struct work_struct *work)
{
	struct radeon_fence_driver *fence_drv;
	struct radeon_device *rdev;
	int ring;

	fence_drv = container_of(work, struct radeon_fence_driver,
				 lockup_work.work);
	rdev = fence_drv->rdev;
	ring = fence_drv - &rdev->fence_drv[0];

	if (!down_read_trylock(&rdev->exclusive_lock)) {
		/* just reschedule the check if a reset is going on */
		radeon_fence_schedule_check(rdev, ring);
		return;
	}

	if (fence_drv->delayed_irq && rdev->ddev->irq_enabled) {
		unsigned long irqflags;

		fence_drv->delayed_irq = false;
		spin_lock_irqsave(&rdev->irq.lock, irqflags);
		radeon_irq_set(rdev);
		spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
	}

	if (radeon_fence_activity(rdev, ring))
		wake_up_all(&rdev->fence_queue);

	else if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {

		/* good news, we believe it's a lockup */
		dev_warn(rdev->dev, "GPU lockup (current fence id "
			 "0x%016llx last fence id 0x%016llx on ring %d)\n",
			 (uint64_t)atomic64_read(&fence_drv->last_seq),
			 fence_drv->sync_seq[ring], ring);

		/* remember that we need a reset */
		rdev->needs_reset = true;
		wake_up_all(&rdev->fence_queue);
	}
	up_read(&rdev->exclusive_lock);
}

/**
 * radeon_fence_process - process a fence
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the fence is associated with
 *
 * Checks the current fence value and wakes the fence queue
 * if the sequence number has increased (all asics).
 */
void radeon_fence_process(struct radeon_device *rdev, int ring)
{
	if (radeon_fence_activity(rdev, ring))
		wake_up_all(&rdev->fence_queue);
}

/**
 * radeon_fence_seq_signaled - check if a fence sequence number has signaled
 *
 * @rdev: radeon device pointer
 * @seq: sequence number
 * @ring: ring index the fence is associated with
 *
 * Check if the last signaled fence sequence number is >= the requested
 * sequence number (all asics).
 * Returns true if the fence has signaled (current fence value
 * is >= requested value) or false if it has not (current fence
 * value is < the requested value). Helper function for
 * radeon_fence_signaled().
 */
static bool radeon_fence_seq_signaled(struct radeon_device *rdev,
				      u64 seq, unsigned ring)
{
	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
		return true;
	}
	/* poll new last sequence at least once */
	radeon_fence_process(rdev, ring);
	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
		return true;
	}
	return false;
}

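/**
 * radeon_fence_is_signaled - fence_ops .signaled callback
 *
 * @f: fence to check
 *
 * Non-blocking check whether the fence sequence number has signaled:
 * first test the cached last_seq, then poll the ring once more if the
 * exclusive_lock can be taken without waiting.
 */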
static bool radeon_fence_is_signaled(struct fence *f)
{
	struct radeon_fence *fence = to_radeon_fence(f);
	struct radeon_device *rdev = fence->rdev;
	unsigned ring = fence->ring;
	u64 seq = fence->seq;

	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
		return true;
	}

	if (down_read_trylock(&rdev->exclusive_lock)) {
		radeon_fence_process(rdev, ring);
		up_read(&rdev->exclusive_lock);

		if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
			return true;
		}
	}
	return false;
}

/**
 * radeon_fence_enable_signaling - enable signaling on fence
 *
 * @f: fence
 *
 * This function is called with the fence_queue lock held, and adds a callback
 * to fence_queue that checks if this fence is signaled, and if so it
 * signals the fence and removes itself.
 */
static bool radeon_fence_enable_signaling(struct fence *f)
{
	struct radeon_fence *fence = to_radeon_fence(f);
	struct radeon_device *rdev = fence->rdev;

	if (atomic64_read(&rdev->fence_drv[fence->ring].last_seq) >= fence->seq)
		return false;

	if (down_read_trylock(&rdev->exclusive_lock)) {
		radeon_irq_kms_sw_irq_get(rdev, fence->ring);

		if (radeon_fence_activity(rdev, fence->ring))
			wake_up_all_locked(&rdev->fence_queue);

		/* did fence get signaled after we enabled the sw irq? */
		if (atomic64_read(&rdev->fence_drv[fence->ring].last_seq) >= fence->seq) {
			radeon_irq_kms_sw_irq_put(rdev, fence->ring);
			up_read(&rdev->exclusive_lock);
			return false;
		}

		up_read(&rdev->exclusive_lock);
	} else {
		/* we're probably in a lockup, let's not fiddle too much */
		if (radeon_irq_kms_sw_irq_get_delayed(rdev, fence->ring))
			rdev->fence_drv[fence->ring].delayed_irq = true;
		radeon_fence_schedule_check(rdev, fence->ring);
	}

	fence->fence_wake.flags = 0;
	fence->fence_wake.private = NULL;
	fence->fence_wake.func = radeon_fence_check_signaled;
	__add_wait_queue(&rdev->fence_queue, &fence->fence_wake);
	fence_get(f);

	FENCE_TRACE(&fence->base, "armed on ring %i!\n", fence->ring);
	return true;
}

/**
 * radeon_fence_signaled - check if a fence has signaled
 *
 * @fence: radeon fence object
 *
 * Check if the requested fence has signaled (all asics).
 * Returns true if the fence has signaled or false if it has not.
 */
bool radeon_fence_signaled(struct radeon_fence *fence)
{
	if (!fence)
		return true;

	if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring)) {
		int ret;

		ret = fence_signal(&fence->base);
		if (!ret)
			FENCE_TRACE(&fence->base, "signaled from radeon_fence_signaled\n");
		return true;
	}
	return false;
}

/**
 * radeon_fence_any_seq_signaled - check if any sequence number is signaled
 *
 * @rdev: radeon device pointer
 * @seq: sequence numbers
 *
 * Check if the last signaled fence sequence number is >= the requested
 * sequence number (all asics).
 * Returns true if any has signaled (current value is >= requested value)
 * or false if it has not. Helper function for radeon_fence_wait_seq.
 */
static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
{
	unsigned i;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (seq[i] && radeon_fence_seq_signaled(rdev, seq[i], i))
			return true;
	}
	return false;
}

/**
 * radeon_fence_wait_seq_timeout - wait for specific sequence numbers
 *
 * @rdev: radeon device pointer
 * @target_seq: sequence number(s) we want to wait for
 * @intr: use interruptible sleep
 * @timeout: maximum time to wait, or MAX_SCHEDULE_TIMEOUT for infinite wait
 *
 * Wait for the requested sequence number(s) to be written by any ring
 * (all asics). The sequence number array is indexed by ring id.
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the sequence number. Helper function
 * for radeon_fence_wait_*().
 * Returns the remaining time if the sequence number has passed, 0 when
 * the wait timed out, or an error for all other cases.
 * -EDEADLK is returned when a GPU lockup has been detected.
 */
static long radeon_fence_wait_seq_timeout(struct radeon_device *rdev,
					  u64 *target_seq, bool intr,
					  long timeout)
{
	long r;
	int i;

	if (radeon_fence_any_seq_signaled(rdev, target_seq))
		return timeout;

	/* enable IRQs and tracing */
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!target_seq[i])
			continue;

		trace_radeon_fence_wait_begin(rdev->ddev, i, target_seq[i]);
		radeon_irq_kms_sw_irq_get(rdev, i);
	}

	if (intr) {
		r = wait_event_interruptible_timeout(rdev->fence_queue, (
			radeon_fence_any_seq_signaled(rdev, target_seq)
			 || rdev->needs_reset), timeout);
	} else {
		r = wait_event_timeout(rdev->fence_queue, (
			radeon_fence_any_seq_signaled(rdev, target_seq)
			 || rdev->needs_reset), timeout);
	}

	if (rdev->needs_reset)
		r = -EDEADLK;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!target_seq[i])
			continue;

		radeon_irq_kms_sw_irq_put(rdev, i);
		trace_radeon_fence_wait_end(rdev->ddev, i, target_seq[i]);
	}

	return r;
}

/**
 * radeon_fence_wait - wait for a fence to signal
 *
 * @fence: radeon fence object
 * @intr: use interruptible sleep
 *
 * Wait for the requested fence to signal (all asics).
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the fence.
 * Returns 0 if the fence has passed, error for all other cases.
 */
int radeon_fence_wait(struct radeon_fence *fence, bool intr)
{
	uint64_t seq[RADEON_NUM_RINGS] = {};
	long r;

	seq[fence->ring] = fence->seq;
	r = radeon_fence_wait_seq_timeout(fence->rdev, seq, intr, MAX_SCHEDULE_TIMEOUT);
	if (r < 0) {
		return r;
	}

	r = fence_signal(&fence->base);
	if (!r)
		FENCE_TRACE(&fence->base, "signaled from fence_wait\n");
	return 0;
}

/**
 * radeon_fence_wait_any - wait for a fence to signal on any ring
 *
 * @rdev: radeon device pointer
 * @fences: radeon fence object(s)
 * @intr: use interruptible sleep
 *
 * Wait for any requested fence to signal (all asics). The fence
 * array is indexed by ring id. @intr selects whether to use
 * interruptible (true) or non-interruptible (false) sleep when
 * waiting for the fences. Used by the suballocator.
 * Returns 0 if any fence has passed, error for all other cases.
 */
int radeon_fence_wait_any(struct radeon_device *rdev,
			  struct radeon_fence **fences,
			  bool intr)
{
	uint64_t seq[RADEON_NUM_RINGS];
	unsigned i, num_rings = 0;
	long r;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		seq[i] = 0;

		if (!fences[i]) {
			continue;
		}

		seq[i] = fences[i]->seq;
		++num_rings;
	}

	/* nothing to wait for? */
	if (num_rings == 0)
		return -ENOENT;

	r = radeon_fence_wait_seq_timeout(rdev, seq, intr, MAX_SCHEDULE_TIMEOUT);
	if (r < 0) {
		return r;
	}
	return 0;
}

/**
 * radeon_fence_wait_next - wait for the next fence to signal
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Wait for the next fence on the requested ring to signal (all asics).
 * Returns 0 if the next fence has passed, error for all other cases.
 * Caller must hold ring lock.
 */
int radeon_fence_wait_next(struct radeon_device *rdev, int ring)
{
	uint64_t seq[RADEON_NUM_RINGS] = {};
	long r;

	seq[ring] = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
	if (seq[ring] >= rdev->fence_drv[ring].sync_seq[ring]) {
		/* nothing to wait for, last_seq is
		   already the last emitted fence */
		return -ENOENT;
	}
	r = radeon_fence_wait_seq_timeout(rdev, seq, false, MAX_SCHEDULE_TIMEOUT);
	if (r < 0)
		return r;
	return 0;
}

/**
 * radeon_fence_wait_empty - wait for all fences to signal
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Wait for all fences on the requested ring to signal (all asics).
 * Returns 0 if the fences have passed, error for all other cases.
 * Caller must hold ring lock.
 */
int radeon_fence_wait_empty(struct radeon_device *rdev, int ring)
{
	uint64_t seq[RADEON_NUM_RINGS] = {};
	long r;

	seq[ring] = rdev->fence_drv[ring].sync_seq[ring];
	if (!seq[ring])
		return 0;

	r = radeon_fence_wait_seq_timeout(rdev, seq, false, MAX_SCHEDULE_TIMEOUT);
	if (r < 0) {
		if (r == -EDEADLK)
			return -EDEADLK;

		dev_err(rdev->dev, "error waiting for ring[%d] to become idle (%ld)\n",
			ring, r);
	}
	return 0;
}

/**
 * radeon_fence_ref - take a ref on a fence
 *
 * @fence: radeon fence object
 *
 * Take a reference on a fence (all asics).
 * Returns the fence.
 */
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
{
	fence_get(&fence->base);
	return fence;
}

/**
 * radeon_fence_unref - remove a ref on a fence
 *
 * @fence: radeon fence object
 *
 * Remove a reference on a fence (all asics).
 */
void radeon_fence_unref(struct radeon_fence **fence)
{
	struct radeon_fence *tmp = *fence;

	*fence = NULL;
	if (tmp) {
		fence_put(&tmp->base);
	}
}

/**
 * radeon_fence_count_emitted - get the count of emitted fences
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Get the number of fences emitted on the requested ring (all asics).
 * Returns the number of emitted fences on the ring. Used by the
 * dynpm code to track ring activity.
 */
unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
{
	uint64_t emitted;

	/* We are not protected by ring lock when reading the last sequence
	 * but it's ok to report slightly wrong fence count here.
	 */
	radeon_fence_process(rdev, ring);
	emitted = rdev->fence_drv[ring].sync_seq[ring]
		- atomic64_read(&rdev->fence_drv[ring].last_seq);
	/* to avoid a 32-bit wrap around */
	if (emitted > 0x10000000) {
		emitted = 0x10000000;
	}
	return (unsigned)emitted;
}

/**
 * radeon_fence_need_sync - do we need a semaphore
 *
 * @fence: radeon fence object
 * @dst_ring: which ring to check against
 *
 * Check if the fence needs to be synced against another ring
 * (all asics). If so, we need to emit a semaphore.
 * Returns true if we need to sync with another ring, false if
 * not.
 */
bool radeon_fence_need_sync(struct radeon_fence *fence, int dst_ring)
{
	struct radeon_fence_driver *fdrv;

	if (!fence) {
		return false;
	}

	if (fence->ring == dst_ring) {
		return false;
	}

	/* we are protected by the ring mutex */
	fdrv = &fence->rdev->fence_drv[dst_ring];
	if (fence->seq <= fdrv->sync_seq[fence->ring]) {
		return false;
	}

	return true;
}

/**
 * radeon_fence_note_sync - record the sync point
 *
 * @fence: radeon fence object
 * @dst_ring: which ring to check against
 *
 * Note the sequence number at which point the fence will
 * be synced with the requested ring (all asics).
 */
void radeon_fence_note_sync(struct radeon_fence *fence, int dst_ring)
{
	struct radeon_fence_driver *dst, *src;
	unsigned i;

	if (!fence) {
		return;
	}

	if (fence->ring == dst_ring) {
		return;
	}

	/* we are protected by the ring mutex */
	src = &fence->rdev->fence_drv[fence->ring];
	dst = &fence->rdev->fence_drv[dst_ring];
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (i == dst_ring) {
			continue;
		}
		dst->sync_seq[i] = max(dst->sync_seq[i], src->sync_seq[i]);
	}
}

/**
 * radeon_fence_driver_start_ring - make the fence driver
 * ready for use on the requested ring.
 *
 * @rdev: radeon device pointer
 * @ring: ring index to start the fence driver on
 *
 * Make the fence driver ready for processing (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has.
 * Returns 0 for success, errors for failure.
 */
int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
{
	uint64_t index;
	int r;

	radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
	if (rdev->wb.use_event || !radeon_ring_supports_scratch_reg(rdev, &rdev->ring[ring])) {
		rdev->fence_drv[ring].scratch_reg = 0;
		if (ring != R600_RING_TYPE_UVD_INDEX) {
			index = R600_WB_EVENT_OFFSET + ring * 4;
			rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
			rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr +
							 index;

		} else {
			/* put fence directly behind firmware */
			index = ALIGN(rdev->uvd_fw->size, 8);
			rdev->fence_drv[ring].cpu_addr = rdev->uvd.cpu_addr + index;
			rdev->fence_drv[ring].gpu_addr = rdev->uvd.gpu_addr + index;
		}

	} else {
		r = radeon_scratch_get(rdev, &rdev->fence_drv[ring].scratch_reg);
		if (r) {
			dev_err(rdev->dev, "fence failed to get scratch register\n");
			return r;
		}
		index = RADEON_WB_SCRATCH_OFFSET +
			rdev->fence_drv[ring].scratch_reg -
			rdev->scratch.reg_base;
		rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
		rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
	}
	radeon_fence_write(rdev, atomic64_read(&rdev->fence_drv[ring].last_seq), ring);
	rdev->fence_drv[ring].initialized = true;
	dev_info(rdev->dev, "fence driver on ring %d use gpu addr 0x%016llx and cpu addr 0x%p\n",
		 ring, rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr);
	return 0;
}

/**
 * radeon_fence_driver_init_ring - init the fence driver
 * for the requested ring.
 *
 * @rdev: radeon device pointer
 * @ring: ring index to start the fence driver on
 *
 * Init the fence driver for the requested ring (all asics).
 * Helper function for radeon_fence_driver_init().
 */
static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
{
	int i;

	rdev->fence_drv[ring].scratch_reg = -1;
	rdev->fence_drv[ring].cpu_addr = NULL;
	rdev->fence_drv[ring].gpu_addr = 0;
	for (i = 0; i < RADEON_NUM_RINGS; ++i)
		rdev->fence_drv[ring].sync_seq[i] = 0;
	atomic64_set(&rdev->fence_drv[ring].last_seq, 0);
	rdev->fence_drv[ring].initialized = false;
	INIT_DELAYED_WORK(&rdev->fence_drv[ring].lockup_work,
			  radeon_fence_check_lockup);
	rdev->fence_drv[ring].rdev = rdev;
}

/**
 * radeon_fence_driver_init - init the fence driver
 * for all possible rings.
 *
 * @rdev: radeon device pointer
 *
 * Init the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * radeon_fence_driver_start_ring().
 * Returns 0 for success.
 */
int radeon_fence_driver_init(struct radeon_device *rdev)
{
	int ring;

	init_waitqueue_head(&rdev->fence_queue);
	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
		radeon_fence_driver_init_ring(rdev, ring);
	}
	if (radeon_debugfs_fence_init(rdev)) {
		dev_err(rdev->dev, "fence debugfs file creation failed\n");
	}
	return 0;
}

/**
 * radeon_fence_driver_fini - tear down the fence driver
 * for all possible rings.
 *
 * @rdev: radeon device pointer
 *
 * Tear down the fence driver for all possible rings (all asics).
 */
void radeon_fence_driver_fini(struct radeon_device *rdev)
{
	int ring, r;

	mutex_lock(&rdev->ring_lock);
	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
		if (!rdev->fence_drv[ring].initialized)
			continue;
		r = radeon_fence_wait_empty(rdev, ring);
		if (r) {
			/* no need to trigger GPU reset as we are unloading */
			radeon_fence_driver_force_completion(rdev, ring);
		}
		cancel_delayed_work_sync(&rdev->fence_drv[ring].lockup_work);
		wake_up_all(&rdev->fence_queue);
		radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
		rdev->fence_drv[ring].initialized = false;
	}
	mutex_unlock(&rdev->ring_lock);
}

/**
 * radeon_fence_driver_force_completion - force all fence waiters to complete
 *
 * @rdev: radeon device pointer
 * @ring: the ring to complete
 *
 * In case of GPU reset failure make sure no process keeps waiting on a fence
 * that will never complete.
 */
void radeon_fence_driver_force_completion(struct radeon_device *rdev, int ring)
{
	if (rdev->fence_drv[ring].initialized) {
		radeon_fence_write(rdev, rdev->fence_drv[ring].sync_seq[ring], ring);
		cancel_delayed_work_sync(&rdev->fence_drv[ring].lockup_work);
	}
}

/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
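/**
 * radeon_debugfs_fence_info - print fence state for all rings
 *
 * Dumps the last signaled and last emitted sequence number for each
 * initialized ring, along with the per-ring sync points.
 */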
static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	int i, j;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!rdev->fence_drv[i].initialized)
			continue;

		radeon_fence_process(rdev, i);

		seq_printf(m, "--- ring %d ---\n", i);
		seq_printf(m, "Last signaled fence 0x%016llx\n",
			   (unsigned long long)atomic64_read(&rdev->fence_drv[i].last_seq));
		seq_printf(m, "Last emitted 0x%016llx\n",
			   rdev->fence_drv[i].sync_seq[i]);

		for (j = 0; j < RADEON_NUM_RINGS; ++j) {
			if (i != j && rdev->fence_drv[j].initialized)
				seq_printf(m, "Last sync to ring %d 0x%016llx\n",
					   j, rdev->fence_drv[i].sync_seq[j]);
		}
	}
	return 0;
}

/**
 * radeon_debugfs_gpu_reset - manually trigger a gpu reset
 *
 * Manually trigger a gpu reset at the next fence wait.
 */
static int radeon_debugfs_gpu_reset(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;

	down_read(&rdev->exclusive_lock);
	seq_printf(m, "%d\n", rdev->needs_reset);
	rdev->needs_reset = true;
	wake_up_all(&rdev->fence_queue);
	up_read(&rdev->exclusive_lock);

	return 0;
}

static struct drm_info_list radeon_debugfs_fence_list[] = {
	{"radeon_fence_info", &radeon_debugfs_fence_info, 0, NULL},
	{"radeon_gpu_reset", &radeon_debugfs_gpu_reset, 0, NULL}
};
#endif

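/* Register the debugfs files declared above; a no-op without CONFIG_DEBUG_FS. */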
int radeon_debugfs_fence_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_fence_list, 2);
#else
	return 0;
#endif
}

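/*
 * Callbacks for the common fence API, wired together in
 * radeon_fence_ops at the bottom of this file.
 */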
static const char *radeon_fence_get_driver_name(struct fence *fence)
{
	return "radeon";
}

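/* map a ring index to a stable timeline name for tracing and debugging */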
static const char *radeon_fence_get_timeline_name(struct fence *f)
{
	struct radeon_fence *fence = to_radeon_fence(f);
	switch (fence->ring) {
	case RADEON_RING_TYPE_GFX_INDEX: return "radeon.gfx";
	case CAYMAN_RING_TYPE_CP1_INDEX: return "radeon.cp1";
	case CAYMAN_RING_TYPE_CP2_INDEX: return "radeon.cp2";
	case R600_RING_TYPE_DMA_INDEX: return "radeon.dma";
	case CAYMAN_RING_TYPE_DMA1_INDEX: return "radeon.dma1";
	case R600_RING_TYPE_UVD_INDEX: return "radeon.uvd";
	case TN_RING_TYPE_VCE1_INDEX: return "radeon.vce1";
	case TN_RING_TYPE_VCE2_INDEX: return "radeon.vce2";
	default: WARN_ON_ONCE(1); return "radeon.unk";
	}
}

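/* test the signaled bit without polling the ring */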
static inline bool radeon_test_signaled(struct radeon_fence *fence)
{
	return test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags);
}

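/**
 * radeon_fence_default_wait - fence_ops .wait callback
 *
 * @f: fence to wait on
 * @intr: use interruptible sleep
 * @t: timeout in jiffies
 *
 * Sleeps on fence_queue until the fence signals, the timeout expires
 * or a GPU reset becomes necessary. Returns the remaining time on
 * success, 0 on timeout, a negative error if interrupted, or -EDEADLK
 * when the wait was woken by a pending reset instead of the fence.
 */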
static signed long radeon_fence_default_wait(struct fence *f, bool intr,
					     signed long t)
{
	struct radeon_fence *fence = to_radeon_fence(f);
	struct radeon_device *rdev = fence->rdev;
	bool signaled;

	fence_enable_sw_signaling(&fence->base);

	/*
	 * This function has to return -EDEADLK, but cannot hold
	 * exclusive_lock during the wait because some callers
	 * may already hold it. This means checking needs_reset without
	 * lock, and not fiddling with any gpu internals.
	 *
	 * The callback installed with fence_enable_sw_signaling will
	 * run before our wait_event_*timeout call, so we will see
	 * both the signaled fence and the changes to needs_reset.
	 */

	if (intr)
		t = wait_event_interruptible_timeout(rdev->fence_queue,
			((signaled = radeon_test_signaled(fence)) ||
			 rdev->needs_reset), t);
	else
		t = wait_event_timeout(rdev->fence_queue,
			((signaled = radeon_test_signaled(fence)) ||
			 rdev->needs_reset), t);

	if (t > 0 && !signaled)
		return -EDEADLK;
	return t;
}

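/* glue between the radeon fence implementation and the common fence API */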
const struct fence_ops radeon_fence_ops = {
	.get_driver_name = radeon_fence_get_driver_name,
	.get_timeline_name = radeon_fence_get_timeline_name,
	.enable_signaling = radeon_fence_enable_signaling,
	.signaled = radeon_fence_is_signaled,
	.wait = radeon_fence_default_wait,
	.release = NULL,
};