/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Dave Airlie
 */
#include <linux/seq_file.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include "drmP.h"
#include "drm.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_trace.h"

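/* Write the fence sequence number for @ring to the location the hardware
 * reports fences through: the writeback slot when writeback is enabled,
 * otherwise the ring's scratch register.
 */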
static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)
{
	if (rdev->wb.enabled) {
		*rdev->fence_drv[ring].cpu_addr = cpu_to_le32(seq);
	} else {
		WREG32(rdev->fence_drv[ring].scratch_reg, seq);
	}
}

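/* Read the last fence sequence number the hardware reported for @ring,
 * from the writeback slot or the scratch register.
 */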
static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
{
	u32 seq = 0;

	if (rdev->wb.enabled) {
		seq = le32_to_cpu(*rdev->fence_drv[ring].cpu_addr);
	} else {
		seq = RREG32(rdev->fence_drv[ring].scratch_reg);
	}
	return seq;
}

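/* Give the fence the next sequence number of its ring and have the ring
 * emit it; emitting an already emitted fence is a no-op.
 */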
int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
{
	/* we are protected by the ring emission mutex */
	if (fence->seq && fence->seq < RADEON_FENCE_NOTEMITED_SEQ) {
		return 0;
	}
	fence->seq = ++rdev->fence_drv[fence->ring].seq;
	radeon_fence_ring_emit(rdev, fence->ring, fence);
	trace_radeon_fence_emit(rdev->ddev, fence->seq);
	return 0;
}

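/* Poll the fence value reported by the hardware for @ring, extend the
 * 32 bits read from the hardware into the driver's 64-bit sequence
 * space, and wake up waiters when new fences have signaled.
 */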
void radeon_fence_process(struct radeon_device *rdev, int ring)
{
	uint64_t seq, last_seq;
	unsigned count_loop = 0;
	bool wake = false;

	/* Note there is a scenario here for an infinite loop but it's
	 * very unlikely to happen. For it to happen, the current polling
	 * process needs to be interrupted by another process, and that
	 * other process needs to update last_seq between the atomic read
	 * and the xchg of the current process.
	 *
	 * Moreover, for this to loop forever there must be continuously
	 * new fences signaling, i.e. radeon_fence_read needs to return a
	 * different value each time for both the currently polling
	 * process and the other process that updates last_seq between
	 * the atomic read and xchg of the current process. And the value
	 * the other process sets as last_seq must be higher than the seq
	 * value we just read, which means the current process needs to be
	 * interrupted after radeon_fence_read and before the atomic xchg.
	 *
	 * To be even safer we count the number of times we loop and bail
	 * after 10 iterations, accepting the fact that we might have
	 * temporarily set last_seq not to the true last seq but to an
	 * older one.
	 */
	last_seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
	do {
		seq = radeon_fence_read(rdev, ring);
		seq |= last_seq & 0xffffffff00000000LL;
		if (seq < last_seq) {
			seq += 0x100000000LL;
		}

		if (seq == last_seq) {
			break;
		}
		/* If we loop again we don't want to return without
		 * checking if a fence is signaled, as it means that the
		 * seq we just read is different from the previous one.
		 */
		wake = true;
		last_seq = seq;
		if ((count_loop++) > 10) {
			/* We looped too many times; leave, accepting
			 * that we might have set an older fence seq as
			 * signaled than the real last seq reported by
			 * the hw.
			 */
			break;
		}
	} while (atomic64_xchg(&rdev->fence_drv[ring].last_seq, seq) > seq);

	if (wake) {
		rdev->fence_drv[ring].last_activity = jiffies;
		wake_up_all(&rdev->fence_queue);
	}
}

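/* kref release callback, run when the last reference to a fence is
 * dropped.
 */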
static void radeon_fence_destroy(struct kref *kref)
{
	struct radeon_fence *fence;

	fence = container_of(kref, struct radeon_fence, kref);
	fence->seq = RADEON_FENCE_NOTEMITED_SEQ;
	kfree(fence);
}

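/* Allocate and initialize a fence object for @ring; the fence is not
 * emitted yet, so its seq stays at RADEON_FENCE_NOTEMITED_SEQ until
 * radeon_fence_emit() is called.
 */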
int radeon_fence_create(struct radeon_device *rdev,
			struct radeon_fence **fence,
			int ring)
{
	*fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL);
	if ((*fence) == NULL) {
		return -ENOMEM;
	}
	kref_init(&((*fence)->kref));
	(*fence)->rdev = rdev;
	(*fence)->seq = RADEON_FENCE_NOTEMITED_SEQ;
	(*fence)->ring = ring;
	return 0;
}

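/* Check whether sequence number @seq has signaled on @ring, first
 * against the cached last_seq, then by polling the hardware once.
 */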
static bool radeon_fence_seq_signaled(struct radeon_device *rdev,
				      u64 seq, unsigned ring)
{
	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
		return true;
	}
	/* poll new last sequence at least once */
	radeon_fence_process(rdev, ring);
	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
		return true;
	}
	return false;
}

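/* Check whether a fence has signaled, caching a positive result in
 * fence->seq so later queries don't need to poll again.
 */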
bool radeon_fence_signaled(struct radeon_fence *fence)
{
	if (!fence) {
		return true;
	}
	if (fence->seq == RADEON_FENCE_NOTEMITED_SEQ) {
		WARN(1, "Querying an unemitted fence: %p!\n", fence);
		return true;
	}
	if (fence->seq == RADEON_FENCE_SIGNALED_SEQ) {
		return true;
	}
	if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring)) {
		fence->seq = RADEON_FENCE_SIGNALED_SEQ;
		return true;
	}
	return false;
}

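/* Wait for sequence number @target_seq on @ring. The wait is split
 * into RADEON_FENCE_JIFFIES_TIMEOUT slices; if a slice expires without
 * progress the lockup check runs and -EDEADLK is returned on a lockup.
 * @lock_ring tells whether the ring mutex must be taken around the
 * lockup handling.
 */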
static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 target_seq,
				 unsigned ring, bool intr, bool lock_ring)
{
	unsigned long timeout, last_activity;
	uint64_t seq;
	unsigned i;
	bool signaled;
	int r;

	while (target_seq > atomic64_read(&rdev->fence_drv[ring].last_seq)) {
		if (!rdev->ring[ring].ready) {
			return -EBUSY;
		}

		timeout = jiffies - RADEON_FENCE_JIFFIES_TIMEOUT;
		if (time_after(rdev->fence_drv[ring].last_activity, timeout)) {
			/* the normal case, timeout is somewhere before last_activity */
			timeout = rdev->fence_drv[ring].last_activity - timeout;
		} else {
			/* either jiffies wrapped around, or no fence was signaled in the last 500ms;
			 * either way we will just wait for the minimum amount and then check for a lockup
			 */
			timeout = 1;
		}
		seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
		/* Save the current last activity value, used to check for GPU lockups */
		last_activity = rdev->fence_drv[ring].last_activity;

		trace_radeon_fence_wait_begin(rdev->ddev, seq);
		radeon_irq_kms_sw_irq_get(rdev, ring);
		if (intr) {
			r = wait_event_interruptible_timeout(rdev->fence_queue,
				(signaled = radeon_fence_seq_signaled(rdev, target_seq, ring)),
				timeout);
		} else {
			r = wait_event_timeout(rdev->fence_queue,
				(signaled = radeon_fence_seq_signaled(rdev, target_seq, ring)),
				timeout);
		}
		radeon_irq_kms_sw_irq_put(rdev, ring);
		if (unlikely(r < 0)) {
			return r;
		}
		trace_radeon_fence_wait_end(rdev->ddev, seq);

		if (unlikely(!signaled)) {
			/* we were interrupted for some reason and the fence
			 * isn't signaled yet, resume waiting */
			if (r) {
				continue;
			}

			/* check if sequence value has changed since last_activity */
			if (seq != atomic64_read(&rdev->fence_drv[ring].last_seq)) {
				continue;
			}

			if (lock_ring) {
				mutex_lock(&rdev->ring_lock);
			}

			/* test if somebody else has already decided that this is a lockup */
			if (last_activity != rdev->fence_drv[ring].last_activity) {
				if (lock_ring) {
					mutex_unlock(&rdev->ring_lock);
				}
				continue;
			}

			if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
				/* good news, we believe it's a lockup */
				dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016llx last fence id 0x%016llx)\n",
					 target_seq, seq);

				/* change last activity so nobody else thinks there is a lockup */
				for (i = 0; i < RADEON_NUM_RINGS; ++i) {
					rdev->fence_drv[i].last_activity = jiffies;
				}

				/* mark the ring as not ready any more */
				rdev->ring[ring].ready = false;
				if (lock_ring) {
					mutex_unlock(&rdev->ring_lock);
				}
				return -EDEADLK;
			}

			if (lock_ring) {
				mutex_unlock(&rdev->ring_lock);
			}
		}
	}
	return 0;
}

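/* Wait for a fence to signal; @intr selects an interruptible wait. */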
int radeon_fence_wait(struct radeon_fence *fence, bool intr)
{
	int r;

	if (fence == NULL) {
		WARN(1, "Querying an invalid fence: %p!\n", fence);
		return -EINVAL;
	}

	r = radeon_fence_wait_seq(fence->rdev, fence->seq,
				  fence->ring, intr, true);
	if (r) {
		return r;
	}
	fence->seq = RADEON_FENCE_SIGNALED_SEQ;
	return 0;
}

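/* Check whether any of the per-ring sequence numbers in @seq has
 * signaled; a zero entry means that ring is not waited on.
 */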
bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
{
	unsigned i;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (seq[i] && radeon_fence_seq_signaled(rdev, seq[i], i)) {
			return true;
		}
	}
	return false;
}

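/* Wait until at least one of the per-ring sequence numbers in
 * @target_seq signals, with the same sliced waiting and lockup
 * detection as radeon_fence_wait_seq; the lowest ring waited on is
 * used for the lockup check.
 */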
static int radeon_fence_wait_any_seq(struct radeon_device *rdev,
				     u64 *target_seq, bool intr)
{
	unsigned long timeout, last_activity, tmp;
	unsigned i, ring = RADEON_NUM_RINGS;
	bool signaled;
	int r;

	for (i = 0, last_activity = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!target_seq[i]) {
			continue;
		}

		/* use the most recent one as indicator */
		if (time_after(rdev->fence_drv[i].last_activity, last_activity)) {
			last_activity = rdev->fence_drv[i].last_activity;
		}

		/* For lockup detection just pick the lowest ring we are
		 * actively waiting for
		 */
		if (i < ring) {
			ring = i;
		}
	}

	/* nothing to wait for? */
	if (ring == RADEON_NUM_RINGS) {
		return 0;
	}

	while (!radeon_fence_any_seq_signaled(rdev, target_seq)) {
		timeout = jiffies - RADEON_FENCE_JIFFIES_TIMEOUT;
		if (time_after(last_activity, timeout)) {
			/* the normal case, timeout is somewhere before last_activity */
			timeout = last_activity - timeout;
		} else {
			/* either jiffies wrapped around, or no fence was signaled in the last 500ms;
			 * either way we will just wait for the minimum amount and then check for a lockup
			 */
			timeout = 1;
		}

		trace_radeon_fence_wait_begin(rdev->ddev, target_seq[ring]);
		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			if (target_seq[i]) {
				radeon_irq_kms_sw_irq_get(rdev, i);
			}
		}
		if (intr) {
			r = wait_event_interruptible_timeout(rdev->fence_queue,
				(signaled = radeon_fence_any_seq_signaled(rdev, target_seq)),
				timeout);
		} else {
			r = wait_event_timeout(rdev->fence_queue,
				(signaled = radeon_fence_any_seq_signaled(rdev, target_seq)),
				timeout);
		}
		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			if (target_seq[i]) {
				radeon_irq_kms_sw_irq_put(rdev, i);
			}
		}
		if (unlikely(r < 0)) {
			return r;
		}
		trace_radeon_fence_wait_end(rdev->ddev, target_seq[ring]);

		if (unlikely(!signaled)) {
			/* we were interrupted for some reason and the fence
			 * isn't signaled yet, resume waiting */
			if (r) {
				continue;
			}

			mutex_lock(&rdev->ring_lock);
			for (i = 0, tmp = 0; i < RADEON_NUM_RINGS; ++i) {
				if (time_after(rdev->fence_drv[i].last_activity, tmp)) {
					tmp = rdev->fence_drv[i].last_activity;
				}
			}
			/* test if somebody else has already decided that this is a lockup */
			if (last_activity != tmp) {
				last_activity = tmp;
				mutex_unlock(&rdev->ring_lock);
				continue;
			}

			if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
				/* good news, we believe it's a lockup */
				dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016llx)\n",
					 target_seq[ring]);

				/* change last activity so nobody else thinks there is a lockup */
				for (i = 0; i < RADEON_NUM_RINGS; ++i) {
					rdev->fence_drv[i].last_activity = jiffies;
				}

				/* mark the ring as not ready any more */
				rdev->ring[ring].ready = false;
				mutex_unlock(&rdev->ring_lock);
				return -EDEADLK;
			}
			mutex_unlock(&rdev->ring_lock);
		}
	}
	return 0;
}

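/* Wait for the first fence in the @fences array (one slot per ring,
 * NULL entries are skipped) to signal.
 */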
int radeon_fence_wait_any(struct radeon_device *rdev,
			  struct radeon_fence **fences,
			  bool intr)
{
	uint64_t seq[RADEON_NUM_RINGS];
	unsigned i;
	int r;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		seq[i] = 0;

		if (!fences[i]) {
			continue;
		}

		if (fences[i]->seq == RADEON_FENCE_SIGNALED_SEQ) {
			/* something was already signaled */
			return 0;
		}

		if (fences[i]->seq < RADEON_FENCE_NOTEMITED_SEQ) {
			seq[i] = fences[i]->seq;
		}
	}

	r = radeon_fence_wait_any_seq(rdev, seq, intr);
	if (r) {
		return r;
	}
	return 0;
}

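/* Wait for the next fence after last_seq to signal on @ring. */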
int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
{
	uint64_t seq;

	/* We are not protected by the ring lock when reading the current seq,
	 * but it's ok as the worst case is that we return too early while
	 * we could have waited.
	 */
	seq = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
	if (seq >= rdev->fence_drv[ring].seq) {
		/* nothing to wait for, last_seq is
		   already the last emitted fence */
		return -ENOENT;
	}
	return radeon_fence_wait_seq(rdev, seq, ring, false, false);
}

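/* Wait until all fences emitted on @ring have signaled. */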
int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
{
	/* We are not protected by the ring lock when reading the current seq,
	 * but it's ok as wait empty is called from a place where no more
	 * activity can be scheduled, so there won't be concurrent access
	 * to the seq value.
	 */
	return radeon_fence_wait_seq(rdev, rdev->fence_drv[ring].seq,
				     ring, false, false);
}

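/* Take an additional reference on a fence. */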
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
{
	kref_get(&fence->kref);
	return fence;
}

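/* Drop a fence reference and clear the caller's pointer; the fence is
 * freed once the last reference is gone.
 */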
void radeon_fence_unref(struct radeon_fence **fence)
{
	struct radeon_fence *tmp = *fence;

	*fence = NULL;
	if (tmp) {
		kref_put(&tmp->kref, radeon_fence_destroy);
	}
}

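/* Return how many fences were emitted on @ring but have not signaled
 * yet, clamped to keep the value well inside 32 bits.
 */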
unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
{
	uint64_t emitted;

	/* We are not protected by the ring lock when reading the last sequence,
	 * but it's ok to report a slightly wrong fence count here.
	 */
	radeon_fence_process(rdev, ring);
	emitted = rdev->fence_drv[ring].seq - atomic64_read(&rdev->fence_drv[ring].last_seq);
	/* to avoid a 32-bit wrap-around */
	if (emitted > 0x10000000) {
		emitted = 0x10000000;
	}
	return (unsigned)emitted;
}

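/* Pick the writeback slot (or a scratch register) that the fence for
 * @ring will be written to and mark the ring's fence driver as
 * initialized.
 */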
int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
{
	uint64_t index;
	int r;

	radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
	if (rdev->wb.use_event) {
		rdev->fence_drv[ring].scratch_reg = 0;
		index = R600_WB_EVENT_OFFSET + ring * 4;
	} else {
		r = radeon_scratch_get(rdev, &rdev->fence_drv[ring].scratch_reg);
		if (r) {
			dev_err(rdev->dev, "fence failed to get scratch register\n");
			return r;
		}
		index = RADEON_WB_SCRATCH_OFFSET +
			rdev->fence_drv[ring].scratch_reg -
			rdev->scratch.reg_base;
	}
	rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
	rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
	radeon_fence_write(rdev, rdev->fence_drv[ring].seq, ring);
	rdev->fence_drv[ring].initialized = true;
	dev_info(rdev->dev, "fence driver on ring %d use gpu addr 0x%016llx and cpu addr 0x%p\n",
		 ring, rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr);
	return 0;
}

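/* Reset the software state of the fence driver for one ring. */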
static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
{
	rdev->fence_drv[ring].scratch_reg = -1;
	rdev->fence_drv[ring].cpu_addr = NULL;
	rdev->fence_drv[ring].gpu_addr = 0;
	rdev->fence_drv[ring].seq = 0;
	atomic64_set(&rdev->fence_drv[ring].last_seq, 0);
	rdev->fence_drv[ring].last_activity = jiffies;
	rdev->fence_drv[ring].initialized = false;
}

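/* Initialize the fence driver state for all rings; the per-ring setup
 * is finished later by radeon_fence_driver_start_ring() once writeback
 * is configured.
 */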
int radeon_fence_driver_init(struct radeon_device *rdev)
{
	int ring;

	init_waitqueue_head(&rdev->fence_queue);
	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
		radeon_fence_driver_init_ring(rdev, ring);
	}
	if (radeon_debugfs_fence_init(rdev)) {
		dev_err(rdev->dev, "fence debugfs file creation failed\n");
	}
	return 0;
}

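/* Tear down the fence driver: wait for all outstanding fences, then
 * release the scratch registers.
 */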
void radeon_fence_driver_fini(struct radeon_device *rdev)
{
	int ring;

	mutex_lock(&rdev->ring_lock);
	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
		if (!rdev->fence_drv[ring].initialized)
			continue;
		radeon_fence_wait_empty_locked(rdev, ring);
		wake_up_all(&rdev->fence_queue);
		radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
		rdev->fence_drv[ring].initialized = false;
	}
	mutex_unlock(&rdev->ring_lock);
}


/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	int i;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!rdev->fence_drv[i].initialized)
			continue;

		seq_printf(m, "--- ring %d ---\n", i);
		seq_printf(m, "Last signaled fence 0x%016llx\n",
			   (unsigned long long)atomic64_read(&rdev->fence_drv[i].last_seq));
		seq_printf(m, "Last emitted 0x%016llx\n",
			   rdev->fence_drv[i].seq);
	}
	return 0;
}

static struct drm_info_list radeon_debugfs_fence_list[] = {
	{"radeon_fence_info", &radeon_debugfs_fence_info, 0, NULL},
};
#endif

int radeon_debugfs_fence_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_fence_list, 1);
#else
	return 0;
#endif
}