/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Dave Airlie
 */
#include <linux/seq_file.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include "drmP.h"
#include "drm.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_trace.h"

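/* Write the fence sequence number for @ring, either to the writeback
 * buffer (when enabled) or to the ring's scratch register.
 */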
static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)
{
	if (rdev->wb.enabled) {
		*rdev->fence_drv[ring].cpu_addr = cpu_to_le32(seq);
	} else {
		WREG32(rdev->fence_drv[ring].scratch_reg, seq);
	}
}

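/* Read back the last fence sequence number the hardware reported for
 * @ring, from the writeback buffer or the scratch register.
 */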
static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
{
	u32 seq = 0;

	if (rdev->wb.enabled) {
		seq = le32_to_cpu(*rdev->fence_drv[ring].cpu_addr);
	} else {
		seq = RREG32(rdev->fence_drv[ring].scratch_reg);
	}
	return seq;
}

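/* Assign the next sequence number on the fence's ring and emit it to
 * the hardware. Emitting an already emitted fence is a no-op.
 */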
int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
{
	/* we are protected by the ring emission mutex */
	if (fence->seq && fence->seq < RADEON_FENCE_NOTEMITED_SEQ) {
		return 0;
	}
	fence->seq = ++rdev->fence_drv[fence->ring].seq;
	radeon_fence_ring_emit(rdev, fence->ring, fence);
	trace_radeon_fence_emit(rdev->ddev, fence->seq);
	return 0;
}

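/* Poll the fence value for @ring, fold it into the 64-bit sequence
 * space and, if it advanced, update last_seq and wake up any waiters.
 */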
void radeon_fence_process(struct radeon_device *rdev, int ring)
{
	uint64_t seq, last_seq;
	unsigned count_loop = 0;
	bool wake = false;

	/* Note there is a scenario here for an infinite loop but it's
	 * very unlikely to happen. For it to happen, the current polling
	 * process needs to be interrupted by another process, and that
	 * other process needs to update last_seq between the atomic read
	 * and the xchg of the current process.
	 *
	 * Moreover, for this to turn into an infinite loop, new fences
	 * need to be signaled continuously, i.e. radeon_fence_read needs
	 * to return a different value each time for both the currently
	 * polling process and the other process that updates last_seq
	 * between the atomic read and xchg of the current process. And
	 * the value the other process sets as last seq must be higher
	 * than the seq value we just read, which means the current
	 * process needs to be interrupted after radeon_fence_read and
	 * before the atomic xchg.
	 *
	 * To be even safer we count the number of times we loop and bail
	 * out after 10 loops, accepting the fact that we might have
	 * temporarily set last_seq not to the true last seq but to an
	 * older one.
	 */
	last_seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
	do {
		seq = radeon_fence_read(rdev, ring);
		seq |= last_seq & 0xffffffff00000000LL;
		if (seq < last_seq) {
			seq += 0x100000000LL;
		}

		if (seq == last_seq) {
			break;
		}
		/* If we loop over we don't want to return without
		 * checking if a fence is signaled, as it means that the
		 * seq we just read is different from the previous one.
		 */
		wake = true;
		last_seq = seq;
		if ((count_loop++) > 10) {
			/* We looped over too many times, so leave,
			 * accepting that we might have marked an older
			 * fence seq than the current real last seq as
			 * signaled by the hw.
			 */
			break;
		}
	} while (atomic64_xchg(&rdev->fence_drv[ring].last_seq, seq) > seq);

	if (wake) {
		rdev->fence_drv[ring].last_activity = jiffies;
		wake_up_all(&rdev->fence_queue);
	}
}

static void radeon_fence_destroy(struct kref *kref)
{
	struct radeon_fence *fence;

	fence = container_of(kref, struct radeon_fence, kref);
	fence->seq = RADEON_FENCE_NOTEMITED_SEQ;
	kfree(fence);
}

int radeon_fence_create(struct radeon_device *rdev,
			struct radeon_fence **fence,
			int ring)
{
	*fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL);
	if ((*fence) == NULL) {
		return -ENOMEM;
	}
	kref_init(&((*fence)->kref));
	(*fence)->rdev = rdev;
	(*fence)->seq = RADEON_FENCE_NOTEMITED_SEQ;
	(*fence)->ring = ring;
	return 0;
}

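/* Check whether sequence number @seq on @ring has signaled, polling
 * the hardware once if the cached last_seq hasn't reached it yet.
 */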
static bool radeon_fence_seq_signaled(struct radeon_device *rdev,
				      u64 seq, unsigned ring)
{
	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
		return true;
	}
	/* poll new last sequence at least once */
	radeon_fence_process(rdev, ring);
	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
		return true;
	}
	return false;
}

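/* Check whether @fence has signaled, caching a positive result in
 * fence->seq so later queries can skip the hardware check.
 */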
bool radeon_fence_signaled(struct radeon_fence *fence)
{
	if (!fence) {
		return true;
	}
	if (fence->seq == RADEON_FENCE_NOTEMITED_SEQ) {
		WARN(1, "Querying an unemitted fence : %p !\n", fence);
		return true;
	}
	if (fence->seq == RADEON_FENCE_SIGNALED_SEQ) {
		return true;
	}
	if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring)) {
		fence->seq = RADEON_FENCE_SIGNALED_SEQ;
		return true;
	}
	return false;
}

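/* Wait for sequence number @target_seq on @ring to signal, waiting in
 * slices bounded by RADEON_FENCE_JIFFIES_TIMEOUT since the last fence
 * activity and running the lockup check when no progress is made.
 * Returns 0 once the fence signaled, -EBUSY if the ring isn't ready,
 * -EDEADLK if a GPU lockup was detected, or the error returned by the
 * interruptible wait.
 */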
static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 target_seq,
				 unsigned ring, bool intr, bool lock_ring)
{
	unsigned long timeout, last_activity;
	uint64_t seq;
	unsigned i;
	bool signaled;
	int r;

	while (target_seq > atomic64_read(&rdev->fence_drv[ring].last_seq)) {
		if (!rdev->ring[ring].ready) {
			return -EBUSY;
		}

		timeout = jiffies - RADEON_FENCE_JIFFIES_TIMEOUT;
		if (time_after(rdev->fence_drv[ring].last_activity, timeout)) {
			/* the normal case, timeout is somewhere before last_activity */
			timeout = rdev->fence_drv[ring].last_activity - timeout;
		} else {
			/* either jiffies wrapped around, or no fence was signaled in the last 500ms
			 * anyway we will just wait for the minimum amount and then check for a lockup
			 */
			timeout = 1;
		}
		seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
		/* Save current last activity value, used to check for GPU lockups */
		last_activity = rdev->fence_drv[ring].last_activity;

		trace_radeon_fence_wait_begin(rdev->ddev, seq);
		radeon_irq_kms_sw_irq_get(rdev, ring);
		if (intr) {
			r = wait_event_interruptible_timeout(rdev->fence_queue,
				(signaled = radeon_fence_seq_signaled(rdev, target_seq, ring)),
				timeout);
		} else {
			r = wait_event_timeout(rdev->fence_queue,
				(signaled = radeon_fence_seq_signaled(rdev, target_seq, ring)),
				timeout);
		}
		radeon_irq_kms_sw_irq_put(rdev, ring);
		if (unlikely(r < 0)) {
			return r;
		}
		trace_radeon_fence_wait_end(rdev->ddev, seq);

		if (unlikely(!signaled)) {
			/* we were interrupted for some reason and fence
			 * isn't signaled yet, resume waiting */
			if (r) {
				continue;
			}

			/* check if sequence value has changed since last_activity */
			if (seq != atomic64_read(&rdev->fence_drv[ring].last_seq)) {
				continue;
			}

			if (lock_ring) {
				mutex_lock(&rdev->ring_lock);
			}

			/* test if somebody else has already decided that this is a lockup */
			if (last_activity != rdev->fence_drv[ring].last_activity) {
				if (lock_ring) {
					mutex_unlock(&rdev->ring_lock);
				}
				continue;
			}

			if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
				/* good news we believe it's a lockup */
				dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016llx last fence id 0x%016llx)\n",
					 target_seq, seq);

				/* change last activity so nobody else thinks there is a lockup */
				for (i = 0; i < RADEON_NUM_RINGS; ++i) {
					rdev->fence_drv[i].last_activity = jiffies;
				}

				/* mark the ring as not ready any more */
				rdev->ring[ring].ready = false;
				if (lock_ring) {
					mutex_unlock(&rdev->ring_lock);
				}
				return -EDEADLK;
			}

			if (lock_ring) {
				mutex_unlock(&rdev->ring_lock);
			}
		}
	}
	return 0;
}

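/* Wait for @fence to signal, taking the ring lock around the lockup
 * handling. Returns 0 on success or a negative error code from the
 * sequence wait.
 */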
int radeon_fence_wait(struct radeon_fence *fence, bool intr)
{
	int r;

	if (fence == NULL) {
		WARN(1, "Querying an invalid fence : %p !\n", fence);
		return -EINVAL;
	}

	r = radeon_fence_wait_seq(fence->rdev, fence->seq,
				  fence->ring, intr, true);
	if (r) {
		return r;
	}
	fence->seq = RADEON_FENCE_SIGNALED_SEQ;
	return 0;
}

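/* Check whether any of the per-ring sequence numbers in @seq has
 * signaled; entries that are zero are ignored.
 */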
bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
{
	unsigned i;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (seq[i] && radeon_fence_seq_signaled(rdev, seq[i], i)) {
			return true;
		}
	}
	return false;
}

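/* Wait until any of the non-zero sequence numbers in @target_seq
 * signals, using the lowest ring that is waited on for lockup
 * detection. Returns 0 on success, -EDEADLK on a detected GPU lockup,
 * or the error returned by the interruptible wait.
 */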
static int radeon_fence_wait_any_seq(struct radeon_device *rdev,
				     u64 *target_seq, bool intr)
{
	unsigned long timeout, last_activity, tmp;
	unsigned i, ring = RADEON_NUM_RINGS;
	bool signaled;
	int r;

	for (i = 0, last_activity = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!target_seq[i]) {
			continue;
		}

		/* use the most recent one as indicator */
		if (time_after(rdev->fence_drv[i].last_activity, last_activity)) {
			last_activity = rdev->fence_drv[i].last_activity;
		}

		/* For lockup detection just pick the lowest ring we are
		 * actively waiting for
		 */
		if (i < ring) {
			ring = i;
		}
	}

	/* nothing to wait for ? */
	if (ring == RADEON_NUM_RINGS) {
		return 0;
	}

	while (!radeon_fence_any_seq_signaled(rdev, target_seq)) {
		timeout = jiffies - RADEON_FENCE_JIFFIES_TIMEOUT;
		if (time_after(last_activity, timeout)) {
			/* the normal case, timeout is somewhere before last_activity */
			timeout = last_activity - timeout;
		} else {
			/* either jiffies wrapped around, or no fence was signaled in the last 500ms
			 * anyway we will just wait for the minimum amount and then check for a lockup
			 */
			timeout = 1;
		}

		trace_radeon_fence_wait_begin(rdev->ddev, target_seq[ring]);
		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			if (target_seq[i]) {
				radeon_irq_kms_sw_irq_get(rdev, i);
			}
		}
		if (intr) {
			r = wait_event_interruptible_timeout(rdev->fence_queue,
				(signaled = radeon_fence_any_seq_signaled(rdev, target_seq)),
				timeout);
		} else {
			r = wait_event_timeout(rdev->fence_queue,
				(signaled = radeon_fence_any_seq_signaled(rdev, target_seq)),
				timeout);
		}
		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			if (target_seq[i]) {
				radeon_irq_kms_sw_irq_put(rdev, i);
			}
		}
		if (unlikely(r < 0)) {
			return r;
		}
		trace_radeon_fence_wait_end(rdev->ddev, target_seq[ring]);

		if (unlikely(!signaled)) {
			/* we were interrupted for some reason and fence
			 * isn't signaled yet, resume waiting */
			if (r) {
				continue;
			}

			mutex_lock(&rdev->ring_lock);
			for (i = 0, tmp = 0; i < RADEON_NUM_RINGS; ++i) {
				if (time_after(rdev->fence_drv[i].last_activity, tmp)) {
					tmp = rdev->fence_drv[i].last_activity;
				}
			}
			/* test if somebody else has already decided that this is a lockup */
			if (last_activity != tmp) {
				last_activity = tmp;
				mutex_unlock(&rdev->ring_lock);
				continue;
			}

			if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
				/* good news we believe it's a lockup */
				dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016llx)\n",
					 target_seq[ring]);

				/* change last activity so nobody else thinks there is a lockup */
				for (i = 0; i < RADEON_NUM_RINGS; ++i) {
					rdev->fence_drv[i].last_activity = jiffies;
				}

				/* mark the ring as not ready any more */
				rdev->ring[ring].ready = false;
				mutex_unlock(&rdev->ring_lock);
				return -EDEADLK;
			}
			mutex_unlock(&rdev->ring_lock);
		}
	}
	return 0;
}

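/* Wait for the first of the fences in @fences to signal; NULL entries
 * are skipped. Returns 0 immediately if one of them already signaled.
 */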
int radeon_fence_wait_any(struct radeon_device *rdev,
			  struct radeon_fence **fences,
			  bool intr)
{
	uint64_t seq[RADEON_NUM_RINGS];
	unsigned i;
	int r;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		seq[i] = 0;

		if (!fences[i]) {
			continue;
		}

		if (fences[i]->seq == RADEON_FENCE_SIGNALED_SEQ) {
			/* something was already signaled */
			return 0;
		}

		if (fences[i]->seq < RADEON_FENCE_NOTEMITED_SEQ) {
			seq[i] = fences[i]->seq;
		}
	}

	r = radeon_fence_wait_any_seq(rdev, seq, intr);
	if (r) {
		return r;
	}
	return 0;
}

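/* Wait for the next fence emitted on @ring to signal. Returns -ENOENT
 * when nothing is outstanding beyond the last signaled fence.
 */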
int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
{
	uint64_t seq;

	/* We are not protected by the ring lock when reading the current
	 * seq, but it's ok as the worst case is that we return too early
	 * while we could have waited.
	 */
	seq = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
	if (seq >= rdev->fence_drv[ring].seq) {
		/* nothing to wait for, last_seq is
		   already the last emitted fence */
		return -ENOENT;
	}
	return radeon_fence_wait_seq(rdev, seq, ring, false, false);
}

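/* Wait until all fences emitted on @ring have signaled. */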
int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
{
	/* We are not protected by the ring lock when reading the current
	 * seq, but it's ok as wait empty is called from a place where no
	 * more activity can be scheduled, so there won't be concurrent
	 * access to the seq value.
	 */
	return radeon_fence_wait_seq(rdev, rdev->fence_drv[ring].seq,
				     ring, false, false);
}

struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
{
	kref_get(&fence->kref);
	return fence;
}

void radeon_fence_unref(struct radeon_fence **fence)
{
	struct radeon_fence *tmp = *fence;

	*fence = NULL;
	if (tmp) {
		kref_put(&tmp->kref, radeon_fence_destroy);
	}
}

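/* Return the number of fences emitted on @ring that have not signaled
 * yet, capped so a 32-bit wrap around doesn't produce nonsense.
 */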
unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
{
	uint64_t emitted;

	/* We are not protected by ring lock when reading the last sequence
	 * but it's ok to report slightly wrong fence count here.
	 */
	radeon_fence_process(rdev, ring);
	emitted = rdev->fence_drv[ring].seq - atomic64_read(&rdev->fence_drv[ring].last_seq);
	/* to avoid a 32-bit wrap around */
	if (emitted > 0x10000000) {
		emitted = 0x10000000;
	}
	return (unsigned)emitted;
}

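/* Pick the writeback slot or scratch register used for fence values on
 * @ring and initialize it with the current sequence number.
 */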
int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
{
	uint64_t index;
	int r;

	radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
	if (rdev->wb.use_event) {
		rdev->fence_drv[ring].scratch_reg = 0;
		index = R600_WB_EVENT_OFFSET + ring * 4;
	} else {
		r = radeon_scratch_get(rdev, &rdev->fence_drv[ring].scratch_reg);
		if (r) {
			dev_err(rdev->dev, "fence failed to get scratch register\n");
			return r;
		}
		index = RADEON_WB_SCRATCH_OFFSET +
			rdev->fence_drv[ring].scratch_reg -
			rdev->scratch.reg_base;
	}
	rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
	rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
	radeon_fence_write(rdev, rdev->fence_drv[ring].seq, ring);
	rdev->fence_drv[ring].initialized = true;
	dev_info(rdev->dev, "fence driver on ring %d use gpu addr 0x%016llx and cpu addr 0x%p\n",
		 ring, rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr);
	return 0;
}

static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
{
	rdev->fence_drv[ring].scratch_reg = -1;
	rdev->fence_drv[ring].cpu_addr = NULL;
	rdev->fence_drv[ring].gpu_addr = 0;
	rdev->fence_drv[ring].seq = 0;
	atomic64_set(&rdev->fence_drv[ring].last_seq, 0);
	rdev->fence_drv[ring].last_activity = jiffies;
	rdev->fence_drv[ring].initialized = false;
}

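/* Initialize the fence wait queue and the per-ring fence driver state.
 * The writeback location for each ring is set up later by
 * radeon_fence_driver_start_ring().
 */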
int radeon_fence_driver_init(struct radeon_device *rdev)
{
	int ring;

	init_waitqueue_head(&rdev->fence_queue);
	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
		radeon_fence_driver_init_ring(rdev, ring);
	}
	if (radeon_debugfs_fence_init(rdev)) {
		dev_err(rdev->dev, "fence debugfs file creation failed\n");
	}
	return 0;
}

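/* Tear down the fence driver: wait for all outstanding fences, wake up
 * any remaining waiters and release the scratch registers.
 */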
void radeon_fence_driver_fini(struct radeon_device *rdev)
{
	int ring;

	mutex_lock(&rdev->ring_lock);
	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
		if (!rdev->fence_drv[ring].initialized)
			continue;
		radeon_fence_wait_empty_locked(rdev, ring);
		wake_up_all(&rdev->fence_queue);
		radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
		rdev->fence_drv[ring].initialized = false;
	}
	mutex_unlock(&rdev->ring_lock);
}

/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	int i;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!rdev->fence_drv[i].initialized)
			continue;

		seq_printf(m, "--- ring %d ---\n", i);
		seq_printf(m, "Last signaled fence 0x%016llx\n",
			   (unsigned long long)atomic64_read(&rdev->fence_drv[i].last_seq));
		seq_printf(m, "Last emitted 0x%016llx\n",
			   rdev->fence_drv[i].seq);
	}
	return 0;
}

static struct drm_info_list radeon_debugfs_fence_list[] = {
	{"radeon_fence_info", &radeon_debugfs_fence_info, 0, NULL},
};
#endif

int radeon_debugfs_fence_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_fence_list, 1);
#else
	return 0;
#endif
}