/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Dave Airlie
 */
#include <linux/seq_file.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include "drmP.h"
#include "drm.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_trace.h"

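/*
 * radeon_fence_write - write a fence sequence value for a ring
 *
 * Stores @seq where the driver expects the hardware fence value for
 * @ring: the writeback page when writeback is enabled, otherwise the
 * ring's dedicated scratch register.
 */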
static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)
{
	if (rdev->wb.enabled) {
		*rdev->fence_drv[ring].cpu_addr = cpu_to_le32(seq);
	} else {
		WREG32(rdev->fence_drv[ring].scratch_reg, seq);
	}
}

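/*
 * radeon_fence_read - read back the current fence sequence value
 *
 * Returns the last sequence value written for @ring, taken from the
 * writeback page when enabled, otherwise from the scratch register.
 */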
static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
{
	u32 seq = 0;

	if (rdev->wb.enabled) {
		seq = le32_to_cpu(*rdev->fence_drv[ring].cpu_addr);
	} else {
		seq = RREG32(rdev->fence_drv[ring].scratch_reg);
	}
	return seq;
}

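/*
 * radeon_fence_emit - emit a fence on the requested ring
 *
 * Assigns the next driver-side sequence number to @fence, has the ring
 * emit it and moves the fence onto the ring's emitted list. Emitting an
 * already emitted fence is a no-op; both paths return 0.
 */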
int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
{
	unsigned long irq_flags;

	write_lock_irqsave(&rdev->fence_lock, irq_flags);
	if (fence->seq && fence->seq < RADEON_FENCE_NOTEMITED_SEQ) {
		write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
		return 0;
	}
	/* we are protected by the ring emission mutex */
	fence->seq = ++rdev->fence_drv[fence->ring].seq;
	radeon_fence_ring_emit(rdev, fence->ring, fence);
	trace_radeon_fence_emit(rdev->ddev, fence->seq);
	/* are we the first fence on a previously idle ring? */
	if (list_empty(&rdev->fence_drv[fence->ring].emitted)) {
		rdev->fence_drv[fence->ring].last_activity = jiffies;
	}
	list_move_tail(&fence->list, &rdev->fence_drv[fence->ring].emitted);
	write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
	return 0;
}

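/*
 * radeon_fence_poll_locked - check which fences have signaled
 *
 * Re-reads the hardware sequence value for @ring, extends it to 64 bits
 * relative to last_seq, and moves the fence matching that value, plus
 * every older fence, from the emitted list to the signaled list.
 * Returns true when waiters should be woken. Expects the caller to hold
 * the fence lock.
 */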
static bool radeon_fence_poll_locked(struct radeon_device *rdev, int ring)
{
	struct radeon_fence *fence;
	struct list_head *i, *n;
	uint64_t seq, last_seq;
	unsigned count_loop = 0;
	bool wake = false;

	/* Note there is a scenario here for an infinite loop but it's
	 * very unlikely to happen. For it to happen, the current polling
	 * process needs to be interrupted by another process, and that
	 * other process needs to update last_seq between the atomic read
	 * and the xchg of the current process.
	 *
	 * Moreover, for this to become an infinite loop, new fences must
	 * keep signaling continuously, i.e. radeon_fence_read needs to
	 * return a different value each time for both the currently
	 * polling process and the other process that updates last_seq
	 * between our atomic read and xchg. In addition, the value the
	 * other process sets as last_seq must be higher than the seq
	 * value we just read, which means the current process must be
	 * interrupted after radeon_fence_read and before the atomic xchg.
	 *
	 * To be even safer we count the number of times we loop and bail
	 * out after 10 iterations, accepting that we might temporarily
	 * have set last_seq to an older value than the true last seq
	 * signaled by the hardware.
	 */
	last_seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
	do {
		seq = radeon_fence_read(rdev, ring);
		seq |= last_seq & 0xffffffff00000000LL;
		if (seq < last_seq) {
			seq += 0x100000000LL;
		}

		if (!wake && seq == last_seq) {
			return false;
		}
		/* If we loop over, we don't want to return without
		 * checking whether a fence is signaled, since it means
		 * the seq we just read is different from the previous one.
		 */
		wake = true;
		if ((count_loop++) > 10) {
			/* We looped too many times; leave, accepting that
			 * we might have recorded an older fence seq than
			 * the real last seq signaled by the hardware.
			 */
			break;
		}
		last_seq = seq;
	} while (atomic64_xchg(&rdev->fence_drv[ring].last_seq, seq) > seq);

	/* reset wake to false */
	wake = false;
	rdev->fence_drv[ring].last_activity = jiffies;

	n = NULL;
	list_for_each(i, &rdev->fence_drv[ring].emitted) {
		fence = list_entry(i, struct radeon_fence, list);
		if (fence->seq == seq) {
			n = i;
			break;
		}
	}
	/* all fences previous to this one are considered signaled */
	if (n) {
		i = n;
		do {
			n = i->prev;
			list_move_tail(i, &rdev->fence_drv[ring].signaled);
			fence = list_entry(i, struct radeon_fence, list);
			fence->seq = RADEON_FENCE_SIGNALED_SEQ;
			i = n;
		} while (i != &rdev->fence_drv[ring].emitted);
		wake = true;
	}
	return wake;
}

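/*
 * radeon_fence_destroy - kref release callback for a fence
 *
 * Unlinks the fence from whichever list it is on, frees its semaphore
 * if one was attached, and releases the memory. Invoked through
 * kref_put() from radeon_fence_unref().
 */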
static void radeon_fence_destroy(struct kref *kref)
{
	unsigned long irq_flags;
	struct radeon_fence *fence;

	fence = container_of(kref, struct radeon_fence, kref);
	write_lock_irqsave(&fence->rdev->fence_lock, irq_flags);
	list_del(&fence->list);
	fence->seq = RADEON_FENCE_NOTEMITED_SEQ;
	write_unlock_irqrestore(&fence->rdev->fence_lock, irq_flags);
	if (fence->semaphore)
		radeon_semaphore_free(fence->rdev, fence->semaphore);
	kfree(fence);
}

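/*
 * radeon_fence_create - allocate and initialize a not-yet-emitted fence
 *
 * The new fence targets @ring and starts with its sequence set to
 * RADEON_FENCE_NOTEMITED_SEQ until radeon_fence_emit() is called.
 * Returns -ENOMEM if the allocation fails.
 */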
int radeon_fence_create(struct radeon_device *rdev,
			struct radeon_fence **fence,
			int ring)
{
	*fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL);
	if ((*fence) == NULL) {
		return -ENOMEM;
	}
	kref_init(&((*fence)->kref));
	(*fence)->rdev = rdev;
	(*fence)->seq = RADEON_FENCE_NOTEMITED_SEQ;
	(*fence)->ring = ring;
	(*fence)->semaphore = NULL;
	INIT_LIST_HEAD(&(*fence)->list);
	return 0;
}

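/*
 * radeon_fence_signaled - check whether a fence has signaled
 *
 * Returns true if the fence carries the signaled sequence value, if the
 * device is shutting down, if the fence was never emitted (with a
 * warning), or if polling the ring shows the hardware has reached it.
 */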
bool radeon_fence_signaled(struct radeon_fence *fence)
{
	unsigned long irq_flags;
	bool signaled = false;

	if (!fence)
		return true;

	write_lock_irqsave(&fence->rdev->fence_lock, irq_flags);
	signaled = (fence->seq == RADEON_FENCE_SIGNALED_SEQ);
	/* if we are shutting down report all fences as signaled */
	if (fence->rdev->shutdown) {
		signaled = true;
	}
	if (fence->seq == RADEON_FENCE_NOTEMITED_SEQ) {
		WARN(1, "Querying an unemitted fence : %p !\n", fence);
		signaled = true;
	}
	if (!signaled) {
		radeon_fence_poll_locked(fence->rdev, fence->ring);
		signaled = (fence->seq == RADEON_FENCE_SIGNALED_SEQ);
	}
	write_unlock_irqrestore(&fence->rdev->fence_lock, irq_flags);
	return signaled;
}

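/*
 * radeon_fence_wait - wait for a fence to signal
 *
 * Sleeps on the ring's wait queue until the fence signals, waking up
 * periodically to check for progress. If no fence activity is seen
 * within the timeout window and the ring appears locked up, the ring
 * is marked not ready and -EDEADLK is returned. @intr selects an
 * interruptible wait, in which case the wait may also fail with
 * -ERESTARTSYS.
 */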
int radeon_fence_wait(struct radeon_fence *fence, bool intr)
{
	struct radeon_device *rdev;
	unsigned long irq_flags, timeout, last_activity;
	uint64_t seq;
	int i, r;
	bool signaled;

	if (fence == NULL) {
		WARN(1, "Querying an invalid fence : %p !\n", fence);
		return -EINVAL;
	}

	rdev = fence->rdev;
	signaled = radeon_fence_signaled(fence);
	while (!signaled) {
		read_lock_irqsave(&rdev->fence_lock, irq_flags);
		timeout = jiffies - RADEON_FENCE_JIFFIES_TIMEOUT;
		if (time_after(rdev->fence_drv[fence->ring].last_activity, timeout)) {
			/* the normal case, timeout is somewhere before last_activity */
			timeout = rdev->fence_drv[fence->ring].last_activity - timeout;
		} else {
			/* either jiffies wrapped around, or no fence was signaled in the last 500ms;
			 * anyway we will just wait for the minimum amount and then check for a lockup */
			timeout = 1;
		}
		/* save current sequence value used to check for GPU lockups */
		seq = atomic64_read(&rdev->fence_drv[fence->ring].last_seq);
		/* save current last activity value, used to check for GPU lockups */
		last_activity = rdev->fence_drv[fence->ring].last_activity;
		read_unlock_irqrestore(&rdev->fence_lock, irq_flags);

		trace_radeon_fence_wait_begin(rdev->ddev, seq);
		radeon_irq_kms_sw_irq_get(rdev, fence->ring);
		if (intr) {
			r = wait_event_interruptible_timeout(
				rdev->fence_drv[fence->ring].queue,
				(signaled = radeon_fence_signaled(fence)), timeout);
		} else {
			r = wait_event_timeout(
				rdev->fence_drv[fence->ring].queue,
				(signaled = radeon_fence_signaled(fence)), timeout);
		}
		radeon_irq_kms_sw_irq_put(rdev, fence->ring);
		if (unlikely(r < 0)) {
			return r;
		}
		trace_radeon_fence_wait_end(rdev->ddev, seq);

		if (unlikely(!signaled)) {
			/* we were interrupted for some reason and fence
			 * isn't signaled yet, resume waiting */
			if (r) {
				continue;
			}

			write_lock_irqsave(&rdev->fence_lock, irq_flags);
			/* test if somebody else has already decided that this is a lockup */
			if (last_activity != rdev->fence_drv[fence->ring].last_activity) {
				write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
				continue;
			}

			write_unlock_irqrestore(&rdev->fence_lock, irq_flags);

			if (radeon_ring_is_lockup(rdev, fence->ring, &rdev->ring[fence->ring])) {
				/* good news, we believe it's a lockup */
				dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016llx last fence id 0x%016llx)\n",
					 fence->seq, seq);

				/* change last activity so nobody else thinks there is a lockup */
				for (i = 0; i < RADEON_NUM_RINGS; ++i) {
					rdev->fence_drv[i].last_activity = jiffies;
				}

				/* mark the ring as not ready any more */
				rdev->ring[fence->ring].ready = false;
				return -EDEADLK;
			}
		}
	}
	return 0;
}

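/*
 * radeon_fence_wait_next - wait for the oldest fence emitted on a ring
 *
 * Returns -EBUSY if the ring is not ready and -ENOENT if nothing is on
 * the emitted list; otherwise waits non-interruptibly on the first
 * (oldest) emitted fence.
 */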
int radeon_fence_wait_next(struct radeon_device *rdev, int ring)
{
	unsigned long irq_flags;
	struct radeon_fence *fence;
	int r;

	write_lock_irqsave(&rdev->fence_lock, irq_flags);
	if (!rdev->ring[ring].ready) {
		write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
		return -EBUSY;
	}
	if (list_empty(&rdev->fence_drv[ring].emitted)) {
		write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
		return -ENOENT;
	}
	fence = list_entry(rdev->fence_drv[ring].emitted.next,
			   struct radeon_fence, list);
	radeon_fence_ref(fence);
	write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
	r = radeon_fence_wait(fence, false);
	radeon_fence_unref(&fence);
	return r;
}

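/*
 * radeon_fence_wait_empty - wait until a ring has no pending fences
 *
 * Waits on the most recently emitted fence, which implies all older
 * fences have signaled too. Returns 0 right away when the emitted list
 * is empty, -EBUSY when the ring is not ready.
 */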
int radeon_fence_wait_empty(struct radeon_device *rdev, int ring)
{
	unsigned long irq_flags;
	struct radeon_fence *fence;
	int r;

	write_lock_irqsave(&rdev->fence_lock, irq_flags);
	if (!rdev->ring[ring].ready) {
		write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
		return -EBUSY;
	}
	if (list_empty(&rdev->fence_drv[ring].emitted)) {
		write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
		return 0;
	}
	fence = list_entry(rdev->fence_drv[ring].emitted.prev,
			   struct radeon_fence, list);
	radeon_fence_ref(fence);
	write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
	r = radeon_fence_wait(fence, false);
	radeon_fence_unref(&fence);
	return r;
}

struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
{
	kref_get(&fence->kref);
	return fence;
}

void radeon_fence_unref(struct radeon_fence **fence)
{
	struct radeon_fence *tmp = *fence;

	*fence = NULL;
	if (tmp) {
		kref_put(&tmp->kref, radeon_fence_destroy);
	}
}

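/*
 * radeon_fence_process - poll a ring and wake up any waiters
 *
 * Takes the fence lock, lets radeon_fence_poll_locked() move newly
 * signaled fences, and wakes the ring's wait queue when something
 * signaled; typically called from the interrupt handling path.
 */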
void radeon_fence_process(struct radeon_device *rdev, int ring)
{
	unsigned long irq_flags;
	bool wake;

	write_lock_irqsave(&rdev->fence_lock, irq_flags);
	wake = radeon_fence_poll_locked(rdev, ring);
	write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
	if (wake) {
		wake_up_all(&rdev->fence_drv[ring].queue);
	}
}

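/*
 * radeon_fence_count_emitted - count outstanding fences on a ring
 *
 * Returns the number of fences still on the emitted list, capped at 3
 * (see the comment in the loop). Returns 0 if the ring's fence driver
 * was never initialized.
 */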
int radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
{
	unsigned long irq_flags;
	int not_processed = 0;

	read_lock_irqsave(&rdev->fence_lock, irq_flags);
	if (!rdev->fence_drv[ring].initialized) {
		read_unlock_irqrestore(&rdev->fence_lock, irq_flags);
		return 0;
	}

	if (!list_empty(&rdev->fence_drv[ring].emitted)) {
		struct list_head *ptr;
		list_for_each(ptr, &rdev->fence_drv[ring].emitted) {
			/* count up to 3, that's enough info */
			if (++not_processed >= 3)
				break;
		}
	}
	read_unlock_irqrestore(&rdev->fence_lock, irq_flags);
	return not_processed;
}

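/*
 * radeon_fence_driver_start_ring - set up the fence memory for a ring
 *
 * Chooses where the hardware will write fence values: an offset into
 * the writeback page when events are in use, otherwise a newly
 * allocated scratch register. The current driver sequence value is
 * written back so reads stay consistent after a (re)start.
 */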
int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
{
	unsigned long irq_flags;
	uint64_t index;
	int r;

	write_lock_irqsave(&rdev->fence_lock, irq_flags);
	radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
	if (rdev->wb.use_event) {
		rdev->fence_drv[ring].scratch_reg = 0;
		index = R600_WB_EVENT_OFFSET + ring * 4;
	} else {
		r = radeon_scratch_get(rdev, &rdev->fence_drv[ring].scratch_reg);
		if (r) {
			dev_err(rdev->dev, "fence failed to get scratch register\n");
			write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
			return r;
		}
		index = RADEON_WB_SCRATCH_OFFSET +
			rdev->fence_drv[ring].scratch_reg -
			rdev->scratch.reg_base;
	}
	rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
	rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
	radeon_fence_write(rdev, rdev->fence_drv[ring].seq, ring);
	rdev->fence_drv[ring].initialized = true;
	DRM_INFO("fence driver on ring %d use gpu addr 0x%016llx and cpu addr 0x%p\n",
		 ring, rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr);
	write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
	return 0;
}

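/*
 * radeon_fence_driver_init_ring - reset per-ring fence bookkeeping
 *
 * Resets the sequence counters and initializes the emitted/signaled
 * lists and the wait queue; the ring stays marked uninitialized until
 * radeon_fence_driver_start_ring() is called for it.
 */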
static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
{
	rdev->fence_drv[ring].scratch_reg = -1;
	rdev->fence_drv[ring].cpu_addr = NULL;
	rdev->fence_drv[ring].gpu_addr = 0;
	rdev->fence_drv[ring].seq = 0;
	atomic64_set(&rdev->fence_drv[ring].last_seq, 0);
	INIT_LIST_HEAD(&rdev->fence_drv[ring].emitted);
	INIT_LIST_HEAD(&rdev->fence_drv[ring].signaled);
	init_waitqueue_head(&rdev->fence_drv[ring].queue);
	rdev->fence_drv[ring].initialized = false;
}

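/*
 * radeon_fence_driver_init - initialize fence state for every ring
 *
 * Runs the per-ring init and registers the fence debugfs file; failure
 * to create the debugfs entry is only reported, never fatal.
 */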
int radeon_fence_driver_init(struct radeon_device *rdev)
{
	unsigned long irq_flags;
	int ring;

	write_lock_irqsave(&rdev->fence_lock, irq_flags);
	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
		radeon_fence_driver_init_ring(rdev, ring);
	}
	write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
	if (radeon_debugfs_fence_init(rdev)) {
		dev_err(rdev->dev, "fence debugfs file creation failed\n");
	}
	return 0;
}

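/*
 * radeon_fence_driver_fini - tear down the fence driver
 *
 * For each initialized ring: wait for outstanding fences, wake any
 * remaining waiters and free the scratch register.
 */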
void radeon_fence_driver_fini(struct radeon_device *rdev)
{
	unsigned long irq_flags;
	int ring;

	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
		if (!rdev->fence_drv[ring].initialized)
			continue;
		radeon_fence_wait_empty(rdev, ring);
		wake_up_all(&rdev->fence_drv[ring].queue);
		write_lock_irqsave(&rdev->fence_lock, irq_flags);
		radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
		write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
		rdev->fence_drv[ring].initialized = false;
	}
}


/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_fence *fence;
	int i;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!rdev->fence_drv[i].initialized)
			continue;

		seq_printf(m, "--- ring %d ---\n", i);
		seq_printf(m, "Last signaled fence 0x%016llx\n",
			   (unsigned long long)atomic64_read(&rdev->fence_drv[i].last_seq));
		if (!list_empty(&rdev->fence_drv[i].emitted)) {
			fence = list_entry(rdev->fence_drv[i].emitted.prev,
					   struct radeon_fence, list);
			seq_printf(m, "Last emitted fence %p with 0x%016llx\n",
				   fence, fence->seq);
		}
	}
	return 0;
}

static struct drm_info_list radeon_debugfs_fence_list[] = {
	{"radeon_fence_info", &radeon_debugfs_fence_info, 0, NULL},
};
#endif

int radeon_debugfs_fence_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_fence_list, 1);
#else
	return 0;
#endif
}