/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Dave Airlie
 */
#include <linux/seq_file.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include "drmP.h"
#include "drm.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_trace.h"

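/*
 * radeon_fence_write - write a fence sequence number
 *
 * @rdev: radeon device
 * @seq: sequence number to write
 * @ring: ring the fence belongs to
 *
 * Write the fence value to the writeback page when writeback is
 * enabled, otherwise to the ring's scratch register.
 */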
static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)
{
	u32 scratch_index;

	if (rdev->wb.enabled) {
		if (rdev->wb.use_event)
			scratch_index = R600_WB_EVENT_OFFSET +
				rdev->fence_drv[ring].scratch_reg - rdev->scratch.reg_base;
		else
			scratch_index = RADEON_WB_SCRATCH_OFFSET +
				rdev->fence_drv[ring].scratch_reg - rdev->scratch.reg_base;
		rdev->wb.wb[scratch_index/4] = cpu_to_le32(seq);
	} else
		WREG32(rdev->fence_drv[ring].scratch_reg, seq);
}

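/*
 * radeon_fence_read - read back the last signaled sequence number
 *
 * @rdev: radeon device
 * @ring: ring the fence belongs to
 *
 * Read the fence value from the writeback page when writeback is
 * enabled, otherwise from the ring's scratch register.
 */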
static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
{
	u32 seq = 0;
	u32 scratch_index;

	if (rdev->wb.enabled) {
		if (rdev->wb.use_event)
			scratch_index = R600_WB_EVENT_OFFSET +
				rdev->fence_drv[ring].scratch_reg - rdev->scratch.reg_base;
		else
			scratch_index = RADEON_WB_SCRATCH_OFFSET +
				rdev->fence_drv[ring].scratch_reg - rdev->scratch.reg_base;
		seq = le32_to_cpu(rdev->wb.wb[scratch_index/4]);
	} else
		seq = RREG32(rdev->fence_drv[ring].scratch_reg);
	return seq;
}

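/*
 * radeon_fence_emit - emit a fence on the requested ring
 *
 * @rdev: radeon device
 * @fence: fence to emit
 *
 * Assign the next sequence number of the fence's ring and emit a
 * fence command on it, then move the fence to the ring's emitted
 * list.  Emitting an already emitted fence is a no-op.
 */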
int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
{
	unsigned long irq_flags;

	write_lock_irqsave(&rdev->fence_lock, irq_flags);
	if (fence->emitted) {
		write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
		return 0;
	}
	fence->seq = atomic_add_return(1, &rdev->fence_drv[fence->ring].seq);
	if (!rdev->cp[fence->ring].ready)
		/* FIXME: cp is not running, assume everything is done
		 * right away
		 */
		radeon_fence_write(rdev, fence->seq, fence->ring);
	else
		radeon_fence_ring_emit(rdev, fence->ring, fence);

	trace_radeon_fence_emit(rdev->ddev, fence->seq);
	fence->emitted = true;
	list_move_tail(&fence->list, &rdev->fence_drv[fence->ring].emitted);
	write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
	return 0;
}

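/*
 * radeon_fence_poll_locked - check for signaled fences on a ring
 *
 * @rdev: radeon device
 * @ring: ring to poll
 *
 * Read back the last signaled sequence number and move every fence
 * emitted up to it onto the signaled list; also maintain the per-ring
 * timeout bookkeeping used for lockup detection.  Returns true when
 * waiters should be woken up.  Caller must hold rdev->fence_lock.
 */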
static bool radeon_fence_poll_locked(struct radeon_device *rdev, int ring)
{
	struct radeon_fence *fence;
	struct list_head *i, *n;
	uint32_t seq;
	bool wake = false;
	unsigned long cjiffies;

	seq = radeon_fence_read(rdev, ring);
	if (seq != rdev->fence_drv[ring].last_seq) {
		rdev->fence_drv[ring].last_seq = seq;
		rdev->fence_drv[ring].last_jiffies = jiffies;
		rdev->fence_drv[ring].last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
	} else {
		cjiffies = jiffies;
		if (time_after(cjiffies, rdev->fence_drv[ring].last_jiffies)) {
			cjiffies -= rdev->fence_drv[ring].last_jiffies;
			if (time_after(rdev->fence_drv[ring].last_timeout, cjiffies)) {
				/* update the timeout */
				rdev->fence_drv[ring].last_timeout -= cjiffies;
			} else {
123 /* the 500ms timeout is elapsed we should test
124 * for GPU lockup
125 */
				rdev->fence_drv[ring].last_timeout = 1;
			}
		} else {
			/* jiffies wrapped around; update last_jiffies, we
			 * will just wait a little longer
			 */
			rdev->fence_drv[ring].last_jiffies = cjiffies;
		}
		return false;
	}
	n = NULL;
	list_for_each(i, &rdev->fence_drv[ring].emitted) {
		fence = list_entry(i, struct radeon_fence, list);
		if (fence->seq == seq) {
			n = i;
			break;
		}
	}
	/* all fences previous to this one are considered signaled */
	if (n) {
		i = n;
		do {
			n = i->prev;
			list_move_tail(i, &rdev->fence_drv[ring].signaled);
			fence = list_entry(i, struct radeon_fence, list);
			fence->signaled = true;
			i = n;
		} while (i != &rdev->fence_drv[ring].emitted);
		wake = true;
	}
	return wake;
}

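/*
 * radeon_fence_destroy - kref release callback
 *
 * Unlink the fence from whichever list it is on and free it.
 */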
static void radeon_fence_destroy(struct kref *kref)
{
	unsigned long irq_flags;
	struct radeon_fence *fence;

	fence = container_of(kref, struct radeon_fence, kref);
	write_lock_irqsave(&fence->rdev->fence_lock, irq_flags);
	list_del(&fence->list);
	fence->emitted = false;
	write_unlock_irqrestore(&fence->rdev->fence_lock, irq_flags);
	kfree(fence);
}

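/*
 * radeon_fence_create - create an unemitted fence
 *
 * @rdev: radeon device
 * @fence: resulting fence
 * @ring: ring the fence will be emitted on
 *
 * Allocate the fence with an initial reference and add it to the
 * ring's created list.  Returns 0 on success, -ENOMEM otherwise.
 */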
int radeon_fence_create(struct radeon_device *rdev,
			struct radeon_fence **fence,
			int ring)
{
	unsigned long irq_flags;

	*fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL);
	if ((*fence) == NULL) {
		return -ENOMEM;
	}
	kref_init(&((*fence)->kref));
	(*fence)->rdev = rdev;
	(*fence)->emitted = false;
	(*fence)->signaled = false;
	(*fence)->seq = 0;
	(*fence)->ring = ring;
	INIT_LIST_HEAD(&(*fence)->list);

	write_lock_irqsave(&rdev->fence_lock, irq_flags);
	list_add_tail(&(*fence)->list, &rdev->fence_drv[ring].created);
	write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
	return 0;
}

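/*
 * radeon_fence_signaled - check whether a fence has signaled
 *
 * A NULL fence, a GPU lockup or a driver shutdown are all reported as
 * signaled.  Otherwise the ring is polled before the fence's signaled
 * flag is returned.
 */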
bool radeon_fence_signaled(struct radeon_fence *fence)
{
	unsigned long irq_flags;
	bool signaled = false;

	if (!fence)
		return true;

	if (fence->rdev->gpu_lockup)
		return true;

	write_lock_irqsave(&fence->rdev->fence_lock, irq_flags);
	signaled = fence->signaled;
	/* if we are shutting down, report all fences as signaled */
	if (fence->rdev->shutdown) {
		signaled = true;
	}
	if (!fence->emitted) {
		WARN(1, "Querying an unemitted fence : %p !\n", fence);
		signaled = true;
	}
	if (!signaled) {
		radeon_fence_poll_locked(fence->rdev, fence->ring);
		signaled = fence->signaled;
	}
	write_unlock_irqrestore(&fence->rdev->fence_lock, irq_flags);
	return signaled;
}

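/*
 * radeon_fence_wait - wait for a fence to signal
 *
 * @fence: fence to wait for
 * @intr: wait interruptibly when true
 *
 * Sleep on the ring's wait queue until the fence signals.  If the
 * wait times out without the sequence number advancing, the GPU is
 * assumed to be locked up, gets reset, and the wait is resumed.
 * Returns 0 once the fence has signaled, or a negative error code
 * when an interruptible wait was interrupted by a signal.
 */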
int radeon_fence_wait(struct radeon_fence *fence, bool intr)
{
	struct radeon_device *rdev;
	unsigned long irq_flags, timeout;
	u32 seq;
	int r;

	if (fence == NULL) {
		WARN(1, "Querying an invalid fence : %p !\n", fence);
		return 0;
	}
	rdev = fence->rdev;
	if (radeon_fence_signaled(fence)) {
		return 0;
	}
	timeout = rdev->fence_drv[fence->ring].last_timeout;
retry:
	/* save current sequence used to check for GPU lockup */
	seq = rdev->fence_drv[fence->ring].last_seq;
	trace_radeon_fence_wait_begin(rdev->ddev, seq);
	if (intr) {
		radeon_irq_kms_sw_irq_get(rdev);
		r = wait_event_interruptible_timeout(rdev->fence_drv[fence->ring].queue,
				radeon_fence_signaled(fence), timeout);
		radeon_irq_kms_sw_irq_put(rdev);
		if (unlikely(r < 0)) {
			return r;
		}
	} else {
		radeon_irq_kms_sw_irq_get(rdev);
		r = wait_event_timeout(rdev->fence_drv[fence->ring].queue,
				radeon_fence_signaled(fence), timeout);
		radeon_irq_kms_sw_irq_put(rdev);
	}
	trace_radeon_fence_wait_end(rdev->ddev, seq);
	if (unlikely(!radeon_fence_signaled(fence))) {
		/* we were interrupted for some reason and the fence
		 * isn't signaled yet, resume waiting
		 */
		if (r) {
			timeout = r;
			goto retry;
		}
		/* don't protect read access to
		 * rdev->fence_drv[fence->ring].last_seq: if we are
		 * experiencing a lockup the value doesn't change
		 */
		if (seq == rdev->fence_drv[fence->ring].last_seq &&
		    radeon_gpu_is_lockup(rdev, &rdev->cp[fence->ring])) {
			/* good news, we believe it's a lockup */
			printk(KERN_WARNING "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n",
			       fence->seq, seq);
			/* FIXME: what should we do? Marking everyone
			 * as signaled for now
			 */
			rdev->gpu_lockup = true;
			r = radeon_gpu_reset(rdev);
			if (r)
				return r;
			radeon_fence_write(rdev, fence->seq, fence->ring);
			rdev->gpu_lockup = false;
		}
		timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
		write_lock_irqsave(&rdev->fence_lock, irq_flags);
		rdev->fence_drv[fence->ring].last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
		rdev->fence_drv[fence->ring].last_jiffies = jiffies;
		write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
		goto retry;
	}
	return 0;
}

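/*
 * radeon_fence_wait_next - wait for the oldest emitted fence
 *
 * @rdev: radeon device
 * @ring: ring to wait on
 *
 * Wait for the first fence on the ring's emitted list; returns 0
 * immediately when the list is empty or the GPU is locked up.
 */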
int radeon_fence_wait_next(struct radeon_device *rdev, int ring)
{
	unsigned long irq_flags;
	struct radeon_fence *fence;
	int r;

	if (rdev->gpu_lockup) {
		return 0;
	}
	write_lock_irqsave(&rdev->fence_lock, irq_flags);
	if (list_empty(&rdev->fence_drv[ring].emitted)) {
		write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
		return 0;
	}
	fence = list_entry(rdev->fence_drv[ring].emitted.next,
			   struct radeon_fence, list);
	radeon_fence_ref(fence);
	write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
	r = radeon_fence_wait(fence, false);
	radeon_fence_unref(&fence);
	return r;
}

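/*
 * radeon_fence_wait_last - wait for all emitted fences
 *
 * @rdev: radeon device
 * @ring: ring to wait on
 *
 * Wait for the last fence on the ring's emitted list, i.e. until the
 * ring is idle; returns 0 immediately when the list is empty or the
 * GPU is locked up.
 */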
int radeon_fence_wait_last(struct radeon_device *rdev, int ring)
{
	unsigned long irq_flags;
	struct radeon_fence *fence;
	int r;

	if (rdev->gpu_lockup) {
		return 0;
	}
	write_lock_irqsave(&rdev->fence_lock, irq_flags);
	if (list_empty(&rdev->fence_drv[ring].emitted)) {
		write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
		return 0;
	}
	fence = list_entry(rdev->fence_drv[ring].emitted.prev,
			   struct radeon_fence, list);
	radeon_fence_ref(fence);
	write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
	r = radeon_fence_wait(fence, false);
	radeon_fence_unref(&fence);
	return r;
}

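/*
 * radeon_fence_ref - take an additional reference on a fence
 */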
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
{
	kref_get(&fence->kref);
	return fence;
}

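/*
 * radeon_fence_unref - drop a reference, freeing the fence on the
 * last put and clearing the caller's pointer.
 */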
void radeon_fence_unref(struct radeon_fence **fence)
{
	struct radeon_fence *tmp = *fence;

	*fence = NULL;
	if (tmp) {
		kref_put(&tmp->kref, radeon_fence_destroy);
	}
}

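/*
 * radeon_fence_process - poll a ring and wake up waiters
 *
 * @rdev: radeon device
 * @ring: ring to process
 *
 * Called (typically from the interrupt handler) to move newly
 * signaled fences to the signaled list and wake anybody sleeping in
 * radeon_fence_wait().
 */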
void radeon_fence_process(struct radeon_device *rdev, int ring)
{
	unsigned long irq_flags;
	bool wake;

	write_lock_irqsave(&rdev->fence_lock, irq_flags);
	wake = radeon_fence_poll_locked(rdev, ring);
	write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
	if (wake) {
		wake_up_all(&rdev->fence_drv[ring].queue);
	}
}

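/*
 * radeon_fence_driver_init - set up per-ring fence state
 *
 * @rdev: radeon device
 * @num_rings: number of rings to initialize
 *
 * Grab a scratch register and initialize the sequence counter, fence
 * lists and wait queue for each of the first @num_rings rings; the
 * remaining rings are only marked as uninitialized.
 */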
int radeon_fence_driver_init(struct radeon_device *rdev, int num_rings)
{
	unsigned long irq_flags;
	int r, ring;

	for (ring = 0; ring < num_rings; ring++) {
		write_lock_irqsave(&rdev->fence_lock, irq_flags);
		r = radeon_scratch_get(rdev, &rdev->fence_drv[ring].scratch_reg);
		if (r) {
			dev_err(rdev->dev, "fence failed to get scratch register\n");
			write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
			return r;
		}
		radeon_fence_write(rdev, 0, ring);
		atomic_set(&rdev->fence_drv[ring].seq, 0);
		INIT_LIST_HEAD(&rdev->fence_drv[ring].created);
		INIT_LIST_HEAD(&rdev->fence_drv[ring].emitted);
		INIT_LIST_HEAD(&rdev->fence_drv[ring].signaled);
		init_waitqueue_head(&rdev->fence_drv[ring].queue);
		rdev->fence_drv[ring].initialized = true;
		write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
	}
	for (ring = num_rings; ring < RADEON_NUM_RINGS; ring++) {
		write_lock_irqsave(&rdev->fence_lock, irq_flags);
		INIT_LIST_HEAD(&rdev->fence_drv[ring].created);
		INIT_LIST_HEAD(&rdev->fence_drv[ring].emitted);
		INIT_LIST_HEAD(&rdev->fence_drv[ring].signaled);
		rdev->fence_drv[ring].initialized = false;
		write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
	}
	if (radeon_debugfs_fence_init(rdev)) {
		dev_err(rdev->dev, "fence debugfs file creation failed\n");
	}
	return 0;
}

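/*
 * radeon_fence_driver_fini - tear down per-ring fence state
 *
 * Wake any remaining waiters and release the scratch register of
 * every initialized ring.
 */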
void radeon_fence_driver_fini(struct radeon_device *rdev)
{
	unsigned long irq_flags;
	int ring;

	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
		if (!rdev->fence_drv[ring].initialized)
			continue;
		wake_up_all(&rdev->fence_drv[ring].queue);
		write_lock_irqsave(&rdev->fence_lock, irq_flags);
		radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
		write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
		rdev->fence_drv[ring].initialized = false;
	}
}


/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_fence *fence;
	int i;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!rdev->fence_drv[i].initialized)
			continue;

		seq_printf(m, "--- ring %d ---\n", i);
		seq_printf(m, "Last signaled fence 0x%08X\n",
			   radeon_fence_read(rdev, i));
		if (!list_empty(&rdev->fence_drv[i].emitted)) {
			fence = list_entry(rdev->fence_drv[i].emitted.prev,
					   struct radeon_fence, list);
			seq_printf(m, "Last emitted fence %p with 0x%08X\n",
				   fence, fence->seq);
		}
	}
	return 0;
}

static struct drm_info_list radeon_debugfs_fence_list[] = {
	{"radeon_fence_info", &radeon_debugfs_fence_info, 0, NULL},
};
#endif

int radeon_debugfs_fence_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_fence_list, 1);
#else
	return 0;
#endif
}