blob: b1f9a81b5d1dabcf37da7de738817a6d8db5f84a [file] [log] [blame]
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001/*
2 * Copyright 2009 Jerome Glisse.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sub license, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
16 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
17 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
18 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
19 * USE OR OTHER DEALINGS IN THE SOFTWARE.
20 *
21 * The above copyright notice and this permission notice (including the
22 * next paragraph) shall be included in all copies or substantial portions
23 * of the Software.
24 *
25 */
26/*
27 * Authors:
28 * Jerome Glisse <glisse@freedesktop.org>
29 * Dave Airlie
30 */
31#include <linux/seq_file.h>
32#include <asm/atomic.h>
33#include <linux/wait.h>
34#include <linux/list.h>
35#include <linux/kref.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090036#include <linux/slab.h>
Jerome Glisse771fe6b2009-06-05 14:42:42 +020037#include "drmP.h"
38#include "drm.h"
39#include "radeon_reg.h"
40#include "radeon.h"
41
42int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
43{
44 unsigned long irq_flags;
45
46 write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
47 if (fence->emited) {
48 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
49 return 0;
50 }
51 fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
52 if (!rdev->cp.ready) {
53 /* FIXME: cp is not running assume everythings is done right
54 * away
55 */
56 WREG32(rdev->fence_drv.scratch_reg, fence->seq);
Jerome Glisse3ce0a232009-09-08 10:10:24 +100057 } else
Jerome Glisse771fe6b2009-06-05 14:42:42 +020058 radeon_fence_ring_emit(rdev, fence);
Jerome Glisse3ce0a232009-09-08 10:10:24 +100059
Jerome Glisse771fe6b2009-06-05 14:42:42 +020060 fence->emited = true;
Jerome Glisse771fe6b2009-06-05 14:42:42 +020061 list_del(&fence->list);
62 list_add_tail(&fence->list, &rdev->fence_drv.emited);
63 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
64 return 0;
65}
66
/*
 * Poll the fence scratch register and retire completed fences.
 *
 * Caller must hold rdev->fence_drv.lock.  Reads the last sequence
 * number the GPU wrote back; if it advanced, every fence on the
 * "emited" list up to and including that sequence is moved to the
 * "signaled" list and marked signaled.  If it did NOT advance, the
 * bookkeeping jiffies/timeout fields are updated so callers can later
 * decide whether the GPU is locked up.
 *
 * Returns true when at least one fence was newly signaled (callers use
 * this to wake up sleepers on fence_drv.queue), false otherwise.
 */
static bool radeon_fence_poll_locked(struct radeon_device *rdev)
{
	struct radeon_fence *fence;
	struct list_head *i, *n;
	uint32_t seq;
	bool wake = false;
	unsigned long cjiffies;

	seq = RREG32(rdev->fence_drv.scratch_reg);
	if (seq != rdev->fence_drv.last_seq) {
		/* GPU made progress: restart the lockup countdown */
		rdev->fence_drv.last_seq = seq;
		rdev->fence_drv.last_jiffies = jiffies;
		rdev->fence_drv.last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
	} else {
		cjiffies = jiffies;
		if (time_after(cjiffies, rdev->fence_drv.last_jiffies)) {
			cjiffies -= rdev->fence_drv.last_jiffies;
			if (time_after(rdev->fence_drv.last_timeout, cjiffies)) {
				/* update the timeout */
				rdev->fence_drv.last_timeout -= cjiffies;
			} else {
				/* the 500ms timeout is elapsed we should test
				 * for GPU lockup
				 */
				rdev->fence_drv.last_timeout = 1;
			}
		} else {
			/* wrap around update last jiffies, we will just wait
			 * a little longer
			 */
			rdev->fence_drv.last_jiffies = cjiffies;
		}
		/* no new sequence -> nothing to retire */
		return false;
	}
	/* find the fence matching the freshly signaled sequence number */
	n = NULL;
	list_for_each(i, &rdev->fence_drv.emited) {
		fence = list_entry(i, struct radeon_fence, list);
		if (fence->seq == seq) {
			n = i;
			break;
		}
	}
	/* all fence previous to this one are considered as signaled */
	if (n) {
		/* walk backwards from the matched node to the list head,
		 * moving each entry to the signaled list
		 */
		i = n;
		do {
			n = i->prev;
			list_del(i);
			list_add_tail(i, &rdev->fence_drv.signaled);
			fence = list_entry(i, struct radeon_fence, list);
			fence->signaled = true;
			i = n;
		} while (i != &rdev->fence_drv.emited);
		wake = true;
	}
	return wake;
}
124
125static void radeon_fence_destroy(struct kref *kref)
126{
127 unsigned long irq_flags;
128 struct radeon_fence *fence;
129
130 fence = container_of(kref, struct radeon_fence, kref);
131 write_lock_irqsave(&fence->rdev->fence_drv.lock, irq_flags);
132 list_del(&fence->list);
133 fence->emited = false;
134 write_unlock_irqrestore(&fence->rdev->fence_drv.lock, irq_flags);
135 kfree(fence);
136}
137
138int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence)
139{
140 unsigned long irq_flags;
141
142 *fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL);
143 if ((*fence) == NULL) {
144 return -ENOMEM;
145 }
146 kref_init(&((*fence)->kref));
147 (*fence)->rdev = rdev;
148 (*fence)->emited = false;
149 (*fence)->signaled = false;
150 (*fence)->seq = 0;
151 INIT_LIST_HEAD(&(*fence)->list);
152
153 write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
154 list_add_tail(&(*fence)->list, &rdev->fence_drv.created);
155 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
156 return 0;
157}
158
159
160bool radeon_fence_signaled(struct radeon_fence *fence)
161{
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200162 unsigned long irq_flags;
163 bool signaled = false;
164
Darren Jenkins3655d542009-12-30 12:20:05 +1100165 if (!fence)
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200166 return true;
Darren Jenkins3655d542009-12-30 12:20:05 +1100167
168 if (fence->rdev->gpu_lockup)
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200169 return true;
Darren Jenkins3655d542009-12-30 12:20:05 +1100170
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200171 write_lock_irqsave(&fence->rdev->fence_drv.lock, irq_flags);
172 signaled = fence->signaled;
173 /* if we are shuting down report all fence as signaled */
174 if (fence->rdev->shutdown) {
175 signaled = true;
176 }
177 if (!fence->emited) {
178 WARN(1, "Querying an unemited fence : %p !\n", fence);
179 signaled = true;
180 }
181 if (!signaled) {
182 radeon_fence_poll_locked(fence->rdev);
183 signaled = fence->signaled;
184 }
185 write_unlock_irqrestore(&fence->rdev->fence_drv.lock, irq_flags);
186 return signaled;
187}
188
/*
 * Block until @fence signals.  @intr selects an interruptible sleep
 * (returns -ERESTARTSYS et al. if a signal arrives) versus an
 * uninterruptible one.  While waiting, the software interrupt is held
 * enabled so fence IRQs can wake us.  If the wait times out without the
 * GPU advancing its sequence number, the GPU is considered locked up
 * and a reset is attempted, after which the wait resumes.
 *
 * Returns 0 once the fence signaled (a NULL fence also returns 0 after
 * a WARN), or a negative error from the interruptible wait / GPU reset.
 */
int radeon_fence_wait(struct radeon_fence *fence, bool intr)
{
	struct radeon_device *rdev;
	unsigned long irq_flags, timeout;
	u32 seq;
	int r;

	if (fence == NULL) {
		WARN(1, "Querying an invalid fence : %p !\n", fence);
		return 0;
	}
	rdev = fence->rdev;
	if (radeon_fence_signaled(fence)) {
		return 0;
	}
	timeout = rdev->fence_drv.last_timeout;
retry:
	/* save current sequence used to check for GPU lockup */
	seq = rdev->fence_drv.last_seq;
	if (intr) {
		radeon_irq_kms_sw_irq_get(rdev);
		r = wait_event_interruptible_timeout(rdev->fence_drv.queue,
				radeon_fence_signaled(fence), timeout);
		radeon_irq_kms_sw_irq_put(rdev);
		if (unlikely(r < 0)) {
			/* interrupted by a signal: propagate the error */
			return r;
		}
	} else {
		radeon_irq_kms_sw_irq_get(rdev);
		r = wait_event_timeout(rdev->fence_drv.queue,
			 radeon_fence_signaled(fence), timeout);
		radeon_irq_kms_sw_irq_put(rdev);
	}
	if (unlikely(!radeon_fence_signaled(fence))) {
		/* we were interrupted for some reason and fence isn't
		 * signaled yet, resume wait
		 */
		if (r) {
			/* r holds the jiffies remaining from the wait */
			timeout = r;
			goto retry;
		}
		/* don't protect read access to rdev->fence_drv.last_seq
		 * if we experiencing a lockup the value doesn't change
		 */
		if (seq == rdev->fence_drv.last_seq && radeon_gpu_is_lockup(rdev)) {
			/* good news we believe it's a lockup */
			WARN(1, "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n", fence->seq, seq);
			/* FIXME: what should we do ? marking everyone
			 * as signaled for now
			 */
			rdev->gpu_lockup = true;
			r = radeon_gpu_reset(rdev);
			if (r)
				return r;
			/* report this fence's sequence as completed so
			 * pending waiters see it as signaled
			 */
			WREG32(rdev->fence_drv.scratch_reg, fence->seq);
			rdev->gpu_lockup = false;
		}
		/* restart the lockup countdown and keep waiting */
		timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
		write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
		rdev->fence_drv.last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
		rdev->fence_drv.last_jiffies = jiffies;
		write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
		goto retry;
	}
	return 0;
}
255
256int radeon_fence_wait_next(struct radeon_device *rdev)
257{
258 unsigned long irq_flags;
259 struct radeon_fence *fence;
260 int r;
261
262 if (rdev->gpu_lockup) {
263 return 0;
264 }
265 write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
266 if (list_empty(&rdev->fence_drv.emited)) {
267 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
268 return 0;
269 }
270 fence = list_entry(rdev->fence_drv.emited.next,
271 struct radeon_fence, list);
272 radeon_fence_ref(fence);
273 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
274 r = radeon_fence_wait(fence, false);
275 radeon_fence_unref(&fence);
276 return r;
277}
278
279int radeon_fence_wait_last(struct radeon_device *rdev)
280{
281 unsigned long irq_flags;
282 struct radeon_fence *fence;
283 int r;
284
285 if (rdev->gpu_lockup) {
286 return 0;
287 }
288 write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
289 if (list_empty(&rdev->fence_drv.emited)) {
290 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
291 return 0;
292 }
293 fence = list_entry(rdev->fence_drv.emited.prev,
294 struct radeon_fence, list);
295 radeon_fence_ref(fence);
296 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
297 r = radeon_fence_wait(fence, false);
298 radeon_fence_unref(&fence);
299 return r;
300}
301
/*
 * Take an additional reference on @fence.  Returns @fence so the call
 * can be chained.  Caller must already hold a valid reference.
 */
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
{
	kref_get(&fence->kref);
	return fence;
}
307
308void radeon_fence_unref(struct radeon_fence **fence)
309{
310 struct radeon_fence *tmp = *fence;
311
312 *fence = NULL;
313 if (tmp) {
314 kref_put(&tmp->kref, &radeon_fence_destroy);
315 }
316}
317
318void radeon_fence_process(struct radeon_device *rdev)
319{
320 unsigned long irq_flags;
321 bool wake;
322
323 write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
324 wake = radeon_fence_poll_locked(rdev);
325 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
326 if (wake) {
327 wake_up_all(&rdev->fence_drv.queue);
328 }
329}
330
331int radeon_fence_driver_init(struct radeon_device *rdev)
332{
333 unsigned long irq_flags;
334 int r;
335
336 write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
337 r = radeon_scratch_get(rdev, &rdev->fence_drv.scratch_reg);
338 if (r) {
Jerome Glisse0a0c7592009-12-11 20:36:19 +0100339 dev_err(rdev->dev, "fence failed to get scratch register\n");
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200340 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
341 return r;
342 }
343 WREG32(rdev->fence_drv.scratch_reg, 0);
344 atomic_set(&rdev->fence_drv.seq, 0);
345 INIT_LIST_HEAD(&rdev->fence_drv.created);
346 INIT_LIST_HEAD(&rdev->fence_drv.emited);
347 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200348 init_waitqueue_head(&rdev->fence_drv.queue);
Jerome Glisse0a0c7592009-12-11 20:36:19 +0100349 rdev->fence_drv.initialized = true;
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200350 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
351 if (radeon_debugfs_fence_init(rdev)) {
Jerome Glisse0a0c7592009-12-11 20:36:19 +0100352 dev_err(rdev->dev, "fence debugfs file creation failed\n");
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200353 }
354 return 0;
355}
356
357void radeon_fence_driver_fini(struct radeon_device *rdev)
358{
359 unsigned long irq_flags;
360
Jerome Glisse0a0c7592009-12-11 20:36:19 +0100361 if (!rdev->fence_drv.initialized)
362 return;
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200363 wake_up_all(&rdev->fence_drv.queue);
364 write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
365 radeon_scratch_free(rdev, rdev->fence_drv.scratch_reg);
366 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
Jerome Glisse0a0c7592009-12-11 20:36:19 +0100367 rdev->fence_drv.initialized = false;
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200368}
369
370
371/*
372 * Fence debugfs
373 */
#if defined(CONFIG_DEBUG_FS)
/*
 * debugfs show callback: print the last sequence number the GPU
 * signaled (from the scratch register) and, when one exists, the most
 * recently emitted fence and its sequence number.
 *
 * NOTE(review): walks fence_drv.emited without taking fence_drv.lock —
 * presumably tolerated for a debug read; verify against locking rules.
 */
static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_fence *fence;

	seq_printf(m, "Last signaled fence 0x%08X\n",
		   RREG32(rdev->fence_drv.scratch_reg));
	if (!list_empty(&rdev->fence_drv.emited)) {
		/* newest fence lives at the tail of the emited list */
		fence = list_entry(rdev->fence_drv.emited.prev,
				   struct radeon_fence, list);
		seq_printf(m, "Last emited fence %p with 0x%08X\n",
			   fence, fence->seq);
	}
	return 0;
}

/* single debugfs entry registered by radeon_debugfs_fence_init() */
static struct drm_info_list radeon_debugfs_fence_list[] = {
	{"radeon_fence_info", &radeon_debugfs_fence_info, 0, NULL},
};
#endif
397
/*
 * Register the fence debugfs file.  Returns whatever
 * radeon_debugfs_add_files() returns, or 0 when debugfs support is
 * compiled out.
 */
int radeon_debugfs_fence_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_fence_list, 1);
#else
	return 0;
#endif
}