/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Dave Airlie
 */
#include <linux/seq_file.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

/*
 * Fences
 * Fences mark an event in the GPU's pipeline and are used
 * for GPU/CPU synchronization. When the fence is written,
 * it is expected that all buffers associated with that fence
 * are no longer in use by the associated ring on the GPU and
 * that the relevant GPU caches have been flushed.
 */

/**
 * amdgpu_fence_write - write a fence value
 *
 * @ring: ring the fence is associated with
 * @seq: sequence number to write
 *
 * Writes a fence value to memory (all asics).
 */
static void amdgpu_fence_write(struct amdgpu_ring *ring, u32 seq)
{
        struct amdgpu_fence_driver *drv = &ring->fence_drv;

        if (drv->cpu_addr)
                *drv->cpu_addr = cpu_to_le32(seq);
}

/**
 * amdgpu_fence_read - read a fence value
 *
 * @ring: ring the fence is associated with
 *
 * Reads a fence value from memory (all asics).
 * Returns the value of the fence read from memory.
 */
static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
{
        struct amdgpu_fence_driver *drv = &ring->fence_drv;
        u32 seq = 0;

        if (drv->cpu_addr)
                seq = le32_to_cpu(*drv->cpu_addr);
        else
                seq = lower_32_bits(atomic64_read(&drv->last_seq));

        return seq;
}
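
/*
 * Usage note (descriptive only): the value read here is just the low
 * 32 bits of the logical 64-bit sequence number. Callers such as
 * amdgpu_fence_activity() below extend it against the 64-bit last_seq
 * before comparing, so a raw read is not meaningful on its own.
 */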

/**
 * amdgpu_fence_schedule_check - schedule lockup check
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Queues a delayed work item to check for lockups.
 */
static void amdgpu_fence_schedule_check(struct amdgpu_ring *ring)
{
        /*
         * Do not reset the timer here with mod_delayed_work,
         * this can livelock in an interaction with TTM delayed destroy.
         */
        queue_delayed_work(system_power_efficient_wq,
                           &ring->fence_drv.lockup_work,
                           AMDGPU_FENCE_JIFFIES_TIMEOUT);
}

/**
 * amdgpu_fence_emit - emit a fence on the requested ring
 *
 * @ring: ring the fence is associated with
 * @owner: creator of the fence
 * @fence: amdgpu fence object
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, -ENOMEM on failure.
 */
int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
                      struct amdgpu_fence **fence)
{
        struct amdgpu_device *adev = ring->adev;

        /* we are protected by the ring emission mutex */
        *fence = kmalloc(sizeof(struct amdgpu_fence), GFP_KERNEL);
        if ((*fence) == NULL)
                return -ENOMEM;

        (*fence)->seq = ++ring->fence_drv.sync_seq[ring->idx];
        (*fence)->ring = ring;
        (*fence)->owner = owner;
        fence_init(&(*fence)->base, &amdgpu_fence_ops,
                   &ring->fence_drv.fence_queue.lock,
                   adev->fence_context + ring->idx,
                   (*fence)->seq);
        amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
                               (*fence)->seq,
                               AMDGPU_FENCE_FLAG_INT);
        trace_amdgpu_fence_emit(ring->adev->ddev, ring->idx, (*fence)->seq);
        return 0;
}
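
/*
 * Example (an illustrative sketch, not driver code): a typical internal
 * caller emits a fence while holding the ring emission lock, waits on it,
 * then drops its reference. "owner" is whatever token the caller uses to
 * identify itself.
 *
 *      struct amdgpu_fence *fence;
 *      int r;
 *
 *      r = amdgpu_fence_emit(ring, owner, &fence);
 *      if (!r) {
 *              r = amdgpu_fence_wait(fence, false);
 *              amdgpu_fence_unref(&fence);
 *      }
 */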

/**
 * amdgpu_fence_check_signaled - callback from fence_queue
 *
 * This function is called with the fence_queue lock held, which is also
 * used for the fence locking itself, so unlocked variants are used for
 * fence_signal and remove_wait_queue.
 */
static int amdgpu_fence_check_signaled(wait_queue_t *wait, unsigned mode, int flags, void *key)
{
        struct amdgpu_fence *fence;
        struct amdgpu_device *adev;
        u64 seq;
        int ret;

        fence = container_of(wait, struct amdgpu_fence, fence_wake);
        adev = fence->ring->adev;

        /*
         * We cannot use amdgpu_fence_process here because we're already
         * in the waitqueue, in a call from wake_up_all.
         */
        seq = atomic64_read(&fence->ring->fence_drv.last_seq);
        if (seq >= fence->seq) {
                ret = fence_signal_locked(&fence->base);
                if (!ret)
                        FENCE_TRACE(&fence->base, "signaled from irq context\n");
                else
                        FENCE_TRACE(&fence->base, "was already signaled\n");

                __remove_wait_queue(&fence->ring->fence_drv.fence_queue, &fence->fence_wake);
                fence_put(&fence->base);
        } else
                FENCE_TRACE(&fence->base, "pending\n");
        return 0;
}

/**
 * amdgpu_fence_activity - check for fence activity
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Checks the current fence value and calculates the last
 * signalled fence value. Returns true if activity occurred
 * on the ring and the fence_queue should be woken up.
 */
static bool amdgpu_fence_activity(struct amdgpu_ring *ring)
{
        uint64_t seq, last_seq, last_emitted;
        unsigned count_loop = 0;
        bool wake = false;

        /* Note there is a scenario here for an infinite loop but it's
         * very unlikely to happen. For it to happen, the current polling
         * process needs to be interrupted by another process, and that
         * other process needs to update last_seq between the atomic read
         * and the xchg of the current process.
         *
         * Moreover, for this to turn into an infinite loop there need to
         * be continuously new fences signaled, i.e. amdgpu_fence_read needs
         * to return a different value each time for both the currently
         * polling process and the other process that xchg's last_seq
         * between the atomic read and xchg of the current process. And the
         * value the other process sets as last_seq must be higher than
         * the seq value we just read, which means that the current process
         * needs to be interrupted after amdgpu_fence_read and before the
         * atomic xchg.
         *
         * To be even more safe we count the number of times we loop and
         * bail after 10 loops, accepting the fact that we might have
         * temporarily set last_seq not to the true last seq but to an
         * older one.
         */
        last_seq = atomic64_read(&ring->fence_drv.last_seq);
        do {
                last_emitted = ring->fence_drv.sync_seq[ring->idx];
                seq = amdgpu_fence_read(ring);
                seq |= last_seq & 0xffffffff00000000LL;
                if (seq < last_seq) {
                        seq &= 0xffffffff;
                        seq |= last_emitted & 0xffffffff00000000LL;
                }

                if (seq <= last_seq || seq > last_emitted)
                        break;

                /* If we loop over we don't want to return without
                 * checking if a fence is signaled, as it means that the
                 * seq we just read is different from the previous one.
                 */
                wake = true;
                last_seq = seq;
                if ((count_loop++) > 10) {
                        /* We looped too many times; leave with the
                         * fact that we might have set an older fence
                         * seq than the current real last seq as signaled
                         * by the hw.
                         */
                        break;
                }
        } while (atomic64_xchg(&ring->fence_drv.last_seq, seq) > seq);

        if (seq < last_emitted)
                amdgpu_fence_schedule_check(ring);

        return wake;
}
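
/*
 * Worked example of the 32->64 bit extension above (illustrative numbers):
 * suppose last_seq = 0x0000000200000005 and the hardware writes only the
 * low 32 bits. If amdgpu_fence_read() returns 0x00000007, then
 * seq = 0x00000007 | (last_seq & 0xffffffff00000000) = 0x0000000200000007.
 * If it instead returns 0x00000002, the extended value 0x0000000200000002
 * is below last_seq (the 32-bit counter wrapped), so the high word is
 * taken from last_emitted, say 0x0000000300000002, giving
 * seq = 0x0000000300000002.
 */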

/**
 * amdgpu_fence_check_lockup - check for hardware lockup
 *
 * @work: delayed work item
 *
 * Checks for fence activity and if there is none probes
 * the hardware to see if a lockup occurred.
 */
static void amdgpu_fence_check_lockup(struct work_struct *work)
{
        struct amdgpu_fence_driver *fence_drv;
        struct amdgpu_ring *ring;

        fence_drv = container_of(work, struct amdgpu_fence_driver,
                                 lockup_work.work);
        ring = fence_drv->ring;

        if (!down_read_trylock(&ring->adev->exclusive_lock)) {
                /* just reschedule the check if a reset is going on */
                amdgpu_fence_schedule_check(ring);
                return;
        }

        if (amdgpu_fence_activity(ring)) {
                wake_up_all(&ring->fence_drv.fence_queue);
        } else if (amdgpu_ring_is_lockup(ring)) {
                /* good news we believe it's a lockup */
                dev_warn(ring->adev->dev, "GPU lockup (current fence id "
                         "0x%016llx last fence id 0x%016llx on ring %d)\n",
                         (uint64_t)atomic64_read(&fence_drv->last_seq),
                         fence_drv->sync_seq[ring->idx], ring->idx);

                /* remember that we need a reset */
                ring->adev->needs_reset = true;
                wake_up_all(&ring->fence_drv.fence_queue);
        }
        up_read(&ring->adev->exclusive_lock);
}

/**
 * amdgpu_fence_process - process a fence
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Checks the current fence value and wakes the fence queue
 * if the sequence number has increased (all asics).
 */
void amdgpu_fence_process(struct amdgpu_ring *ring)
{
        uint64_t seq, last_seq, last_emitted;
        unsigned count_loop = 0;
        bool wake = false;
        unsigned long irqflags;

        /* Note there is a scenario here for an infinite loop but it's
         * very unlikely to happen. For it to happen, the current polling
         * process needs to be interrupted by another process, and that
         * other process needs to update last_seq between the atomic read
         * and the xchg of the current process.
         *
         * Moreover, for this to turn into an infinite loop there need to
         * be continuously new fences signaled, i.e. amdgpu_fence_read needs
         * to return a different value each time for both the currently
         * polling process and the other process that xchg's last_seq
         * between the atomic read and xchg of the current process. And the
         * value the other process sets as last_seq must be higher than
         * the seq value we just read, which means that the current process
         * needs to be interrupted after amdgpu_fence_read and before the
         * atomic xchg.
         *
         * To be even more safe we count the number of times we loop and
         * bail after 10 loops, accepting the fact that we might have
         * temporarily set last_seq not to the true last seq but to an
         * older one.
         */
        spin_lock_irqsave(&ring->fence_lock, irqflags);
        last_seq = atomic64_read(&ring->fence_drv.last_seq);
        do {
                last_emitted = ring->fence_drv.sync_seq[ring->idx];
                seq = amdgpu_fence_read(ring);
                seq |= last_seq & 0xffffffff00000000LL;
                if (seq < last_seq) {
                        seq &= 0xffffffff;
                        seq |= last_emitted & 0xffffffff00000000LL;
                }

                if (seq <= last_seq || seq > last_emitted)
                        break;

                /* If we loop over we don't want to return without
                 * checking if a fence is signaled, as it means that the
                 * seq we just read is different from the previous one.
                 */
                wake = true;
                last_seq = seq;
                if ((count_loop++) > 10) {
                        /* We looped too many times; leave with the
                         * fact that we might have set an older fence
                         * seq than the current real last seq as signaled
                         * by the hw.
                         */
                        break;
                }
        } while (atomic64_xchg(&ring->fence_drv.last_seq, seq) > seq);

        if (wake) {
                if (amdgpu_enable_scheduler) {
                        uint64_t handled_seq =
                                amd_sched_get_handled_seq(ring->scheduler);
                        uint64_t latest_seq =
                                atomic64_read(&ring->fence_drv.last_seq);
                        if (handled_seq == latest_seq) {
                                DRM_ERROR("ring %d, EOP without seq update (latest_seq=%llu)\n",
                                          ring->idx, latest_seq);
                                goto exit;
                        }
                        do {
                                amd_sched_isr(ring->scheduler);
                        } while (amd_sched_get_handled_seq(ring->scheduler) < latest_seq);
                }

                wake_up_all(&ring->fence_drv.fence_queue);
        }
exit:
        spin_unlock_irqrestore(&ring->fence_lock, irqflags);
}

/**
 * amdgpu_fence_seq_signaled - check if a fence sequence number has signaled
 *
 * @ring: ring the fence is associated with
 * @seq: sequence number
 *
 * Check if the last signaled fence sequence number is >= the requested
 * sequence number (all asics).
 * Returns true if the fence has signaled (current fence value
 * is >= requested value) or false if it has not (current fence
 * value is < the requested value). Helper function for
 * amdgpu_fence_signaled().
 */
static bool amdgpu_fence_seq_signaled(struct amdgpu_ring *ring, u64 seq)
{
        if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
                return true;

        /* poll new last sequence at least once */
        amdgpu_fence_process(ring);
        if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
                return true;

        return false;
}

static bool amdgpu_fence_is_signaled(struct fence *f)
{
        struct amdgpu_fence *fence = to_amdgpu_fence(f);
        struct amdgpu_ring *ring = fence->ring;
        struct amdgpu_device *adev = ring->adev;

        if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
                return true;

        if (down_read_trylock(&adev->exclusive_lock)) {
                amdgpu_fence_process(ring);
                up_read(&adev->exclusive_lock);

                if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
                        return true;
        }
        return false;
}

/**
 * amdgpu_fence_enable_signaling - enable signaling on a fence
 * @f: fence
 *
 * This function is called with the fence_queue lock held, and adds a
 * callback to fence_queue that checks if this fence is signaled, and if
 * so it signals the fence and removes itself.
 */
static bool amdgpu_fence_enable_signaling(struct fence *f)
{
        struct amdgpu_fence *fence = to_amdgpu_fence(f);
        struct amdgpu_ring *ring = fence->ring;

        if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
                return false;

        fence->fence_wake.flags = 0;
        fence->fence_wake.private = NULL;
        fence->fence_wake.func = amdgpu_fence_check_signaled;
        __add_wait_queue(&ring->fence_drv.fence_queue, &fence->fence_wake);
        fence_get(f);
        FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
        return true;
}

/**
 * amdgpu_fence_signaled - check if a fence has signaled
 *
 * @fence: amdgpu fence object
 *
 * Check if the requested fence has signaled (all asics).
 * Returns true if the fence has signaled or false if it has not.
 */
bool amdgpu_fence_signaled(struct amdgpu_fence *fence)
{
        if (!fence)
                return true;

        if (amdgpu_fence_seq_signaled(fence->ring, fence->seq)) {
                if (!fence_signal(&fence->base))
                        FENCE_TRACE(&fence->base, "signaled from amdgpu_fence_signaled\n");
                return true;
        }

        return false;
}

/*
 * amdgpu_fence_ring_wait_seq_timeout - wait for a seq of a specific ring to signal
 * @ring: ring to wait on for the seq number
 * @seq: seq number to wait for
 * @intr: if interruptible
 * @timeout: jiffies before timing out
 *
 * return value:
 * 0: timed out, seq not signaled, and the GPU is not hung
 * X (X > 0): seq signaled, X is how many jiffies remained before timing out
 * -EDEADLK: GPU hang detected before timing out
 * -ERESTARTSYS: interrupted before seq signaled
 * -EINVAL: some parameter is not valid
 */
static long amdgpu_fence_ring_wait_seq_timeout(struct amdgpu_ring *ring, uint64_t seq,
                                               bool intr, long timeout)
{
        struct amdgpu_device *adev = ring->adev;
        long r = 0;
        bool signaled = false;

        BUG_ON(!ring);
        if (seq > ring->fence_drv.sync_seq[ring->idx])
                return -EINVAL;

        if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
                return timeout;

        while (1) {
                if (intr) {
                        r = wait_event_interruptible_timeout(ring->fence_drv.fence_queue, (
                                        (signaled = amdgpu_fence_seq_signaled(ring, seq))
                                        || adev->needs_reset), AMDGPU_FENCE_JIFFIES_TIMEOUT);

                        if (r == -ERESTARTSYS) /* interrupted */
                                return r;
                } else {
                        r = wait_event_timeout(ring->fence_drv.fence_queue, (
                                        (signaled = amdgpu_fence_seq_signaled(ring, seq))
                                        || adev->needs_reset), AMDGPU_FENCE_JIFFIES_TIMEOUT);
                }

                if (signaled) {
                        /* seq signaled; report what is left of the caller's
                         * timeout, counting only the time actually spent in
                         * the final wait (r is its remaining jiffies) */
                        if (timeout == MAX_SCHEDULE_TIMEOUT)
                                return timeout;
                        return timeout - (AMDGPU_FENCE_JIFFIES_TIMEOUT - r);
                } else if (adev->needs_reset) {
                        return -EDEADLK;
                }

                /* check if it's a lockup */
                if (amdgpu_ring_is_lockup(ring)) {
                        uint64_t last_seq = atomic64_read(&ring->fence_drv.last_seq);
                        /* ring lockup */
                        dev_warn(adev->dev, "GPU lockup (waiting for "
                                 "0x%016llx last fence id 0x%016llx on"
                                 " ring %d)\n",
                                 seq, last_seq, ring->idx);
                        wake_up_all(&ring->fence_drv.fence_queue);
                        return -EDEADLK;
                }

                if (timeout < MAX_SCHEDULE_TIMEOUT) {
                        timeout -= AMDGPU_FENCE_JIFFIES_TIMEOUT;
                        if (timeout < 1)
                                return 0;
                }
        }
}
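
/*
 * Example of interpreting the return value above (an illustrative sketch;
 * "seq" is assumed to be a sequence number previously emitted on "ring"):
 *
 *      long r = amdgpu_fence_ring_wait_seq_timeout(ring, seq, true,
 *                                                  msecs_to_jiffies(100));
 *      if (r == 0)
 *              ;       // timed out, GPU still healthy
 *      else if (r == -EDEADLK)
 *              ;       // GPU hang detected, a reset is needed
 *      else if (r == -ERESTARTSYS)
 *              ;       // interrupted by a signal, retry the wait
 *      else if (r > 0)
 *              ;       // signaled, r jiffies of the timeout remained
 */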

/**
 * amdgpu_fence_wait - wait for a fence to signal
 *
 * @fence: amdgpu fence object
 * @intr: use interruptible sleep
 *
 * Wait for the requested fence to signal (all asics).
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the fence.
 * Returns 0 if the fence has passed, error for all other cases.
 */
int amdgpu_fence_wait(struct amdgpu_fence *fence, bool intr)
{
        long r;

        r = fence_wait_timeout(&fence->base, intr, MAX_SCHEDULE_TIMEOUT);
        if (r < 0)
                return r;
        return 0;
}

/**
 * amdgpu_fence_wait_next - wait for the next fence to signal
 *
 * @ring: ring to wait on for the next fence
 *
 * Wait for the next fence on the requested ring to signal (all asics).
 * Returns 0 if the next fence has passed, error for all other cases.
 * Caller must hold the ring lock.
 */
int amdgpu_fence_wait_next(struct amdgpu_ring *ring)
{
        long r;
        uint64_t seq = atomic64_read(&ring->fence_drv.last_seq) + 1ULL;

        if (seq >= ring->fence_drv.sync_seq[ring->idx])
                return -ENOENT;
        r = amdgpu_fence_ring_wait_seq_timeout(ring, seq, false, MAX_SCHEDULE_TIMEOUT);
        if (r < 0)
                return r;

        return 0;
}

/**
 * amdgpu_fence_wait_empty - wait for all fences to signal
 *
 * @ring: ring to wait on for all emitted fences
 *
 * Wait for all fences on the requested ring to signal (all asics).
 * Returns 0 if the fences have passed, error for all other cases.
 * Caller must hold the ring lock.
 */
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
{
        long r;
        uint64_t seq = ring->fence_drv.sync_seq[ring->idx];

        if (!seq)
                return 0;

        r = amdgpu_fence_ring_wait_seq_timeout(ring, seq, false, MAX_SCHEDULE_TIMEOUT);
        if (r < 0) {
                if (r == -EDEADLK)
                        return -EDEADLK;

                dev_err(ring->adev->dev, "error waiting for ring[%d] to become idle (%ld)\n",
                        ring->idx, r);
        }
        return 0;
}

/**
 * amdgpu_fence_ref - take a ref on a fence
 *
 * @fence: amdgpu fence object
 *
 * Take a reference on a fence (all asics).
 * Returns the fence.
 */
struct amdgpu_fence *amdgpu_fence_ref(struct amdgpu_fence *fence)
{
        fence_get(&fence->base);
        return fence;
}

/**
 * amdgpu_fence_unref - remove a ref on a fence
 *
 * @fence: amdgpu fence object
 *
 * Remove a reference on a fence (all asics).
 */
void amdgpu_fence_unref(struct amdgpu_fence **fence)
{
        struct amdgpu_fence *tmp = *fence;

        *fence = NULL;
        if (tmp)
                fence_put(&tmp->base);
}

/**
 * amdgpu_fence_count_emitted - get the count of emitted fences
 *
 * @ring: ring the fence is associated with
 *
 * Get the number of fences emitted on the requested ring (all asics).
 * Returns the number of emitted fences on the ring. Used by the
 * dynpm code to track ring activity.
 */
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
{
        uint64_t emitted;

        /* We are not protected by the ring lock when reading the last
         * sequence, but it's ok to report a slightly wrong fence count here.
         */
        amdgpu_fence_process(ring);
        emitted = ring->fence_drv.sync_seq[ring->idx]
                - atomic64_read(&ring->fence_drv.last_seq);
        /* to avoid a 32-bit wraparound */
        if (emitted > 0x10000000)
                emitted = 0x10000000;

        return (unsigned)emitted;
}
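
/*
 * Example (illustrative numbers): if sync_seq[ring->idx] == 110 and
 * last_seq == 100, ten fences have been emitted but not yet signaled, so
 * this returns 10; power-management code can treat a persistently large
 * value as a busy ring.
 */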

/**
 * amdgpu_fence_need_sync - do we need a semaphore
 *
 * @fence: amdgpu fence object
 * @dst_ring: which ring to check against
 *
 * Check if the fence needs to be synced against another ring
 * (all asics). If so, we need to emit a semaphore.
 * Returns true if we need to sync with another ring, false if
 * not.
 */
bool amdgpu_fence_need_sync(struct amdgpu_fence *fence,
                            struct amdgpu_ring *dst_ring)
{
        struct amdgpu_fence_driver *fdrv;

        if (!fence)
                return false;

        if (fence->ring == dst_ring)
                return false;

        /* we are protected by the ring mutex */
        fdrv = &dst_ring->fence_drv;
        if (fence->seq <= fdrv->sync_seq[fence->ring->idx])
                return false;

        return true;
}

/**
 * amdgpu_fence_note_sync - record the sync point
 *
 * @fence: amdgpu fence object
 * @dst_ring: which ring to check against
 *
 * Note the sequence number at which point the fence will
 * be synced with the requested ring (all asics).
 */
void amdgpu_fence_note_sync(struct amdgpu_fence *fence,
                            struct amdgpu_ring *dst_ring)
{
        struct amdgpu_fence_driver *dst, *src;
        unsigned i;

        if (!fence)
                return;

        if (fence->ring == dst_ring)
                return;

        /* we are protected by the ring mutex */
        src = &fence->ring->fence_drv;
        dst = &dst_ring->fence_drv;
        for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                if (i == dst_ring->idx)
                        continue;

                dst->sync_seq[i] = max(dst->sync_seq[i], src->sync_seq[i]);
        }
}
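
/*
 * Typical pairing of the two helpers above (an illustrative sketch, with
 * the ring mutex held as both helpers assume):
 *
 *      if (amdgpu_fence_need_sync(fence, dst_ring)) {
 *              // emit a semaphore wait on dst_ring here, then record
 *              // that dst_ring is now synced up to fence->seq
 *              amdgpu_fence_note_sync(fence, dst_ring);
 *      }
 */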

/**
 * amdgpu_fence_driver_start_ring - make the fence driver
 * ready for use on the requested ring.
 *
 * @ring: ring to start the fence driver on
 * @irq_src: interrupt source to use for this ring
 * @irq_type: interrupt type to use for this ring
 *
 * Make the fence driver ready for processing (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has.
 * Returns 0 for success, errors for failure.
 */
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
                                   struct amdgpu_irq_src *irq_src,
                                   unsigned irq_type)
{
        struct amdgpu_device *adev = ring->adev;
        uint64_t index;

        if (ring != &adev->uvd.ring) {
                ring->fence_drv.cpu_addr = &adev->wb.wb[ring->fence_offs];
                ring->fence_drv.gpu_addr = adev->wb.gpu_addr + (ring->fence_offs * 4);
        } else {
                /* put fence directly behind firmware */
                index = ALIGN(adev->uvd.fw->size, 8);
                ring->fence_drv.cpu_addr = adev->uvd.cpu_addr + index;
                ring->fence_drv.gpu_addr = adev->uvd.gpu_addr + index;
        }
        amdgpu_fence_write(ring, atomic64_read(&ring->fence_drv.last_seq));
        amdgpu_irq_get(adev, irq_src, irq_type);

        ring->fence_drv.irq_src = irq_src;
        ring->fence_drv.irq_type = irq_type;
        ring->fence_drv.initialized = true;

        dev_info(adev->dev, "fence driver on ring %d use gpu addr 0x%016llx, "
                 "cpu addr 0x%p\n", ring->idx,
                 ring->fence_drv.gpu_addr, ring->fence_drv.cpu_addr);
        return 0;
}
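
/*
 * Note on ordering (a hedged summary, not new driver behavior): the
 * expectation is that amdgpu_fence_driver_init_ring() runs when the ring
 * is created and amdgpu_fence_driver_start_ring() runs once the ring's
 * interrupt source exists, so the fence write-back address and IRQ are
 * valid before any fence is emitted.
 */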

/**
 * amdgpu_fence_driver_init_ring - init the fence driver
 * for the requested ring.
 *
 * @ring: ring to init the fence driver on
 *
 * Init the fence driver for the requested ring (all asics).
 * Helper function for amdgpu_fence_driver_init().
 */
void amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
{
        int i;

        ring->fence_drv.cpu_addr = NULL;
        ring->fence_drv.gpu_addr = 0;
        for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
                ring->fence_drv.sync_seq[i] = 0;

        atomic64_set(&ring->fence_drv.last_seq, 0);
        ring->fence_drv.initialized = false;

        INIT_DELAYED_WORK(&ring->fence_drv.lockup_work,
                          amdgpu_fence_check_lockup);
        ring->fence_drv.ring = ring;

        if (amdgpu_enable_scheduler) {
                ring->scheduler = amd_sched_create((void *)ring->adev,
                                                   &amdgpu_sched_ops,
                                                   ring->idx, 5, 0,
                                                   amdgpu_sched_hw_submission);
                if (!ring->scheduler)
                        DRM_ERROR("Failed to create scheduler on ring %d.\n",
                                  ring->idx);
        }
}

/**
 * amdgpu_fence_driver_init - init the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Init the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 * Returns 0 for success.
 */
int amdgpu_fence_driver_init(struct amdgpu_device *adev)
{
        if (amdgpu_debugfs_fence_init(adev))
                dev_err(adev->dev, "fence debugfs file creation failed\n");

        return 0;
}

/**
 * amdgpu_fence_driver_fini - tear down the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Tear down the fence driver for all possible rings (all asics).
 */
void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
{
        int i, r;

        mutex_lock(&adev->ring_lock);
        for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
                struct amdgpu_ring *ring = adev->rings[i];
                if (!ring || !ring->fence_drv.initialized)
                        continue;
                r = amdgpu_fence_wait_empty(ring);
                if (r) {
                        /* no need to trigger GPU reset as we are unloading */
                        amdgpu_fence_driver_force_completion(adev);
                }
                wake_up_all(&ring->fence_drv.fence_queue);
                amdgpu_irq_put(adev, ring->fence_drv.irq_src,
                               ring->fence_drv.irq_type);
                if (ring->scheduler)
                        amd_sched_destroy(ring->scheduler);
                ring->fence_drv.initialized = false;
        }
        mutex_unlock(&adev->ring_lock);
}

/**
 * amdgpu_fence_driver_suspend - suspend the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Suspend the fence driver for all possible rings (all asics).
 */
void amdgpu_fence_driver_suspend(struct amdgpu_device *adev)
{
        int i, r;

        mutex_lock(&adev->ring_lock);
        for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
                struct amdgpu_ring *ring = adev->rings[i];
                if (!ring || !ring->fence_drv.initialized)
                        continue;

                /* wait for gpu to finish processing current batch */
                r = amdgpu_fence_wait_empty(ring);
                if (r) {
                        /* delay GPU reset to resume */
                        amdgpu_fence_driver_force_completion(adev);
                }

                /* disable the interrupt */
                amdgpu_irq_put(adev, ring->fence_drv.irq_src,
                               ring->fence_drv.irq_type);
        }
        mutex_unlock(&adev->ring_lock);
}

/**
 * amdgpu_fence_driver_resume - resume the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Resume the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 */
void amdgpu_fence_driver_resume(struct amdgpu_device *adev)
{
        int i;

        mutex_lock(&adev->ring_lock);
        for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
                struct amdgpu_ring *ring = adev->rings[i];
                if (!ring || !ring->fence_drv.initialized)
                        continue;

                /* enable the interrupt */
                amdgpu_irq_get(adev, ring->fence_drv.irq_src,
                               ring->fence_drv.irq_type);
        }
        mutex_unlock(&adev->ring_lock);
}

/**
 * amdgpu_fence_driver_force_completion - force all fence waiters to complete
 *
 * @adev: amdgpu device pointer
 *
 * In case of GPU reset failure, make sure no process keeps waiting on a
 * fence that will never complete.
 */
void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev)
{
        int i;

        for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
                struct amdgpu_ring *ring = adev->rings[i];
                if (!ring || !ring->fence_drv.initialized)
                        continue;

                amdgpu_fence_write(ring, ring->fence_drv.sync_seq[i]);
        }
}
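
/*
 * How this unblocks waiters (a short explanatory note): writing the last
 * emitted sequence number into the fence location makes every subsequent
 * amdgpu_fence_read() report all fences on the ring as signaled, so any
 * sleeper in amdgpu_fence_ring_wait_seq_timeout() sees its wait condition
 * become true on the next wakeup or poll.
 */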

/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_fence_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *)m->private;
        struct drm_device *dev = node->minor->dev;
        struct amdgpu_device *adev = dev->dev_private;
        int i, j;

        for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                struct amdgpu_ring *ring = adev->rings[i];
                if (!ring || !ring->fence_drv.initialized)
                        continue;

                amdgpu_fence_process(ring);

                seq_printf(m, "--- ring %d (%s) ---\n", i, ring->name);
                seq_printf(m, "Last signaled fence 0x%016llx\n",
                           (unsigned long long)atomic64_read(&ring->fence_drv.last_seq));
                seq_printf(m, "Last emitted 0x%016llx\n",
                           ring->fence_drv.sync_seq[i]);

                for (j = 0; j < AMDGPU_MAX_RINGS; ++j) {
                        struct amdgpu_ring *other = adev->rings[j];
                        if (i != j && other && other->fence_drv.initialized &&
                            ring->fence_drv.sync_seq[j])
                                seq_printf(m, "Last sync to ring %d 0x%016llx\n",
                                           j, ring->fence_drv.sync_seq[j]);
                }
        }
        return 0;
}

static struct drm_info_list amdgpu_debugfs_fence_list[] = {
        {"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL},
};
#endif

int amdgpu_debugfs_fence_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
        return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_fence_list, 1);
#else
        return 0;
#endif
}

static const char *amdgpu_fence_get_driver_name(struct fence *fence)
{
        return "amdgpu";
}

static const char *amdgpu_fence_get_timeline_name(struct fence *f)
{
        struct amdgpu_fence *fence = to_amdgpu_fence(f);
        return (const char *)fence->ring->name;
}

static inline bool amdgpu_test_signaled(struct amdgpu_fence *fence)
{
        return test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags);
}

static inline bool amdgpu_test_signaled_any(struct amdgpu_fence **fences)
{
        int idx;
        struct amdgpu_fence *fence;

        for (idx = 0; idx < AMDGPU_MAX_RINGS; ++idx) {
                fence = fences[idx];
                if (fence &&
                    test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
                        return true;
        }
        return false;
}

struct amdgpu_wait_cb {
        struct fence_cb base;
        struct task_struct *task;
};

static void amdgpu_fence_wait_cb(struct fence *fence, struct fence_cb *cb)
{
        struct amdgpu_wait_cb *wait =
                container_of(cb, struct amdgpu_wait_cb, base);
        wake_up_process(wait->task);
}

static signed long amdgpu_fence_default_wait(struct fence *f, bool intr,
                                             signed long t)
{
        struct amdgpu_fence *array[AMDGPU_MAX_RINGS];
        struct amdgpu_fence *fence = to_amdgpu_fence(f);
        struct amdgpu_device *adev = fence->ring->adev;

        memset(&array[0], 0, sizeof(array));
        array[0] = fence;

        return amdgpu_fence_wait_any(adev, array, intr, t);
}

/* wait until any fence in the array signals, or until timeout */
signed long amdgpu_fence_wait_any(struct amdgpu_device *adev,
                                  struct amdgpu_fence **array, bool intr, signed long t)
{
        long idx = 0;
        struct amdgpu_wait_cb cb[AMDGPU_MAX_RINGS];
        struct amdgpu_fence *fence;

        BUG_ON(!array);

        for (idx = 0; idx < AMDGPU_MAX_RINGS; ++idx) {
                fence = array[idx];
                if (fence) {
                        cb[idx].task = current;
                        if (fence_add_callback(&fence->base,
                                        &cb[idx].base, amdgpu_fence_wait_cb))
                                return t; /* return if fence is already signaled */
                }
        }

        while (t > 0) {
                if (intr)
                        set_current_state(TASK_INTERRUPTIBLE);
                else
                        set_current_state(TASK_UNINTERRUPTIBLE);

                /*
                 * amdgpu_test_signaled_any must be called after
                 * set_current_state to prevent a race with wake_up_process
                 */
                if (amdgpu_test_signaled_any(array))
                        break;

                if (adev->needs_reset) {
                        t = -EDEADLK;
                        break;
                }

                t = schedule_timeout(t);

                if (t > 0 && intr && signal_pending(current))
                        t = -ERESTARTSYS;
        }

        __set_current_state(TASK_RUNNING);

        for (idx = 0; idx < AMDGPU_MAX_RINGS; ++idx) {
                fence = array[idx];
                if (fence)
                        fence_remove_callback(&fence->base, &cb[idx].base);
        }

        return t;
}
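
/*
 * Example (an illustrative sketch): waiting for whichever of two rings
 * finishes first. The array must have AMDGPU_MAX_RINGS slots, with unused
 * slots set to NULL; the fence variables here are hypothetical.
 *
 *      struct amdgpu_fence *array[AMDGPU_MAX_RINGS] = {};
 *      signed long t;
 *
 *      array[0] = fence_on_gfx_ring;
 *      array[1] = fence_on_dma_ring;
 *      t = amdgpu_fence_wait_any(adev, array, true, MAX_SCHEDULE_TIMEOUT);
 *      if (t < 0)
 *              ;       // -ERESTARTSYS or -EDEADLK
 */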

const struct fence_ops amdgpu_fence_ops = {
        .get_driver_name = amdgpu_fence_get_driver_name,
        .get_timeline_name = amdgpu_fence_get_timeline_name,
        .enable_signaling = amdgpu_fence_enable_signaling,
        .signaled = amdgpu_fence_is_signaled,
        .wait = amdgpu_fence_default_wait,
        .release = NULL,
};