/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Dave Airlie
 */
#include <linux/seq_file.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

/*
 * Fences
 * Fences mark an event in the GPU's pipeline and are used
 * for GPU/CPU synchronization.  When the fence is written,
 * it is expected that all buffers associated with that fence
 * are no longer in use by the associated ring on the GPU and
 * that the relevant GPU caches have been flushed.
 */

struct amdgpu_fence {
	struct fence base;

	/* RB, DMA, etc. */
	struct amdgpu_ring	*ring;
	uint64_t		seq;
};

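/*
 * One amdgpu_fence is allocated per emitted command submission.  The slab is
 * shared by all amdgpu devices and reference counted, so it is created by the
 * first amdgpu_fence_driver_init() and destroyed by the last fini().
 */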
static struct kmem_cache *amdgpu_fence_slab;
static atomic_t amdgpu_fence_slab_ref = ATOMIC_INIT(0);

/*
 * Cast helper
 */
static const struct fence_ops amdgpu_fence_ops;
static inline struct amdgpu_fence *to_amdgpu_fence(struct fence *f)
{
	struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);

	if (__f->base.ops == &amdgpu_fence_ops)
		return __f;

	return NULL;
}

/**
 * amdgpu_fence_write - write a fence value
 *
 * @ring: ring the fence is associated with
 * @seq: sequence number to write
 *
 * Writes a fence value to memory (all asics).
 */
static void amdgpu_fence_write(struct amdgpu_ring *ring, u32 seq)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;

	if (drv->cpu_addr)
		*drv->cpu_addr = cpu_to_le32(seq);
}

/**
 * amdgpu_fence_read - read a fence value
 *
 * @ring: ring the fence is associated with
 *
 * Reads a fence value from memory (all asics).
 * Returns the value of the fence read from memory.
 */
static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	u32 seq = 0;

	if (drv->cpu_addr)
		seq = le32_to_cpu(*drv->cpu_addr);
	else
		seq = lower_32_bits(atomic64_read(&drv->last_seq));

	return seq;
}

/**
 * amdgpu_fence_emit - emit a fence on the requested ring
 *
 * @ring: ring the fence is associated with
 * @f: resulting fence object
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, -ENOMEM on failure.
 */
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **f)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_fence *fence;
	struct fence **ptr;
	unsigned idx;

	fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
	if (fence == NULL)
		return -ENOMEM;

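	/* sync_seq counts every fence emitted on this ring; zero means nothing
	 * has been emitted yet, so the first fence gets sequence number 1.
	 */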
	fence->seq = ++ring->fence_drv.sync_seq;
	fence->ring = ring;
	fence_init(&fence->base, &amdgpu_fence_ops,
		   &ring->fence_drv.lock,
		   adev->fence_context + ring->idx,
		   fence->seq);
	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
			       fence->seq, AMDGPU_FENCE_FLAG_INT);

	idx = fence->seq & ring->fence_drv.num_fences_mask;
	ptr = &ring->fence_drv.fences[idx];
	/* This function can't be called concurrently anyway, otherwise
	 * emitting the fence would mess up the hardware ring buffer.
	 */
	BUG_ON(rcu_dereference_protected(*ptr, 1));

	rcu_assign_pointer(*ptr, fence_get(&fence->base));

	*f = &fence->base;

	return 0;
}

/**
 * amdgpu_fence_schedule_fallback - schedule fallback check
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Start a timer as fallback to our interrupts.
 */
static void amdgpu_fence_schedule_fallback(struct amdgpu_ring *ring)
{
	mod_timer(&ring->fence_drv.fallback_timer,
		  jiffies + AMDGPU_FENCE_JIFFIES_TIMEOUT);
}

/**
 * amdgpu_fence_process - check for fence activity
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Checks the current fence value and calculates the last
 * signalled fence value.  Signals all fences that have been
 * emitted up to and including that value.
 */
void amdgpu_fence_process(struct amdgpu_ring *ring)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	uint64_t seq, last_seq, last_emitted;
	int r;

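	/* The hardware only writes the lower 32 bits of the sequence number to
	 * memory, so reconstruct the full 64-bit value from last_seq and then
	 * atomically advance last_seq; retry if another thread got there first.
	 */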
	do {
		last_seq = atomic64_read(&ring->fence_drv.last_seq);
		last_emitted = ring->fence_drv.sync_seq;
		seq = amdgpu_fence_read(ring);
		seq |= last_seq & 0xffffffff00000000LL;
		if (seq < last_seq) {
			seq &= 0xffffffff;
			seq |= last_emitted & 0xffffffff00000000LL;
		}

		if (seq <= last_seq || seq > last_emitted)
			return;

	} while (atomic64_cmpxchg(&drv->last_seq, last_seq, seq) != last_seq);

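	/* if fences are still outstanding, re-arm the fallback timer in case
	 * the next interrupt is missed
	 */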
	if (seq < last_emitted)
		amdgpu_fence_schedule_fallback(ring);

	while (last_seq != seq) {
		struct fence *fence, **ptr;

		ptr = &drv->fences[++last_seq & drv->num_fences_mask];

		/* There is always exactly one thread signaling this fence slot */
		fence = rcu_dereference_protected(*ptr, 1);
		rcu_assign_pointer(*ptr, NULL);

		BUG_ON(!fence);

		r = fence_signal(fence);
		if (!r)
			FENCE_TRACE(fence, "signaled from irq context\n");
		else
			BUG();

		fence_put(fence);
	}
}

/**
 * amdgpu_fence_fallback - fallback for hardware interrupts
 *
 * @arg: pointer to the ring the timer belongs to
 *
 * Checks for fence activity.
 */
static void amdgpu_fence_fallback(unsigned long arg)
{
	struct amdgpu_ring *ring = (void *)arg;

	amdgpu_fence_process(ring);
}

/**
 * amdgpu_fence_wait_empty - wait for all fences to signal
 *
 * @ring: ring the fences are associated with
 *
 * Wait for all fences on the requested ring to signal (all asics).
 * Returns 0 if the fences have passed, error for all other cases.
 */
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
{
	uint64_t seq = ACCESS_ONCE(ring->fence_drv.sync_seq);
	struct fence *fence, **ptr;
	int r;

	if (!seq)
		return 0;

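	/* Fences on a ring signal in submission order, so waiting on the most
	 * recently emitted fence is enough to know the ring is idle.
	 */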
	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
	rcu_read_lock();
	fence = rcu_dereference(*ptr);
	if (!fence || !fence_get_rcu(fence)) {
		rcu_read_unlock();
		return 0;
	}
	rcu_read_unlock();

	r = fence_wait(fence, false);
	fence_put(fence);
	return r;
}

/**
 * amdgpu_fence_count_emitted - get the count of emitted fences
 *
 * @ring: ring the fence is associated with
 *
 * Get the number of fences emitted on the requested ring (all asics).
 * Returns the number of emitted fences on the ring.  Used by the
 * dynpm code to track ring activity.
 */
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
{
	uint64_t emitted;

	/* We are not protected by ring lock when reading the last sequence
	 * but it's ok to report slightly wrong fence count here.
	 */
	amdgpu_fence_process(ring);
	emitted = ring->fence_drv.sync_seq
		- atomic64_read(&ring->fence_drv.last_seq);
	/* to avoid a 32-bit wrap around */
	if (emitted > 0x10000000)
		emitted = 0x10000000;

	return (unsigned)emitted;
}

/**
 * amdgpu_fence_driver_start_ring - make the fence driver
 * ready for use on the requested ring.
 *
 * @ring: ring to start the fence driver on
 * @irq_src: interrupt source to use for this ring
 * @irq_type: interrupt type to use for this ring
 *
 * Make the fence driver ready for processing (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has.
 * Returns 0 for success, errors for failure.
 */
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
				   struct amdgpu_irq_src *irq_src,
				   unsigned irq_type)
{
	struct amdgpu_device *adev = ring->adev;
	uint64_t index;

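	/* The fence value normally lives in the device's writeback page; UVD
	 * keeps it directly behind the firmware image instead.
	 */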
	if (ring != &adev->uvd.ring) {
		ring->fence_drv.cpu_addr = &adev->wb.wb[ring->fence_offs];
		ring->fence_drv.gpu_addr = adev->wb.gpu_addr + (ring->fence_offs * 4);
	} else {
		/* put fence directly behind firmware */
		index = ALIGN(adev->uvd.fw->size, 8);
		ring->fence_drv.cpu_addr = adev->uvd.cpu_addr + index;
		ring->fence_drv.gpu_addr = adev->uvd.gpu_addr + index;
	}
	amdgpu_fence_write(ring, atomic64_read(&ring->fence_drv.last_seq));
	amdgpu_irq_get(adev, irq_src, irq_type);

	ring->fence_drv.irq_src = irq_src;
	ring->fence_drv.irq_type = irq_type;
	ring->fence_drv.initialized = true;

	dev_info(adev->dev, "fence driver on ring %d use gpu addr 0x%016llx, "
		 "cpu addr 0x%p\n", ring->idx,
		 ring->fence_drv.gpu_addr, ring->fence_drv.cpu_addr);
	return 0;
}

/**
 * amdgpu_fence_driver_init_ring - init the fence driver
 * for the requested ring.
 *
 * @ring: ring to init the fence driver on
 * @num_hw_submission: number of entries on the hardware queue
 *
 * Init the fence driver for the requested ring (all asics).
 * Helper function for amdgpu_fence_driver_init().
 */
int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
				  unsigned num_hw_submission)
{
	long timeout;
	int r;

	/* Check that num_hw_submission is a power of two */
	if ((num_hw_submission & (num_hw_submission - 1)) != 0)
		return -EINVAL;

	ring->fence_drv.cpu_addr = NULL;
	ring->fence_drv.gpu_addr = 0;
	ring->fence_drv.sync_seq = 0;
	atomic64_set(&ring->fence_drv.last_seq, 0);
	ring->fence_drv.initialized = false;

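	/* the fallback timer re-checks the fence status in case an interrupt
	 * is lost; it is re-armed whenever unsignaled fences remain
	 */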
	setup_timer(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback,
		    (unsigned long)ring);

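	/* num_hw_submission is a power of two, so the fences array can be used
	 * as a ring buffer indexed by seq & num_fences_mask, with one slot per
	 * possible in-flight submission.
	 */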
	ring->fence_drv.num_fences_mask = num_hw_submission - 1;
	spin_lock_init(&ring->fence_drv.lock);
	ring->fence_drv.fences = kcalloc(num_hw_submission, sizeof(void *),
					 GFP_KERNEL);
	if (!ring->fence_drv.fences)
		return -ENOMEM;

	timeout = msecs_to_jiffies(amdgpu_lockup_timeout);
	if (timeout == 0) {
		/*
		 * FIXME:
		 * Delayed workqueue cannot use it directly,
		 * so the scheduler will not use delayed workqueue if
		 * MAX_SCHEDULE_TIMEOUT is set.
		 * Currently keep it simple and silly.
		 */
		timeout = MAX_SCHEDULE_TIMEOUT;
	}
	r = amd_sched_init(&ring->sched, &amdgpu_sched_ops,
			   num_hw_submission,
			   timeout, ring->name);
	if (r) {
		DRM_ERROR("Failed to create scheduler on ring %s.\n",
			  ring->name);
		return r;
	}

	return 0;
}

/**
 * amdgpu_fence_driver_init - init the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Init the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 * Returns 0 for success.
 */
int amdgpu_fence_driver_init(struct amdgpu_device *adev)
{
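	/* only the first device to initialize creates the shared fence slab;
	 * the last one to tear down destroys it in amdgpu_fence_driver_fini()
	 */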
	if (atomic_inc_return(&amdgpu_fence_slab_ref) == 1) {
		amdgpu_fence_slab = kmem_cache_create(
			"amdgpu_fence", sizeof(struct amdgpu_fence), 0,
			SLAB_HWCACHE_ALIGN, NULL);
		if (!amdgpu_fence_slab)
			return -ENOMEM;
	}
	if (amdgpu_debugfs_fence_init(adev))
		dev_err(adev->dev, "fence debugfs file creation failed\n");

	return 0;
}

/**
 * amdgpu_fence_driver_fini - tear down the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Tear down the fence driver for all possible rings (all asics).
 */
void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
{
	unsigned i, j;
	int r;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->fence_drv.initialized)
			continue;
		r = amdgpu_fence_wait_empty(ring);
		if (r) {
			/* no need to trigger GPU reset as we are unloading */
			amdgpu_fence_driver_force_completion(adev);
		}
		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
		amd_sched_fini(&ring->sched);
		del_timer_sync(&ring->fence_drv.fallback_timer);
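		/* drop the references the fences array still holds on any
		 * fence that was never signaled
		 */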
		for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
			fence_put(ring->fence_drv.fences[j]);
		kfree(ring->fence_drv.fences);
		ring->fence_drv.initialized = false;
	}

	if (atomic_dec_and_test(&amdgpu_fence_slab_ref))
		kmem_cache_destroy(amdgpu_fence_slab);
}

/**
 * amdgpu_fence_driver_suspend - suspend the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Suspend the fence driver for all possible rings (all asics).
 */
void amdgpu_fence_driver_suspend(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		/* wait for gpu to finish processing current batch */
		r = amdgpu_fence_wait_empty(ring);
		if (r) {
			/* delay GPU reset to resume */
			amdgpu_fence_driver_force_completion(adev);
		}

		/* disable the interrupt */
		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
	}
}

/**
 * amdgpu_fence_driver_resume - resume the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Resume the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 */
void amdgpu_fence_driver_resume(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		/* enable the interrupt */
		amdgpu_irq_get(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
	}
}

/**
 * amdgpu_fence_driver_force_completion - force all fence waiters to complete
 *
 * @adev: amdgpu device pointer
 *
 * In case of GPU reset failure make sure no process keeps waiting on a fence
 * that will never complete.
 */
void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		amdgpu_fence_write(ring, ring->fence_drv.sync_seq);
	}
}

/*
 * Common fence implementation
 */

static const char *amdgpu_fence_get_driver_name(struct fence *fence)
{
	return "amdgpu";
}

static const char *amdgpu_fence_get_timeline_name(struct fence *f)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	return (const char *)fence->ring->name;
}

/**
 * amdgpu_fence_is_signaled - test if fence is signaled
 *
 * @f: fence to test
 *
 * Check whether the fence sequence number has already signaled.  If it
 * hasn't, kick off fence processing and check again.  Returns true if
 * the fence is signaled.
 */
static bool amdgpu_fence_is_signaled(struct fence *f)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	struct amdgpu_ring *ring = fence->ring;

	if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
		return true;

	amdgpu_fence_process(ring);

	if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
		return true;

	return false;
}

/**
 * amdgpu_fence_enable_signaling - enable signalling on fence
 *
 * @f: fence
 *
 * This function is called with the fence lock held and makes sure the
 * fallback timer is running, so the fence is guaranteed to be signalled
 * even if the interrupt for this ring is missed.
 */
static bool amdgpu_fence_enable_signaling(struct fence *f)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	struct amdgpu_ring *ring = fence->ring;

	if (!timer_pending(&ring->fence_drv.fallback_timer))
		amdgpu_fence_schedule_fallback(ring);

	FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);

	return true;
}

/**
 * amdgpu_fence_free - free up the fence memory
 *
 * @rcu: RCU callback head
 *
 * Free up the fence memory after the RCU grace period.
 */
static void amdgpu_fence_free(struct rcu_head *rcu)
{
	struct fence *f = container_of(rcu, struct fence, rcu);
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	kmem_cache_free(amdgpu_fence_slab, fence);
}

/**
 * amdgpu_fence_release - callback that fence can be freed
 *
 * @f: fence
 *
 * This function is called when the reference count becomes zero.
 * It just RCU schedules freeing up the fence.
 */
static void amdgpu_fence_release(struct fence *f)
{
	call_rcu(&f->rcu, amdgpu_fence_free);
}

static const struct fence_ops amdgpu_fence_ops = {
	.get_driver_name = amdgpu_fence_get_driver_name,
	.get_timeline_name = amdgpu_fence_get_timeline_name,
	.enable_signaling = amdgpu_fence_enable_signaling,
	.signaled = amdgpu_fence_is_signaled,
	.wait = fence_default_wait,
	.release = amdgpu_fence_release,
};

/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_fence_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		amdgpu_fence_process(ring);

		seq_printf(m, "--- ring %d (%s) ---\n", i, ring->name);
		seq_printf(m, "Last signaled fence 0x%016llx\n",
			   (unsigned long long)atomic64_read(&ring->fence_drv.last_seq));
		seq_printf(m, "Last emitted 0x%016llx\n",
			   ring->fence_drv.sync_seq);
	}
	return 0;
}

/**
 * amdgpu_debugfs_gpu_reset - manually trigger a gpu reset
 *
 * Manually trigger a gpu reset when the debugfs file is read.
 */
static int amdgpu_debugfs_gpu_reset(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;

	seq_printf(m, "gpu reset\n");
	amdgpu_gpu_reset(adev);

	return 0;
}

static struct drm_info_list amdgpu_debugfs_fence_list[] = {
	{"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL},
	{"amdgpu_gpu_reset", &amdgpu_debugfs_gpu_reset, 0, NULL}
};
#endif

int amdgpu_debugfs_fence_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_fence_list, 2);
#else
	return 0;
#endif
}
