/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Dave Airlie
 */
#include <linux/seq_file.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

/*
 * Fences
 * Fences mark an event in the GPU's pipeline and are used
 * for GPU/CPU synchronization. When the fence is written,
 * it is expected that all buffers associated with that fence
 * are no longer in use by the associated ring on the GPU and
 * that the relevant GPU caches have been flushed.
 */

struct amdgpu_fence {
	struct dma_fence base;

	/* RB, DMA, etc. */
	struct amdgpu_ring		*ring;
};

static struct kmem_cache *amdgpu_fence_slab;

int amdgpu_fence_slab_init(void)
{
	amdgpu_fence_slab = kmem_cache_create(
		"amdgpu_fence", sizeof(struct amdgpu_fence), 0,
		SLAB_HWCACHE_ALIGN, NULL);
	if (!amdgpu_fence_slab)
		return -ENOMEM;
	return 0;
}

void amdgpu_fence_slab_fini(void)
{
	rcu_barrier();
	kmem_cache_destroy(amdgpu_fence_slab);
}

/*
 * Cast helper
 */
static const struct dma_fence_ops amdgpu_fence_ops;
static inline struct amdgpu_fence *to_amdgpu_fence(struct dma_fence *f)
{
	struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);

	if (__f->base.ops == &amdgpu_fence_ops)
		return __f;

	return NULL;
}
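
/*
 * Because to_amdgpu_fence() checks the ops pointer, it can safely be handed
 * any struct dma_fence, e.g. one obtained from a reservation object, and
 * returns NULL for fences that belong to another driver. Illustrative only:
 *
 *	struct amdgpu_fence *af = to_amdgpu_fence(f);
 *
 *	if (af)
 *		DRM_DEBUG("fence on ring %s\n", af->ring->name);
 */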

/**
 * amdgpu_fence_write - write a fence value
 *
 * @ring: ring the fence is associated with
 * @seq: sequence number to write
 *
 * Writes a fence value to memory (all asics).
 */
static void amdgpu_fence_write(struct amdgpu_ring *ring, u32 seq)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;

	if (drv->cpu_addr)
		*drv->cpu_addr = cpu_to_le32(seq);
}

/**
 * amdgpu_fence_read - read a fence value
 *
 * @ring: ring the fence is associated with
 *
 * Reads a fence value from memory (all asics).
 * Returns the value of the fence read from memory.
 */
static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	u32 seq = 0;

	if (drv->cpu_addr)
		seq = le32_to_cpu(*drv->cpu_addr);
	else
		seq = atomic_read(&drv->last_seq);

	return seq;
}

/**
 * amdgpu_fence_emit - emit a fence on the requested ring
 *
 * @ring: ring the fence is associated with
 * @f: resulting fence object
 * @flags: AMDGPU_FENCE_FLAG_* flags for the fence packet
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, -ENOMEM on failure.
 */
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
		      unsigned flags)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_fence *fence;
	struct dma_fence *old, **ptr;
	uint32_t seq;

	fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
	if (fence == NULL)
		return -ENOMEM;

	seq = ++ring->fence_drv.sync_seq;
	fence->ring = ring;
	dma_fence_init(&fence->base, &amdgpu_fence_ops,
		       &ring->fence_drv.lock,
		       adev->fence_context + ring->idx,
		       seq);
	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
			       seq, flags | AMDGPU_FENCE_FLAG_INT);

	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
	/* This function can't be called concurrently anyway, otherwise
	 * emitting the fence would mess up the hardware ring buffer.
	 */
	old = rcu_dereference_protected(*ptr, 1);
	if (old && !dma_fence_is_signaled(old)) {
		DRM_INFO("rcu slot is busy\n");
		dma_fence_wait(old, false);
	}

	rcu_assign_pointer(*ptr, dma_fence_get(&fence->base));

	*f = &fence->base;

	return 0;
}
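
/*
 * A minimal usage sketch (illustrative, not taken from this file): a caller
 * that has allocated ring space and written its commands emits a fence and
 * can then wait on it through the generic dma_fence API. Error handling is
 * mostly elided and "ring" is assumed to be a valid, initialized ring:
 *
 *	struct dma_fence *fence;
 *	int r;
 *
 *	r = amdgpu_fence_emit(ring, &fence, 0);
 *	if (r)
 *		return r;
 *	amdgpu_ring_commit(ring);
 *	r = dma_fence_wait(fence, false);
 *	dma_fence_put(fence);
 */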

/**
 * amdgpu_fence_emit_polling - emit a fence on the requested ring
 *
 * @ring: ring the fence is associated with
 * @s: resulting sequence number
 *
 * Emits a fence command on the requested ring (all asics).
 * Used for fence polling; no interrupt is requested for this fence.
 * Returns 0 on success, -EINVAL on failure.
 */
int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s)
{
	uint32_t seq;

	if (!s)
		return -EINVAL;

	seq = ++ring->fence_drv.sync_seq;
	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
			       seq, 0);

	*s = seq;

	return 0;
}
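
/*
 * A hedged pairing sketch: callers that cannot sleep (for example the KIQ
 * register access paths under SR-IOV) emit a polling fence and busy-wait on
 * the returned sequence number instead of using a dma_fence:
 *
 *	uint32_t seq;
 *
 *	if (!amdgpu_fence_emit_polling(ring, &seq)) {
 *		amdgpu_ring_commit(ring);
 *		if (!amdgpu_fence_wait_polling(ring, seq, timeout))
 *			DRM_ERROR("wait for seq %u timed out\n", seq);
 *	}
 */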

/**
 * amdgpu_fence_schedule_fallback - schedule fallback check
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Start a timer as fallback to our interrupts.
 */
static void amdgpu_fence_schedule_fallback(struct amdgpu_ring *ring)
{
	mod_timer(&ring->fence_drv.fallback_timer,
		  jiffies + AMDGPU_FENCE_JIFFIES_TIMEOUT);
}

/**
 * amdgpu_fence_process - check for fence activity
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Checks the current fence value and calculates the last
 * signalled fence value. Wakes the fence queue if the
 * sequence number has increased.
 */
void amdgpu_fence_process(struct amdgpu_ring *ring)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	uint32_t seq, last_seq;
	int r;

	do {
		last_seq = atomic_read(&ring->fence_drv.last_seq);
		seq = amdgpu_fence_read(ring);

	} while (atomic_cmpxchg(&drv->last_seq, last_seq, seq) != last_seq);

	if (seq != ring->fence_drv.sync_seq)
		amdgpu_fence_schedule_fallback(ring);

	if (unlikely(seq == last_seq))
		return;

	last_seq &= drv->num_fences_mask;
	seq &= drv->num_fences_mask;

	do {
		struct dma_fence *fence, **ptr;

		++last_seq;
		last_seq &= drv->num_fences_mask;
		ptr = &drv->fences[last_seq];

		/* There is always exactly one thread signaling this fence slot */
		fence = rcu_dereference_protected(*ptr, 1);
		RCU_INIT_POINTER(*ptr, NULL);

		if (!fence)
			continue;

		r = dma_fence_signal(fence);
		if (!r)
			DMA_FENCE_TRACE(fence, "signaled from irq context\n");
		else
			BUG();

		dma_fence_put(fence);
	} while (last_seq != seq);
}
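
/*
 * Worked example of the processing loop above: with num_fences_mask == 0xff,
 * last_seq == 5 and a hardware fence value of 8, slots 6, 7 and 8 are
 * signaled in order. A concurrent caller that loses the atomic_cmpxchg()
 * race observes seq == last_seq and returns early, so every slot is
 * signaled exactly once.
 */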

/**
 * amdgpu_fence_fallback - fallback for hardware interrupts
 *
 * @t: timer context used to look up the ring
 *
 * Checks for fence activity.
 */
static void amdgpu_fence_fallback(struct timer_list *t)
{
	struct amdgpu_ring *ring = from_timer(ring, t,
					      fence_drv.fallback_timer);

	amdgpu_fence_process(ring);
}

/**
 * amdgpu_fence_wait_empty - wait for all fences to signal
 *
 * @ring: ring the fences are associated with
 *
 * Wait for all fences on the requested ring to signal (all asics).
 * Returns 0 if the fences have passed, error for all other cases.
 */
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
{
	uint64_t seq = READ_ONCE(ring->fence_drv.sync_seq);
	struct dma_fence *fence, **ptr;
	int r;

	if (!seq)
		return 0;

	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
	rcu_read_lock();
	fence = rcu_dereference(*ptr);
	if (!fence || !dma_fence_get_rcu(fence)) {
		rcu_read_unlock();
		return 0;
	}
	rcu_read_unlock();

	r = dma_fence_wait(fence, false);
	dma_fence_put(fence);
	return r;
}
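
/*
 * Note that waiting on the fence for sync_seq alone is sufficient: fences on
 * a ring signal in submission order, so once the most recently emitted fence
 * has signaled, every older fence on that ring has signaled as well.
 */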

/**
 * amdgpu_fence_wait_polling - busy wait for a given sequence number
 *
 * @ring: ring the fence is associated with
 * @wait_seq: sequence number to wait for
 * @timeout: the timeout for waiting in usecs
 *
 * Busy-wait until the fence on the requested ring reaches the given
 * sequence number (all asics).
 * Returns the remaining timeout if the sequence number was reached,
 * or 0 if the wait timed out.
 */
signed long amdgpu_fence_wait_polling(struct amdgpu_ring *ring,
				      uint32_t wait_seq,
				      signed long timeout)
{
	uint32_t seq;

	do {
		seq = amdgpu_fence_read(ring);
		udelay(5);
		timeout -= 5;
	} while ((int32_t)(wait_seq - seq) > 0 && timeout > 0);

	return timeout > 0 ? timeout : 0;
}

/**
 * amdgpu_fence_count_emitted - get the count of emitted fences
 *
 * @ring: ring the fence is associated with
 *
 * Get the number of fences emitted on the requested ring (all asics).
 * Returns the number of emitted fences on the ring. Used by the
 * dynpm code to track ring activity.
 */
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
{
	uint64_t emitted;

	/* We are not protected by ring lock when reading the last sequence
	 * but it's ok to report slightly wrong fence count here.
	 */
	amdgpu_fence_process(ring);
	emitted = 0x100000000ull;
	emitted -= atomic_read(&ring->fence_drv.last_seq);
	emitted += READ_ONCE(ring->fence_drv.sync_seq);
	return lower_32_bits(emitted);
}
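
/*
 * Worked example of the wrap-safe arithmetic above: if the 32-bit sequence
 * counter recently wrapped so that last_seq == 0xfffffff0 and
 * sync_seq == 0x00000010, then
 * 0x100000000 - 0xfffffff0 + 0x10 == 0x20, i.e. 32 fences are outstanding.
 */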

/**
 * amdgpu_fence_driver_start_ring - make the fence driver
 * ready for use on the requested ring.
 *
 * @ring: ring to start the fence driver on
 * @irq_src: interrupt source to use for this ring
 * @irq_type: interrupt type to use for this ring
 *
 * Make the fence driver ready for processing (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has.
 * Returns 0 for success, errors for failure.
 */
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
				   struct amdgpu_irq_src *irq_src,
				   unsigned irq_type)
{
	struct amdgpu_device *adev = ring->adev;
	uint64_t index;

	if (ring->funcs->type != AMDGPU_RING_TYPE_UVD) {
		ring->fence_drv.cpu_addr = &adev->wb.wb[ring->fence_offs];
		ring->fence_drv.gpu_addr = adev->wb.gpu_addr + (ring->fence_offs * 4);
	} else {
		/* put fence directly behind firmware */
		index = ALIGN(adev->uvd.fw->size, 8);
		ring->fence_drv.cpu_addr = adev->uvd.inst[ring->me].cpu_addr + index;
		ring->fence_drv.gpu_addr = adev->uvd.inst[ring->me].gpu_addr + index;
	}
	amdgpu_fence_write(ring, atomic_read(&ring->fence_drv.last_seq));
	amdgpu_irq_get(adev, irq_src, irq_type);

	ring->fence_drv.irq_src = irq_src;
	ring->fence_drv.irq_type = irq_type;
	ring->fence_drv.initialized = true;

	dev_dbg(adev->dev, "fence driver on ring %d use gpu addr 0x%016llx, "
		"cpu addr 0x%p\n", ring->idx,
		ring->fence_drv.gpu_addr, ring->fence_drv.cpu_addr);
	return 0;
}

/**
 * amdgpu_fence_driver_init_ring - init the fence driver
 * for the requested ring.
 *
 * @ring: ring to init the fence driver on
 * @num_hw_submission: number of entries on the hardware queue
 *
 * Init the fence driver for the requested ring (all asics).
 * Helper function for amdgpu_fence_driver_init().
 */
int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
				  unsigned num_hw_submission)
{
	long timeout;
	int r;

	/* Check that num_hw_submission is a power of two */
	if ((num_hw_submission & (num_hw_submission - 1)) != 0)
		return -EINVAL;

	ring->fence_drv.cpu_addr = NULL;
	ring->fence_drv.gpu_addr = 0;
	ring->fence_drv.sync_seq = 0;
	atomic_set(&ring->fence_drv.last_seq, 0);
	ring->fence_drv.initialized = false;

	timer_setup(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback, 0);

	ring->fence_drv.num_fences_mask = num_hw_submission * 2 - 1;
	spin_lock_init(&ring->fence_drv.lock);
	ring->fence_drv.fences = kcalloc(num_hw_submission * 2, sizeof(void *),
					 GFP_KERNEL);
	if (!ring->fence_drv.fences)
		return -ENOMEM;

	/* No need to setup the GPU scheduler for KIQ ring */
	if (ring->funcs->type != AMDGPU_RING_TYPE_KIQ) {
		/* for non-sriov case, no timeout enforce on compute ring */
		if ((ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
		    && !amdgpu_sriov_vf(ring->adev))
			timeout = MAX_SCHEDULE_TIMEOUT;
		else
			timeout = msecs_to_jiffies(amdgpu_lockup_timeout);

		r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
				   num_hw_submission, amdgpu_job_hang_limit,
				   timeout, ring->name);
		if (r) {
			DRM_ERROR("Failed to create scheduler on ring %s.\n",
				  ring->name);
			return r;
		}
	}

	return 0;
}
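
/*
 * The power-of-two check above is what makes the cheap masking in
 * amdgpu_fence_emit() and amdgpu_fence_process() work: with, say,
 * num_hw_submission == 256, num_fences_mask == 2 * 256 - 1 == 0x1ff, so
 * "seq & num_fences_mask" indexes the 512-entry fences array without a
 * modulo operation.
 */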

/**
 * amdgpu_fence_driver_init - init the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Init the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 * Returns 0 for success.
 */
int amdgpu_fence_driver_init(struct amdgpu_device *adev)
{
	if (amdgpu_debugfs_fence_init(adev))
		dev_err(adev->dev, "fence debugfs file creation failed\n");

	return 0;
}

/**
 * amdgpu_fence_driver_fini - tear down the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Tear down the fence driver for all possible rings (all asics).
 */
void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
{
	unsigned i, j;
	int r;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->fence_drv.initialized)
			continue;
		r = amdgpu_fence_wait_empty(ring);
		if (r) {
			/* no need to trigger GPU reset as we are unloading */
			amdgpu_fence_driver_force_completion(ring);
		}
		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
		drm_sched_fini(&ring->sched);
		del_timer_sync(&ring->fence_drv.fallback_timer);
		for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
			dma_fence_put(ring->fence_drv.fences[j]);
		kfree(ring->fence_drv.fences);
		ring->fence_drv.fences = NULL;
		ring->fence_drv.initialized = false;
	}
}

/**
 * amdgpu_fence_driver_suspend - suspend the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Suspend the fence driver for all possible rings (all asics).
 */
void amdgpu_fence_driver_suspend(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		/* wait for gpu to finish processing current batch */
		r = amdgpu_fence_wait_empty(ring);
		if (r) {
			/* delay GPU reset to resume */
			amdgpu_fence_driver_force_completion(ring);
		}

		/* disable the interrupt */
		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
	}
}

/**
 * amdgpu_fence_driver_resume - resume the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Resume the fence driver for all possible rings (all asics) by
 * re-enabling the fence interrupts. Not all asics have all rings,
 * so each asic will only resume the rings it has.
 */
void amdgpu_fence_driver_resume(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		/* enable the interrupt */
		amdgpu_irq_get(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
	}
}

/**
 * amdgpu_fence_driver_force_completion - force signal the latest fence of a ring
 *
 * @ring: ring to signal the latest fence on
 *
 * Writes the last emitted sequence number to the fence location and
 * processes the ring, signaling all outstanding fences.
 */
void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring)
{
	amdgpu_fence_write(ring, ring->fence_drv.sync_seq);
	amdgpu_fence_process(ring);
}
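
/*
 * As used in amdgpu_fence_driver_fini() and amdgpu_fence_driver_suspend()
 * above: when waiting for the ring to drain fails, forcing completion makes
 * the hardware appear to have finished everything that was ever emitted, so
 * teardown can proceed without a GPU reset.
 */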

/*
 * Common fence implementation
 */

static const char *amdgpu_fence_get_driver_name(struct dma_fence *fence)
{
	return "amdgpu";
}

static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	return (const char *)fence->ring->name;
}

/**
 * amdgpu_fence_enable_signaling - enable signalling on fence
 * @f: fence
 *
 * This function is called with fence_queue lock held, and adds a callback
 * to fence_queue that checks if this fence is signaled, and if so it
 * signals the fence and removes itself.
 */
static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	struct amdgpu_ring *ring = fence->ring;

	if (!timer_pending(&ring->fence_drv.fallback_timer))
		amdgpu_fence_schedule_fallback(ring);

	DMA_FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);

	return true;
}

/**
 * amdgpu_fence_free - free up the fence memory
 *
 * @rcu: RCU callback head
 *
 * Free up the fence memory after the RCU grace period.
 */
static void amdgpu_fence_free(struct rcu_head *rcu)
{
	struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	kmem_cache_free(amdgpu_fence_slab, fence);
}

/**
 * amdgpu_fence_release - callback that fence can be freed
 *
 * @f: fence
 *
 * This function is called when the reference count becomes zero.
 * It just RCU schedules freeing up the fence.
 */
static void amdgpu_fence_release(struct dma_fence *f)
{
	call_rcu(&f->rcu, amdgpu_fence_free);
}

static const struct dma_fence_ops amdgpu_fence_ops = {
	.get_driver_name = amdgpu_fence_get_driver_name,
	.get_timeline_name = amdgpu_fence_get_timeline_name,
	.enable_signaling = amdgpu_fence_enable_signaling,
	.wait = dma_fence_default_wait,
	.release = amdgpu_fence_release,
};

/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_fence_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		amdgpu_fence_process(ring);

		seq_printf(m, "--- ring %d (%s) ---\n", i, ring->name);
		seq_printf(m, "Last signaled fence 0x%08x\n",
			   atomic_read(&ring->fence_drv.last_seq));
		seq_printf(m, "Last emitted        0x%08x\n",
			   ring->fence_drv.sync_seq);

		if (ring->funcs->type != AMDGPU_RING_TYPE_GFX)
			continue;

		/* set in CP_VMID_PREEMPT and preemption occurred */
		seq_printf(m, "Last preempted      0x%08x\n",
			   le32_to_cpu(*(ring->fence_drv.cpu_addr + 2)));
		/* set in CP_VMID_RESET and reset occurred */
		seq_printf(m, "Last reset          0x%08x\n",
			   le32_to_cpu(*(ring->fence_drv.cpu_addr + 4)));
		/* Both preemption and reset occurred */
		seq_printf(m, "Last both           0x%08x\n",
			   le32_to_cpu(*(ring->fence_drv.cpu_addr + 6)));
	}
	return 0;
}

/**
 * amdgpu_debugfs_gpu_recover - manually trigger a gpu reset & recover
 *
 * Manually trigger a gpu reset and recovery.
 */
static int amdgpu_debugfs_gpu_recover(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;

	seq_printf(m, "gpu recover\n");
	amdgpu_device_gpu_recover(adev, NULL, true);

	return 0;
}

static const struct drm_info_list amdgpu_debugfs_fence_list[] = {
	{"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL},
	{"amdgpu_gpu_recover", &amdgpu_debugfs_gpu_recover, 0, NULL}
};

static const struct drm_info_list amdgpu_debugfs_fence_list_sriov[] = {
	{"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL},
};
#endif

int amdgpu_debugfs_fence_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	if (amdgpu_sriov_vf(adev))
		return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_fence_list_sriov, 1);
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_fence_list, 2);
#else
	return 0;
#endif
}