/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Dave Airlie
 */
#include <linux/seq_file.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

/*
 * Fences
 * Fences mark an event in the GPU's pipeline and are used
 * for GPU/CPU synchronization. When the fence is written,
 * it is expected that all buffers associated with that fence
 * are no longer in use by the associated ring on the GPU and
 * that the relevant GPU caches have been flushed.
 */
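
/*
 * A minimal usage sketch (hypothetical caller; assumes the ring is
 * already set up for command submission and trims error handling):
 *
 *	struct dma_fence *f;
 *	int r;
 *
 *	r = amdgpu_fence_emit(ring, &f, 0);
 *	if (!r) {
 *		r = dma_fence_wait(f, false);
 *		dma_fence_put(f);
 *	}
 */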
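/**
 * struct amdgpu_fence - fence backed by a ring sequence number
 * @base: common &struct dma_fence object
 * @ring: ring this fence was emitted on
 */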
struct amdgpu_fence {
	struct dma_fence base;

	/* RB, DMA, etc. */
	struct amdgpu_ring *ring;
};

static struct kmem_cache *amdgpu_fence_slab;

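/**
 * amdgpu_fence_slab_init - create the slab cache for fence objects
 *
 * Creates the slab cache used to allocate amdgpu_fence objects.
 * Returns 0 on success, -ENOMEM if the cache cannot be created.
 */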
int amdgpu_fence_slab_init(void)
{
	amdgpu_fence_slab = kmem_cache_create(
		"amdgpu_fence", sizeof(struct amdgpu_fence), 0,
		SLAB_HWCACHE_ALIGN, NULL);
	if (!amdgpu_fence_slab)
		return -ENOMEM;
	return 0;
}

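/**
 * amdgpu_fence_slab_fini - destroy the fence slab cache
 *
 * The rcu_barrier() waits for in-flight call_rcu() callbacks (see
 * amdgpu_fence_free()) so that no fence is returned to the cache
 * after it has been destroyed.
 */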
void amdgpu_fence_slab_fini(void)
{
	rcu_barrier();
	kmem_cache_destroy(amdgpu_fence_slab);
}

/*
 * Cast helper
 */
static const struct dma_fence_ops amdgpu_fence_ops;
static inline struct amdgpu_fence *to_amdgpu_fence(struct dma_fence *f)
{
	struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);

	if (__f->base.ops == &amdgpu_fence_ops)
		return __f;

	return NULL;
}

/**
 * amdgpu_fence_write - write a fence value
 *
 * @ring: ring the fence is associated with
 * @seq: sequence number to write
 *
 * Writes a fence value to memory (all asics).
 */
static void amdgpu_fence_write(struct amdgpu_ring *ring, u32 seq)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;

	if (drv->cpu_addr)
		*drv->cpu_addr = cpu_to_le32(seq);
}

/**
 * amdgpu_fence_read - read a fence value
 *
 * @ring: ring the fence is associated with
 *
 * Reads a fence value from memory (all asics).
 * Returns the value of the fence read from memory.
 */
static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	u32 seq = 0;

	if (drv->cpu_addr)
		seq = le32_to_cpu(*drv->cpu_addr);
	else
		seq = atomic_read(&drv->last_seq);

	return seq;
}

/**
 * amdgpu_fence_emit - emit a fence on the requested ring
 *
 * @ring: ring the fence is associated with
 * @f: resulting fence object
 * @flags: flags for the fence packet; AMDGPU_FENCE_FLAG_INT is added
 * unconditionally
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, -ENOMEM on failure.
 */
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
		      unsigned flags)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_fence *fence;
	struct dma_fence *old, **ptr;
	uint32_t seq;

	fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
	if (fence == NULL)
		return -ENOMEM;

	seq = ++ring->fence_drv.sync_seq;
	fence->ring = ring;
	dma_fence_init(&fence->base, &amdgpu_fence_ops,
		       &ring->fence_drv.lock,
		       adev->fence_context + ring->idx,
		       seq);
	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
			       seq, flags | AMDGPU_FENCE_FLAG_INT);

	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
	/* This function can't be called concurrently anyway, otherwise
	 * emitting the fence would mess up the hardware ring buffer.
	 */
	old = rcu_dereference_protected(*ptr, 1);
	if (old && !dma_fence_is_signaled(old)) {
		DRM_INFO("rcu slot is busy\n");
		dma_fence_wait(old, false);
	}

	rcu_assign_pointer(*ptr, dma_fence_get(&fence->base));

	*f = &fence->base;

	return 0;
}

/**
 * amdgpu_fence_emit_polling - emit a fence on the requested ring
 *
 * @ring: ring the fence is associated with
 * @s: resulting sequence number
 *
 * Emits a fence command on the requested ring (all asics).
 * Used for fence polling instead of interrupts.
 * Returns 0 on success, -EINVAL if no sequence pointer is given.
 */
int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s)
{
	uint32_t seq;

	if (!s)
		return -EINVAL;

	seq = ++ring->fence_drv.sync_seq;
	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
			       seq, 0);

	*s = seq;

	return 0;
}
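
/*
 * A hedged sketch of the polling pair (hypothetical caller that cannot
 * rely on interrupts; amdgpu_fence_wait_polling() is defined below and
 * the timeout is in usecs):
 *
 *	uint32_t seq;
 *
 *	if (!amdgpu_fence_emit_polling(ring, &seq))
 *		amdgpu_fence_wait_polling(ring, seq, timeout);
 */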
/**
 * amdgpu_fence_schedule_fallback - schedule fallback check
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Start a timer as fallback to our interrupts.
 */
static void amdgpu_fence_schedule_fallback(struct amdgpu_ring *ring)
{
	mod_timer(&ring->fence_drv.fallback_timer,
		  jiffies + AMDGPU_FENCE_JIFFIES_TIMEOUT);
}

/**
 * amdgpu_fence_process - check for fence activity
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Checks the current fence value and calculates the last
 * signaled fence value. Wakes the fence queue if the
 * sequence number has increased.
 */
void amdgpu_fence_process(struct amdgpu_ring *ring)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	uint32_t seq, last_seq;
	int r;

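	/*
	 * Atomically publish the newest sequence number read from memory;
	 * retry if another thread advanced last_seq while we were reading.
	 */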
	do {
		last_seq = atomic_read(&ring->fence_drv.last_seq);
		seq = amdgpu_fence_read(ring);

	} while (atomic_cmpxchg(&drv->last_seq, last_seq, seq) != last_seq);

	if (seq != ring->fence_drv.sync_seq)
		amdgpu_fence_schedule_fallback(ring);

	if (unlikely(seq == last_seq))
		return;

	last_seq &= drv->num_fences_mask;
	seq &= drv->num_fences_mask;

	do {
		struct dma_fence *fence, **ptr;

		++last_seq;
		last_seq &= drv->num_fences_mask;
		ptr = &drv->fences[last_seq];

		/* There is always exactly one thread signaling this fence slot */
		fence = rcu_dereference_protected(*ptr, 1);
		RCU_INIT_POINTER(*ptr, NULL);

		if (!fence)
			continue;

		r = dma_fence_signal(fence);
		if (!r)
			DMA_FENCE_TRACE(fence, "signaled from irq context\n");
		else
			BUG();

		dma_fence_put(fence);
	} while (last_seq != seq);
}

/**
 * amdgpu_fence_fallback - fallback for hardware interrupts
 *
 * @t: timer context used to obtain the ring
 *
 * Checks for fence activity.
 */
static void amdgpu_fence_fallback(struct timer_list *t)
{
	struct amdgpu_ring *ring = from_timer(ring, t,
					      fence_drv.fallback_timer);

	amdgpu_fence_process(ring);
}

/**
 * amdgpu_fence_wait_empty - wait for all fences to signal
 *
 * @ring: ring the fences are associated with
 *
 * Wait for all fences on the requested ring to signal (all asics).
 * Returns 0 if the fences have passed, error for all other cases.
 */
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
{
	uint64_t seq = READ_ONCE(ring->fence_drv.sync_seq);
	struct dma_fence *fence, **ptr;
	int r;

	if (!seq)
		return 0;

	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
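	/* Hold a reference under RCU so the fence can't be freed while we wait. */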
	rcu_read_lock();
	fence = rcu_dereference(*ptr);
	if (!fence || !dma_fence_get_rcu(fence)) {
		rcu_read_unlock();
		return 0;
	}
	rcu_read_unlock();

	r = dma_fence_wait(fence, false);
	dma_fence_put(fence);
	return r;
}

/**
 * amdgpu_fence_wait_polling - busy wait for a given sequence number
 *
 * @ring: ring the fence is associated with
 * @wait_seq: sequence number to wait for
 * @timeout: the timeout for waiting in usecs
 *
 * Busy wait for the requested sequence number to signal (all asics).
 * Returns the remaining timeout if the sequence number signaled in
 * time, 0 on timeout.
 */
signed long amdgpu_fence_wait_polling(struct amdgpu_ring *ring,
				      uint32_t wait_seq,
				      signed long timeout)
{
	uint32_t seq;

	do {
		seq = amdgpu_fence_read(ring);
		udelay(5);
		timeout -= 5;
	} while ((int32_t)(wait_seq - seq) > 0 && timeout > 0);

	return timeout > 0 ? timeout : 0;
}

/**
 * amdgpu_fence_count_emitted - get the count of emitted fences
 *
 * @ring: ring the fence is associated with
 *
 * Get the number of fences emitted on the requested ring (all asics).
 * Returns the number of emitted fences on the ring. Used by the
 * dynpm code to track ring activity.
 */
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
{
	uint64_t emitted;

	/* We are not protected by ring lock when reading the last sequence
	 * but it's ok to report slightly wrong fence count here.
	 */
	amdgpu_fence_process(ring);
	emitted = 0x100000000ull;
	emitted -= atomic_read(&ring->fence_drv.last_seq);
	emitted += READ_ONCE(ring->fence_drv.sync_seq);
	return lower_32_bits(emitted);
}

/**
 * amdgpu_fence_driver_start_ring - make the fence driver
 * ready for use on the requested ring.
 *
 * @ring: ring to start the fence driver on
 * @irq_src: interrupt source to use for this ring
 * @irq_type: interrupt type to use for this ring
 *
 * Make the fence driver ready for processing (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has.
 * Returns 0 for success, errors for failure.
 */
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
				   struct amdgpu_irq_src *irq_src,
				   unsigned irq_type)
{
	struct amdgpu_device *adev = ring->adev;
	uint64_t index;

	if (ring->funcs->type != AMDGPU_RING_TYPE_UVD) {
		ring->fence_drv.cpu_addr = &adev->wb.wb[ring->fence_offs];
		ring->fence_drv.gpu_addr = adev->wb.gpu_addr + (ring->fence_offs * 4);
	} else {
		/* put fence directly behind firmware */
		index = ALIGN(adev->uvd.fw->size, 8);
		ring->fence_drv.cpu_addr = adev->uvd.inst[ring->me].cpu_addr + index;
		ring->fence_drv.gpu_addr = adev->uvd.inst[ring->me].gpu_addr + index;
	}
	amdgpu_fence_write(ring, atomic_read(&ring->fence_drv.last_seq));
	amdgpu_irq_get(adev, irq_src, irq_type);

	ring->fence_drv.irq_src = irq_src;
	ring->fence_drv.irq_type = irq_type;
	ring->fence_drv.initialized = true;

	dev_dbg(adev->dev, "fence driver on ring %d use gpu addr 0x%016llx, "
		"cpu addr 0x%p\n", ring->idx,
		ring->fence_drv.gpu_addr, ring->fence_drv.cpu_addr);
	return 0;
}

/**
 * amdgpu_fence_driver_init_ring - init the fence driver
 * for the requested ring.
 *
 * @ring: ring to init the fence driver on
 * @num_hw_submission: number of entries on the hardware queue
 *
 * Init the fence driver for the requested ring (all asics).
 * Helper function for amdgpu_fence_driver_init().
 */
int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
				  unsigned num_hw_submission)
{
	long timeout;
	int r;

	/* Check that num_hw_submission is a power of two */
	if ((num_hw_submission & (num_hw_submission - 1)) != 0)
		return -EINVAL;

	ring->fence_drv.cpu_addr = NULL;
	ring->fence_drv.gpu_addr = 0;
	ring->fence_drv.sync_seq = 0;
	atomic_set(&ring->fence_drv.last_seq, 0);
	ring->fence_drv.initialized = false;

	timer_setup(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback, 0);

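	/*
	 * Fence slots form a power-of-two ring buffer indexed by masking
	 * the sequence number (see amdgpu_fence_emit()); twice the queue
	 * depth presumably leaves headroom for fences that have been
	 * emitted but not yet signaled.
	 */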
	ring->fence_drv.num_fences_mask = num_hw_submission * 2 - 1;
	spin_lock_init(&ring->fence_drv.lock);
	ring->fence_drv.fences = kcalloc(num_hw_submission * 2, sizeof(void *),
					 GFP_KERNEL);
	if (!ring->fence_drv.fences)
		return -ENOMEM;

	/* No need to setup the GPU scheduler for KIQ ring */
	if (ring->funcs->type != AMDGPU_RING_TYPE_KIQ) {
		/* for the non-SR-IOV case, no timeout is enforced on compute rings */
		if ((ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
		    && !amdgpu_sriov_vf(ring->adev))
			timeout = MAX_SCHEDULE_TIMEOUT;
		else
			timeout = msecs_to_jiffies(amdgpu_lockup_timeout);

		r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
				   num_hw_submission, amdgpu_job_hang_limit,
				   timeout, ring->name);
		if (r) {
			DRM_ERROR("Failed to create scheduler on ring %s.\n",
				  ring->name);
			return r;
		}
	}

	return 0;
}

/**
 * amdgpu_fence_driver_init - init the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Init the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 * Returns 0 for success.
 */
int amdgpu_fence_driver_init(struct amdgpu_device *adev)
{
	if (amdgpu_debugfs_fence_init(adev))
		dev_err(adev->dev, "fence debugfs file creation failed\n");

	return 0;
}

/**
 * amdgpu_fence_driver_fini - tear down the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Tear down the fence driver for all possible rings (all asics).
 */
void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
{
	unsigned i, j;
	int r;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->fence_drv.initialized)
			continue;
		r = amdgpu_fence_wait_empty(ring);
		if (r) {
			/* no need to trigger GPU reset as we are unloading */
			amdgpu_fence_driver_force_completion(ring);
		}
		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
		drm_sched_fini(&ring->sched);
		del_timer_sync(&ring->fence_drv.fallback_timer);
		for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
			dma_fence_put(ring->fence_drv.fences[j]);
		kfree(ring->fence_drv.fences);
		ring->fence_drv.fences = NULL;
		ring->fence_drv.initialized = false;
	}
}

/**
 * amdgpu_fence_driver_suspend - suspend the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Suspend the fence driver for all possible rings (all asics).
 */
void amdgpu_fence_driver_suspend(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		/* wait for gpu to finish processing current batch */
		r = amdgpu_fence_wait_empty(ring);
		if (r) {
			/* delay GPU reset to resume */
			amdgpu_fence_driver_force_completion(ring);
		}

		/* disable the interrupt */
		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
	}
}

/**
 * amdgpu_fence_driver_resume - resume the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Resume the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 */
void amdgpu_fence_driver_resume(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		/* enable the interrupt */
		amdgpu_irq_get(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
	}
}

/**
 * amdgpu_fence_driver_force_completion - force signal the latest fence of a ring
 *
 * @ring: ring to force completion on
 *
 * Writes the latest emitted sequence number as the signaled value and
 * processes the fences, so every outstanding fence on the ring signals.
 */
void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring)
{
	amdgpu_fence_write(ring, ring->fence_drv.sync_seq);
	amdgpu_fence_process(ring);
}

/*
 * Common fence implementation
 */

static const char *amdgpu_fence_get_driver_name(struct dma_fence *fence)
{
	return "amdgpu";
}

static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	return (const char *)fence->ring->name;
}

/**
 * amdgpu_fence_enable_signaling - enable signalling on fence
 * @f: fence
 *
 * This function is called with the fence lock held; it arms the
 * fallback timer so the fence still signals even if the interrupt
 * is never delivered.
 */
static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	struct amdgpu_ring *ring = fence->ring;

	if (!timer_pending(&ring->fence_drv.fallback_timer))
		amdgpu_fence_schedule_fallback(ring);

	DMA_FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);

	return true;
}

/**
 * amdgpu_fence_free - free up the fence memory
 *
 * @rcu: RCU callback head
 *
 * Free up the fence memory after the RCU grace period.
 */
static void amdgpu_fence_free(struct rcu_head *rcu)
{
	struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	kmem_cache_free(amdgpu_fence_slab, fence);
}

/**
 * amdgpu_fence_release - callback that fence can be freed
 *
 * @f: fence
 *
 * This function is called when the reference count becomes zero.
 * It just RCU schedules freeing up the fence.
 */
static void amdgpu_fence_release(struct dma_fence *f)
{
	call_rcu(&f->rcu, amdgpu_fence_free);
}

static const struct dma_fence_ops amdgpu_fence_ops = {
	.get_driver_name = amdgpu_fence_get_driver_name,
	.get_timeline_name = amdgpu_fence_get_timeline_name,
	.enable_signaling = amdgpu_fence_enable_signaling,
	.wait = dma_fence_default_wait,
	.release = amdgpu_fence_release,
};

/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_fence_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		amdgpu_fence_process(ring);

		seq_printf(m, "--- ring %d (%s) ---\n", i, ring->name);
		seq_printf(m, "Last signaled fence 0x%08x\n",
			   atomic_read(&ring->fence_drv.last_seq));
		seq_printf(m, "Last emitted 0x%08x\n",
			   ring->fence_drv.sync_seq);

		if (ring->funcs->type != AMDGPU_RING_TYPE_GFX)
			continue;

		/* set in CP_VMID_PREEMPT and preemption occurred */
		seq_printf(m, "Last preempted 0x%08x\n",
			   le32_to_cpu(*(ring->fence_drv.cpu_addr + 2)));
		/* set in CP_VMID_RESET and reset occurred */
		seq_printf(m, "Last reset 0x%08x\n",
			   le32_to_cpu(*(ring->fence_drv.cpu_addr + 4)));
		/* Both preemption and reset occurred */
		seq_printf(m, "Last both 0x%08x\n",
			   le32_to_cpu(*(ring->fence_drv.cpu_addr + 6)));
	}
	return 0;
}

/**
 * amdgpu_debugfs_gpu_recover - manually trigger a gpu reset & recover
 *
 * Manually trigger a GPU reset and recovery.
 */
static int amdgpu_debugfs_gpu_recover(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;

	seq_printf(m, "gpu recover\n");
	amdgpu_device_gpu_recover(adev, NULL, true);

	return 0;
}

static const struct drm_info_list amdgpu_debugfs_fence_list[] = {
	{"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL},
	{"amdgpu_gpu_recover", &amdgpu_debugfs_gpu_recover, 0, NULL}
};

static const struct drm_info_list amdgpu_debugfs_fence_list_sriov[] = {
	{"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL},
};
#endif

int amdgpu_debugfs_fence_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	if (amdgpu_sriov_vf(adev))
		return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_fence_list_sriov, 1);
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_fence_list, 2);
#else
	return 0;
#endif
}