/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 *          Christian König
 */
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "atom.h"

/*
 * Rings
 * Most engines on the GPU are fed via ring buffers. Ring
 * buffers are areas of GPU accessible memory that the host
 * writes commands into and the GPU reads commands out of.
 * There is a rptr (read pointer) that determines where the
 * GPU is currently reading, and a wptr (write pointer)
 * which determines where the host has written. When the
 * pointers are equal, the ring is idle. When the host
 * writes commands to the ring buffer, it increments the
 * wptr. The GPU then starts fetching commands and executes
 * them until the pointers are equal again.
 */
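
/*
 * Typical submission flow built on the helpers below (illustrative sketch
 * only; error handling and synchronization are simplified):
 *
 *	r = amdgpu_ring_alloc(ring, ndw);
 *	if (r)
 *		return r;
 *	amdgpu_ring_write(ring, packet);	... up to ndw times
 *	amdgpu_ring_commit(ring);		... or amdgpu_ring_undo() on error
 */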
static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
				    struct amdgpu_ring *ring);

/**
 * amdgpu_ring_alloc - allocate space on the ring buffer
 *
 * @ring: amdgpu_ring structure holding ring information
 * @ndw: number of dwords to allocate in the ring buffer
 *
 * Allocate @ndw dwords in the ring buffer (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw)
{
	/* Align requested size with padding so amdgpu_ring_commit() can
	 * pad safely */
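	/* e.g. with align_mask == 7, a request for 10 dwords is rounded up to 16 */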
	ndw = (ndw + ring->align_mask) & ~ring->align_mask;

	/* Make sure we aren't trying to allocate more space
	 * than the maximum for one submission
	 */
	if (WARN_ON_ONCE(ndw > ring->max_dw))
		return -ENOMEM;

	ring->count_dw = ndw;
	ring->wptr_old = ring->wptr;
	return 0;
}

/**
 * amdgpu_ring_insert_nop - insert NOP packets
 *
 * @ring: amdgpu_ring structure holding ring information
 * @count: the number of NOP packets to insert
 *
 * This is the generic insert_nop function for rings except SDMA
 */
void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	int i;

	for (i = 0; i < count; i++)
		amdgpu_ring_write(ring, ring->nop);
}

/**
 * amdgpu_ring_generic_pad_ib - pad IB with NOP packets
 *
 * @ring: amdgpu_ring structure holding ring information
 * @ib: IB to add NOP packets to
 *
 * This is the generic pad_ib function for rings except SDMA
 */
void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
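	/* e.g. with align_mask == 7 the IB length is padded up to a multiple of 8 dwords */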
	while (ib->length_dw & ring->align_mask)
		ib->ptr[ib->length_dw++] = ring->nop;
}

/**
 * amdgpu_ring_commit - tell the GPU to execute the new
 * commands on the ring buffer
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Update the wptr (write pointer) to tell the GPU to
 * execute new commands on the ring buffer (all asics).
 */
void amdgpu_ring_commit(struct amdgpu_ring *ring)
{
	uint32_t count;

	/* We pad to match fetch size */
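	/* e.g. with align_mask == 15 and wptr % 16 == 10 we insert 6 NOPs;
	 * if the wptr is already aligned we insert none
	 */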
	count = ring->align_mask + 1 - (ring->wptr & ring->align_mask);
	count %= ring->align_mask + 1;
	ring->funcs->insert_nop(ring, count);

	mb();
	amdgpu_ring_set_wptr(ring);
}

/**
 * amdgpu_ring_undo - reset the wptr
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Reset the driver's copy of the wptr (all asics).
 */
void amdgpu_ring_undo(struct amdgpu_ring *ring)
{
	ring->wptr = ring->wptr_old;
}

/**
 * amdgpu_ring_backup - Back up the content of a ring
 *
 * @ring: the ring we want to back up
 * @data: pointer used to return the saved ring contents
 *
 * Saves all unprocessed commits from a ring, returns the number of dwords saved.
 */
unsigned amdgpu_ring_backup(struct amdgpu_ring *ring,
			    uint32_t **data)
{
	unsigned size, ptr, i;

	*data = NULL;

	if (ring->ring_obj == NULL)
		return 0;

	/* it doesn't make sense to save anything if all fences are signaled */
	if (!amdgpu_fence_count_emitted(ring))
		return 0;

	ptr = le32_to_cpu(*ring->next_rptr_cpu_addr);

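	/* number of unprocessed dwords between the saved next_rptr and our
	 * wptr, modulo the ring size in dwords
	 */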
	size = ring->wptr + (ring->ring_size / 4);
	size -= ptr;
	size &= ring->ptr_mask;
	if (size == 0)
		return 0;

	/* and then save the content of the ring */
	*data = kmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
	if (!*data)
		return 0;
	for (i = 0; i < size; ++i) {
		(*data)[i] = ring->ring[ptr++];
		ptr &= ring->ptr_mask;
	}

	return size;
}

/**
 * amdgpu_ring_restore - append saved commands to the ring again
 *
 * @ring: ring to append commands to
 * @size: number of dwords we want to write
 * @data: saved commands
 *
 * Allocates space on the ring and restores the previously saved commands.
 */
int amdgpu_ring_restore(struct amdgpu_ring *ring,
			unsigned size, uint32_t *data)
{
	int i, r;

	if (!size || !data)
		return 0;

	/* restore the saved ring content */
	r = amdgpu_ring_alloc(ring, size);
	if (r)
		return r;

	for (i = 0; i < size; ++i) {
		amdgpu_ring_write(ring, data[i]);
	}

	amdgpu_ring_commit(ring);
	kfree(data);
	return 0;
}

/**
 * amdgpu_ring_init - init driver ring struct.
 *
 * @adev: amdgpu_device pointer
 * @ring: amdgpu_ring structure holding ring information
 * @max_dw: maximum number of dw for ring alloc
 * @nop: nop packet for this ring
 * @align_mask: alignment mask used when padding submissions
 * @irq_src: interrupt source used for the fence interrupt
 * @irq_type: interrupt type used for the fence interrupt
 * @ring_type: type of ring (see enum amdgpu_ring_type)
 *
 * Initialize the driver information for the selected ring (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
		     unsigned max_dw, u32 nop, u32 align_mask,
		     struct amdgpu_irq_src *irq_src, unsigned irq_type,
		     enum amdgpu_ring_type ring_type)
{
	int r;

	if (ring->adev == NULL) {
		if (adev->num_rings >= AMDGPU_MAX_RINGS)
			return -EINVAL;

		ring->adev = adev;
		ring->idx = adev->num_rings++;
		adev->rings[ring->idx] = ring;
		r = amdgpu_fence_driver_init_ring(ring,
			amdgpu_sched_hw_submission);
		if (r)
			return r;
	}

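	/* allocate GPU-accessible writeback slots used to track ring and
	 * fence state
	 */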
	r = amdgpu_wb_get(adev, &ring->rptr_offs);
	if (r) {
		dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r);
		return r;
	}

	r = amdgpu_wb_get(adev, &ring->wptr_offs);
	if (r) {
		dev_err(adev->dev, "(%d) ring wptr_offs wb alloc failed\n", r);
		return r;
	}

	r = amdgpu_wb_get(adev, &ring->fence_offs);
	if (r) {
		dev_err(adev->dev, "(%d) ring fence_offs wb alloc failed\n", r);
		return r;
	}

	r = amdgpu_wb_get(adev, &ring->next_rptr_offs);
	if (r) {
		dev_err(adev->dev, "(%d) ring next_rptr wb alloc failed\n", r);
		return r;
	}
	ring->next_rptr_gpu_addr = adev->wb.gpu_addr + ring->next_rptr_offs * 4;
	ring->next_rptr_cpu_addr = &adev->wb.wb[ring->next_rptr_offs];

	r = amdgpu_wb_get(adev, &ring->cond_exe_offs);
	if (r) {
		dev_err(adev->dev, "(%d) ring cond_exec_polling wb alloc failed\n", r);
		return r;
	}
	ring->cond_exe_gpu_addr = adev->wb.gpu_addr + (ring->cond_exe_offs * 4);
	ring->cond_exe_cpu_addr = &adev->wb.wb[ring->cond_exe_offs];

	spin_lock_init(&ring->fence_lock);
	r = amdgpu_fence_driver_start_ring(ring, irq_src, irq_type);
	if (r) {
		dev_err(adev->dev, "failed initializing fences (%d).\n", r);
		return r;
	}

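	/* size the ring so that max_dw dwords fit for each in-flight scheduler
	 * submission, rounded up to a power of two so ptr_mask wrapping works
	 */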
	ring->ring_size = roundup_pow_of_two(max_dw * 4 *
					     amdgpu_sched_hw_submission);
	ring->align_mask = align_mask;
	ring->nop = nop;
	ring->type = ring_type;

	/* Allocate ring buffer */
	if (ring->ring_obj == NULL) {
		r = amdgpu_bo_create(adev, ring->ring_size, PAGE_SIZE, true,
				     AMDGPU_GEM_DOMAIN_GTT, 0,
				     NULL, NULL, &ring->ring_obj);
		if (r) {
			dev_err(adev->dev, "(%d) ring create failed\n", r);
			return r;
		}
		r = amdgpu_bo_reserve(ring->ring_obj, false);
		if (unlikely(r != 0))
			return r;
		r = amdgpu_bo_pin(ring->ring_obj, AMDGPU_GEM_DOMAIN_GTT,
				  &ring->gpu_addr);
		if (r) {
			amdgpu_bo_unreserve(ring->ring_obj);
			dev_err(adev->dev, "(%d) ring pin failed\n", r);
			return r;
		}
		r = amdgpu_bo_kmap(ring->ring_obj,
				   (void **)&ring->ring);
		amdgpu_bo_unreserve(ring->ring_obj);
		if (r) {
			dev_err(adev->dev, "(%d) ring map failed\n", r);
			return r;
		}
	}
	ring->ptr_mask = (ring->ring_size / 4) - 1;
	ring->max_dw = max_dw;

	if (amdgpu_debugfs_ring_init(adev, ring)) {
		DRM_ERROR("Failed to register debugfs file for rings !\n");
	}
	return 0;
}

/**
 * amdgpu_ring_fini - tear down the driver ring struct.
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Tear down the driver information for the selected ring (all asics).
 */
void amdgpu_ring_fini(struct amdgpu_ring *ring)
{
	int r;
	struct amdgpu_bo *ring_obj;

	ring_obj = ring->ring_obj;
	ring->ready = false;
	ring->ring = NULL;
	ring->ring_obj = NULL;

	amdgpu_wb_free(ring->adev, ring->cond_exe_offs);
	amdgpu_wb_free(ring->adev, ring->fence_offs);
	amdgpu_wb_free(ring->adev, ring->rptr_offs);
	amdgpu_wb_free(ring->adev, ring->wptr_offs);
	amdgpu_wb_free(ring->adev, ring->next_rptr_offs);

	if (ring_obj) {
		r = amdgpu_bo_reserve(ring_obj, false);
		if (likely(r == 0)) {
			amdgpu_bo_kunmap(ring_obj);
			amdgpu_bo_unpin(ring_obj);
			amdgpu_bo_unreserve(ring_obj);
		}
		amdgpu_bo_unref(&ring_obj);
	}
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int amdgpu_debugfs_ring_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	int roffset = (unsigned long)node->info_ent->data;
	struct amdgpu_ring *ring = (void *)(((uint8_t*)adev) + roffset);
	uint32_t rptr, wptr, rptr_next;
	unsigned i;

	wptr = amdgpu_ring_get_wptr(ring);
	seq_printf(m, "wptr: 0x%08x [%5d]\n", wptr, wptr);

	rptr = amdgpu_ring_get_rptr(ring);
	rptr_next = le32_to_cpu(*ring->next_rptr_cpu_addr);

	seq_printf(m, "rptr: 0x%08x [%5d]\n", rptr, rptr);

	seq_printf(m, "driver's copy of the wptr: 0x%08x [%5d]\n",
		   ring->wptr, ring->wptr);

	if (!ring->ready)
		return 0;

	/* print 32 dw before current rptr as often it's the last executed
	 * packet that is the root issue; '*' marks the current rptr and
	 * '#' the next_rptr
	 */
	i = (rptr + ring->ptr_mask + 1 - 32) & ring->ptr_mask;
	while (i != rptr) {
		seq_printf(m, "r[%5d]=0x%08x", i, ring->ring[i]);
		if (i == rptr)
			seq_puts(m, " *");
		if (i == rptr_next)
			seq_puts(m, " #");
		seq_puts(m, "\n");
		i = (i + 1) & ring->ptr_mask;
	}
	while (i != wptr) {
		seq_printf(m, "r[%5d]=0x%08x", i, ring->ring[i]);
		if (i == rptr)
			seq_puts(m, " *");
		if (i == rptr_next)
			seq_puts(m, " #");
		seq_puts(m, "\n");
		i = (i + 1) & ring->ptr_mask;
	}
	return 0;
}

static struct drm_info_list amdgpu_debugfs_ring_info_list[AMDGPU_MAX_RINGS];
static char amdgpu_debugfs_ring_names[AMDGPU_MAX_RINGS][32];

#endif

static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
				    struct amdgpu_ring *ring)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned offset = (uint8_t*)ring - (uint8_t*)adev;
	unsigned i;
	struct drm_info_list *info;
	char *name;

	for (i = 0; i < ARRAY_SIZE(amdgpu_debugfs_ring_info_list); ++i) {
		info = &amdgpu_debugfs_ring_info_list[i];
		if (!info->data)
			break;
	}

	if (i == ARRAY_SIZE(amdgpu_debugfs_ring_info_list))
		return -ENOSPC;

	name = &amdgpu_debugfs_ring_names[i][0];
	sprintf(name, "amdgpu_ring_%s", ring->name);
	info->name = name;
	info->show = amdgpu_debugfs_ring_info;
	info->driver_features = 0;
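	/* stash the ring's byte offset inside amdgpu_device; the show
	 * callback converts it back into a ring pointer
	 */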
	info->data = (void*)(uintptr_t)offset;

	return amdgpu_debugfs_add_files(adev, info, 1);
#endif
	return 0;
}