/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 *          Christian König
 */
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "atom.h"

/*
 * IB
 * IBs (Indirect Buffers) are areas of GPU-accessible memory where
 * commands are stored. You can put a pointer to the IB in the
 * command ring and the hw will fetch the commands from the IB
 * and execute them. Generally userspace acceleration drivers
 * produce command buffers which are sent to the kernel and
 * put in IBs for execution by the requested ring.
 */
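
/*
 * A minimal usage sketch of the IB lifecycle, modeled on how the ring
 * IB tests use these helpers (illustrative only; the size and the NULL
 * vm/job arguments are assumptions, not a verbatim caller): request an
 * IB from the suballocator, fill it with packets, schedule it on a
 * ring, then free it against the submission fence so the memory is not
 * reused before the GPU is done with it.
 *
 *	struct amdgpu_ib ib;
 *	struct fence *f = NULL;
 *	int r;
 *
 *	r = amdgpu_ib_get(adev, NULL, 256, &ib);
 *	if (r)
 *		return r;
 *	ib.ptr[ib.length_dw++] = ...;	(fill in real command packets)
 *	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, NULL, &f);
 *	if (r)
 *		goto err_free;
 *	fence_wait(f, false);
 * err_free:
 *	amdgpu_ib_free(adev, &ib, f);
 *	fence_put(f);
 */
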
static int amdgpu_debugfs_sa_init(struct amdgpu_device *adev);

/**
 * amdgpu_ib_get - request an IB (Indirect Buffer)
 *
 * @adev: amdgpu_device pointer
 * @vm: optional VM the IB is associated with, NULL for none
 * @size: requested IB size
 * @ib: IB object returned
 *
 * Request an IB (all asics). IBs are allocated using the
 * suballocator.
 * Returns 0 on success, error on failure.
 */
int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		  unsigned size, struct amdgpu_ib *ib)
{
	int r;

	if (size) {
		r = amdgpu_sa_bo_new(&adev->ring_tmp_bo,
				     &ib->sa_bo, size, 256);
		if (r) {
			dev_err(adev->dev, "failed to get a new IB (%d)\n", r);
			return r;
		}

		ib->ptr = amdgpu_sa_bo_cpu_addr(ib->sa_bo);

		if (!vm)
			ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
	}

	ib->vm_id = 0;

	return 0;
}

/**
 * amdgpu_ib_free - free an IB (Indirect Buffer)
 *
 * @adev: amdgpu_device pointer
 * @ib: IB object to free
 * @f: fence the suballocation must wait on before the IB memory is reused
 *
 * Free an IB (all asics).
 */
void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
		    struct fence *f)
{
	amdgpu_sa_bo_free(adev, &ib->sa_bo, f);
}

/**
 * amdgpu_ib_schedule - schedule an IB (Indirect Buffer) on the ring
 *
 * @ring: ring the IBs are scheduled on
 * @num_ibs: number of IBs to schedule
 * @ibs: IB objects to schedule
 * @last_vm_update: fence of the last VM update
 * @job: job the IBs belong to, NULL for e.g. ring tests
 * @f: fence created during this submission
 *
 * Schedule an IB on the associated ring (all asics).
 * Returns 0 on success, error on failure.
 *
 * On SI, there are two parallel engines fed from the primary ring:
 * the CE (Constant Engine) and the DE (Drawing Engine). Since
 * resource descriptors have moved to memory, the CE allows you to
 * prime the caches while the DE is updating register state so that
 * the resource descriptors will already be in cache when the draw is
 * processed. To accomplish this, the userspace driver submits two
 * IBs, one for the CE and one for the DE. If there is a CE IB (called
 * a CONST_IB), it will be put on the ring prior to the DE IB. Prior
 * to SI there was just a DE IB.
 */
int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
		       struct amdgpu_ib *ibs, struct fence *last_vm_update,
		       struct amdgpu_job *job, struct fence **f)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib *ib = &ibs[0];
	bool skip_preamble, need_ctx_switch;
	unsigned patch_offset = ~0;
	struct amdgpu_vm *vm;
	struct fence *hwf;
	uint64_t ctx;

	unsigned i;
	int r = 0;

	if (num_ibs == 0)
		return -EINVAL;

	/* ring tests don't use a job */
	if (job) {
		vm = job->vm;
		ctx = job->ctx;
	} else {
		vm = NULL;
		ctx = 0;
	}

	if (!ring->ready) {
		dev_err(adev->dev, "couldn't schedule ib\n");
		return -EINVAL;
	}

	if (vm && !ibs->vm_id) {
		dev_err(adev->dev, "VM IB without ID\n");
		return -EINVAL;
	}

	r = amdgpu_ring_alloc(ring, 256 * num_ibs);
	if (r) {
		dev_err(adev->dev, "scheduling IB failed (%d).\n", r);
		return r;
	}

	if (ring->type == AMDGPU_RING_TYPE_SDMA && ring->funcs->init_cond_exec)
		patch_offset = amdgpu_ring_init_cond_exec(ring);

	if (vm) {
		r = amdgpu_vm_flush(ring, ib->vm_id, ib->vm_pd_addr,
				    ib->gds_base, ib->gds_size,
				    ib->gws_base, ib->gws_size,
				    ib->oa_base, ib->oa_size);
		if (r) {
			amdgpu_ring_undo(ring);
			return r;
		}
	}

	if (ring->funcs->emit_hdp_flush)
		amdgpu_ring_emit_hdp_flush(ring);

	/* always set cond_exec_polling to CONTINUE */
	*ring->cond_exe_cpu_addr = 1;

	skip_preamble = ring->current_ctx == ctx;
	need_ctx_switch = ring->current_ctx != ctx;
	for (i = 0; i < num_ibs; ++i) {
		ib = &ibs[i];

		/* drop preamble IBs if we don't have a context switch */
		if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && skip_preamble)
			continue;

		amdgpu_ring_emit_ib(ring, ib, need_ctx_switch);
		need_ctx_switch = false;
	}

	if (ring->funcs->emit_hdp_invalidate)
		amdgpu_ring_emit_hdp_invalidate(ring);

	r = amdgpu_fence_emit(ring, &hwf);
	if (r) {
		dev_err(adev->dev, "failed to emit fence (%d)\n", r);
		if (ib->vm_id)
			amdgpu_vm_reset_id(adev, ib->vm_id);
		amdgpu_ring_undo(ring);
		return r;
	}

	/* wrap the last IB with a fence */
	if (ib->user) {
		uint64_t addr = amdgpu_bo_gpu_offset(ib->user->bo);
		addr += ib->user->offset;
		amdgpu_ring_emit_fence(ring, addr, ib->sequence,
				       AMDGPU_FENCE_FLAG_64BIT);
	}

	if (f)
		*f = fence_get(hwf);

	if (patch_offset != ~0 && ring->funcs->patch_cond_exec)
		amdgpu_ring_patch_cond_exec(ring, patch_offset);

	ring->current_ctx = ctx;
	amdgpu_ring_commit(ring);
	return 0;
}
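
/*
 * An illustrative sketch of the SI-era CE/DE split described above
 * (hedged: flag handling actually lives in the CS ioctl path, so this
 * is not a verbatim caller). Userspace marks the CONST_IB with
 * AMDGPU_IB_FLAG_CE and places it first, so the CE can prime the
 * caches before the DE IB with the register state and draws runs:
 *
 *	struct amdgpu_ib ibs[2];
 *
 *	ibs[0].flags = AMDGPU_IB_FLAG_CE;	(CONST_IB, primes caches)
 *	ibs[1].flags = 0;			(DE IB, state + draws)
 *	r = amdgpu_ib_schedule(ring, 2, ibs, NULL, job, &f);
 */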

/**
 * amdgpu_ib_pool_init - Init the IB (Indirect Buffer) pool
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the suballocator to manage a pool of memory
 * for use as IBs (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_ib_pool_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->ib_pool_ready)
		return 0;

	r = amdgpu_sa_bo_manager_init(adev, &adev->ring_tmp_bo,
				      AMDGPU_IB_POOL_SIZE*64*1024,
				      AMDGPU_GPU_PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_GTT);
	if (r)
		return r;

	r = amdgpu_sa_bo_manager_start(adev, &adev->ring_tmp_bo);
	if (r)
		return r;

	adev->ib_pool_ready = true;
	if (amdgpu_debugfs_sa_init(adev))
		dev_err(adev->dev, "failed to register debugfs file for SA\n");

	return 0;
}
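
/*
 * Hedged sketch of the expected pairing during device bring-up: the
 * pool is created once before any amdgpu_ib_get() caller can run and
 * torn down again on fini; the ib_pool_ready check above makes a
 * repeated init call harmless.
 *
 *	r = amdgpu_ib_pool_init(adev);
 *	if (r) {
 *		dev_err(adev->dev, "IB initialization failed (%d).\n", r);
 *		return r;
 *	}
 *	...
 *	amdgpu_ib_pool_fini(adev);
 */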

/**
 * amdgpu_ib_pool_fini - Free the IB (Indirect Buffer) pool
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the suballocator managing the pool of memory
 * for use as IBs (all asics).
 */
void amdgpu_ib_pool_fini(struct amdgpu_device *adev)
{
	if (adev->ib_pool_ready) {
		amdgpu_sa_bo_manager_suspend(adev, &adev->ring_tmp_bo);
		amdgpu_sa_bo_manager_fini(adev, &adev->ring_tmp_bo);
		adev->ib_pool_ready = false;
	}
}

/**
 * amdgpu_ib_ring_tests - test IBs on the rings
 *
 * @adev: amdgpu_device pointer
 *
 * Test an IB (Indirect Buffer) on each ring.
 * If the test fails, disable the ring.
 * Returns 0 on success, error if the primary GFX ring
 * IB test fails.
 */
int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
{
	unsigned i;
	int r;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->ready)
			continue;

		r = amdgpu_ring_test_ib(ring);
		if (r) {
			ring->ready = false;

			if (ring == &adev->gfx.gfx_ring[0]) {
				/* oh, oh, that's really bad */
				DRM_ERROR("amdgpu: failed testing IB on GFX ring (%d).\n", r);
				adev->accel_working = false;
				return r;

			} else {
				/* still not good, but we can live with it */
				DRM_ERROR("amdgpu: failed testing IB on ring %d (%d).\n", i, r);
			}
		}
	}
	return 0;
}
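
/*
 * Hedged usage sketch: callers typically run the IB tests once the
 * rings have been brought up (e.g. on init or resume). Only a failure
 * on the primary GFX ring is fatal; other rings are merely marked not
 * ready in place, so checking the return value is enough:
 *
 *	r = amdgpu_ib_ring_tests(adev);
 *	if (r)
 *		DRM_ERROR("ib ring test failed (%d).\n", r);
 */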

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int amdgpu_debugfs_sa_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;

	amdgpu_sa_bo_dump_debug_info(&adev->ring_tmp_bo, m);

	return 0;
}

static const struct drm_info_list amdgpu_debugfs_sa_list[] = {
	{"amdgpu_sa_info", &amdgpu_debugfs_sa_info, 0, NULL},
};

#endif

static int amdgpu_debugfs_sa_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_sa_list, 1);
#else
	return 0;
#endif
}