/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_vcn.h"
#include "soc15d.h"
#include "soc15_common.h"

#include "vcn/vcn_1_0_offset.h"

/* 1 second timeout */
#define VCN_IDLE_TIMEOUT	msecs_to_jiffies(1000)

/* Firmware Names */
#define FIRMWARE_RAVEN		"amdgpu/raven_vcn.bin"

MODULE_FIRMWARE(FIRMWARE_RAVEN);

static void amdgpu_vcn_idle_work_handler(struct work_struct *work);

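/*
 * Request and validate the VCN firmware, allocate and map the VCPU buffer
 * object, and set up scheduler entities for the decode and encode rings.
 */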
int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	struct drm_sched_rq *rq;
	unsigned long bo_size;
	const char *fw_name;
	const struct common_firmware_header *hdr;
	unsigned version_major, version_minor, family_id;
	int r;

	INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		fw_name = FIRMWARE_RAVEN;
		break;
	default:
		return -EINVAL;
	}

	r = request_firmware(&adev->vcn.fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "amdgpu_vcn: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	r = amdgpu_ucode_validate(adev->vcn.fw);
	if (r) {
		dev_err(adev->dev, "amdgpu_vcn: Can't validate firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->vcn.fw);
		adev->vcn.fw = NULL;
		return r;
	}

	hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
	family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
	version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
	version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
	DRM_INFO("Found VCN firmware Version: %hu.%hu Family ID: %hu\n",
		 version_major, version_minor, family_id);

	bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
		  + AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_HEAP_SIZE
		  + AMDGPU_VCN_SESSION_SIZE * 40;
	r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.vcpu_bo,
				    &adev->vcn.gpu_addr, &adev->vcn.cpu_addr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r);
		return r;
	}

	ring = &adev->vcn.ring_dec;
	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
	r = drm_sched_entity_init(&ring->sched, &adev->vcn.entity_dec,
				  rq, NULL);
	if (r != 0) {
		DRM_ERROR("Failed setting up VCN dec run queue.\n");
		return r;
	}

	ring = &adev->vcn.ring_enc[0];
	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
	r = drm_sched_entity_init(&ring->sched, &adev->vcn.entity_enc,
				  rq, NULL);
	if (r != 0) {
		DRM_ERROR("Failed setting up VCN enc run queue.\n");
		return r;
	}

	return 0;
}

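/*
 * Tear down the scheduler entities, free the VCPU buffer object and the
 * rings, and release the firmware.
 */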
int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
{
	int i;

	kfree(adev->vcn.saved_bo);

	drm_sched_entity_fini(&adev->vcn.ring_dec.sched, &adev->vcn.entity_dec);

	drm_sched_entity_fini(&adev->vcn.ring_enc[0].sched, &adev->vcn.entity_enc);

	amdgpu_bo_free_kernel(&adev->vcn.vcpu_bo,
			      &adev->vcn.gpu_addr,
			      (void **)&adev->vcn.cpu_addr);

	amdgpu_ring_fini(&adev->vcn.ring_dec);

	for (i = 0; i < adev->vcn.num_enc_rings; ++i)
		amdgpu_ring_fini(&adev->vcn.ring_enc[i]);

	release_firmware(adev->vcn.fw);

	return 0;
}

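/*
 * Cancel the idle work and save the contents of the VCPU buffer object to
 * system memory so they can be restored on resume.
 */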
int amdgpu_vcn_suspend(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;

	if (adev->vcn.vcpu_bo == NULL)
		return 0;

	cancel_delayed_work_sync(&adev->vcn.idle_work);

	size = amdgpu_bo_size(adev->vcn.vcpu_bo);
	ptr = adev->vcn.cpu_addr;

	adev->vcn.saved_bo = kmalloc(size, GFP_KERNEL);
	if (!adev->vcn.saved_bo)
		return -ENOMEM;

	memcpy_fromio(adev->vcn.saved_bo, ptr, size);

	return 0;
}

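/*
 * Restore the VCPU buffer object from the copy saved at suspend time, or
 * reload it from the firmware image if no saved copy exists.
 */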
int amdgpu_vcn_resume(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;

	if (adev->vcn.vcpu_bo == NULL)
		return -EINVAL;

	size = amdgpu_bo_size(adev->vcn.vcpu_bo);
	ptr = adev->vcn.cpu_addr;

	if (adev->vcn.saved_bo != NULL) {
		memcpy_toio(ptr, adev->vcn.saved_bo, size);
		kfree(adev->vcn.saved_bo);
		adev->vcn.saved_bo = NULL;
	} else {
		const struct common_firmware_header *hdr;
		unsigned offset;

		hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
		offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
		memcpy_toio(adev->vcn.cpu_addr, adev->vcn.fw->data + offset,
			    le32_to_cpu(hdr->ucode_size_bytes));
		size -= le32_to_cpu(hdr->ucode_size_bytes);
		ptr += le32_to_cpu(hdr->ucode_size_bytes);
		memset_io(ptr, 0, size);
	}

	return 0;
}

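/*
 * Delayed work that counts the fences still emitted on the decode and encode
 * rings and reschedules itself while work is outstanding; the power-gating
 * call for the idle case is currently stubbed out.
 */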
static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, vcn.idle_work.work);
	unsigned fences = amdgpu_fence_count_emitted(&adev->vcn.ring_dec);
	unsigned i;

	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
		fences += amdgpu_fence_count_emitted(&adev->vcn.ring_enc[i]);
	}

	if (fences == 0) {
		if (adev->pm.dpm_enabled) {
			/* might be used when with pg/cg
			amdgpu_dpm_enable_uvd(adev, false);
			*/
		}
	} else {
		schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
	}
}

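/* Called before a submission; cancels any pending idle work. */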
void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);

	if (set_clocks && adev->pm.dpm_enabled) {
		/* might be used when with pg/cg
		amdgpu_dpm_enable_uvd(adev, true);
		*/
	}
}

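/* Called after a submission; (re)arms the idle work with a 1 second delay. */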
void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
{
	schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
}

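/*
 * Decode ring test: write a known value to UVD_CONTEXT_ID through the ring
 * and poll the register until it reads back.
 */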
int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID));
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
			  ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}
	return r;
}

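/*
 * Build an indirect buffer that hands the message buffer address to the VCPU
 * and submit it, either directly on the ring or through the decode entity;
 * the buffer object is fenced, unreserved and unreferenced afterwards.
 */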
static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
				   struct amdgpu_bo *bo, bool direct,
				   struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct dma_fence *f = NULL;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(adev, 64, &job);
	if (r)
		goto err;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);
	ib->ptr[0] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0);
	ib->ptr[1] = addr;
	ib->ptr[2] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0);
	ib->ptr[3] = addr >> 32;
	ib->ptr[4] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0);
	ib->ptr[5] = 0;
	for (i = 6; i < 16; i += 2) {
		ib->ptr[i] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP), 0);
		ib->ptr[i+1] = 0;
	}
	ib->length_dw = 16;

	if (direct) {
		r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
		job->fence = dma_fence_get(f);
		if (r)
			goto err_free;

		amdgpu_job_free(job);
	} else {
		r = amdgpu_job_submit(job, ring, &adev->vcn.entity_dec,
				      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
		if (r)
			goto err_free;
	}

	amdgpu_bo_fence(bo, f, false);
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err_free:
	amdgpu_job_free(job);

err:
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);
	return r;
}

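/* Write a decoder create message into a new buffer object and send it. */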
static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
					 struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo = NULL;
	uint32_t *msg;
	int r, i;

	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, NULL, (void **)&msg);
	if (r)
		return r;

	msg[0] = cpu_to_le32(0x00000028);
	msg[1] = cpu_to_le32(0x00000038);
	msg[2] = cpu_to_le32(0x00000001);
	msg[3] = cpu_to_le32(0x00000000);
	msg[4] = cpu_to_le32(handle);
	msg[5] = cpu_to_le32(0x00000000);
	msg[6] = cpu_to_le32(0x00000001);
	msg[7] = cpu_to_le32(0x00000028);
	msg[8] = cpu_to_le32(0x00000010);
	msg[9] = cpu_to_le32(0x00000000);
	msg[10] = cpu_to_le32(0x00000007);
	msg[11] = cpu_to_le32(0x00000000);
	msg[12] = cpu_to_le32(0x00000780);
	msg[13] = cpu_to_le32(0x00000440);
	for (i = 14; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	return amdgpu_vcn_dec_send_msg(ring, bo, true, fence);
}

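/* Write a decoder destroy message into a new buffer object and send it. */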
static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
					  bool direct, struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo = NULL;
	uint32_t *msg;
	int r, i;

	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, NULL, (void **)&msg);
	if (r)
		return r;

	msg[0] = cpu_to_le32(0x00000028);
	msg[1] = cpu_to_le32(0x00000018);
	msg[2] = cpu_to_le32(0x00000000);
	msg[3] = cpu_to_le32(0x00000002);
	msg[4] = cpu_to_le32(handle);
	msg[5] = cpu_to_le32(0x00000000);
	for (i = 6; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	return amdgpu_vcn_dec_send_msg(ring, bo, direct, fence);
}

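/*
 * Decode IB test: submit create and destroy messages and wait for the
 * resulting fence.
 */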
int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence;
	long r;

	r = amdgpu_vcn_dec_get_create_msg(ring, 1, NULL);
	if (r) {
		DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
		goto error;
	}

	r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, true, &fence);
	if (r) {
		DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
		goto error;
	}

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0) {
		DRM_ERROR("amdgpu: IB test timed out.\n");
		r = -ETIMEDOUT;
	} else if (r < 0) {
		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
	} else {
		DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
		r = 0;
	}

	dma_fence_put(fence);

error:
	return r;
}

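/*
 * Encode ring test: submit an END command and wait for the read pointer to
 * move past it.
 */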
int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr = amdgpu_ring_get_rptr(ring);
	unsigned i;
	int r;

	r = amdgpu_ring_alloc(ring, 16);
	if (r) {
		DRM_ERROR("amdgpu: vcn enc failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
	amdgpu_ring_write(ring, VCN_ENC_CMD_END);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
			  ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed\n",
			  ring->idx);
		r = -ETIMEDOUT;
	}

	return r;
}

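/* Build and submit an encoder session-create IB for the given handle. */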
static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
					 struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t dummy;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	dummy = ib->gpu_addr + 1024;

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
	ib->ptr[ib->length_dw++] = dummy;
	ib->ptr[ib->length_dw++] = 0x0000000b;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
	job->fence = dma_fence_get(f);
	if (r)
		goto err;

	amdgpu_job_free(job);
	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

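/* Build and submit an encoder close-session IB for the given handle. */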
static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
					  struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t dummy;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	dummy = ib->gpu_addr + 1024;

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
	ib->ptr[ib->length_dw++] = dummy;
	ib->ptr[ib->length_dw++] = 0x0000000b;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002;
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
	job->fence = dma_fence_get(f);
	if (r)
		goto err;

	amdgpu_job_free(job);
	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

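/*
 * Encode IB test: create and destroy an encoder session and wait for the
 * fence of the close-session message.
 */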
int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	long r;

	r = amdgpu_vcn_enc_get_create_msg(ring, 1, NULL);
	if (r) {
		DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
		goto error;
	}

	r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, &fence);
	if (r) {
		DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
		goto error;
	}

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0) {
		DRM_ERROR("amdgpu: IB test timed out.\n");
		r = -ETIMEDOUT;
	} else if (r < 0) {
		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
	} else {
		DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
		r = 0;
	}
error:
	dma_fence_put(fence);
	return r;
}