/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_vcn.h"
#include "soc15d.h"
#include "soc15_common.h"

#include "vcn/vcn_1_0_offset.h"

/* 1 second timeout */
#define VCN_IDLE_TIMEOUT	msecs_to_jiffies(1000)

/* Firmware Names */
#define FIRMWARE_RAVEN		"amdgpu/raven_vcn.bin"

MODULE_FIRMWARE(FIRMWARE_RAVEN);

static void amdgpu_vcn_idle_work_handler(struct work_struct *work);

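/*
 * amdgpu_vcn_sw_init - initialize the VCN software state
 *
 * Request and validate the VCN firmware, allocate the VRAM buffer
 * object that backs the firmware image, stack, heap and session data,
 * and create the scheduler entities for the decode ring and the first
 * encode ring.
 */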
int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	struct drm_sched_rq *rq;
	unsigned long bo_size;
	const char *fw_name;
	const struct common_firmware_header *hdr;
	unsigned version_major, version_minor, family_id;
	int r;

	INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		fw_name = FIRMWARE_RAVEN;
		break;
	default:
		return -EINVAL;
	}

	r = request_firmware(&adev->vcn.fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "amdgpu_vcn: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	r = amdgpu_ucode_validate(adev->vcn.fw);
	if (r) {
		dev_err(adev->dev, "amdgpu_vcn: Can't validate firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->vcn.fw);
		adev->vcn.fw = NULL;
		return r;
	}

	hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
	family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
	version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
	version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
	DRM_INFO("Found VCN firmware Version: %hu.%hu Family ID: %hu\n",
		 version_major, version_minor, family_id);

	bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
		  + AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_HEAP_SIZE
		  + AMDGPU_VCN_SESSION_SIZE * 40;
	r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.vcpu_bo,
				    &adev->vcn.gpu_addr, &adev->vcn.cpu_addr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r);
		return r;
	}

	ring = &adev->vcn.ring_dec;
	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
	r = drm_sched_entity_init(&ring->sched, &adev->vcn.entity_dec,
				  rq, NULL);
	if (r != 0) {
		DRM_ERROR("Failed setting up VCN dec run queue.\n");
		return r;
	}

	ring = &adev->vcn.ring_enc[0];
	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
	r = drm_sched_entity_init(&ring->sched, &adev->vcn.entity_enc,
				  rq, NULL);
	if (r != 0) {
		DRM_ERROR("Failed setting up VCN enc run queue.\n");
		return r;
	}

	return 0;
}

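/*
 * amdgpu_vcn_sw_fini - tear down the VCN software state
 *
 * Free the saved firmware copy, destroy the scheduler entities, free
 * the VCPU buffer object, finish the decode and encode rings and
 * release the firmware.
 */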
int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
{
	int i;

	kfree(adev->vcn.saved_bo);

	drm_sched_entity_fini(&adev->vcn.ring_dec.sched, &adev->vcn.entity_dec);

	drm_sched_entity_fini(&adev->vcn.ring_enc[0].sched, &adev->vcn.entity_enc);

	amdgpu_bo_free_kernel(&adev->vcn.vcpu_bo,
			      &adev->vcn.gpu_addr,
			      (void **)&adev->vcn.cpu_addr);

	amdgpu_ring_fini(&adev->vcn.ring_dec);

	for (i = 0; i < adev->vcn.num_enc_rings; ++i)
		amdgpu_ring_fini(&adev->vcn.ring_enc[i]);

	release_firmware(adev->vcn.fw);

	return 0;
}

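/*
 * amdgpu_vcn_suspend - save VCN state before suspend
 *
 * Cancel the pending idle work and copy the contents of the VCPU
 * buffer object to system memory so they can be restored on resume.
 */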
int amdgpu_vcn_suspend(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;

	if (adev->vcn.vcpu_bo == NULL)
		return 0;

	cancel_delayed_work_sync(&adev->vcn.idle_work);

	size = amdgpu_bo_size(adev->vcn.vcpu_bo);
	ptr = adev->vcn.cpu_addr;

	adev->vcn.saved_bo = kmalloc(size, GFP_KERNEL);
	if (!adev->vcn.saved_bo)
		return -ENOMEM;

	memcpy_fromio(adev->vcn.saved_bo, ptr, size);

	return 0;
}

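/*
 * amdgpu_vcn_resume - restore VCN state after resume
 *
 * Copy the saved VCPU contents back into the buffer object, or, when
 * no saved copy exists, reload the firmware image and clear the rest
 * of the buffer.
 */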
int amdgpu_vcn_resume(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;

	if (adev->vcn.vcpu_bo == NULL)
		return -EINVAL;

	size = amdgpu_bo_size(adev->vcn.vcpu_bo);
	ptr = adev->vcn.cpu_addr;

	if (adev->vcn.saved_bo != NULL) {
		memcpy_toio(ptr, adev->vcn.saved_bo, size);
		kfree(adev->vcn.saved_bo);
		adev->vcn.saved_bo = NULL;
	} else {
		const struct common_firmware_header *hdr;
		unsigned offset;

		hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
		offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
		memcpy_toio(adev->vcn.cpu_addr, adev->vcn.fw->data + offset,
			    le32_to_cpu(hdr->ucode_size_bytes));
		size -= le32_to_cpu(hdr->ucode_size_bytes);
		ptr += le32_to_cpu(hdr->ucode_size_bytes);
		memset_io(ptr, 0, size);
	}

	return 0;
}

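/*
 * amdgpu_vcn_idle_work_handler - power down VCN when it goes idle
 *
 * Count the fences still outstanding on the decode and encode rings;
 * if none remain, gate the block (through DPM when enabled, otherwise
 * through the powergating state), else re-arm the delayed work.
 */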
static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, vcn.idle_work.work);
	unsigned fences = amdgpu_fence_count_emitted(&adev->vcn.ring_dec);
	unsigned i;

	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
		fences += amdgpu_fence_count_emitted(&adev->vcn.ring_enc[i]);
	}

	if (fences == 0) {
		if (adev->pm.dpm_enabled)
			amdgpu_dpm_enable_uvd(adev, false);
		else
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
							       AMD_PG_STATE_GATE);
	} else {
		schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
	}
}

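/*
 * amdgpu_vcn_ring_begin_use - power up VCN before ring use
 *
 * Cancel the idle work; if it was no longer pending the block may
 * already be gated, so ungate it before new work is submitted.
 */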
void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);

	if (set_clocks) {
		if (adev->pm.dpm_enabled)
			amdgpu_dpm_enable_uvd(adev, true);
		else
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
							       AMD_PG_STATE_UNGATE);
	}
}

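/*
 * amdgpu_vcn_ring_end_use - schedule power gating after ring use
 *
 * Re-arm the idle work so the block is gated again after
 * VCN_IDLE_TIMEOUT of inactivity.
 */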
void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
{
	schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
}

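/*
 * amdgpu_vcn_dec_ring_test_ring - register write/read test on the decode ring
 *
 * Write a token into mmUVD_CONTEXT_ID through the ring, then poll the
 * register until the token shows up or the timeout expires.
 */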
int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID));
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
			  ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}
	return r;
}

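/*
 * amdgpu_vcn_dec_send_msg - submit a decoder message buffer
 *
 * Build a small IB that passes the GPU address of the message buffer
 * @bo to the VCPU and submit it, either directly on the ring or
 * through the scheduler entity. The BO is unreserved and dropped on
 * all paths.
 */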
static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
				   struct amdgpu_bo *bo, bool direct,
				   struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct dma_fence *f = NULL;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(adev, 64, &job);
	if (r)
		goto err;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);
	ib->ptr[0] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0);
	ib->ptr[1] = addr;
	ib->ptr[2] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0);
	ib->ptr[3] = addr >> 32;
	ib->ptr[4] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0);
	ib->ptr[5] = 0;
	for (i = 6; i < 16; i += 2) {
		ib->ptr[i] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP), 0);
		ib->ptr[i+1] = 0;
	}
	ib->length_dw = 16;

	if (direct) {
		r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
		job->fence = dma_fence_get(f);
		if (r)
			goto err_free;

		amdgpu_job_free(job);
	} else {
		r = amdgpu_job_submit(job, ring, &adev->vcn.entity_dec,
				      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
		if (r)
			goto err_free;
	}

	amdgpu_bo_fence(bo, f, false);
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err_free:
	amdgpu_job_free(job);

err:
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);
	return r;
}

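/*
 * amdgpu_vcn_dec_get_create_msg - build a decoder create message
 *
 * Allocate a reserved message buffer, fill in a create message for the
 * given handle and send it down the decode ring.
 */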
static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
					 struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo = NULL;
	uint32_t *msg;
	int r, i;

	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, NULL, (void **)&msg);
	if (r)
		return r;

	msg[0] = cpu_to_le32(0x00000028);
	msg[1] = cpu_to_le32(0x00000038);
	msg[2] = cpu_to_le32(0x00000001);
	msg[3] = cpu_to_le32(0x00000000);
	msg[4] = cpu_to_le32(handle);
	msg[5] = cpu_to_le32(0x00000000);
	msg[6] = cpu_to_le32(0x00000001);
	msg[7] = cpu_to_le32(0x00000028);
	msg[8] = cpu_to_le32(0x00000010);
	msg[9] = cpu_to_le32(0x00000000);
	msg[10] = cpu_to_le32(0x00000007);
	msg[11] = cpu_to_le32(0x00000000);
	msg[12] = cpu_to_le32(0x00000780);
	msg[13] = cpu_to_le32(0x00000440);
	for (i = 14; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	return amdgpu_vcn_dec_send_msg(ring, bo, true, fence);
}

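/*
 * amdgpu_vcn_dec_get_destroy_msg - build a decoder destroy message
 *
 * Allocate a reserved message buffer, fill in a destroy message for
 * the given handle and send it down the decode ring.
 */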
static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
					  bool direct, struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo = NULL;
	uint32_t *msg;
	int r, i;

	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, NULL, (void **)&msg);
	if (r)
		return r;

	msg[0] = cpu_to_le32(0x00000028);
	msg[1] = cpu_to_le32(0x00000018);
	msg[2] = cpu_to_le32(0x00000000);
	msg[3] = cpu_to_le32(0x00000002);
	msg[4] = cpu_to_le32(handle);
	msg[5] = cpu_to_le32(0x00000000);
	for (i = 6; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	return amdgpu_vcn_dec_send_msg(ring, bo, direct, fence);
}

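/*
 * amdgpu_vcn_dec_ring_test_ib - IB test on the decode ring
 *
 * Send a create message followed by a destroy message and wait for the
 * destroy fence to signal.
 */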
int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence;
	long r;

	r = amdgpu_vcn_dec_get_create_msg(ring, 1, NULL);
	if (r) {
		DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
		goto error;
	}

	r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, true, &fence);
	if (r) {
		DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
		goto error;
	}

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0) {
		DRM_ERROR("amdgpu: IB test timed out.\n");
		r = -ETIMEDOUT;
	} else if (r < 0) {
		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
	} else {
		DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
		r = 0;
	}

	dma_fence_put(fence);

error:
	return r;
}

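/*
 * amdgpu_vcn_enc_ring_test_ring - register write/read test on an encode ring
 *
 * Emit an END command and poll the read pointer until it moves past
 * the recorded value or the timeout expires.
 */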
int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr = amdgpu_ring_get_rptr(ring);
	unsigned i;
	int r;

	r = amdgpu_ring_alloc(ring, 16);
	if (r) {
		DRM_ERROR("amdgpu: vcn enc failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
	amdgpu_ring_write(ring, VCN_ENC_CMD_END);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
			  ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed\n",
			  ring->idx);
		r = -ETIMEDOUT;
	}

	return r;
}

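/*
 * amdgpu_vcn_enc_get_create_msg - build an encoder create message
 *
 * Submit a small IB carrying session info, task info and an
 * "op initialize" command for the given handle.
 */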
static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
					 struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t dummy;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	dummy = ib->gpu_addr + 1024;

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
	ib->ptr[ib->length_dw++] = dummy;
	ib->ptr[ib->length_dw++] = 0x0000000b;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
	job->fence = dma_fence_get(f);
	if (r)
		goto err;

	amdgpu_job_free(job);
	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

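/*
 * amdgpu_vcn_enc_get_destroy_msg - build an encoder destroy message
 *
 * Submit a small IB carrying session info, task info and an
 * "op close session" command for the given handle.
 */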
static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
					  struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t dummy;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	dummy = ib->gpu_addr + 1024;

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
	ib->ptr[ib->length_dw++] = dummy;
	ib->ptr[ib->length_dw++] = 0x0000000b;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
	job->fence = dma_fence_get(f);
	if (r)
		goto err;

	amdgpu_job_free(job);
	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

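/*
 * amdgpu_vcn_enc_ring_test_ib - IB test on an encode ring
 *
 * Send a create message followed by a destroy message and wait for the
 * destroy fence to signal.
 */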
int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	long r;

	r = amdgpu_vcn_enc_get_create_msg(ring, 1, NULL);
	if (r) {
		DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
		goto error;
	}

	r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, &fence);
	if (r) {
		DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
		goto error;
	}

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0) {
		DRM_ERROR("amdgpu: IB test timed out.\n");
		r = -ETIMEDOUT;
	} else if (r < 0) {
		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
	} else {
		DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
		r = 0;
	}
error:
	dma_fence_put(fence);
	return r;
}