/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_vcn.h"
#include "soc15d.h"
#include "soc15_common.h"

#include "vega10/soc15ip.h"
#include "raven1/VCN/vcn_1_0_offset.h"

/* 1 second timeout */
#define VCN_IDLE_TIMEOUT	msecs_to_jiffies(1000)

/* Firmware Names */
#define FIRMWARE_RAVEN		"amdgpu/raven_vcn.bin"

MODULE_FIRMWARE(FIRMWARE_RAVEN);

static void amdgpu_vcn_idle_work_handler(struct work_struct *work);
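
/*
 * Request and validate the VCN firmware, allocate the VCPU buffer object
 * that backs it in VRAM, and set up the decode and encode scheduler
 * entities used for indirect submissions.
 */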
int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	struct amd_sched_rq *rq;
	unsigned long bo_size;
	const char *fw_name;
	const struct common_firmware_header *hdr;
	unsigned version_major, version_minor, family_id;
	int r;

	INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		fw_name = FIRMWARE_RAVEN;
		break;
	default:
		return -EINVAL;
	}

	r = request_firmware(&adev->vcn.fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "amdgpu_vcn: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	r = amdgpu_ucode_validate(adev->vcn.fw);
	if (r) {
		dev_err(adev->dev, "amdgpu_vcn: Can't validate firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->vcn.fw);
		adev->vcn.fw = NULL;
		return r;
	}

	hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
	family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
	version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
	version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
	DRM_INFO("Found VCN firmware Version: %hu.%hu Family ID: %hu\n",
		version_major, version_minor, family_id);

	bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
		  + AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_HEAP_SIZE
		  + AMDGPU_VCN_SESSION_SIZE * 40;
	r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.vcpu_bo,
				    &adev->vcn.gpu_addr, &adev->vcn.cpu_addr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r);
		return r;
	}

	ring = &adev->vcn.ring_dec;
	rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
	r = amd_sched_entity_init(&ring->sched, &adev->vcn.entity_dec,
				  rq, amdgpu_sched_jobs);
	if (r != 0) {
		DRM_ERROR("Failed setting up VCN dec run queue.\n");
		return r;
	}

	ring = &adev->vcn.ring_enc[0];
	rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
	r = amd_sched_entity_init(&ring->sched, &adev->vcn.entity_enc,
				  rq, amdgpu_sched_jobs);
	if (r != 0) {
		DRM_ERROR("Failed setting up VCN enc run queue.\n");
		return r;
	}

	return 0;
}
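
/*
 * Tear down in reverse: free the suspend copy of the VCPU BO, the scheduler
 * entities, the VCPU BO itself and the decode ring, then release the firmware.
 */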
int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
{
	kfree(adev->vcn.saved_bo);

	amd_sched_entity_fini(&adev->vcn.ring_dec.sched, &adev->vcn.entity_dec);

	amd_sched_entity_fini(&adev->vcn.ring_enc[0].sched, &adev->vcn.entity_enc);

	amdgpu_bo_free_kernel(&adev->vcn.vcpu_bo,
			      &adev->vcn.gpu_addr,
			      (void **)&adev->vcn.cpu_addr);

	amdgpu_ring_fini(&adev->vcn.ring_dec);

	release_firmware(adev->vcn.fw);

	return 0;
}
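
/*
 * Cancel any pending idle work and save the VCPU BO contents to system
 * memory so they can be restored by amdgpu_vcn_resume().
 */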
int amdgpu_vcn_suspend(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;

	if (adev->vcn.vcpu_bo == NULL)
		return 0;

	cancel_delayed_work_sync(&adev->vcn.idle_work);

	size = amdgpu_bo_size(adev->vcn.vcpu_bo);
	ptr = adev->vcn.cpu_addr;

	adev->vcn.saved_bo = kmalloc(size, GFP_KERNEL);
	if (!adev->vcn.saved_bo)
		return -ENOMEM;

	memcpy_fromio(adev->vcn.saved_bo, ptr, size);

	return 0;
}
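
/*
 * Restore the VCPU BO, either from the copy saved at suspend time or, when
 * no saved copy exists, from the firmware image with the remainder zeroed.
 */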
int amdgpu_vcn_resume(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;

	if (adev->vcn.vcpu_bo == NULL)
		return -EINVAL;

	size = amdgpu_bo_size(adev->vcn.vcpu_bo);
	ptr = adev->vcn.cpu_addr;

	if (adev->vcn.saved_bo != NULL) {
		memcpy_toio(ptr, adev->vcn.saved_bo, size);
		kfree(adev->vcn.saved_bo);
		adev->vcn.saved_bo = NULL;
	} else {
		const struct common_firmware_header *hdr;
		unsigned offset;

		hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
		offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
		memcpy_toio(adev->vcn.cpu_addr, adev->vcn.fw->data + offset,
			    le32_to_cpu(hdr->ucode_size_bytes));
		size -= le32_to_cpu(hdr->ucode_size_bytes);
		ptr += le32_to_cpu(hdr->ucode_size_bytes);
		memset_io(ptr, 0, size);
	}

	return 0;
}
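
/*
 * Delayed work that powers down the VCN block via DPM (or drops the UVD
 * clocks to zero when DPM is not available) once no decode fences remain
 * outstanding; otherwise it re-arms itself.
 */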
static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, vcn.idle_work.work);
	unsigned fences = amdgpu_fence_count_emitted(&adev->vcn.ring_dec);

	if (fences == 0) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_uvd(adev, false);
		} else {
			amdgpu_asic_set_uvd_clocks(adev, 0, 0);
		}
	} else {
		schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
	}
}
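
/*
 * Called before a submission touches the ring: cancel the idle work and, if
 * it had already idled the block, bring the power state and clocks back up.
 */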
void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);

	if (set_clocks) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_uvd(adev, true);
		} else {
			amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
		}
	}
}
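
/*
 * Called after a submission: re-arm the idle work so the block can be
 * powered down again after VCN_IDLE_TIMEOUT of inactivity.
 */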
void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
{
	schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
}
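
/*
 * Basic decode ring test: write a known value to UVD_CONTEXT_ID through the
 * ring and poll the register until it reads back.
 */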
int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r) {
		DRM_ERROR("amdgpu: vcn dec failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID));
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n",
			 ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}
	return r;
}
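
/*
 * Submit a decoder message buffer to the VCPU, either directly on the ring
 * or through the decode scheduler entity, and optionally return the fence.
 */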
static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
				   bool direct, struct dma_fence **fence)
{
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	struct list_head head;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	struct amdgpu_device *adev = ring->adev;
	uint64_t addr;
	int i, r;

	memset(&tv, 0, sizeof(tv));
	tv.bo = &bo->tbo;

	INIT_LIST_HEAD(&head);
	list_add(&tv.head, &head);

	r = ttm_eu_reserve_buffers(&ticket, &head, true, NULL);
	if (r)
		return r;

	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
	if (r)
		goto err;

	r = amdgpu_job_alloc_with_ib(adev, 64, &job);
	if (r)
		goto err;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);
	ib->ptr[0] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0);
	ib->ptr[1] = addr;
	ib->ptr[2] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0);
	ib->ptr[3] = addr >> 32;
	ib->ptr[4] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0);
	ib->ptr[5] = 0;
	for (i = 6; i < 16; i += 2) {
		ib->ptr[i] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP), 0);
		ib->ptr[i+1] = 0;
	}
	ib->length_dw = 16;

	if (direct) {
		r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
		job->fence = dma_fence_get(f);
		if (r)
			goto err_free;

		amdgpu_job_free(job);
	} else {
		r = amdgpu_job_submit(job, ring, &adev->vcn.entity_dec,
				      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
		if (r)
			goto err_free;
	}

	ttm_eu_fence_buffer_objects(&ticket, &head, f);

	if (fence)
		*fence = dma_fence_get(f);
	amdgpu_bo_unref(&bo);
	dma_fence_put(f);

	return 0;

err_free:
	amdgpu_job_free(job);

err:
	ttm_eu_backoff_reservation(&ticket, &head);
	return r;
}
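
/*
 * Build a dummy session-create message in a VRAM buffer and send it to the
 * decoder.
 */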
static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
					 struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo;
	uint32_t *msg;
	int r, i;

	r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
			     AMDGPU_GEM_DOMAIN_VRAM,
			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
			     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
			     NULL, NULL, &bo);
	if (r)
		return r;

	r = amdgpu_bo_reserve(bo, false);
	if (r) {
		amdgpu_bo_unref(&bo);
		return r;
	}

	r = amdgpu_bo_kmap(bo, (void **)&msg);
	if (r) {
		amdgpu_bo_unreserve(bo);
		amdgpu_bo_unref(&bo);
		return r;
	}

	msg[0] = cpu_to_le32(0x00000028);
	msg[1] = cpu_to_le32(0x0000004c);
	msg[2] = cpu_to_le32(0x00000001);
	msg[3] = cpu_to_le32(0x00000000);
	msg[4] = cpu_to_le32(handle);
	msg[5] = cpu_to_le32(0x00000000);
	msg[6] = cpu_to_le32(0x00000001);
	msg[7] = cpu_to_le32(0x00000028);
	msg[8] = cpu_to_le32(0x00000024);
	msg[9] = cpu_to_le32(0x00000000);
	msg[10] = cpu_to_le32(0x00000007);
	msg[11] = cpu_to_le32(0x00000000);
	msg[12] = cpu_to_le32(0x00000000);
	msg[13] = cpu_to_le32(0x00000780);
	msg[14] = cpu_to_le32(0x00000440);
	msg[15] = cpu_to_le32(0x00000000);
	msg[16] = cpu_to_le32(0x01b37000);
	msg[17] = cpu_to_le32(0x00000000);
	msg[18] = cpu_to_le32(0x00000000);
	for (i = 19; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	amdgpu_bo_kunmap(bo);
	amdgpu_bo_unreserve(bo);

	return amdgpu_vcn_dec_send_msg(ring, bo, true, fence);
}
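
/*
 * Build a session-destroy message for the given handle and send it to the
 * decoder.
 */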
static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
					  bool direct, struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo;
	uint32_t *msg;
	int r, i;

	r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
			     AMDGPU_GEM_DOMAIN_VRAM,
			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
			     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
			     NULL, NULL, &bo);
	if (r)
		return r;

	r = amdgpu_bo_reserve(bo, false);
	if (r) {
		amdgpu_bo_unref(&bo);
		return r;
	}

	r = amdgpu_bo_kmap(bo, (void **)&msg);
	if (r) {
		amdgpu_bo_unreserve(bo);
		amdgpu_bo_unref(&bo);
		return r;
	}

	msg[0] = cpu_to_le32(0x00000028);
	msg[1] = cpu_to_le32(0x00000018);
	msg[2] = cpu_to_le32(0x00000000);
	msg[3] = cpu_to_le32(0x00000002);
	msg[4] = cpu_to_le32(handle);
	msg[5] = cpu_to_le32(0x00000000);
	for (i = 6; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	amdgpu_bo_kunmap(bo);
	amdgpu_bo_unreserve(bo);

	return amdgpu_vcn_dec_send_msg(ring, bo, direct, fence);
}
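
/*
 * Decode IB test: send a create message followed by a destroy message and
 * wait for the resulting fence.
 */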
int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence;
	long r;

	r = amdgpu_vcn_dec_get_create_msg(ring, 1, NULL);
	if (r) {
		DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
		goto error;
	}

	r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, true, &fence);
	if (r) {
		DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
		goto error;
	}

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0) {
		DRM_ERROR("amdgpu: IB test timed out.\n");
		r = -ETIMEDOUT;
	} else if (r < 0) {
		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
	} else {
		DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
		r = 0;
	}

	dma_fence_put(fence);

error:
	return r;
}
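
/*
 * Basic encode ring test: submit an END command and wait for the read
 * pointer to advance.
 */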
int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr = amdgpu_ring_get_rptr(ring);
	unsigned i;
	int r;

	r = amdgpu_ring_alloc(ring, 16);
	if (r) {
		DRM_ERROR("amdgpu: vcn enc failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
	amdgpu_ring_write(ring, VCE_CMD_END);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n",
			 ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed\n",
			  ring->idx);
		r = -ETIMEDOUT;
	}

	return r;
}
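
/*
 * Build an encoder session-create message directly in an IB and submit it on
 * the given ring.
 */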
static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
					 struct dma_fence **fence)
{
	const unsigned ib_size_dw = 1024;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t dummy;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];

	dummy = ib->gpu_addr + 1024;

	/* stitch together a VCN enc create msg */
	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x0000000c; /* len */
	ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
	ib->ptr[ib->length_dw++] = handle;

	ib->ptr[ib->length_dw++] = 0x00000040; /* len */
	ib->ptr[ib->length_dw++] = 0x01000001; /* create cmd */
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000042;
	ib->ptr[ib->length_dw++] = 0x0000000a;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = 0x00000080;
	ib->ptr[ib->length_dw++] = 0x00000060;
	ib->ptr[ib->length_dw++] = 0x00000100;
	ib->ptr[ib->length_dw++] = 0x00000100;
	ib->ptr[ib->length_dw++] = 0x0000000c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000014; /* len */
	ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */
	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
	ib->ptr[ib->length_dw++] = dummy;
	ib->ptr[ib->length_dw++] = 0x00000001;

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
	job->fence = dma_fence_get(f);
	if (r)
		goto err;

	amdgpu_job_free(job);
	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}
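
/*
 * Build an encoder session-destroy message in an IB and submit it either
 * directly or through the encode scheduler entity.
 */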
static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
					  bool direct, struct dma_fence **fence)
{
	const unsigned ib_size_dw = 1024;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];

	/* stitch together a VCN enc destroy msg */
	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x0000000c; /* len */
	ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
	ib->ptr[ib->length_dw++] = handle;

	ib->ptr[ib->length_dw++] = 0x00000020; /* len */
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0xffffffff; /* next task info, 0xffffffff if there is none */
	ib->ptr[ib->length_dw++] = 0x00000001; /* destroy session */
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0xffffffff; /* feedback not needed; 0xffffffff tells the firmware not to output feedback */
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008; /* len */
	ib->ptr[ib->length_dw++] = 0x02000001; /* destroy cmd */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	if (direct) {
		r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
		job->fence = dma_fence_get(f);
		if (r)
			goto err;

		amdgpu_job_free(job);
	} else {
		r = amdgpu_job_submit(job, ring, &ring->adev->vcn.entity_enc,
				      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
		if (r)
			goto err;
	}

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}
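
/*
 * Encode IB test: send a create message followed by a destroy message and
 * wait for the resulting fence.
 */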
int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	long r;

	r = amdgpu_vcn_enc_get_create_msg(ring, 1, NULL);
	if (r) {
		DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
		goto error;
	}

	r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, true, &fence);
	if (r) {
		DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
		goto error;
	}

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0) {
		DRM_ERROR("amdgpu: IB test timed out.\n");
		r = -ETIMEDOUT;
	} else if (r < 0) {
		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
	} else {
		DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
		r = 0;
	}
error:
	dma_fence_put(fence);
	return r;
}