/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_vcn.h"
#include "soc15d.h"
#include "soc15_common.h"

#include "vega10/soc15ip.h"
#include "raven1/VCN/vcn_1_0_offset.h"

/* 1 second timeout */
#define VCN_IDLE_TIMEOUT        msecs_to_jiffies(1000)

/* Firmware Names */
#define FIRMWARE_RAVEN          "amdgpu/raven_vcn.bin"

MODULE_FIRMWARE(FIRMWARE_RAVEN);

static void amdgpu_vcn_idle_work_handler(struct work_struct *work);

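/**
 * amdgpu_vcn_sw_init - initialize VCN software state
 *
 * @adev: amdgpu_device pointer
 *
 * Request and validate the VCN firmware, allocate the VCPU buffer
 * object backing the firmware image, stack, heap and session data,
 * and set up the scheduler entities for the decode and encode rings.
 */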
int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
{
        struct amdgpu_ring *ring;
        struct amd_sched_rq *rq;
        unsigned long bo_size;
        const char *fw_name;
        const struct common_firmware_header *hdr;
        unsigned version_major, version_minor, family_id;
        int r;

        INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);

        switch (adev->asic_type) {
        case CHIP_RAVEN:
                fw_name = FIRMWARE_RAVEN;
                break;
        default:
                return -EINVAL;
        }

        r = request_firmware(&adev->vcn.fw, fw_name, adev->dev);
        if (r) {
                dev_err(adev->dev, "amdgpu_vcn: Can't load firmware \"%s\"\n",
                        fw_name);
                return r;
        }

        r = amdgpu_ucode_validate(adev->vcn.fw);
        if (r) {
                dev_err(adev->dev, "amdgpu_vcn: Can't validate firmware \"%s\"\n",
                        fw_name);
                release_firmware(adev->vcn.fw);
                adev->vcn.fw = NULL;
                return r;
        }

        hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
        family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
        version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
        version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
        DRM_INFO("Found VCN firmware Version: %u.%u Family ID: %u\n",
                 version_major, version_minor, family_id);

        bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
                  + AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_HEAP_SIZE
                  + AMDGPU_VCN_SESSION_SIZE * 40;
        r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
                                    AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.vcpu_bo,
                                    &adev->vcn.gpu_addr, &adev->vcn.cpu_addr);
        if (r) {
                dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r);
                return r;
        }

        ring = &adev->vcn.ring_dec;
        rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
        r = amd_sched_entity_init(&ring->sched, &adev->vcn.entity_dec,
                                  rq, amdgpu_sched_jobs);
        if (r != 0) {
                DRM_ERROR("Failed setting up VCN dec run queue.\n");
                return r;
        }

        ring = &adev->vcn.ring_enc[0];
        rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
        r = amd_sched_entity_init(&ring->sched, &adev->vcn.entity_enc,
                                  rq, amdgpu_sched_jobs);
        if (r != 0) {
                DRM_ERROR("Failed setting up VCN enc run queue.\n");
                return r;
        }

        return 0;
}

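/**
 * amdgpu_vcn_sw_fini - tear down VCN software state
 *
 * @adev: amdgpu_device pointer
 *
 * Free the suspend buffer, destroy the scheduler entities, release
 * the VCPU buffer object, the decode ring and the firmware image.
 */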
int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
{
        kfree(adev->vcn.saved_bo);

        amd_sched_entity_fini(&adev->vcn.ring_dec.sched, &adev->vcn.entity_dec);

        amd_sched_entity_fini(&adev->vcn.ring_enc[0].sched, &adev->vcn.entity_enc);

        amdgpu_bo_free_kernel(&adev->vcn.vcpu_bo,
                              &adev->vcn.gpu_addr,
                              (void **)&adev->vcn.cpu_addr);

        amdgpu_ring_fini(&adev->vcn.ring_dec);

        release_firmware(adev->vcn.fw);

        return 0;
}

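/**
 * amdgpu_vcn_suspend - save the VCPU memory before suspend
 *
 * @adev: amdgpu_device pointer
 *
 * Cancel the pending idle work and copy the contents of the VCPU
 * buffer object into a kernel allocation so it can be restored on
 * resume.
 */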
int amdgpu_vcn_suspend(struct amdgpu_device *adev)
{
        unsigned size;
        void *ptr;

        if (adev->vcn.vcpu_bo == NULL)
                return 0;

        cancel_delayed_work_sync(&adev->vcn.idle_work);

        size = amdgpu_bo_size(adev->vcn.vcpu_bo);
        ptr = adev->vcn.cpu_addr;

        adev->vcn.saved_bo = kmalloc(size, GFP_KERNEL);
        if (!adev->vcn.saved_bo)
                return -ENOMEM;

        memcpy_fromio(adev->vcn.saved_bo, ptr, size);

        return 0;
}

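/**
 * amdgpu_vcn_resume - restore the VCPU memory after resume
 *
 * @adev: amdgpu_device pointer
 *
 * Copy the saved VCPU image back into the buffer object, or, if no
 * saved copy exists, reload the firmware image and zero the rest of
 * the buffer.
 */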
int amdgpu_vcn_resume(struct amdgpu_device *adev)
{
        unsigned size;
        void *ptr;

        if (adev->vcn.vcpu_bo == NULL)
                return -EINVAL;

        size = amdgpu_bo_size(adev->vcn.vcpu_bo);
        ptr = adev->vcn.cpu_addr;

        if (adev->vcn.saved_bo != NULL) {
                memcpy_toio(ptr, adev->vcn.saved_bo, size);
                kfree(adev->vcn.saved_bo);
                adev->vcn.saved_bo = NULL;
        } else {
                const struct common_firmware_header *hdr;
                unsigned offset;

                hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
                offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
                memcpy_toio(adev->vcn.cpu_addr, adev->vcn.fw->data + offset,
                            le32_to_cpu(hdr->ucode_size_bytes));
                size -= le32_to_cpu(hdr->ucode_size_bytes);
                ptr += le32_to_cpu(hdr->ucode_size_bytes);
                memset_io(ptr, 0, size);
        }

        return 0;
}

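/**
 * amdgpu_vcn_dec_send_msg - submit a decoder message buffer
 *
 * @ring: decode ring to submit on
 * @bo: buffer object containing the message
 * @direct: submit directly to the ring rather than via the scheduler
 * @fence: optional fence handed back to the caller
 *
 * Reserve and validate the message buffer, build a small IB that
 * passes the buffer address to the VCPU, and submit it either
 * directly or through the decode scheduler entity.
 */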
static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
                                   bool direct, struct dma_fence **fence)
{
        struct ttm_validate_buffer tv;
        struct ww_acquire_ctx ticket;
        struct list_head head;
        struct amdgpu_job *job;
        struct amdgpu_ib *ib;
        struct dma_fence *f = NULL;
        struct amdgpu_device *adev = ring->adev;
        uint64_t addr;
        int i, r;

        memset(&tv, 0, sizeof(tv));
        tv.bo = &bo->tbo;

        INIT_LIST_HEAD(&head);
        list_add(&tv.head, &head);

        r = ttm_eu_reserve_buffers(&ticket, &head, true, NULL);
        if (r)
                return r;

        r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
        if (r)
                goto err;

        r = amdgpu_job_alloc_with_ib(adev, 64, &job);
        if (r)
                goto err;

        ib = &job->ibs[0];
        addr = amdgpu_bo_gpu_offset(bo);
        ib->ptr[0] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0);
        ib->ptr[1] = addr;
        ib->ptr[2] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0);
        ib->ptr[3] = addr >> 32;
        ib->ptr[4] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0);
        ib->ptr[5] = 0;
        for (i = 6; i < 16; i += 2) {
                ib->ptr[i] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP), 0);
                ib->ptr[i+1] = 0;
        }
        ib->length_dw = 16;

        if (direct) {
                r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
                job->fence = dma_fence_get(f);
                if (r)
                        goto err_free;

                amdgpu_job_free(job);
        } else {
                r = amdgpu_job_submit(job, ring, &adev->vcn.entity_dec,
                                      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
                if (r)
                        goto err_free;
        }

        ttm_eu_fence_buffer_objects(&ticket, &head, f);

        if (fence)
                *fence = dma_fence_get(f);
        amdgpu_bo_unref(&bo);
        dma_fence_put(f);

        return 0;

err_free:
        amdgpu_job_free(job);

err:
        ttm_eu_backoff_reservation(&ticket, &head);
        return r;
}

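/**
 * amdgpu_vcn_dec_get_create_msg - build and submit a decoder create msg
 *
 * @ring: decode ring to submit on
 * @handle: session handle to use
 * @fence: optional fence handed back to the caller
 *
 * Allocate a CPU-accessible VRAM buffer, fill it with a minimal
 * create message for the given session handle and submit it directly
 * via amdgpu_vcn_dec_send_msg().
 */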
static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
                                         struct dma_fence **fence)
{
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_bo *bo;
        uint32_t *msg;
        int r, i;

        r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
                             AMDGPU_GEM_DOMAIN_VRAM,
                             AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
                             AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
                             NULL, NULL, &bo);
        if (r)
                return r;

        r = amdgpu_bo_reserve(bo, false);
        if (r) {
                amdgpu_bo_unref(&bo);
                return r;
        }

        r = amdgpu_bo_kmap(bo, (void **)&msg);
        if (r) {
                amdgpu_bo_unreserve(bo);
                amdgpu_bo_unref(&bo);
                return r;
        }

        /* stitch together a VCN create msg */
        msg[0] = cpu_to_le32(0x00000de4);
        msg[1] = cpu_to_le32(0x00000000);
        msg[2] = cpu_to_le32(handle);
        msg[3] = cpu_to_le32(0x00000000);
        msg[4] = cpu_to_le32(0x00000000);
        msg[5] = cpu_to_le32(0x00000000);
        msg[6] = cpu_to_le32(0x00000000);
        msg[7] = cpu_to_le32(0x00000780);
        msg[8] = cpu_to_le32(0x00000440);
        msg[9] = cpu_to_le32(0x00000000);
        msg[10] = cpu_to_le32(0x01b37000);
        for (i = 11; i < 1024; ++i)
                msg[i] = cpu_to_le32(0x0);

        amdgpu_bo_kunmap(bo);
        amdgpu_bo_unreserve(bo);

        return amdgpu_vcn_dec_send_msg(ring, bo, true, fence);
}

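/**
 * amdgpu_vcn_dec_get_destroy_msg - build and submit a decoder destroy msg
 *
 * @ring: decode ring to submit on
 * @handle: session handle to destroy
 * @direct: submit directly to the ring rather than via the scheduler
 * @fence: optional fence handed back to the caller
 *
 * Allocate a CPU-accessible VRAM buffer, fill it with a destroy
 * message for the given session handle and submit it via
 * amdgpu_vcn_dec_send_msg().
 */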
static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
                                          bool direct, struct dma_fence **fence)
{
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_bo *bo;
        uint32_t *msg;
        int r, i;

        r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
                             AMDGPU_GEM_DOMAIN_VRAM,
                             AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
                             AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
                             NULL, NULL, &bo);
        if (r)
                return r;

        r = amdgpu_bo_reserve(bo, false);
        if (r) {
                amdgpu_bo_unref(&bo);
                return r;
        }

        r = amdgpu_bo_kmap(bo, (void **)&msg);
        if (r) {
                amdgpu_bo_unreserve(bo);
                amdgpu_bo_unref(&bo);
                return r;
        }

        /* stitch together a VCN destroy msg */
        msg[0] = cpu_to_le32(0x00000de4);
        msg[1] = cpu_to_le32(0x00000002);
        msg[2] = cpu_to_le32(handle);
        msg[3] = cpu_to_le32(0x00000000);
        for (i = 4; i < 1024; ++i)
                msg[i] = cpu_to_le32(0x0);

        amdgpu_bo_kunmap(bo);
        amdgpu_bo_unreserve(bo);

        return amdgpu_vcn_dec_send_msg(ring, bo, direct, fence);
}

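/**
 * amdgpu_vcn_idle_work_handler - power down VCN when idle
 *
 * @work: the delayed work item
 *
 * If no fences are outstanding on the decode ring, power the block
 * down via DPM (or set its clocks to zero); otherwise re-arm the
 * idle work.
 */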
static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
{
        struct amdgpu_device *adev =
                container_of(work, struct amdgpu_device, vcn.idle_work.work);
        unsigned fences = amdgpu_fence_count_emitted(&adev->vcn.ring_dec);

        if (fences == 0) {
                if (adev->pm.dpm_enabled) {
                        amdgpu_dpm_enable_uvd(adev, false);
                } else {
                        amdgpu_asic_set_uvd_clocks(adev, 0, 0);
                }
        } else {
                schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
        }
}

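/**
 * amdgpu_vcn_ring_begin_use - power up VCN before use
 *
 * @ring: ring about to be used
 *
 * Cancel the pending idle work; if none was pending the block was
 * already idled, so bring the clocks back up before any submission.
 */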
void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);

        if (set_clocks) {
                if (adev->pm.dpm_enabled) {
                        amdgpu_dpm_enable_uvd(adev, true);
                } else {
                        amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
                }
        }
}

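/**
 * amdgpu_vcn_ring_end_use - re-arm idle handling after use
 *
 * @ring: ring that was used
 *
 * Schedule the delayed idle work so the block is powered down again
 * after VCN_IDLE_TIMEOUT of inactivity.
 */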
void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
{
        schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
}

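/**
 * amdgpu_vcn_dec_ring_test_ib - IB test for the decode ring
 *
 * @ring: decode ring to test
 * @timeout: how long to wait for the fence, in jiffies
 *
 * Submit a create message followed by a destroy message and wait for
 * the destroy fence to signal.
 */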
int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
        struct dma_fence *fence;
        long r;

        r = amdgpu_vcn_dec_get_create_msg(ring, 1, NULL);
        if (r) {
                DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
                goto error;
        }

        r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, true, &fence);
        if (r) {
                DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
                goto error;
        }

        r = dma_fence_wait_timeout(fence, false, timeout);
        if (r == 0) {
                DRM_ERROR("amdgpu: IB test timed out.\n");
                r = -ETIMEDOUT;
        } else if (r < 0) {
                DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
        } else {
                DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
                r = 0;
        }

        dma_fence_put(fence);

error:
        return r;
}

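/**
 * amdgpu_vcn_enc_get_create_msg - build and submit an encoder create msg
 *
 * @ring: encode ring to submit on
 * @handle: session handle to use
 * @fence: optional fence handed back to the caller
 *
 * Build an IB containing a session create command and a feedback
 * buffer description and submit it directly to the encode ring.
 */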
static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
                                         struct dma_fence **fence)
{
        const unsigned ib_size_dw = 1024;
        struct amdgpu_job *job;
        struct amdgpu_ib *ib;
        struct dma_fence *f = NULL;
        uint64_t dummy;
        int i, r;

        r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
        if (r)
                return r;

        ib = &job->ibs[0];

        dummy = ib->gpu_addr + 1024;

        /* stitch together a VCN enc create msg */
        ib->length_dw = 0;
        ib->ptr[ib->length_dw++] = 0x0000000c; /* len */
        ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
        ib->ptr[ib->length_dw++] = handle;

        ib->ptr[ib->length_dw++] = 0x00000040; /* len */
        ib->ptr[ib->length_dw++] = 0x01000001; /* create cmd */
        ib->ptr[ib->length_dw++] = 0x00000000;
        ib->ptr[ib->length_dw++] = 0x00000042;
        ib->ptr[ib->length_dw++] = 0x0000000a;
        ib->ptr[ib->length_dw++] = 0x00000001;
        ib->ptr[ib->length_dw++] = 0x00000080;
        ib->ptr[ib->length_dw++] = 0x00000060;
        ib->ptr[ib->length_dw++] = 0x00000100;
        ib->ptr[ib->length_dw++] = 0x00000100;
        ib->ptr[ib->length_dw++] = 0x0000000c;
        ib->ptr[ib->length_dw++] = 0x00000000;
        ib->ptr[ib->length_dw++] = 0x00000000;
        ib->ptr[ib->length_dw++] = 0x00000000;
        ib->ptr[ib->length_dw++] = 0x00000000;
        ib->ptr[ib->length_dw++] = 0x00000000;

        ib->ptr[ib->length_dw++] = 0x00000014; /* len */
        ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */
        ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
        ib->ptr[ib->length_dw++] = dummy;
        ib->ptr[ib->length_dw++] = 0x00000001;

        for (i = ib->length_dw; i < ib_size_dw; ++i)
                ib->ptr[i] = 0x0;

        r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
        job->fence = dma_fence_get(f);
        if (r)
                goto err;

        amdgpu_job_free(job);
        if (fence)
                *fence = dma_fence_get(f);
        dma_fence_put(f);
        return 0;

err:
        amdgpu_job_free(job);
        return r;
}

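/**
 * amdgpu_vcn_enc_get_destroy_msg - build and submit an encoder destroy msg
 *
 * @ring: encode ring to submit on
 * @handle: session handle to destroy
 * @direct: submit directly to the ring rather than via the scheduler
 * @fence: optional fence handed back to the caller
 *
 * Build an IB containing the task info and destroy commands for the
 * session and submit it either directly or through the encode
 * scheduler entity.
 */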
static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
                                          bool direct, struct dma_fence **fence)
{
        const unsigned ib_size_dw = 1024;
        struct amdgpu_job *job;
        struct amdgpu_ib *ib;
        struct dma_fence *f = NULL;
        int i, r;

        r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
        if (r)
                return r;

        ib = &job->ibs[0];

        /* stitch together a VCN enc destroy msg */
        ib->length_dw = 0;
        ib->ptr[ib->length_dw++] = 0x0000000c; /* len */
        ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
        ib->ptr[ib->length_dw++] = handle;

        ib->ptr[ib->length_dw++] = 0x00000020; /* len */
        ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
        ib->ptr[ib->length_dw++] = 0xffffffff; /* next task info, 0xffffffff if none */
        ib->ptr[ib->length_dw++] = 0x00000001; /* destroy session */
        ib->ptr[ib->length_dw++] = 0x00000000;
        ib->ptr[ib->length_dw++] = 0x00000000;
        ib->ptr[ib->length_dw++] = 0xffffffff; /* no feedback needed; with 0xffffffff the firmware writes none */
        ib->ptr[ib->length_dw++] = 0x00000000;

        ib->ptr[ib->length_dw++] = 0x00000008; /* len */
        ib->ptr[ib->length_dw++] = 0x02000001; /* destroy cmd */

        for (i = ib->length_dw; i < ib_size_dw; ++i)
                ib->ptr[i] = 0x0;

        if (direct) {
                r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
                job->fence = dma_fence_get(f);
                if (r)
                        goto err;

                amdgpu_job_free(job);
        } else {
                r = amdgpu_job_submit(job, ring, &ring->adev->vcn.entity_enc,
                                      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
                if (r)
                        goto err;
        }

        if (fence)
                *fence = dma_fence_get(f);
        dma_fence_put(f);
        return 0;

err:
        amdgpu_job_free(job);
        return r;
}

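/**
 * amdgpu_vcn_enc_ring_test_ring - basic liveness test for the encode ring
 *
 * @ring: encode ring to test
 *
 * Write an end command to the ring and poll the read pointer to
 * verify that the ring processes it.
 */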
int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        uint32_t rptr = amdgpu_ring_get_rptr(ring);
        unsigned i;
        int r;

        r = amdgpu_ring_alloc(ring, 16);
        if (r) {
                DRM_ERROR("amdgpu: vcn enc failed to lock ring %d (%d).\n",
                          ring->idx, r);
                return r;
        }
        amdgpu_ring_write(ring, VCE_CMD_END);
        amdgpu_ring_commit(ring);

        for (i = 0; i < adev->usec_timeout; i++) {
                if (amdgpu_ring_get_rptr(ring) != rptr)
                        break;
                DRM_UDELAY(1);
        }

        if (i < adev->usec_timeout) {
                DRM_INFO("ring test on %d succeeded in %d usecs\n",
                         ring->idx, i);
        } else {
                DRM_ERROR("amdgpu: ring %d test failed\n",
                          ring->idx);
                r = -ETIMEDOUT;
        }

        return r;
}

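/**
 * amdgpu_vcn_enc_ring_test_ib - IB test for the encode ring
 *
 * @ring: encode ring to test
 * @timeout: how long to wait for the fence, in jiffies
 *
 * Submit a create message followed by a destroy message and wait for
 * the destroy fence to signal.
 */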
int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
        struct dma_fence *fence = NULL;
        long r;

        r = amdgpu_vcn_enc_get_create_msg(ring, 1, NULL);
        if (r) {
                DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
                goto error;
        }

        r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, true, &fence);
        if (r) {
                DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
                goto error;
        }

        r = dma_fence_wait_timeout(fence, false, timeout);
        if (r == 0) {
                DRM_ERROR("amdgpu: IB test timed out.\n");
                r = -ETIMEDOUT;
        } else if (r < 0) {
                DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
        } else {
                DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
                r = 0;
        }
error:
        dma_fence_put(fence);
        return r;
}