/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <deathsimple@vodafone.de>
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_uvd.h"
#include "cikd.h"
#include "uvd/uvd_4_2_d.h"

/* 1 second timeout */
#define UVD_IDLE_TIMEOUT_MS	1000

/* Firmware Names */
#ifdef CONFIG_DRM_AMDGPU_CIK
#define FIRMWARE_BONAIRE	"radeon/bonaire_uvd.bin"
#define FIRMWARE_KABINI		"radeon/kabini_uvd.bin"
#define FIRMWARE_KAVERI		"radeon/kaveri_uvd.bin"
#define FIRMWARE_HAWAII		"radeon/hawaii_uvd.bin"
#define FIRMWARE_MULLINS	"radeon/mullins_uvd.bin"
#endif
#define FIRMWARE_TONGA		"amdgpu/tonga_uvd.bin"
#define FIRMWARE_CARRIZO	"amdgpu/carrizo_uvd.bin"
#define FIRMWARE_FIJI		"amdgpu/fiji_uvd.bin"

/**
 * amdgpu_uvd_cs_ctx - Command submission parser context
 *
 * Used for emulating virtual memory support on UVD 4.2.
 */
struct amdgpu_uvd_cs_ctx {
	struct amdgpu_cs_parser *parser;
	unsigned reg, count;
	unsigned data0, data1;
	unsigned idx;
	unsigned ib_idx;

	/* does the IB have a msg command */
	bool has_msg_cmd;

	/* minimum buffer sizes */
	unsigned *buf_sizes;
};

#ifdef CONFIG_DRM_AMDGPU_CIK
MODULE_FIRMWARE(FIRMWARE_BONAIRE);
MODULE_FIRMWARE(FIRMWARE_KABINI);
MODULE_FIRMWARE(FIRMWARE_KAVERI);
MODULE_FIRMWARE(FIRMWARE_HAWAII);
MODULE_FIRMWARE(FIRMWARE_MULLINS);
#endif
MODULE_FIRMWARE(FIRMWARE_TONGA);
MODULE_FIRMWARE(FIRMWARE_CARRIZO);
MODULE_FIRMWARE(FIRMWARE_FIJI);

static void amdgpu_uvd_note_usage(struct amdgpu_device *adev);
static void amdgpu_uvd_idle_work_handler(struct work_struct *work);

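/**
 * amdgpu_uvd_sw_init - UVD software init
 *
 * @adev: amdgpu_device pointer
 *
 * Load and validate the UVD firmware, allocate and pin the VCPU buffer
 * object in VRAM and clear the session handle table.
 */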
int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
{
	unsigned long bo_size;
	const char *fw_name;
	const struct common_firmware_header *hdr;
	unsigned version_major, version_minor, family_id;
	int i, r;

	INIT_DELAYED_WORK(&adev->uvd.idle_work, amdgpu_uvd_idle_work_handler);

	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
		fw_name = FIRMWARE_BONAIRE;
		break;
	case CHIP_KABINI:
		fw_name = FIRMWARE_KABINI;
		break;
	case CHIP_KAVERI:
		fw_name = FIRMWARE_KAVERI;
		break;
	case CHIP_HAWAII:
		fw_name = FIRMWARE_HAWAII;
		break;
	case CHIP_MULLINS:
		fw_name = FIRMWARE_MULLINS;
		break;
#endif
	case CHIP_TONGA:
		fw_name = FIRMWARE_TONGA;
		break;
	case CHIP_FIJI:
		fw_name = FIRMWARE_FIJI;
		break;
	case CHIP_CARRIZO:
		fw_name = FIRMWARE_CARRIZO;
		break;
	default:
		return -EINVAL;
	}

	r = request_firmware(&adev->uvd.fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "amdgpu_uvd: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	r = amdgpu_ucode_validate(adev->uvd.fw);
	if (r) {
		dev_err(adev->dev, "amdgpu_uvd: Can't validate firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->uvd.fw);
		adev->uvd.fw = NULL;
		return r;
	}

	hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
	family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
	version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
	version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
	DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n",
		 version_major, version_minor, family_id);

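	/*
	 * The VCPU object holds the firmware image (plus an 8 byte
	 * alignment pad) followed by the stack and heap areas the UVD
	 * firmware works in; see AMDGPU_UVD_STACK_SIZE/HEAP_SIZE.
	 */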
	bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
		  + AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE;
	r = amdgpu_bo_create(adev, bo_size, PAGE_SIZE, true,
			     AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &adev->uvd.vcpu_bo);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r);
		return r;
	}

	r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false);
	if (r) {
		amdgpu_bo_unref(&adev->uvd.vcpu_bo);
		dev_err(adev->dev, "(%d) failed to reserve UVD bo\n", r);
		return r;
	}

	r = amdgpu_bo_pin(adev->uvd.vcpu_bo, AMDGPU_GEM_DOMAIN_VRAM,
			  &adev->uvd.gpu_addr);
	if (r) {
		amdgpu_bo_unreserve(adev->uvd.vcpu_bo);
		amdgpu_bo_unref(&adev->uvd.vcpu_bo);
		dev_err(adev->dev, "(%d) UVD bo pin failed\n", r);
		return r;
	}

	r = amdgpu_bo_kmap(adev->uvd.vcpu_bo, &adev->uvd.cpu_addr);
	if (r) {
		dev_err(adev->dev, "(%d) UVD map failed\n", r);
		return r;
	}

	amdgpu_bo_unreserve(adev->uvd.vcpu_bo);

	for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
		atomic_set(&adev->uvd.handles[i], 0);
		adev->uvd.filp[i] = NULL;
	}

	/* from UVD v5.0 onward the HW addressing capacity increased to 64 bits */
	if (!amdgpu_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_UVD, 5, 0))
		adev->uvd.address_64_bit = true;

	return 0;
}

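/**
 * amdgpu_uvd_sw_fini - UVD software fini
 *
 * @adev: amdgpu_device pointer
 *
 * Unmap, unpin and free the VCPU buffer object, tear down the ring and
 * release the firmware.
 */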
int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
{
	int r;

	if (adev->uvd.vcpu_bo == NULL)
		return 0;

	r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false);
	if (!r) {
		amdgpu_bo_kunmap(adev->uvd.vcpu_bo);
		amdgpu_bo_unpin(adev->uvd.vcpu_bo);
		amdgpu_bo_unreserve(adev->uvd.vcpu_bo);
	}

	amdgpu_bo_unref(&adev->uvd.vcpu_bo);

	amdgpu_ring_fini(&adev->uvd.ring);

	release_firmware(adev->uvd.fw);

	return 0;
}

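/**
 * amdgpu_uvd_suspend - save UVD state before suspend
 *
 * @adev: amdgpu_device pointer
 *
 * If any session handles are still open, save the stack/heap part of the
 * VCPU buffer object (everything after the firmware image) to system
 * memory so it can be restored on resume.
 */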
int amdgpu_uvd_suspend(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;
	const struct common_firmware_header *hdr;
	int i;

	if (adev->uvd.vcpu_bo == NULL)
		return 0;

	for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i)
		if (atomic_read(&adev->uvd.handles[i]))
			break;

	if (i == AMDGPU_MAX_UVD_HANDLES)
		return 0;

	hdr = (const struct common_firmware_header *)adev->uvd.fw->data;

	size = amdgpu_bo_size(adev->uvd.vcpu_bo);
	size -= le32_to_cpu(hdr->ucode_size_bytes);

	ptr = adev->uvd.cpu_addr;
	ptr += le32_to_cpu(hdr->ucode_size_bytes);

	adev->uvd.saved_bo = kmalloc(size, GFP_KERNEL);
	if (!adev->uvd.saved_bo)
		return -ENOMEM;
	memcpy(adev->uvd.saved_bo, ptr, size);

	return 0;
}

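/**
 * amdgpu_uvd_resume - restore UVD state on resume
 *
 * @adev: amdgpu_device pointer
 *
 * Copy the firmware image back into the VCPU buffer object, then either
 * restore the saved stack/heap content or clear it.
 */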
int amdgpu_uvd_resume(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;
	const struct common_firmware_header *hdr;
	unsigned offset;

	if (adev->uvd.vcpu_bo == NULL)
		return -EINVAL;

	hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
	offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
	memcpy(adev->uvd.cpu_addr, (adev->uvd.fw->data) + offset,
		(adev->uvd.fw->size) - offset);

	size = amdgpu_bo_size(adev->uvd.vcpu_bo);
	size -= le32_to_cpu(hdr->ucode_size_bytes);
	ptr = adev->uvd.cpu_addr;
	ptr += le32_to_cpu(hdr->ucode_size_bytes);

	if (adev->uvd.saved_bo != NULL) {
		memcpy(ptr, adev->uvd.saved_bo, size);
		kfree(adev->uvd.saved_bo);
		adev->uvd.saved_bo = NULL;
	} else
		memset(ptr, 0, size);

	return 0;
}

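/**
 * amdgpu_uvd_free_handles - destroy sessions owned by a file
 *
 * @adev: amdgpu_device pointer
 * @filp: DRM file the sessions belong to
 *
 * Emit a destroy message for every session handle still open on @filp
 * and wait for the messages to be processed, so a dying process cannot
 * leak UVD sessions.
 */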
void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
{
	struct amdgpu_ring *ring = &adev->uvd.ring;
	int i, r;

	for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
		uint32_t handle = atomic_read(&adev->uvd.handles[i]);
		if (handle != 0 && adev->uvd.filp[i] == filp) {
			struct amdgpu_fence *fence;

			amdgpu_uvd_note_usage(adev);

			r = amdgpu_uvd_get_destroy_msg(ring, handle, &fence);
			if (r) {
				DRM_ERROR("Error destroying UVD (%d)!\n", r);
				continue;
			}

			amdgpu_fence_wait(fence, false);
			amdgpu_fence_unref(&fence);

			adev->uvd.filp[i] = NULL;
			atomic_set(&adev->uvd.handles[i], 0);
		}
	}
}

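/**
 * amdgpu_uvd_force_into_uvd_segment - restrict placement to the UVD segment
 *
 * @rbo: buffer object to restrict
 *
 * Limit every allowed placement to the first 256MB window; without
 * 64-bit addressing the UVD firmware can only reach buffers inside a
 * single 256MB segment.
 */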
static void amdgpu_uvd_force_into_uvd_segment(struct amdgpu_bo *rbo)
{
	int i;
	for (i = 0; i < rbo->placement.num_placement; ++i) {
		rbo->placements[i].fpfn = 0 >> PAGE_SHIFT;
		rbo->placements[i].lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT;
	}
}

/**
 * amdgpu_uvd_cs_pass1 - first parsing round
 *
 * @ctx: UVD parser context
 *
 * Make sure UVD message and feedback buffers are in VRAM and
 * nobody is violating a 256MB boundary.
 */
static int amdgpu_uvd_cs_pass1(struct amdgpu_uvd_cs_ctx *ctx)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo;
	uint32_t cmd, lo, hi;
	uint64_t addr;
	int r = 0;

	lo = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data0);
	hi = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data1);
	addr = ((uint64_t)lo) | (((uint64_t)hi) << 32);

	mapping = amdgpu_cs_find_mapping(ctx->parser, addr, &bo);
	if (mapping == NULL) {
		DRM_ERROR("Can't find BO for addr 0x%08Lx\n", addr);
		return -EINVAL;
	}

	if (!ctx->parser->adev->uvd.address_64_bit) {
		/* check if it's a message or feedback command */
		cmd = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->idx) >> 1;
		if (cmd == 0x0 || cmd == 0x3) {
			/* yes, force it into VRAM */
			uint32_t domain = AMDGPU_GEM_DOMAIN_VRAM;
			amdgpu_ttm_placement_from_domain(bo, domain);
		}
		amdgpu_uvd_force_into_uvd_segment(bo);

		r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	}

	return r;
}

/**
 * amdgpu_uvd_cs_msg_decode - handle UVD decode message
 *
 * @msg: pointer to message structure
 * @buf_sizes: returned buffer sizes
 *
 * Peek into the decode message and calculate the necessary buffer sizes.
 */
static int amdgpu_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
{
	unsigned stream_type = msg[4];
	unsigned width = msg[6];
	unsigned height = msg[7];
	unsigned dpb_size = msg[9];
	unsigned pitch = msg[28];
	unsigned level = msg[57];

	unsigned width_in_mb = width / 16;
	unsigned height_in_mb = ALIGN(height / 16, 2);
	unsigned fs_in_mb = width_in_mb * height_in_mb;

	unsigned image_size, tmp, min_dpb_size, num_dpb_buffer;
	unsigned min_ctx_size = 0;

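	/* one 4:2:0 frame: luma plane plus half-size chroma, 1KB aligned */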
	image_size = width * height;
	image_size += image_size / 2;
	image_size = ALIGN(image_size, 1024);

	switch (stream_type) {
	case 0: /* H264 */
	case 7: /* H264 Perf */
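		/*
		 * These constants match the MaxDpbMbs values from the
		 * H.264 level limits table; dividing by the frame size
		 * in macroblocks gives the maximum number of reference
		 * frames a stream at that level may use.
		 */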
		switch (level) {
		case 30:
			num_dpb_buffer = 8100 / fs_in_mb;
			break;
		case 31:
			num_dpb_buffer = 18000 / fs_in_mb;
			break;
		case 32:
			num_dpb_buffer = 20480 / fs_in_mb;
			break;
		case 41:
			num_dpb_buffer = 32768 / fs_in_mb;
			break;
		case 42:
			num_dpb_buffer = 34816 / fs_in_mb;
			break;
		case 50:
			num_dpb_buffer = 110400 / fs_in_mb;
			break;
		case 51:
			num_dpb_buffer = 184320 / fs_in_mb;
			break;
		default:
			num_dpb_buffer = 184320 / fs_in_mb;
			break;
		}
		num_dpb_buffer++;
		if (num_dpb_buffer > 17)
			num_dpb_buffer = 17;

		/* reference picture buffer */
		min_dpb_size = image_size * num_dpb_buffer;

		/* macroblock context buffer */
		min_dpb_size += width_in_mb * height_in_mb * num_dpb_buffer * 192;

		/* IT surface buffer */
		min_dpb_size += width_in_mb * height_in_mb * 32;
		break;

	case 1: /* VC1 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;

		/* CONTEXT_BUFFER */
		min_dpb_size += width_in_mb * height_in_mb * 128;

		/* IT surface buffer */
		min_dpb_size += width_in_mb * 64;

		/* DB surface buffer */
		min_dpb_size += width_in_mb * 128;

		/* BP */
		tmp = max(width_in_mb, height_in_mb);
		min_dpb_size += ALIGN(tmp * 7 * 16, 64);
		break;

	case 3: /* MPEG2 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;
		break;

	case 4: /* MPEG4 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;

		/* CM */
		min_dpb_size += width_in_mb * height_in_mb * 64;

		/* IT surface buffer */
		min_dpb_size += ALIGN(width_in_mb * height_in_mb * 32, 64);
		break;

	case 16: /* H265 */
		image_size = (ALIGN(width, 16) * ALIGN(height, 16) * 3) / 2;
		image_size = ALIGN(image_size, 256);

		num_dpb_buffer = (le32_to_cpu(msg[59]) & 0xff) + 2;
		min_dpb_size = image_size * num_dpb_buffer;
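		/* context buffer: scales with picture area and DPB size,
		 * plus a fixed 52KB block */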
		min_ctx_size = ((width + 255) / 16) * ((height + 255) / 16)
			       * 16 * num_dpb_buffer + 52 * 1024;
		break;

	default:
		DRM_ERROR("UVD codec not handled %d!\n", stream_type);
		return -EINVAL;
	}

	if (width > pitch) {
		DRM_ERROR("Invalid UVD decoding target pitch!\n");
		return -EINVAL;
	}

	if (dpb_size < min_dpb_size) {
		DRM_ERROR("Invalid dpb_size in UVD message (%d / %d)!\n",
			  dpb_size, min_dpb_size);
		return -EINVAL;
	}

	buf_sizes[0x1] = dpb_size;
	buf_sizes[0x2] = image_size;
	buf_sizes[0x4] = min_ctx_size;
	return 0;
}
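
/*
 * Worked example of the sizing above (a sketch, not taken from a real
 * stream): a 1920x1088 H264 level 4.1 decode gives width_in_mb = 120,
 * height_in_mb = 68 and fs_in_mb = 8160, so num_dpb_buffer =
 * 32768 / 8160 + 1 = 5. image_size = 1920 * 1088 * 3 / 2 = 3133440,
 * and min_dpb_size = 3133440 * 5 + 8160 * 5 * 192 + 8160 * 32
 * = 23761920 bytes, roughly 22.7MB of DPB space.
 */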

/**
 * amdgpu_uvd_cs_msg - handle UVD message
 *
 * @ctx: UVD parser context
 * @bo: buffer object containing the message
 * @offset: offset into the buffer object
 *
 * Peek into the UVD message and extract the session id.
 * Make sure that we don't open up too many sessions.
 */
static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
			     struct amdgpu_bo *bo, unsigned offset)
{
	struct amdgpu_device *adev = ctx->parser->adev;
	int32_t *msg, msg_type, handle;
	struct fence *f;
	void *ptr;

	int i, r;

	if (offset & 0x3F) {
		DRM_ERROR("UVD messages must be 64 byte aligned!\n");
		return -EINVAL;
	}

	f = reservation_object_get_excl(bo->tbo.resv);
	if (f) {
		r = amdgpu_fence_wait((struct amdgpu_fence *)f, false);
		if (r) {
			DRM_ERROR("Failed waiting for UVD message (%d)!\n", r);
			return r;
		}
	}

	r = amdgpu_bo_kmap(bo, &ptr);
	if (r) {
		DRM_ERROR("Failed mapping the UVD message (%d)!\n", r);
		return r;
	}

	msg = ptr + offset;

	msg_type = msg[1];
	handle = msg[2];

	if (handle == 0) {
		DRM_ERROR("Invalid UVD handle!\n");
		amdgpu_bo_kunmap(bo);
		return -EINVAL;
	}

	if (msg_type == 1) {
		/* it's a decode msg, calc buffer sizes */
		r = amdgpu_uvd_cs_msg_decode(msg, ctx->buf_sizes);
		amdgpu_bo_kunmap(bo);
		if (r)
			return r;

	} else if (msg_type == 2) {
		/* it's a destroy msg, free the handle */
		for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i)
			atomic_cmpxchg(&adev->uvd.handles[i], handle, 0);
		amdgpu_bo_kunmap(bo);
		return 0;
	} else {
		/* it's a create msg */
		amdgpu_bo_kunmap(bo);

		if (msg_type != 0) {
			DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
			return -EINVAL;
		}

		/* it's a create msg, no special handling needed */
	}

	/* create or decode, validate the handle */
	for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
		if (atomic_read(&adev->uvd.handles[i]) == handle)
			return 0;
	}

	/* handle not found, try to allocate a new one */
	for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
		if (!atomic_cmpxchg(&adev->uvd.handles[i], 0, handle)) {
			adev->uvd.filp[i] = ctx->parser->filp;
			return 0;
		}
	}

	DRM_ERROR("No more free UVD handles!\n");
	return -EINVAL;
}

/**
 * amdgpu_uvd_cs_pass2 - second parsing round
 *
 * @ctx: UVD parser context
 *
 * Patch buffer addresses, make sure buffer sizes are correct.
 */
static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo;
	struct amdgpu_ib *ib;
	uint32_t cmd, lo, hi;
	uint64_t start, end;
	uint64_t addr;
	int r;

	lo = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data0);
	hi = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data1);
	addr = ((uint64_t)lo) | (((uint64_t)hi) << 32);

	mapping = amdgpu_cs_find_mapping(ctx->parser, addr, &bo);
	if (mapping == NULL)
		return -EINVAL;

	start = amdgpu_bo_gpu_offset(bo);

	end = (mapping->it.last + 1 - mapping->it.start);
	end = end * AMDGPU_GPU_PAGE_SIZE + start;

	addr -= ((uint64_t)mapping->it.start) * AMDGPU_GPU_PAGE_SIZE;
	start += addr;

	ib = &ctx->parser->ibs[ctx->ib_idx];
	ib->ptr[ctx->data0] = start & 0xFFFFFFFF;
	ib->ptr[ctx->data1] = start >> 32;

	cmd = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->idx) >> 1;
	if (cmd < 0x4) {
		if ((end - start) < ctx->buf_sizes[cmd]) {
			DRM_ERROR("buffer (%d) too small (%d / %d)!\n", cmd,
				  (unsigned)(end - start),
				  ctx->buf_sizes[cmd]);
			return -EINVAL;
		}

	} else if (cmd == 0x206) {
		if ((end - start) < ctx->buf_sizes[4]) {
			DRM_ERROR("buffer (%d) too small (%d / %d)!\n", cmd,
				  (unsigned)(end - start),
				  ctx->buf_sizes[4]);
			return -EINVAL;
		}
	} else if ((cmd != 0x100) && (cmd != 0x204)) {
		DRM_ERROR("invalid UVD command %X!\n", cmd);
		return -EINVAL;
	}

	if (!ctx->parser->adev->uvd.address_64_bit) {
		if ((start >> 28) != ((end - 1) >> 28)) {
			DRM_ERROR("reloc %LX-%LX crossing 256MB boundary!\n",
				  start, end);
			return -EINVAL;
		}

		if ((cmd == 0 || cmd == 0x3) &&
		    (start >> 28) != (ctx->parser->adev->uvd.gpu_addr >> 28)) {
			DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n",
				  start, end);
			return -EINVAL;
		}
	}

	if (cmd == 0) {
		ctx->has_msg_cmd = true;
		r = amdgpu_uvd_cs_msg(ctx, bo, addr);
		if (r)
			return r;
	} else if (!ctx->has_msg_cmd) {
		DRM_ERROR("Message needed before other commands are sent!\n");
		return -EINVAL;
	}

	return 0;
}

/**
 * amdgpu_uvd_cs_reg - parse register writes
 *
 * @ctx: UVD parser context
 * @cb: callback function
 *
 * Parse the register writes, call cb on each complete command.
 */
static int amdgpu_uvd_cs_reg(struct amdgpu_uvd_cs_ctx *ctx,
			     int (*cb)(struct amdgpu_uvd_cs_ctx *ctx))
{
	struct amdgpu_ib *ib = &ctx->parser->ibs[ctx->ib_idx];
	int i, r;

	ctx->idx++;
	for (i = 0; i <= ctx->count; ++i) {
		unsigned reg = ctx->reg + i;

		if (ctx->idx >= ib->length_dw) {
			DRM_ERROR("Register command after end of CS!\n");
			return -EINVAL;
		}

		switch (reg) {
		case mmUVD_GPCOM_VCPU_DATA0:
			ctx->data0 = ctx->idx;
			break;
		case mmUVD_GPCOM_VCPU_DATA1:
			ctx->data1 = ctx->idx;
			break;
		case mmUVD_GPCOM_VCPU_CMD:
			r = cb(ctx);
			if (r)
				return r;
			break;
		case mmUVD_ENGINE_CNTL:
			break;
		default:
			DRM_ERROR("Invalid reg 0x%X!\n", reg);
			return -EINVAL;
		}
		ctx->idx++;
	}
	return 0;
}

/**
 * amdgpu_uvd_cs_packets - parse UVD packets
 *
 * @ctx: UVD parser context
 * @cb: callback function
 *
 * Parse the command stream packets.
 */
static int amdgpu_uvd_cs_packets(struct amdgpu_uvd_cs_ctx *ctx,
				 int (*cb)(struct amdgpu_uvd_cs_ctx *ctx))
{
	struct amdgpu_ib *ib = &ctx->parser->ibs[ctx->ib_idx];
	int r;

	for (ctx->idx = 0; ctx->idx < ib->length_dw; ) {
		uint32_t cmd = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->idx);
		unsigned type = CP_PACKET_GET_TYPE(cmd);
		switch (type) {
		case PACKET_TYPE0:
			ctx->reg = CP_PACKET0_GET_REG(cmd);
			ctx->count = CP_PACKET_GET_COUNT(cmd);
			r = amdgpu_uvd_cs_reg(ctx, cb);
			if (r)
				return r;
			break;
		case PACKET_TYPE2:
			++ctx->idx;
			break;
		default:
			DRM_ERROR("Unknown packet type %d!\n", type);
			return -EINVAL;
		}
	}
	return 0;
}
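
/*
 * A minimal sketch of the packet stream these parsers accept (this
 * mirrors what amdgpu_uvd_send_msg() below emits; it is not the only
 * valid layout):
 *
 *   PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0), <lower 32 bits of buffer addr>,
 *   PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0), <upper 32 bits of buffer addr>,
 *   PACKET0(mmUVD_GPCOM_VCPU_CMD, 0),   <command dword>,
 *   PACKET2(0) padding up to a multiple of 16 dwords.
 */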

/**
 * amdgpu_uvd_ring_parse_cs - UVD command submission parser
 *
 * @parser: Command submission parser context
 *
 * Parse the command stream, patch in addresses as necessary.
 */
int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
{
	struct amdgpu_uvd_cs_ctx ctx = {};
	unsigned buf_sizes[] = {
		[0x00000000]	=	2048,
		[0x00000001]	=	0xFFFFFFFF,
		[0x00000002]	=	0xFFFFFFFF,
		[0x00000003]	=	2048,
		[0x00000004]	=	0xFFFFFFFF,
	};
	struct amdgpu_ib *ib = &parser->ibs[ib_idx];
	int r;

	if (ib->length_dw % 16) {
		DRM_ERROR("UVD IB length (%d) not 16 dwords aligned!\n",
			  ib->length_dw);
		return -EINVAL;
	}

	ctx.parser = parser;
	ctx.buf_sizes = buf_sizes;
	ctx.ib_idx = ib_idx;

	/* first round, make sure the buffers are actually in the UVD segment */
	r = amdgpu_uvd_cs_packets(&ctx, amdgpu_uvd_cs_pass1);
	if (r)
		return r;

	/* second round, patch buffer addresses into the command stream */
	r = amdgpu_uvd_cs_packets(&ctx, amdgpu_uvd_cs_pass2);
	if (r)
		return r;

	if (!ctx.has_msg_cmd) {
		DRM_ERROR("UVD-IBs need a msg command!\n");
		return -EINVAL;
	}

	amdgpu_uvd_note_usage(ctx.parser->adev);

	return 0;
}

static int amdgpu_uvd_free_job(
	struct amdgpu_cs_parser *sched_job)
{
	amdgpu_ib_free(sched_job->adev, sched_job->ibs);
	kfree(sched_job->ibs);
	return 0;
}

static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring,
			       struct amdgpu_bo *bo,
			       struct amdgpu_fence **fence)
{
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	struct list_head head;
	struct amdgpu_ib *ib = NULL;
	struct amdgpu_device *adev = ring->adev;
	uint64_t addr;
	int i, r;

	memset(&tv, 0, sizeof(tv));
	tv.bo = &bo->tbo;

	INIT_LIST_HEAD(&head);
	list_add(&tv.head, &head);

	r = ttm_eu_reserve_buffers(&ticket, &head, true, NULL);
	if (r)
		return r;

	if (!bo->adev->uvd.address_64_bit) {
		amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
		amdgpu_uvd_force_into_uvd_segment(bo);
	}

	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
	if (r)
		goto err;
	ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
	if (!ib) {
		r = -ENOMEM;
		goto err;
	}
	r = amdgpu_ib_get(ring, NULL, 64, ib);
	if (r)
		goto err1;

	addr = amdgpu_bo_gpu_offset(bo);
	ib->ptr[0] = PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0);
	ib->ptr[1] = addr;
	ib->ptr[2] = PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0);
	ib->ptr[3] = addr >> 32;
	ib->ptr[4] = PACKET0(mmUVD_GPCOM_VCPU_CMD, 0);
	ib->ptr[5] = 0;
	for (i = 6; i < 16; ++i)
		ib->ptr[i] = PACKET2(0);
	ib->length_dw = 16;

	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
						 &amdgpu_uvd_free_job,
						 AMDGPU_FENCE_OWNER_UNDEFINED);
	if (r)
		goto err2;

	ttm_eu_fence_buffer_objects(&ticket, &head, &ib->fence->base);

	if (fence)
		*fence = amdgpu_fence_ref(ib->fence);
	amdgpu_bo_unref(&bo);

	if (amdgpu_enable_scheduler)
		return 0;

	amdgpu_ib_free(ring->adev, ib);
	kfree(ib);
	return 0;
err2:
	amdgpu_ib_free(ring->adev, ib);
err1:
	kfree(ib);
err:
	ttm_eu_backoff_reservation(&ticket, &head);
	return r;
}

/*
 * Multiple fence commands without any stream commands in between can
 * crash the vcpu, so just try to emit a dummy create/destroy msg to
 * avoid this.
 */
int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
			      struct amdgpu_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo;
	uint32_t *msg;
	int r, i;

	r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
			     AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &bo);
	if (r)
		return r;

	r = amdgpu_bo_reserve(bo, false);
	if (r) {
		amdgpu_bo_unref(&bo);
		return r;
	}

	r = amdgpu_bo_kmap(bo, (void **)&msg);
	if (r) {
		amdgpu_bo_unreserve(bo);
		amdgpu_bo_unref(&bo);
		return r;
	}

	/* stitch together an UVD create msg */
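	/*
	 * Field meanings below are inferred from the matching radeon UVD
	 * code: msg[0] is the message size, msg[1] the message type
	 * (0 = create), msg[2] the session handle, and msg[7]/msg[8]
	 * look like width/height (0x780 = 1920, 0x440 = 1088).
	 */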
	msg[0] = cpu_to_le32(0x00000de4);
	msg[1] = cpu_to_le32(0x00000000);
	msg[2] = cpu_to_le32(handle);
	msg[3] = cpu_to_le32(0x00000000);
	msg[4] = cpu_to_le32(0x00000000);
	msg[5] = cpu_to_le32(0x00000000);
	msg[6] = cpu_to_le32(0x00000000);
	msg[7] = cpu_to_le32(0x00000780);
	msg[8] = cpu_to_le32(0x00000440);
	msg[9] = cpu_to_le32(0x00000000);
	msg[10] = cpu_to_le32(0x01b37000);
	for (i = 11; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	amdgpu_bo_kunmap(bo);
	amdgpu_bo_unreserve(bo);

	return amdgpu_uvd_send_msg(ring, bo, fence);
}

int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
			       struct amdgpu_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo;
	uint32_t *msg;
	int r, i;

	r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
			     AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &bo);
	if (r)
		return r;

	r = amdgpu_bo_reserve(bo, false);
	if (r) {
		amdgpu_bo_unref(&bo);
		return r;
	}

	r = amdgpu_bo_kmap(bo, (void **)&msg);
	if (r) {
		amdgpu_bo_unreserve(bo);
		amdgpu_bo_unref(&bo);
		return r;
	}

	/* stitch together an UVD destroy msg */
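	/* same layout as the create msg, but msg[1] = 2 (destroy) */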
	msg[0] = cpu_to_le32(0x00000de4);
	msg[1] = cpu_to_le32(0x00000002);
	msg[2] = cpu_to_le32(handle);
	msg[3] = cpu_to_le32(0x00000000);
	for (i = 4; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	amdgpu_bo_kunmap(bo);
	amdgpu_bo_unreserve(bo);

	return amdgpu_uvd_send_msg(ring, bo, fence);
}

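/**
 * amdgpu_uvd_idle_work_handler - power off UVD when idle
 *
 * @work: delayed work item
 *
 * If no fences are pending and no session handles are open, power the
 * UVD block down (or drop its clocks to zero); otherwise re-arm the
 * delayed work for another timeout period.
 */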
static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, uvd.idle_work.work);
	unsigned i, fences, handles = 0;

	fences = amdgpu_fence_count_emitted(&adev->uvd.ring);

	for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i)
		if (atomic_read(&adev->uvd.handles[i]))
			++handles;

	if (fences == 0 && handles == 0) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_uvd(adev, false);
		} else {
			amdgpu_asic_set_uvd_clocks(adev, 0, 0);
		}
	} else {
		schedule_delayed_work(&adev->uvd.idle_work,
				      msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));
	}
}

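/**
 * amdgpu_uvd_note_usage - track UVD activity
 *
 * @adev: amdgpu_device pointer
 *
 * Push the idle timeout back by re-arming the delayed work; if the
 * block had already gone idle, turn its power/clocks back on.
 */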
static void amdgpu_uvd_note_usage(struct amdgpu_device *adev)
{
	bool set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work);
	set_clocks &= schedule_delayed_work(&adev->uvd.idle_work,
					    msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));

	if (set_clocks) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_uvd(adev, true);
		} else {
			amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
		}
	}
}