/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *	Christian König <deathsimple@vodafone.de>
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_uvd.h"
#include "cikd.h"
#include "uvd/uvd_4_2_d.h"

/* 1 second timeout */
#define UVD_IDLE_TIMEOUT_MS	1000

/* Firmware Names */
#ifdef CONFIG_DRM_AMDGPU_CIK
#define FIRMWARE_BONAIRE	"radeon/bonaire_uvd.bin"
#define FIRMWARE_KABINI		"radeon/kabini_uvd.bin"
#define FIRMWARE_KAVERI		"radeon/kaveri_uvd.bin"
#define FIRMWARE_HAWAII		"radeon/hawaii_uvd.bin"
#define FIRMWARE_MULLINS	"radeon/mullins_uvd.bin"
#endif
#define FIRMWARE_TONGA		"amdgpu/tonga_uvd.bin"
#define FIRMWARE_CARRIZO	"amdgpu/carrizo_uvd.bin"
#define FIRMWARE_FIJI		"amdgpu/fiji_uvd.bin"
#define FIRMWARE_STONEY		"amdgpu/stoney_uvd.bin"
#define FIRMWARE_POLARIS10	"amdgpu/polaris10_uvd.bin"
#define FIRMWARE_POLARIS11	"amdgpu/polaris11_uvd.bin"

/**
 * amdgpu_uvd_cs_ctx - Command submission parser context
 *
 * Used for emulating virtual memory support on UVD 4.2.
 */
struct amdgpu_uvd_cs_ctx {
	struct amdgpu_cs_parser *parser;
	unsigned reg, count;
	unsigned data0, data1;
	unsigned idx;
	unsigned ib_idx;

	/* does the IB have a msg command */
	bool has_msg_cmd;

	/* minimum buffer sizes */
	unsigned *buf_sizes;
};
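
/*
 * A minimal sketch of the register-write sequence the parser expects in
 * an IB (illustrative only; compare amdgpu_uvd_send_msg() below):
 *
 *	ib->ptr[0] = PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0);
 *	ib->ptr[1] = lower_32_bits(addr);   <- remembered as ctx->data0
 *	ib->ptr[2] = PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0);
 *	ib->ptr[3] = upper_32_bits(addr);   <- remembered as ctx->data1
 *	ib->ptr[4] = PACKET0(mmUVD_GPCOM_VCPU_CMD, 0);
 *	ib->ptr[5] = cmd;                   <- triggers the per-pass callback
 */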

#ifdef CONFIG_DRM_AMDGPU_CIK
MODULE_FIRMWARE(FIRMWARE_BONAIRE);
MODULE_FIRMWARE(FIRMWARE_KABINI);
MODULE_FIRMWARE(FIRMWARE_KAVERI);
MODULE_FIRMWARE(FIRMWARE_HAWAII);
MODULE_FIRMWARE(FIRMWARE_MULLINS);
#endif
MODULE_FIRMWARE(FIRMWARE_TONGA);
MODULE_FIRMWARE(FIRMWARE_CARRIZO);
MODULE_FIRMWARE(FIRMWARE_FIJI);
MODULE_FIRMWARE(FIRMWARE_STONEY);
MODULE_FIRMWARE(FIRMWARE_POLARIS10);
MODULE_FIRMWARE(FIRMWARE_POLARIS11);

static void amdgpu_uvd_note_usage(struct amdgpu_device *adev);
static void amdgpu_uvd_idle_work_handler(struct work_struct *work);

int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	struct amd_sched_rq *rq;
	unsigned long bo_size;
	const char *fw_name;
	const struct common_firmware_header *hdr;
	unsigned version_major, version_minor, family_id;
	int i, r;

	INIT_DELAYED_WORK(&adev->uvd.idle_work, amdgpu_uvd_idle_work_handler);

	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
		fw_name = FIRMWARE_BONAIRE;
		break;
	case CHIP_KABINI:
		fw_name = FIRMWARE_KABINI;
		break;
	case CHIP_KAVERI:
		fw_name = FIRMWARE_KAVERI;
		break;
	case CHIP_HAWAII:
		fw_name = FIRMWARE_HAWAII;
		break;
	case CHIP_MULLINS:
		fw_name = FIRMWARE_MULLINS;
		break;
#endif
	case CHIP_TONGA:
		fw_name = FIRMWARE_TONGA;
		break;
	case CHIP_FIJI:
		fw_name = FIRMWARE_FIJI;
		break;
	case CHIP_CARRIZO:
		fw_name = FIRMWARE_CARRIZO;
		break;
	case CHIP_STONEY:
		fw_name = FIRMWARE_STONEY;
		break;
	case CHIP_POLARIS10:
		fw_name = FIRMWARE_POLARIS10;
		break;
	case CHIP_POLARIS11:
		fw_name = FIRMWARE_POLARIS11;
		break;
	default:
		return -EINVAL;
	}

	r = request_firmware(&adev->uvd.fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "amdgpu_uvd: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	r = amdgpu_ucode_validate(adev->uvd.fw);
	if (r) {
		dev_err(adev->dev, "amdgpu_uvd: Can't validate firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->uvd.fw);
		adev->uvd.fw = NULL;
		return r;
	}

	/* Set the default number of UVD handles that the firmware can handle */
	adev->uvd.max_handles = AMDGPU_DEFAULT_UVD_HANDLES;

	hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
	family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
	version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
	version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
	DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n",
		 version_major, version_minor, family_id);

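	/*
	 * Example of the decode above (the value is illustrative, not
	 * from the source): a raw ucode_version of 0x01005028 yields
	 * version_major 0x01, version_minor 0x50 and family_id 0x28.
	 */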
	/*
	 * Limit the number of UVD handles depending on the microcode major
	 * and minor versions. Firmware version 1.80 is the first to support
	 * 40 UVD instances, so all subsequent versions support them as well.
	 */
	if ((version_major > 0x01) ||
	    ((version_major == 0x01) && (version_minor >= 0x50)))
		adev->uvd.max_handles = AMDGPU_MAX_UVD_HANDLES;

	bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
		  + AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE
		  + AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles;
	r = amdgpu_bo_create(adev, bo_size, PAGE_SIZE, true,
			     AMDGPU_GEM_DOMAIN_VRAM,
			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
			     NULL, NULL, &adev->uvd.vcpu_bo);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r);
		return r;
	}

	r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false);
	if (r) {
		amdgpu_bo_unref(&adev->uvd.vcpu_bo);
		dev_err(adev->dev, "(%d) failed to reserve UVD bo\n", r);
		return r;
	}

	r = amdgpu_bo_pin(adev->uvd.vcpu_bo, AMDGPU_GEM_DOMAIN_VRAM,
			  &adev->uvd.gpu_addr);
	if (r) {
		amdgpu_bo_unreserve(adev->uvd.vcpu_bo);
		amdgpu_bo_unref(&adev->uvd.vcpu_bo);
		dev_err(adev->dev, "(%d) UVD bo pin failed\n", r);
		return r;
	}

	r = amdgpu_bo_kmap(adev->uvd.vcpu_bo, &adev->uvd.cpu_addr);
	if (r) {
		dev_err(adev->dev, "(%d) UVD map failed\n", r);
		return r;
	}

	amdgpu_bo_unreserve(adev->uvd.vcpu_bo);

	ring = &adev->uvd.ring;
	rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
	r = amd_sched_entity_init(&ring->sched, &adev->uvd.entity,
				  rq, amdgpu_sched_jobs);
	if (r != 0) {
		DRM_ERROR("Failed setting up UVD run queue.\n");
		return r;
	}

	for (i = 0; i < adev->uvd.max_handles; ++i) {
		atomic_set(&adev->uvd.handles[i], 0);
		adev->uvd.filp[i] = NULL;
	}

	/* from uvd v5.0 HW addressing capacity increased to 64 bits */
	if (!amdgpu_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_UVD, 5, 0))
		adev->uvd.address_64_bit = true;

	return 0;
}

int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
{
	int r;

	if (adev->uvd.vcpu_bo == NULL)
		return 0;

	amd_sched_entity_fini(&adev->uvd.ring.sched, &adev->uvd.entity);

	r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false);
	if (!r) {
		amdgpu_bo_kunmap(adev->uvd.vcpu_bo);
		amdgpu_bo_unpin(adev->uvd.vcpu_bo);
		amdgpu_bo_unreserve(adev->uvd.vcpu_bo);
	}

	amdgpu_bo_unref(&adev->uvd.vcpu_bo);

	amdgpu_ring_fini(&adev->uvd.ring);

	release_firmware(adev->uvd.fw);

	return 0;
}

int amdgpu_uvd_suspend(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;
	int i;

	if (adev->uvd.vcpu_bo == NULL)
		return 0;

	for (i = 0; i < adev->uvd.max_handles; ++i)
		if (atomic_read(&adev->uvd.handles[i]))
			break;

	if (i == adev->uvd.max_handles)
		return 0;

	size = amdgpu_bo_size(adev->uvd.vcpu_bo);
	ptr = adev->uvd.cpu_addr;

	adev->uvd.saved_bo = kmalloc(size, GFP_KERNEL);
	if (!adev->uvd.saved_bo)
		return -ENOMEM;

	memcpy(adev->uvd.saved_bo, ptr, size);

	return 0;
}

int amdgpu_uvd_resume(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;

	if (adev->uvd.vcpu_bo == NULL)
		return -EINVAL;

	size = amdgpu_bo_size(adev->uvd.vcpu_bo);
	ptr = adev->uvd.cpu_addr;

	if (adev->uvd.saved_bo != NULL) {
		memcpy(ptr, adev->uvd.saved_bo, size);
		kfree(adev->uvd.saved_bo);
		adev->uvd.saved_bo = NULL;
	} else {
		const struct common_firmware_header *hdr;
		unsigned offset;

		hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
		offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
		memcpy(adev->uvd.cpu_addr, (adev->uvd.fw->data) + offset,
			(adev->uvd.fw->size) - offset);
		size -= le32_to_cpu(hdr->ucode_size_bytes);
		ptr += le32_to_cpu(hdr->ucode_size_bytes);
		memset(ptr, 0, size);
	}

	return 0;
}

void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
{
	struct amdgpu_ring *ring = &adev->uvd.ring;
	int i, r;

	for (i = 0; i < adev->uvd.max_handles; ++i) {
		uint32_t handle = atomic_read(&adev->uvd.handles[i]);
		if (handle != 0 && adev->uvd.filp[i] == filp) {
			struct fence *fence;

			amdgpu_uvd_note_usage(adev);

			r = amdgpu_uvd_get_destroy_msg(ring, handle,
						       false, &fence);
			if (r) {
				DRM_ERROR("Error destroying UVD (%d)!\n", r);
				continue;
			}

			fence_wait(fence, false);
			fence_put(fence);

			adev->uvd.filp[i] = NULL;
			atomic_set(&adev->uvd.handles[i], 0);
		}
	}
}

static void amdgpu_uvd_force_into_uvd_segment(struct amdgpu_bo *rbo)
{
	int i;
	for (i = 0; i < rbo->placement.num_placement; ++i) {
		rbo->placements[i].fpfn = 0 >> PAGE_SHIFT;
		rbo->placements[i].lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT;
	}
}

/**
 * amdgpu_uvd_cs_pass1 - first parsing round
 *
 * @ctx: UVD parser context
 *
 * Make sure UVD message and feedback buffers are in VRAM and
 * nobody is violating a 256MB boundary.
 */
static int amdgpu_uvd_cs_pass1(struct amdgpu_uvd_cs_ctx *ctx)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo;
	uint32_t cmd, lo, hi;
	uint64_t addr;
	int r = 0;

	lo = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data0);
	hi = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data1);
	addr = ((uint64_t)lo) | (((uint64_t)hi) << 32);

	mapping = amdgpu_cs_find_mapping(ctx->parser, addr, &bo);
	if (mapping == NULL) {
		DRM_ERROR("Can't find BO for addr 0x%08Lx\n", addr);
		return -EINVAL;
	}

	if (!ctx->parser->adev->uvd.address_64_bit) {
		/* check if it's a message or feedback command */
		cmd = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->idx) >> 1;
		if (cmd == 0x0 || cmd == 0x3) {
			/* yes, force it into VRAM */
			uint32_t domain = AMDGPU_GEM_DOMAIN_VRAM;
			amdgpu_ttm_placement_from_domain(bo, domain);
		}
		amdgpu_uvd_force_into_uvd_segment(bo);

		r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	}

	return r;
}

/**
 * amdgpu_uvd_cs_msg_decode - handle UVD decode message
 *
 * @msg: pointer to message structure
 * @buf_sizes: returned buffer sizes
 *
 * Peek into the decode message and calculate the necessary buffer sizes.
 */
static int amdgpu_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
{
	unsigned stream_type = msg[4];
	unsigned width = msg[6];
	unsigned height = msg[7];
	unsigned dpb_size = msg[9];
	unsigned pitch = msg[28];
	unsigned level = msg[57];

	unsigned width_in_mb = width / 16;
	unsigned height_in_mb = ALIGN(height / 16, 2);
	unsigned fs_in_mb = width_in_mb * height_in_mb;

	unsigned image_size, tmp, min_dpb_size, num_dpb_buffer;
	unsigned min_ctx_size = 0;

	image_size = width * height;
	image_size += image_size / 2;
	image_size = ALIGN(image_size, 1024);

	switch (stream_type) {
	case 0: /* H264 */
	case 7: /* H264 Perf */
		switch(level) {
		case 30:
			num_dpb_buffer = 8100 / fs_in_mb;
			break;
		case 31:
			num_dpb_buffer = 18000 / fs_in_mb;
			break;
		case 32:
			num_dpb_buffer = 20480 / fs_in_mb;
			break;
		case 41:
			num_dpb_buffer = 32768 / fs_in_mb;
			break;
		case 42:
			num_dpb_buffer = 34816 / fs_in_mb;
			break;
		case 50:
			num_dpb_buffer = 110400 / fs_in_mb;
			break;
		case 51:
			num_dpb_buffer = 184320 / fs_in_mb;
			break;
		default:
			num_dpb_buffer = 184320 / fs_in_mb;
			break;
		}
		num_dpb_buffer++;
		if (num_dpb_buffer > 17)
			num_dpb_buffer = 17;

		/* reference picture buffer */
		min_dpb_size = image_size * num_dpb_buffer;

		/* macroblock context buffer */
		min_dpb_size += width_in_mb * height_in_mb * num_dpb_buffer * 192;

		/* IT surface buffer */
		min_dpb_size += width_in_mb * height_in_mb * 32;
		break;

	case 1: /* VC1 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;

		/* CONTEXT_BUFFER */
		min_dpb_size += width_in_mb * height_in_mb * 128;

		/* IT surface buffer */
		min_dpb_size += width_in_mb * 64;

		/* DB surface buffer */
		min_dpb_size += width_in_mb * 128;

		/* BP */
		tmp = max(width_in_mb, height_in_mb);
		min_dpb_size += ALIGN(tmp * 7 * 16, 64);
		break;

	case 3: /* MPEG2 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;
		break;

	case 4: /* MPEG4 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;

		/* CM */
		min_dpb_size += width_in_mb * height_in_mb * 64;

		/* IT surface buffer */
		min_dpb_size += ALIGN(width_in_mb * height_in_mb * 32, 64);
		break;

	case 16: /* H265 */
		image_size = (ALIGN(width, 16) * ALIGN(height, 16) * 3) / 2;
		image_size = ALIGN(image_size, 256);

		num_dpb_buffer = (le32_to_cpu(msg[59]) & 0xff) + 2;
		min_dpb_size = image_size * num_dpb_buffer;
		min_ctx_size = ((width + 255) / 16) * ((height + 255) / 16)
					   * 16 * num_dpb_buffer + 52 * 1024;
		break;

	default:
		DRM_ERROR("UVD codec not handled %d!\n", stream_type);
		return -EINVAL;
	}

	if (width > pitch) {
		DRM_ERROR("Invalid UVD decoding target pitch!\n");
		return -EINVAL;
	}

	if (dpb_size < min_dpb_size) {
		DRM_ERROR("Invalid dpb_size in UVD message (%d / %d)!\n",
			  dpb_size, min_dpb_size);
		return -EINVAL;
	}

	buf_sizes[0x1] = dpb_size;
	buf_sizes[0x2] = image_size;
	buf_sizes[0x4] = min_ctx_size;
	return 0;
}

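/*
 * Worked example for the H264 path above (the numbers are illustrative,
 * not from the source): a 1920x1088 level 4.1 stream gives width_in_mb =
 * 120, height_in_mb = 68 and fs_in_mb = 8160, so num_dpb_buffer =
 * 32768 / 8160 + 1 = 5. With image_size aligned to 3133440 bytes the
 * reported DPB size must then be at least
 * 5 * 3133440 + 120 * 68 * 5 * 192 + 120 * 68 * 32 = 23761920 bytes.
 */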
/**
 * amdgpu_uvd_cs_msg - handle UVD message
 *
 * @ctx: UVD parser context
 * @bo: buffer object containing the message
 * @offset: offset into the buffer object
 *
 * Peek into the UVD message and extract the session id.
 * Make sure that we don't open up too many sessions.
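 *
 * The first words of every message share a common layout (compare
 * amdgpu_uvd_get_create_msg() below): msg[1] is the message type
 * (0 create, 1 decode, 2 destroy) and msg[2] the session handle.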
 */
static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
			     struct amdgpu_bo *bo, unsigned offset)
{
	struct amdgpu_device *adev = ctx->parser->adev;
	int32_t *msg, msg_type, handle;
	void *ptr;
	long r;
	int i;

	if (offset & 0x3F) {
		DRM_ERROR("UVD messages must be 64 byte aligned!\n");
		return -EINVAL;
	}

	r = amdgpu_bo_kmap(bo, &ptr);
	if (r) {
		DRM_ERROR("Failed mapping the UVD message (%ld)!\n", r);
		return r;
	}

	msg = ptr + offset;

	msg_type = msg[1];
	handle = msg[2];

	if (handle == 0) {
		amdgpu_bo_kunmap(bo);
		DRM_ERROR("Invalid UVD handle!\n");
		return -EINVAL;
	}

	switch (msg_type) {
	case 0:
		/* it's a create msg */
		amdgpu_bo_kunmap(bo);

		/* try to alloc a new handle */
		for (i = 0; i < adev->uvd.max_handles; ++i) {
			if (atomic_read(&adev->uvd.handles[i]) == handle) {
				DRM_ERROR("Handle 0x%x already in use!\n", handle);
				return -EINVAL;
			}

			if (!atomic_cmpxchg(&adev->uvd.handles[i], 0, handle)) {
				adev->uvd.filp[i] = ctx->parser->filp;
				return 0;
			}
		}

		DRM_ERROR("No more free UVD handles!\n");
		return -EINVAL;

	case 1:
		/* it's a decode msg, calc buffer sizes */
		r = amdgpu_uvd_cs_msg_decode(msg, ctx->buf_sizes);
		amdgpu_bo_kunmap(bo);
		if (r)
			return r;

		/* validate the handle */
		for (i = 0; i < adev->uvd.max_handles; ++i) {
			if (atomic_read(&adev->uvd.handles[i]) == handle) {
				if (adev->uvd.filp[i] != ctx->parser->filp) {
					DRM_ERROR("UVD handle collision detected!\n");
					return -EINVAL;
				}
				return 0;
			}
		}

		DRM_ERROR("Invalid UVD handle 0x%x!\n", handle);
		return -ENOENT;

	case 2:
		/* it's a destroy msg, free the handle */
		for (i = 0; i < adev->uvd.max_handles; ++i)
			atomic_cmpxchg(&adev->uvd.handles[i], handle, 0);
		amdgpu_bo_kunmap(bo);
		return 0;

	default:
		DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
		return -EINVAL;
	}
	BUG();
	return -EINVAL;
}

/**
 * amdgpu_uvd_cs_pass2 - second parsing round
 *
 * @ctx: UVD parser context
 *
 * Patch buffer addresses, make sure buffer sizes are correct.
 */
static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo;
	uint32_t cmd, lo, hi;
	uint64_t start, end;
	uint64_t addr;
	int r;

	lo = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data0);
	hi = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data1);
	addr = ((uint64_t)lo) | (((uint64_t)hi) << 32);

	mapping = amdgpu_cs_find_mapping(ctx->parser, addr, &bo);
	if (mapping == NULL)
		return -EINVAL;

	start = amdgpu_bo_gpu_offset(bo);

	end = (mapping->it.last + 1 - mapping->it.start);
	end = end * AMDGPU_GPU_PAGE_SIZE + start;

	addr -= ((uint64_t)mapping->it.start) * AMDGPU_GPU_PAGE_SIZE;
	start += addr;

	amdgpu_set_ib_value(ctx->parser, ctx->ib_idx, ctx->data0,
			    lower_32_bits(start));
	amdgpu_set_ib_value(ctx->parser, ctx->ib_idx, ctx->data1,
			    upper_32_bits(start));

	cmd = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->idx) >> 1;
	if (cmd < 0x4) {
		if ((end - start) < ctx->buf_sizes[cmd]) {
681 DRM_ERROR("buffer (%d) to small (%d / %d)!\n", cmd,
				  (unsigned)(end - start),
				  ctx->buf_sizes[cmd]);
			return -EINVAL;
		}

	} else if (cmd == 0x206) {
		if ((end - start) < ctx->buf_sizes[4]) {
689 DRM_ERROR("buffer (%d) to small (%d / %d)!\n", cmd,
				  (unsigned)(end - start),
				  ctx->buf_sizes[4]);
			return -EINVAL;
		}
	} else if ((cmd != 0x100) && (cmd != 0x204)) {
		DRM_ERROR("invalid UVD command %X!\n", cmd);
		return -EINVAL;
	}

	if (!ctx->parser->adev->uvd.address_64_bit) {
		if ((start >> 28) != ((end - 1) >> 28)) {
			DRM_ERROR("reloc %LX-%LX crossing 256MB boundary!\n",
				  start, end);
			return -EINVAL;
		}

		if ((cmd == 0 || cmd == 0x3) &&
		    (start >> 28) != (ctx->parser->adev->uvd.gpu_addr >> 28)) {
			DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n",
				  start, end);
			return -EINVAL;
		}
	}

	if (cmd == 0) {
		ctx->has_msg_cmd = true;
		r = amdgpu_uvd_cs_msg(ctx, bo, addr);
		if (r)
			return r;
	} else if (!ctx->has_msg_cmd) {
720 DRM_ERROR("Message needed before other commands are send!\n");
		return -EINVAL;
	}

	return 0;
}

/**
 * amdgpu_uvd_cs_reg - parse register writes
 *
 * @ctx: UVD parser context
 * @cb: callback function
 *
 * Parse the register writes, call cb on each complete command.
 */
static int amdgpu_uvd_cs_reg(struct amdgpu_uvd_cs_ctx *ctx,
			     int (*cb)(struct amdgpu_uvd_cs_ctx *ctx))
{
	struct amdgpu_ib *ib = &ctx->parser->job->ibs[ctx->ib_idx];
	int i, r;

	ctx->idx++;
	for (i = 0; i <= ctx->count; ++i) {
		unsigned reg = ctx->reg + i;

		if (ctx->idx >= ib->length_dw) {
			DRM_ERROR("Register command after end of CS!\n");
			return -EINVAL;
		}

		switch (reg) {
		case mmUVD_GPCOM_VCPU_DATA0:
			ctx->data0 = ctx->idx;
			break;
		case mmUVD_GPCOM_VCPU_DATA1:
			ctx->data1 = ctx->idx;
			break;
		case mmUVD_GPCOM_VCPU_CMD:
			r = cb(ctx);
			if (r)
				return r;
			break;
		case mmUVD_ENGINE_CNTL:
			break;
		default:
			DRM_ERROR("Invalid reg 0x%X!\n", reg);
			return -EINVAL;
		}
		ctx->idx++;
	}
	return 0;
}

/**
 * amdgpu_uvd_cs_packets - parse UVD packets
 *
 * @ctx: UVD parser context
 * @cb: callback function
 *
 * Parse the command stream packets.
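 *
 * A type 0 packet header encodes a base register and a count; it is
 * followed by count + 1 data words written to consecutive registers
 * (see amdgpu_uvd_cs_reg() above). Type 2 packets are padding and are
 * simply skipped.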
 */
static int amdgpu_uvd_cs_packets(struct amdgpu_uvd_cs_ctx *ctx,
				 int (*cb)(struct amdgpu_uvd_cs_ctx *ctx))
{
	struct amdgpu_ib *ib = &ctx->parser->job->ibs[ctx->ib_idx];
	int r;

	for (ctx->idx = 0 ; ctx->idx < ib->length_dw; ) {
		uint32_t cmd = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->idx);
		unsigned type = CP_PACKET_GET_TYPE(cmd);
		switch (type) {
		case PACKET_TYPE0:
			ctx->reg = CP_PACKET0_GET_REG(cmd);
			ctx->count = CP_PACKET_GET_COUNT(cmd);
			r = amdgpu_uvd_cs_reg(ctx, cb);
			if (r)
				return r;
			break;
		case PACKET_TYPE2:
			++ctx->idx;
			break;
		default:
			DRM_ERROR("Unknown packet type %d!\n", type);
			return -EINVAL;
		}
	}
	return 0;
}

/**
 * amdgpu_uvd_ring_parse_cs - UVD command submission parser
 *
 * @parser: Command submission parser context
 *
 * Parse the command stream, patch in addresses as necessary.
 */
int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
{
	struct amdgpu_uvd_cs_ctx ctx = {};
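	/*
	 * Minimum sizes indexed by buffer command: 0 is the message
	 * buffer, 1 the DPB, 2 the decoding target, 3 the feedback
	 * buffer and 4 the context buffer; entries 1, 2 and 4 are
	 * filled in by amdgpu_uvd_cs_msg_decode().
	 */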
	unsigned buf_sizes[] = {
		[0x00000000] = 2048,
		[0x00000001] = 0xFFFFFFFF,
		[0x00000002] = 0xFFFFFFFF,
		[0x00000003] = 2048,
		[0x00000004] = 0xFFFFFFFF,
	};
	struct amdgpu_ib *ib = &parser->job->ibs[ib_idx];
	int r;

	if (ib->length_dw % 16) {
		DRM_ERROR("UVD IB length (%d) not 16 dwords aligned!\n",
			  ib->length_dw);
		return -EINVAL;
	}

	ctx.parser = parser;
	ctx.buf_sizes = buf_sizes;
	ctx.ib_idx = ib_idx;

	/* first round, make sure the buffers are actually in the UVD segment */
	r = amdgpu_uvd_cs_packets(&ctx, amdgpu_uvd_cs_pass1);
	if (r)
		return r;

	/* second round, patch buffer addresses into the command stream */
	r = amdgpu_uvd_cs_packets(&ctx, amdgpu_uvd_cs_pass2);
	if (r)
		return r;

	if (!ctx.has_msg_cmd) {
		DRM_ERROR("UVD-IBs need a msg command!\n");
		return -EINVAL;
	}

	amdgpu_uvd_note_usage(ctx.parser->adev);

	return 0;
}

static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
			       bool direct, struct fence **fence)
{
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	struct list_head head;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct fence *f = NULL;
	struct amdgpu_device *adev = ring->adev;
	uint64_t addr;
	int i, r;

	memset(&tv, 0, sizeof(tv));
	tv.bo = &bo->tbo;

	INIT_LIST_HEAD(&head);
	list_add(&tv.head, &head);

	r = ttm_eu_reserve_buffers(&ticket, &head, true, NULL);
	if (r)
		return r;

	if (!bo->adev->uvd.address_64_bit) {
		amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
		amdgpu_uvd_force_into_uvd_segment(bo);
	}

	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
	if (r)
		goto err;

	r = amdgpu_job_alloc_with_ib(adev, 64, &job);
	if (r)
		goto err;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);
	ib->ptr[0] = PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0);
	ib->ptr[1] = addr;
	ib->ptr[2] = PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0);
	ib->ptr[3] = addr >> 32;
	ib->ptr[4] = PACKET0(mmUVD_GPCOM_VCPU_CMD, 0);
	ib->ptr[5] = 0;
	for (i = 6; i < 16; ++i)
		ib->ptr[i] = PACKET2(0);
	ib->length_dw = 16;

	if (direct) {
		r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
		job->fence = f;
		if (r)
			goto err_free;

		amdgpu_job_free(job);
	} else {
		r = amdgpu_job_submit(job, ring, &adev->uvd.entity,
				      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
		if (r)
			goto err_free;
	}

	ttm_eu_fence_buffer_objects(&ticket, &head, f);

	if (fence)
		*fence = fence_get(f);
	amdgpu_bo_unref(&bo);
	fence_put(f);

	return 0;

err_free:
	amdgpu_job_free(job);

err:
	ttm_eu_backoff_reservation(&ticket, &head);
	return r;
}

/*
 * Multiple fence commands without any stream commands in between can
 * crash the vcpu so just try to emit a dummy create/destroy msg to
 * avoid this.
 */
int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
			      struct fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo;
	uint32_t *msg;
	int r, i;

	r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
			     AMDGPU_GEM_DOMAIN_VRAM,
			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
			     NULL, NULL, &bo);
	if (r)
		return r;

	r = amdgpu_bo_reserve(bo, false);
	if (r) {
		amdgpu_bo_unref(&bo);
		return r;
	}

	r = amdgpu_bo_kmap(bo, (void **)&msg);
	if (r) {
		amdgpu_bo_unreserve(bo);
		amdgpu_bo_unref(&bo);
		return r;
	}

	/* stitch together a UVD create msg */
	msg[0] = cpu_to_le32(0x00000de4);
	msg[1] = cpu_to_le32(0x00000000);
	msg[2] = cpu_to_le32(handle);
	msg[3] = cpu_to_le32(0x00000000);
	msg[4] = cpu_to_le32(0x00000000);
	msg[5] = cpu_to_le32(0x00000000);
	msg[6] = cpu_to_le32(0x00000000);
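	/* 0x780 x 0x440 is 1920x1088, the frame size this dummy msg claims */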
	msg[7] = cpu_to_le32(0x00000780);
	msg[8] = cpu_to_le32(0x00000440);
	msg[9] = cpu_to_le32(0x00000000);
	msg[10] = cpu_to_le32(0x01b37000);
	for (i = 11; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	amdgpu_bo_kunmap(bo);
	amdgpu_bo_unreserve(bo);

	return amdgpu_uvd_send_msg(ring, bo, true, fence);
}

int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
			       bool direct, struct fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo;
	uint32_t *msg;
	int r, i;

	r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
			     AMDGPU_GEM_DOMAIN_VRAM,
			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
			     NULL, NULL, &bo);
	if (r)
		return r;

	r = amdgpu_bo_reserve(bo, false);
	if (r) {
		amdgpu_bo_unref(&bo);
		return r;
	}

	r = amdgpu_bo_kmap(bo, (void **)&msg);
	if (r) {
		amdgpu_bo_unreserve(bo);
		amdgpu_bo_unref(&bo);
		return r;
	}

	/* stitch together a UVD destroy msg */
	msg[0] = cpu_to_le32(0x00000de4);
	msg[1] = cpu_to_le32(0x00000002);
	msg[2] = cpu_to_le32(handle);
	msg[3] = cpu_to_le32(0x00000000);
	for (i = 4; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	amdgpu_bo_kunmap(bo);
	amdgpu_bo_unreserve(bo);

	return amdgpu_uvd_send_msg(ring, bo, direct, fence);
}

static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, uvd.idle_work.work);
	unsigned i, fences, handles = 0;

	fences = amdgpu_fence_count_emitted(&adev->uvd.ring);

	for (i = 0; i < adev->uvd.max_handles; ++i)
		if (atomic_read(&adev->uvd.handles[i]))
			++handles;

	if (fences == 0 && handles == 0) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_uvd(adev, false);
		} else {
			amdgpu_asic_set_uvd_clocks(adev, 0, 0);
		}
	} else {
		schedule_delayed_work(&adev->uvd.idle_work,
				      msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));
	}
}

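/*
 * Called whenever the UVD block is about to be used; if the idle work
 * was not pending, the clocks/power gating have already been dropped,
 * so ramp them back up before use.
 */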
static void amdgpu_uvd_note_usage(struct amdgpu_device *adev)
{
	bool set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work);
	set_clocks &= schedule_delayed_work(&adev->uvd.idle_work,
					    msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));

	if (set_clocks) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_uvd(adev, true);
		} else {
			amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
		}
	}
}
1069}