/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <deathsimple@vodafone.de>
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_uvd.h"
#include "cikd.h"
#include "uvd/uvd_4_2_d.h"

/* 1 second timeout */
#define UVD_IDLE_TIMEOUT	msecs_to_jiffies(1000)

/* Firmware versions for VI */
#define FW_1_65_10	((1 << 24) | (65 << 16) | (10 << 8))
#define FW_1_87_11	((1 << 24) | (87 << 16) | (11 << 8))
#define FW_1_87_12	((1 << 24) | (87 << 16) | (12 << 8))
#define FW_1_37_15	((1 << 24) | (37 << 16) | (15 << 8))

/* Polaris10/11 firmware version */
#define FW_1_66_16	((1 << 24) | (66 << 16) | (16 << 8))
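
/*
 * These constants pack (major << 24) | (minor << 16) | (revision << 8),
 * the same layout amdgpu_uvd_sw_init() uses for adev->uvd.fw_version,
 * so the loaded firmware version can be compared against them directly.
 */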

/* Firmware Names */
#ifdef CONFIG_DRM_AMDGPU_CIK
#define FIRMWARE_BONAIRE	"radeon/bonaire_uvd.bin"
#define FIRMWARE_KABINI		"radeon/kabini_uvd.bin"
#define FIRMWARE_KAVERI		"radeon/kaveri_uvd.bin"
#define FIRMWARE_HAWAII		"radeon/hawaii_uvd.bin"
#define FIRMWARE_MULLINS	"radeon/mullins_uvd.bin"
#endif
#define FIRMWARE_TONGA		"amdgpu/tonga_uvd.bin"
#define FIRMWARE_CARRIZO	"amdgpu/carrizo_uvd.bin"
#define FIRMWARE_FIJI		"amdgpu/fiji_uvd.bin"
#define FIRMWARE_STONEY		"amdgpu/stoney_uvd.bin"
#define FIRMWARE_POLARIS10	"amdgpu/polaris10_uvd.bin"
#define FIRMWARE_POLARIS11	"amdgpu/polaris11_uvd.bin"
#define FIRMWARE_POLARIS12	"amdgpu/polaris12_uvd.bin"

#define FIRMWARE_VEGA10		"amdgpu/vega10_uvd.bin"

#define mmUVD_GPCOM_VCPU_DATA0_VEGA10 (0x03c4 + 0x7e00)
#define mmUVD_GPCOM_VCPU_DATA1_VEGA10 (0x03c5 + 0x7e00)
#define mmUVD_GPCOM_VCPU_CMD_VEGA10 (0x03c3 + 0x7e00)
#define mmUVD_NO_OP_VEGA10 (0x03ff + 0x7e00)
#define mmUVD_ENGINE_CNTL_VEGA10 (0x03c6 + 0x7e00)

/**
 * amdgpu_uvd_cs_ctx - Command submission parser context
 *
 * Used for emulating virtual memory support on UVD 4.2.
 */
struct amdgpu_uvd_cs_ctx {
	struct amdgpu_cs_parser *parser;
	/* register and dword count of the current PACKET0 */
	unsigned reg, count;
	/* IB offsets of the GPCOM_VCPU_DATA0/DATA1 values */
	unsigned data0, data1;
	/* current read position and IB index within the job */
	unsigned idx;
	unsigned ib_idx;

	/* does the IB have a msg command */
	bool has_msg_cmd;

	/* minimum buffer sizes */
	unsigned *buf_sizes;
};

#ifdef CONFIG_DRM_AMDGPU_CIK
MODULE_FIRMWARE(FIRMWARE_BONAIRE);
MODULE_FIRMWARE(FIRMWARE_KABINI);
MODULE_FIRMWARE(FIRMWARE_KAVERI);
MODULE_FIRMWARE(FIRMWARE_HAWAII);
MODULE_FIRMWARE(FIRMWARE_MULLINS);
#endif
MODULE_FIRMWARE(FIRMWARE_TONGA);
MODULE_FIRMWARE(FIRMWARE_CARRIZO);
MODULE_FIRMWARE(FIRMWARE_FIJI);
MODULE_FIRMWARE(FIRMWARE_STONEY);
MODULE_FIRMWARE(FIRMWARE_POLARIS10);
MODULE_FIRMWARE(FIRMWARE_POLARIS11);
MODULE_FIRMWARE(FIRMWARE_POLARIS12);

MODULE_FIRMWARE(FIRMWARE_VEGA10);

static void amdgpu_uvd_idle_work_handler(struct work_struct *work);

int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	struct drm_sched_rq *rq;
	unsigned long bo_size;
	const char *fw_name;
	const struct common_firmware_header *hdr;
	unsigned version_major, version_minor, family_id;
	int i, r;

	INIT_DELAYED_WORK(&adev->uvd.idle_work, amdgpu_uvd_idle_work_handler);

	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
		fw_name = FIRMWARE_BONAIRE;
		break;
	case CHIP_KABINI:
		fw_name = FIRMWARE_KABINI;
		break;
	case CHIP_KAVERI:
		fw_name = FIRMWARE_KAVERI;
		break;
	case CHIP_HAWAII:
		fw_name = FIRMWARE_HAWAII;
		break;
	case CHIP_MULLINS:
		fw_name = FIRMWARE_MULLINS;
		break;
#endif
	case CHIP_TONGA:
		fw_name = FIRMWARE_TONGA;
		break;
	case CHIP_FIJI:
		fw_name = FIRMWARE_FIJI;
		break;
	case CHIP_CARRIZO:
		fw_name = FIRMWARE_CARRIZO;
		break;
	case CHIP_STONEY:
		fw_name = FIRMWARE_STONEY;
		break;
	case CHIP_POLARIS10:
		fw_name = FIRMWARE_POLARIS10;
		break;
	case CHIP_POLARIS11:
		fw_name = FIRMWARE_POLARIS11;
		break;
	case CHIP_VEGA10:
		fw_name = FIRMWARE_VEGA10;
		break;
	case CHIP_POLARIS12:
		fw_name = FIRMWARE_POLARIS12;
		break;
	default:
		return -EINVAL;
	}

	r = request_firmware(&adev->uvd.fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "amdgpu_uvd: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	r = amdgpu_ucode_validate(adev->uvd.fw);
	if (r) {
		dev_err(adev->dev, "amdgpu_uvd: Can't validate firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->uvd.fw);
		adev->uvd.fw = NULL;
		return r;
	}

	/* Set the default UVD handles that the firmware can handle */
	adev->uvd.max_handles = AMDGPU_DEFAULT_UVD_HANDLES;

	hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
	family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
	version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
	version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
	DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n",
		version_major, version_minor, family_id);

	/*
	 * Limit the number of UVD handles depending on microcode major
	 * and minor versions. Support for 40 UVD instances was added
	 * with firmware version 1.80 (minor version 0x50), so that
	 * version and all later ones get the larger limit.
	 */
	if ((version_major > 0x01) ||
	    ((version_major == 0x01) && (version_minor >= 0x50)))
		adev->uvd.max_handles = AMDGPU_MAX_UVD_HANDLES;

	adev->uvd.fw_version = ((version_major << 24) | (version_minor << 16) |
				(family_id << 8));

	if ((adev->asic_type == CHIP_POLARIS10 ||
	     adev->asic_type == CHIP_POLARIS11) &&
	    (adev->uvd.fw_version < FW_1_66_16))
		DRM_ERROR("POLARIS10/11 UVD firmware version %hu.%hu is too old.\n",
			  version_major, version_minor);

	bo_size = AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE
		  + AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles;
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);

	r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->uvd.vcpu_bo,
				    &adev->uvd.gpu_addr, &adev->uvd.cpu_addr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r);
		return r;
	}

	ring = &adev->uvd.ring;
	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
	r = drm_sched_entity_init(&ring->sched, &adev->uvd.entity,
				  rq, amdgpu_sched_jobs, NULL);
	if (r != 0) {
		DRM_ERROR("Failed setting up UVD run queue.\n");
		return r;
	}

	for (i = 0; i < adev->uvd.max_handles; ++i) {
		atomic_set(&adev->uvd.handles[i], 0);
		adev->uvd.filp[i] = NULL;
	}

	/* from UVD v5.0 HW addressing capacity increased to 64 bits */
	if (!amdgpu_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_UVD, 5, 0))
		adev->uvd.address_64_bit = true;

	switch (adev->asic_type) {
	case CHIP_TONGA:
		adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_65_10;
		break;
	case CHIP_CARRIZO:
		adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_87_11;
		break;
	case CHIP_FIJI:
		adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_87_12;
		break;
	case CHIP_STONEY:
		adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_37_15;
		break;
	default:
		adev->uvd.use_ctx_buf = adev->asic_type >= CHIP_POLARIS10;
	}

	return 0;
}

int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
{
	int i;

	kfree(adev->uvd.saved_bo);

	drm_sched_entity_fini(&adev->uvd.ring.sched, &adev->uvd.entity);

	amdgpu_bo_free_kernel(&adev->uvd.vcpu_bo,
			      &adev->uvd.gpu_addr,
			      (void **)&adev->uvd.cpu_addr);

	amdgpu_ring_fini(&adev->uvd.ring);

	for (i = 0; i < AMDGPU_MAX_UVD_ENC_RINGS; ++i)
		amdgpu_ring_fini(&adev->uvd.ring_enc[i]);

	release_firmware(adev->uvd.fw);

	return 0;
}

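/**
 * amdgpu_uvd_suspend - save the UVD VCPU BO state
 *
 * @adev: amdgpu_device pointer
 *
 * If any session is still open, cancel the idle work and copy the
 * VCPU BO contents to system memory so they survive suspend.
 */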
int amdgpu_uvd_suspend(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;
	int i;

	if (adev->uvd.vcpu_bo == NULL)
		return 0;

	for (i = 0; i < adev->uvd.max_handles; ++i)
		if (atomic_read(&adev->uvd.handles[i]))
			break;

	/* nothing to save if no session is open */
	if (i == adev->uvd.max_handles)
		return 0;

	cancel_delayed_work_sync(&adev->uvd.idle_work);

	size = amdgpu_bo_size(adev->uvd.vcpu_bo);
	ptr = adev->uvd.cpu_addr;

	adev->uvd.saved_bo = kmalloc(size, GFP_KERNEL);
	if (!adev->uvd.saved_bo)
		return -ENOMEM;

	memcpy_fromio(adev->uvd.saved_bo, ptr, size);

	return 0;
}

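/**
 * amdgpu_uvd_resume - restore the UVD VCPU BO
 *
 * @adev: amdgpu_device pointer
 *
 * Either restore the BO contents saved at suspend time or reload the
 * firmware image and clear the rest of the BO.
 */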
int amdgpu_uvd_resume(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;

	if (adev->uvd.vcpu_bo == NULL)
		return -EINVAL;

	size = amdgpu_bo_size(adev->uvd.vcpu_bo);
	ptr = adev->uvd.cpu_addr;

	if (adev->uvd.saved_bo != NULL) {
		memcpy_toio(ptr, adev->uvd.saved_bo, size);
		kfree(adev->uvd.saved_bo);
		adev->uvd.saved_bo = NULL;
	} else {
		const struct common_firmware_header *hdr;
		unsigned offset;

		hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
		if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
			offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
			memcpy_toio(adev->uvd.cpu_addr, adev->uvd.fw->data + offset,
				    le32_to_cpu(hdr->ucode_size_bytes));
			size -= le32_to_cpu(hdr->ucode_size_bytes);
			ptr += le32_to_cpu(hdr->ucode_size_bytes);
		}
		memset_io(ptr, 0, size);
	}

	return 0;
}

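/**
 * amdgpu_uvd_free_handles - close all sessions still open by a client
 *
 * @adev: amdgpu_device pointer
 * @filp: DRM file the sessions belong to
 *
 * Sends a destroy message for every handle still owned by @filp, so a
 * client that exits without closing its sessions doesn't leak them.
 */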
void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
{
	struct amdgpu_ring *ring = &adev->uvd.ring;
	int i, r;

	for (i = 0; i < adev->uvd.max_handles; ++i) {
		uint32_t handle = atomic_read(&adev->uvd.handles[i]);

		if (handle != 0 && adev->uvd.filp[i] == filp) {
			struct dma_fence *fence;

			r = amdgpu_uvd_get_destroy_msg(ring, handle,
						       false, &fence);
			if (r) {
				DRM_ERROR("Error destroying UVD (%d)!\n", r);
				continue;
			}

			dma_fence_wait(fence, false);
			dma_fence_put(fence);

			adev->uvd.filp[i] = NULL;
			atomic_set(&adev->uvd.handles[i], 0);
		}
	}
}

static void amdgpu_uvd_force_into_uvd_segment(struct amdgpu_bo *abo)
{
	int i;

	for (i = 0; i < abo->placement.num_placement; ++i) {
		abo->placements[i].fpfn = 0 >> PAGE_SHIFT;
		abo->placements[i].lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT;
	}
}

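/**
 * amdgpu_uvd_get_addr_from_ctx - assemble the 64bit buffer address
 *
 * @ctx: UVD parser context
 *
 * Combine the low and high dwords recorded at ctx->data0/data1 in the
 * IB into the 64bit GPU address of the buffer.
 */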
static u64 amdgpu_uvd_get_addr_from_ctx(struct amdgpu_uvd_cs_ctx *ctx)
{
	uint32_t lo, hi;
	uint64_t addr;

	lo = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data0);
	hi = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data1);
	addr = ((uint64_t)lo) | (((uint64_t)hi) << 32);

	return addr;
}

/**
 * amdgpu_uvd_cs_pass1 - first parsing round
 *
 * @ctx: UVD parser context
 *
 * Make sure UVD message and feedback buffers are in VRAM and
 * nobody is violating a 256MB boundary.
 */
static int amdgpu_uvd_cs_pass1(struct amdgpu_uvd_cs_ctx *ctx)
{
	struct ttm_operation_ctx tctx = { false, false };
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo;
	uint32_t cmd;
	uint64_t addr = amdgpu_uvd_get_addr_from_ctx(ctx);
	int r = 0;

	r = amdgpu_cs_find_mapping(ctx->parser, addr, &bo, &mapping);
	if (r) {
		DRM_ERROR("Can't find BO for addr 0x%08Lx\n", addr);
		return r;
	}

	if (!ctx->parser->adev->uvd.address_64_bit) {
		/* check if it's a message or feedback command */
		cmd = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->idx) >> 1;
		if (cmd == 0x0 || cmd == 0x3) {
			/* yes, force it into VRAM */
			uint32_t domain = AMDGPU_GEM_DOMAIN_VRAM;

			amdgpu_ttm_placement_from_domain(bo, domain);
		}
		amdgpu_uvd_force_into_uvd_segment(bo);

		r = ttm_bo_validate(&bo->tbo, &bo->placement, &tctx);
	}

	return r;
}

/**
 * amdgpu_uvd_cs_msg_decode - handle UVD decode message
 *
 * @adev: amdgpu_device pointer
 * @msg: pointer to message structure
 * @buf_sizes: returned buffer sizes
 *
 * Peek into the decode message and calculate the necessary buffer sizes.
 */
static int amdgpu_uvd_cs_msg_decode(struct amdgpu_device *adev, uint32_t *msg,
	unsigned buf_sizes[])
{
	unsigned stream_type = msg[4];
	unsigned width = msg[6];
	unsigned height = msg[7];
	unsigned dpb_size = msg[9];
	unsigned pitch = msg[28];
	unsigned level = msg[57];

	unsigned width_in_mb = width / 16;
	unsigned height_in_mb = ALIGN(height / 16, 2);
	unsigned fs_in_mb = width_in_mb * height_in_mb;

	unsigned image_size, tmp, min_dpb_size, num_dpb_buffer;
	unsigned min_ctx_size = ~0;

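	/* one 4:2:0 frame: luma plane plus half-sized chroma, 1KB aligned */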
	image_size = width * height;
	image_size += image_size / 2;
	image_size = ALIGN(image_size, 1024);

	switch (stream_type) {
	case 0: /* H264 */
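		/*
		 * The per-level constants below are the H.264 MaxDpbMbs
		 * limits (Annex A); dividing by the frame size in
		 * macroblocks gives the number of reference frames the
		 * DPB has to hold.
		 */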
		switch (level) {
		case 30:
			num_dpb_buffer = 8100 / fs_in_mb;
			break;
		case 31:
			num_dpb_buffer = 18000 / fs_in_mb;
			break;
		case 32:
			num_dpb_buffer = 20480 / fs_in_mb;
			break;
		case 41:
			num_dpb_buffer = 32768 / fs_in_mb;
			break;
		case 42:
			num_dpb_buffer = 34816 / fs_in_mb;
			break;
		case 50:
			num_dpb_buffer = 110400 / fs_in_mb;
			break;
		case 51:
			num_dpb_buffer = 184320 / fs_in_mb;
			break;
		default:
			num_dpb_buffer = 184320 / fs_in_mb;
			break;
		}
		num_dpb_buffer++;
		if (num_dpb_buffer > 17)
			num_dpb_buffer = 17;

		/* reference picture buffer */
		min_dpb_size = image_size * num_dpb_buffer;

		/* macroblock context buffer */
		min_dpb_size += width_in_mb * height_in_mb * num_dpb_buffer * 192;

		/* IT surface buffer */
		min_dpb_size += width_in_mb * height_in_mb * 32;
		break;

	case 1: /* VC1 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;

		/* CONTEXT_BUFFER */
		min_dpb_size += width_in_mb * height_in_mb * 128;

		/* IT surface buffer */
		min_dpb_size += width_in_mb * 64;

		/* DB surface buffer */
		min_dpb_size += width_in_mb * 128;

		/* BP */
		tmp = max(width_in_mb, height_in_mb);
		min_dpb_size += ALIGN(tmp * 7 * 16, 64);
		break;

	case 3: /* MPEG2 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;
		break;

	case 4: /* MPEG4 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;

		/* CM */
		min_dpb_size += width_in_mb * height_in_mb * 64;

		/* IT surface buffer */
		min_dpb_size += ALIGN(width_in_mb * height_in_mb * 32, 64);
		break;

	case 7: /* H264 Perf */
		switch (level) {
		case 30:
			num_dpb_buffer = 8100 / fs_in_mb;
			break;
		case 31:
			num_dpb_buffer = 18000 / fs_in_mb;
			break;
		case 32:
			num_dpb_buffer = 20480 / fs_in_mb;
			break;
		case 41:
			num_dpb_buffer = 32768 / fs_in_mb;
			break;
		case 42:
			num_dpb_buffer = 34816 / fs_in_mb;
			break;
		case 50:
			num_dpb_buffer = 110400 / fs_in_mb;
			break;
		case 51:
			num_dpb_buffer = 184320 / fs_in_mb;
			break;
		default:
			num_dpb_buffer = 184320 / fs_in_mb;
			break;
		}
		num_dpb_buffer++;
		if (num_dpb_buffer > 17)
			num_dpb_buffer = 17;

		/* reference picture buffer */
		min_dpb_size = image_size * num_dpb_buffer;

		if (!adev->uvd.use_ctx_buf) {
			/* macroblock context buffer */
			min_dpb_size +=
				width_in_mb * height_in_mb * num_dpb_buffer * 192;

			/* IT surface buffer */
			min_dpb_size += width_in_mb * height_in_mb * 32;
		} else {
			/* macroblock context buffer */
			min_ctx_size =
				width_in_mb * height_in_mb * num_dpb_buffer * 192;
		}
		break;

	case 8: /* MJPEG */
		min_dpb_size = 0;
		break;

	case 16: /* H265 */
		image_size = (ALIGN(width, 16) * ALIGN(height, 16) * 3) / 2;
		image_size = ALIGN(image_size, 256);

		num_dpb_buffer = (le32_to_cpu(msg[59]) & 0xff) + 2;
		min_dpb_size = image_size * num_dpb_buffer;
		min_ctx_size = ((width + 255) / 16) * ((height + 255) / 16)
			       * 16 * num_dpb_buffer + 52 * 1024;
		break;

	default:
		DRM_ERROR("UVD codec not handled %d!\n", stream_type);
		return -EINVAL;
	}

	if (width > pitch) {
		DRM_ERROR("Invalid UVD decoding target pitch!\n");
		return -EINVAL;
	}

	if (dpb_size < min_dpb_size) {
		DRM_ERROR("Invalid dpb_size in UVD message (%d / %d)!\n",
			  dpb_size, min_dpb_size);
		return -EINVAL;
	}

	buf_sizes[0x1] = dpb_size;
	buf_sizes[0x2] = image_size;
	buf_sizes[0x4] = min_ctx_size;
	return 0;
}

/**
 * amdgpu_uvd_cs_msg - handle UVD message
 *
 * @ctx: UVD parser context
 * @bo: buffer object containing the message
 * @offset: offset into the buffer object
 *
 * Peek into the UVD message and extract the session id.
 * Make sure that we don't open up too many sessions.
 */
static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
			     struct amdgpu_bo *bo, unsigned offset)
{
	struct amdgpu_device *adev = ctx->parser->adev;
	int32_t *msg, msg_type, handle;
	void *ptr;
	long r;
	int i;

	if (offset & 0x3F) {
		DRM_ERROR("UVD messages must be 64 byte aligned!\n");
		return -EINVAL;
	}

	r = amdgpu_bo_kmap(bo, &ptr);
	if (r) {
		DRM_ERROR("Failed mapping the UVD message (%ld)!\n", r);
		return r;
	}

	msg = ptr + offset;

	msg_type = msg[1];
	handle = msg[2];

	if (handle == 0) {
		DRM_ERROR("Invalid UVD handle!\n");
		amdgpu_bo_kunmap(bo);
		return -EINVAL;
	}

	switch (msg_type) {
	case 0:
		/* it's a create msg, calc image size (width * height) */
		amdgpu_bo_kunmap(bo);

		/* try to alloc a new handle */
		for (i = 0; i < adev->uvd.max_handles; ++i) {
			if (atomic_read(&adev->uvd.handles[i]) == handle) {
				DRM_ERROR("Handle 0x%x already in use!\n", handle);
				return -EINVAL;
			}

			if (!atomic_cmpxchg(&adev->uvd.handles[i], 0, handle)) {
				adev->uvd.filp[i] = ctx->parser->filp;
				return 0;
			}
		}

		DRM_ERROR("No more free UVD handles!\n");
		return -ENOSPC;

	case 1:
		/* it's a decode msg, calc buffer sizes */
		r = amdgpu_uvd_cs_msg_decode(adev, msg, ctx->buf_sizes);
		amdgpu_bo_kunmap(bo);
		if (r)
			return r;

		/* validate the handle */
		for (i = 0; i < adev->uvd.max_handles; ++i) {
			if (atomic_read(&adev->uvd.handles[i]) == handle) {
				if (adev->uvd.filp[i] != ctx->parser->filp) {
					DRM_ERROR("UVD handle collision detected!\n");
					return -EINVAL;
				}
				return 0;
			}
		}

		DRM_ERROR("Invalid UVD handle 0x%x!\n", handle);
		return -ENOENT;

	case 2:
		/* it's a destroy msg, free the handle */
		for (i = 0; i < adev->uvd.max_handles; ++i)
			atomic_cmpxchg(&adev->uvd.handles[i], handle, 0);
		amdgpu_bo_kunmap(bo);
		return 0;

	default:
		DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
		return -EINVAL;
	}
	BUG();
	return -EINVAL;
}

/**
 * amdgpu_uvd_cs_pass2 - second parsing round
 *
 * @ctx: UVD parser context
 *
 * Patch buffer addresses, make sure buffer sizes are correct.
 */
static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo;
	uint32_t cmd;
	uint64_t start, end;
	uint64_t addr = amdgpu_uvd_get_addr_from_ctx(ctx);
	int r;

	r = amdgpu_cs_find_mapping(ctx->parser, addr, &bo, &mapping);
	if (r) {
		DRM_ERROR("Can't find BO for addr 0x%08Lx\n", addr);
		return r;
	}

	start = amdgpu_bo_gpu_offset(bo);

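	/*
	 * mapping->start/last are in GPU pages; translate the VM address
	 * from the command stream into an offset inside the mapping and
	 * from there into an absolute address within the BO.
	 */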
	end = (mapping->last + 1 - mapping->start);
	end = end * AMDGPU_GPU_PAGE_SIZE + start;

	addr -= mapping->start * AMDGPU_GPU_PAGE_SIZE;
	start += addr;

	amdgpu_set_ib_value(ctx->parser, ctx->ib_idx, ctx->data0,
			    lower_32_bits(start));
	amdgpu_set_ib_value(ctx->parser, ctx->ib_idx, ctx->data1,
			    upper_32_bits(start));

	cmd = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->idx) >> 1;
	if (cmd < 0x4) {
		if ((end - start) < ctx->buf_sizes[cmd]) {
			DRM_ERROR("buffer (%d) too small (%d / %d)!\n", cmd,
				  (unsigned)(end - start),
				  ctx->buf_sizes[cmd]);
			return -EINVAL;
		}

	} else if (cmd == 0x206) {
		if ((end - start) < ctx->buf_sizes[4]) {
			DRM_ERROR("buffer (%d) too small (%d / %d)!\n", cmd,
				  (unsigned)(end - start),
				  ctx->buf_sizes[4]);
			return -EINVAL;
		}
	} else if ((cmd != 0x100) && (cmd != 0x204)) {
		DRM_ERROR("invalid UVD command %X!\n", cmd);
		return -EINVAL;
	}

	if (!ctx->parser->adev->uvd.address_64_bit) {
		if ((start >> 28) != ((end - 1) >> 28)) {
			DRM_ERROR("reloc %LX-%LX crossing 256MB boundary!\n",
				  start, end);
			return -EINVAL;
		}

		if ((cmd == 0 || cmd == 0x3) &&
		    (start >> 28) != (ctx->parser->adev->uvd.gpu_addr >> 28)) {
			DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n",
				  start, end);
			return -EINVAL;
		}
	}

	if (cmd == 0) {
		ctx->has_msg_cmd = true;
		r = amdgpu_uvd_cs_msg(ctx, bo, addr);
		if (r)
			return r;
	} else if (!ctx->has_msg_cmd) {
		DRM_ERROR("Message needed before other commands are sent!\n");
		return -EINVAL;
	}

	return 0;
}

/**
 * amdgpu_uvd_cs_reg - parse register writes
 *
 * @ctx: UVD parser context
 * @cb: callback function
 *
 * Parse the register writes, call cb on each complete command.
 */
static int amdgpu_uvd_cs_reg(struct amdgpu_uvd_cs_ctx *ctx,
			     int (*cb)(struct amdgpu_uvd_cs_ctx *ctx))
{
	struct amdgpu_ib *ib = &ctx->parser->job->ibs[ctx->ib_idx];
	int i, r;

	ctx->idx++;
	for (i = 0; i <= ctx->count; ++i) {
		unsigned reg = ctx->reg + i;

		if (ctx->idx >= ib->length_dw) {
			DRM_ERROR("Register command after end of CS!\n");
			return -EINVAL;
		}

		switch (reg) {
		case mmUVD_GPCOM_VCPU_DATA0:
			ctx->data0 = ctx->idx;
			break;
		case mmUVD_GPCOM_VCPU_DATA1:
			ctx->data1 = ctx->idx;
			break;
		case mmUVD_GPCOM_VCPU_CMD:
			r = cb(ctx);
			if (r)
				return r;
			break;
		case mmUVD_ENGINE_CNTL:
		case mmUVD_NO_OP:
			break;
		default:
			DRM_ERROR("Invalid reg 0x%X!\n", reg);
			return -EINVAL;
		}
		ctx->idx++;
	}
	return 0;
}

/**
 * amdgpu_uvd_cs_packets - parse UVD packets
 *
 * @ctx: UVD parser context
 * @cb: callback function
 *
 * Parse the command stream packets.
 */
static int amdgpu_uvd_cs_packets(struct amdgpu_uvd_cs_ctx *ctx,
				 int (*cb)(struct amdgpu_uvd_cs_ctx *ctx))
{
	struct amdgpu_ib *ib = &ctx->parser->job->ibs[ctx->ib_idx];
	int r;

	for (ctx->idx = 0 ; ctx->idx < ib->length_dw; ) {
		uint32_t cmd = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->idx);
		unsigned type = CP_PACKET_GET_TYPE(cmd);

		switch (type) {
		case PACKET_TYPE0:
			ctx->reg = CP_PACKET0_GET_REG(cmd);
			ctx->count = CP_PACKET_GET_COUNT(cmd);
			r = amdgpu_uvd_cs_reg(ctx, cb);
			if (r)
				return r;
			break;
		case PACKET_TYPE2:
			++ctx->idx;
			break;
		default:
			DRM_ERROR("Unknown packet type %d !\n", type);
			return -EINVAL;
		}
	}
	return 0;
}

/**
 * amdgpu_uvd_ring_parse_cs - UVD command submission parser
 *
 * @parser: Command submission parser context
 * @ib_idx: index of the IB to parse
 *
 * Parse the command stream, patch in addresses as necessary.
 */
int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
{
	struct amdgpu_uvd_cs_ctx ctx = {};
	unsigned buf_sizes[] = {
		[0x00000000]	=	2048,
		[0x00000001]	=	0xFFFFFFFF,
		[0x00000002]	=	0xFFFFFFFF,
		[0x00000003]	=	2048,
		[0x00000004]	=	0xFFFFFFFF,
	};
	struct amdgpu_ib *ib = &parser->job->ibs[ib_idx];
	int r;

	parser->job->vm = NULL;
	ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);

	if (ib->length_dw % 16) {
		DRM_ERROR("UVD IB length (%d) not 16 dwords aligned!\n",
			  ib->length_dw);
		return -EINVAL;
	}

	ctx.parser = parser;
	ctx.buf_sizes = buf_sizes;
	ctx.ib_idx = ib_idx;

	/* first round only required on chips without UVD 64 bit address support */
	if (!parser->adev->uvd.address_64_bit) {
		/* first round, make sure the buffers are actually in the UVD segment */
		r = amdgpu_uvd_cs_packets(&ctx, amdgpu_uvd_cs_pass1);
		if (r)
			return r;
	}

	/* second round, patch buffer addresses into the command stream */
	r = amdgpu_uvd_cs_packets(&ctx, amdgpu_uvd_cs_pass2);
	if (r)
		return r;

	if (!ctx.has_msg_cmd) {
		DRM_ERROR("UVD-IBs need a msg command!\n");
		return -EINVAL;
	}

	return 0;
}

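/**
 * amdgpu_uvd_send_msg - submit a UVD message buffer
 *
 * @ring: UVD ring to submit to
 * @bo: buffer object containing the message
 * @direct: submit directly to the ring or go through the scheduler
 * @fence: optional fence returned for the submission
 *
 * Validates the message BO into the UVD segment and writes a small IB
 * that points GPCOM_VCPU_DATA0/DATA1 at it before issuing the command.
 */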
static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
			       bool direct, struct dma_fence **fence)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	struct list_head head;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	struct amdgpu_device *adev = ring->adev;
	uint64_t addr;
	uint32_t data[4];
	int i, r;

	memset(&tv, 0, sizeof(tv));
	tv.bo = &bo->tbo;

	INIT_LIST_HEAD(&head);
	list_add(&tv.head, &head);

	r = ttm_eu_reserve_buffers(&ticket, &head, true, NULL);
	if (r)
		return r;

	if (!ring->adev->uvd.address_64_bit) {
		amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
		amdgpu_uvd_force_into_uvd_segment(bo);
	}

	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (r)
		goto err;

	r = amdgpu_job_alloc_with_ib(adev, 64, &job);
	if (r)
		goto err;

	if (adev->asic_type >= CHIP_VEGA10) {
		data[0] = PACKET0(mmUVD_GPCOM_VCPU_DATA0_VEGA10, 0);
		data[1] = PACKET0(mmUVD_GPCOM_VCPU_DATA1_VEGA10, 0);
		data[2] = PACKET0(mmUVD_GPCOM_VCPU_CMD_VEGA10, 0);
		data[3] = PACKET0(mmUVD_NO_OP_VEGA10, 0);
	} else {
		data[0] = PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0);
		data[1] = PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0);
		data[2] = PACKET0(mmUVD_GPCOM_VCPU_CMD, 0);
		data[3] = PACKET0(mmUVD_NO_OP, 0);
	}

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);
	ib->ptr[0] = data[0];
	ib->ptr[1] = addr;
	ib->ptr[2] = data[1];
	ib->ptr[3] = addr >> 32;
	ib->ptr[4] = data[2];
	ib->ptr[5] = 0;
	for (i = 6; i < 16; i += 2) {
		ib->ptr[i] = data[3];
		ib->ptr[i+1] = 0;
	}
	ib->length_dw = 16;

	if (direct) {
		r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
		job->fence = dma_fence_get(f);
		if (r)
			goto err_free;

		amdgpu_job_free(job);
	} else {
		r = amdgpu_job_submit(job, ring, &adev->uvd.entity,
				      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
		if (r)
			goto err_free;
	}

	ttm_eu_fence_buffer_objects(&ticket, &head, f);

	if (fence)
		*fence = dma_fence_get(f);
	amdgpu_bo_unref(&bo);
	dma_fence_put(f);

	return 0;

err_free:
	amdgpu_job_free(job);

err:
	ttm_eu_backoff_reservation(&ticket, &head);
	return r;
}

/*
 * Multiple fence commands without any stream commands in between can
 * crash the vcpu so just try to emit a dummy create/destroy msg to
 * avoid this.
 */
int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
			      struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo;
	uint32_t *msg;
	int r, i;

	r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
			     AMDGPU_GEM_DOMAIN_VRAM,
			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
			     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
			     NULL, NULL, 0, &bo);
	if (r)
		return r;

	r = amdgpu_bo_reserve(bo, false);
	if (r) {
		amdgpu_bo_unref(&bo);
		return r;
	}

	r = amdgpu_bo_kmap(bo, (void **)&msg);
	if (r) {
		amdgpu_bo_unreserve(bo);
		amdgpu_bo_unref(&bo);
		return r;
	}

	/* stitch together an UVD create msg */
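	/*
	 * dword 1 is the message type (0 == create) and dword 2 the session
	 * handle, matching what amdgpu_uvd_cs_msg() reads back; 0x780/0x440
	 * presumably describe a dummy 1920x1088 frame.
	 */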
	msg[0] = cpu_to_le32(0x00000de4);
	msg[1] = cpu_to_le32(0x00000000);
	msg[2] = cpu_to_le32(handle);
	msg[3] = cpu_to_le32(0x00000000);
	msg[4] = cpu_to_le32(0x00000000);
	msg[5] = cpu_to_le32(0x00000000);
	msg[6] = cpu_to_le32(0x00000000);
	msg[7] = cpu_to_le32(0x00000780);
	msg[8] = cpu_to_le32(0x00000440);
	msg[9] = cpu_to_le32(0x00000000);
	msg[10] = cpu_to_le32(0x01b37000);
	for (i = 11; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	amdgpu_bo_kunmap(bo);
	amdgpu_bo_unreserve(bo);

	return amdgpu_uvd_send_msg(ring, bo, true, fence);
}

int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
			       bool direct, struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo;
	uint32_t *msg;
	int r, i;

	r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
			     AMDGPU_GEM_DOMAIN_VRAM,
			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
			     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
			     NULL, NULL, 0, &bo);
	if (r)
		return r;

	r = amdgpu_bo_reserve(bo, false);
	if (r) {
		amdgpu_bo_unref(&bo);
		return r;
	}

	r = amdgpu_bo_kmap(bo, (void **)&msg);
	if (r) {
		amdgpu_bo_unreserve(bo);
		amdgpu_bo_unref(&bo);
		return r;
	}

	/* stitch together an UVD destroy msg (message type 2) */
	msg[0] = cpu_to_le32(0x00000de4);
	msg[1] = cpu_to_le32(0x00000002);
	msg[2] = cpu_to_le32(handle);
	msg[3] = cpu_to_le32(0x00000000);
	for (i = 4; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	amdgpu_bo_kunmap(bo);
	amdgpu_bo_unreserve(bo);

	return amdgpu_uvd_send_msg(ring, bo, direct, fence);
}

static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, uvd.idle_work.work);
	unsigned fences = amdgpu_fence_count_emitted(&adev->uvd.ring);

	if (amdgpu_sriov_vf(adev))
		return;

	if (fences == 0) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_uvd(adev, false);
		} else {
			amdgpu_asic_set_uvd_clocks(adev, 0, 0);
			/* shutdown the UVD block */
			amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
						     AMD_PG_STATE_GATE);
			amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
						     AMD_CG_STATE_GATE);
		}
	} else {
		schedule_delayed_work(&adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
	}
}

void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	bool set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work);

	if (amdgpu_sriov_vf(adev))
		return;

	if (set_clocks) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_uvd(adev, true);
		} else {
			amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
			amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
						     AMD_CG_STATE_UNGATE);
			amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
						     AMD_PG_STATE_UNGATE);
		}
	}
}

void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring)
{
	schedule_delayed_work(&ring->adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
}

/**
 * amdgpu_uvd_ring_test_ib - test ib execution
 *
 * @ring: amdgpu_ring pointer
 * @timeout: how long to wait for the fence, in jiffies
 *
 * Test if we can successfully execute an IB
 */
int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence;
	long r;

	r = amdgpu_uvd_get_create_msg(ring, 1, NULL);
	if (r) {
		DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
		goto error;
	}

	r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence);
	if (r) {
		DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
		goto error;
	}

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0) {
		DRM_ERROR("amdgpu: IB test timed out.\n");
		r = -ETIMEDOUT;
	} else if (r < 0) {
		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
	} else {
		DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
		r = 0;
	}

	dma_fence_put(fence);

error:
	return r;
}

/**
 * amdgpu_uvd_used_handles - returns used UVD handles
 *
 * @adev: amdgpu_device pointer
 *
 * Returns the number of UVD handles in use
 */
uint32_t amdgpu_uvd_used_handles(struct amdgpu_device *adev)
{
	unsigned i;
	uint32_t used_handles = 0;

	for (i = 0; i < adev->uvd.max_handles; ++i) {
		/*
		 * Handles can be freed in any order, and not
		 * necessarily linear. So we need to count
		 * all non-zero handles.
		 */
		if (atomic_read(&adev->uvd.handles[i]))
			used_handles++;
	}

	return used_handles;
}