/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <deathsimple@vodafone.de>
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_uvd.h"
#include "cikd.h"
#include "uvd/uvd_4_2_d.h"

/* 1 second timeout */
#define UVD_IDLE_TIMEOUT	msecs_to_jiffies(1000)

/* Firmware versions for VI */
#define FW_1_65_10	((1 << 24) | (65 << 16) | (10 << 8))
#define FW_1_87_11	((1 << 24) | (87 << 16) | (11 << 8))
#define FW_1_87_12	((1 << 24) | (87 << 16) | (12 << 8))
#define FW_1_37_15	((1 << 24) | (37 << 16) | (15 << 8))

/* Polaris10/11 firmware version */
#define FW_1_66_16	((1 << 24) | (66 << 16) | (16 << 8))
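/*
 * The FW_x_y_z macros above encode a firmware version x.y.z as
 * (major << 24) | (minor << 16) | (revision << 8); FW_1_66_16, for
 * example, is version 1.66.16. This keeps version checks as plain
 * integer comparisons.
 */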

/* Firmware Names */
#ifdef CONFIG_DRM_AMDGPU_CIK
#define FIRMWARE_BONAIRE	"radeon/bonaire_uvd.bin"
#define FIRMWARE_KABINI		"radeon/kabini_uvd.bin"
#define FIRMWARE_KAVERI		"radeon/kaveri_uvd.bin"
#define FIRMWARE_HAWAII		"radeon/hawaii_uvd.bin"
#define FIRMWARE_MULLINS	"radeon/mullins_uvd.bin"
#endif
#define FIRMWARE_TONGA		"amdgpu/tonga_uvd.bin"
#define FIRMWARE_CARRIZO	"amdgpu/carrizo_uvd.bin"
#define FIRMWARE_FIJI		"amdgpu/fiji_uvd.bin"
#define FIRMWARE_STONEY		"amdgpu/stoney_uvd.bin"
#define FIRMWARE_POLARIS10	"amdgpu/polaris10_uvd.bin"
#define FIRMWARE_POLARIS11	"amdgpu/polaris11_uvd.bin"
#define FIRMWARE_POLARIS12	"amdgpu/polaris12_uvd.bin"
#define FIRMWARE_VEGAM		"amdgpu/vegam_uvd.bin"

#define FIRMWARE_VEGA10		"amdgpu/vega10_uvd.bin"
#define FIRMWARE_VEGA12		"amdgpu/vega12_uvd.bin"
#define FIRMWARE_VEGA20		"amdgpu/vega20_uvd.bin"

#define mmUVD_GPCOM_VCPU_DATA0_VEGA10	(0x03c4 + 0x7e00)
#define mmUVD_GPCOM_VCPU_DATA1_VEGA10	(0x03c5 + 0x7e00)
#define mmUVD_GPCOM_VCPU_CMD_VEGA10	(0x03c3 + 0x7e00)
#define mmUVD_NO_OP_VEGA10		(0x03ff + 0x7e00)
#define mmUVD_ENGINE_CNTL_VEGA10	(0x03c6 + 0x7e00)
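/*
 * Vega10 moved the UVD register window; these appear to be the usual
 * GPCOM/NO_OP/ENGINE_CNTL registers with a 0x7e00 dword base offset
 * added (an assumption based on the pre-Vega register names used by
 * amdgpu_uvd_cs_reg() below).
 */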

/**
 * amdgpu_uvd_cs_ctx - Command submission parser context
 *
 * Used for emulating virtual memory support on UVD 4.2.
 */
struct amdgpu_uvd_cs_ctx {
	struct amdgpu_cs_parser *parser;
	unsigned reg, count;
	unsigned data0, data1;
	unsigned idx;
	unsigned ib_idx;

	/* does the IB have a msg command */
	bool has_msg_cmd;

	/* minimum buffer sizes */
	unsigned *buf_sizes;
};

#ifdef CONFIG_DRM_AMDGPU_CIK
MODULE_FIRMWARE(FIRMWARE_BONAIRE);
MODULE_FIRMWARE(FIRMWARE_KABINI);
MODULE_FIRMWARE(FIRMWARE_KAVERI);
MODULE_FIRMWARE(FIRMWARE_HAWAII);
MODULE_FIRMWARE(FIRMWARE_MULLINS);
#endif
MODULE_FIRMWARE(FIRMWARE_TONGA);
MODULE_FIRMWARE(FIRMWARE_CARRIZO);
MODULE_FIRMWARE(FIRMWARE_FIJI);
MODULE_FIRMWARE(FIRMWARE_STONEY);
MODULE_FIRMWARE(FIRMWARE_POLARIS10);
MODULE_FIRMWARE(FIRMWARE_POLARIS11);
MODULE_FIRMWARE(FIRMWARE_POLARIS12);
MODULE_FIRMWARE(FIRMWARE_VEGAM);

MODULE_FIRMWARE(FIRMWARE_VEGA10);
MODULE_FIRMWARE(FIRMWARE_VEGA12);
MODULE_FIRMWARE(FIRMWARE_VEGA20);

static void amdgpu_uvd_idle_work_handler(struct work_struct *work);

int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	struct drm_sched_rq *rq;
	unsigned long bo_size;
	const char *fw_name;
	const struct common_firmware_header *hdr;
	unsigned version_major, version_minor, family_id;
	int i, j, r;

	INIT_DELAYED_WORK(&adev->uvd.inst->idle_work, amdgpu_uvd_idle_work_handler);

	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
		fw_name = FIRMWARE_BONAIRE;
		break;
	case CHIP_KABINI:
		fw_name = FIRMWARE_KABINI;
		break;
	case CHIP_KAVERI:
		fw_name = FIRMWARE_KAVERI;
		break;
	case CHIP_HAWAII:
		fw_name = FIRMWARE_HAWAII;
		break;
	case CHIP_MULLINS:
		fw_name = FIRMWARE_MULLINS;
		break;
#endif
	case CHIP_TONGA:
		fw_name = FIRMWARE_TONGA;
		break;
	case CHIP_FIJI:
		fw_name = FIRMWARE_FIJI;
		break;
	case CHIP_CARRIZO:
		fw_name = FIRMWARE_CARRIZO;
		break;
	case CHIP_STONEY:
		fw_name = FIRMWARE_STONEY;
		break;
	case CHIP_POLARIS10:
		fw_name = FIRMWARE_POLARIS10;
		break;
	case CHIP_POLARIS11:
		fw_name = FIRMWARE_POLARIS11;
		break;
	case CHIP_POLARIS12:
		fw_name = FIRMWARE_POLARIS12;
		break;
	case CHIP_VEGA10:
		fw_name = FIRMWARE_VEGA10;
		break;
	case CHIP_VEGA12:
		fw_name = FIRMWARE_VEGA12;
		break;
	case CHIP_VEGAM:
		fw_name = FIRMWARE_VEGAM;
		break;
	case CHIP_VEGA20:
		fw_name = FIRMWARE_VEGA20;
		break;
	default:
		return -EINVAL;
	}

	r = request_firmware(&adev->uvd.fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "amdgpu_uvd: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	r = amdgpu_ucode_validate(adev->uvd.fw);
	if (r) {
		dev_err(adev->dev, "amdgpu_uvd: Can't validate firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->uvd.fw);
		adev->uvd.fw = NULL;
		return r;
	}

	/* Set the default UVD handles that the firmware can handle */
	adev->uvd.max_handles = AMDGPU_DEFAULT_UVD_HANDLES;

	hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
	family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
	version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
	version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
	DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n",
		version_major, version_minor, family_id);
	/*
	 * Limit the number of UVD handles depending on microcode major
	 * and minor versions. The first firmware version with support
	 * for 40 UVD instances is 1.80, so all subsequent versions
	 * should have the same support.
	 */
	if ((version_major > 0x01) ||
	    ((version_major == 0x01) && (version_minor >= 0x50)))
		adev->uvd.max_handles = AMDGPU_MAX_UVD_HANDLES;

	adev->uvd.fw_version = ((version_major << 24) | (version_minor << 16) |
				(family_id << 8));
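	/*
	 * Note: hdr->ucode_version stores the major version in bits 31:24,
	 * the minor version in bits 15:8 and the family id in bits 7:0;
	 * fw_version repacks them as major/minor/family so the result is
	 * ordered the same way as the FW_x_y_z macros above.
	 */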

	if ((adev->asic_type == CHIP_POLARIS10 ||
	     adev->asic_type == CHIP_POLARIS11) &&
	    (adev->uvd.fw_version < FW_1_66_16))
		DRM_ERROR("POLARIS10/11 UVD firmware version %hu.%hu is too old.\n",
			  version_major, version_minor);

	bo_size = AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE
		  + AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles;
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);

	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {

		r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
					    AMDGPU_GEM_DOMAIN_VRAM, &adev->uvd.inst[j].vcpu_bo,
					    &adev->uvd.inst[j].gpu_addr, &adev->uvd.inst[j].cpu_addr);
		if (r) {
			dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r);
			return r;
		}

		ring = &adev->uvd.inst[j].ring;
		rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
		r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst[j].entity,
					  rq, NULL);
		if (r != 0) {
			DRM_ERROR("Failed setting up UVD(%d) run queue.\n", j);
			return r;
		}

		for (i = 0; i < adev->uvd.max_handles; ++i) {
			atomic_set(&adev->uvd.inst[j].handles[i], 0);
			adev->uvd.inst[j].filp[i] = NULL;
		}
	}
	/* from uvd v5.0 HW addressing capacity increased to 64 bits */
	if (!amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_UVD, 5, 0))
		adev->uvd.address_64_bit = true;

	switch (adev->asic_type) {
	case CHIP_TONGA:
		adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_65_10;
		break;
	case CHIP_CARRIZO:
		adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_87_11;
		break;
	case CHIP_FIJI:
		adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_87_12;
		break;
	case CHIP_STONEY:
		adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_37_15;
		break;
	default:
		adev->uvd.use_ctx_buf = adev->asic_type >= CHIP_POLARIS10;
	}

	return 0;
}

int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
{
	int i, j;

	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
		kfree(adev->uvd.inst[j].saved_bo);

		drm_sched_entity_fini(&adev->uvd.inst[j].ring.sched, &adev->uvd.inst[j].entity);

		amdgpu_bo_free_kernel(&adev->uvd.inst[j].vcpu_bo,
				      &adev->uvd.inst[j].gpu_addr,
				      (void **)&adev->uvd.inst[j].cpu_addr);

		amdgpu_ring_fini(&adev->uvd.inst[j].ring);

		for (i = 0; i < AMDGPU_MAX_UVD_ENC_RINGS; ++i)
			amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]);
	}
	release_firmware(adev->uvd.fw);

	return 0;
}

int amdgpu_uvd_suspend(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;
	int i, j;

	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
		if (adev->uvd.inst[j].vcpu_bo == NULL)
			continue;

		cancel_delayed_work_sync(&adev->uvd.inst[j].idle_work);

		/* only valid for physical mode */
		if (adev->asic_type < CHIP_POLARIS10) {
			for (i = 0; i < adev->uvd.max_handles; ++i)
				if (atomic_read(&adev->uvd.inst[j].handles[i]))
					break;

			if (i == adev->uvd.max_handles)
				continue;
		}

		size = amdgpu_bo_size(adev->uvd.inst[j].vcpu_bo);
		ptr = adev->uvd.inst[j].cpu_addr;

		adev->uvd.inst[j].saved_bo = kmalloc(size, GFP_KERNEL);
		if (!adev->uvd.inst[j].saved_bo)
			return -ENOMEM;

		memcpy_fromio(adev->uvd.inst[j].saved_bo, ptr, size);
	}
	return 0;
}

int amdgpu_uvd_resume(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;
	int i;

	for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
		if (adev->uvd.inst[i].vcpu_bo == NULL)
			return -EINVAL;

		size = amdgpu_bo_size(adev->uvd.inst[i].vcpu_bo);
		ptr = adev->uvd.inst[i].cpu_addr;

		if (adev->uvd.inst[i].saved_bo != NULL) {
			memcpy_toio(ptr, adev->uvd.inst[i].saved_bo, size);
			kfree(adev->uvd.inst[i].saved_bo);
			adev->uvd.inst[i].saved_bo = NULL;
		} else {
			const struct common_firmware_header *hdr;
			unsigned offset;

			hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
			if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
				offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
				memcpy_toio(adev->uvd.inst[i].cpu_addr, adev->uvd.fw->data + offset,
					    le32_to_cpu(hdr->ucode_size_bytes));
				size -= le32_to_cpu(hdr->ucode_size_bytes);
				ptr += le32_to_cpu(hdr->ucode_size_bytes);
			}
			memset_io(ptr, 0, size);
			/* to restore uvd fence seq */
			amdgpu_fence_driver_force_completion(&adev->uvd.inst[i].ring);
		}
	}
	return 0;
}

void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
{
	struct amdgpu_ring *ring;
	int i, j, r;

	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
		ring = &adev->uvd.inst[j].ring;

		for (i = 0; i < adev->uvd.max_handles; ++i) {
			uint32_t handle = atomic_read(&adev->uvd.inst[j].handles[i]);
			if (handle != 0 && adev->uvd.inst[j].filp[i] == filp) {
				struct dma_fence *fence;

				r = amdgpu_uvd_get_destroy_msg(ring, handle,
							       false, &fence);
				if (r) {
					DRM_ERROR("Error destroying UVD(%d) %d!\n", j, r);
					continue;
				}

				dma_fence_wait(fence, false);
				dma_fence_put(fence);

				adev->uvd.inst[j].filp[i] = NULL;
				atomic_set(&adev->uvd.inst[j].handles[i], 0);
			}
		}
	}
}

static void amdgpu_uvd_force_into_uvd_segment(struct amdgpu_bo *abo)
{
	int i;
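	/*
	 * UVD before v5.0 cannot address the full GPU address space (see
	 * address_64_bit in amdgpu_uvd_sw_init()), so clamp every allowed
	 * placement to the first 256MB window that the boundary checks in
	 * amdgpu_uvd_cs_pass2() also enforce.
	 */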
	for (i = 0; i < abo->placement.num_placement; ++i) {
		abo->placements[i].fpfn = 0 >> PAGE_SHIFT;
		abo->placements[i].lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT;
	}
}

static u64 amdgpu_uvd_get_addr_from_ctx(struct amdgpu_uvd_cs_ctx *ctx)
{
	uint32_t lo, hi;
	uint64_t addr;

	lo = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data0);
	hi = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data1);
	addr = ((uint64_t)lo) | (((uint64_t)hi) << 32);

	return addr;
}

/**
 * amdgpu_uvd_cs_pass1 - first parsing round
 *
 * @ctx: UVD parser context
 *
 * Make sure UVD message and feedback buffers are in VRAM and
 * that nobody violates a 256MB boundary.
 */
static int amdgpu_uvd_cs_pass1(struct amdgpu_uvd_cs_ctx *ctx)
{
	struct ttm_operation_ctx tctx = { false, false };
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo;
	uint32_t cmd;
	uint64_t addr = amdgpu_uvd_get_addr_from_ctx(ctx);
	int r = 0;

	r = amdgpu_cs_find_mapping(ctx->parser, addr, &bo, &mapping);
	if (r) {
		DRM_ERROR("Can't find BO for addr 0x%08Lx\n", addr);
		return r;
	}

	if (!ctx->parser->adev->uvd.address_64_bit) {
		/* check if it's a message or feedback command */
		cmd = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->idx) >> 1;
		if (cmd == 0x0 || cmd == 0x3) {
			/* yes, force it into VRAM */
			uint32_t domain = AMDGPU_GEM_DOMAIN_VRAM;
			amdgpu_ttm_placement_from_domain(bo, domain);
		}
		amdgpu_uvd_force_into_uvd_segment(bo);

		r = ttm_bo_validate(&bo->tbo, &bo->placement, &tctx);
	}

	return r;
}

/**
 * amdgpu_uvd_cs_msg_decode - handle UVD decode message
 *
 * @msg: pointer to message structure
 * @buf_sizes: returned buffer sizes
 *
 * Peek into the decode message and calculate the necessary buffer sizes.
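 *
 * Illustrative example (values not taken from a real message): a
 * 1920x1080 H.264 level 4.1 stream gives width_in_mb = 120,
 * height_in_mb = 68 and fs_in_mb = 8160, so num_dpb_buffer becomes
 * 32768 / 8160 + 1 = 5 and min_dpb_size works out to roughly 23 MB
 * once the macroblock context and IT surface buffers are added.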
 */
static int amdgpu_uvd_cs_msg_decode(struct amdgpu_device *adev, uint32_t *msg,
				    unsigned buf_sizes[])
{
	unsigned stream_type = msg[4];
	unsigned width = msg[6];
	unsigned height = msg[7];
	unsigned dpb_size = msg[9];
	unsigned pitch = msg[28];
	unsigned level = msg[57];

	unsigned width_in_mb = width / 16;
	unsigned height_in_mb = ALIGN(height / 16, 2);
	unsigned fs_in_mb = width_in_mb * height_in_mb;

	unsigned image_size, tmp, min_dpb_size, num_dpb_buffer;
	unsigned min_ctx_size = ~0;

	image_size = width * height;
	image_size += image_size / 2;
	image_size = ALIGN(image_size, 1024);

	switch (stream_type) {
	case 0: /* H264 */
		switch (level) {
		case 30:
			num_dpb_buffer = 8100 / fs_in_mb;
			break;
		case 31:
			num_dpb_buffer = 18000 / fs_in_mb;
			break;
		case 32:
			num_dpb_buffer = 20480 / fs_in_mb;
			break;
		case 41:
			num_dpb_buffer = 32768 / fs_in_mb;
			break;
		case 42:
			num_dpb_buffer = 34816 / fs_in_mb;
			break;
		case 50:
			num_dpb_buffer = 110400 / fs_in_mb;
			break;
		case 51:
			num_dpb_buffer = 184320 / fs_in_mb;
			break;
		default:
			num_dpb_buffer = 184320 / fs_in_mb;
			break;
		}
		num_dpb_buffer++;
		if (num_dpb_buffer > 17)
			num_dpb_buffer = 17;

		/* reference picture buffer */
		min_dpb_size = image_size * num_dpb_buffer;

		/* macroblock context buffer */
		min_dpb_size += width_in_mb * height_in_mb * num_dpb_buffer * 192;

		/* IT surface buffer */
		min_dpb_size += width_in_mb * height_in_mb * 32;
		break;

	case 1: /* VC1 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;

		/* CONTEXT_BUFFER */
		min_dpb_size += width_in_mb * height_in_mb * 128;

		/* IT surface buffer */
		min_dpb_size += width_in_mb * 64;

		/* DB surface buffer */
		min_dpb_size += width_in_mb * 128;

		/* BP */
		tmp = max(width_in_mb, height_in_mb);
		min_dpb_size += ALIGN(tmp * 7 * 16, 64);
		break;

	case 3: /* MPEG2 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;
		break;

	case 4: /* MPEG4 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;

		/* CM */
		min_dpb_size += width_in_mb * height_in_mb * 64;

		/* IT surface buffer */
		min_dpb_size += ALIGN(width_in_mb * height_in_mb * 32, 64);
		break;

	case 7: /* H264 Perf */
		switch (level) {
		case 30:
			num_dpb_buffer = 8100 / fs_in_mb;
			break;
		case 31:
			num_dpb_buffer = 18000 / fs_in_mb;
			break;
		case 32:
			num_dpb_buffer = 20480 / fs_in_mb;
			break;
		case 41:
			num_dpb_buffer = 32768 / fs_in_mb;
			break;
		case 42:
			num_dpb_buffer = 34816 / fs_in_mb;
			break;
		case 50:
			num_dpb_buffer = 110400 / fs_in_mb;
			break;
		case 51:
			num_dpb_buffer = 184320 / fs_in_mb;
			break;
		default:
			num_dpb_buffer = 184320 / fs_in_mb;
			break;
		}
		num_dpb_buffer++;
		if (num_dpb_buffer > 17)
			num_dpb_buffer = 17;

		/* reference picture buffer */
		min_dpb_size = image_size * num_dpb_buffer;

		if (!adev->uvd.use_ctx_buf) {
			/* macroblock context buffer */
			min_dpb_size +=
				width_in_mb * height_in_mb * num_dpb_buffer * 192;

			/* IT surface buffer */
			min_dpb_size += width_in_mb * height_in_mb * 32;
		} else {
			/* macroblock context buffer */
			min_ctx_size =
				width_in_mb * height_in_mb * num_dpb_buffer * 192;
		}
		break;

	case 8: /* MJPEG */
		min_dpb_size = 0;
		break;

	case 16: /* H265 */
		image_size = (ALIGN(width, 16) * ALIGN(height, 16) * 3) / 2;
		image_size = ALIGN(image_size, 256);

		num_dpb_buffer = (le32_to_cpu(msg[59]) & 0xff) + 2;
		min_dpb_size = image_size * num_dpb_buffer;
		min_ctx_size = ((width + 255) / 16) * ((height + 255) / 16)
			       * 16 * num_dpb_buffer + 52 * 1024;
		break;

	default:
		DRM_ERROR("UVD codec not handled %d!\n", stream_type);
		return -EINVAL;
	}

	if (width > pitch) {
		DRM_ERROR("Invalid UVD decoding target pitch!\n");
		return -EINVAL;
	}

	if (dpb_size < min_dpb_size) {
		DRM_ERROR("Invalid dpb_size in UVD message (%d / %d)!\n",
			  dpb_size, min_dpb_size);
		return -EINVAL;
	}

	buf_sizes[0x1] = dpb_size;
	buf_sizes[0x2] = image_size;
	buf_sizes[0x4] = min_ctx_size;
	return 0;
}

/**
 * amdgpu_uvd_cs_msg - handle UVD message
 *
 * @ctx: UVD parser context
 * @bo: buffer object containing the message
 * @offset: offset into the buffer object
 *
 * Peek into the UVD message and extract the session id.
 * Make sure that we don't open up too many sessions.
 */
static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
			     struct amdgpu_bo *bo, unsigned offset)
{
	struct amdgpu_device *adev = ctx->parser->adev;
	int32_t *msg, msg_type, handle;
	void *ptr;
	long r;
	int i;
	uint32_t ip_instance = ctx->parser->job->ring->me;

	if (offset & 0x3F) {
		DRM_ERROR("UVD(%d) messages must be 64 byte aligned!\n", ip_instance);
		return -EINVAL;
	}

	r = amdgpu_bo_kmap(bo, &ptr);
	if (r) {
		DRM_ERROR("Failed mapping the UVD(%d) message (%ld)!\n", ip_instance, r);
		return r;
	}

	msg = ptr + offset;

	msg_type = msg[1];
	handle = msg[2];

	if (handle == 0) {
		DRM_ERROR("Invalid UVD(%d) handle!\n", ip_instance);
		return -EINVAL;
	}

	switch (msg_type) {
	case 0:
		/* it's a create msg, calc image size (width * height) */
		amdgpu_bo_kunmap(bo);

		/* try to alloc a new handle */
		for (i = 0; i < adev->uvd.max_handles; ++i) {
			if (atomic_read(&adev->uvd.inst[ip_instance].handles[i]) == handle) {
				DRM_ERROR("(%d)Handle 0x%x already in use!\n", ip_instance, handle);
				return -EINVAL;
			}

			if (!atomic_cmpxchg(&adev->uvd.inst[ip_instance].handles[i], 0, handle)) {
				adev->uvd.inst[ip_instance].filp[i] = ctx->parser->filp;
				return 0;
			}
		}

		DRM_ERROR("No more free UVD(%d) handles!\n", ip_instance);
		return -ENOSPC;

	case 1:
		/* it's a decode msg, calc buffer sizes */
		r = amdgpu_uvd_cs_msg_decode(adev, msg, ctx->buf_sizes);
		amdgpu_bo_kunmap(bo);
		if (r)
			return r;

		/* validate the handle */
		for (i = 0; i < adev->uvd.max_handles; ++i) {
			if (atomic_read(&adev->uvd.inst[ip_instance].handles[i]) == handle) {
				if (adev->uvd.inst[ip_instance].filp[i] != ctx->parser->filp) {
					DRM_ERROR("UVD(%d) handle collision detected!\n", ip_instance);
					return -EINVAL;
				}
				return 0;
			}
		}

		DRM_ERROR("Invalid UVD(%d) handle 0x%x!\n", ip_instance, handle);
		return -ENOENT;

	case 2:
		/* it's a destroy msg, free the handle */
		for (i = 0; i < adev->uvd.max_handles; ++i)
			atomic_cmpxchg(&adev->uvd.inst[ip_instance].handles[i], handle, 0);
		amdgpu_bo_kunmap(bo);
		return 0;

	default:
		DRM_ERROR("Illegal UVD(%d) message type (%d)!\n", ip_instance, msg_type);
		return -EINVAL;
	}
	BUG();
	return -EINVAL;
}

/**
 * amdgpu_uvd_cs_pass2 - second parsing round
 *
 * @ctx: UVD parser context
 *
 * Patch buffer addresses, make sure buffer sizes are correct.
 */
static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo;
	uint32_t cmd;
	uint64_t start, end;
	uint64_t addr = amdgpu_uvd_get_addr_from_ctx(ctx);
	int r;

	r = amdgpu_cs_find_mapping(ctx->parser, addr, &bo, &mapping);
	if (r) {
		DRM_ERROR("Can't find BO for addr 0x%08Lx\n", addr);
		return r;
	}

	start = amdgpu_bo_gpu_offset(bo);

	end = (mapping->last + 1 - mapping->start);
	end = end * AMDGPU_GPU_PAGE_SIZE + start;

	addr -= mapping->start * AMDGPU_GPU_PAGE_SIZE;
	start += addr;

	amdgpu_set_ib_value(ctx->parser, ctx->ib_idx, ctx->data0,
			    lower_32_bits(start));
	amdgpu_set_ib_value(ctx->parser, ctx->ib_idx, ctx->data1,
			    upper_32_bits(start));

	cmd = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->idx) >> 1;
	if (cmd < 0x4) {
		if ((end - start) < ctx->buf_sizes[cmd]) {
799 DRM_ERROR("buffer (%d) to small (%d / %d)!\n", cmd,
				  (unsigned)(end - start),
				  ctx->buf_sizes[cmd]);
			return -EINVAL;
		}

	} else if (cmd == 0x206) {
		if ((end - start) < ctx->buf_sizes[4]) {
807 DRM_ERROR("buffer (%d) to small (%d / %d)!\n", cmd,
				  (unsigned)(end - start),
				  ctx->buf_sizes[4]);
			return -EINVAL;
		}
	} else if ((cmd != 0x100) && (cmd != 0x204)) {
		DRM_ERROR("invalid UVD command %X!\n", cmd);
		return -EINVAL;
	}

	if (!ctx->parser->adev->uvd.address_64_bit) {
		if ((start >> 28) != ((end - 1) >> 28)) {
			DRM_ERROR("reloc %LX-%LX crossing 256MB boundary!\n",
				  start, end);
			return -EINVAL;
		}

		if ((cmd == 0 || cmd == 0x3) &&
		    (start >> 28) != (ctx->parser->adev->uvd.inst->gpu_addr >> 28)) {
			DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n",
				  start, end);
			return -EINVAL;
		}
	}

	if (cmd == 0) {
		ctx->has_msg_cmd = true;
		r = amdgpu_uvd_cs_msg(ctx, bo, addr);
		if (r)
			return r;
	} else if (!ctx->has_msg_cmd) {
838 DRM_ERROR("Message needed before other commands are send!\n");
		return -EINVAL;
	}

	return 0;
}

/**
 * amdgpu_uvd_cs_reg - parse register writes
 *
 * @ctx: UVD parser context
 * @cb: callback function
 *
 * Parse the register writes, call cb on each complete command.
 */
static int amdgpu_uvd_cs_reg(struct amdgpu_uvd_cs_ctx *ctx,
			     int (*cb)(struct amdgpu_uvd_cs_ctx *ctx))
{
	struct amdgpu_ib *ib = &ctx->parser->job->ibs[ctx->ib_idx];
	int i, r;

	ctx->idx++;
	for (i = 0; i <= ctx->count; ++i) {
		unsigned reg = ctx->reg + i;

		if (ctx->idx >= ib->length_dw) {
			DRM_ERROR("Register command after end of CS!\n");
			return -EINVAL;
		}

		switch (reg) {
		case mmUVD_GPCOM_VCPU_DATA0:
			ctx->data0 = ctx->idx;
			break;
		case mmUVD_GPCOM_VCPU_DATA1:
			ctx->data1 = ctx->idx;
			break;
		case mmUVD_GPCOM_VCPU_CMD:
			r = cb(ctx);
			if (r)
				return r;
			break;
		case mmUVD_ENGINE_CNTL:
		case mmUVD_NO_OP:
			break;
		default:
			DRM_ERROR("Invalid reg 0x%X!\n", reg);
			return -EINVAL;
		}
		ctx->idx++;
	}
	return 0;
}

/**
 * amdgpu_uvd_cs_packets - parse UVD packets
 *
 * @ctx: UVD parser context
 * @cb: callback function
 *
 * Parse the command stream packets.
 */
static int amdgpu_uvd_cs_packets(struct amdgpu_uvd_cs_ctx *ctx,
				 int (*cb)(struct amdgpu_uvd_cs_ctx *ctx))
{
	struct amdgpu_ib *ib = &ctx->parser->job->ibs[ctx->ib_idx];
	int r;

	for (ctx->idx = 0; ctx->idx < ib->length_dw; ) {
		uint32_t cmd = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->idx);
		unsigned type = CP_PACKET_GET_TYPE(cmd);
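		/*
		 * UVD IBs use only two packet kinds: type-0 packets carry a
		 * base register plus a count of consecutive register writes
		 * (handled by amdgpu_uvd_cs_reg()), while type-2 packets are
		 * single-dword padding NOPs.
		 */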
		switch (type) {
		case PACKET_TYPE0:
			ctx->reg = CP_PACKET0_GET_REG(cmd);
			ctx->count = CP_PACKET_GET_COUNT(cmd);
			r = amdgpu_uvd_cs_reg(ctx, cb);
			if (r)
				return r;
			break;
		case PACKET_TYPE2:
			++ctx->idx;
			break;
		default:
			DRM_ERROR("Unknown packet type %d !\n", type);
			return -EINVAL;
		}
	}
	return 0;
}

/**
 * amdgpu_uvd_ring_parse_cs - UVD command submission parser
 *
 * @parser: Command submission parser context
 *
 * Parse the command stream, patch in addresses as necessary.
 */
int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
{
	struct amdgpu_uvd_cs_ctx ctx = {};
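	/*
	 * Minimum buffer sizes indexed by command value: 0 (message) and
	 * 3 (feedback) are fixed at 2048 bytes, while 1 (DPB), 2 (decoding
	 * target) and 4 (session context) start out unknown and are filled
	 * in by amdgpu_uvd_cs_msg_decode().
	 */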
	unsigned buf_sizes[] = {
		[0x00000000] = 2048,
		[0x00000001] = 0xFFFFFFFF,
		[0x00000002] = 0xFFFFFFFF,
		[0x00000003] = 2048,
		[0x00000004] = 0xFFFFFFFF,
	};
	struct amdgpu_ib *ib = &parser->job->ibs[ib_idx];
	int r;

	parser->job->vm = NULL;
	ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);

	if (ib->length_dw % 16) {
		DRM_ERROR("UVD IB length (%d) not 16 dwords aligned!\n",
			  ib->length_dw);
		return -EINVAL;
	}

	ctx.parser = parser;
	ctx.buf_sizes = buf_sizes;
	ctx.ib_idx = ib_idx;

	/* first round only required on chips without UVD 64 bit address support */
	if (!parser->adev->uvd.address_64_bit) {
		/* first round, make sure the buffers are actually in the UVD segment */
		r = amdgpu_uvd_cs_packets(&ctx, amdgpu_uvd_cs_pass1);
		if (r)
			return r;
	}

	/* second round, patch buffer addresses into the command stream */
	r = amdgpu_uvd_cs_packets(&ctx, amdgpu_uvd_cs_pass2);
	if (r)
		return r;

	if (!ctx.has_msg_cmd) {
		DRM_ERROR("UVD-IBs need a msg command!\n");
		return -EINVAL;
	}

	return 0;
}

static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
			       bool direct, struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct dma_fence *f = NULL;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	uint32_t data[4];
	uint64_t addr;
	long r;
	int i;

	amdgpu_bo_kunmap(bo);
	amdgpu_bo_unpin(bo);

	if (!ring->adev->uvd.address_64_bit) {
		struct ttm_operation_ctx ctx = { true, false };

		amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
		amdgpu_uvd_force_into_uvd_segment(bo);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		if (r)
			goto err;
	}

	r = amdgpu_job_alloc_with_ib(adev, 64, &job);
	if (r)
		goto err;

	if (adev->asic_type >= CHIP_VEGA10) {
		data[0] = PACKET0(mmUVD_GPCOM_VCPU_DATA0_VEGA10, 0);
		data[1] = PACKET0(mmUVD_GPCOM_VCPU_DATA1_VEGA10, 0);
		data[2] = PACKET0(mmUVD_GPCOM_VCPU_CMD_VEGA10, 0);
		data[3] = PACKET0(mmUVD_NO_OP_VEGA10, 0);
	} else {
		data[0] = PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0);
		data[1] = PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0);
		data[2] = PACKET0(mmUVD_GPCOM_VCPU_CMD, 0);
		data[3] = PACKET0(mmUVD_NO_OP, 0);
	}

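	/*
	 * Build a minimal 16-dword IB: point GPCOM_VCPU_DATA0/1 at the
	 * message BO, issue the command through GPCOM_VCPU_CMD, and pad
	 * the remaining dwords with NO_OP register writes.
	 */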
	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);
	ib->ptr[0] = data[0];
	ib->ptr[1] = addr;
	ib->ptr[2] = data[1];
	ib->ptr[3] = addr >> 32;
	ib->ptr[4] = data[2];
	ib->ptr[5] = 0;
	for (i = 6; i < 16; i += 2) {
		ib->ptr[i] = data[3];
		ib->ptr[i+1] = 0;
	}
	ib->length_dw = 16;

	if (direct) {
		r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
							true, false,
							msecs_to_jiffies(10));
		if (r == 0)
			r = -ETIMEDOUT;
		if (r < 0)
			goto err_free;

		r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
		job->fence = dma_fence_get(f);
		if (r)
			goto err_free;

		amdgpu_job_free(job);
	} else {
		r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.resv,
				     AMDGPU_FENCE_OWNER_UNDEFINED, false);
		if (r)
			goto err_free;

		r = amdgpu_job_submit(job, ring, &adev->uvd.inst[ring->me].entity,
				      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
		if (r)
			goto err_free;
	}

	amdgpu_bo_fence(bo, f, false);
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err_free:
	amdgpu_job_free(job);

err:
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);
	return r;
}

/* multiple fence commands without any stream commands in between can
   crash the vcpu so just try to emit a dummy create/destroy msg to
   avoid this */
int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
			      struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo = NULL;
	uint32_t *msg;
	int r, i;

	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, NULL, (void **)&msg);
	if (r)
		return r;

	/* stitch together an UVD create msg */
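	/*
	 * Message layout (as parsed by amdgpu_uvd_cs_msg() above): msg[1]
	 * is the message type (0 == create) and msg[2] the session handle.
	 * msg[7]/msg[8] appear to advertise a 1920x1088 (0x780 x 0x440)
	 * decode target for this dummy session.
	 */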
	msg[0] = cpu_to_le32(0x00000de4);
	msg[1] = cpu_to_le32(0x00000000);
	msg[2] = cpu_to_le32(handle);
	msg[3] = cpu_to_le32(0x00000000);
	msg[4] = cpu_to_le32(0x00000000);
	msg[5] = cpu_to_le32(0x00000000);
	msg[6] = cpu_to_le32(0x00000000);
	msg[7] = cpu_to_le32(0x00000780);
	msg[8] = cpu_to_le32(0x00000440);
	msg[9] = cpu_to_le32(0x00000000);
	msg[10] = cpu_to_le32(0x01b37000);
	for (i = 11; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	return amdgpu_uvd_send_msg(ring, bo, true, fence);
}

int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
			       bool direct, struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo = NULL;
	uint32_t *msg;
	int r, i;

	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, NULL, (void **)&msg);
	if (r)
		return r;

	/* stitch together an UVD destroy msg */
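	/*
	 * msg[1] == 2 marks this as a destroy message, matching the
	 * msg_type handled by amdgpu_uvd_cs_msg().
	 */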
	msg[0] = cpu_to_le32(0x00000de4);
	msg[1] = cpu_to_le32(0x00000002);
	msg[2] = cpu_to_le32(handle);
	msg[3] = cpu_to_le32(0x00000000);
	for (i = 4; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	return amdgpu_uvd_send_msg(ring, bo, direct, fence);
}

static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, uvd.inst->idle_work.work);
	unsigned fences = amdgpu_fence_count_emitted(&adev->uvd.inst->ring);

	if (fences == 0) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_uvd(adev, false);
		} else {
			amdgpu_asic_set_uvd_clocks(adev, 0, 0);
			/* shutdown the UVD block */
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
							       AMD_PG_STATE_GATE);
			amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
							       AMD_CG_STATE_GATE);
		}
	} else {
		schedule_delayed_work(&adev->uvd.inst->idle_work, UVD_IDLE_TIMEOUT);
	}
}

void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	bool set_clocks;

	if (amdgpu_sriov_vf(adev))
		return;

	set_clocks = !cancel_delayed_work_sync(&adev->uvd.inst->idle_work);
	if (set_clocks) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_uvd(adev, true);
		} else {
			amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
			amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
							       AMD_CG_STATE_UNGATE);
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
							       AMD_PG_STATE_UNGATE);
		}
	}
}

void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring)
{
	if (!amdgpu_sriov_vf(ring->adev))
		schedule_delayed_work(&ring->adev->uvd.inst->idle_work, UVD_IDLE_TIMEOUT);
}

/**
 * amdgpu_uvd_ring_test_ib - test ib execution
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully execute an IB
 */
int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence;
	long r;
	uint32_t ip_instance = ring->me;

	r = amdgpu_uvd_get_create_msg(ring, 1, NULL);
	if (r) {
		DRM_ERROR("amdgpu: (%d)failed to get create msg (%ld).\n", ip_instance, r);
		goto error;
	}

	r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence);
	if (r) {
		DRM_ERROR("amdgpu: (%d)failed to get destroy ib (%ld).\n", ip_instance, r);
		goto error;
	}

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0) {
		DRM_ERROR("amdgpu: (%d)IB test timed out.\n", ip_instance);
		r = -ETIMEDOUT;
	} else if (r < 0) {
		DRM_ERROR("amdgpu: (%d)fence wait failed (%ld).\n", ip_instance, r);
	} else {
		DRM_DEBUG("ib test on (%d)ring %d succeeded\n", ip_instance, ring->idx);
		r = 0;
	}

	dma_fence_put(fence);

error:
	return r;
}

/**
 * amdgpu_uvd_used_handles - returns used UVD handles
 *
 * @adev: amdgpu_device pointer
 *
 * Returns the number of UVD handles in use
 */
uint32_t amdgpu_uvd_used_handles(struct amdgpu_device *adev)
{
	unsigned i;
	uint32_t used_handles = 0;

	for (i = 0; i < adev->uvd.max_handles; ++i) {
		/*
		 * Handles can be freed in any order, and not
		 * necessarily linear. So we need to count
		 * all non-zero handles.
		 */
		if (atomic_read(&adev->uvd.inst->handles[i]))
			used_handles++;
	}

	return used_handles;
}