/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <deathsimple@vodafone.de>
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_uvd.h"
#include "cikd.h"
#include "uvd/uvd_4_2_d.h"

/* 1 second timeout */
#define UVD_IDLE_TIMEOUT	msecs_to_jiffies(1000)

/* Firmware versions for VI */
#define FW_1_65_10	((1 << 24) | (65 << 16) | (10 << 8))
#define FW_1_87_11	((1 << 24) | (87 << 16) | (11 << 8))
#define FW_1_87_12	((1 << 24) | (87 << 16) | (12 << 8))
#define FW_1_37_15	((1 << 24) | (37 << 16) | (15 << 8))

/* Polaris10/11 firmware version */
#define FW_1_66_16	((1 << 24) | (66 << 16) | (16 << 8))
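/*
 * These constants use the same packing as adev->uvd.fw_version below,
 * (major << 24) | (minor << 16) | (family_id << 8); e.g. FW_1_66_16
 * evaluates to 0x01421000 and can be compared against fw_version directly.
 */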

/* Firmware Names */
#ifdef CONFIG_DRM_AMDGPU_CIK
#define FIRMWARE_BONAIRE	"radeon/bonaire_uvd.bin"
#define FIRMWARE_KABINI		"radeon/kabini_uvd.bin"
#define FIRMWARE_KAVERI		"radeon/kaveri_uvd.bin"
#define FIRMWARE_HAWAII		"radeon/hawaii_uvd.bin"
#define FIRMWARE_MULLINS	"radeon/mullins_uvd.bin"
#endif
#define FIRMWARE_TONGA		"amdgpu/tonga_uvd.bin"
#define FIRMWARE_CARRIZO	"amdgpu/carrizo_uvd.bin"
#define FIRMWARE_FIJI		"amdgpu/fiji_uvd.bin"
#define FIRMWARE_STONEY		"amdgpu/stoney_uvd.bin"
#define FIRMWARE_POLARIS10	"amdgpu/polaris10_uvd.bin"
#define FIRMWARE_POLARIS11	"amdgpu/polaris11_uvd.bin"
#define FIRMWARE_POLARIS12	"amdgpu/polaris12_uvd.bin"
#define FIRMWARE_VEGAM		"amdgpu/vegam_uvd.bin"

#define FIRMWARE_VEGA10		"amdgpu/vega10_uvd.bin"
#define FIRMWARE_VEGA12		"amdgpu/vega12_uvd.bin"
#define FIRMWARE_VEGA20		"amdgpu/vega20_uvd.bin"

/* These are common relative offsets for all asics, from uvd_7_0_offset.h. */
#define UVD_GPCOM_VCPU_CMD	0x03c3
#define UVD_GPCOM_VCPU_DATA0	0x03c4
#define UVD_GPCOM_VCPU_DATA1	0x03c5
#define UVD_NO_OP		0x03ff
#define UVD_BASE_SI		0x3800
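/*
 * These dword offsets are combined with a per-ASIC register base in
 * amdgpu_uvd_send_msg() below to build the PACKET0 writes that hand a
 * message buffer address to the VCPU.
 */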

/**
 * amdgpu_uvd_cs_ctx - Command submission parser context
 *
 * Used for emulating virtual memory support on UVD 4.2.
 */
struct amdgpu_uvd_cs_ctx {
	struct amdgpu_cs_parser *parser;
	unsigned reg, count;
	/* IB indices of the dwords written to UVD_GPCOM_VCPU_DATA0/DATA1 */
	unsigned data0, data1;
	unsigned idx;
	unsigned ib_idx;

	/* does the IB have a msg command */
	bool has_msg_cmd;

	/* minimum buffer sizes */
	unsigned *buf_sizes;
};

#ifdef CONFIG_DRM_AMDGPU_CIK
MODULE_FIRMWARE(FIRMWARE_BONAIRE);
MODULE_FIRMWARE(FIRMWARE_KABINI);
MODULE_FIRMWARE(FIRMWARE_KAVERI);
MODULE_FIRMWARE(FIRMWARE_HAWAII);
MODULE_FIRMWARE(FIRMWARE_MULLINS);
#endif
MODULE_FIRMWARE(FIRMWARE_TONGA);
MODULE_FIRMWARE(FIRMWARE_CARRIZO);
MODULE_FIRMWARE(FIRMWARE_FIJI);
MODULE_FIRMWARE(FIRMWARE_STONEY);
MODULE_FIRMWARE(FIRMWARE_POLARIS10);
MODULE_FIRMWARE(FIRMWARE_POLARIS11);
MODULE_FIRMWARE(FIRMWARE_POLARIS12);
MODULE_FIRMWARE(FIRMWARE_VEGAM);

MODULE_FIRMWARE(FIRMWARE_VEGA10);
MODULE_FIRMWARE(FIRMWARE_VEGA12);
MODULE_FIRMWARE(FIRMWARE_VEGA20);

static void amdgpu_uvd_idle_work_handler(struct work_struct *work);

int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	struct drm_sched_rq *rq;
	unsigned long bo_size;
	const char *fw_name;
	const struct common_firmware_header *hdr;
	unsigned version_major, version_minor, family_id;
	int i, j, r;

	INIT_DELAYED_WORK(&adev->uvd.inst->idle_work, amdgpu_uvd_idle_work_handler);

	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
		fw_name = FIRMWARE_BONAIRE;
		break;
	case CHIP_KABINI:
		fw_name = FIRMWARE_KABINI;
		break;
	case CHIP_KAVERI:
		fw_name = FIRMWARE_KAVERI;
		break;
	case CHIP_HAWAII:
		fw_name = FIRMWARE_HAWAII;
		break;
	case CHIP_MULLINS:
		fw_name = FIRMWARE_MULLINS;
		break;
#endif
	case CHIP_TONGA:
		fw_name = FIRMWARE_TONGA;
		break;
	case CHIP_FIJI:
		fw_name = FIRMWARE_FIJI;
		break;
	case CHIP_CARRIZO:
		fw_name = FIRMWARE_CARRIZO;
		break;
	case CHIP_STONEY:
		fw_name = FIRMWARE_STONEY;
		break;
	case CHIP_POLARIS10:
		fw_name = FIRMWARE_POLARIS10;
		break;
	case CHIP_POLARIS11:
		fw_name = FIRMWARE_POLARIS11;
		break;
	case CHIP_POLARIS12:
		fw_name = FIRMWARE_POLARIS12;
		break;
	case CHIP_VEGA10:
		fw_name = FIRMWARE_VEGA10;
		break;
	case CHIP_VEGA12:
		fw_name = FIRMWARE_VEGA12;
		break;
	case CHIP_VEGAM:
		fw_name = FIRMWARE_VEGAM;
		break;
	case CHIP_VEGA20:
		fw_name = FIRMWARE_VEGA20;
		break;
	default:
		return -EINVAL;
	}

	r = request_firmware(&adev->uvd.fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "amdgpu_uvd: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	r = amdgpu_ucode_validate(adev->uvd.fw);
	if (r) {
		dev_err(adev->dev, "amdgpu_uvd: Can't validate firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->uvd.fw);
		adev->uvd.fw = NULL;
		return r;
	}

	/* Set the default number of UVD handles the firmware can manage */
	adev->uvd.max_handles = AMDGPU_DEFAULT_UVD_HANDLES;

	hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
	family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
	version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
	version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
	DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n",
		version_major, version_minor, family_id);

	/*
	 * Limit the number of UVD handles depending on microcode major
	 * and minor versions. The first firmware version to support 40
	 * UVD handles is 1.80, so all subsequent versions have the same
	 * support.
	 */
	if ((version_major > 0x01) ||
	    ((version_major == 0x01) && (version_minor >= 0x50)))
		adev->uvd.max_handles = AMDGPU_MAX_UVD_HANDLES;

	adev->uvd.fw_version = ((version_major << 24) | (version_minor << 16) |
				(family_id << 8));

	if ((adev->asic_type == CHIP_POLARIS10 ||
	     adev->asic_type == CHIP_POLARIS11) &&
	    (adev->uvd.fw_version < FW_1_66_16))
		DRM_ERROR("POLARIS10/11 UVD firmware version %hu.%hu is too old.\n",
			  version_major, version_minor);

	bo_size = AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE
		  + AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles;
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);

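	/*
	 * The VCPU BO thus holds the UVD stack and heap, one session block
	 * per handle and, when the firmware is not loaded through PSP, the
	 * page-aligned firmware image itself (plus 8 bytes).
	 */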
	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {

		r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
					    AMDGPU_GEM_DOMAIN_VRAM, &adev->uvd.inst[j].vcpu_bo,
					    &adev->uvd.inst[j].gpu_addr, &adev->uvd.inst[j].cpu_addr);
		if (r) {
			dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r);
			return r;
		}

		ring = &adev->uvd.inst[j].ring;
		rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
		r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst[j].entity,
					  rq, NULL);
		if (r != 0) {
			DRM_ERROR("Failed setting up UVD(%d) run queue.\n", j);
			return r;
		}

		for (i = 0; i < adev->uvd.max_handles; ++i) {
			atomic_set(&adev->uvd.inst[j].handles[i], 0);
			adev->uvd.inst[j].filp[i] = NULL;
		}
	}
	/* from UVD v5.0, HW addressing capacity increased to 64 bits */
	if (!amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_UVD, 5, 0))
		adev->uvd.address_64_bit = true;

	switch (adev->asic_type) {
	case CHIP_TONGA:
		adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_65_10;
		break;
	case CHIP_CARRIZO:
		adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_87_11;
		break;
	case CHIP_FIJI:
		adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_87_12;
		break;
	case CHIP_STONEY:
		adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_37_15;
		break;
	default:
		adev->uvd.use_ctx_buf = adev->asic_type >= CHIP_POLARIS10;
	}

	return 0;
}

int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
{
	int i, j;

	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
		kfree(adev->uvd.inst[j].saved_bo);

		drm_sched_entity_fini(&adev->uvd.inst[j].ring.sched, &adev->uvd.inst[j].entity);

		amdgpu_bo_free_kernel(&adev->uvd.inst[j].vcpu_bo,
				      &adev->uvd.inst[j].gpu_addr,
				      (void **)&adev->uvd.inst[j].cpu_addr);

		amdgpu_ring_fini(&adev->uvd.inst[j].ring);

		for (i = 0; i < AMDGPU_MAX_UVD_ENC_RINGS; ++i)
			amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]);
	}
	release_firmware(adev->uvd.fw);

	return 0;
}

int amdgpu_uvd_suspend(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;
	int i, j;

	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
		if (adev->uvd.inst[j].vcpu_bo == NULL)
			continue;

		cancel_delayed_work_sync(&adev->uvd.inst[j].idle_work);

		/* only valid for physical mode */
		if (adev->asic_type < CHIP_POLARIS10) {
			for (i = 0; i < adev->uvd.max_handles; ++i)
				if (atomic_read(&adev->uvd.inst[j].handles[i]))
					break;

			if (i == adev->uvd.max_handles)
				continue;
		}

		size = amdgpu_bo_size(adev->uvd.inst[j].vcpu_bo);
		ptr = adev->uvd.inst[j].cpu_addr;

		adev->uvd.inst[j].saved_bo = kmalloc(size, GFP_KERNEL);
		if (!adev->uvd.inst[j].saved_bo)
			return -ENOMEM;

		memcpy_fromio(adev->uvd.inst[j].saved_bo, ptr, size);
	}
	return 0;
}

int amdgpu_uvd_resume(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;
	int i;

	for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
		if (adev->uvd.inst[i].vcpu_bo == NULL)
			return -EINVAL;

		size = amdgpu_bo_size(adev->uvd.inst[i].vcpu_bo);
		ptr = adev->uvd.inst[i].cpu_addr;

		if (adev->uvd.inst[i].saved_bo != NULL) {
			memcpy_toio(ptr, adev->uvd.inst[i].saved_bo, size);
			kfree(adev->uvd.inst[i].saved_bo);
			adev->uvd.inst[i].saved_bo = NULL;
		} else {
			const struct common_firmware_header *hdr;
			unsigned offset;

			hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
			if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
				offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
				memcpy_toio(adev->uvd.inst[i].cpu_addr, adev->uvd.fw->data + offset,
					    le32_to_cpu(hdr->ucode_size_bytes));
				size -= le32_to_cpu(hdr->ucode_size_bytes);
				ptr += le32_to_cpu(hdr->ucode_size_bytes);
			}
			memset_io(ptr, 0, size);
			/* to restore uvd fence seq */
			amdgpu_fence_driver_force_completion(&adev->uvd.inst[i].ring);
		}
	}
	return 0;
}

void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
{
	struct amdgpu_ring *ring;
	int i, j, r;

	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
		ring = &adev->uvd.inst[j].ring;

		for (i = 0; i < adev->uvd.max_handles; ++i) {
			uint32_t handle = atomic_read(&adev->uvd.inst[j].handles[i]);
			if (handle != 0 && adev->uvd.inst[j].filp[i] == filp) {
				struct dma_fence *fence;

				r = amdgpu_uvd_get_destroy_msg(ring, handle,
							       false, &fence);
				if (r) {
					DRM_ERROR("Error destroying UVD(%d) %d!\n", j, r);
					continue;
				}

				dma_fence_wait(fence, false);
				dma_fence_put(fence);

				adev->uvd.inst[j].filp[i] = NULL;
				atomic_set(&adev->uvd.inst[j].handles[i], 0);
			}
		}
	}
}

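/*
 * Limit every placement of the buffer to the first 256 MB of its domain;
 * without 64-bit addressing UVD buffers must stay inside a single 256 MB
 * segment (see the boundary checks in amdgpu_uvd_cs_pass2() below).
 */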
static void amdgpu_uvd_force_into_uvd_segment(struct amdgpu_bo *abo)
{
	int i;
	for (i = 0; i < abo->placement.num_placement; ++i) {
		abo->placements[i].fpfn = 0 >> PAGE_SHIFT;
		abo->placements[i].lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT;
	}
}

static u64 amdgpu_uvd_get_addr_from_ctx(struct amdgpu_uvd_cs_ctx *ctx)
{
	uint32_t lo, hi;
	uint64_t addr;

	lo = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data0);
	hi = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data1);
	addr = ((uint64_t)lo) | (((uint64_t)hi) << 32);

	return addr;
}

/**
 * amdgpu_uvd_cs_pass1 - first parsing round
 *
 * @ctx: UVD parser context
 *
 * Make sure UVD message and feedback buffers are in VRAM and
 * nobody is violating a 256MB boundary.
 */
static int amdgpu_uvd_cs_pass1(struct amdgpu_uvd_cs_ctx *ctx)
{
	struct ttm_operation_ctx tctx = { false, false };
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo;
	uint32_t cmd;
	uint64_t addr = amdgpu_uvd_get_addr_from_ctx(ctx);
	int r = 0;

	r = amdgpu_cs_find_mapping(ctx->parser, addr, &bo, &mapping);
	if (r) {
		DRM_ERROR("Can't find BO for addr 0x%08Lx\n", addr);
		return r;
	}

	if (!ctx->parser->adev->uvd.address_64_bit) {
		/* check if it's a message or feedback command */
		cmd = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->idx) >> 1;
		if (cmd == 0x0 || cmd == 0x3) {
			/* yes, force it into VRAM */
			uint32_t domain = AMDGPU_GEM_DOMAIN_VRAM;
			amdgpu_ttm_placement_from_domain(bo, domain);
		}
		amdgpu_uvd_force_into_uvd_segment(bo);

		r = ttm_bo_validate(&bo->tbo, &bo->placement, &tctx);
	}

	return r;
}

/**
 * amdgpu_uvd_cs_msg_decode - handle UVD decode message
 *
 * @msg: pointer to message structure
 * @buf_sizes: returned buffer sizes
 *
 * Peek into the decode message and calculate the necessary buffer sizes.
 */
static int amdgpu_uvd_cs_msg_decode(struct amdgpu_device *adev, uint32_t *msg,
				    unsigned buf_sizes[])
{
	unsigned stream_type = msg[4];
	unsigned width = msg[6];
	unsigned height = msg[7];
	unsigned dpb_size = msg[9];
	unsigned pitch = msg[28];
	unsigned level = msg[57];

	unsigned width_in_mb = width / 16;
	unsigned height_in_mb = ALIGN(height / 16, 2);
	unsigned fs_in_mb = width_in_mb * height_in_mb;

	unsigned image_size, tmp, min_dpb_size, num_dpb_buffer;
	unsigned min_ctx_size = ~0;

	image_size = width * height;
	image_size += image_size / 2;
	image_size = ALIGN(image_size, 1024);
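	/*
	 * This assumes an NV12-style target: a full-size luma plane plus
	 * chroma at half the luma size, rounded up to 1 KiB.
	 */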

	switch (stream_type) {
	case 0: /* H264 */
		switch (level) {
		case 30:
			num_dpb_buffer = 8100 / fs_in_mb;
			break;
		case 31:
			num_dpb_buffer = 18000 / fs_in_mb;
			break;
		case 32:
			num_dpb_buffer = 20480 / fs_in_mb;
			break;
		case 41:
			num_dpb_buffer = 32768 / fs_in_mb;
			break;
		case 42:
			num_dpb_buffer = 34816 / fs_in_mb;
			break;
		case 50:
			num_dpb_buffer = 110400 / fs_in_mb;
			break;
		case 51:
			num_dpb_buffer = 184320 / fs_in_mb;
			break;
		default:
			num_dpb_buffer = 184320 / fs_in_mb;
			break;
		}
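		/*
		 * The per-level constants above appear to be the MaxDpbMbs
		 * limits from H.264 Table A-1; dividing by the frame size in
		 * macroblocks bounds the DPB frame count, plus one for the
		 * current picture, capped at 17 below.
		 */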
		num_dpb_buffer++;
		if (num_dpb_buffer > 17)
			num_dpb_buffer = 17;

		/* reference picture buffer */
		min_dpb_size = image_size * num_dpb_buffer;

		/* macroblock context buffer */
		min_dpb_size += width_in_mb * height_in_mb * num_dpb_buffer * 192;

		/* IT surface buffer */
		min_dpb_size += width_in_mb * height_in_mb * 32;
		break;

	case 1: /* VC1 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;

		/* CONTEXT_BUFFER */
		min_dpb_size += width_in_mb * height_in_mb * 128;

		/* IT surface buffer */
		min_dpb_size += width_in_mb * 64;

		/* DB surface buffer */
		min_dpb_size += width_in_mb * 128;

		/* BP */
		tmp = max(width_in_mb, height_in_mb);
		min_dpb_size += ALIGN(tmp * 7 * 16, 64);
		break;

	case 3: /* MPEG2 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;
		break;

	case 4: /* MPEG4 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;

		/* CM */
		min_dpb_size += width_in_mb * height_in_mb * 64;

		/* IT surface buffer */
		min_dpb_size += ALIGN(width_in_mb * height_in_mb * 32, 64);
		break;

	case 7: /* H264 Perf */
		switch (level) {
		case 30:
			num_dpb_buffer = 8100 / fs_in_mb;
			break;
		case 31:
			num_dpb_buffer = 18000 / fs_in_mb;
			break;
		case 32:
			num_dpb_buffer = 20480 / fs_in_mb;
			break;
		case 41:
			num_dpb_buffer = 32768 / fs_in_mb;
			break;
		case 42:
			num_dpb_buffer = 34816 / fs_in_mb;
			break;
		case 50:
			num_dpb_buffer = 110400 / fs_in_mb;
			break;
		case 51:
			num_dpb_buffer = 184320 / fs_in_mb;
			break;
		default:
			num_dpb_buffer = 184320 / fs_in_mb;
			break;
		}
		num_dpb_buffer++;
		if (num_dpb_buffer > 17)
			num_dpb_buffer = 17;

		/* reference picture buffer */
		min_dpb_size = image_size * num_dpb_buffer;

		if (!adev->uvd.use_ctx_buf) {
			/* macroblock context buffer */
			min_dpb_size +=
				width_in_mb * height_in_mb * num_dpb_buffer * 192;

			/* IT surface buffer */
			min_dpb_size += width_in_mb * height_in_mb * 32;
		} else {
			/* macroblock context buffer */
			min_ctx_size =
				width_in_mb * height_in_mb * num_dpb_buffer * 192;
		}
		break;

	case 8: /* MJPEG */
		min_dpb_size = 0;
		break;

	case 16: /* H265 */
		image_size = (ALIGN(width, 16) * ALIGN(height, 16) * 3) / 2;
		image_size = ALIGN(image_size, 256);

		num_dpb_buffer = (le32_to_cpu(msg[59]) & 0xff) + 2;
		min_dpb_size = image_size * num_dpb_buffer;
		min_ctx_size = ((width + 255) / 16) * ((height + 255) / 16)
			       * 16 * num_dpb_buffer + 52 * 1024;
		break;

	default:
		DRM_ERROR("UVD codec not handled %d!\n", stream_type);
		return -EINVAL;
	}

	if (width > pitch) {
		DRM_ERROR("Invalid UVD decoding target pitch!\n");
		return -EINVAL;
	}

	if (dpb_size < min_dpb_size) {
		DRM_ERROR("Invalid dpb_size in UVD message (%d / %d)!\n",
			  dpb_size, min_dpb_size);
		return -EINVAL;
	}

	buf_sizes[0x1] = dpb_size;
	buf_sizes[0x2] = image_size;
	buf_sizes[0x4] = min_ctx_size;
	return 0;
}

/**
 * amdgpu_uvd_cs_msg - handle UVD message
 *
 * @ctx: UVD parser context
 * @bo: buffer object containing the message
 * @offset: offset into the buffer object
 *
 * Peek into the UVD message and extract the session id.
 * Make sure that we don't open up too many sessions.
 */
static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
			     struct amdgpu_bo *bo, unsigned offset)
{
	struct amdgpu_device *adev = ctx->parser->adev;
	int32_t *msg, msg_type, handle;
	void *ptr;
	long r;
	int i;
	uint32_t ip_instance = ctx->parser->job->ring->me;

	if (offset & 0x3F) {
		DRM_ERROR("UVD(%d) messages must be 64 byte aligned!\n", ip_instance);
		return -EINVAL;
	}

	r = amdgpu_bo_kmap(bo, &ptr);
	if (r) {
		DRM_ERROR("Failed mapping the UVD(%d) message (%ld)!\n", ip_instance, r);
		return r;
	}

	msg = ptr + offset;

	msg_type = msg[1];
	handle = msg[2];

	if (handle == 0) {
		amdgpu_bo_kunmap(bo);
		DRM_ERROR("Invalid UVD(%d) handle!\n", ip_instance);
		return -EINVAL;
	}

	switch (msg_type) {
	case 0:
		/* it's a create msg, calc image size (width * height) */
		amdgpu_bo_kunmap(bo);

		/* try to alloc a new handle */
		for (i = 0; i < adev->uvd.max_handles; ++i) {
			if (atomic_read(&adev->uvd.inst[ip_instance].handles[i]) == handle) {
				DRM_ERROR("(%d)Handle 0x%x already in use!\n", ip_instance, handle);
				return -EINVAL;
			}

			if (!atomic_cmpxchg(&adev->uvd.inst[ip_instance].handles[i], 0, handle)) {
				adev->uvd.inst[ip_instance].filp[i] = ctx->parser->filp;
				return 0;
			}
		}

		DRM_ERROR("No more free UVD(%d) handles!\n", ip_instance);
		return -ENOSPC;

	case 1:
		/* it's a decode msg, calc buffer sizes */
		r = amdgpu_uvd_cs_msg_decode(adev, msg, ctx->buf_sizes);
		amdgpu_bo_kunmap(bo);
		if (r)
			return r;

		/* validate the handle */
		for (i = 0; i < adev->uvd.max_handles; ++i) {
			if (atomic_read(&adev->uvd.inst[ip_instance].handles[i]) == handle) {
				if (adev->uvd.inst[ip_instance].filp[i] != ctx->parser->filp) {
					DRM_ERROR("UVD(%d) handle collision detected!\n", ip_instance);
					return -EINVAL;
				}
				return 0;
			}
		}

		DRM_ERROR("Invalid UVD(%d) handle 0x%x!\n", ip_instance, handle);
		return -ENOENT;

	case 2:
		/* it's a destroy msg, free the handle */
		for (i = 0; i < adev->uvd.max_handles; ++i)
			atomic_cmpxchg(&adev->uvd.inst[ip_instance].handles[i], handle, 0);
		amdgpu_bo_kunmap(bo);
		return 0;

	default:
		amdgpu_bo_kunmap(bo);
		DRM_ERROR("Illegal UVD(%d) message type (%d)!\n", ip_instance, msg_type);
		return -EINVAL;
	}
	BUG();
	return -EINVAL;
}

/**
 * amdgpu_uvd_cs_pass2 - second parsing round
 *
 * @ctx: UVD parser context
 *
 * Patch buffer addresses, make sure buffer sizes are correct.
 */
static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo;
	uint32_t cmd;
	uint64_t start, end;
	uint64_t addr = amdgpu_uvd_get_addr_from_ctx(ctx);
	int r;

	r = amdgpu_cs_find_mapping(ctx->parser, addr, &bo, &mapping);
	if (r) {
		DRM_ERROR("Can't find BO for addr 0x%08Lx\n", addr);
		return r;
	}

	start = amdgpu_bo_gpu_offset(bo);

	end = (mapping->last + 1 - mapping->start);
	end = end * AMDGPU_GPU_PAGE_SIZE + start;

	addr -= mapping->start * AMDGPU_GPU_PAGE_SIZE;
	start += addr;

	amdgpu_set_ib_value(ctx->parser, ctx->ib_idx, ctx->data0,
			    lower_32_bits(start));
	amdgpu_set_ib_value(ctx->parser, ctx->ib_idx, ctx->data1,
			    upper_32_bits(start));

	cmd = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->idx) >> 1;
	if (cmd < 0x4) {
		if ((end - start) < ctx->buf_sizes[cmd]) {
			DRM_ERROR("buffer (%d) too small (%d / %d)!\n", cmd,
				  (unsigned)(end - start),
				  ctx->buf_sizes[cmd]);
			return -EINVAL;
		}

	} else if (cmd == 0x206) {
		if ((end - start) < ctx->buf_sizes[4]) {
			DRM_ERROR("buffer (%d) too small (%d / %d)!\n", cmd,
				  (unsigned)(end - start),
				  ctx->buf_sizes[4]);
			return -EINVAL;
		}
	} else if ((cmd != 0x100) && (cmd != 0x204)) {
		DRM_ERROR("invalid UVD command %X!\n", cmd);
		return -EINVAL;
	}

	if (!ctx->parser->adev->uvd.address_64_bit) {
		if ((start >> 28) != ((end - 1) >> 28)) {
			DRM_ERROR("reloc %LX-%LX crossing 256MB boundary!\n",
				  start, end);
			return -EINVAL;
		}

		if ((cmd == 0 || cmd == 0x3) &&
		    (start >> 28) != (ctx->parser->adev->uvd.inst->gpu_addr >> 28)) {
			DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n",
				  start, end);
			return -EINVAL;
		}
	}

	if (cmd == 0) {
		ctx->has_msg_cmd = true;
		r = amdgpu_uvd_cs_msg(ctx, bo, addr);
		if (r)
			return r;
	} else if (!ctx->has_msg_cmd) {
		DRM_ERROR("Message needed before other commands are sent!\n");
		return -EINVAL;
	}

	return 0;
}

/**
 * amdgpu_uvd_cs_reg - parse register writes
 *
 * @ctx: UVD parser context
 * @cb: callback function
 *
 * Parse the register writes, call cb on each complete command.
 */
static int amdgpu_uvd_cs_reg(struct amdgpu_uvd_cs_ctx *ctx,
			     int (*cb)(struct amdgpu_uvd_cs_ctx *ctx))
{
	struct amdgpu_ib *ib = &ctx->parser->job->ibs[ctx->ib_idx];
	int i, r;

	ctx->idx++;
	for (i = 0; i <= ctx->count; ++i) {
		unsigned reg = ctx->reg + i;

		if (ctx->idx >= ib->length_dw) {
			DRM_ERROR("Register command after end of CS!\n");
			return -EINVAL;
		}

		switch (reg) {
		case mmUVD_GPCOM_VCPU_DATA0:
			ctx->data0 = ctx->idx;
			break;
		case mmUVD_GPCOM_VCPU_DATA1:
			ctx->data1 = ctx->idx;
			break;
		case mmUVD_GPCOM_VCPU_CMD:
			r = cb(ctx);
			if (r)
				return r;
			break;
		case mmUVD_ENGINE_CNTL:
		case mmUVD_NO_OP:
			break;
		default:
			DRM_ERROR("Invalid reg 0x%X!\n", reg);
			return -EINVAL;
		}
		ctx->idx++;
	}
	return 0;
}
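
/*
 * A command in these streams is thus three register writes:
 * mmUVD_GPCOM_VCPU_DATA0/DATA1 carry the 64-bit buffer address and
 * mmUVD_GPCOM_VCPU_CMD triggers the callback, which reads the address
 * back through ctx->data0/data1.
 */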

/**
 * amdgpu_uvd_cs_packets - parse UVD packets
 *
 * @ctx: UVD parser context
 * @cb: callback function
 *
 * Parse the command stream packets.
 */
static int amdgpu_uvd_cs_packets(struct amdgpu_uvd_cs_ctx *ctx,
				 int (*cb)(struct amdgpu_uvd_cs_ctx *ctx))
{
	struct amdgpu_ib *ib = &ctx->parser->job->ibs[ctx->ib_idx];
	int r;

	for (ctx->idx = 0; ctx->idx < ib->length_dw; ) {
		uint32_t cmd = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->idx);
		unsigned type = CP_PACKET_GET_TYPE(cmd);
		switch (type) {
		case PACKET_TYPE0:
			ctx->reg = CP_PACKET0_GET_REG(cmd);
			ctx->count = CP_PACKET_GET_COUNT(cmd);
			r = amdgpu_uvd_cs_reg(ctx, cb);
			if (r)
				return r;
			break;
		case PACKET_TYPE2:
			++ctx->idx;
			break;
		default:
			DRM_ERROR("Unknown packet type %d !\n", type);
			return -EINVAL;
		}
	}
	return 0;
}

/**
 * amdgpu_uvd_ring_parse_cs - UVD command submission parser
 *
 * @parser: Command submission parser context
 *
 * Parse the command stream, patch in addresses as necessary.
 */
int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
{
	struct amdgpu_uvd_cs_ctx ctx = {};
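	/*
	 * Minimum sizes indexed by command id; from amdgpu_uvd_cs_msg_decode()
	 * above, 0x1 is the DPB, 0x2 the decoding target and 0x4 the context
	 * buffer, while 0x0/0x3 (message and feedback) use fixed 2048-byte
	 * minimums.
	 */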
	unsigned buf_sizes[] = {
		[0x00000000] = 2048,
		[0x00000001] = 0xFFFFFFFF,
		[0x00000002] = 0xFFFFFFFF,
		[0x00000003] = 2048,
		[0x00000004] = 0xFFFFFFFF,
	};
	struct amdgpu_ib *ib = &parser->job->ibs[ib_idx];
	int r;

	parser->job->vm = NULL;
	ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);

	if (ib->length_dw % 16) {
		DRM_ERROR("UVD IB length (%d) not 16 dwords aligned!\n",
			  ib->length_dw);
		return -EINVAL;
	}

	ctx.parser = parser;
	ctx.buf_sizes = buf_sizes;
	ctx.ib_idx = ib_idx;

	/* first round only required on chips without UVD 64 bit address support */
	if (!parser->adev->uvd.address_64_bit) {
		/* first round, make sure the buffers are actually in the UVD segment */
		r = amdgpu_uvd_cs_packets(&ctx, amdgpu_uvd_cs_pass1);
		if (r)
			return r;
	}

	/* second round, patch buffer addresses into the command stream */
	r = amdgpu_uvd_cs_packets(&ctx, amdgpu_uvd_cs_pass2);
	if (r)
		return r;

	if (!ctx.has_msg_cmd) {
		DRM_ERROR("UVD-IBs need a msg command!\n");
		return -EINVAL;
	}

	return 0;
}

static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
			       bool direct, struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct dma_fence *f = NULL;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	uint32_t data[4];
	uint64_t addr;
	long r;
	int i;
	unsigned offset_idx = 0;
	unsigned offset[3] = { UVD_BASE_SI, 0, 0 };

	amdgpu_bo_kunmap(bo);
	amdgpu_bo_unpin(bo);

	if (!ring->adev->uvd.address_64_bit) {
		struct ttm_operation_ctx ctx = { true, false };

		amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
		amdgpu_uvd_force_into_uvd_segment(bo);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		if (r)
			goto err;
	}

	r = amdgpu_job_alloc_with_ib(adev, 64, &job);
	if (r)
		goto err;

	if (adev->asic_type >= CHIP_VEGA10) {
		offset_idx = 1 + ring->me;
		offset[1] = adev->reg_offset[UVD_HWIP][0][1];
		offset[2] = adev->reg_offset[UVD_HWIP][1][1];
	}

	data[0] = PACKET0(offset[offset_idx] + UVD_GPCOM_VCPU_DATA0, 0);
	data[1] = PACKET0(offset[offset_idx] + UVD_GPCOM_VCPU_DATA1, 0);
	data[2] = PACKET0(offset[offset_idx] + UVD_GPCOM_VCPU_CMD, 0);
	data[3] = PACKET0(offset[offset_idx] + UVD_NO_OP, 0);

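	/*
	 * Each data[] word is a type-0 packet header addressing one UVD_GPCOM
	 * register (relative to the per-ASIC base chosen above); in the IB
	 * built below, the dword following each header is the value written
	 * to that register.
	 */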
	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);
	ib->ptr[0] = data[0];
	ib->ptr[1] = addr;
	ib->ptr[2] = data[1];
	ib->ptr[3] = addr >> 32;
	ib->ptr[4] = data[2];
	ib->ptr[5] = 0;
	for (i = 6; i < 16; i += 2) {
		ib->ptr[i] = data[3];
		ib->ptr[i+1] = 0;
	}
	ib->length_dw = 16;

	if (direct) {
		r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
							true, false,
							msecs_to_jiffies(10));
		if (r == 0)
			r = -ETIMEDOUT;
		if (r < 0)
			goto err_free;

		r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
		job->fence = dma_fence_get(f);
		if (r)
			goto err_free;

		amdgpu_job_free(job);
	} else {
		r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.resv,
				     AMDGPU_FENCE_OWNER_UNDEFINED, false);
		if (r)
			goto err_free;

		r = amdgpu_job_submit(job, ring, &adev->uvd.inst[ring->me].entity,
				      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
		if (r)
			goto err_free;
	}

	amdgpu_bo_fence(bo, f, false);
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err_free:
	amdgpu_job_free(job);

err:
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);
	return r;
}

/*
 * Multiple fence commands without any stream commands in between can
 * crash the vcpu so just try to emit a dummy create/destroy msg to
 * avoid this.
 */
int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
			      struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo = NULL;
	uint32_t *msg;
	int r, i;

	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, NULL, (void **)&msg);
	if (r)
		return r;

	/* stitch together a UVD create msg */
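	/*
	 * Field meanings are inferred from amdgpu_uvd_cs_msg() above: msg[1]
	 * is the message type (0 = create) and msg[2] the session handle;
	 * 0x780/0x440 in msg[7]/msg[8] look like a 1920x1088 default surface.
	 */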
	msg[0] = cpu_to_le32(0x00000de4);
	msg[1] = cpu_to_le32(0x00000000);
	msg[2] = cpu_to_le32(handle);
	msg[3] = cpu_to_le32(0x00000000);
	msg[4] = cpu_to_le32(0x00000000);
	msg[5] = cpu_to_le32(0x00000000);
	msg[6] = cpu_to_le32(0x00000000);
	msg[7] = cpu_to_le32(0x00000780);
	msg[8] = cpu_to_le32(0x00000440);
	msg[9] = cpu_to_le32(0x00000000);
	msg[10] = cpu_to_le32(0x01b37000);
	for (i = 11; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	return amdgpu_uvd_send_msg(ring, bo, true, fence);
}

int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
			       bool direct, struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo = NULL;
	uint32_t *msg;
	int r, i;

	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, NULL, (void **)&msg);
	if (r)
		return r;

	/* stitch together a UVD destroy msg */
	msg[0] = cpu_to_le32(0x00000de4);
	msg[1] = cpu_to_le32(0x00000002);
	msg[2] = cpu_to_le32(handle);
	msg[3] = cpu_to_le32(0x00000000);
	for (i = 4; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	return amdgpu_uvd_send_msg(ring, bo, direct, fence);
}

static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, uvd.inst->idle_work.work);
	unsigned fences = 0, i, j;

	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
		fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring);
		for (j = 0; j < adev->uvd.num_enc_rings; ++j) {
			fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring_enc[j]);
		}
	}

	if (fences == 0) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_uvd(adev, false);
		} else {
			amdgpu_asic_set_uvd_clocks(adev, 0, 0);
			/* shutdown the UVD block */
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
							       AMD_PG_STATE_GATE);
			amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
							       AMD_CG_STATE_GATE);
		}
	} else {
		schedule_delayed_work(&adev->uvd.inst->idle_work, UVD_IDLE_TIMEOUT);
	}
}

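/*
 * begin_use/end_use bracket every submission: begin_use cancels the pending
 * idle work and re-enables clocks/power if the block had already been gated,
 * while end_use re-arms the one-second idle timer.
 */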
void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	bool set_clocks;

	if (amdgpu_sriov_vf(adev))
		return;

	set_clocks = !cancel_delayed_work_sync(&adev->uvd.inst->idle_work);
	if (set_clocks) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_uvd(adev, true);
		} else {
			amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
			amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
							       AMD_CG_STATE_UNGATE);
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
							       AMD_PG_STATE_UNGATE);
		}
	}
}

void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring)
{
	if (!amdgpu_sriov_vf(ring->adev))
		schedule_delayed_work(&ring->adev->uvd.inst->idle_work, UVD_IDLE_TIMEOUT);
}

/**
 * amdgpu_uvd_ring_test_ib - test ib execution
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully execute an IB
 */
int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence;
	long r;
	uint32_t ip_instance = ring->me;

	r = amdgpu_uvd_get_create_msg(ring, 1, NULL);
	if (r) {
		DRM_ERROR("amdgpu: (%d)failed to get create msg (%ld).\n", ip_instance, r);
		goto error;
	}

	r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence);
	if (r) {
		DRM_ERROR("amdgpu: (%d)failed to get destroy ib (%ld).\n", ip_instance, r);
		goto error;
	}

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0) {
		DRM_ERROR("amdgpu: (%d)IB test timed out.\n", ip_instance);
		r = -ETIMEDOUT;
	} else if (r < 0) {
		DRM_ERROR("amdgpu: (%d)fence wait failed (%ld).\n", ip_instance, r);
	} else {
		DRM_DEBUG("ib test on (%d)ring %d succeeded\n", ip_instance, ring->idx);
		r = 0;
	}

	dma_fence_put(fence);

error:
	return r;
}

/**
 * amdgpu_uvd_used_handles - returns used UVD handles
 *
 * @adev: amdgpu_device pointer
 *
 * Returns the number of UVD handles in use
 */
uint32_t amdgpu_uvd_used_handles(struct amdgpu_device *adev)
{
	unsigned i;
	uint32_t used_handles = 0;

	for (i = 0; i < adev->uvd.max_handles; ++i) {
		/*
		 * Handles can be freed in any order, and not
		 * necessarily linear. So we need to count
		 * all non-zero handles.
		 */
		if (atomic_read(&adev->uvd.inst->handles[i]))
			used_handles++;
	}

	return used_handles;
}