/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "r600d.h"

/**
 * uvd_v1_0_get_rptr - get read pointer
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring pointer
 *
 * Returns the current hardware read pointer
 */
uint32_t uvd_v1_0_get_rptr(struct radeon_device *rdev,
			   struct radeon_ring *ring)
{
	return RREG32(UVD_RBC_RB_RPTR);
}

/**
 * uvd_v1_0_get_wptr - get write pointer
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring pointer
 *
 * Returns the current hardware write pointer
 */
uint32_t uvd_v1_0_get_wptr(struct radeon_device *rdev,
			   struct radeon_ring *ring)
{
	return RREG32(UVD_RBC_RB_WPTR);
}

/**
 * uvd_v1_0_set_wptr - set write pointer
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring pointer
 *
 * Commits the write pointer to the hardware
 */
void uvd_v1_0_set_wptr(struct radeon_device *rdev,
		       struct radeon_ring *ring)
{
	WREG32(UVD_RBC_RB_WPTR, ring->wptr);
}

/**
 * uvd_v1_0_fence_emit - emit a fence & trap command
 *
 * @rdev: radeon_device pointer
 * @fence: fence to emit
 *
 * Write a fence and a trap command to the ring.
 */
void uvd_v1_0_fence_emit(struct radeon_device *rdev,
			 struct radeon_fence *fence)
{
	struct radeon_ring *ring = &rdev->ring[fence->ring];
	uint64_t addr = rdev->fence_drv[fence->ring].gpu_addr;

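	/* emit the fence: lower 32 bits of the fence address in DATA0, sequence number in DATA1 */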
	radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA0, 0));
	radeon_ring_write(ring, addr & 0xffffffff);
	radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA1, 0));
	radeon_ring_write(ring, fence->seq);
	radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0));
	radeon_ring_write(ring, 0);

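	/* followed by the trap command */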
	radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA0, 0));
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA1, 0));
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0));
	radeon_ring_write(ring, 2);
}

/**
 * uvd_v1_0_resume - memory controller programming
 *
 * @rdev: radeon_device pointer
 *
 * Let the UVD memory controller know its offsets
 */
int uvd_v1_0_resume(struct radeon_device *rdev)
{
	uint64_t addr;
	uint32_t size;
	int r;

	r = radeon_uvd_resume(rdev);
	if (r)
		return r;

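	/* note: the VCPU cache offsets and sizes below are written in 8-byte units (hence the >> 3) */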
	/* program the VCPU memory controller bits 0-27 */
	addr = (rdev->uvd.gpu_addr >> 3) + 16;
	size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size) >> 3;
	WREG32(UVD_VCPU_CACHE_OFFSET0, addr);
	WREG32(UVD_VCPU_CACHE_SIZE0, size);

	addr += size;
	size = RADEON_UVD_STACK_SIZE >> 3;
	WREG32(UVD_VCPU_CACHE_OFFSET1, addr);
	WREG32(UVD_VCPU_CACHE_SIZE1, size);

	addr += size;
	size = RADEON_UVD_HEAP_SIZE >> 3;
	WREG32(UVD_VCPU_CACHE_OFFSET2, addr);
	WREG32(UVD_VCPU_CACHE_SIZE2, size);

	/* bits 28-31 */
	addr = (rdev->uvd.gpu_addr >> 28) & 0xF;
	WREG32(UVD_LMI_ADDR_EXT, (addr << 12) | (addr << 0));

	/* bits 32-39 */
	addr = (rdev->uvd.gpu_addr >> 32) & 0xFF;
	WREG32(UVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31));

	WREG32(UVD_FW_START, *((uint32_t*)rdev->uvd.cpu_addr));

	return 0;
}

/**
 * uvd_v1_0_init - start and test UVD block
 *
 * @rdev: radeon_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
int uvd_v1_0_init(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
	uint32_t tmp;
	int r;

	/* raise clocks while booting up the VCPU */
	if (rdev->family < CHIP_RV740)
		radeon_set_uvd_clocks(rdev, 10000, 10000);
	else
		radeon_set_uvd_clocks(rdev, 53300, 40000);

	r = uvd_v1_0_start(rdev);
	if (r)
		goto done;

	ring->ready = true;
	r = radeon_ring_test(rdev, R600_RING_TYPE_UVD_INDEX, ring);
	if (r) {
		ring->ready = false;
		goto done;
	}

	r = radeon_ring_lock(rdev, ring, 10);
	if (r) {
		DRM_ERROR("radeon: failed to lock UVD ring (%d).\n", r);
		goto done;
	}

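	/* program the semaphore wait/signal timeouts */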
	tmp = PACKET0(UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
	radeon_ring_write(ring, tmp);
	radeon_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
	radeon_ring_write(ring, tmp);
	radeon_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
	radeon_ring_write(ring, tmp);
	radeon_ring_write(ring, 0xFFFFF);

	/* Clear timeout status bits */
	radeon_ring_write(ring, PACKET0(UVD_SEMA_TIMEOUT_STATUS, 0));
	radeon_ring_write(ring, 0x8);

	radeon_ring_write(ring, PACKET0(UVD_SEMA_CNTL, 0));
	radeon_ring_write(ring, 3);

	radeon_ring_unlock_commit(rdev, ring, false);

done:
	/* lower clocks again */
	radeon_set_uvd_clocks(rdev, 0, 0);

	if (!r) {
		switch (rdev->family) {
		case CHIP_RV610:
		case CHIP_RV630:
		case CHIP_RV620:
			/* 64byte granularity workaround */
			WREG32(MC_CONFIG, 0);
			WREG32(MC_CONFIG, 1 << 4);
			WREG32(RS_DQ_RD_RET_CONF, 0x3f);
			WREG32(MC_CONFIG, 0x1f);

			/* fall through */
		case CHIP_RV670:
		case CHIP_RV635:

			/* write clean workaround */
			WREG32_P(UVD_VCPU_CNTL, 0x10, ~0x10);
			break;

		default:
			/* TODO: Do we need more? */
			break;
		}

		DRM_INFO("UVD initialized successfully.\n");
	}

	return r;
}

/**
 * uvd_v1_0_fini - stop the hardware block
 *
 * @rdev: radeon_device pointer
 *
 * Stop the UVD block and mark the ring as no longer ready
 */
void uvd_v1_0_fini(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];

	uvd_v1_0_stop(rdev);
	ring->ready = false;
}

/**
 * uvd_v1_0_start - start UVD block
 *
 * @rdev: radeon_device pointer
 *
 * Set up and start the UVD block
 */
int uvd_v1_0_start(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
	uint32_t rb_bufsz;
	int i, j, r;

	/* disable byte swapping */
	u32 lmi_swap_cntl = 0;
	u32 mp_swap_cntl = 0;

	/* disable clock gating */
	WREG32(UVD_CGC_GATE, 0);

	/* disable interrupt */
	WREG32_P(UVD_MASTINT_EN, 0, ~(1 << 1));

	/* Stall UMC and register bus before resetting VCPU */
	WREG32_P(UVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
	WREG32_P(UVD_RB_ARB_CTRL, 1 << 3, ~(1 << 3));
	mdelay(1);

	/* put LMI, VCPU, RBC etc... into reset */
	WREG32(UVD_SOFT_RESET, LMI_SOFT_RESET | VCPU_SOFT_RESET |
	       LBSI_SOFT_RESET | RBC_SOFT_RESET | CSM_SOFT_RESET |
	       CXW_SOFT_RESET | TAP_SOFT_RESET | LMI_UMC_SOFT_RESET);
	mdelay(5);

	/* take UVD block out of reset */
	WREG32_P(SRBM_SOFT_RESET, 0, ~SOFT_RESET_UVD);
	mdelay(5);

	/* initialize UVD memory controller */
	WREG32(UVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) |
	       (1 << 21) | (1 << 9) | (1 << 20));

#ifdef __BIG_ENDIAN
	/* swap (8 in 32) RB and IB */
	lmi_swap_cntl = 0xa;
	mp_swap_cntl = 0;
#endif
	WREG32(UVD_LMI_SWAP_CNTL, lmi_swap_cntl);
	WREG32(UVD_MP_SWAP_CNTL, mp_swap_cntl);

	WREG32(UVD_MPC_SET_MUXA0, 0x40c2040);
	WREG32(UVD_MPC_SET_MUXA1, 0x0);
	WREG32(UVD_MPC_SET_MUXB0, 0x40c2040);
	WREG32(UVD_MPC_SET_MUXB1, 0x0);
	WREG32(UVD_MPC_SET_ALU, 0);
	WREG32(UVD_MPC_SET_MUX, 0x88);

	/* take all subblocks out of reset, except VCPU */
	WREG32(UVD_SOFT_RESET, VCPU_SOFT_RESET);
	mdelay(5);

	/* enable VCPU clock */
	WREG32(UVD_VCPU_CNTL, 1 << 9);

	/* enable UMC */
	WREG32_P(UVD_LMI_CTRL2, 0, ~(1 << 8));

	WREG32_P(UVD_RB_ARB_CTRL, 0, ~(1 << 3));

	/* boot up the VCPU */
	WREG32(UVD_SOFT_RESET, 0);
	mdelay(10);

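	/* wait for the VCPU to report ready; reset and retry up to 10 times if it does not */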
	for (i = 0; i < 10; ++i) {
		uint32_t status;
		for (j = 0; j < 100; ++j) {
			status = RREG32(UVD_STATUS);
			if (status & 2)
				break;
			mdelay(10);
		}
		r = 0;
		if (status & 2)
			break;

		DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
		WREG32_P(UVD_SOFT_RESET, VCPU_SOFT_RESET, ~VCPU_SOFT_RESET);
		mdelay(10);
		WREG32_P(UVD_SOFT_RESET, 0, ~VCPU_SOFT_RESET);
		mdelay(10);
		r = -1;
	}

	if (r) {
		DRM_ERROR("UVD not responding, giving up!!!\n");
		return r;
	}

	/* enable interrupt */
	WREG32_P(UVD_MASTINT_EN, 3 << 1, ~(3 << 1));

	/* force RBC into idle state */
	WREG32(UVD_RBC_RB_CNTL, 0x11010101);

	/* Set the write pointer delay */
	WREG32(UVD_RBC_RB_WPTR_CNTL, 0);

	/* program the 4GB memory segment for rptr and ring buffer */
	WREG32(UVD_LMI_EXT40_ADDR, upper_32_bits(ring->gpu_addr) |
	       (0x7 << 16) | (0x1 << 31));

	/* Initialize the ring buffer's read and write pointers */
	WREG32(UVD_RBC_RB_RPTR, 0x0);

	ring->wptr = RREG32(UVD_RBC_RB_RPTR);
	WREG32(UVD_RBC_RB_WPTR, ring->wptr);

	/* set the ring address */
	WREG32(UVD_RBC_RB_BASE, ring->gpu_addr);

	/* Set ring buffer size */
	rb_bufsz = order_base_2(ring->ring_size);
	rb_bufsz = (0x1 << 8) | rb_bufsz;
	WREG32_P(UVD_RBC_RB_CNTL, rb_bufsz, ~0x11f1f);

	return 0;
}

/**
 * uvd_v1_0_stop - stop UVD block
 *
 * @rdev: radeon_device pointer
 *
 * stop the UVD block
 */
void uvd_v1_0_stop(struct radeon_device *rdev)
{
	/* force RBC into idle state */
	WREG32(UVD_RBC_RB_CNTL, 0x11010101);

	/* Stall UMC and register bus before resetting VCPU */
	WREG32_P(UVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
	WREG32_P(UVD_RB_ARB_CTRL, 1 << 3, ~(1 << 3));
	mdelay(1);

	/* put VCPU into reset */
	WREG32(UVD_SOFT_RESET, VCPU_SOFT_RESET);
	mdelay(5);

	/* disable VCPU clock */
	WREG32(UVD_VCPU_CNTL, 0x0);

	/* Unstall UMC and register bus */
	WREG32_P(UVD_LMI_CTRL2, 0, ~(1 << 8));
	WREG32_P(UVD_RB_ARB_CTRL, 0, ~(1 << 3));
}

/**
 * uvd_v1_0_ring_test - register write test
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring pointer
 *
 * Test if we can successfully write to the context register
 */
int uvd_v1_0_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(UVD_CONTEXT_ID, 0xCAFEDEAD);
	r = radeon_ring_lock(rdev, ring, 3);
	if (r) {
		DRM_ERROR("radeon: failed to lock UVD ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
	radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0));
	radeon_ring_write(ring, 0xDEADBEEF);
	radeon_ring_unlock_commit(rdev, ring, false);
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(UVD_CONTEXT_ID);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < rdev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n",
			 ring->idx, i);
	} else {
		DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}
	return r;
}

/**
 * uvd_v1_0_semaphore_emit - emit semaphore command
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring pointer
 * @semaphore: semaphore to emit commands for
 * @emit_wait: true if we should emit a wait command
 *
 * Emit a semaphore command (either wait or signal) to the UVD ring.
 * Semaphores are disabled on UVD v1 hardware, so this always returns false.
 */
bool uvd_v1_0_semaphore_emit(struct radeon_device *rdev,
			     struct radeon_ring *ring,
			     struct radeon_semaphore *semaphore,
			     bool emit_wait)
{
	/* disable semaphores for UVD V1 hardware */
	return false;
}

/**
 * uvd_v1_0_ib_execute - execute indirect buffer
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to execute
 *
 * Write ring commands to execute the indirect buffer
 */
void uvd_v1_0_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->ring];

	radeon_ring_write(ring, PACKET0(UVD_RBC_IB_BASE, 0));
	radeon_ring_write(ring, ib->gpu_addr);
	radeon_ring_write(ring, PACKET0(UVD_RBC_IB_SIZE, 0));
	radeon_ring_write(ring, ib->length_dw);
}

/**
 * uvd_v1_0_ib_test - test ib execution
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring pointer
 *
 * Test if we can successfully execute an IB
 */
int uvd_v1_0_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
	struct radeon_fence *fence = NULL;
	int r;

	if (rdev->family < CHIP_RV740)
		r = radeon_set_uvd_clocks(rdev, 10000, 10000);
	else
		r = radeon_set_uvd_clocks(rdev, 53300, 40000);
	if (r) {
		DRM_ERROR("radeon: failed to raise UVD clocks (%d).\n", r);
		return r;
	}

	r = radeon_uvd_get_create_msg(rdev, ring->idx, 1, NULL);
	if (r) {
		DRM_ERROR("radeon: failed to get create msg (%d).\n", r);
		goto error;
	}

	r = radeon_uvd_get_destroy_msg(rdev, ring->idx, 1, &fence);
	if (r) {
		DRM_ERROR("radeon: failed to get destroy ib (%d).\n", r);
		goto error;
	}

	r = radeon_fence_wait(fence, false);
	if (r) {
		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
		goto error;
	}
	DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
error:
	radeon_fence_unref(&fence);
	radeon_set_uvd_clocks(rdev, 0, 0);
	return r;
}