/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_ucode.h"
#include "radeon_asic.h"
#include "radeon_trace.h"
#include "cikd.h"

/* sdma */
#define CIK_SDMA_UCODE_SIZE 1050
#define CIK_SDMA_UCODE_VERSION 64

u32 cik_gpu_check_soft_reset(struct radeon_device *rdev);

/*
 * sDMA - System DMA
 * Starting with CIK, the GPU has new asynchronous
 * DMA engines. These engines are used for compute
 * and gfx. There are two DMA engines (SDMA0, SDMA1)
 * and each one supports 1 ring buffer used for gfx
 * and 2 queues used for compute.
 *
 * The programming model is very similar to the CP
 * (ring buffer, IBs, etc.), but sDMA has its own
 * packet format that is different from the PM4 format
 * used by the CP. sDMA supports copying data, writing
 * embedded data, solid fills, and a number of other
 * things. It also has support for tiling/detiling of
 * buffers.
 */
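
/*
 * A rough sketch for orientation (assuming the SDMA_PACKET() layout in
 * cikd.h): every sDMA packet emitted below starts with a single dword
 * header packing an 8-bit opcode, an 8-bit sub-opcode and 16 bits of
 * packet specific flags, roughly
 *
 *   header = (extra << 16) | (sub_op << 8) | op;
 *
 * followed by packet specific payload dwords (addresses are written
 * low dword first, then the upper 32 bits).
 */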

/**
 * cik_sdma_get_rptr - get the current read pointer
 *
 * @rdev: radeon_device pointer
 * @ring: radeon ring pointer
 *
 * Get the current rptr from the hardware (CIK+).
 */
uint32_t cik_sdma_get_rptr(struct radeon_device *rdev,
			   struct radeon_ring *ring)
{
	u32 rptr, reg;

	if (rdev->wb.enabled) {
		rptr = rdev->wb.wb[ring->rptr_offs/4];
	} else {
		if (ring->idx == R600_RING_TYPE_DMA_INDEX)
			reg = SDMA0_GFX_RB_RPTR + SDMA0_REGISTER_OFFSET;
		else
			reg = SDMA0_GFX_RB_RPTR + SDMA1_REGISTER_OFFSET;

		rptr = RREG32(reg);
	}

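	/* the register value is a byte offset; convert it to a dword index */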
	return (rptr & 0x3fffc) >> 2;
}

/**
 * cik_sdma_get_wptr - get the current write pointer
 *
 * @rdev: radeon_device pointer
 * @ring: radeon ring pointer
 *
 * Get the current wptr from the hardware (CIK+).
 */
uint32_t cik_sdma_get_wptr(struct radeon_device *rdev,
			   struct radeon_ring *ring)
{
	u32 reg;

	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
		reg = SDMA0_GFX_RB_WPTR + SDMA0_REGISTER_OFFSET;
	else
		reg = SDMA0_GFX_RB_WPTR + SDMA1_REGISTER_OFFSET;

	return (RREG32(reg) & 0x3fffc) >> 2;
}

/**
 * cik_sdma_set_wptr - commit the write pointer
 *
 * @rdev: radeon_device pointer
 * @ring: radeon ring pointer
 *
 * Write the wptr back to the hardware (CIK+).
 */
void cik_sdma_set_wptr(struct radeon_device *rdev,
		       struct radeon_ring *ring)
{
	u32 reg;

	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
		reg = SDMA0_GFX_RB_WPTR + SDMA0_REGISTER_OFFSET;
	else
		reg = SDMA0_GFX_RB_WPTR + SDMA1_REGISTER_OFFSET;

	WREG32(reg, (ring->wptr << 2) & 0x3fffc);
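	/* posting read: make sure the wptr write has reached the hardware */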
	(void)RREG32(reg);
}

/**
 * cik_sdma_ring_ib_execute - Schedule an IB on the DMA engine
 *
 * @rdev: radeon_device pointer
 * @ib: IB object to schedule
 *
 * Schedule an IB in the DMA ring (CIK).
 */
void cik_sdma_ring_ib_execute(struct radeon_device *rdev,
			      struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->ring];
	u32 extra_bits = (ib->vm ? ib->vm->ids[ib->ring].id : 0) & 0xf;

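	/*
	 * Write the post-IB rptr back for the CPU: 5 dwords for the write
	 * packet below, NOP padding until the indirect buffer packet starts
	 * at (wptr & 7) == 4, then 4 dwords for the IB packet itself so it
	 * ends on an 8 dword boundary.
	 */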
	if (rdev->wb.enabled) {
		u32 next_rptr = ring->wptr + 5;
		while ((next_rptr & 7) != 4)
			next_rptr++;
		next_rptr += 4;
		radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
		radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
		radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr));
		radeon_ring_write(ring, 1); /* number of DWs to follow */
		radeon_ring_write(ring, next_rptr);
	}

	/* IB packet must end on an 8 DW boundary */
	while ((ring->wptr & 7) != 4)
		radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_INDIRECT_BUFFER, 0, extra_bits));
	radeon_ring_write(ring, ib->gpu_addr & 0xffffffe0); /* base must be 32 byte aligned */
	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr));
	radeon_ring_write(ring, ib->length_dw);
}

/**
 * cik_sdma_hdp_flush_ring_emit - emit an HDP flush on the DMA ring
 *
 * @rdev: radeon_device pointer
 * @ridx: radeon ring index
 *
 * Emit an HDP flush packet on the requested DMA ring.
 */
static void cik_sdma_hdp_flush_ring_emit(struct radeon_device *rdev,
					 int ridx)
{
	struct radeon_ring *ring = &rdev->ring[ridx];
	u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(1) |
			  SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
	u32 ref_and_mask;

	if (ridx == R600_RING_TYPE_DMA_INDEX)
		ref_and_mask = SDMA0;
	else
		ref_and_mask = SDMA1;

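	/*
	 * Request the flush via GPU_HDP_FLUSH_REQ, then poll
	 * GPU_HDP_FLUSH_DONE until the engine's bit reads back
	 * (the "==" compare function above).
	 */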
	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
	radeon_ring_write(ring, GPU_HDP_FLUSH_DONE);
	radeon_ring_write(ring, GPU_HDP_FLUSH_REQ);
	radeon_ring_write(ring, ref_and_mask); /* reference */
	radeon_ring_write(ring, ref_and_mask); /* mask */
	radeon_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
}

/**
 * cik_sdma_fence_ring_emit - emit a fence on the DMA ring
 *
 * @rdev: radeon_device pointer
 * @fence: radeon fence object
 *
 * Add a DMA fence packet to the ring to write
 * the fence seq number and a DMA trap packet to generate
 * an interrupt if needed (CIK).
 */
void cik_sdma_fence_ring_emit(struct radeon_device *rdev,
			      struct radeon_fence *fence)
{
	struct radeon_ring *ring = &rdev->ring[fence->ring];
	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;

	/* write the fence */
	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_FENCE, 0, 0));
	radeon_ring_write(ring, lower_32_bits(addr));
	radeon_ring_write(ring, upper_32_bits(addr));
	radeon_ring_write(ring, fence->seq);
	/* generate an interrupt */
	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_TRAP, 0, 0));
	/* flush HDP */
	cik_sdma_hdp_flush_ring_emit(rdev, fence->ring);
}

/**
 * cik_sdma_semaphore_ring_emit - emit a semaphore on the dma ring
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 * @semaphore: radeon semaphore object
 * @emit_wait: wait or signal semaphore
 *
 * Add a DMA semaphore packet to the ring to wait on or signal
 * other rings (CIK).
 */
bool cik_sdma_semaphore_ring_emit(struct radeon_device *rdev,
				  struct radeon_ring *ring,
				  struct radeon_semaphore *semaphore,
				  bool emit_wait)
{
	u64 addr = semaphore->gpu_addr;
	u32 extra_bits = emit_wait ? 0 : SDMA_SEMAPHORE_EXTRA_S;

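	/* the semaphore address must be 8 byte aligned, hence the mask below */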
	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SEMAPHORE, 0, extra_bits));
	radeon_ring_write(ring, addr & 0xfffffff8);
	radeon_ring_write(ring, upper_32_bits(addr));

	return true;
}

/**
 * cik_sdma_gfx_stop - stop the gfx async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Stop the gfx async dma ring buffers (CIK).
 */
static void cik_sdma_gfx_stop(struct radeon_device *rdev)
{
	u32 rb_cntl, reg_offset;
	int i;

	if ((rdev->asic->copy.copy_ring_index == R600_RING_TYPE_DMA_INDEX) ||
	    (rdev->asic->copy.copy_ring_index == CAYMAN_RING_TYPE_DMA1_INDEX))
		radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);

	for (i = 0; i < 2; i++) {
		if (i == 0)
			reg_offset = SDMA0_REGISTER_OFFSET;
		else
			reg_offset = SDMA1_REGISTER_OFFSET;
		rb_cntl = RREG32(SDMA0_GFX_RB_CNTL + reg_offset);
		rb_cntl &= ~SDMA_RB_ENABLE;
		WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl);
		WREG32(SDMA0_GFX_IB_CNTL + reg_offset, 0);
	}
	rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
	rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false;
}

/**
 * cik_sdma_rlc_stop - stop the compute async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Stop the compute async dma queues (CIK).
 */
static void cik_sdma_rlc_stop(struct radeon_device *rdev)
{
	/* XXX todo */
}

/**
 * cik_sdma_enable - halt or unhalt the async dma engines
 *
 * @rdev: radeon_device pointer
 * @enable: enable/disable the DMA MEs.
 *
 * Halt or unhalt the async dma engines (CIK).
 */
void cik_sdma_enable(struct radeon_device *rdev, bool enable)
{
	u32 me_cntl, reg_offset;
	int i;

	if (!enable) {
		cik_sdma_gfx_stop(rdev);
		cik_sdma_rlc_stop(rdev);
	}

	for (i = 0; i < 2; i++) {
		if (i == 0)
			reg_offset = SDMA0_REGISTER_OFFSET;
		else
			reg_offset = SDMA1_REGISTER_OFFSET;
		me_cntl = RREG32(SDMA0_ME_CNTL + reg_offset);
		if (enable)
			me_cntl &= ~SDMA_HALT;
		else
			me_cntl |= SDMA_HALT;
		WREG32(SDMA0_ME_CNTL + reg_offset, me_cntl);
	}
}

/**
 * cik_sdma_gfx_resume - setup and start the async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Set up the gfx DMA ring buffers and enable them (CIK).
 * Returns 0 for success, error for failure.
 */
static int cik_sdma_gfx_resume(struct radeon_device *rdev)
{
	struct radeon_ring *ring;
	u32 rb_cntl, ib_cntl;
	u32 rb_bufsz;
	u32 reg_offset, wb_offset;
	int i, r;

	for (i = 0; i < 2; i++) {
		if (i == 0) {
			ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
			reg_offset = SDMA0_REGISTER_OFFSET;
			wb_offset = R600_WB_DMA_RPTR_OFFSET;
		} else {
			ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
			reg_offset = SDMA1_REGISTER_OFFSET;
			wb_offset = CAYMAN_WB_DMA1_RPTR_OFFSET;
		}

		WREG32(SDMA0_SEM_INCOMPLETE_TIMER_CNTL + reg_offset, 0);
		WREG32(SDMA0_SEM_WAIT_FAIL_TIMER_CNTL + reg_offset, 0);

		/* Set ring buffer size in dwords */
		rb_bufsz = order_base_2(ring->ring_size / 4);
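		/* the ring size field in RB_CNTL starts at bit 1, hence the shift */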
		rb_cntl = rb_bufsz << 1;
#ifdef __BIG_ENDIAN
		rb_cntl |= SDMA_RB_SWAP_ENABLE | SDMA_RPTR_WRITEBACK_SWAP_ENABLE;
#endif
		WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl);

		/* Initialize the ring buffer's read and write pointers */
		WREG32(SDMA0_GFX_RB_RPTR + reg_offset, 0);
		WREG32(SDMA0_GFX_RB_WPTR + reg_offset, 0);

		/* set the wb address whether it's enabled or not */
		WREG32(SDMA0_GFX_RB_RPTR_ADDR_HI + reg_offset,
		       upper_32_bits(rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
		WREG32(SDMA0_GFX_RB_RPTR_ADDR_LO + reg_offset,
		       ((rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC));

		if (rdev->wb.enabled)
			rb_cntl |= SDMA_RPTR_WRITEBACK_ENABLE;

		WREG32(SDMA0_GFX_RB_BASE + reg_offset, ring->gpu_addr >> 8);
		WREG32(SDMA0_GFX_RB_BASE_HI + reg_offset, ring->gpu_addr >> 40);

		ring->wptr = 0;
		WREG32(SDMA0_GFX_RB_WPTR + reg_offset, ring->wptr << 2);

		/* enable DMA RB */
		WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl | SDMA_RB_ENABLE);

		ib_cntl = SDMA_IB_ENABLE;
#ifdef __BIG_ENDIAN
		ib_cntl |= SDMA_IB_SWAP_ENABLE;
#endif
		/* enable DMA IBs */
		WREG32(SDMA0_GFX_IB_CNTL + reg_offset, ib_cntl);

		ring->ready = true;

		r = radeon_ring_test(rdev, ring->idx, ring);
		if (r) {
			ring->ready = false;
			return r;
		}
	}

	if ((rdev->asic->copy.copy_ring_index == R600_RING_TYPE_DMA_INDEX) ||
	    (rdev->asic->copy.copy_ring_index == CAYMAN_RING_TYPE_DMA1_INDEX))
		radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);

	return 0;
}

/**
 * cik_sdma_rlc_resume - setup and start the async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Set up the compute DMA queues and enable them (CIK).
 * Returns 0 for success, error for failure.
 */
static int cik_sdma_rlc_resume(struct radeon_device *rdev)
{
	/* XXX todo */
	return 0;
}

/**
 * cik_sdma_load_microcode - load the sDMA ME ucode
 *
 * @rdev: radeon_device pointer
 *
 * Loads the sDMA0/1 ucode.
 * Returns 0 for success, -EINVAL if the ucode is not available.
 */
static int cik_sdma_load_microcode(struct radeon_device *rdev)
{
	int i;

	if (!rdev->sdma_fw)
		return -EINVAL;

	/* halt the MEs */
	cik_sdma_enable(rdev, false);

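	/*
	 * Two firmware layouts are handled here: the newer files carry a
	 * little-endian header describing where the ucode lives and how
	 * big it is, while the legacy files are raw big-endian blobs of
	 * CIK_SDMA_UCODE_SIZE dwords.
	 */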
	if (rdev->new_fw) {
		const struct sdma_firmware_header_v1_0 *hdr =
			(const struct sdma_firmware_header_v1_0 *)rdev->sdma_fw->data;
		const __le32 *fw_data;
		u32 fw_size;

		radeon_ucode_print_sdma_hdr(&hdr->header);

		/* sdma0 */
		fw_data = (const __le32 *)
			(rdev->sdma_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
		fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
		WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0);
		for (i = 0; i < fw_size; i++)
			WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, le32_to_cpup(fw_data++));
		WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);

		/* sdma1 */
		fw_data = (const __le32 *)
			(rdev->sdma_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
		fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
		WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0);
		for (i = 0; i < fw_size; i++)
			WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, le32_to_cpup(fw_data++));
		WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);
	} else {
		const __be32 *fw_data;

		/* sdma0 */
		fw_data = (const __be32 *)rdev->sdma_fw->data;
		WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0);
		for (i = 0; i < CIK_SDMA_UCODE_SIZE; i++)
			WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, be32_to_cpup(fw_data++));
		WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);

		/* sdma1 */
		fw_data = (const __be32 *)rdev->sdma_fw->data;
		WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0);
		for (i = 0; i < CIK_SDMA_UCODE_SIZE; i++)
			WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, be32_to_cpup(fw_data++));
		WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);
	}

	WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0);
	WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0);
	return 0;
}

/**
 * cik_sdma_resume - setup and start the async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Set up the DMA engines and enable them (CIK).
 * Returns 0 for success, error for failure.
 */
int cik_sdma_resume(struct radeon_device *rdev)
{
	int r;

	r = cik_sdma_load_microcode(rdev);
	if (r)
		return r;

	/* unhalt the MEs */
	cik_sdma_enable(rdev, true);

	/* start the gfx rings and rlc compute queues */
	r = cik_sdma_gfx_resume(rdev);
	if (r)
		return r;
	r = cik_sdma_rlc_resume(rdev);
	if (r)
		return r;

	return 0;
}

/**
 * cik_sdma_fini - tear down the async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Stop the async dma engines and free the rings (CIK).
 */
void cik_sdma_fini(struct radeon_device *rdev)
{
	/* halt the MEs */
	cik_sdma_enable(rdev, false);
	radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
	radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]);
	/* XXX - compute dma queue tear down */
}

/**
 * cik_copy_dma - copy pages using the DMA engine
 *
 * @rdev: radeon_device pointer
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @num_gpu_pages: number of GPU pages to xfer
 * @resv: reservation object to sync to
 *
 * Copy GPU pages using the DMA engine (CIK).
 * Used by the radeon ttm implementation to move pages if
 * registered as the asic copy callback.
 */
struct radeon_fence *cik_copy_dma(struct radeon_device *rdev,
				  uint64_t src_offset, uint64_t dst_offset,
				  unsigned num_gpu_pages,
				  struct reservation_object *resv)
{
	struct radeon_fence *fence;
	struct radeon_sync sync;
	int ring_index = rdev->asic->copy.dma_ring_index;
	struct radeon_ring *ring = &rdev->ring[ring_index];
	u32 size_in_bytes, cur_size_in_bytes;
	int i, num_loops;
	int r = 0;

	radeon_sync_create(&sync);

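	/*
	 * Each LINEAR copy packet is 7 dwords and moves at most 0x1fffff
	 * bytes, so reserve 7 dwords per loop plus headroom for the sync
	 * and fence packets.
	 */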
	size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
	num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff);
	r = radeon_ring_lock(rdev, ring, num_loops * 7 + 14);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d).\n", r);
		radeon_sync_free(rdev, &sync, NULL);
		return ERR_PTR(r);
	}

	radeon_sync_resv(rdev, &sync, resv, false);
	radeon_sync_rings(rdev, &sync, ring->idx);

	for (i = 0; i < num_loops; i++) {
		cur_size_in_bytes = size_in_bytes;
		if (cur_size_in_bytes > 0x1fffff)
			cur_size_in_bytes = 0x1fffff;
		size_in_bytes -= cur_size_in_bytes;
		radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_LINEAR, 0));
		radeon_ring_write(ring, cur_size_in_bytes);
		radeon_ring_write(ring, 0); /* src/dst endian swap */
		radeon_ring_write(ring, lower_32_bits(src_offset));
		radeon_ring_write(ring, upper_32_bits(src_offset));
		radeon_ring_write(ring, lower_32_bits(dst_offset));
		radeon_ring_write(ring, upper_32_bits(dst_offset));
		src_offset += cur_size_in_bytes;
		dst_offset += cur_size_in_bytes;
	}

	r = radeon_fence_emit(rdev, &fence, ring->idx);
	if (r) {
		radeon_ring_unlock_undo(rdev, ring);
		radeon_sync_free(rdev, &sync, NULL);
		return ERR_PTR(r);
	}

	radeon_ring_unlock_commit(rdev, ring, false);
	radeon_sync_free(rdev, &sync, fence);

	return fence;
}
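
/*
 * Usage sketch (illustrative, not code from this driver): callers go
 * through the asic copy callback and must treat the ERR_PTR() return
 * as failure, roughly:
 *
 *   struct radeon_fence *fence =
 *           rdev->asic->copy.dma(rdev, src_offset, dst_offset,
 *                                num_gpu_pages, resv);
 *   if (IS_ERR(fence))
 *           return PTR_ERR(fence);
 *   r = radeon_fence_wait(fence, false);
 */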

/**
 * cik_sdma_ring_test - simple async dma engine test
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Test the DMA engine by using it to write a value
 * to memory (CIK).
 * Returns 0 for success, error for failure.
 */
int cik_sdma_ring_test(struct radeon_device *rdev,
		       struct radeon_ring *ring)
{
	unsigned i;
	int r;
	unsigned index;
	u32 tmp;
	u64 gpu_addr;

	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
		index = R600_WB_DMA_RING_TEST_OFFSET;
	else
		index = CAYMAN_WB_DMA1_RING_TEST_OFFSET;

	gpu_addr = rdev->wb.gpu_addr + index;

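	/* seed the writeback slot; the packet below must overwrite it */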
621 tmp = 0xCAFEDEAD;
Alex Deucheradfed2b02014-10-13 13:20:02 -0400622 rdev->wb.wb[index/4] = cpu_to_le32(tmp);
Christian König2483b4e2013-08-13 11:56:54 +0200623
Alex Deucher7e95cfb2014-04-22 08:17:18 -0400624 r = radeon_ring_lock(rdev, ring, 5);
Christian König2483b4e2013-08-13 11:56:54 +0200625 if (r) {
626 DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r);
627 return r;
628 }
629 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
Alex Deucheradfed2b02014-10-13 13:20:02 -0400630 radeon_ring_write(ring, lower_32_bits(gpu_addr));
631 radeon_ring_write(ring, upper_32_bits(gpu_addr));
Christian König2483b4e2013-08-13 11:56:54 +0200632 radeon_ring_write(ring, 1); /* number of DWs to follow */
633 radeon_ring_write(ring, 0xDEADBEEF);
Michel Dänzer1538a9e2014-08-18 17:34:55 +0900634 radeon_ring_unlock_commit(rdev, ring, false);
Christian König2483b4e2013-08-13 11:56:54 +0200635
636 for (i = 0; i < rdev->usec_timeout; i++) {
Alex Deucheradfed2b02014-10-13 13:20:02 -0400637 tmp = le32_to_cpu(rdev->wb.wb[index/4]);
Christian König2483b4e2013-08-13 11:56:54 +0200638 if (tmp == 0xDEADBEEF)
639 break;
640 DRM_UDELAY(1);
641 }
642
643 if (i < rdev->usec_timeout) {
644 DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
645 } else {
646 DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
647 ring->idx, tmp);
648 r = -EINVAL;
649 }
650 return r;
651}

/**
 * cik_sdma_ib_test - test an IB on the DMA engine
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Test a simple IB in the DMA ring (CIK).
 * Returns 0 on success, error on failure.
 */
int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
	struct radeon_ib ib;
	unsigned i;
	unsigned index;
	int r;
	u32 tmp = 0;
	u64 gpu_addr;

	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
		index = R600_WB_DMA_RING_TEST_OFFSET;
	else
		index = CAYMAN_WB_DMA1_RING_TEST_OFFSET;

	gpu_addr = rdev->wb.gpu_addr + index;

	tmp = 0xCAFEDEAD;
	rdev->wb.wb[index/4] = cpu_to_le32(tmp);

	r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
	if (r) {
		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
		return r;
	}

	ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
	ib.ptr[1] = lower_32_bits(gpu_addr);
	ib.ptr[2] = upper_32_bits(gpu_addr);
	ib.ptr[3] = 1;
	ib.ptr[4] = 0xDEADBEEF;
	ib.length_dw = 5;

	r = radeon_ib_schedule(rdev, &ib, NULL, false);
	if (r) {
		radeon_ib_free(rdev, &ib);
		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
		return r;
	}
	r = radeon_fence_wait(ib.fence, false);
	if (r) {
		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
		return r;
	}
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = le32_to_cpu(rdev->wb.wb[index/4]);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}
	if (i < rdev->usec_timeout) {
		DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
	} else {
		DRM_ERROR("radeon: ib test failed (0x%08X)\n", tmp);
		r = -EINVAL;
	}
	radeon_ib_free(rdev, &ib);
	return r;
}

/**
 * cik_sdma_is_lockup - Check if the DMA engine is locked up
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Check if the async DMA engine is locked up (CIK).
 * Returns true if the engine appears to be locked up, false if not.
 */
bool cik_sdma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
	u32 reset_mask = cik_gpu_check_soft_reset(rdev);
	u32 mask;

	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
		mask = RADEON_RESET_DMA;
	else
		mask = RADEON_RESET_DMA1;

	if (!(reset_mask & mask)) {
		radeon_ring_lockup_update(rdev, ring);
		return false;
	}
	return radeon_ring_test_lockup(rdev, ring);
}

/**
 * cik_sdma_vm_copy_pages - update PTEs by copying them from the GART
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @src: src addr to copy from
 * @count: number of page entries to update
 *
 * Update PTEs by copying them from the GART using sDMA (CIK).
 */
void cik_sdma_vm_copy_pages(struct radeon_device *rdev,
			    struct radeon_ib *ib,
			    uint64_t pe, uint64_t src,
			    unsigned count)
{
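	/*
	 * One copy packet can move at most 0x1FFFF8 bytes: the 0x1fffff
	 * packet limit rounded down to a whole number of 8 byte PTEs.
	 */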
	while (count) {
		unsigned bytes = count * 8;
		if (bytes > 0x1FFFF8)
			bytes = 0x1FFFF8;

		ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY,
						       SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
		ib->ptr[ib->length_dw++] = bytes;
		ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
		ib->ptr[ib->length_dw++] = lower_32_bits(src);
		ib->ptr[ib->length_dw++] = upper_32_bits(src);
		ib->ptr[ib->length_dw++] = lower_32_bits(pe);
		ib->ptr[ib->length_dw++] = upper_32_bits(pe);

		pe += bytes;
		src += bytes;
		count -= bytes / 8;
	}
}

/**
 * cik_sdma_vm_write_pages - update PTEs by writing them manually
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update PTEs by writing them manually using sDMA (CIK).
 */
void cik_sdma_vm_write_pages(struct radeon_device *rdev,
			     struct radeon_ib *ib,
			     uint64_t pe,
			     uint64_t addr, unsigned count,
			     uint32_t incr, uint32_t flags)
{
	uint64_t value;
	unsigned ndw;

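	/*
	 * Each PTE takes two payload dwords; a single write packet is
	 * limited to 0xFFFFE payload dwords.
	 */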
	while (count) {
		ndw = count * 2;
		if (ndw > 0xFFFFE)
			ndw = 0xFFFFE;

		/* for non-physically contiguous pages (system) */
		ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_WRITE,
						       SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
		ib->ptr[ib->length_dw++] = pe;
		ib->ptr[ib->length_dw++] = upper_32_bits(pe);
		ib->ptr[ib->length_dw++] = ndw;
		for (; ndw > 0; ndw -= 2, --count, pe += 8) {
			if (flags & R600_PTE_SYSTEM) {
				value = radeon_vm_map_gart(rdev, addr);
				value &= 0xFFFFFFFFFFFFF000ULL;
			} else if (flags & R600_PTE_VALID) {
				value = addr;
			} else {
				value = 0;
			}
			addr += incr;
			value |= flags;
			ib->ptr[ib->length_dw++] = value;
			ib->ptr[ib->length_dw++] = upper_32_bits(value);
		}
	}
}

/**
 * cik_sdma_vm_set_pages - update the page tables using sDMA
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update the page tables using sDMA (CIK).
 */
void cik_sdma_vm_set_pages(struct radeon_device *rdev,
			   struct radeon_ib *ib,
			   uint64_t pe,
			   uint64_t addr, unsigned count,
			   uint32_t incr, uint32_t flags)
{
	uint64_t value;
	unsigned ndw;

	while (count) {
		ndw = count;
		if (ndw > 0x7FFFF)
			ndw = 0x7FFFF;

		if (flags & R600_PTE_VALID)
			value = addr;
		else
			value = 0;

		/* for physically contiguous pages (vram) */
		ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0);
		ib->ptr[ib->length_dw++] = pe; /* dst addr */
		ib->ptr[ib->length_dw++] = upper_32_bits(pe);
		ib->ptr[ib->length_dw++] = flags; /* mask */
		ib->ptr[ib->length_dw++] = 0;
		ib->ptr[ib->length_dw++] = value; /* value */
		ib->ptr[ib->length_dw++] = upper_32_bits(value);
		ib->ptr[ib->length_dw++] = incr; /* increment size */
		ib->ptr[ib->length_dw++] = 0;
		ib->ptr[ib->length_dw++] = ndw; /* number of entries */

		pe += ndw * 8;
		addr += ndw * incr;
		count -= ndw;
	}
}

/**
 * cik_sdma_vm_pad_ib - pad the IB to the required number of dw
 *
 * @ib: indirect buffer to fill with padding
 *
 * Pad the IB with NOPs to a multiple of 8 dwords.
 */
void cik_sdma_vm_pad_ib(struct radeon_ib *ib)
{
	while (ib->length_dw & 0x7)
		ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0);
}

/**
 * cik_dma_vm_flush - cik vm flush using sDMA
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring pointer
 * @vm_id: vm id to flush
 * @pd_addr: page directory address
 *
 * Update the page table base and flush the VM TLB
 * using sDMA (CIK).
 */
void cik_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
		      unsigned vm_id, uint64_t pd_addr)
{
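	/*
	 * SRBM_WRITE packets: dword 1 is the register (dword) offset and
	 * dword 2 the value; the 0xf000 header bits appear to be the
	 * byte-enable mask for the write.
	 */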
	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	if (vm_id < 8) {
		radeon_ring_write(ring, (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2)) >> 2);
	} else {
		radeon_ring_write(ring, (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm_id - 8) << 2)) >> 2);
	}
	radeon_ring_write(ring, pd_addr >> 12);

	/* update SH_MEM_* regs */
	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
	radeon_ring_write(ring, VMID(vm_id));

	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	radeon_ring_write(ring, SH_MEM_BASES >> 2);
	radeon_ring_write(ring, 0);

	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	radeon_ring_write(ring, SH_MEM_CONFIG >> 2);
	radeon_ring_write(ring, 0);

	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	radeon_ring_write(ring, SH_MEM_APE1_BASE >> 2);
	radeon_ring_write(ring, 1);

	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	radeon_ring_write(ring, SH_MEM_APE1_LIMIT >> 2);
	radeon_ring_write(ring, 0);

	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
	radeon_ring_write(ring, VMID(0));

	/* flush HDP */
	cik_sdma_hdp_flush_ring_emit(rdev, ring->idx);

	/* flush TLB */
	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
	radeon_ring_write(ring, 1 << vm_id);
}