/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_ucode.h"
#include "amdgpu_trace.h"
#include "cikd.h"
#include "cik.h"

#include "bif/bif_4_1_d.h"
#include "bif/bif_4_1_sh_mask.h"

#include "gca/gfx_7_2_d.h"
#include "gca/gfx_7_2_enum.h"
#include "gca/gfx_7_2_sh_mask.h"

#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"

#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"

static const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
{
	SDMA0_REGISTER_OFFSET,
	SDMA1_REGISTER_OFFSET
};

static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev);
static void cik_sdma_set_irq_funcs(struct amdgpu_device *adev);
static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev);
static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev);

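/*
 * Each CIK part ships two SDMA images under the legacy radeon/ firmware
 * path: <chip>_sdma.bin for instance 0 and <chip>_sdma1.bin for instance 1.
 */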
MODULE_FIRMWARE("radeon/bonaire_sdma.bin");
MODULE_FIRMWARE("radeon/bonaire_sdma1.bin");
MODULE_FIRMWARE("radeon/hawaii_sdma.bin");
MODULE_FIRMWARE("radeon/hawaii_sdma1.bin");
MODULE_FIRMWARE("radeon/kaveri_sdma.bin");
MODULE_FIRMWARE("radeon/kaveri_sdma1.bin");
MODULE_FIRMWARE("radeon/kabini_sdma.bin");
MODULE_FIRMWARE("radeon/kabini_sdma1.bin");
MODULE_FIRMWARE("radeon/mullins_sdma.bin");
MODULE_FIRMWARE("radeon/mullins_sdma1.bin");

u32 amdgpu_cik_gpu_check_soft_reset(struct amdgpu_device *adev);

/*
 * sDMA - System DMA
 * Starting with CIK, the GPU has new asynchronous
 * DMA engines. These engines are used for compute
 * and gfx. There are two DMA engines (SDMA0, SDMA1)
 * and each one supports 1 ring buffer used for gfx
 * and 2 queues used for compute.
 *
 * The programming model is very similar to the CP
 * (ring buffer, IBs, etc.), but sDMA has its own
 * packet format that is different from the PM4 format
 * used by the CP. sDMA supports copying data, writing
 * embedded data, solid fills, and a number of other
 * things. It also has support for tiling/detiling of
 * buffers.
 */
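
/*
 * A minimal example: the linear write packet used by the ring test
 * below is a header dword, a 64-bit destination address, a dword
 * count and the payload:
 *
 *   SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0)
 *   lower_32_bits(gpu_addr)
 *   upper_32_bits(gpu_addr)
 *   1           (number of DWs to follow)
 *   0xDEADBEEF  (payload)
 */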

/**
 * cik_sdma_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */
static int cik_sdma_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err = 0, i;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_BONAIRE:
		chip_name = "bonaire";
		break;
	case CHIP_HAWAII:
		chip_name = "hawaii";
		break;
	case CHIP_KAVERI:
		chip_name = "kaveri";
		break;
	case CHIP_KABINI:
		chip_name = "kabini";
		break;
	case CHIP_MULLINS:
		chip_name = "mullins";
		break;
	default: BUG();
	}

	for (i = 0; i < adev->sdma.num_instances; i++) {
		if (i == 0)
			snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma.bin", chip_name);
		else
			snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma1.bin", chip_name);
		err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
		if (err)
			goto out;
		err = amdgpu_ucode_validate(adev->sdma.instance[i].fw);
	}
out:
	if (err) {
		printk(KERN_ERR
		       "cik_sdma: Failed to load firmware \"%s\"\n",
		       fw_name);
		for (i = 0; i < adev->sdma.num_instances; i++) {
			release_firmware(adev->sdma.instance[i].fw);
			adev->sdma.instance[i].fw = NULL;
		}
	}
	return err;
}

/**
 * cik_sdma_ring_get_rptr - get the current read pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current rptr from the hardware (CIK+).
 */
static uint32_t cik_sdma_ring_get_rptr(struct amdgpu_ring *ring)
{
	u32 rptr;

	rptr = ring->adev->wb.wb[ring->rptr_offs];

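	/* the rptr written back is a byte offset; mask it to the ring
	 * range and convert it to a dword index
	 */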
	return (rptr & 0x3fffc) >> 2;
}

/**
 * cik_sdma_ring_get_wptr - get the current write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current wptr from the hardware (CIK+).
 */
static uint32_t cik_sdma_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;

	return (RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me]) & 0x3fffc) >> 2;
}

/**
 * cik_sdma_ring_set_wptr - commit the write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Write the wptr back to the hardware (CIK+).
 */
static void cik_sdma_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;

	WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], (ring->wptr << 2) & 0x3fffc);
}

static void cik_sdma_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
	int i;

	for (i = 0; i < count; i++)
		if (sdma && sdma->burst_nop && (i == 0))
			amdgpu_ring_write(ring, ring->nop |
					  SDMA_NOP_COUNT(count - 1));
		else
			amdgpu_ring_write(ring, ring->nop);
}

/**
 * cik_sdma_ring_emit_ib - Schedule an IB on the DMA engine
 *
 * @ring: amdgpu ring pointer
 * @ib: IB object to schedule
 *
 * Schedule an IB in the DMA ring (CIK).
 */
static void cik_sdma_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_ib *ib)
{
	u32 extra_bits = (ib->vm ? ib->vm->ids[ring->idx].id : 0) & 0xf;
	u32 next_rptr = ring->wptr + 5;

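	/* the 5-dword write packet below publishes next_rptr; align it so
	 * the 4-dword INDIRECT_BUFFER packet ends on an 8-dword boundary
	 */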
	while ((next_rptr & 7) != 4)
		next_rptr++;

	next_rptr += 4;
	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
	amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
	amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
	amdgpu_ring_write(ring, 1); /* number of DWs to follow */
	amdgpu_ring_write(ring, next_rptr);

	/* IB packet must end on an 8 DW boundary */
	cik_sdma_ring_insert_nop(ring, (12 - (ring->wptr & 7)) % 8);

	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_INDIRECT_BUFFER, 0, extra_bits));
	amdgpu_ring_write(ring, ib->gpu_addr & 0xffffffe0); /* base must be 32 byte aligned */
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xffffffff);
	amdgpu_ring_write(ring, ib->length_dw);
}

/**
 * cik_sdma_ring_emit_hdp_flush - emit an hdp flush on the DMA ring
 *
 * @ring: amdgpu ring pointer
 *
 * Emit an hdp flush packet on the requested DMA ring.
 */
static void cik_sdma_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(1) |
			  SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
	u32 ref_and_mask;

	if (ring == &ring->adev->sdma.instance[0].ring)
		ref_and_mask = GPU_HDP_FLUSH_DONE__SDMA0_MASK;
	else
		ref_and_mask = GPU_HDP_FLUSH_DONE__SDMA1_MASK;

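	/* kick the flush via GPU_HDP_FLUSH_REQ and poll GPU_HDP_FLUSH_DONE
	 * until this engine's bit matches the reference (EXTRA_FUNC(3)
	 * selects an equality compare)
	 */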
	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_DONE << 2);
	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_REQ << 2);
	amdgpu_ring_write(ring, ref_and_mask); /* reference */
	amdgpu_ring_write(ring, ref_and_mask); /* mask */
	amdgpu_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
}

/**
 * cik_sdma_ring_emit_fence - emit a fence on the DMA ring
 *
 * @ring: amdgpu ring pointer
 * @addr: address to write the fence sequence number to
 * @seq: fence sequence number
 * @flags: fence flags
 *
 * Add a DMA fence packet to the ring to write
 * the fence seq number and DMA trap packet to generate
 * an interrupt if needed (CIK).
 */
static void cik_sdma_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				     unsigned flags)
{
	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
	/* write the fence */
	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_FENCE, 0, 0));
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, lower_32_bits(seq));

	/* optionally write high bits as well */
	if (write64bit) {
		addr += 4;
		amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_FENCE, 0, 0));
		amdgpu_ring_write(ring, lower_32_bits(addr));
		amdgpu_ring_write(ring, upper_32_bits(addr));
		amdgpu_ring_write(ring, upper_32_bits(seq));
	}

	/* generate an interrupt */
	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_TRAP, 0, 0));
}

/**
 * cik_sdma_gfx_stop - stop the gfx async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the gfx async dma ring buffers (CIK).
 */
static void cik_sdma_gfx_stop(struct amdgpu_device *adev)
{
	struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
	struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
	u32 rb_cntl;
	int i;

	if ((adev->mman.buffer_funcs_ring == sdma0) ||
	    (adev->mman.buffer_funcs_ring == sdma1))
		amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);

	for (i = 0; i < adev->sdma.num_instances; i++) {
		rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
		rb_cntl &= ~SDMA0_GFX_RB_CNTL__RB_ENABLE_MASK;
		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
		WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], 0);
	}
	sdma0->ready = false;
	sdma1->ready = false;
}

/**
 * cik_sdma_rlc_stop - stop the compute async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the compute async dma queues (CIK).
 */
static void cik_sdma_rlc_stop(struct amdgpu_device *adev)
{
	/* XXX todo */
}

/**
 * cik_sdma_enable - halt or unhalt the async dma engines
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable the DMA MEs.
 *
 * Halt or unhalt the async dma engines (CIK).
 */
static void cik_sdma_enable(struct amdgpu_device *adev, bool enable)
{
	u32 me_cntl;
	int i;

	if (!enable) {
		cik_sdma_gfx_stop(adev);
		cik_sdma_rlc_stop(adev);
	}

	for (i = 0; i < adev->sdma.num_instances; i++) {
		me_cntl = RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]);
		if (enable)
			me_cntl &= ~SDMA0_F32_CNTL__HALT_MASK;
		else
			me_cntl |= SDMA0_F32_CNTL__HALT_MASK;
		WREG32(mmSDMA0_F32_CNTL + sdma_offsets[i], me_cntl);
	}
}

/**
 * cik_sdma_gfx_resume - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the gfx DMA ring buffers and enable them (CIK).
 * Returns 0 for success, error for failure.
 */
static int cik_sdma_gfx_resume(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	u32 rb_cntl, ib_cntl;
	u32 rb_bufsz;
	u32 wb_offset;
	int i, j, r;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;
		wb_offset = (ring->rptr_offs * 4);

		mutex_lock(&adev->srbm_mutex);
		for (j = 0; j < 16; j++) {
			cik_srbm_select(adev, 0, 0, 0, j);
			/* SDMA GFX */
			WREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i], 0);
			WREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i], 0);
			/* XXX SDMA RLC - todo */
		}
		cik_srbm_select(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);

		WREG32(mmSDMA0_SEM_INCOMPLETE_TIMER_CNTL + sdma_offsets[i], 0);
		WREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0);

		/* Set ring buffer size in dwords */
		rb_bufsz = order_base_2(ring->ring_size / 4);
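		/* the RB_CNTL size field at bit 1 takes log2 of the ring
		 * size in dwords
		 */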
		rb_cntl = rb_bufsz << 1;
#ifdef __BIG_ENDIAN
		rb_cntl |= SDMA0_GFX_RB_CNTL__RB_SWAP_ENABLE_MASK |
			   SDMA0_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK;
#endif
		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);

		/* Initialize the ring buffer's read and write pointers */
		WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0);
		WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0);

		/* set the wb address whether it's enabled or not */
		WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i],
		       upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
		WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i],
		       ((adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC));

		rb_cntl |= SDMA0_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK;

		WREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i], ring->gpu_addr >> 8);
		WREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i], ring->gpu_addr >> 40);

		ring->wptr = 0;
		WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], ring->wptr << 2);

		/* enable DMA RB */
		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i],
		       rb_cntl | SDMA0_GFX_RB_CNTL__RB_ENABLE_MASK);

		ib_cntl = SDMA0_GFX_IB_CNTL__IB_ENABLE_MASK;
#ifdef __BIG_ENDIAN
		ib_cntl |= SDMA0_GFX_IB_CNTL__IB_SWAP_ENABLE_MASK;
#endif
		/* enable DMA IBs */
		WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);

		ring->ready = true;

		r = amdgpu_ring_test_ring(ring);
		if (r) {
			ring->ready = false;
			return r;
		}

		if (adev->mman.buffer_funcs_ring == ring)
			amdgpu_ttm_set_active_vram_size(adev, adev->mc.real_vram_size);
	}

	return 0;
}

/**
 * cik_sdma_rlc_resume - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the compute DMA queues and enable them (CIK).
 * Returns 0 for success, error for failure.
 */
static int cik_sdma_rlc_resume(struct amdgpu_device *adev)
{
	/* XXX todo */
	return 0;
}

/**
 * cik_sdma_load_microcode - load the sDMA ME ucode
 *
 * @adev: amdgpu_device pointer
 *
 * Loads the sDMA0/1 ucode.
 * Returns 0 for success, -EINVAL if the ucode is not available.
 */
static int cik_sdma_load_microcode(struct amdgpu_device *adev)
{
	const struct sdma_firmware_header_v1_0 *hdr;
	const __le32 *fw_data;
	u32 fw_size;
	int i, j;

	/* halt the MEs */
	cik_sdma_enable(adev, false);

	for (i = 0; i < adev->sdma.num_instances; i++) {
		if (!adev->sdma.instance[i].fw)
			return -EINVAL;
		hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
		amdgpu_ucode_print_sdma_hdr(&hdr->header);
		fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
		adev->sdma.instance[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
		adev->sdma.instance[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
		if (adev->sdma.instance[i].feature_version >= 20)
			adev->sdma.instance[i].burst_nop = true;
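		/* upload the ucode one dword at a time through the
		 * UCODE_ADDR/UCODE_DATA register pair, then leave the fw
		 * version in UCODE_ADDR
		 */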
		fw_data = (const __le32 *)
			(adev->sdma.instance[i].fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
		WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0);
		for (j = 0; j < fw_size; j++)
			WREG32(mmSDMA0_UCODE_DATA + sdma_offsets[i], le32_to_cpup(fw_data++));
		WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma.instance[i].fw_version);
	}

	return 0;
}

/**
 * cik_sdma_start - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the DMA engines and enable them (CIK).
 * Returns 0 for success, error for failure.
 */
static int cik_sdma_start(struct amdgpu_device *adev)
{
	int r;

	r = cik_sdma_load_microcode(adev);
	if (r)
		return r;

	/* unhalt the MEs */
	cik_sdma_enable(adev, true);

	/* start the gfx rings and rlc compute queues */
	r = cik_sdma_gfx_resume(adev);
	if (r)
		return r;
	r = cik_sdma_rlc_resume(adev);
	if (r)
		return r;

	return 0;
}

/**
 * cik_sdma_ring_test_ring - simple async dma engine test
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Test the DMA engine by using it to write a value to memory (CIK).
 * Returns 0 for success, error for failure.
 */
static int cik_sdma_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned i;
	unsigned index;
	int r;
	u32 tmp;
	u64 gpu_addr;

	r = amdgpu_wb_get(adev, &index);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
		return r;
	}

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	tmp = 0xCAFEDEAD;
	adev->wb.wb[index] = cpu_to_le32(tmp);

	r = amdgpu_ring_alloc(ring, 5);
	if (r) {
		DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
		amdgpu_wb_free(adev, index);
		return r;
	}
	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
	amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
	amdgpu_ring_write(ring, 1); /* number of DWs to follow */
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = le32_to_cpu(adev->wb.wb[index]);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}
	amdgpu_wb_free(adev, index);

	return r;
}

/**
 * cik_sdma_ring_test_ib - test an IB on the DMA engine
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Test a simple IB in the DMA ring (CIK).
 * Returns 0 on success, error on failure.
 */
static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct fence *f = NULL;
	unsigned i;
	unsigned index;
	int r;
	u32 tmp = 0;
	u64 gpu_addr;

	r = amdgpu_wb_get(adev, &index);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
		return r;
	}

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	tmp = 0xCAFEDEAD;
	adev->wb.wb[index] = cpu_to_le32(tmp);
	memset(&ib, 0, sizeof(ib));
	r = amdgpu_ib_get(adev, NULL, 256, &ib);
	if (r) {
		DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
		goto err0;
	}

	ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
	ib.ptr[1] = lower_32_bits(gpu_addr);
	ib.ptr[2] = upper_32_bits(gpu_addr);
	ib.ptr[3] = 1;
	ib.ptr[4] = 0xDEADBEEF;
	ib.length_dw = 5;
	r = amdgpu_ib_schedule(ring, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED, &f);
	if (r)
		goto err1;

	r = fence_wait(f, false);
	if (r) {
		DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
		goto err1;
	}
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = le32_to_cpu(adev->wb.wb[index]);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}
	if (i < adev->usec_timeout) {
		DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
			 ring->idx, i);
		goto err1;
	} else {
		DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
		r = -EINVAL;
	}

err1:
	fence_put(f);
	amdgpu_ib_free(adev, &ib);
err0:
	amdgpu_wb_free(adev, index);
	return r;
}

/**
 * cik_sdma_vm_copy_pte - update PTEs by copying them from the GART
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @src: src addr to copy from
 * @count: number of page entries to update
 *
 * Update PTEs by copying them from the GART using sDMA (CIK).
 */
static void cik_sdma_vm_copy_pte(struct amdgpu_ib *ib,
				 uint64_t pe, uint64_t src,
				 unsigned count)
{
683 unsigned bytes = count * 8;
684 if (bytes > 0x1FFFF8)
685 bytes = 0x1FFFF8;
686
687 ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY,
688 SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
689 ib->ptr[ib->length_dw++] = bytes;
690 ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
691 ib->ptr[ib->length_dw++] = lower_32_bits(src);
692 ib->ptr[ib->length_dw++] = upper_32_bits(src);
693 ib->ptr[ib->length_dw++] = lower_32_bits(pe);
694 ib->ptr[ib->length_dw++] = upper_32_bits(pe);
695
696 pe += bytes;
697 src += bytes;
698 count -= bytes / 8;
699 }
700}
701
702/**
703 * cik_sdma_vm_write_pages - update PTEs by writing them manually
704 *
705 * @ib: indirect buffer to fill with commands
706 * @pe: addr of the page entry
707 * @addr: dst addr to write into pe
708 * @count: number of page entries to update
709 * @incr: increase next addr by incr bytes
710 * @flags: access flags
711 *
712 * Update PTEs by writing them manually using sDMA (CIK).
713 */
714static void cik_sdma_vm_write_pte(struct amdgpu_ib *ib,
Christian Königb07c9d22015-11-30 13:26:07 +0100715 const dma_addr_t *pages_addr, uint64_t pe,
Alex Deuchera2e73f52015-04-20 17:09:27 -0400716 uint64_t addr, unsigned count,
717 uint32_t incr, uint32_t flags)
718{
719 uint64_t value;
720 unsigned ndw;
721
722 while (count) {
723 ndw = count * 2;
724 if (ndw > 0xFFFFE)
725 ndw = 0xFFFFE;
726
727 /* for non-physically contiguous pages (system) */
728 ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_WRITE,
729 SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
730 ib->ptr[ib->length_dw++] = pe;
731 ib->ptr[ib->length_dw++] = upper_32_bits(pe);
732 ib->ptr[ib->length_dw++] = ndw;
733 for (; ndw > 0; ndw -= 2, --count, pe += 8) {
Christian Königb07c9d22015-11-30 13:26:07 +0100734 value = amdgpu_vm_map_gart(pages_addr, addr);
Alex Deuchera2e73f52015-04-20 17:09:27 -0400735 addr += incr;
736 value |= flags;
737 ib->ptr[ib->length_dw++] = value;
738 ib->ptr[ib->length_dw++] = upper_32_bits(value);
739 }
740 }
741}
742
743/**
744 * cik_sdma_vm_set_pages - update the page tables using sDMA
745 *
746 * @ib: indirect buffer to fill with commands
747 * @pe: addr of the page entry
748 * @addr: dst addr to write into pe
749 * @count: number of page entries to update
750 * @incr: increase next addr by incr bytes
751 * @flags: access flags
752 *
753 * Update the page tables using sDMA (CIK).
754 */
755static void cik_sdma_vm_set_pte_pde(struct amdgpu_ib *ib,
756 uint64_t pe,
757 uint64_t addr, unsigned count,
758 uint32_t incr, uint32_t flags)
759{
760 uint64_t value;
761 unsigned ndw;
762
763 while (count) {
764 ndw = count;
765 if (ndw > 0x7FFFF)
766 ndw = 0x7FFFF;
767
768 if (flags & AMDGPU_PTE_VALID)
769 value = addr;
770 else
771 value = 0;
772
773 /* for physically contiguous pages (vram) */
774 ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0);
775 ib->ptr[ib->length_dw++] = pe; /* dst addr */
776 ib->ptr[ib->length_dw++] = upper_32_bits(pe);
777 ib->ptr[ib->length_dw++] = flags; /* mask */
778 ib->ptr[ib->length_dw++] = 0;
779 ib->ptr[ib->length_dw++] = value; /* value */
780 ib->ptr[ib->length_dw++] = upper_32_bits(value);
781 ib->ptr[ib->length_dw++] = incr; /* increment size */
782 ib->ptr[ib->length_dw++] = 0;
783 ib->ptr[ib->length_dw++] = ndw; /* number of entries */
784
785 pe += ndw * 8;
786 addr += ndw * incr;
787 count -= ndw;
788 }
789}
790
791/**
792 * cik_sdma_vm_pad_ib - pad the IB to the required number of dw
793 *
794 * @ib: indirect buffer to fill with padding
795 *
796 */
Christian König9e5d53092016-01-31 12:20:55 +0100797static void cik_sdma_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
Alex Deuchera2e73f52015-04-20 17:09:27 -0400798{
Christian König9e5d53092016-01-31 12:20:55 +0100799 struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
Jammy Zhouac01db32015-09-01 13:13:54 +0800800 u32 pad_count;
801 int i;
802
803 pad_count = (8 - (ib->length_dw & 0x7)) % 8;
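	/* with burst NOP a single packet covers the whole pad (the count
	 * field encodes count - 1 extra dwords); otherwise emit one NOP
	 * packet per padding dword
	 */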
	for (i = 0; i < pad_count; i++)
		if (sdma && sdma->burst_nop && (i == 0))
			ib->ptr[ib->length_dw++] =
				SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0) |
				SDMA_NOP_COUNT(pad_count - 1);
		else
			ib->ptr[ib->length_dw++] =
				SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0);
}

/**
 * cik_sdma_ring_emit_vm_flush - cik vm flush using sDMA
 *
 * @ring: amdgpu_ring pointer
 * @vm_id: vmid number to use
 * @pd_addr: address of the page directory
 *
 * Update the page table base and flush the VM TLB
 * using sDMA (CIK).
 */
static void cik_sdma_ring_emit_vm_flush(struct amdgpu_ring *ring,
					unsigned vm_id, uint64_t pd_addr)
{
	u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(0) |
			  SDMA_POLL_REG_MEM_EXTRA_FUNC(0)); /* always */

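	/* VM contexts 0-7 and 8-15 have their page table base address in
	 * two separate register ranges, so pick the right one
	 */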
	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	if (vm_id < 8) {
		amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
	} else {
		amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8));
	}
	amdgpu_ring_write(ring, pd_addr >> 12);

	/* flush TLB */
	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
	amdgpu_ring_write(ring, 1 << vm_id);

	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, 0); /* reference */
	amdgpu_ring_write(ring, 0); /* mask */
	amdgpu_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
}

static void cik_enable_sdma_mgcg(struct amdgpu_device *adev,
				 bool enable)
{
	u32 orig, data;

	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_SDMA_MGCG)) {
		WREG32(mmSDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET, 0x00000100);
		WREG32(mmSDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET, 0x00000100);
	} else {
		orig = data = RREG32(mmSDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET);
		data |= 0xff000000;
		if (data != orig)
			WREG32(mmSDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET, data);

		orig = data = RREG32(mmSDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET);
		data |= 0xff000000;
		if (data != orig)
			WREG32(mmSDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET, data);
	}
}

static void cik_enable_sdma_mgls(struct amdgpu_device *adev,
				 bool enable)
{
	u32 orig, data;

	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_SDMA_LS)) {
		orig = data = RREG32(mmSDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET);
		data |= 0x100;
		if (orig != data)
			WREG32(mmSDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET, data);

		orig = data = RREG32(mmSDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET);
		data |= 0x100;
		if (orig != data)
			WREG32(mmSDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET, data);
	} else {
		orig = data = RREG32(mmSDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET);
		data &= ~0x100;
		if (orig != data)
			WREG32(mmSDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET, data);

		orig = data = RREG32(mmSDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET);
		data &= ~0x100;
		if (orig != data)
			WREG32(mmSDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET, data);
	}
}

static int cik_sdma_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->sdma.num_instances = SDMA_MAX_INSTANCE;

	cik_sdma_set_ring_funcs(adev);
	cik_sdma_set_irq_funcs(adev);
	cik_sdma_set_buffer_funcs(adev);
	cik_sdma_set_vm_pte_funcs(adev);

	return 0;
}

static int cik_sdma_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r, i;

	r = cik_sdma_init_microcode(adev);
	if (r) {
		DRM_ERROR("Failed to load sdma firmware!\n");
		return r;
	}

	/* SDMA trap event */
	r = amdgpu_irq_add_id(adev, 224, &adev->sdma.trap_irq);
	if (r)
		return r;

	/* SDMA Privileged inst */
	r = amdgpu_irq_add_id(adev, 241, &adev->sdma.illegal_inst_irq);
	if (r)
		return r;

	/* SDMA Privileged inst */
	r = amdgpu_irq_add_id(adev, 247, &adev->sdma.illegal_inst_irq);
	if (r)
		return r;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;
		ring->ring_obj = NULL;
		sprintf(ring->name, "sdma%d", i);
		r = amdgpu_ring_init(adev, ring, 256 * 1024,
				     SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0), 0xf,
				     &adev->sdma.trap_irq,
				     (i == 0) ?
				     AMDGPU_SDMA_IRQ_TRAP0 : AMDGPU_SDMA_IRQ_TRAP1,
				     AMDGPU_RING_TYPE_SDMA);
		if (r)
			return r;
	}

	return r;
}

static int cik_sdma_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++)
		amdgpu_ring_fini(&adev->sdma.instance[i].ring);

	return 0;
}

static int cik_sdma_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = cik_sdma_start(adev);
	if (r)
		return r;

	return r;
}

static int cik_sdma_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	cik_sdma_enable(adev, false);

	return 0;
}

static int cik_sdma_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return cik_sdma_hw_fini(adev);
}

static int cik_sdma_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return cik_sdma_hw_init(adev);
}

static bool cik_sdma_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS2);

	if (tmp & (SRBM_STATUS2__SDMA_BUSY_MASK |
		   SRBM_STATUS2__SDMA1_BUSY_MASK))
		return false;

	return true;
}

static int cik_sdma_wait_for_idle(void *handle)
{
	unsigned i;
	u32 tmp;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(mmSRBM_STATUS2) & (SRBM_STATUS2__SDMA_BUSY_MASK |
				SRBM_STATUS2__SDMA1_BUSY_MASK);

		if (!tmp)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

static void cik_sdma_print_status(void *handle)
{
	int i, j;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	dev_info(adev->dev, "CIK SDMA registers\n");
	dev_info(adev->dev, "  SRBM_STATUS2=0x%08X\n",
		 RREG32(mmSRBM_STATUS2));
	for (i = 0; i < adev->sdma.num_instances; i++) {
		dev_info(adev->dev, "  SDMA%d_STATUS_REG=0x%08X\n",
			 i, RREG32(mmSDMA0_STATUS_REG + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_ME_CNTL=0x%08X\n",
			 i, RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_CNTL=0x%08X\n",
			 i, RREG32(mmSDMA0_CNTL + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_SEM_INCOMPLETE_TIMER_CNTL=0x%08X\n",
			 i, RREG32(mmSDMA0_SEM_INCOMPLETE_TIMER_CNTL + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_SEM_WAIT_FAIL_TIMER_CNTL=0x%08X\n",
			 i, RREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_GFX_IB_CNTL=0x%08X\n",
			 i, RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_GFX_RB_CNTL=0x%08X\n",
			 i, RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_GFX_RB_RPTR=0x%08X\n",
			 i, RREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_GFX_RB_WPTR=0x%08X\n",
			 i, RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_GFX_RB_RPTR_ADDR_HI=0x%08X\n",
			 i, RREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_GFX_RB_RPTR_ADDR_LO=0x%08X\n",
			 i, RREG32(mmSDMA0_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_GFX_RB_BASE=0x%08X\n",
			 i, RREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_GFX_RB_BASE_HI=0x%08X\n",
			 i, RREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i]));
		mutex_lock(&adev->srbm_mutex);
		for (j = 0; j < 16; j++) {
			cik_srbm_select(adev, 0, 0, 0, j);
			dev_info(adev->dev, "  VM %d:\n", j);
			dev_info(adev->dev, "  SDMA0_GFX_VIRTUAL_ADDR=0x%08X\n",
				 RREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i]));
			dev_info(adev->dev, "  SDMA0_GFX_APE1_CNTL=0x%08X\n",
				 RREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i]));
		}
		cik_srbm_select(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);
	}
}

static int cik_sdma_soft_reset(void *handle)
{
	u32 srbm_soft_reset = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS2);

	if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK) {
		/* sdma0 */
		tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
		tmp |= SDMA0_F32_CNTL__HALT_MASK;
		WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK;
	}
	if (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK) {
		/* sdma1 */
		tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
		tmp |= SDMA0_F32_CNTL__HALT_MASK;
		WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK;
	}

	if (srbm_soft_reset) {
		cik_sdma_print_status((void *)adev);

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);

		cik_sdma_print_status((void *)adev);
	}

	return 0;
}

static int cik_sdma_set_trap_irq_state(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *src,
				       unsigned type,
				       enum amdgpu_interrupt_state state)
{
	u32 sdma_cntl;

	switch (type) {
	case AMDGPU_SDMA_IRQ_TRAP0:
		switch (state) {
		case AMDGPU_IRQ_STATE_DISABLE:
			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
			sdma_cntl &= ~SDMA0_CNTL__TRAP_ENABLE_MASK;
			WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl);
			break;
		case AMDGPU_IRQ_STATE_ENABLE:
			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
			sdma_cntl |= SDMA0_CNTL__TRAP_ENABLE_MASK;
			WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl);
			break;
		default:
			break;
		}
		break;
	case AMDGPU_SDMA_IRQ_TRAP1:
		switch (state) {
		case AMDGPU_IRQ_STATE_DISABLE:
			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
			sdma_cntl &= ~SDMA0_CNTL__TRAP_ENABLE_MASK;
			WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl);
			break;
		case AMDGPU_IRQ_STATE_ENABLE:
			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
			sdma_cntl |= SDMA0_CNTL__TRAP_ENABLE_MASK;
			WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl);
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}
	return 0;
}

static int cik_sdma_process_trap_irq(struct amdgpu_device *adev,
				     struct amdgpu_irq_src *source,
				     struct amdgpu_iv_entry *entry)
{
	u8 instance_id, queue_id;

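	/* ring_id encodes the SDMA instance in bits [1:0] and the queue
	 * (gfx ring or one of the two compute queues) in bits [3:2]
	 */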
	instance_id = (entry->ring_id & 0x3) >> 0;
	queue_id = (entry->ring_id & 0xc) >> 2;
	DRM_DEBUG("IH: SDMA trap\n");
	switch (instance_id) {
	case 0:
		switch (queue_id) {
		case 0:
			amdgpu_fence_process(&adev->sdma.instance[0].ring);
			break;
		case 1:
			/* XXX compute */
			break;
		case 2:
			/* XXX compute */
			break;
		}
		break;
	case 1:
		switch (queue_id) {
		case 0:
			amdgpu_fence_process(&adev->sdma.instance[1].ring);
			break;
		case 1:
			/* XXX compute */
			break;
		case 2:
			/* XXX compute */
			break;
		}
		break;
	}

	return 0;
}

static int cik_sdma_process_illegal_inst_irq(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *source,
					     struct amdgpu_iv_entry *entry)
{
	DRM_ERROR("Illegal instruction in SDMA command stream\n");
	schedule_work(&adev->reset_work);
	return 0;
}

static int cik_sdma_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	bool gate = false;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (state == AMD_CG_STATE_GATE)
		gate = true;

	cik_enable_sdma_mgcg(adev, gate);
	cik_enable_sdma_mgls(adev, gate);

	return 0;
}

static int cik_sdma_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs cik_sdma_ip_funcs = {
	.early_init = cik_sdma_early_init,
	.late_init = NULL,
	.sw_init = cik_sdma_sw_init,
	.sw_fini = cik_sdma_sw_fini,
	.hw_init = cik_sdma_hw_init,
	.hw_fini = cik_sdma_hw_fini,
	.suspend = cik_sdma_suspend,
	.resume = cik_sdma_resume,
	.is_idle = cik_sdma_is_idle,
	.wait_for_idle = cik_sdma_wait_for_idle,
	.soft_reset = cik_sdma_soft_reset,
	.print_status = cik_sdma_print_status,
	.set_clockgating_state = cik_sdma_set_clockgating_state,
	.set_powergating_state = cik_sdma_set_powergating_state,
};

static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = {
	.get_rptr = cik_sdma_ring_get_rptr,
	.get_wptr = cik_sdma_ring_get_wptr,
	.set_wptr = cik_sdma_ring_set_wptr,
	.parse_cs = NULL,
	.emit_ib = cik_sdma_ring_emit_ib,
	.emit_fence = cik_sdma_ring_emit_fence,
	.emit_vm_flush = cik_sdma_ring_emit_vm_flush,
	.emit_hdp_flush = cik_sdma_ring_emit_hdp_flush,
	.test_ring = cik_sdma_ring_test_ring,
	.test_ib = cik_sdma_ring_test_ib,
	.insert_nop = cik_sdma_ring_insert_nop,
	.pad_ib = cik_sdma_ring_pad_ib,
};

static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++)
		adev->sdma.instance[i].ring.funcs = &cik_sdma_ring_funcs;
}

static const struct amdgpu_irq_src_funcs cik_sdma_trap_irq_funcs = {
	.set = cik_sdma_set_trap_irq_state,
	.process = cik_sdma_process_trap_irq,
};

static const struct amdgpu_irq_src_funcs cik_sdma_illegal_inst_irq_funcs = {
	.process = cik_sdma_process_illegal_inst_irq,
};

static void cik_sdma_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
	adev->sdma.trap_irq.funcs = &cik_sdma_trap_irq_funcs;
	adev->sdma.illegal_inst_irq.funcs = &cik_sdma_illegal_inst_irq_funcs;
}

/**
 * cik_sdma_emit_copy_buffer - copy buffer using the sDMA engine
 *
 * @ib: indirect buffer to copy to
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 *
 * Copy GPU buffers using the DMA engine (CIK).
 * Used by the amdgpu ttm implementation to move pages if
 * registered as the asic copy callback.
 */
static void cik_sdma_emit_copy_buffer(struct amdgpu_ib *ib,
				      uint64_t src_offset,
				      uint64_t dst_offset,
				      uint32_t byte_count)
{
	ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_LINEAR, 0);
	ib->ptr[ib->length_dw++] = byte_count;
	ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
	ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(src_offset);
	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
}

/**
 * cik_sdma_emit_fill_buffer - fill buffer using the sDMA engine
 *
 * @ib: indirect buffer to fill
 * @src_data: value to write to buffer
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 *
 * Fill GPU buffers using the DMA engine (CIK).
 */
static void cik_sdma_emit_fill_buffer(struct amdgpu_ib *ib,
				      uint32_t src_data,
				      uint64_t dst_offset,
				      uint32_t byte_count)
{
	ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_CONSTANT_FILL, 0, 0);
	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = src_data;
	ib->ptr[ib->length_dw++] = byte_count;
}

static const struct amdgpu_buffer_funcs cik_sdma_buffer_funcs = {
	.copy_max_bytes = 0x1fffff,
	.copy_num_dw = 7,
	.emit_copy_buffer = cik_sdma_emit_copy_buffer,

	.fill_max_bytes = 0x1fffff,
	.fill_num_dw = 5,
	.emit_fill_buffer = cik_sdma_emit_fill_buffer,
};

static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev)
{
	if (adev->mman.buffer_funcs == NULL) {
		adev->mman.buffer_funcs = &cik_sdma_buffer_funcs;
		adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
	}
}

static const struct amdgpu_vm_pte_funcs cik_sdma_vm_pte_funcs = {
	.copy_pte = cik_sdma_vm_copy_pte,
	.write_pte = cik_sdma_vm_write_pte,
	.set_pte_pde = cik_sdma_vm_set_pte_pde,
};

static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev)
{
	if (adev->vm_manager.vm_pte_funcs == NULL) {
		adev->vm_manager.vm_pte_funcs = &cik_sdma_vm_pte_funcs;
		adev->vm_manager.vm_pte_funcs_ring = &adev->sdma.instance[0].ring;
		adev->vm_manager.vm_pte_funcs_ring->is_pte_ring = true;
	}
}
1378}