/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_ucode.h"
#include "amdgpu_trace.h"
#include "vi.h"
#include "vid.h"

#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"

#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"

#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_enum.h"
#include "gca/gfx_8_0_sh_mask.h"

#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"

#include "tonga_sdma_pkt_open.h"

static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev);
static void sdma_v3_0_set_buffer_funcs(struct amdgpu_device *adev);
static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev);
static void sdma_v3_0_set_irq_funcs(struct amdgpu_device *adev);

MODULE_FIRMWARE("amdgpu/tonga_sdma.bin");
MODULE_FIRMWARE("amdgpu/tonga_sdma1.bin");
MODULE_FIRMWARE("amdgpu/carrizo_sdma.bin");
MODULE_FIRMWARE("amdgpu/carrizo_sdma1.bin");
MODULE_FIRMWARE("amdgpu/fiji_sdma.bin");
MODULE_FIRMWARE("amdgpu/fiji_sdma1.bin");
MODULE_FIRMWARE("amdgpu/stoney_sdma.bin");

static const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
{
	SDMA0_REGISTER_OFFSET,
	SDMA1_REGISTER_OFFSET
};

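/* The golden register tables below are flat lists of {register, mask, value}
 * triples consumed by amdgpu_program_register_sequence(): the bits covered
 * by the mask are cleared in the current register value and the new value
 * is ORed in.
 */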
static const u32 golden_settings_tonga_a11[] =
{
	mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA0_CLK_CTRL, 0xff000fff, 0x00000000,
	mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA1_CLK_CTRL, 0xff000fff, 0x00000000,
	mmSDMA1_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
};

static const u32 tonga_mgcg_cgcg_init[] =
{
	mmSDMA0_CLK_CTRL, 0xff000ff0, 0x00000100,
	mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100
};

static const u32 golden_settings_fiji_a10[] =
{
	mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA1_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
};

static const u32 fiji_mgcg_cgcg_init[] =
{
	mmSDMA0_CLK_CTRL, 0xff000ff0, 0x00000100,
	mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100
};

static const u32 cz_golden_settings_a11[] =
{
	mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA0_CLK_CTRL, 0xff000fff, 0x00000000,
	mmSDMA0_GFX_IB_CNTL, 0x00000100, 0x00000100,
	mmSDMA0_POWER_CNTL, 0x00000800, 0x0003c800,
	mmSDMA0_RLC0_IB_CNTL, 0x00000100, 0x00000100,
	mmSDMA0_RLC1_IB_CNTL, 0x00000100, 0x00000100,
	mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA1_CLK_CTRL, 0xff000fff, 0x00000000,
	mmSDMA1_GFX_IB_CNTL, 0x00000100, 0x00000100,
	mmSDMA1_POWER_CNTL, 0x00000800, 0x0003c800,
	mmSDMA1_RLC0_IB_CNTL, 0x00000100, 0x00000100,
	mmSDMA1_RLC1_IB_CNTL, 0x00000100, 0x00000100,
};

static const u32 cz_mgcg_cgcg_init[] =
{
	mmSDMA0_CLK_CTRL, 0xff000ff0, 0x00000100,
	mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100
};

static const u32 stoney_golden_settings_a11[] =
{
	mmSDMA0_GFX_IB_CNTL, 0x00000100, 0x00000100,
	mmSDMA0_POWER_CNTL, 0x00000800, 0x0003c800,
	mmSDMA0_RLC0_IB_CNTL, 0x00000100, 0x00000100,
	mmSDMA0_RLC1_IB_CNTL, 0x00000100, 0x00000100,
};

static const u32 stoney_mgcg_cgcg_init[] =
{
	mmSDMA0_CLK_CTRL, 0xffffffff, 0x00000100,
};

/*
 * sDMA - System DMA
 * Starting with CIK, the GPU has new asynchronous
 * DMA engines. These engines are used for compute
 * and gfx. There are two DMA engines (SDMA0, SDMA1)
 * and each one supports 1 ring buffer used for gfx
 * and 2 queues used for compute.
 *
 * The programming model is very similar to the CP
 * (ring buffer, IBs, etc.), but sDMA has its own
 * packet format that is different from the PM4 format
 * used by the CP. sDMA supports copying data, writing
 * embedded data, solid fills, and a number of other
 * things. It also has support for tiling/detiling of
 * buffers.
 */

static void sdma_v3_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_FIJI:
		amdgpu_program_register_sequence(adev,
						 fiji_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init));
		amdgpu_program_register_sequence(adev,
						 golden_settings_fiji_a10,
						 (const u32)ARRAY_SIZE(golden_settings_fiji_a10));
		break;
	case CHIP_TONGA:
		amdgpu_program_register_sequence(adev,
						 tonga_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(tonga_mgcg_cgcg_init));
		amdgpu_program_register_sequence(adev,
						 golden_settings_tonga_a11,
						 (const u32)ARRAY_SIZE(golden_settings_tonga_a11));
		break;
	case CHIP_CARRIZO:
		amdgpu_program_register_sequence(adev,
						 cz_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(cz_mgcg_cgcg_init));
		amdgpu_program_register_sequence(adev,
						 cz_golden_settings_a11,
						 (const u32)ARRAY_SIZE(cz_golden_settings_a11));
		break;
	case CHIP_STONEY:
		amdgpu_program_register_sequence(adev,
						 stoney_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(stoney_mgcg_cgcg_init));
		amdgpu_program_register_sequence(adev,
						 stoney_golden_settings_a11,
						 (const u32)ARRAY_SIZE(stoney_golden_settings_a11));
		break;
	default:
		break;
	}
}

/**
 * sdma_v3_0_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */
static int sdma_v3_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err = 0, i;
	struct amdgpu_firmware_info *info = NULL;
	const struct common_firmware_header *header = NULL;
	const struct sdma_firmware_header_v1_0 *hdr;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_TONGA:
		chip_name = "tonga";
		break;
	case CHIP_FIJI:
		chip_name = "fiji";
		break;
	case CHIP_CARRIZO:
		chip_name = "carrizo";
		break;
	case CHIP_STONEY:
		chip_name = "stoney";
		break;
	default: BUG();
	}

	for (i = 0; i < adev->sdma.num_instances; i++) {
		if (i == 0)
			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
		else
			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name);
		err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
		if (err)
			goto out;
		err = amdgpu_ucode_validate(adev->sdma.instance[i].fw);
		if (err)
			goto out;
		hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
		adev->sdma.instance[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
		adev->sdma.instance[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
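		/* firmware with feature version >= 20 understands burst NOPs:
		 * a single NOP header whose COUNT field covers the padding
		 * dwords that follow, instead of one NOP packet per dword */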
		if (adev->sdma.instance[i].feature_version >= 20)
			adev->sdma.instance[i].burst_nop = true;

		if (adev->firmware.smu_load) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
			info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i;
			info->fw = adev->sdma.instance[i].fw;
			header = (const struct common_firmware_header *)info->fw->data;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
		}
	}
out:
	if (err) {
		printk(KERN_ERR
		       "sdma_v3_0: Failed to load firmware \"%s\"\n",
		       fw_name);
		for (i = 0; i < adev->sdma.num_instances; i++) {
			release_firmware(adev->sdma.instance[i].fw);
			adev->sdma.instance[i].fw = NULL;
		}
	}
	return err;
}

/**
 * sdma_v3_0_ring_get_rptr - get the current read pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current rptr from the hardware (VI+).
 */
static uint32_t sdma_v3_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	u32 rptr;

	/* XXX check if swapping is necessary on BE */
	rptr = ring->adev->wb.wb[ring->rptr_offs] >> 2;

	return rptr;
}

/**
 * sdma_v3_0_ring_get_wptr - get the current write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current wptr from the hardware (VI+).
 */
static uint32_t sdma_v3_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u32 wptr;

	if (ring->use_doorbell) {
		/* XXX check if swapping is necessary on BE */
		wptr = ring->adev->wb.wb[ring->wptr_offs] >> 2;
	} else {
		int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;

		wptr = RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me]) >> 2;
	}

	return wptr;
}

/**
 * sdma_v3_0_ring_set_wptr - commit the write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Write the wptr back to the hardware (VI+).
 */
static void sdma_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell) {
		/* XXX check if swapping is necessary on BE */
		adev->wb.wb[ring->wptr_offs] = ring->wptr << 2;
		WDOORBELL32(ring->doorbell_index, ring->wptr << 2);
	} else {
		int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;

		WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], ring->wptr << 2);
	}
}

static void sdma_v3_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
	int i;

	for (i = 0; i < count; i++)
		if (sdma && sdma->burst_nop && (i == 0))
			amdgpu_ring_write(ring, ring->nop |
				SDMA_PKT_NOP_HEADER_COUNT(count - 1));
		else
			amdgpu_ring_write(ring, ring->nop);
}

/**
 * sdma_v3_0_ring_emit_ib - Schedule an IB on the DMA engine
 *
 * @ring: amdgpu ring pointer
 * @ib: IB object to schedule
 *
 * Schedule an IB in the DMA ring (VI).
 */
static void sdma_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
				   struct amdgpu_ib *ib)
{
	u32 vmid = ib->vm_id & 0xf;
	u32 next_rptr = ring->wptr + 5;

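	/* Predict the rptr value once this emit has been consumed: 5 dwords
	 * for the WRITE_LINEAR packet below, then padding until the 6-dword
	 * INDIRECT packet starts at (8n + 2) so it ends 8-dword aligned. */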
	while ((next_rptr & 7) != 2)
		next_rptr++;
	next_rptr += 6;

	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
			  SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
	amdgpu_ring_write(ring, lower_32_bits(ring->next_rptr_gpu_addr) & 0xfffffffc);
	amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr));
	amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1));
	amdgpu_ring_write(ring, next_rptr);

	/* IB packet must end on a 8 DW boundary */
	sdma_v3_0_ring_insert_nop(ring, (10 - (ring->wptr & 7)) % 8);

	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
			  SDMA_PKT_INDIRECT_HEADER_VMID(vmid));
	/* base must be 32 byte aligned */
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, 0);

}

/**
 * sdma_v3_0_ring_emit_hdp_flush - emit an hdp flush on the DMA ring
 *
 * @ring: amdgpu ring pointer
 *
 * Emit an hdp flush packet on the requested DMA ring.
 */
static void sdma_v3_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	u32 ref_and_mask = 0;

	if (ring == &ring->adev->sdma.instance[0].ring)
		ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA0, 1);
	else
		ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA1, 1);

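	/* POLL_REGMEM with the HDP_FLUSH bit set requests the flush via
	 * GPU_HDP_FLUSH_REQ and then polls GPU_HDP_FLUSH_DONE until the
	 * masked value compares equal to the reference. */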
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
			  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
			  SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_DONE << 2);
	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_REQ << 2);
	amdgpu_ring_write(ring, ref_and_mask); /* reference */
	amdgpu_ring_write(ring, ref_and_mask); /* mask */
	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
}

/**
 * sdma_v3_0_ring_emit_fence - emit a fence on the DMA ring
 *
 * @ring: amdgpu ring pointer
 * @addr: address to write the fence seq number to
 * @seq: fence seq number
 * @flags: fence flags
 *
 * Add a DMA fence packet to the ring to write the fence seq number,
 * followed by a DMA trap packet to generate an interrupt if needed (VI).
 */
static void sdma_v3_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				      unsigned flags)
{
	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
	/* write the fence */
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, lower_32_bits(seq));

	/* optionally write high bits as well */
	if (write64bit) {
		addr += 4;
		amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
		amdgpu_ring_write(ring, lower_32_bits(addr));
		amdgpu_ring_write(ring, upper_32_bits(addr));
		amdgpu_ring_write(ring, upper_32_bits(seq));
	}

	/* generate an interrupt */
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_TRAP));
	amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
}

/**
 * sdma_v3_0_gfx_stop - stop the gfx async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the gfx async dma ring buffers (VI).
 */
static void sdma_v3_0_gfx_stop(struct amdgpu_device *adev)
{
	struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
	struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
	u32 rb_cntl, ib_cntl;
	int i;

	if ((adev->mman.buffer_funcs_ring == sdma0) ||
	    (adev->mman.buffer_funcs_ring == sdma1))
		amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);

	for (i = 0; i < adev->sdma.num_instances; i++) {
		rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
		ib_cntl = RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i]);
		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
		WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
	}
	sdma0->ready = false;
	sdma1->ready = false;
}

/**
 * sdma_v3_0_rlc_stop - stop the compute async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the compute async dma queues (VI).
 */
static void sdma_v3_0_rlc_stop(struct amdgpu_device *adev)
{
	/* XXX todo */
}

/**
 * sdma_v3_0_ctx_switch_enable - enable/disable the async dma engines context switch
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable the DMA MEs context switch.
 *
 * Halt or unhalt the async dma engines context switch (VI).
 */
static void sdma_v3_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
{
	u32 f32_cntl;
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		f32_cntl = RREG32(mmSDMA0_CNTL + sdma_offsets[i]);
		if (enable)
			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
						 AUTO_CTXSW_ENABLE, 1);
		else
			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
						 AUTO_CTXSW_ENABLE, 0);
		WREG32(mmSDMA0_CNTL + sdma_offsets[i], f32_cntl);
	}
}

/**
 * sdma_v3_0_enable - halt or unhalt the async dma engines
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable the DMA MEs.
 *
 * Halt or unhalt the async dma engines (VI).
 */
static void sdma_v3_0_enable(struct amdgpu_device *adev, bool enable)
{
	u32 f32_cntl;
	int i;

	if (!enable) {
		sdma_v3_0_gfx_stop(adev);
		sdma_v3_0_rlc_stop(adev);
	}

	for (i = 0; i < adev->sdma.num_instances; i++) {
		f32_cntl = RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]);
		if (enable)
			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 0);
		else
			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 1);
		WREG32(mmSDMA0_F32_CNTL + sdma_offsets[i], f32_cntl);
	}
}

/**
 * sdma_v3_0_gfx_resume - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the gfx DMA ring buffers and enable them (VI).
 * Returns 0 for success, error for failure.
 */
static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	u32 rb_cntl, ib_cntl;
	u32 rb_bufsz;
	u32 wb_offset;
	u32 doorbell;
	int i, j, r;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;
		wb_offset = (ring->rptr_offs * 4);

		mutex_lock(&adev->srbm_mutex);
		for (j = 0; j < 16; j++) {
			vi_srbm_select(adev, 0, 0, 0, j);
			/* SDMA GFX */
			WREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i], 0);
			WREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i], 0);
		}
		vi_srbm_select(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);

		WREG32(mmSDMA0_TILING_CONFIG + sdma_offsets[i],
		       adev->gfx.config.gb_addr_config & 0x70);

		WREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0);

		/* Set ring buffer size in dwords */
		rb_bufsz = order_base_2(ring->ring_size / 4);
		rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
#ifdef __BIG_ENDIAN
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1);
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
					RPTR_WRITEBACK_SWAP_ENABLE, 1);
#endif
		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);

		/* Initialize the ring buffer's read and write pointers */
		WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0);
		WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0);

		/* set the wb address whether it's enabled or not */
		WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i],
		       upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
		WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i],
		       lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC);

		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);

		WREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i], ring->gpu_addr >> 8);
		WREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i], ring->gpu_addr >> 40);

		ring->wptr = 0;
		WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], ring->wptr << 2);

		doorbell = RREG32(mmSDMA0_GFX_DOORBELL + sdma_offsets[i]);

		if (ring->use_doorbell) {
			doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL,
						 OFFSET, ring->doorbell_index);
			doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 1);
		} else {
			doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 0);
		}
		WREG32(mmSDMA0_GFX_DOORBELL + sdma_offsets[i], doorbell);

		/* enable DMA RB */
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1);
		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);

		ib_cntl = RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i]);
		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1);
#ifdef __BIG_ENDIAN
		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1);
#endif
		/* enable DMA IBs */
		WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);

		ring->ready = true;

		r = amdgpu_ring_test_ring(ring);
		if (r) {
			ring->ready = false;
			return r;
		}

		if (adev->mman.buffer_funcs_ring == ring)
			amdgpu_ttm_set_active_vram_size(adev, adev->mc.real_vram_size);
	}

	return 0;
}

/**
 * sdma_v3_0_rlc_resume - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the compute DMA queues and enable them (VI).
 * Returns 0 for success, error for failure.
 */
static int sdma_v3_0_rlc_resume(struct amdgpu_device *adev)
{
	/* XXX todo */
	return 0;
}

/**
 * sdma_v3_0_load_microcode - load the sDMA ME ucode
 *
 * @adev: amdgpu_device pointer
 *
 * Loads the sDMA0/1 ucode.
 * Returns 0 for success, -EINVAL if the ucode is not available.
 */
static int sdma_v3_0_load_microcode(struct amdgpu_device *adev)
{
	const struct sdma_firmware_header_v1_0 *hdr;
	const __le32 *fw_data;
	u32 fw_size;
	int i, j;

	/* halt the MEs */
	sdma_v3_0_enable(adev, false);

	for (i = 0; i < adev->sdma.num_instances; i++) {
		if (!adev->sdma.instance[i].fw)
			return -EINVAL;
		hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
		amdgpu_ucode_print_sdma_hdr(&hdr->header);
		fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
		fw_data = (const __le32 *)
			(adev->sdma.instance[i].fw->data +
			 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
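		/* reset the ucode write address, stream the image in through
		 * UCODE_DATA, then leave UCODE_ADDR holding the fw version */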
		WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0);
		for (j = 0; j < fw_size; j++)
			WREG32(mmSDMA0_UCODE_DATA + sdma_offsets[i], le32_to_cpup(fw_data++));
		WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma.instance[i].fw_version);
	}

	return 0;
}

/**
 * sdma_v3_0_start - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the DMA engines and enable them (VI).
 * Returns 0 for success, error for failure.
 */
static int sdma_v3_0_start(struct amdgpu_device *adev)
{
	int r, i;

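	/* Without powerplay, either load the ucode directly or, when the
	 * SMU owns firmware loading, wait for the SMU to report that each
	 * SDMA image has finished loading. */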
	if (!adev->pp_enabled) {
		if (!adev->firmware.smu_load) {
			r = sdma_v3_0_load_microcode(adev);
			if (r)
				return r;
		} else {
			for (i = 0; i < adev->sdma.num_instances; i++) {
				r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
										 (i == 0) ?
										 AMDGPU_UCODE_ID_SDMA0 :
										 AMDGPU_UCODE_ID_SDMA1);
				if (r)
					return -EINVAL;
			}
		}
	}

	/* unhalt the MEs */
	sdma_v3_0_enable(adev, true);
	/* enable sdma ring preemption */
	sdma_v3_0_ctx_switch_enable(adev, true);

	/* start the gfx rings and rlc compute queues */
	r = sdma_v3_0_gfx_resume(adev);
	if (r)
		return r;
	r = sdma_v3_0_rlc_resume(adev);
	if (r)
		return r;

	return 0;
}

/**
 * sdma_v3_0_ring_test_ring - simple async dma engine test
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Test the DMA engine by using it to write a value to memory (VI).
 * Returns 0 for success, error for failure.
 */
static int sdma_v3_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned i;
	unsigned index;
	int r;
	u32 tmp;
	u64 gpu_addr;

	r = amdgpu_wb_get(adev, &index);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
		return r;
	}

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	tmp = 0xCAFEDEAD;
	adev->wb.wb[index] = cpu_to_le32(tmp);

	r = amdgpu_ring_alloc(ring, 5);
	if (r) {
		DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
		amdgpu_wb_free(adev, index);
		return r;
	}

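	/* WRITE_LINEAR packet: overwrite the 0xCAFEDEAD-seeded writeback
	 * slot with 0xDEADBEEF, then poll the slot from the CPU below */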
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
			  SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
	amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
	amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = le32_to_cpu(adev->wb.wb[index]);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}
	amdgpu_wb_free(adev, index);

	return r;
}

/**
 * sdma_v3_0_ring_test_ib - test an IB on the DMA engine
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Test a simple IB in the DMA ring (VI).
 * Returns 0 on success, error on failure.
 */
static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct fence *f = NULL;
	unsigned i;
	unsigned index;
	int r;
	u32 tmp = 0;
	u64 gpu_addr;

	r = amdgpu_wb_get(adev, &index);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
		return r;
	}

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	tmp = 0xCAFEDEAD;
	adev->wb.wb[index] = cpu_to_le32(tmp);
	memset(&ib, 0, sizeof(ib));
	r = amdgpu_ib_get(adev, NULL, 256, &ib);
	if (r) {
		DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
		goto err0;
	}

	ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
	ib.ptr[1] = lower_32_bits(gpu_addr);
	ib.ptr[2] = upper_32_bits(gpu_addr);
	ib.ptr[3] = SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1);
	ib.ptr[4] = 0xDEADBEEF;
	ib.ptr[5] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
	ib.ptr[6] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
	ib.ptr[7] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
	ib.length_dw = 8;

	r = amdgpu_ib_schedule(ring, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED,
			       NULL, &f);
	if (r)
		goto err1;

	r = fence_wait(f, false);
	if (r) {
		DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
		goto err1;
	}
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = le32_to_cpu(adev->wb.wb[index]);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}
	if (i < adev->usec_timeout) {
		DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
			 ring->idx, i);
		goto err1;
	} else {
		DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
		r = -EINVAL;
	}
err1:
	fence_put(f);
	amdgpu_ib_free(adev, &ib);
err0:
	amdgpu_wb_free(adev, index);
	return r;
}

/**
 * sdma_v3_0_vm_copy_pte - update PTEs by copying them from the GART
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @src: src addr to copy from
 * @count: number of page entries to update
 *
 * Update PTEs by copying them from the GART using sDMA (VI).
 */
static void sdma_v3_0_vm_copy_pte(struct amdgpu_ib *ib,
				  uint64_t pe, uint64_t src,
				  unsigned count)
{
	while (count) {
		unsigned bytes = count * 8;
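		/* one COPY_LINEAR packet moves at most 0x1fffff bytes;
		 * 0x1FFFF8 is the largest multiple of 8 (one PTE) below that */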
		if (bytes > 0x1FFFF8)
			bytes = 0x1FFFF8;

		ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
			SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
		ib->ptr[ib->length_dw++] = bytes;
		ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
		ib->ptr[ib->length_dw++] = lower_32_bits(src);
		ib->ptr[ib->length_dw++] = upper_32_bits(src);
		ib->ptr[ib->length_dw++] = lower_32_bits(pe);
		ib->ptr[ib->length_dw++] = upper_32_bits(pe);

		pe += bytes;
		src += bytes;
		count -= bytes / 8;
	}
}

/**
 * sdma_v3_0_vm_write_pte - update PTEs by writing them manually
 *
 * @ib: indirect buffer to fill with commands
 * @pages_addr: optional DMA addresses to use for mapping
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update PTEs by writing them manually using sDMA (VI).
 */
static void sdma_v3_0_vm_write_pte(struct amdgpu_ib *ib,
				   const dma_addr_t *pages_addr, uint64_t pe,
				   uint64_t addr, unsigned count,
				   uint32_t incr, uint32_t flags)
{
	uint64_t value;
	unsigned ndw;

	while (count) {
		ndw = count * 2;
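		/* the write packet counts in dwords and each PTE is two
		 * dwords, so keep the per-packet limit even */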
		if (ndw > 0xFFFFE)
			ndw = 0xFFFFE;

		/* for non-physically contiguous pages (system) */
		ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
			SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
		ib->ptr[ib->length_dw++] = pe;
		ib->ptr[ib->length_dw++] = upper_32_bits(pe);
		ib->ptr[ib->length_dw++] = ndw;
		for (; ndw > 0; ndw -= 2, --count, pe += 8) {
			value = amdgpu_vm_map_gart(pages_addr, addr);
			addr += incr;
			value |= flags;
			ib->ptr[ib->length_dw++] = value;
			ib->ptr[ib->length_dw++] = upper_32_bits(value);
		}
	}
}

/**
 * sdma_v3_0_vm_set_pte_pde - update the page tables using sDMA
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update the page tables using sDMA (VI).
 */
static void sdma_v3_0_vm_set_pte_pde(struct amdgpu_ib *ib,
				     uint64_t pe,
				     uint64_t addr, unsigned count,
				     uint32_t incr, uint32_t flags)
{
	uint64_t value;
	unsigned ndw;

	while (count) {
		ndw = count;
		if (ndw > 0x7FFFF)
			ndw = 0x7FFFF;

		if (flags & AMDGPU_PTE_VALID)
			value = addr;
		else
			value = 0;

		/* for physically contiguous pages (vram) */
		ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_GEN_PTEPDE);
		ib->ptr[ib->length_dw++] = pe; /* dst addr */
		ib->ptr[ib->length_dw++] = upper_32_bits(pe);
		ib->ptr[ib->length_dw++] = flags; /* mask */
		ib->ptr[ib->length_dw++] = 0;
		ib->ptr[ib->length_dw++] = value; /* value */
		ib->ptr[ib->length_dw++] = upper_32_bits(value);
		ib->ptr[ib->length_dw++] = incr; /* increment size */
		ib->ptr[ib->length_dw++] = 0;
		ib->ptr[ib->length_dw++] = ndw; /* number of entries */

		pe += ndw * 8;
		addr += ndw * incr;
		count -= ndw;
	}
}

/**
 * sdma_v3_0_ring_pad_ib - pad the IB to the required number of dw
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to fill with padding
 *
 */
static void sdma_v3_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
	struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
	u32 pad_count;
	int i;

	pad_count = (8 - (ib->length_dw & 0x7)) % 8;
	for (i = 0; i < pad_count; i++)
		if (sdma && sdma->burst_nop && (i == 0))
			ib->ptr[ib->length_dw++] =
				SDMA_PKT_HEADER_OP(SDMA_OP_NOP) |
				SDMA_PKT_NOP_HEADER_COUNT(pad_count - 1);
		else
			ib->ptr[ib->length_dw++] =
				SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
}

/**
 * sdma_v3_0_ring_emit_vm_flush - vm flush using sDMA
 *
 * @ring: amdgpu_ring pointer
 * @vm_id: VMID of the VM to flush
 * @pd_addr: page directory base address
 *
 * Update the page table base and flush the VM TLB
 * using sDMA (VI).
 */
static void sdma_v3_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
					 unsigned vm_id, uint64_t pd_addr)
{
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

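	/* Poll the fence writeback location until it reaches the last
	 * emitted sequence number, so all prior work on this ring has
	 * retired before the page table base is switched. */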
	/* wait for idle */
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
			  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
			  SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3) | /* equal */
			  SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(1));
	amdgpu_ring_write(ring, addr & 0xfffffffc);
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
	amdgpu_ring_write(ring, seq); /* reference */
	amdgpu_ring_write(ring, 0xfffffff); /* mask */
	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */

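	/* VMIDs 0-7 and 8-15 keep their page table bases in two separate
	 * register banks (VM_CONTEXT0..7 vs VM_CONTEXT8..15) */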
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
			  SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
	if (vm_id < 8) {
		amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
	} else {
		amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8));
	}
	amdgpu_ring_write(ring, pd_addr >> 12);

	/* flush TLB */
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
			  SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
	amdgpu_ring_write(ring, 1 << vm_id);

	/* wait for flush */
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
			  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
			  SDMA_PKT_POLL_REGMEM_HEADER_FUNC(0)); /* always */
	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, 0); /* reference */
	amdgpu_ring_write(ring, 0); /* mask */
	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
}

static int sdma_v3_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (adev->asic_type) {
	case CHIP_STONEY:
		adev->sdma.num_instances = 1;
		break;
	default:
		adev->sdma.num_instances = SDMA_MAX_INSTANCE;
		break;
	}

	sdma_v3_0_set_ring_funcs(adev);
	sdma_v3_0_set_buffer_funcs(adev);
	sdma_v3_0_set_vm_pte_funcs(adev);
	sdma_v3_0_set_irq_funcs(adev);

	return 0;
}

static int sdma_v3_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int r, i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* SDMA trap event */
	r = amdgpu_irq_add_id(adev, 224, &adev->sdma.trap_irq);
	if (r)
		return r;

	/* SDMA Privileged inst */
	r = amdgpu_irq_add_id(adev, 241, &adev->sdma.illegal_inst_irq);
	if (r)
		return r;

	/* SDMA Privileged inst */
	r = amdgpu_irq_add_id(adev, 247, &adev->sdma.illegal_inst_irq);
	if (r)
		return r;

	r = sdma_v3_0_init_microcode(adev);
	if (r) {
		DRM_ERROR("Failed to load sdma firmware!\n");
		return r;
	}

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;
		ring->ring_obj = NULL;
		ring->use_doorbell = true;
		ring->doorbell_index = (i == 0) ?
			AMDGPU_DOORBELL_sDMA_ENGINE0 : AMDGPU_DOORBELL_sDMA_ENGINE1;

		sprintf(ring->name, "sdma%d", i);
		r = amdgpu_ring_init(adev, ring, 256 * 1024,
				     SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf,
				     &adev->sdma.trap_irq,
				     (i == 0) ?
				     AMDGPU_SDMA_IRQ_TRAP0 : AMDGPU_SDMA_IRQ_TRAP1,
				     AMDGPU_RING_TYPE_SDMA);
		if (r)
			return r;
	}

	return r;
}

static int sdma_v3_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++)
		amdgpu_ring_fini(&adev->sdma.instance[i].ring);

	return 0;
}

static int sdma_v3_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	sdma_v3_0_init_golden_registers(adev);

	r = sdma_v3_0_start(adev);
	if (r)
		return r;

	return r;
}

static int sdma_v3_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	sdma_v3_0_ctx_switch_enable(adev, false);
	sdma_v3_0_enable(adev, false);

	return 0;
}

static int sdma_v3_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return sdma_v3_0_hw_fini(adev);
}

static int sdma_v3_0_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return sdma_v3_0_hw_init(adev);
}

static bool sdma_v3_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS2);

	if (tmp & (SRBM_STATUS2__SDMA_BUSY_MASK |
		   SRBM_STATUS2__SDMA1_BUSY_MASK))
		return false;

	return true;
}

static int sdma_v3_0_wait_for_idle(void *handle)
{
	unsigned i;
	u32 tmp;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(mmSRBM_STATUS2) & (SRBM_STATUS2__SDMA_BUSY_MASK |
				SRBM_STATUS2__SDMA1_BUSY_MASK);

		if (!tmp)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

static void sdma_v3_0_print_status(void *handle)
{
	int i, j;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	dev_info(adev->dev, "VI SDMA registers\n");
	dev_info(adev->dev, "  SRBM_STATUS2=0x%08X\n",
		 RREG32(mmSRBM_STATUS2));
	for (i = 0; i < adev->sdma.num_instances; i++) {
		dev_info(adev->dev, "  SDMA%d_STATUS_REG=0x%08X\n",
			 i, RREG32(mmSDMA0_STATUS_REG + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_F32_CNTL=0x%08X\n",
			 i, RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_CNTL=0x%08X\n",
			 i, RREG32(mmSDMA0_CNTL + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_SEM_WAIT_FAIL_TIMER_CNTL=0x%08X\n",
			 i, RREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_GFX_IB_CNTL=0x%08X\n",
			 i, RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_GFX_RB_CNTL=0x%08X\n",
			 i, RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_GFX_RB_RPTR=0x%08X\n",
			 i, RREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_GFX_RB_WPTR=0x%08X\n",
			 i, RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_GFX_RB_RPTR_ADDR_HI=0x%08X\n",
			 i, RREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_GFX_RB_RPTR_ADDR_LO=0x%08X\n",
			 i, RREG32(mmSDMA0_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_GFX_RB_BASE=0x%08X\n",
			 i, RREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_GFX_RB_BASE_HI=0x%08X\n",
			 i, RREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_GFX_DOORBELL=0x%08X\n",
			 i, RREG32(mmSDMA0_GFX_DOORBELL + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_TILING_CONFIG=0x%08X\n",
			 i, RREG32(mmSDMA0_TILING_CONFIG + sdma_offsets[i]));
		mutex_lock(&adev->srbm_mutex);
		for (j = 0; j < 16; j++) {
			vi_srbm_select(adev, 0, 0, 0, j);
			dev_info(adev->dev, "  VM %d:\n", j);
			dev_info(adev->dev, "  SDMA%d_GFX_VIRTUAL_ADDR=0x%08X\n",
				 i, RREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i]));
			dev_info(adev->dev, "  SDMA%d_GFX_APE1_CNTL=0x%08X\n",
				 i, RREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i]));
		}
		vi_srbm_select(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);
	}
}

static int sdma_v3_0_soft_reset(void *handle)
{
	u32 srbm_soft_reset = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS2);

	if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK) {
		/* sdma0 */
		tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
		tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 0);
		WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK;
	}
	if (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK) {
		/* sdma1 */
		tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
		tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 0);
		WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK;
	}

	if (srbm_soft_reset) {
		sdma_v3_0_print_status((void *)adev);

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);

		sdma_v3_0_print_status((void *)adev);
	}

	return 0;
}

static int sdma_v3_0_set_trap_irq_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	u32 sdma_cntl;

	switch (type) {
	case AMDGPU_SDMA_IRQ_TRAP0:
		switch (state) {
		case AMDGPU_IRQ_STATE_DISABLE:
			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
			sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 0);
			WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl);
			break;
		case AMDGPU_IRQ_STATE_ENABLE:
			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
			sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 1);
			WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl);
			break;
		default:
			break;
		}
		break;
	case AMDGPU_SDMA_IRQ_TRAP1:
		switch (state) {
		case AMDGPU_IRQ_STATE_DISABLE:
			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
			sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 0);
			WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl);
			break;
		case AMDGPU_IRQ_STATE_ENABLE:
			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
			sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 1);
			WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl);
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}
	return 0;
}

static int sdma_v3_0_process_trap_irq(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	u8 instance_id, queue_id;

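	/* ring_id bits [1:0] select the SDMA instance, bits [3:2] the
	 * queue: 0 = gfx ring, 1/2 = RLC compute queues (unhandled) */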
	instance_id = (entry->ring_id & 0x3) >> 0;
	queue_id = (entry->ring_id & 0xc) >> 2;
	DRM_DEBUG("IH: SDMA trap\n");
	switch (instance_id) {
	case 0:
		switch (queue_id) {
		case 0:
			amdgpu_fence_process(&adev->sdma.instance[0].ring);
			break;
		case 1:
			/* XXX compute */
			break;
		case 2:
			/* XXX compute */
			break;
		}
		break;
	case 1:
		switch (queue_id) {
		case 0:
			amdgpu_fence_process(&adev->sdma.instance[1].ring);
			break;
		case 1:
			/* XXX compute */
			break;
		case 2:
			/* XXX compute */
			break;
		}
		break;
	}
	return 0;
}

static int sdma_v3_0_process_illegal_inst_irq(struct amdgpu_device *adev,
					      struct amdgpu_irq_src *source,
					      struct amdgpu_iv_entry *entry)
{
	DRM_ERROR("Illegal instruction in SDMA command stream\n");
	schedule_work(&adev->reset_work);
	return 0;
}

static void fiji_update_sdma_medium_grain_clock_gating(
		struct amdgpu_device *adev,
		bool enable)
{
	uint32_t temp, data;

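	/* Clearing the SOFT_OVERRIDE bits lets the clock gating logic
	 * manage the SDMA clocks (gating enabled); setting them forces
	 * the clocks on, effectively disabling the gating. */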
	if (enable) {
		temp = data = RREG32(mmSDMA0_CLK_CTRL);
		data &= ~(SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK |
			  SDMA0_CLK_CTRL__SOFT_OVERRIDE6_MASK |
			  SDMA0_CLK_CTRL__SOFT_OVERRIDE5_MASK |
			  SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK |
			  SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK |
			  SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK |
			  SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK |
			  SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK);
		if (data != temp)
			WREG32(mmSDMA0_CLK_CTRL, data);

		temp = data = RREG32(mmSDMA1_CLK_CTRL);
		data &= ~(SDMA1_CLK_CTRL__SOFT_OVERRIDE7_MASK |
			  SDMA1_CLK_CTRL__SOFT_OVERRIDE6_MASK |
			  SDMA1_CLK_CTRL__SOFT_OVERRIDE5_MASK |
			  SDMA1_CLK_CTRL__SOFT_OVERRIDE4_MASK |
			  SDMA1_CLK_CTRL__SOFT_OVERRIDE3_MASK |
			  SDMA1_CLK_CTRL__SOFT_OVERRIDE2_MASK |
			  SDMA1_CLK_CTRL__SOFT_OVERRIDE1_MASK |
			  SDMA1_CLK_CTRL__SOFT_OVERRIDE0_MASK);

		if (data != temp)
			WREG32(mmSDMA1_CLK_CTRL, data);
	} else {
		temp = data = RREG32(mmSDMA0_CLK_CTRL);
		data |= SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK |
			SDMA0_CLK_CTRL__SOFT_OVERRIDE6_MASK |
			SDMA0_CLK_CTRL__SOFT_OVERRIDE5_MASK |
			SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK |
			SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK |
			SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK |
			SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK |
			SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK;

		if (data != temp)
			WREG32(mmSDMA0_CLK_CTRL, data);

		temp = data = RREG32(mmSDMA1_CLK_CTRL);
		data |= SDMA1_CLK_CTRL__SOFT_OVERRIDE7_MASK |
			SDMA1_CLK_CTRL__SOFT_OVERRIDE6_MASK |
			SDMA1_CLK_CTRL__SOFT_OVERRIDE5_MASK |
			SDMA1_CLK_CTRL__SOFT_OVERRIDE4_MASK |
			SDMA1_CLK_CTRL__SOFT_OVERRIDE3_MASK |
			SDMA1_CLK_CTRL__SOFT_OVERRIDE2_MASK |
			SDMA1_CLK_CTRL__SOFT_OVERRIDE1_MASK |
			SDMA1_CLK_CTRL__SOFT_OVERRIDE0_MASK;

		if (data != temp)
			WREG32(mmSDMA1_CLK_CTRL, data);
	}
}

static void fiji_update_sdma_medium_grain_light_sleep(
		struct amdgpu_device *adev,
		bool enable)
{
	uint32_t temp, data;

	if (enable) {
		temp = data = RREG32(mmSDMA0_POWER_CNTL);
		data |= SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;

		if (temp != data)
			WREG32(mmSDMA0_POWER_CNTL, data);

		temp = data = RREG32(mmSDMA1_POWER_CNTL);
		data |= SDMA1_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;

		if (temp != data)
			WREG32(mmSDMA1_POWER_CNTL, data);
	} else {
		temp = data = RREG32(mmSDMA0_POWER_CNTL);
		data &= ~SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;

		if (temp != data)
			WREG32(mmSDMA0_POWER_CNTL, data);

		temp = data = RREG32(mmSDMA1_POWER_CNTL);
		data &= ~SDMA1_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;

		if (temp != data)
			WREG32(mmSDMA1_POWER_CNTL, data);
	}
}

static int sdma_v3_0_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (adev->asic_type) {
	case CHIP_FIJI:
		fiji_update_sdma_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		fiji_update_sdma_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		break;
	default:
		break;
	}
	return 0;
}

static int sdma_v3_0_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs sdma_v3_0_ip_funcs = {
	.early_init = sdma_v3_0_early_init,
	.late_init = NULL,
	.sw_init = sdma_v3_0_sw_init,
	.sw_fini = sdma_v3_0_sw_fini,
	.hw_init = sdma_v3_0_hw_init,
	.hw_fini = sdma_v3_0_hw_fini,
	.suspend = sdma_v3_0_suspend,
	.resume = sdma_v3_0_resume,
	.is_idle = sdma_v3_0_is_idle,
	.wait_for_idle = sdma_v3_0_wait_for_idle,
	.soft_reset = sdma_v3_0_soft_reset,
	.print_status = sdma_v3_0_print_status,
	.set_clockgating_state = sdma_v3_0_set_clockgating_state,
	.set_powergating_state = sdma_v3_0_set_powergating_state,
};

static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = {
	.get_rptr = sdma_v3_0_ring_get_rptr,
	.get_wptr = sdma_v3_0_ring_get_wptr,
	.set_wptr = sdma_v3_0_ring_set_wptr,
	.parse_cs = NULL,
	.emit_ib = sdma_v3_0_ring_emit_ib,
	.emit_fence = sdma_v3_0_ring_emit_fence,
	.emit_vm_flush = sdma_v3_0_ring_emit_vm_flush,
	.emit_hdp_flush = sdma_v3_0_ring_emit_hdp_flush,
	.test_ring = sdma_v3_0_ring_test_ring,
	.test_ib = sdma_v3_0_ring_test_ib,
	.insert_nop = sdma_v3_0_ring_insert_nop,
	.pad_ib = sdma_v3_0_ring_pad_ib,
};

static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++)
		adev->sdma.instance[i].ring.funcs = &sdma_v3_0_ring_funcs;
}

static const struct amdgpu_irq_src_funcs sdma_v3_0_trap_irq_funcs = {
	.set = sdma_v3_0_set_trap_irq_state,
	.process = sdma_v3_0_process_trap_irq,
};

static const struct amdgpu_irq_src_funcs sdma_v3_0_illegal_inst_irq_funcs = {
	.process = sdma_v3_0_process_illegal_inst_irq,
};

static void sdma_v3_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
	adev->sdma.trap_irq.funcs = &sdma_v3_0_trap_irq_funcs;
	adev->sdma.illegal_inst_irq.funcs = &sdma_v3_0_illegal_inst_irq_funcs;
}

/**
 * sdma_v3_0_emit_copy_buffer - copy buffer using the sDMA engine
 *
 * @ib: indirect buffer to fill with commands
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 *
 * Copy GPU buffers using the DMA engine (VI).
 * Used by the amdgpu ttm implementation to move pages if
 * registered as the asic copy callback.
 */
static void sdma_v3_0_emit_copy_buffer(struct amdgpu_ib *ib,
				       uint64_t src_offset,
				       uint64_t dst_offset,
				       uint32_t byte_count)
{
	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
	ib->ptr[ib->length_dw++] = byte_count;
	ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
	ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(src_offset);
	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
}

/**
 * sdma_v3_0_emit_fill_buffer - fill buffer using the sDMA engine
 *
 * @ib: indirect buffer to fill with commands
 * @src_data: value to write to buffer
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 *
 * Fill GPU buffers using the DMA engine (VI).
 */
static void sdma_v3_0_emit_fill_buffer(struct amdgpu_ib *ib,
				       uint32_t src_data,
				       uint64_t dst_offset,
				       uint32_t byte_count)
{
	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL);
	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = src_data;
	ib->ptr[ib->length_dw++] = byte_count;
}

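/* copy_num_dw/fill_num_dw below must match the number of dwords the
 * corresponding emit functions above write per packet */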
static const struct amdgpu_buffer_funcs sdma_v3_0_buffer_funcs = {
	.copy_max_bytes = 0x1fffff,
	.copy_num_dw = 7,
	.emit_copy_buffer = sdma_v3_0_emit_copy_buffer,

	.fill_max_bytes = 0x1fffff,
	.fill_num_dw = 5,
	.emit_fill_buffer = sdma_v3_0_emit_fill_buffer,
};

static void sdma_v3_0_set_buffer_funcs(struct amdgpu_device *adev)
{
	if (adev->mman.buffer_funcs == NULL) {
		adev->mman.buffer_funcs = &sdma_v3_0_buffer_funcs;
		adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
	}
}

static const struct amdgpu_vm_pte_funcs sdma_v3_0_vm_pte_funcs = {
	.copy_pte = sdma_v3_0_vm_copy_pte,
	.write_pte = sdma_v3_0_vm_write_pte,
	.set_pte_pde = sdma_v3_0_vm_set_pte_pde,
};

static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev)
{
	unsigned i;

	if (adev->vm_manager.vm_pte_funcs == NULL) {
		adev->vm_manager.vm_pte_funcs = &sdma_v3_0_vm_pte_funcs;
		for (i = 0; i < adev->sdma.num_instances; i++)
			adev->vm_manager.vm_pte_rings[i] =
				&adev->sdma.instance[i].ring;

		adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances;
	}
}