blob: a7d3de73be040a56c337d41da714bdf2156f2e40 [file] [log] [blame]
Alex Deucher0af62b02011-01-06 21:19:31 -05001/*
2 * Copyright 2010 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Alex Deucher
23 */
24#include <linux/firmware.h>
25#include <linux/platform_device.h>
26#include <linux/slab.h>
Paul Gortmakere0cd3602011-08-30 11:04:30 -040027#include <linux/module.h>
David Howells760285e2012-10-02 18:01:07 +010028#include <drm/drmP.h>
Alex Deucher0af62b02011-01-06 21:19:31 -050029#include "radeon.h"
30#include "radeon_asic.h"
David Howells760285e2012-10-02 18:01:07 +010031#include <drm/radeon_drm.h>
Alex Deucher0af62b02011-01-06 21:19:31 -050032#include "nid.h"
33#include "atom.h"
34#include "ni_reg.h"
Alex Deucher0c88a022011-03-02 20:07:31 -050035#include "cayman_blit_shaders.h"
Alex Deucher0af62b02011-01-06 21:19:31 -050036
Alex Deucher168757e2013-01-18 19:17:22 -050037extern bool evergreen_is_display_hung(struct radeon_device *rdev);
Alex Deucher187e3592013-01-18 14:51:38 -050038extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev);
Alex Deucherb9952a82011-03-02 20:07:33 -050039extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save);
40extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save);
41extern int evergreen_mc_wait_for_idle(struct radeon_device *rdev);
Alex Deucher755d8192011-03-02 20:07:34 -050042extern void evergreen_mc_program(struct radeon_device *rdev);
43extern void evergreen_irq_suspend(struct radeon_device *rdev);
44extern int evergreen_mc_init(struct radeon_device *rdev);
Alex Deucherd054ac12011-09-01 17:46:15 +000045extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev);
Ilija Hadzicb07759b2011-09-20 10:22:58 -040046extern void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
Alex Deucherc420c742012-03-20 17:18:39 -040047extern void si_rlc_fini(struct radeon_device *rdev);
48extern int si_rlc_init(struct radeon_device *rdev);
Alex Deucherb9952a82011-03-02 20:07:33 -050049
Alex Deucher0af62b02011-01-06 21:19:31 -050050#define EVERGREEN_PFP_UCODE_SIZE 1120
51#define EVERGREEN_PM4_UCODE_SIZE 1376
52#define EVERGREEN_RLC_UCODE_SIZE 768
53#define BTC_MC_UCODE_SIZE 6024
54
Alex Deucher9b8253c2011-03-02 20:07:28 -050055#define CAYMAN_PFP_UCODE_SIZE 2176
56#define CAYMAN_PM4_UCODE_SIZE 2176
57#define CAYMAN_RLC_UCODE_SIZE 1024
58#define CAYMAN_MC_UCODE_SIZE 6037
59
Alex Deucherc420c742012-03-20 17:18:39 -040060#define ARUBA_RLC_UCODE_SIZE 1536
61
Alex Deucher0af62b02011-01-06 21:19:31 -050062/* Firmware Names */
63MODULE_FIRMWARE("radeon/BARTS_pfp.bin");
64MODULE_FIRMWARE("radeon/BARTS_me.bin");
65MODULE_FIRMWARE("radeon/BARTS_mc.bin");
66MODULE_FIRMWARE("radeon/BTC_rlc.bin");
67MODULE_FIRMWARE("radeon/TURKS_pfp.bin");
68MODULE_FIRMWARE("radeon/TURKS_me.bin");
69MODULE_FIRMWARE("radeon/TURKS_mc.bin");
70MODULE_FIRMWARE("radeon/CAICOS_pfp.bin");
71MODULE_FIRMWARE("radeon/CAICOS_me.bin");
72MODULE_FIRMWARE("radeon/CAICOS_mc.bin");
Alex Deucher9b8253c2011-03-02 20:07:28 -050073MODULE_FIRMWARE("radeon/CAYMAN_pfp.bin");
74MODULE_FIRMWARE("radeon/CAYMAN_me.bin");
75MODULE_FIRMWARE("radeon/CAYMAN_mc.bin");
76MODULE_FIRMWARE("radeon/CAYMAN_rlc.bin");
Alex Deucherc420c742012-03-20 17:18:39 -040077MODULE_FIRMWARE("radeon/ARUBA_pfp.bin");
78MODULE_FIRMWARE("radeon/ARUBA_me.bin");
79MODULE_FIRMWARE("radeon/ARUBA_rlc.bin");
Alex Deucher0af62b02011-01-06 21:19:31 -050080
81#define BTC_IO_MC_REGS_SIZE 29
82
83static const u32 barts_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
84 {0x00000077, 0xff010100},
85 {0x00000078, 0x00000000},
86 {0x00000079, 0x00001434},
87 {0x0000007a, 0xcc08ec08},
88 {0x0000007b, 0x00040000},
89 {0x0000007c, 0x000080c0},
90 {0x0000007d, 0x09000000},
91 {0x0000007e, 0x00210404},
92 {0x00000081, 0x08a8e800},
93 {0x00000082, 0x00030444},
94 {0x00000083, 0x00000000},
95 {0x00000085, 0x00000001},
96 {0x00000086, 0x00000002},
97 {0x00000087, 0x48490000},
98 {0x00000088, 0x20244647},
99 {0x00000089, 0x00000005},
100 {0x0000008b, 0x66030000},
101 {0x0000008c, 0x00006603},
102 {0x0000008d, 0x00000100},
103 {0x0000008f, 0x00001c0a},
104 {0x00000090, 0xff000001},
105 {0x00000094, 0x00101101},
106 {0x00000095, 0x00000fff},
107 {0x00000096, 0x00116fff},
108 {0x00000097, 0x60010000},
109 {0x00000098, 0x10010000},
110 {0x00000099, 0x00006000},
111 {0x0000009a, 0x00001000},
112 {0x0000009f, 0x00946a00}
113};
114
115static const u32 turks_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
116 {0x00000077, 0xff010100},
117 {0x00000078, 0x00000000},
118 {0x00000079, 0x00001434},
119 {0x0000007a, 0xcc08ec08},
120 {0x0000007b, 0x00040000},
121 {0x0000007c, 0x000080c0},
122 {0x0000007d, 0x09000000},
123 {0x0000007e, 0x00210404},
124 {0x00000081, 0x08a8e800},
125 {0x00000082, 0x00030444},
126 {0x00000083, 0x00000000},
127 {0x00000085, 0x00000001},
128 {0x00000086, 0x00000002},
129 {0x00000087, 0x48490000},
130 {0x00000088, 0x20244647},
131 {0x00000089, 0x00000005},
132 {0x0000008b, 0x66030000},
133 {0x0000008c, 0x00006603},
134 {0x0000008d, 0x00000100},
135 {0x0000008f, 0x00001c0a},
136 {0x00000090, 0xff000001},
137 {0x00000094, 0x00101101},
138 {0x00000095, 0x00000fff},
139 {0x00000096, 0x00116fff},
140 {0x00000097, 0x60010000},
141 {0x00000098, 0x10010000},
142 {0x00000099, 0x00006000},
143 {0x0000009a, 0x00001000},
144 {0x0000009f, 0x00936a00}
145};
146
147static const u32 caicos_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
148 {0x00000077, 0xff010100},
149 {0x00000078, 0x00000000},
150 {0x00000079, 0x00001434},
151 {0x0000007a, 0xcc08ec08},
152 {0x0000007b, 0x00040000},
153 {0x0000007c, 0x000080c0},
154 {0x0000007d, 0x09000000},
155 {0x0000007e, 0x00210404},
156 {0x00000081, 0x08a8e800},
157 {0x00000082, 0x00030444},
158 {0x00000083, 0x00000000},
159 {0x00000085, 0x00000001},
160 {0x00000086, 0x00000002},
161 {0x00000087, 0x48490000},
162 {0x00000088, 0x20244647},
163 {0x00000089, 0x00000005},
164 {0x0000008b, 0x66030000},
165 {0x0000008c, 0x00006603},
166 {0x0000008d, 0x00000100},
167 {0x0000008f, 0x00001c0a},
168 {0x00000090, 0xff000001},
169 {0x00000094, 0x00101101},
170 {0x00000095, 0x00000fff},
171 {0x00000096, 0x00116fff},
172 {0x00000097, 0x60010000},
173 {0x00000098, 0x10010000},
174 {0x00000099, 0x00006000},
175 {0x0000009a, 0x00001000},
176 {0x0000009f, 0x00916a00}
177};
178
Alex Deucher9b8253c2011-03-02 20:07:28 -0500179static const u32 cayman_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
180 {0x00000077, 0xff010100},
181 {0x00000078, 0x00000000},
182 {0x00000079, 0x00001434},
183 {0x0000007a, 0xcc08ec08},
184 {0x0000007b, 0x00040000},
185 {0x0000007c, 0x000080c0},
186 {0x0000007d, 0x09000000},
187 {0x0000007e, 0x00210404},
188 {0x00000081, 0x08a8e800},
189 {0x00000082, 0x00030444},
190 {0x00000083, 0x00000000},
191 {0x00000085, 0x00000001},
192 {0x00000086, 0x00000002},
193 {0x00000087, 0x48490000},
194 {0x00000088, 0x20244647},
195 {0x00000089, 0x00000005},
196 {0x0000008b, 0x66030000},
197 {0x0000008c, 0x00006603},
198 {0x0000008d, 0x00000100},
199 {0x0000008f, 0x00001c0a},
200 {0x00000090, 0xff000001},
201 {0x00000094, 0x00101101},
202 {0x00000095, 0x00000fff},
203 {0x00000096, 0x00116fff},
204 {0x00000097, 0x60010000},
205 {0x00000098, 0x10010000},
206 {0x00000099, 0x00006000},
207 {0x0000009a, 0x00001000},
208 {0x0000009f, 0x00976b00}
209};
210
Alex Deucher755d8192011-03-02 20:07:34 -0500211int ni_mc_load_microcode(struct radeon_device *rdev)
Alex Deucher0af62b02011-01-06 21:19:31 -0500212{
213 const __be32 *fw_data;
214 u32 mem_type, running, blackout = 0;
215 u32 *io_mc_regs;
Alex Deucher9b8253c2011-03-02 20:07:28 -0500216 int i, ucode_size, regs_size;
Alex Deucher0af62b02011-01-06 21:19:31 -0500217
218 if (!rdev->mc_fw)
219 return -EINVAL;
220
221 switch (rdev->family) {
222 case CHIP_BARTS:
223 io_mc_regs = (u32 *)&barts_io_mc_regs;
Alex Deucher9b8253c2011-03-02 20:07:28 -0500224 ucode_size = BTC_MC_UCODE_SIZE;
225 regs_size = BTC_IO_MC_REGS_SIZE;
Alex Deucher0af62b02011-01-06 21:19:31 -0500226 break;
227 case CHIP_TURKS:
228 io_mc_regs = (u32 *)&turks_io_mc_regs;
Alex Deucher9b8253c2011-03-02 20:07:28 -0500229 ucode_size = BTC_MC_UCODE_SIZE;
230 regs_size = BTC_IO_MC_REGS_SIZE;
Alex Deucher0af62b02011-01-06 21:19:31 -0500231 break;
232 case CHIP_CAICOS:
233 default:
234 io_mc_regs = (u32 *)&caicos_io_mc_regs;
Alex Deucher9b8253c2011-03-02 20:07:28 -0500235 ucode_size = BTC_MC_UCODE_SIZE;
236 regs_size = BTC_IO_MC_REGS_SIZE;
237 break;
238 case CHIP_CAYMAN:
239 io_mc_regs = (u32 *)&cayman_io_mc_regs;
240 ucode_size = CAYMAN_MC_UCODE_SIZE;
241 regs_size = BTC_IO_MC_REGS_SIZE;
Alex Deucher0af62b02011-01-06 21:19:31 -0500242 break;
243 }
244
245 mem_type = (RREG32(MC_SEQ_MISC0) & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT;
246 running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;
247
248 if ((mem_type == MC_SEQ_MISC0_GDDR5_VALUE) && (running == 0)) {
249 if (running) {
250 blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
251 WREG32(MC_SHARED_BLACKOUT_CNTL, 1);
252 }
253
254 /* reset the engine and set to writable */
255 WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
256 WREG32(MC_SEQ_SUP_CNTL, 0x00000010);
257
258 /* load mc io regs */
Alex Deucher9b8253c2011-03-02 20:07:28 -0500259 for (i = 0; i < regs_size; i++) {
Alex Deucher0af62b02011-01-06 21:19:31 -0500260 WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]);
261 WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
262 }
263 /* load the MC ucode */
264 fw_data = (const __be32 *)rdev->mc_fw->data;
Alex Deucher9b8253c2011-03-02 20:07:28 -0500265 for (i = 0; i < ucode_size; i++)
Alex Deucher0af62b02011-01-06 21:19:31 -0500266 WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++));
267
268 /* put the engine back into the active state */
269 WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
270 WREG32(MC_SEQ_SUP_CNTL, 0x00000004);
271 WREG32(MC_SEQ_SUP_CNTL, 0x00000001);
272
273 /* wait for training to complete */
Alex Deucher0e2c9782011-11-02 18:08:25 -0400274 for (i = 0; i < rdev->usec_timeout; i++) {
275 if (RREG32(MC_IO_PAD_CNTL_D0) & MEM_FALL_OUT_CMD)
276 break;
277 udelay(1);
278 }
Alex Deucher0af62b02011-01-06 21:19:31 -0500279
280 if (running)
281 WREG32(MC_SHARED_BLACKOUT_CNTL, blackout);
282 }
283
284 return 0;
285}
286
287int ni_init_microcode(struct radeon_device *rdev)
288{
289 struct platform_device *pdev;
290 const char *chip_name;
291 const char *rlc_chip_name;
292 size_t pfp_req_size, me_req_size, rlc_req_size, mc_req_size;
293 char fw_name[30];
294 int err;
295
296 DRM_DEBUG("\n");
297
298 pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
299 err = IS_ERR(pdev);
300 if (err) {
301 printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
302 return -EINVAL;
303 }
304
305 switch (rdev->family) {
306 case CHIP_BARTS:
307 chip_name = "BARTS";
308 rlc_chip_name = "BTC";
Alex Deucher9b8253c2011-03-02 20:07:28 -0500309 pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
310 me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
311 rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
312 mc_req_size = BTC_MC_UCODE_SIZE * 4;
Alex Deucher0af62b02011-01-06 21:19:31 -0500313 break;
314 case CHIP_TURKS:
315 chip_name = "TURKS";
316 rlc_chip_name = "BTC";
Alex Deucher9b8253c2011-03-02 20:07:28 -0500317 pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
318 me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
319 rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
320 mc_req_size = BTC_MC_UCODE_SIZE * 4;
Alex Deucher0af62b02011-01-06 21:19:31 -0500321 break;
322 case CHIP_CAICOS:
323 chip_name = "CAICOS";
324 rlc_chip_name = "BTC";
Alex Deucher9b8253c2011-03-02 20:07:28 -0500325 pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
326 me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
327 rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
328 mc_req_size = BTC_MC_UCODE_SIZE * 4;
329 break;
330 case CHIP_CAYMAN:
331 chip_name = "CAYMAN";
332 rlc_chip_name = "CAYMAN";
333 pfp_req_size = CAYMAN_PFP_UCODE_SIZE * 4;
334 me_req_size = CAYMAN_PM4_UCODE_SIZE * 4;
335 rlc_req_size = CAYMAN_RLC_UCODE_SIZE * 4;
336 mc_req_size = CAYMAN_MC_UCODE_SIZE * 4;
Alex Deucher0af62b02011-01-06 21:19:31 -0500337 break;
Alex Deucherc420c742012-03-20 17:18:39 -0400338 case CHIP_ARUBA:
339 chip_name = "ARUBA";
340 rlc_chip_name = "ARUBA";
341 /* pfp/me same size as CAYMAN */
342 pfp_req_size = CAYMAN_PFP_UCODE_SIZE * 4;
343 me_req_size = CAYMAN_PM4_UCODE_SIZE * 4;
344 rlc_req_size = ARUBA_RLC_UCODE_SIZE * 4;
345 mc_req_size = 0;
346 break;
Alex Deucher0af62b02011-01-06 21:19:31 -0500347 default: BUG();
348 }
349
Alex Deucher0af62b02011-01-06 21:19:31 -0500350 DRM_INFO("Loading %s Microcode\n", chip_name);
351
352 snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
353 err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
354 if (err)
355 goto out;
356 if (rdev->pfp_fw->size != pfp_req_size) {
357 printk(KERN_ERR
358 "ni_cp: Bogus length %zu in firmware \"%s\"\n",
359 rdev->pfp_fw->size, fw_name);
360 err = -EINVAL;
361 goto out;
362 }
363
364 snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
365 err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
366 if (err)
367 goto out;
368 if (rdev->me_fw->size != me_req_size) {
369 printk(KERN_ERR
370 "ni_cp: Bogus length %zu in firmware \"%s\"\n",
371 rdev->me_fw->size, fw_name);
372 err = -EINVAL;
373 }
374
375 snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
376 err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev);
377 if (err)
378 goto out;
379 if (rdev->rlc_fw->size != rlc_req_size) {
380 printk(KERN_ERR
381 "ni_rlc: Bogus length %zu in firmware \"%s\"\n",
382 rdev->rlc_fw->size, fw_name);
383 err = -EINVAL;
384 }
385
Alex Deucherc420c742012-03-20 17:18:39 -0400386 /* no MC ucode on TN */
387 if (!(rdev->flags & RADEON_IS_IGP)) {
388 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
389 err = request_firmware(&rdev->mc_fw, fw_name, &pdev->dev);
390 if (err)
391 goto out;
392 if (rdev->mc_fw->size != mc_req_size) {
393 printk(KERN_ERR
394 "ni_mc: Bogus length %zu in firmware \"%s\"\n",
395 rdev->mc_fw->size, fw_name);
396 err = -EINVAL;
397 }
Alex Deucher0af62b02011-01-06 21:19:31 -0500398 }
399out:
400 platform_device_unregister(pdev);
401
402 if (err) {
403 if (err != -EINVAL)
404 printk(KERN_ERR
405 "ni_cp: Failed to load firmware \"%s\"\n",
406 fw_name);
407 release_firmware(rdev->pfp_fw);
408 rdev->pfp_fw = NULL;
409 release_firmware(rdev->me_fw);
410 rdev->me_fw = NULL;
411 release_firmware(rdev->rlc_fw);
412 rdev->rlc_fw = NULL;
413 release_firmware(rdev->mc_fw);
414 rdev->mc_fw = NULL;
415 }
416 return err;
417}
418
Alex Deucherfecf1d02011-03-02 20:07:29 -0500419/*
420 * Core functions
421 */
Alex Deucherfecf1d02011-03-02 20:07:29 -0500422static void cayman_gpu_init(struct radeon_device *rdev)
423{
Alex Deucherfecf1d02011-03-02 20:07:29 -0500424 u32 gb_addr_config = 0;
425 u32 mc_shared_chmap, mc_arb_ramcfg;
Alex Deucherfecf1d02011-03-02 20:07:29 -0500426 u32 cgts_tcc_disable;
427 u32 sx_debug_1;
428 u32 smx_dc_ctl0;
Alex Deucherfecf1d02011-03-02 20:07:29 -0500429 u32 cgts_sm_ctrl_reg;
430 u32 hdp_host_path_cntl;
431 u32 tmp;
Alex Deucher416a2bd2012-05-31 19:00:25 -0400432 u32 disabled_rb_mask;
Alex Deucherfecf1d02011-03-02 20:07:29 -0500433 int i, j;
434
435 switch (rdev->family) {
436 case CHIP_CAYMAN:
Alex Deucherfecf1d02011-03-02 20:07:29 -0500437 rdev->config.cayman.max_shader_engines = 2;
438 rdev->config.cayman.max_pipes_per_simd = 4;
439 rdev->config.cayman.max_tile_pipes = 8;
440 rdev->config.cayman.max_simds_per_se = 12;
441 rdev->config.cayman.max_backends_per_se = 4;
442 rdev->config.cayman.max_texture_channel_caches = 8;
443 rdev->config.cayman.max_gprs = 256;
444 rdev->config.cayman.max_threads = 256;
445 rdev->config.cayman.max_gs_threads = 32;
446 rdev->config.cayman.max_stack_entries = 512;
447 rdev->config.cayman.sx_num_of_sets = 8;
448 rdev->config.cayman.sx_max_export_size = 256;
449 rdev->config.cayman.sx_max_export_pos_size = 64;
450 rdev->config.cayman.sx_max_export_smx_size = 192;
451 rdev->config.cayman.max_hw_contexts = 8;
452 rdev->config.cayman.sq_num_cf_insts = 2;
453
454 rdev->config.cayman.sc_prim_fifo_size = 0x100;
455 rdev->config.cayman.sc_hiz_tile_fifo_size = 0x30;
456 rdev->config.cayman.sc_earlyz_tile_fifo_size = 0x130;
Alex Deucher416a2bd2012-05-31 19:00:25 -0400457 gb_addr_config = CAYMAN_GB_ADDR_CONFIG_GOLDEN;
Alex Deucherfecf1d02011-03-02 20:07:29 -0500458 break;
Alex Deucher7b76e472012-03-20 17:18:36 -0400459 case CHIP_ARUBA:
460 default:
461 rdev->config.cayman.max_shader_engines = 1;
462 rdev->config.cayman.max_pipes_per_simd = 4;
463 rdev->config.cayman.max_tile_pipes = 2;
464 if ((rdev->pdev->device == 0x9900) ||
Alex Deucherd430f7d2012-06-05 09:50:28 -0400465 (rdev->pdev->device == 0x9901) ||
466 (rdev->pdev->device == 0x9905) ||
467 (rdev->pdev->device == 0x9906) ||
468 (rdev->pdev->device == 0x9907) ||
469 (rdev->pdev->device == 0x9908) ||
470 (rdev->pdev->device == 0x9909) ||
471 (rdev->pdev->device == 0x9910) ||
472 (rdev->pdev->device == 0x9917)) {
Alex Deucher7b76e472012-03-20 17:18:36 -0400473 rdev->config.cayman.max_simds_per_se = 6;
474 rdev->config.cayman.max_backends_per_se = 2;
475 } else if ((rdev->pdev->device == 0x9903) ||
Alex Deucherd430f7d2012-06-05 09:50:28 -0400476 (rdev->pdev->device == 0x9904) ||
477 (rdev->pdev->device == 0x990A) ||
478 (rdev->pdev->device == 0x9913) ||
479 (rdev->pdev->device == 0x9918)) {
Alex Deucher7b76e472012-03-20 17:18:36 -0400480 rdev->config.cayman.max_simds_per_se = 4;
481 rdev->config.cayman.max_backends_per_se = 2;
Alex Deucherd430f7d2012-06-05 09:50:28 -0400482 } else if ((rdev->pdev->device == 0x9919) ||
483 (rdev->pdev->device == 0x9990) ||
484 (rdev->pdev->device == 0x9991) ||
485 (rdev->pdev->device == 0x9994) ||
486 (rdev->pdev->device == 0x99A0)) {
Alex Deucher7b76e472012-03-20 17:18:36 -0400487 rdev->config.cayman.max_simds_per_se = 3;
488 rdev->config.cayman.max_backends_per_se = 1;
489 } else {
490 rdev->config.cayman.max_simds_per_se = 2;
491 rdev->config.cayman.max_backends_per_se = 1;
492 }
493 rdev->config.cayman.max_texture_channel_caches = 2;
494 rdev->config.cayman.max_gprs = 256;
495 rdev->config.cayman.max_threads = 256;
496 rdev->config.cayman.max_gs_threads = 32;
497 rdev->config.cayman.max_stack_entries = 512;
498 rdev->config.cayman.sx_num_of_sets = 8;
499 rdev->config.cayman.sx_max_export_size = 256;
500 rdev->config.cayman.sx_max_export_pos_size = 64;
501 rdev->config.cayman.sx_max_export_smx_size = 192;
502 rdev->config.cayman.max_hw_contexts = 8;
503 rdev->config.cayman.sq_num_cf_insts = 2;
504
505 rdev->config.cayman.sc_prim_fifo_size = 0x40;
506 rdev->config.cayman.sc_hiz_tile_fifo_size = 0x30;
507 rdev->config.cayman.sc_earlyz_tile_fifo_size = 0x130;
Alex Deucher416a2bd2012-05-31 19:00:25 -0400508 gb_addr_config = ARUBA_GB_ADDR_CONFIG_GOLDEN;
Alex Deucher7b76e472012-03-20 17:18:36 -0400509 break;
Alex Deucherfecf1d02011-03-02 20:07:29 -0500510 }
511
512 /* Initialize HDP */
513 for (i = 0, j = 0; i < 32; i++, j += 0x18) {
514 WREG32((0x2c14 + j), 0x00000000);
515 WREG32((0x2c18 + j), 0x00000000);
516 WREG32((0x2c1c + j), 0x00000000);
517 WREG32((0x2c20 + j), 0x00000000);
518 WREG32((0x2c24 + j), 0x00000000);
519 }
520
521 WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
522
Alex Deucherd054ac12011-09-01 17:46:15 +0000523 evergreen_fix_pci_max_read_req_size(rdev);
524
Alex Deucherfecf1d02011-03-02 20:07:29 -0500525 mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
526 mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
527
Alex Deucherfecf1d02011-03-02 20:07:29 -0500528 tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT;
529 rdev->config.cayman.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
530 if (rdev->config.cayman.mem_row_size_in_kb > 4)
531 rdev->config.cayman.mem_row_size_in_kb = 4;
532 /* XXX use MC settings? */
533 rdev->config.cayman.shader_engine_tile_size = 32;
534 rdev->config.cayman.num_gpus = 1;
535 rdev->config.cayman.multi_gpu_tile_size = 64;
536
Alex Deucherfecf1d02011-03-02 20:07:29 -0500537 tmp = (gb_addr_config & NUM_PIPES_MASK) >> NUM_PIPES_SHIFT;
538 rdev->config.cayman.num_tile_pipes = (1 << tmp);
539 tmp = (gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT;
540 rdev->config.cayman.mem_max_burst_length_bytes = (tmp + 1) * 256;
541 tmp = (gb_addr_config & NUM_SHADER_ENGINES_MASK) >> NUM_SHADER_ENGINES_SHIFT;
542 rdev->config.cayman.num_shader_engines = tmp + 1;
543 tmp = (gb_addr_config & NUM_GPUS_MASK) >> NUM_GPUS_SHIFT;
544 rdev->config.cayman.num_gpus = tmp + 1;
545 tmp = (gb_addr_config & MULTI_GPU_TILE_SIZE_MASK) >> MULTI_GPU_TILE_SIZE_SHIFT;
546 rdev->config.cayman.multi_gpu_tile_size = 1 << tmp;
547 tmp = (gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT;
548 rdev->config.cayman.mem_row_size_in_kb = 1 << tmp;
549
Alex Deucher416a2bd2012-05-31 19:00:25 -0400550
Alex Deucherfecf1d02011-03-02 20:07:29 -0500551 /* setup tiling info dword. gb_addr_config is not adequate since it does
552 * not have bank info, so create a custom tiling dword.
553 * bits 3:0 num_pipes
554 * bits 7:4 num_banks
555 * bits 11:8 group_size
556 * bits 15:12 row_size
557 */
558 rdev->config.cayman.tile_config = 0;
559 switch (rdev->config.cayman.num_tile_pipes) {
560 case 1:
561 default:
562 rdev->config.cayman.tile_config |= (0 << 0);
563 break;
564 case 2:
565 rdev->config.cayman.tile_config |= (1 << 0);
566 break;
567 case 4:
568 rdev->config.cayman.tile_config |= (2 << 0);
569 break;
570 case 8:
571 rdev->config.cayman.tile_config |= (3 << 0);
572 break;
573 }
Alex Deucher7b76e472012-03-20 17:18:36 -0400574
575 /* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */
576 if (rdev->flags & RADEON_IS_IGP)
Alex Deucher1f73cca2012-05-24 22:55:15 -0400577 rdev->config.cayman.tile_config |= 1 << 4;
Alex Deucher29d65402012-05-31 18:53:36 -0400578 else {
Alex Deucher5b23c902012-07-31 11:05:11 -0400579 switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) {
580 case 0: /* four banks */
Alex Deucher29d65402012-05-31 18:53:36 -0400581 rdev->config.cayman.tile_config |= 0 << 4;
Alex Deucher5b23c902012-07-31 11:05:11 -0400582 break;
583 case 1: /* eight banks */
584 rdev->config.cayman.tile_config |= 1 << 4;
585 break;
586 case 2: /* sixteen banks */
587 default:
588 rdev->config.cayman.tile_config |= 2 << 4;
589 break;
590 }
Alex Deucher29d65402012-05-31 18:53:36 -0400591 }
Alex Deucherfecf1d02011-03-02 20:07:29 -0500592 rdev->config.cayman.tile_config |=
Dave Airliecde50832011-05-19 14:14:41 +1000593 ((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
Alex Deucherfecf1d02011-03-02 20:07:29 -0500594 rdev->config.cayman.tile_config |=
595 ((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12;
596
Alex Deucher416a2bd2012-05-31 19:00:25 -0400597 tmp = 0;
598 for (i = (rdev->config.cayman.max_shader_engines - 1); i >= 0; i--) {
599 u32 rb_disable_bitmap;
600
601 WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
602 WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
603 rb_disable_bitmap = (RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000) >> 16;
604 tmp <<= 4;
605 tmp |= rb_disable_bitmap;
606 }
607 /* enabled rb are just the one not disabled :) */
608 disabled_rb_mask = tmp;
609
610 WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
611 WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
612
Alex Deucherfecf1d02011-03-02 20:07:29 -0500613 WREG32(GB_ADDR_CONFIG, gb_addr_config);
614 WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
615 WREG32(HDP_ADDR_CONFIG, gb_addr_config);
Alex Deucherf60cbd12012-12-04 15:27:33 -0500616 WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config);
617 WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config);
Alex Deucherfecf1d02011-03-02 20:07:29 -0500618
Alex Deucher8f612b22013-03-11 19:28:39 -0400619 if ((rdev->config.cayman.max_backends_per_se == 1) &&
620 (rdev->flags & RADEON_IS_IGP)) {
621 if ((disabled_rb_mask & 3) == 1) {
622 /* RB0 disabled, RB1 enabled */
623 tmp = 0x11111111;
624 } else {
625 /* RB1 disabled, RB0 enabled */
626 tmp = 0x00000000;
627 }
628 } else {
629 tmp = gb_addr_config & NUM_PIPES_MASK;
630 tmp = r6xx_remap_render_backend(rdev, tmp,
631 rdev->config.cayman.max_backends_per_se *
632 rdev->config.cayman.max_shader_engines,
633 CAYMAN_MAX_BACKENDS, disabled_rb_mask);
634 }
Alex Deucher416a2bd2012-05-31 19:00:25 -0400635 WREG32(GB_BACKEND_MAP, tmp);
Alex Deucherfecf1d02011-03-02 20:07:29 -0500636
Alex Deucher416a2bd2012-05-31 19:00:25 -0400637 cgts_tcc_disable = 0xffff0000;
638 for (i = 0; i < rdev->config.cayman.max_texture_channel_caches; i++)
639 cgts_tcc_disable &= ~(1 << (16 + i));
Alex Deucherfecf1d02011-03-02 20:07:29 -0500640 WREG32(CGTS_TCC_DISABLE, cgts_tcc_disable);
641 WREG32(CGTS_SYS_TCC_DISABLE, cgts_tcc_disable);
Alex Deucherfecf1d02011-03-02 20:07:29 -0500642 WREG32(CGTS_USER_SYS_TCC_DISABLE, cgts_tcc_disable);
643 WREG32(CGTS_USER_TCC_DISABLE, cgts_tcc_disable);
644
645 /* reprogram the shader complex */
646 cgts_sm_ctrl_reg = RREG32(CGTS_SM_CTRL_REG);
647 for (i = 0; i < 16; i++)
648 WREG32(CGTS_SM_CTRL_REG, OVERRIDE);
649 WREG32(CGTS_SM_CTRL_REG, cgts_sm_ctrl_reg);
650
651 /* set HW defaults for 3D engine */
652 WREG32(CP_MEQ_THRESHOLDS, MEQ1_START(0x30) | MEQ2_START(0x60));
653
654 sx_debug_1 = RREG32(SX_DEBUG_1);
655 sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
656 WREG32(SX_DEBUG_1, sx_debug_1);
657
658 smx_dc_ctl0 = RREG32(SMX_DC_CTL0);
659 smx_dc_ctl0 &= ~NUMBER_OF_SETS(0x1ff);
Dave Airlie285e0422011-05-09 14:54:33 +1000660 smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.cayman.sx_num_of_sets);
Alex Deucherfecf1d02011-03-02 20:07:29 -0500661 WREG32(SMX_DC_CTL0, smx_dc_ctl0);
662
663 WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4) | CRC_SIMD_ID_WADDR_DISABLE);
664
665 /* need to be explicitly zero-ed */
666 WREG32(VGT_OFFCHIP_LDS_BASE, 0);
667 WREG32(SQ_LSTMP_RING_BASE, 0);
668 WREG32(SQ_HSTMP_RING_BASE, 0);
669 WREG32(SQ_ESTMP_RING_BASE, 0);
670 WREG32(SQ_GSTMP_RING_BASE, 0);
671 WREG32(SQ_VSTMP_RING_BASE, 0);
672 WREG32(SQ_PSTMP_RING_BASE, 0);
673
674 WREG32(TA_CNTL_AUX, DISABLE_CUBE_ANISO);
675
Dave Airlie285e0422011-05-09 14:54:33 +1000676 WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.cayman.sx_max_export_size / 4) - 1) |
677 POSITION_BUFFER_SIZE((rdev->config.cayman.sx_max_export_pos_size / 4) - 1) |
678 SMX_BUFFER_SIZE((rdev->config.cayman.sx_max_export_smx_size / 4) - 1)));
Alex Deucherfecf1d02011-03-02 20:07:29 -0500679
Dave Airlie285e0422011-05-09 14:54:33 +1000680 WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.cayman.sc_prim_fifo_size) |
681 SC_HIZ_TILE_FIFO_SIZE(rdev->config.cayman.sc_hiz_tile_fifo_size) |
682 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.cayman.sc_earlyz_tile_fifo_size)));
Alex Deucherfecf1d02011-03-02 20:07:29 -0500683
684
685 WREG32(VGT_NUM_INSTANCES, 1);
686
687 WREG32(CP_PERFMON_CNTL, 0);
688
Dave Airlie285e0422011-05-09 14:54:33 +1000689 WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.cayman.sq_num_cf_insts) |
Alex Deucherfecf1d02011-03-02 20:07:29 -0500690 FETCH_FIFO_HIWATER(0x4) |
691 DONE_FIFO_HIWATER(0xe0) |
692 ALU_UPDATE_FIFO_HIWATER(0x8)));
693
694 WREG32(SQ_GPR_RESOURCE_MGMT_1, NUM_CLAUSE_TEMP_GPRS(4));
695 WREG32(SQ_CONFIG, (VC_ENABLE |
696 EXPORT_SRC_C |
697 GFX_PRIO(0) |
698 CS1_PRIO(0) |
699 CS2_PRIO(1)));
700 WREG32(SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, DYN_GPR_ENABLE);
701
702 WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
703 FORCE_EOV_MAX_REZ_CNT(255)));
704
705 WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC) |
706 AUTO_INVLD_EN(ES_AND_GS_AUTO));
707
708 WREG32(VGT_GS_VERTEX_REUSE, 16);
709 WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
710
711 WREG32(CB_PERF_CTR0_SEL_0, 0);
712 WREG32(CB_PERF_CTR0_SEL_1, 0);
713 WREG32(CB_PERF_CTR1_SEL_0, 0);
714 WREG32(CB_PERF_CTR1_SEL_1, 0);
715 WREG32(CB_PERF_CTR2_SEL_0, 0);
716 WREG32(CB_PERF_CTR2_SEL_1, 0);
717 WREG32(CB_PERF_CTR3_SEL_0, 0);
718 WREG32(CB_PERF_CTR3_SEL_1, 0);
719
Dave Airlie0b65f832011-05-19 14:14:42 +1000720 tmp = RREG32(HDP_MISC_CNTL);
721 tmp |= HDP_FLUSH_INVALIDATE_CACHE;
722 WREG32(HDP_MISC_CNTL, tmp);
723
Alex Deucherfecf1d02011-03-02 20:07:29 -0500724 hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
725 WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
726
727 WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));
728
729 udelay(50);
730}
731
Alex Deucherfa8198e2011-03-02 20:07:30 -0500732/*
733 * GART
734 */
735void cayman_pcie_gart_tlb_flush(struct radeon_device *rdev)
736{
737 /* flush hdp cache */
738 WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
739
740 /* bits 0-7 are the VM contexts0-7 */
741 WREG32(VM_INVALIDATE_REQUEST, 1);
742}
743
Lauri Kasanen1109ca02012-08-31 13:43:50 -0400744static int cayman_pcie_gart_enable(struct radeon_device *rdev)
Alex Deucherfa8198e2011-03-02 20:07:30 -0500745{
Jerome Glisse721604a2012-01-05 22:11:05 -0500746 int i, r;
Alex Deucherfa8198e2011-03-02 20:07:30 -0500747
Jerome Glissec9a1be92011-11-03 11:16:49 -0400748 if (rdev->gart.robj == NULL) {
Alex Deucherfa8198e2011-03-02 20:07:30 -0500749 dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
750 return -EINVAL;
751 }
752 r = radeon_gart_table_vram_pin(rdev);
753 if (r)
754 return r;
755 radeon_gart_restore(rdev);
756 /* Setup TLB control */
Jerome Glisse721604a2012-01-05 22:11:05 -0500757 WREG32(MC_VM_MX_L1_TLB_CNTL,
758 (0xA << 7) |
759 ENABLE_L1_TLB |
Alex Deucherfa8198e2011-03-02 20:07:30 -0500760 ENABLE_L1_FRAGMENT_PROCESSING |
761 SYSTEM_ACCESS_MODE_NOT_IN_SYS |
Jerome Glisse721604a2012-01-05 22:11:05 -0500762 ENABLE_ADVANCED_DRIVER_MODEL |
Alex Deucherfa8198e2011-03-02 20:07:30 -0500763 SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
764 /* Setup L2 cache */
765 WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
766 ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
767 ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
768 EFFECTIVE_L2_QUEUE_SIZE(7) |
769 CONTEXT1_IDENTITY_ACCESS_MODE(1));
770 WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE);
771 WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
772 L2_CACHE_BIGK_FRAGMENT_SIZE(6));
773 /* setup context0 */
774 WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
775 WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
776 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
777 WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
778 (u32)(rdev->dummy_page.addr >> 12));
779 WREG32(VM_CONTEXT0_CNTL2, 0);
780 WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
781 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
Jerome Glisse721604a2012-01-05 22:11:05 -0500782
783 WREG32(0x15D4, 0);
784 WREG32(0x15D8, 0);
785 WREG32(0x15DC, 0);
786
787 /* empty context1-7 */
Alex Deucher23d4f1f2012-10-08 09:45:46 -0400788 /* Assign the pt base to something valid for now; the pts used for
789 * the VMs are determined by the application and setup and assigned
790 * on the fly in the vm part of radeon_gart.c
791 */
Jerome Glisse721604a2012-01-05 22:11:05 -0500792 for (i = 1; i < 8; i++) {
793 WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (i << 2), 0);
Alex Deucherc1a7ca02012-10-08 12:15:13 -0400794 WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (i << 2), rdev->vm_manager.max_pfn);
Jerome Glisse721604a2012-01-05 22:11:05 -0500795 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
796 rdev->gart.table_addr >> 12);
797 }
798
799 /* enable context1-7 */
800 WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
801 (u32)(rdev->dummy_page.addr >> 12));
Christian Königae133a12012-09-18 15:30:44 -0400802 WREG32(VM_CONTEXT1_CNTL2, 4);
Dmitry Cherkasovfa87e622012-09-17 19:36:19 +0200803 WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
Christian Königae133a12012-09-18 15:30:44 -0400804 RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
805 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
806 DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
807 DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT |
808 PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT |
809 PDE0_PROTECTION_FAULT_ENABLE_DEFAULT |
810 VALID_PROTECTION_FAULT_ENABLE_INTERRUPT |
811 VALID_PROTECTION_FAULT_ENABLE_DEFAULT |
812 READ_PROTECTION_FAULT_ENABLE_INTERRUPT |
813 READ_PROTECTION_FAULT_ENABLE_DEFAULT |
814 WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT |
815 WRITE_PROTECTION_FAULT_ENABLE_DEFAULT);
Alex Deucherfa8198e2011-03-02 20:07:30 -0500816
817 cayman_pcie_gart_tlb_flush(rdev);
Tormod Voldenfcf4de52011-08-31 21:54:07 +0000818 DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
819 (unsigned)(rdev->mc.gtt_size >> 20),
820 (unsigned long long)rdev->gart.table_addr);
Alex Deucherfa8198e2011-03-02 20:07:30 -0500821 rdev->gart.ready = true;
822 return 0;
823}
824
Lauri Kasanen1109ca02012-08-31 13:43:50 -0400825static void cayman_pcie_gart_disable(struct radeon_device *rdev)
Alex Deucherfa8198e2011-03-02 20:07:30 -0500826{
Alex Deucherfa8198e2011-03-02 20:07:30 -0500827 /* Disable all tables */
828 WREG32(VM_CONTEXT0_CNTL, 0);
829 WREG32(VM_CONTEXT1_CNTL, 0);
830 /* Setup TLB control */
831 WREG32(MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING |
832 SYSTEM_ACCESS_MODE_NOT_IN_SYS |
833 SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
834 /* Setup L2 cache */
835 WREG32(VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
836 ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
837 EFFECTIVE_L2_QUEUE_SIZE(7) |
838 CONTEXT1_IDENTITY_ACCESS_MODE(1));
839 WREG32(VM_L2_CNTL2, 0);
840 WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
841 L2_CACHE_BIGK_FRAGMENT_SIZE(6));
Jerome Glissec9a1be92011-11-03 11:16:49 -0400842 radeon_gart_table_vram_unpin(rdev);
Alex Deucherfa8198e2011-03-02 20:07:30 -0500843}
844
Lauri Kasanen1109ca02012-08-31 13:43:50 -0400845static void cayman_pcie_gart_fini(struct radeon_device *rdev)
Alex Deucherfa8198e2011-03-02 20:07:30 -0500846{
847 cayman_pcie_gart_disable(rdev);
848 radeon_gart_table_vram_free(rdev);
849 radeon_gart_fini(rdev);
850}
851
Alex Deucher1b370782011-11-17 20:13:28 -0500852void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
853 int ring, u32 cp_int_cntl)
854{
855 u32 srbm_gfx_cntl = RREG32(SRBM_GFX_CNTL) & ~3;
856
857 WREG32(SRBM_GFX_CNTL, srbm_gfx_cntl | (ring & 3));
858 WREG32(CP_INT_CNTL, cp_int_cntl);
859}
860
Alex Deucher0c88a022011-03-02 20:07:31 -0500861/*
862 * CP.
863 */
Alex Deucherb40e7e12011-11-17 14:57:50 -0500864void cayman_fence_ring_emit(struct radeon_device *rdev,
865 struct radeon_fence *fence)
866{
867 struct radeon_ring *ring = &rdev->ring[fence->ring];
868 u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
869
Jerome Glisse721604a2012-01-05 22:11:05 -0500870 /* flush read cache over gart for this vmid */
871 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
872 radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
873 radeon_ring_write(ring, 0);
Alex Deucherb40e7e12011-11-17 14:57:50 -0500874 radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
875 radeon_ring_write(ring, PACKET3_TC_ACTION_ENA | PACKET3_SH_ACTION_ENA);
876 radeon_ring_write(ring, 0xFFFFFFFF);
877 radeon_ring_write(ring, 0);
878 radeon_ring_write(ring, 10); /* poll interval */
879 /* EVENT_WRITE_EOP - flush caches, send int */
880 radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
881 radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
882 radeon_ring_write(ring, addr & 0xffffffff);
883 radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
884 radeon_ring_write(ring, fence->seq);
885 radeon_ring_write(ring, 0);
886}
887
Jerome Glisse721604a2012-01-05 22:11:05 -0500888void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
889{
Christian König876dc9f2012-05-08 14:24:01 +0200890 struct radeon_ring *ring = &rdev->ring[ib->ring];
Jerome Glisse721604a2012-01-05 22:11:05 -0500891
892 /* set to DX10/11 mode */
893 radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
894 radeon_ring_write(ring, 1);
Christian König45df6802012-07-06 16:22:55 +0200895
896 if (ring->rptr_save_reg) {
897 uint32_t next_rptr = ring->wptr + 3 + 4 + 8;
898 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
899 radeon_ring_write(ring, ((ring->rptr_save_reg -
900 PACKET3_SET_CONFIG_REG_START) >> 2));
901 radeon_ring_write(ring, next_rptr);
902 }
903
Jerome Glisse721604a2012-01-05 22:11:05 -0500904 radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
905 radeon_ring_write(ring,
906#ifdef __BIG_ENDIAN
907 (2 << 0) |
908#endif
909 (ib->gpu_addr & 0xFFFFFFFC));
910 radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
Christian König4bf3dd92012-08-06 18:57:44 +0200911 radeon_ring_write(ring, ib->length_dw |
912 (ib->vm ? (ib->vm->id << 24) : 0));
Jerome Glisse721604a2012-01-05 22:11:05 -0500913
914 /* flush read cache over gart for this vmid */
915 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
916 radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
Christian König4bf3dd92012-08-06 18:57:44 +0200917 radeon_ring_write(ring, ib->vm ? ib->vm->id : 0);
Jerome Glisse721604a2012-01-05 22:11:05 -0500918 radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
919 radeon_ring_write(ring, PACKET3_TC_ACTION_ENA | PACKET3_SH_ACTION_ENA);
920 radeon_ring_write(ring, 0xFFFFFFFF);
921 radeon_ring_write(ring, 0);
922 radeon_ring_write(ring, 10); /* poll interval */
923}
924
Alex Deucher0c88a022011-03-02 20:07:31 -0500925static void cayman_cp_enable(struct radeon_device *rdev, bool enable)
926{
927 if (enable)
928 WREG32(CP_ME_CNTL, 0);
929 else {
Dave Airlie38f1cff2011-03-16 11:34:41 +1000930 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
Alex Deucher0c88a022011-03-02 20:07:31 -0500931 WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
932 WREG32(SCRATCH_UMSK, 0);
Alex Deucherf60cbd12012-12-04 15:27:33 -0500933 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
Alex Deucher0c88a022011-03-02 20:07:31 -0500934 }
935}
936
937static int cayman_cp_load_microcode(struct radeon_device *rdev)
938{
939 const __be32 *fw_data;
940 int i;
941
942 if (!rdev->me_fw || !rdev->pfp_fw)
943 return -EINVAL;
944
945 cayman_cp_enable(rdev, false);
946
947 fw_data = (const __be32 *)rdev->pfp_fw->data;
948 WREG32(CP_PFP_UCODE_ADDR, 0);
949 for (i = 0; i < CAYMAN_PFP_UCODE_SIZE; i++)
950 WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
951 WREG32(CP_PFP_UCODE_ADDR, 0);
952
953 fw_data = (const __be32 *)rdev->me_fw->data;
954 WREG32(CP_ME_RAM_WADDR, 0);
955 for (i = 0; i < CAYMAN_PM4_UCODE_SIZE; i++)
956 WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
957
958 WREG32(CP_PFP_UCODE_ADDR, 0);
959 WREG32(CP_ME_RAM_WADDR, 0);
960 WREG32(CP_ME_RAM_RADDR, 0);
961 return 0;
962}
963
964static int cayman_cp_start(struct radeon_device *rdev)
965{
Christian Könige32eb502011-10-23 12:56:27 +0200966 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
Alex Deucher0c88a022011-03-02 20:07:31 -0500967 int r, i;
968
Christian Könige32eb502011-10-23 12:56:27 +0200969 r = radeon_ring_lock(rdev, ring, 7);
Alex Deucher0c88a022011-03-02 20:07:31 -0500970 if (r) {
971 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
972 return r;
973 }
Christian Könige32eb502011-10-23 12:56:27 +0200974 radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
975 radeon_ring_write(ring, 0x1);
976 radeon_ring_write(ring, 0x0);
977 radeon_ring_write(ring, rdev->config.cayman.max_hw_contexts - 1);
978 radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
979 radeon_ring_write(ring, 0);
980 radeon_ring_write(ring, 0);
981 radeon_ring_unlock_commit(rdev, ring);
Alex Deucher0c88a022011-03-02 20:07:31 -0500982
983 cayman_cp_enable(rdev, true);
984
Christian Könige32eb502011-10-23 12:56:27 +0200985 r = radeon_ring_lock(rdev, ring, cayman_default_size + 19);
Alex Deucher0c88a022011-03-02 20:07:31 -0500986 if (r) {
987 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
988 return r;
989 }
990
991 /* setup clear context state */
Christian Könige32eb502011-10-23 12:56:27 +0200992 radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
993 radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
Alex Deucher0c88a022011-03-02 20:07:31 -0500994
995 for (i = 0; i < cayman_default_size; i++)
Christian Könige32eb502011-10-23 12:56:27 +0200996 radeon_ring_write(ring, cayman_default_state[i]);
Alex Deucher0c88a022011-03-02 20:07:31 -0500997
Christian Könige32eb502011-10-23 12:56:27 +0200998 radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
999 radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
Alex Deucher0c88a022011-03-02 20:07:31 -05001000
1001 /* set clear context state */
Christian Könige32eb502011-10-23 12:56:27 +02001002 radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
1003 radeon_ring_write(ring, 0);
Alex Deucher0c88a022011-03-02 20:07:31 -05001004
1005 /* SQ_VTX_BASE_VTX_LOC */
Christian Könige32eb502011-10-23 12:56:27 +02001006 radeon_ring_write(ring, 0xc0026f00);
1007 radeon_ring_write(ring, 0x00000000);
1008 radeon_ring_write(ring, 0x00000000);
1009 radeon_ring_write(ring, 0x00000000);
Alex Deucher0c88a022011-03-02 20:07:31 -05001010
1011 /* Clear consts */
Christian Könige32eb502011-10-23 12:56:27 +02001012 radeon_ring_write(ring, 0xc0036f00);
1013 radeon_ring_write(ring, 0x00000bc4);
1014 radeon_ring_write(ring, 0xffffffff);
1015 radeon_ring_write(ring, 0xffffffff);
1016 radeon_ring_write(ring, 0xffffffff);
Alex Deucher0c88a022011-03-02 20:07:31 -05001017
Christian Könige32eb502011-10-23 12:56:27 +02001018 radeon_ring_write(ring, 0xc0026900);
1019 radeon_ring_write(ring, 0x00000316);
1020 radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
1021 radeon_ring_write(ring, 0x00000010); /* */
Alex Deucher9b91d182011-03-02 20:07:39 -05001022
Christian Könige32eb502011-10-23 12:56:27 +02001023 radeon_ring_unlock_commit(rdev, ring);
Alex Deucher0c88a022011-03-02 20:07:31 -05001024
1025 /* XXX init other rings */
1026
1027 return 0;
1028}
1029
Alex Deucher755d8192011-03-02 20:07:34 -05001030static void cayman_cp_fini(struct radeon_device *rdev)
1031{
Christian König45df6802012-07-06 16:22:55 +02001032 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
Alex Deucher755d8192011-03-02 20:07:34 -05001033 cayman_cp_enable(rdev, false);
Christian König45df6802012-07-06 16:22:55 +02001034 radeon_ring_fini(rdev, ring);
1035 radeon_scratch_free(rdev, ring->rptr_save_reg);
Alex Deucher755d8192011-03-02 20:07:34 -05001036}
1037
Lauri Kasanen1109ca02012-08-31 13:43:50 -04001038static int cayman_cp_resume(struct radeon_device *rdev)
Alex Deucher0c88a022011-03-02 20:07:31 -05001039{
Christian Königb90ca982012-07-04 21:36:53 +02001040 static const int ridx[] = {
1041 RADEON_RING_TYPE_GFX_INDEX,
1042 CAYMAN_RING_TYPE_CP1_INDEX,
1043 CAYMAN_RING_TYPE_CP2_INDEX
1044 };
1045 static const unsigned cp_rb_cntl[] = {
1046 CP_RB0_CNTL,
1047 CP_RB1_CNTL,
1048 CP_RB2_CNTL,
1049 };
1050 static const unsigned cp_rb_rptr_addr[] = {
1051 CP_RB0_RPTR_ADDR,
1052 CP_RB1_RPTR_ADDR,
1053 CP_RB2_RPTR_ADDR
1054 };
1055 static const unsigned cp_rb_rptr_addr_hi[] = {
1056 CP_RB0_RPTR_ADDR_HI,
1057 CP_RB1_RPTR_ADDR_HI,
1058 CP_RB2_RPTR_ADDR_HI
1059 };
1060 static const unsigned cp_rb_base[] = {
1061 CP_RB0_BASE,
1062 CP_RB1_BASE,
1063 CP_RB2_BASE
1064 };
Christian Könige32eb502011-10-23 12:56:27 +02001065 struct radeon_ring *ring;
Christian Königb90ca982012-07-04 21:36:53 +02001066 int i, r;
Alex Deucher0c88a022011-03-02 20:07:31 -05001067
1068 /* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
1069 WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
1070 SOFT_RESET_PA |
1071 SOFT_RESET_SH |
1072 SOFT_RESET_VGT |
Jerome Glissea49a50d2011-08-24 20:00:17 +00001073 SOFT_RESET_SPI |
Alex Deucher0c88a022011-03-02 20:07:31 -05001074 SOFT_RESET_SX));
1075 RREG32(GRBM_SOFT_RESET);
1076 mdelay(15);
1077 WREG32(GRBM_SOFT_RESET, 0);
1078 RREG32(GRBM_SOFT_RESET);
1079
Christian König15d33322011-09-15 19:02:22 +02001080 WREG32(CP_SEM_WAIT_TIMER, 0x0);
Alex Deucher11ef3f1f2012-01-20 14:47:43 -05001081 WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
Alex Deucher0c88a022011-03-02 20:07:31 -05001082
1083 /* Set the write pointer delay */
1084 WREG32(CP_RB_WPTR_DELAY, 0);
1085
1086 WREG32(CP_DEBUG, (1 << 27));
1087
Adam Buchbinder48fc7f72012-09-19 21:48:00 -04001088 /* set the wb address whether it's enabled or not */
Alex Deucher0c88a022011-03-02 20:07:31 -05001089 WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
Christian Königb90ca982012-07-04 21:36:53 +02001090 WREG32(SCRATCH_UMSK, 0xff);
Alex Deucher0c88a022011-03-02 20:07:31 -05001091
Christian Königb90ca982012-07-04 21:36:53 +02001092 for (i = 0; i < 3; ++i) {
1093 uint32_t rb_cntl;
1094 uint64_t addr;
1095
1096 /* Set ring buffer size */
1097 ring = &rdev->ring[ridx[i]];
1098 rb_cntl = drm_order(ring->ring_size / 8);
1099 rb_cntl |= drm_order(RADEON_GPU_PAGE_SIZE/8) << 8;
1100#ifdef __BIG_ENDIAN
1101 rb_cntl |= BUF_SWAP_32BIT;
1102#endif
1103 WREG32(cp_rb_cntl[i], rb_cntl);
1104
Adam Buchbinder48fc7f72012-09-19 21:48:00 -04001105 /* set the wb address whether it's enabled or not */
Christian Königb90ca982012-07-04 21:36:53 +02001106 addr = rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET;
1107 WREG32(cp_rb_rptr_addr[i], addr & 0xFFFFFFFC);
1108 WREG32(cp_rb_rptr_addr_hi[i], upper_32_bits(addr) & 0xFF);
Alex Deucher0c88a022011-03-02 20:07:31 -05001109 }
1110
Christian Königb90ca982012-07-04 21:36:53 +02001111 /* set the rb base addr, this causes an internal reset of ALL rings */
1112 for (i = 0; i < 3; ++i) {
1113 ring = &rdev->ring[ridx[i]];
1114 WREG32(cp_rb_base[i], ring->gpu_addr >> 8);
1115 }
Alex Deucher0c88a022011-03-02 20:07:31 -05001116
Christian Königb90ca982012-07-04 21:36:53 +02001117 for (i = 0; i < 3; ++i) {
1118 /* Initialize the ring buffer's read and write pointers */
1119 ring = &rdev->ring[ridx[i]];
1120 WREG32_P(cp_rb_cntl[i], RB_RPTR_WR_ENA, ~RB_RPTR_WR_ENA);
Alex Deucher0c88a022011-03-02 20:07:31 -05001121
Christian Königb90ca982012-07-04 21:36:53 +02001122 ring->rptr = ring->wptr = 0;
1123 WREG32(ring->rptr_reg, ring->rptr);
1124 WREG32(ring->wptr_reg, ring->wptr);
Alex Deucher0c88a022011-03-02 20:07:31 -05001125
Christian Königb90ca982012-07-04 21:36:53 +02001126 mdelay(1);
1127 WREG32_P(cp_rb_cntl[i], 0, ~RB_RPTR_WR_ENA);
1128 }
Alex Deucher0c88a022011-03-02 20:07:31 -05001129
1130 /* start the rings */
1131 cayman_cp_start(rdev);
Christian Könige32eb502011-10-23 12:56:27 +02001132 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = true;
1133 rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
1134 rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
Alex Deucher0c88a022011-03-02 20:07:31 -05001135 /* this only test cp0 */
Alex Deucherf7128122012-02-23 17:53:45 -05001136 r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
Alex Deucher0c88a022011-03-02 20:07:31 -05001137 if (r) {
Christian Könige32eb502011-10-23 12:56:27 +02001138 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
1139 rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
1140 rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
Alex Deucher0c88a022011-03-02 20:07:31 -05001141 return r;
1142 }
1143
1144 return 0;
1145}
1146
Alex Deucherf60cbd12012-12-04 15:27:33 -05001147/*
1148 * DMA
1149 * Starting with R600, the GPU has an asynchronous
1150 * DMA engine. The programming model is very similar
1151 * to the 3D engine (ring buffer, IBs, etc.), but the
1152 * DMA controller has it's own packet format that is
1153 * different form the PM4 format used by the 3D engine.
1154 * It supports copying data, writing embedded data,
1155 * solid fills, and a number of other things. It also
1156 * has support for tiling/detiling of buffers.
1157 * Cayman and newer support two asynchronous DMA engines.
1158 */
1159/**
1160 * cayman_dma_ring_ib_execute - Schedule an IB on the DMA engine
1161 *
1162 * @rdev: radeon_device pointer
1163 * @ib: IB object to schedule
1164 *
1165 * Schedule an IB in the DMA ring (cayman-SI).
1166 */
1167void cayman_dma_ring_ib_execute(struct radeon_device *rdev,
1168 struct radeon_ib *ib)
1169{
1170 struct radeon_ring *ring = &rdev->ring[ib->ring];
1171
1172 if (rdev->wb.enabled) {
1173 u32 next_rptr = ring->wptr + 4;
1174 while ((next_rptr & 7) != 5)
1175 next_rptr++;
1176 next_rptr += 3;
1177 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
1178 radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
1179 radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
1180 radeon_ring_write(ring, next_rptr);
1181 }
1182
1183 /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
1184 * Pad as necessary with NOPs.
1185 */
1186 while ((ring->wptr & 7) != 5)
1187 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
1188 radeon_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, ib->vm ? ib->vm->id : 0, 0));
1189 radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
1190 radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
1191
1192}
1193
1194/**
1195 * cayman_dma_stop - stop the async dma engines
1196 *
1197 * @rdev: radeon_device pointer
1198 *
1199 * Stop the async dma engines (cayman-SI).
1200 */
1201void cayman_dma_stop(struct radeon_device *rdev)
1202{
1203 u32 rb_cntl;
1204
1205 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
1206
1207 /* dma0 */
1208 rb_cntl = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
1209 rb_cntl &= ~DMA_RB_ENABLE;
1210 WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, rb_cntl);
1211
1212 /* dma1 */
1213 rb_cntl = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
1214 rb_cntl &= ~DMA_RB_ENABLE;
1215 WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, rb_cntl);
1216
1217 rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
1218 rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false;
1219}
1220
1221/**
1222 * cayman_dma_resume - setup and start the async dma engines
1223 *
1224 * @rdev: radeon_device pointer
1225 *
1226 * Set up the DMA ring buffers and enable them. (cayman-SI).
1227 * Returns 0 for success, error for failure.
1228 */
1229int cayman_dma_resume(struct radeon_device *rdev)
1230{
1231 struct radeon_ring *ring;
Michel Dänzerb3dfcb22013-01-24 19:02:01 +01001232 u32 rb_cntl, dma_cntl, ib_cntl;
Alex Deucherf60cbd12012-12-04 15:27:33 -05001233 u32 rb_bufsz;
1234 u32 reg_offset, wb_offset;
1235 int i, r;
1236
1237 /* Reset dma */
1238 WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA | SOFT_RESET_DMA1);
1239 RREG32(SRBM_SOFT_RESET);
1240 udelay(50);
1241 WREG32(SRBM_SOFT_RESET, 0);
1242
1243 for (i = 0; i < 2; i++) {
1244 if (i == 0) {
1245 ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
1246 reg_offset = DMA0_REGISTER_OFFSET;
1247 wb_offset = R600_WB_DMA_RPTR_OFFSET;
1248 } else {
1249 ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
1250 reg_offset = DMA1_REGISTER_OFFSET;
1251 wb_offset = CAYMAN_WB_DMA1_RPTR_OFFSET;
1252 }
1253
1254 WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL + reg_offset, 0);
1255 WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL + reg_offset, 0);
1256
1257 /* Set ring buffer size in dwords */
1258 rb_bufsz = drm_order(ring->ring_size / 4);
1259 rb_cntl = rb_bufsz << 1;
1260#ifdef __BIG_ENDIAN
1261 rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
1262#endif
1263 WREG32(DMA_RB_CNTL + reg_offset, rb_cntl);
1264
1265 /* Initialize the ring buffer's read and write pointers */
1266 WREG32(DMA_RB_RPTR + reg_offset, 0);
1267 WREG32(DMA_RB_WPTR + reg_offset, 0);
1268
1269 /* set the wb address whether it's enabled or not */
1270 WREG32(DMA_RB_RPTR_ADDR_HI + reg_offset,
1271 upper_32_bits(rdev->wb.gpu_addr + wb_offset) & 0xFF);
1272 WREG32(DMA_RB_RPTR_ADDR_LO + reg_offset,
1273 ((rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC));
1274
1275 if (rdev->wb.enabled)
1276 rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;
1277
1278 WREG32(DMA_RB_BASE + reg_offset, ring->gpu_addr >> 8);
1279
1280 /* enable DMA IBs */
Michel Dänzerb3dfcb22013-01-24 19:02:01 +01001281 ib_cntl = DMA_IB_ENABLE | CMD_VMID_FORCE;
1282#ifdef __BIG_ENDIAN
1283 ib_cntl |= DMA_IB_SWAP_ENABLE;
1284#endif
1285 WREG32(DMA_IB_CNTL + reg_offset, ib_cntl);
Alex Deucherf60cbd12012-12-04 15:27:33 -05001286
1287 dma_cntl = RREG32(DMA_CNTL + reg_offset);
1288 dma_cntl &= ~CTXEMPTY_INT_ENABLE;
1289 WREG32(DMA_CNTL + reg_offset, dma_cntl);
1290
1291 ring->wptr = 0;
1292 WREG32(DMA_RB_WPTR + reg_offset, ring->wptr << 2);
1293
1294 ring->rptr = RREG32(DMA_RB_RPTR + reg_offset) >> 2;
1295
1296 WREG32(DMA_RB_CNTL + reg_offset, rb_cntl | DMA_RB_ENABLE);
1297
1298 ring->ready = true;
1299
1300 r = radeon_ring_test(rdev, ring->idx, ring);
1301 if (r) {
1302 ring->ready = false;
1303 return r;
1304 }
1305 }
1306
1307 radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
1308
1309 return 0;
1310}
1311
1312/**
1313 * cayman_dma_fini - tear down the async dma engines
1314 *
1315 * @rdev: radeon_device pointer
1316 *
1317 * Stop the async dma engines and free the rings (cayman-SI).
1318 */
1319void cayman_dma_fini(struct radeon_device *rdev)
1320{
1321 cayman_dma_stop(rdev);
1322 radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
1323 radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]);
1324}
1325
Alex Deucher168757e2013-01-18 19:17:22 -05001326static u32 cayman_gpu_check_soft_reset(struct radeon_device *rdev)
1327{
1328 u32 reset_mask = 0;
1329 u32 tmp;
1330
1331 /* GRBM_STATUS */
1332 tmp = RREG32(GRBM_STATUS);
1333 if (tmp & (PA_BUSY | SC_BUSY |
1334 SH_BUSY | SX_BUSY |
1335 TA_BUSY | VGT_BUSY |
1336 DB_BUSY | CB_BUSY |
1337 GDS_BUSY | SPI_BUSY |
1338 IA_BUSY | IA_BUSY_NO_DMA))
1339 reset_mask |= RADEON_RESET_GFX;
1340
1341 if (tmp & (CF_RQ_PENDING | PF_RQ_PENDING |
1342 CP_BUSY | CP_COHERENCY_BUSY))
1343 reset_mask |= RADEON_RESET_CP;
1344
1345 if (tmp & GRBM_EE_BUSY)
1346 reset_mask |= RADEON_RESET_GRBM | RADEON_RESET_GFX | RADEON_RESET_CP;
1347
1348 /* DMA_STATUS_REG 0 */
1349 tmp = RREG32(DMA_STATUS_REG + DMA0_REGISTER_OFFSET);
1350 if (!(tmp & DMA_IDLE))
1351 reset_mask |= RADEON_RESET_DMA;
1352
1353 /* DMA_STATUS_REG 1 */
1354 tmp = RREG32(DMA_STATUS_REG + DMA1_REGISTER_OFFSET);
1355 if (!(tmp & DMA_IDLE))
1356 reset_mask |= RADEON_RESET_DMA1;
1357
1358 /* SRBM_STATUS2 */
1359 tmp = RREG32(SRBM_STATUS2);
1360 if (tmp & DMA_BUSY)
1361 reset_mask |= RADEON_RESET_DMA;
1362
1363 if (tmp & DMA1_BUSY)
1364 reset_mask |= RADEON_RESET_DMA1;
1365
1366 /* SRBM_STATUS */
1367 tmp = RREG32(SRBM_STATUS);
1368 if (tmp & (RLC_RQ_PENDING | RLC_BUSY))
1369 reset_mask |= RADEON_RESET_RLC;
1370
1371 if (tmp & IH_BUSY)
1372 reset_mask |= RADEON_RESET_IH;
1373
1374 if (tmp & SEM_BUSY)
1375 reset_mask |= RADEON_RESET_SEM;
1376
1377 if (tmp & GRBM_RQ_PENDING)
1378 reset_mask |= RADEON_RESET_GRBM;
1379
1380 if (tmp & VMC_BUSY)
1381 reset_mask |= RADEON_RESET_VMC;
1382
1383 if (tmp & (MCB_BUSY | MCB_NON_DISPLAY_BUSY |
1384 MCC_BUSY | MCD_BUSY))
1385 reset_mask |= RADEON_RESET_MC;
1386
1387 if (evergreen_is_display_hung(rdev))
1388 reset_mask |= RADEON_RESET_DISPLAY;
1389
1390 /* VM_L2_STATUS */
1391 tmp = RREG32(VM_L2_STATUS);
1392 if (tmp & L2_BUSY)
1393 reset_mask |= RADEON_RESET_VMC;
1394
Alex Deucherd808fc82013-02-28 10:03:08 -05001395 /* Skip MC reset as it's mostly likely not hung, just busy */
1396 if (reset_mask & RADEON_RESET_MC) {
1397 DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
1398 reset_mask &= ~RADEON_RESET_MC;
1399 }
1400
Alex Deucher168757e2013-01-18 19:17:22 -05001401 return reset_mask;
1402}
1403
1404static void cayman_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
Alex Deucher271d6fed2013-01-03 12:48:05 -05001405{
1406 struct evergreen_mc_save save;
Alex Deucher187e3592013-01-18 14:51:38 -05001407 u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
1408 u32 tmp;
Alex Deucher19fc42e2013-01-14 11:04:39 -05001409
Alex Deucher271d6fed2013-01-03 12:48:05 -05001410 if (reset_mask == 0)
Alex Deucher168757e2013-01-18 19:17:22 -05001411 return;
Alex Deucher271d6fed2013-01-03 12:48:05 -05001412
1413 dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
1414
Alex Deucher187e3592013-01-18 14:51:38 -05001415 evergreen_print_gpu_status_regs(rdev);
Alex Deucher271d6fed2013-01-03 12:48:05 -05001416 dev_info(rdev->dev, " VM_CONTEXT0_PROTECTION_FAULT_ADDR 0x%08X\n",
1417 RREG32(0x14F8));
1418 dev_info(rdev->dev, " VM_CONTEXT0_PROTECTION_FAULT_STATUS 0x%08X\n",
1419 RREG32(0x14D8));
1420 dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
1421 RREG32(0x14FC));
1422 dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
1423 RREG32(0x14DC));
1424
Alex Deucher187e3592013-01-18 14:51:38 -05001425 /* Disable CP parsing/prefetching */
1426 WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
1427
1428 if (reset_mask & RADEON_RESET_DMA) {
1429 /* dma0 */
1430 tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
1431 tmp &= ~DMA_RB_ENABLE;
1432 WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp);
Alex Deucher168757e2013-01-18 19:17:22 -05001433 }
Alex Deucher187e3592013-01-18 14:51:38 -05001434
Alex Deucher168757e2013-01-18 19:17:22 -05001435 if (reset_mask & RADEON_RESET_DMA1) {
Alex Deucher187e3592013-01-18 14:51:38 -05001436 /* dma1 */
1437 tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
1438 tmp &= ~DMA_RB_ENABLE;
1439 WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp);
1440 }
1441
Alex Deucher90fb8772013-01-23 18:59:17 -05001442 udelay(50);
1443
1444 evergreen_mc_stop(rdev, &save);
1445 if (evergreen_mc_wait_for_idle(rdev)) {
1446		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
1447 }
1448
Alex Deucher187e3592013-01-18 14:51:38 -05001449 if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE)) {
1450 grbm_soft_reset = SOFT_RESET_CB |
1451 SOFT_RESET_DB |
1452 SOFT_RESET_GDS |
1453 SOFT_RESET_PA |
1454 SOFT_RESET_SC |
1455 SOFT_RESET_SPI |
1456 SOFT_RESET_SH |
1457 SOFT_RESET_SX |
1458 SOFT_RESET_TC |
1459 SOFT_RESET_TA |
1460 SOFT_RESET_VGT |
1461 SOFT_RESET_IA;
1462 }
1463
1464 if (reset_mask & RADEON_RESET_CP) {
1465 grbm_soft_reset |= SOFT_RESET_CP | SOFT_RESET_VGT;
1466
1467 srbm_soft_reset |= SOFT_RESET_GRBM;
1468 }
Alex Deucher271d6fed2013-01-03 12:48:05 -05001469
1470 if (reset_mask & RADEON_RESET_DMA)
Alex Deucher168757e2013-01-18 19:17:22 -05001471 srbm_soft_reset |= SOFT_RESET_DMA;
1472
1473 if (reset_mask & RADEON_RESET_DMA1)
1474 srbm_soft_reset |= SOFT_RESET_DMA1;
1475
1476 if (reset_mask & RADEON_RESET_DISPLAY)
1477 srbm_soft_reset |= SOFT_RESET_DC;
1478
1479 if (reset_mask & RADEON_RESET_RLC)
1480 srbm_soft_reset |= SOFT_RESET_RLC;
1481
1482 if (reset_mask & RADEON_RESET_SEM)
1483 srbm_soft_reset |= SOFT_RESET_SEM;
1484
1485 if (reset_mask & RADEON_RESET_IH)
1486 srbm_soft_reset |= SOFT_RESET_IH;
1487
1488 if (reset_mask & RADEON_RESET_GRBM)
1489 srbm_soft_reset |= SOFT_RESET_GRBM;
1490
1491 if (reset_mask & RADEON_RESET_VMC)
1492 srbm_soft_reset |= SOFT_RESET_VMC;
1493
Alex Deucher24178ec2013-01-24 15:00:17 -05001494 if (!(rdev->flags & RADEON_IS_IGP)) {
1495 if (reset_mask & RADEON_RESET_MC)
1496 srbm_soft_reset |= SOFT_RESET_MC;
1497 }
Alex Deucher187e3592013-01-18 14:51:38 -05001498
1499 if (grbm_soft_reset) {
1500 tmp = RREG32(GRBM_SOFT_RESET);
1501 tmp |= grbm_soft_reset;
1502 dev_info(rdev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
1503 WREG32(GRBM_SOFT_RESET, tmp);
1504 tmp = RREG32(GRBM_SOFT_RESET);
1505
1506 udelay(50);
1507
1508 tmp &= ~grbm_soft_reset;
1509 WREG32(GRBM_SOFT_RESET, tmp);
1510 tmp = RREG32(GRBM_SOFT_RESET);
1511 }
1512
1513 if (srbm_soft_reset) {
1514 tmp = RREG32(SRBM_SOFT_RESET);
1515 tmp |= srbm_soft_reset;
1516 dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
1517 WREG32(SRBM_SOFT_RESET, tmp);
1518 tmp = RREG32(SRBM_SOFT_RESET);
1519
1520 udelay(50);
1521
1522 tmp &= ~srbm_soft_reset;
1523 WREG32(SRBM_SOFT_RESET, tmp);
1524 tmp = RREG32(SRBM_SOFT_RESET);
1525 }
Alex Deucher271d6fed2013-01-03 12:48:05 -05001526
1527 /* Wait a little for things to settle down */
1528 udelay(50);
1529
Alex Deucherb9952a82011-03-02 20:07:33 -05001530 evergreen_mc_resume(rdev, &save);
Alex Deucher187e3592013-01-18 14:51:38 -05001531 udelay(50);
Alex Deucher410a3412013-01-18 13:05:39 -05001532
Alex Deucher187e3592013-01-18 14:51:38 -05001533 evergreen_print_gpu_status_regs(rdev);
Alex Deucherb9952a82011-03-02 20:07:33 -05001534}
1535
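/**
 * cayman_asic_reset - soft reset the GPU
 *
 * @rdev: radeon_device pointer
 *
 * Check which blocks appear hung, flag the engine as hung in the BIOS
 * scratch register, soft reset the affected blocks and clear the hung
 * flag again if the GPU comes back idle (cayman/TN).
 * Returns 0.
 */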
1536int cayman_asic_reset(struct radeon_device *rdev)
1537{
Alex Deucher168757e2013-01-18 19:17:22 -05001538 u32 reset_mask;
1539
1540 reset_mask = cayman_gpu_check_soft_reset(rdev);
1541
1542 if (reset_mask)
1543 r600_set_bios_scratch_engine_hung(rdev, true);
1544
1545 cayman_gpu_soft_reset(rdev, reset_mask);
1546
1547 reset_mask = cayman_gpu_check_soft_reset(rdev);
1548
1549 if (!reset_mask)
1550 r600_set_bios_scratch_engine_hung(rdev, false);
1551
1552 return 0;
Alex Deucherb9952a82011-03-02 20:07:33 -05001553}
1554
Alex Deucherf60cbd12012-12-04 15:27:33 -05001555/**
Alex Deucher123bc182013-01-24 11:37:19 -05001556 * cayman_gfx_is_lockup - Check if the GFX engine is locked up
1557 *
1558 * @rdev: radeon_device pointer
1559 * @ring: radeon_ring structure holding ring information
1560 *
1561 * Check if the GFX engine is locked up.
1562 * Returns true if the engine appears to be locked up, false if not.
1563 */
1564bool cayman_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
1565{
1566 u32 reset_mask = cayman_gpu_check_soft_reset(rdev);
1567
1568 if (!(reset_mask & (RADEON_RESET_GFX |
1569 RADEON_RESET_COMPUTE |
1570 RADEON_RESET_CP))) {
1571 radeon_ring_lockup_update(ring);
1572 return false;
1573 }
1574 /* force CP activities */
1575 radeon_ring_force_activity(rdev, ring);
1576 return radeon_ring_test_lockup(rdev, ring);
1577}
1578
1579/**
Alex Deucherf60cbd12012-12-04 15:27:33 -05001580 * cayman_dma_is_lockup - Check if the DMA engine is locked up
1581 *
1582 * @rdev: radeon_device pointer
1583 * @ring: radeon_ring structure holding ring information
1584 *
Alex Deucher123bc182013-01-24 11:37:19 -05001585 * Check if the async DMA engine is locked up.
Alex Deucherf60cbd12012-12-04 15:27:33 -05001586 * Returns true if the engine appears to be locked up, false if not.
1587 */
1588bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
1589{
Alex Deucher123bc182013-01-24 11:37:19 -05001590 u32 reset_mask = cayman_gpu_check_soft_reset(rdev);
1591 u32 mask;
Alex Deucherf60cbd12012-12-04 15:27:33 -05001592
1593 if (ring->idx == R600_RING_TYPE_DMA_INDEX)
Alex Deucher123bc182013-01-24 11:37:19 -05001594 mask = RADEON_RESET_DMA;
Alex Deucherf60cbd12012-12-04 15:27:33 -05001595 else
Alex Deucher123bc182013-01-24 11:37:19 -05001596 mask = RADEON_RESET_DMA1;
1597
1598 if (!(reset_mask & mask)) {
Alex Deucherf60cbd12012-12-04 15:27:33 -05001599 radeon_ring_lockup_update(ring);
1600 return false;
1601 }
1602 /* force ring activities */
1603 radeon_ring_force_activity(rdev, ring);
1604 return radeon_ring_test_lockup(rdev, ring);
1605}
1606
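/**
 * cayman_startup - program the asic to a functional state
 *
 * @rdev: radeon_device pointer
 *
 * Load the microcode, program the MC and GART, bring up the rings,
 * blitter, interrupts, writeback, IB pool and VM manager, and start
 * the CP and DMA engines (cayman/TN).
 * Returns 0 on success, error code on failure.
 */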
Alex Deucher755d8192011-03-02 20:07:34 -05001607static int cayman_startup(struct radeon_device *rdev)
1608{
Christian Könige32eb502011-10-23 12:56:27 +02001609 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
Alex Deucher755d8192011-03-02 20:07:34 -05001610 int r;
1611
Ilija Hadzicb07759b2011-09-20 10:22:58 -04001612 /* enable pcie gen2 link */
1613 evergreen_pcie_gen2_enable(rdev);
1614
Alex Deucherc420c742012-03-20 17:18:39 -04001615 if (rdev->flags & RADEON_IS_IGP) {
1616 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
1617 r = ni_init_microcode(rdev);
1618 if (r) {
1619 DRM_ERROR("Failed to load firmware!\n");
1620 return r;
1621 }
1622 }
1623 } else {
1624 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
1625 r = ni_init_microcode(rdev);
1626 if (r) {
1627 DRM_ERROR("Failed to load firmware!\n");
1628 return r;
1629 }
1630 }
1631
1632 r = ni_mc_load_microcode(rdev);
Alex Deucher755d8192011-03-02 20:07:34 -05001633 if (r) {
Alex Deucherc420c742012-03-20 17:18:39 -04001634 DRM_ERROR("Failed to load MC firmware!\n");
Alex Deucher755d8192011-03-02 20:07:34 -05001635 return r;
1636 }
1637 }
Alex Deucher755d8192011-03-02 20:07:34 -05001638
Alex Deucher16cdf042011-10-28 10:30:02 -04001639 r = r600_vram_scratch_init(rdev);
1640 if (r)
1641 return r;
1642
Alex Deucher755d8192011-03-02 20:07:34 -05001643 evergreen_mc_program(rdev);
1644 r = cayman_pcie_gart_enable(rdev);
1645 if (r)
1646 return r;
1647 cayman_gpu_init(rdev);
1648
Alex Deuchercb92d452011-05-25 16:39:00 -04001649 r = evergreen_blit_init(rdev);
Alex Deucher755d8192011-03-02 20:07:34 -05001650 if (r) {
Ilija Hadzicfb3d9e92011-10-12 23:29:41 -04001651 r600_blit_fini(rdev);
Alex Deucher27cd7762012-02-23 17:53:42 -05001652 rdev->asic->copy.copy = NULL;
Alex Deucher755d8192011-03-02 20:07:34 -05001653 dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
1654 }
Alex Deucher755d8192011-03-02 20:07:34 -05001655
Alex Deucherc420c742012-03-20 17:18:39 -04001656 /* allocate rlc buffers */
1657 if (rdev->flags & RADEON_IS_IGP) {
1658 r = si_rlc_init(rdev);
1659 if (r) {
1660 DRM_ERROR("Failed to init rlc BOs!\n");
1661 return r;
1662 }
1663 }
1664
Alex Deucher755d8192011-03-02 20:07:34 -05001665 /* allocate wb buffer */
1666 r = radeon_wb_init(rdev);
1667 if (r)
1668 return r;
1669
Jerome Glisse30eb77f2011-11-20 20:45:34 +00001670 r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
1671 if (r) {
1672 dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
1673 return r;
1674 }
1675
1676 r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
1677 if (r) {
1678 dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
1679 return r;
1680 }
1681
1682 r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
1683 if (r) {
1684 dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
1685 return r;
1686 }
1687
Alex Deucherf60cbd12012-12-04 15:27:33 -05001688 r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
1689 if (r) {
1690 dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
1691 return r;
1692 }
1693
1694 r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
1695 if (r) {
1696 dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
1697 return r;
1698 }
1699
Alex Deucher755d8192011-03-02 20:07:34 -05001700 /* Enable IRQ */
1701 r = r600_irq_init(rdev);
1702 if (r) {
1703 DRM_ERROR("radeon: IH init failed (%d).\n", r);
1704 radeon_irq_kms_fini(rdev);
1705 return r;
1706 }
1707 evergreen_irq_set(rdev);
1708
Christian Könige32eb502011-10-23 12:56:27 +02001709 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
Alex Deucher78c55602011-11-17 14:25:56 -05001710 CP_RB0_RPTR, CP_RB0_WPTR,
1711 0, 0xfffff, RADEON_CP_PACKET2);
Alex Deucher755d8192011-03-02 20:07:34 -05001712 if (r)
1713 return r;
Alex Deucherf60cbd12012-12-04 15:27:33 -05001714
1715 ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
1716 r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
1717 DMA_RB_RPTR + DMA0_REGISTER_OFFSET,
1718 DMA_RB_WPTR + DMA0_REGISTER_OFFSET,
1719 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
1720 if (r)
1721 return r;
1722
1723 ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
1724 r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
1725 DMA_RB_RPTR + DMA1_REGISTER_OFFSET,
1726 DMA_RB_WPTR + DMA1_REGISTER_OFFSET,
1727 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
1728 if (r)
1729 return r;
1730
Alex Deucher755d8192011-03-02 20:07:34 -05001731 r = cayman_cp_load_microcode(rdev);
1732 if (r)
1733 return r;
1734 r = cayman_cp_resume(rdev);
1735 if (r)
1736 return r;
1737
Alex Deucherf60cbd12012-12-04 15:27:33 -05001738 r = cayman_dma_resume(rdev);
1739 if (r)
1740 return r;
1741
Christian König2898c342012-07-05 11:55:34 +02001742 r = radeon_ib_pool_init(rdev);
1743 if (r) {
1744 dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
Jerome Glisseb15ba512011-11-15 11:48:34 -05001745 return r;
Christian König2898c342012-07-05 11:55:34 +02001746 }
Jerome Glisseb15ba512011-11-15 11:48:34 -05001747
Christian Königc6105f22012-07-05 14:32:00 +02001748 r = radeon_vm_manager_init(rdev);
1749 if (r) {
1750 dev_err(rdev->dev, "vm manager initialization failed (%d).\n", r);
Jerome Glisse721604a2012-01-05 22:11:05 -05001751 return r;
Christian Königc6105f22012-07-05 14:32:00 +02001752 }
Jerome Glisse721604a2012-01-05 22:11:05 -05001753
Rafał Miłecki6b53a052012-06-11 12:34:01 +02001754 r = r600_audio_init(rdev);
1755 if (r)
1756 return r;
1757
Alex Deucher755d8192011-03-02 20:07:34 -05001758 return 0;
1759}
1760
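/**
 * cayman_resume - resume the asic after suspend
 *
 * @rdev: radeon_device pointer
 *
 * Re-post the card via the ATOM BIOS and run the startup sequence
 * again (cayman/TN).
 * Returns 0 on success, error code on failure.
 */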
1761int cayman_resume(struct radeon_device *rdev)
1762{
1763 int r;
1764
1765	/* Do not reset the GPU before posting; on rv770 hw, unlike on r500 hw,
1766	 * posting will perform the necessary tasks to bring the GPU back into
1767	 * a good state.
1768	 */
1769 /* post card */
1770 atom_asic_init(rdev->mode_info.atom_context);
1771
Jerome Glisseb15ba512011-11-15 11:48:34 -05001772 rdev->accel_working = true;
Alex Deucher755d8192011-03-02 20:07:34 -05001773 r = cayman_startup(rdev);
1774 if (r) {
1775 DRM_ERROR("cayman startup failed on resume\n");
Jerome Glisse6b7746e2012-02-20 17:57:20 -05001776 rdev->accel_working = false;
Alex Deucher755d8192011-03-02 20:07:34 -05001777 return r;
1778 }
Alex Deucher755d8192011-03-02 20:07:34 -05001779 return r;
Alex Deucher755d8192011-03-02 20:07:34 -05001780}
1781
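/**
 * cayman_suspend - shut down the asic for suspend
 *
 * @rdev: radeon_device pointer
 *
 * Tear down audio and the VM manager, halt the CP and DMA engines,
 * suspend interrupts and disable writeback and the GART (cayman/TN).
 * Returns 0.
 */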
1782int cayman_suspend(struct radeon_device *rdev)
1783{
Rafał Miłecki6b53a052012-06-11 12:34:01 +02001784 r600_audio_fini(rdev);
Alex Deucherfa3daf92013-03-11 15:32:26 -04001785 radeon_vm_manager_fini(rdev);
Alex Deucher755d8192011-03-02 20:07:34 -05001786 cayman_cp_enable(rdev, false);
Alex Deucherf60cbd12012-12-04 15:27:33 -05001787 cayman_dma_stop(rdev);
Alex Deucher755d8192011-03-02 20:07:34 -05001788 evergreen_irq_suspend(rdev);
1789 radeon_wb_disable(rdev);
1790 cayman_pcie_gart_disable(rdev);
Alex Deucher755d8192011-03-02 20:07:34 -05001791 return 0;
1792}
1793
1794/* The plan is to move initialization into this function and use
1795 * helper functions so that radeon_device_init does little more
1796 * than call the asic specific functions. This should also
1797 * allow us to remove a number of callback functions
1798 * like vram_info.
1799 */
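/**
 * cayman_init - asic specific driver and hw init
 *
 * @rdev: radeon_device pointer
 *
 * Read and post the BIOS, set up the clocks, fences, memory controller,
 * rings, IH and GART, then run the startup sequence (cayman/TN).
 * Returns 0 on success, error code on failure.
 */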
1800int cayman_init(struct radeon_device *rdev)
1801{
Christian Könige32eb502011-10-23 12:56:27 +02001802 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
Alex Deucher755d8192011-03-02 20:07:34 -05001803 int r;
1804
Alex Deucher755d8192011-03-02 20:07:34 -05001805 /* Read BIOS */
1806 if (!radeon_get_bios(rdev)) {
1807 if (ASIC_IS_AVIVO(rdev))
1808 return -EINVAL;
1809 }
1810 /* Must be an ATOMBIOS */
1811 if (!rdev->is_atom_bios) {
1812 dev_err(rdev->dev, "Expecting atombios for cayman GPU\n");
1813 return -EINVAL;
1814 }
1815 r = radeon_atombios_init(rdev);
1816 if (r)
1817 return r;
1818
1819 /* Post card if necessary */
1820 if (!radeon_card_posted(rdev)) {
1821 if (!rdev->bios) {
1822 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
1823 return -EINVAL;
1824 }
1825 DRM_INFO("GPU not posted. posting now...\n");
1826 atom_asic_init(rdev->mode_info.atom_context);
1827 }
1828 /* Initialize scratch registers */
1829 r600_scratch_init(rdev);
1830 /* Initialize surface registers */
1831 radeon_surface_init(rdev);
1832 /* Initialize clocks */
1833 radeon_get_clock_info(rdev->ddev);
1834 /* Fence driver */
Jerome Glisse30eb77f2011-11-20 20:45:34 +00001835 r = radeon_fence_driver_init(rdev);
Alex Deucher755d8192011-03-02 20:07:34 -05001836 if (r)
1837 return r;
1838 /* initialize memory controller */
1839 r = evergreen_mc_init(rdev);
1840 if (r)
1841 return r;
1842 /* Memory manager */
1843 r = radeon_bo_init(rdev);
1844 if (r)
1845 return r;
1846
1847 r = radeon_irq_kms_init(rdev);
1848 if (r)
1849 return r;
1850
Christian Könige32eb502011-10-23 12:56:27 +02001851 ring->ring_obj = NULL;
1852 r600_ring_init(rdev, ring, 1024 * 1024);
Alex Deucher755d8192011-03-02 20:07:34 -05001853
Alex Deucherf60cbd12012-12-04 15:27:33 -05001854 ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
1855 ring->ring_obj = NULL;
1856 r600_ring_init(rdev, ring, 64 * 1024);
1857
1858 ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
1859 ring->ring_obj = NULL;
1860 r600_ring_init(rdev, ring, 64 * 1024);
1861
Alex Deucher755d8192011-03-02 20:07:34 -05001862 rdev->ih.ring_obj = NULL;
1863 r600_ih_ring_init(rdev, 64 * 1024);
1864
1865 r = r600_pcie_gart_init(rdev);
1866 if (r)
1867 return r;
1868
1869 rdev->accel_working = true;
1870 r = cayman_startup(rdev);
1871 if (r) {
1872 dev_err(rdev->dev, "disabling GPU acceleration\n");
1873 cayman_cp_fini(rdev);
Alex Deucherf60cbd12012-12-04 15:27:33 -05001874 cayman_dma_fini(rdev);
Alex Deucher755d8192011-03-02 20:07:34 -05001875 r600_irq_fini(rdev);
Alex Deucherc420c742012-03-20 17:18:39 -04001876 if (rdev->flags & RADEON_IS_IGP)
1877 si_rlc_fini(rdev);
Alex Deucher755d8192011-03-02 20:07:34 -05001878 radeon_wb_fini(rdev);
Christian König2898c342012-07-05 11:55:34 +02001879 radeon_ib_pool_fini(rdev);
Jerome Glisse721604a2012-01-05 22:11:05 -05001880 radeon_vm_manager_fini(rdev);
Alex Deucher755d8192011-03-02 20:07:34 -05001881 radeon_irq_kms_fini(rdev);
1882 cayman_pcie_gart_fini(rdev);
1883 rdev->accel_working = false;
1884 }
Alex Deucher755d8192011-03-02 20:07:34 -05001885
1886 /* Don't start up if the MC ucode is missing.
1887 * The default clocks and voltages before the MC ucode
1888	 * is loaded are not sufficient for advanced operations.
Alex Deucherc420c742012-03-20 17:18:39 -04001889 *
1890 * We can skip this check for TN, because there is no MC
1891 * ucode.
Alex Deucher755d8192011-03-02 20:07:34 -05001892 */
Alex Deucherc420c742012-03-20 17:18:39 -04001893 if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) {
Alex Deucher755d8192011-03-02 20:07:34 -05001894 DRM_ERROR("radeon: MC ucode required for NI+.\n");
1895 return -EINVAL;
1896 }
1897
1898 return 0;
1899}
1900
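/**
 * cayman_fini - asic specific driver and hw teardown
 *
 * @rdev: radeon_device pointer
 *
 * Tear down the blitter, CP and DMA engines, interrupts, writeback,
 * VM manager, IB pool, GART and the remaining driver state (cayman/TN).
 */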
1901void cayman_fini(struct radeon_device *rdev)
1902{
Ilija Hadzicfb3d9e92011-10-12 23:29:41 -04001903 r600_blit_fini(rdev);
Alex Deucher755d8192011-03-02 20:07:34 -05001904 cayman_cp_fini(rdev);
Alex Deucherf60cbd12012-12-04 15:27:33 -05001905 cayman_dma_fini(rdev);
Alex Deucher755d8192011-03-02 20:07:34 -05001906 r600_irq_fini(rdev);
Alex Deucherc420c742012-03-20 17:18:39 -04001907 if (rdev->flags & RADEON_IS_IGP)
1908 si_rlc_fini(rdev);
Alex Deucher755d8192011-03-02 20:07:34 -05001909 radeon_wb_fini(rdev);
Jerome Glisse721604a2012-01-05 22:11:05 -05001910 radeon_vm_manager_fini(rdev);
Christian König2898c342012-07-05 11:55:34 +02001911 radeon_ib_pool_fini(rdev);
Alex Deucher755d8192011-03-02 20:07:34 -05001912 radeon_irq_kms_fini(rdev);
1913 cayman_pcie_gart_fini(rdev);
Alex Deucher16cdf042011-10-28 10:30:02 -04001914 r600_vram_scratch_fini(rdev);
Alex Deucher755d8192011-03-02 20:07:34 -05001915 radeon_gem_fini(rdev);
1916 radeon_fence_driver_fini(rdev);
1917 radeon_bo_fini(rdev);
1918 radeon_atombios_fini(rdev);
1919 kfree(rdev->bios);
1920 rdev->bios = NULL;
1921}
1922
Jerome Glisse721604a2012-01-05 22:11:05 -05001923/*
1924 * vm
1925 */
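/**
 * cayman_vm_init - initialize the vm manager parameters
 *
 * @rdev: radeon_device pointer
 *
 * Set the number of VMs and the VRAM base offset used by the VM
 * manager; on IGP parts the offset is read from FUS_MC_VM_FB_OFFSET
 * and left-shifted by 22 bits (cayman/TN).
 * Returns 0.
 */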
1926int cayman_vm_init(struct radeon_device *rdev)
1927{
1928 /* number of VMs */
1929 rdev->vm_manager.nvm = 8;
1930 /* base offset of vram pages */
Alex Deuchere71270f2012-03-20 17:18:38 -04001931 if (rdev->flags & RADEON_IS_IGP) {
1932 u64 tmp = RREG32(FUS_MC_VM_FB_OFFSET);
1933 tmp <<= 22;
1934 rdev->vm_manager.vram_base_offset = tmp;
1935 } else
1936 rdev->vm_manager.vram_base_offset = 0;
Jerome Glisse721604a2012-01-05 22:11:05 -05001937 return 0;
1938}
1939
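/**
 * cayman_vm_fini - tear down the vm manager
 *
 * @rdev: radeon_device pointer
 *
 * Currently a no-op (cayman/TN).
 */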
1940void cayman_vm_fini(struct radeon_device *rdev)
1941{
1942}
1943
Christian Königdce34bf2012-09-17 19:36:18 +02001944#define R600_ENTRY_VALID (1 << 0)
Jerome Glisse721604a2012-01-05 22:11:05 -05001945#define R600_PTE_SYSTEM (1 << 1)
1946#define R600_PTE_SNOOPED (1 << 2)
1947#define R600_PTE_READABLE (1 << 5)
1948#define R600_PTE_WRITEABLE (1 << 6)
1949
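/**
 * cayman_vm_page_flags - translate generic VM page flags to hw flags
 *
 * @rdev: radeon_device pointer
 * @flags: RADEON_VM_PAGE_* flags
 *
 * Convert the generic RADEON_VM_PAGE_* flags into the R600_PTE_*
 * bits used in the hardware page table entries (cayman/TN).
 * Returns the hardware page table entry flags.
 */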
Christian König089a7862012-08-11 11:54:05 +02001950uint32_t cayman_vm_page_flags(struct radeon_device *rdev, uint32_t flags)
Jerome Glisse721604a2012-01-05 22:11:05 -05001951{
1952 uint32_t r600_flags = 0;
Christian Königdce34bf2012-09-17 19:36:18 +02001953 r600_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_ENTRY_VALID : 0;
Jerome Glisse721604a2012-01-05 22:11:05 -05001954 r600_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0;
1955 r600_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0;
1956 if (flags & RADEON_VM_PAGE_SYSTEM) {
1957 r600_flags |= R600_PTE_SYSTEM;
1958 r600_flags |= (flags & RADEON_VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0;
1959 }
1960 return r600_flags;
1961}
1962
Alex Deucher7a083292012-08-31 13:51:21 -04001963/**
1964 * cayman_vm_set_page - update the page tables using the CP or DMA
1965 *
1966 * @rdev: radeon_device pointer
Alex Deucher43f12142013-02-01 17:32:42 +01001967 * @ib: indirect buffer to fill with commands
Christian Königdce34bf2012-09-17 19:36:18 +02001968 * @pe: addr of the page entry
1969 * @addr: dst addr to write into pe
1970 * @count: number of page entries to update
1971 * @incr: increase next addr by incr bytes
1972 * @flags: access flags
Alex Deucher7a083292012-08-31 13:51:21 -04001973 *
Alex Deucher43f12142013-02-01 17:32:42 +01001974 * Update the page tables using the CP or the async DMA engine (cayman/TN).
Alex Deucher7a083292012-08-31 13:51:21 -04001975 */
Alex Deucher43f12142013-02-01 17:32:42 +01001976void cayman_vm_set_page(struct radeon_device *rdev,
1977 struct radeon_ib *ib,
1978 uint64_t pe,
Christian Königdce34bf2012-09-17 19:36:18 +02001979 uint64_t addr, unsigned count,
1980 uint32_t incr, uint32_t flags)
Jerome Glisse721604a2012-01-05 22:11:05 -05001981{
Christian Königdce34bf2012-09-17 19:36:18 +02001982 uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
Alex Deucher3b6b59b2012-10-22 12:19:01 -04001983 uint64_t value;
1984 unsigned ndw;
Jerome Glisse721604a2012-01-05 22:11:05 -05001985
Alex Deucher3b6b59b2012-10-22 12:19:01 -04001986 if (rdev->asic->vm.pt_ring_index == RADEON_RING_TYPE_GFX_INDEX) {
1987 while (count) {
1988 ndw = 1 + count * 2;
1989 if (ndw > 0x3FFF)
1990 ndw = 0x3FFF;
Christian König089a7862012-08-11 11:54:05 +02001991
Alex Deucher43f12142013-02-01 17:32:42 +01001992 ib->ptr[ib->length_dw++] = PACKET3(PACKET3_ME_WRITE, ndw);
1993 ib->ptr[ib->length_dw++] = pe;
1994 ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
Alex Deucher3b6b59b2012-10-22 12:19:01 -04001995 for (; ndw > 1; ndw -= 2, --count, pe += 8) {
1996 if (flags & RADEON_VM_PAGE_SYSTEM) {
1997 value = radeon_vm_map_gart(rdev, addr);
1998 value &= 0xFFFFFFFFFFFFF000ULL;
1999 } else if (flags & RADEON_VM_PAGE_VALID) {
2000 value = addr;
2001 } else {
2002 value = 0;
2003 }
Christian Königf9fdffa2012-10-22 17:42:36 +02002004 addr += incr;
Alex Deucher3b6b59b2012-10-22 12:19:01 -04002005 value |= r600_flags;
Alex Deucher43f12142013-02-01 17:32:42 +01002006 ib->ptr[ib->length_dw++] = value;
2007 ib->ptr[ib->length_dw++] = upper_32_bits(value);
Christian Königf9fdffa2012-10-22 17:42:36 +02002008 }
Alex Deucher3b6b59b2012-10-22 12:19:01 -04002009 }
2010 } else {
2011 while (count) {
2012 ndw = count * 2;
2013 if (ndw > 0xFFFFE)
2014 ndw = 0xFFFFE;
Christian Königf9fdffa2012-10-22 17:42:36 +02002015
Alex Deucher3b6b59b2012-10-22 12:19:01 -04002016 /* for non-physically contiguous pages (system) */
Alex Deucher43f12142013-02-01 17:32:42 +01002017 ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, ndw);
2018 ib->ptr[ib->length_dw++] = pe;
2019 ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
Alex Deucher3b6b59b2012-10-22 12:19:01 -04002020 for (; ndw > 0; ndw -= 2, --count, pe += 8) {
2021 if (flags & RADEON_VM_PAGE_SYSTEM) {
2022 value = radeon_vm_map_gart(rdev, addr);
2023 value &= 0xFFFFFFFFFFFFF000ULL;
2024 } else if (flags & RADEON_VM_PAGE_VALID) {
2025 value = addr;
2026 } else {
2027 value = 0;
2028 }
2029 addr += incr;
2030 value |= r600_flags;
Alex Deucher43f12142013-02-01 17:32:42 +01002031 ib->ptr[ib->length_dw++] = value;
2032 ib->ptr[ib->length_dw++] = upper_32_bits(value);
Alex Deucher3b6b59b2012-10-22 12:19:01 -04002033 }
Christian König2a6f1ab2012-08-11 15:00:30 +02002034 }
Alex Deucher43f12142013-02-01 17:32:42 +01002035 while (ib->length_dw & 0x7)
2036 ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0);
Christian König2a6f1ab2012-08-11 15:00:30 +02002037 }
Jerome Glisse721604a2012-01-05 22:11:05 -05002038}
Christian König9b40e5d2012-08-08 12:22:43 +02002039
Alex Deucher7a083292012-08-31 13:51:21 -04002040/**
2041 * cayman_vm_flush - vm flush using the CP
2042 *
2043 * @rdev: radeon_device pointer
2044 *
2045 * Update the page table base and flush the VM TLB
2046 * using the CP (cayman-si).
2047 */
Alex Deucher498522b2012-10-02 14:43:38 -04002048void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
Christian König9b40e5d2012-08-08 12:22:43 +02002049{
Alex Deucher498522b2012-10-02 14:43:38 -04002050 struct radeon_ring *ring = &rdev->ring[ridx];
Christian König9b40e5d2012-08-08 12:22:43 +02002051
Christian Königee60e292012-08-09 16:21:08 +02002052 if (vm == NULL)
Christian König9b40e5d2012-08-08 12:22:43 +02002053 return;
2054
Christian Königee60e292012-08-09 16:21:08 +02002055 radeon_ring_write(ring, PACKET0(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2), 0));
Dmitry Cherkasovfa87e622012-09-17 19:36:19 +02002056 radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
Christian Königee60e292012-08-09 16:21:08 +02002057
Christian König9b40e5d2012-08-08 12:22:43 +02002058 /* flush hdp cache */
2059 radeon_ring_write(ring, PACKET0(HDP_MEM_COHERENCY_FLUSH_CNTL, 0));
2060 radeon_ring_write(ring, 0x1);
2061
2062	/* bits 0-7 are the VM contexts 0-7 */
2063 radeon_ring_write(ring, PACKET0(VM_INVALIDATE_REQUEST, 0));
Alex Deucher498522b2012-10-02 14:43:38 -04002064 radeon_ring_write(ring, 1 << vm->id);
Christian König58f8cf52012-10-22 17:42:35 +02002065
2066 /* sync PFP to ME, otherwise we might get invalid PFP reads */
2067 radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
2068 radeon_ring_write(ring, 0x0);
Alex Deucher0af62b02011-01-06 21:19:31 -05002069}
Alex Deucherf60cbd12012-12-04 15:27:33 -05002070
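/**
 * cayman_dma_vm_flush - vm flush using the async DMA engine
 *
 * @rdev: radeon_device pointer
 * @ridx: radeon ring index
 * @vm: radeon_vm pointer
 *
 * Update the page table base and flush the VM TLB using SRBM writes
 * on the DMA ring (cayman/TN).
 */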
2071void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
2072{
2073 struct radeon_ring *ring = &rdev->ring[ridx];
2074
2075 if (vm == NULL)
2076 return;
2077
2078 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
2079 radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2));
2080 radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
2081
2082 /* flush hdp cache */
2083 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
2084 radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
2085 radeon_ring_write(ring, 1);
2086
2087 /* bits 0-7 are the VM contexts0-7 */
2088	/* bits 0-7 are the VM contexts 0-7 */
2089 radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
2090 radeon_ring_write(ring, 1 << vm->id);
2091}
2092