/*
 * Copyright 2010 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <linux/firmware.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include <drm/radeon_drm.h>
#include "nid.h"
#include "atom.h"
#include "ni_reg.h"
#include "cayman_blit_shaders.h"

extern bool evergreen_is_display_hung(struct radeon_device *rdev);
extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev);
extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save);
extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save);
extern int evergreen_mc_wait_for_idle(struct radeon_device *rdev);
extern void evergreen_mc_program(struct radeon_device *rdev);
extern void evergreen_irq_suspend(struct radeon_device *rdev);
extern int evergreen_mc_init(struct radeon_device *rdev);
extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev);
extern void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
extern void si_rlc_fini(struct radeon_device *rdev);
extern int si_rlc_init(struct radeon_device *rdev);

#define EVERGREEN_PFP_UCODE_SIZE 1120
#define EVERGREEN_PM4_UCODE_SIZE 1376
#define EVERGREEN_RLC_UCODE_SIZE 768
#define BTC_MC_UCODE_SIZE 6024

#define CAYMAN_PFP_UCODE_SIZE 2176
#define CAYMAN_PM4_UCODE_SIZE 2176
#define CAYMAN_RLC_UCODE_SIZE 1024
#define CAYMAN_MC_UCODE_SIZE 6037

#define ARUBA_RLC_UCODE_SIZE 1536

/* Firmware Names */
MODULE_FIRMWARE("radeon/BARTS_pfp.bin");
MODULE_FIRMWARE("radeon/BARTS_me.bin");
MODULE_FIRMWARE("radeon/BARTS_mc.bin");
MODULE_FIRMWARE("radeon/BTC_rlc.bin");
MODULE_FIRMWARE("radeon/TURKS_pfp.bin");
MODULE_FIRMWARE("radeon/TURKS_me.bin");
MODULE_FIRMWARE("radeon/TURKS_mc.bin");
MODULE_FIRMWARE("radeon/CAICOS_pfp.bin");
MODULE_FIRMWARE("radeon/CAICOS_me.bin");
MODULE_FIRMWARE("radeon/CAICOS_mc.bin");
MODULE_FIRMWARE("radeon/CAYMAN_pfp.bin");
MODULE_FIRMWARE("radeon/CAYMAN_me.bin");
MODULE_FIRMWARE("radeon/CAYMAN_mc.bin");
MODULE_FIRMWARE("radeon/CAYMAN_rlc.bin");
MODULE_FIRMWARE("radeon/ARUBA_pfp.bin");
MODULE_FIRMWARE("radeon/ARUBA_me.bin");
MODULE_FIRMWARE("radeon/ARUBA_rlc.bin");

#define BTC_IO_MC_REGS_SIZE 29

static const u32 barts_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
	{0x00000077, 0xff010100},
	{0x00000078, 0x00000000},
	{0x00000079, 0x00001434},
	{0x0000007a, 0xcc08ec08},
	{0x0000007b, 0x00040000},
	{0x0000007c, 0x000080c0},
	{0x0000007d, 0x09000000},
	{0x0000007e, 0x00210404},
	{0x00000081, 0x08a8e800},
	{0x00000082, 0x00030444},
	{0x00000083, 0x00000000},
	{0x00000085, 0x00000001},
	{0x00000086, 0x00000002},
	{0x00000087, 0x48490000},
	{0x00000088, 0x20244647},
	{0x00000089, 0x00000005},
	{0x0000008b, 0x66030000},
	{0x0000008c, 0x00006603},
	{0x0000008d, 0x00000100},
	{0x0000008f, 0x00001c0a},
	{0x00000090, 0xff000001},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00946a00}
};

static const u32 turks_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
	{0x00000077, 0xff010100},
	{0x00000078, 0x00000000},
	{0x00000079, 0x00001434},
	{0x0000007a, 0xcc08ec08},
	{0x0000007b, 0x00040000},
	{0x0000007c, 0x000080c0},
	{0x0000007d, 0x09000000},
	{0x0000007e, 0x00210404},
	{0x00000081, 0x08a8e800},
	{0x00000082, 0x00030444},
	{0x00000083, 0x00000000},
	{0x00000085, 0x00000001},
	{0x00000086, 0x00000002},
	{0x00000087, 0x48490000},
	{0x00000088, 0x20244647},
	{0x00000089, 0x00000005},
	{0x0000008b, 0x66030000},
	{0x0000008c, 0x00006603},
	{0x0000008d, 0x00000100},
	{0x0000008f, 0x00001c0a},
	{0x00000090, 0xff000001},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00936a00}
};

static const u32 caicos_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
	{0x00000077, 0xff010100},
	{0x00000078, 0x00000000},
	{0x00000079, 0x00001434},
	{0x0000007a, 0xcc08ec08},
	{0x0000007b, 0x00040000},
	{0x0000007c, 0x000080c0},
	{0x0000007d, 0x09000000},
	{0x0000007e, 0x00210404},
	{0x00000081, 0x08a8e800},
	{0x00000082, 0x00030444},
	{0x00000083, 0x00000000},
	{0x00000085, 0x00000001},
	{0x00000086, 0x00000002},
	{0x00000087, 0x48490000},
	{0x00000088, 0x20244647},
	{0x00000089, 0x00000005},
	{0x0000008b, 0x66030000},
	{0x0000008c, 0x00006603},
	{0x0000008d, 0x00000100},
	{0x0000008f, 0x00001c0a},
	{0x00000090, 0xff000001},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00916a00}
};

static const u32 cayman_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
	{0x00000077, 0xff010100},
	{0x00000078, 0x00000000},
	{0x00000079, 0x00001434},
	{0x0000007a, 0xcc08ec08},
	{0x0000007b, 0x00040000},
	{0x0000007c, 0x000080c0},
	{0x0000007d, 0x09000000},
	{0x0000007e, 0x00210404},
	{0x00000081, 0x08a8e800},
	{0x00000082, 0x00030444},
	{0x00000083, 0x00000000},
	{0x00000085, 0x00000001},
	{0x00000086, 0x00000002},
	{0x00000087, 0x48490000},
	{0x00000088, 0x20244647},
	{0x00000089, 0x00000005},
	{0x0000008b, 0x66030000},
	{0x0000008c, 0x00006603},
	{0x0000008d, 0x00000100},
	{0x0000008f, 0x00001c0a},
	{0x00000090, 0xff000001},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00976b00}
};

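/**
 * ni_mc_load_microcode - load MC ucode into the hw
 *
 * @rdev: radeon_device pointer
 *
 * Load the GDDR5 MC ucode into the hw (BARTS/TURKS/CAICOS/CAYMAN).
 * Returns 0 on success, -EINVAL if no MC firmware has been loaded.
 */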
int ni_mc_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	u32 mem_type, running, blackout = 0;
	u32 *io_mc_regs;
	int i, ucode_size, regs_size;

	if (!rdev->mc_fw)
		return -EINVAL;

	switch (rdev->family) {
	case CHIP_BARTS:
		io_mc_regs = (u32 *)&barts_io_mc_regs;
		ucode_size = BTC_MC_UCODE_SIZE;
		regs_size = BTC_IO_MC_REGS_SIZE;
		break;
	case CHIP_TURKS:
		io_mc_regs = (u32 *)&turks_io_mc_regs;
		ucode_size = BTC_MC_UCODE_SIZE;
		regs_size = BTC_IO_MC_REGS_SIZE;
		break;
	case CHIP_CAICOS:
	default:
		io_mc_regs = (u32 *)&caicos_io_mc_regs;
		ucode_size = BTC_MC_UCODE_SIZE;
		regs_size = BTC_IO_MC_REGS_SIZE;
		break;
	case CHIP_CAYMAN:
		io_mc_regs = (u32 *)&cayman_io_mc_regs;
		ucode_size = CAYMAN_MC_UCODE_SIZE;
		regs_size = BTC_IO_MC_REGS_SIZE;
		break;
	}

	mem_type = (RREG32(MC_SEQ_MISC0) & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT;
	running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;

	if ((mem_type == MC_SEQ_MISC0_GDDR5_VALUE) && (running == 0)) {
		if (running) {
			blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
			WREG32(MC_SHARED_BLACKOUT_CNTL, 1);
		}

		/* reset the engine and set to writable */
		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(MC_SEQ_SUP_CNTL, 0x00000010);

		/* load mc io regs */
		for (i = 0; i < regs_size; i++) {
			WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]);
			WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
		}
		/* load the MC ucode */
		fw_data = (const __be32 *)rdev->mc_fw->data;
		for (i = 0; i < ucode_size; i++)
			WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++));

		/* put the engine back into the active state */
		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(MC_SEQ_SUP_CNTL, 0x00000004);
		WREG32(MC_SEQ_SUP_CNTL, 0x00000001);

		/* wait for training to complete */
		for (i = 0; i < rdev->usec_timeout; i++) {
			if (RREG32(MC_IO_PAD_CNTL_D0) & MEM_FALL_OUT_CMD)
				break;
			udelay(1);
		}

		if (running)
			WREG32(MC_SHARED_BLACKOUT_CNTL, blackout);
	}

	return 0;
}

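/**
 * ni_init_microcode - load the PFP, ME, RLC and MC firmware images
 *
 * @rdev: radeon_device pointer
 *
 * Request the ucode images from the firmware loader and verify that
 * each one has the expected size for the asic (no MC image on IGPs).
 * Returns 0 on success, error on failure.
 */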
int ni_init_microcode(struct radeon_device *rdev)
{
	struct platform_device *pdev;
	const char *chip_name;
	const char *rlc_chip_name;
	size_t pfp_req_size, me_req_size, rlc_req_size, mc_req_size;
	char fw_name[30];
	int err;

	DRM_DEBUG("\n");

	pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
	err = IS_ERR(pdev);
	if (err) {
		printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
		return -EINVAL;
	}

	switch (rdev->family) {
	case CHIP_BARTS:
		chip_name = "BARTS";
		rlc_chip_name = "BTC";
		pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
		mc_req_size = BTC_MC_UCODE_SIZE * 4;
		break;
	case CHIP_TURKS:
		chip_name = "TURKS";
		rlc_chip_name = "BTC";
		pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
		mc_req_size = BTC_MC_UCODE_SIZE * 4;
		break;
	case CHIP_CAICOS:
		chip_name = "CAICOS";
		rlc_chip_name = "BTC";
		pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
		mc_req_size = BTC_MC_UCODE_SIZE * 4;
		break;
	case CHIP_CAYMAN:
		chip_name = "CAYMAN";
		rlc_chip_name = "CAYMAN";
		pfp_req_size = CAYMAN_PFP_UCODE_SIZE * 4;
		me_req_size = CAYMAN_PM4_UCODE_SIZE * 4;
		rlc_req_size = CAYMAN_RLC_UCODE_SIZE * 4;
		mc_req_size = CAYMAN_MC_UCODE_SIZE * 4;
		break;
	case CHIP_ARUBA:
		chip_name = "ARUBA";
		rlc_chip_name = "ARUBA";
		/* pfp/me same size as CAYMAN */
		pfp_req_size = CAYMAN_PFP_UCODE_SIZE * 4;
		me_req_size = CAYMAN_PM4_UCODE_SIZE * 4;
		rlc_req_size = ARUBA_RLC_UCODE_SIZE * 4;
		mc_req_size = 0;
		break;
	default: BUG();
	}

	DRM_INFO("Loading %s Microcode\n", chip_name);

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
	err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
	if (err)
		goto out;
	if (rdev->pfp_fw->size != pfp_req_size) {
		printk(KERN_ERR
		       "ni_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->pfp_fw->size, fw_name);
		err = -EINVAL;
		goto out;
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
	err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
	if (err)
		goto out;
	if (rdev->me_fw->size != me_req_size) {
		printk(KERN_ERR
		       "ni_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->me_fw->size, fw_name);
		err = -EINVAL;
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
	err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev);
	if (err)
		goto out;
	if (rdev->rlc_fw->size != rlc_req_size) {
		printk(KERN_ERR
		       "ni_rlc: Bogus length %zu in firmware \"%s\"\n",
		       rdev->rlc_fw->size, fw_name);
		err = -EINVAL;
	}

	/* no MC ucode on TN */
	if (!(rdev->flags & RADEON_IS_IGP)) {
		snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
		err = request_firmware(&rdev->mc_fw, fw_name, &pdev->dev);
		if (err)
			goto out;
		if (rdev->mc_fw->size != mc_req_size) {
			printk(KERN_ERR
			       "ni_mc: Bogus length %zu in firmware \"%s\"\n",
			       rdev->mc_fw->size, fw_name);
			err = -EINVAL;
		}
	}
out:
	platform_device_unregister(pdev);

	if (err) {
		if (err != -EINVAL)
			printk(KERN_ERR
			       "ni_cp: Failed to load firmware \"%s\"\n",
			       fw_name);
		release_firmware(rdev->pfp_fw);
		rdev->pfp_fw = NULL;
		release_firmware(rdev->me_fw);
		rdev->me_fw = NULL;
		release_firmware(rdev->rlc_fw);
		rdev->rlc_fw = NULL;
		release_firmware(rdev->mc_fw);
		rdev->mc_fw = NULL;
	}
	return err;
}

/*
 * Core functions
 */
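/**
 * cayman_gpu_init - set up the 3D engine
 *
 * @rdev: radeon_device pointer
 *
 * Program the shader engine/render backend configuration, tiling
 * registers and 3D engine defaults based on the asic family.
 */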
static void cayman_gpu_init(struct radeon_device *rdev)
{
	u32 gb_addr_config = 0;
	u32 mc_shared_chmap, mc_arb_ramcfg;
	u32 cgts_tcc_disable;
	u32 sx_debug_1;
	u32 smx_dc_ctl0;
	u32 cgts_sm_ctrl_reg;
	u32 hdp_host_path_cntl;
	u32 tmp;
	u32 disabled_rb_mask;
	int i, j;

	switch (rdev->family) {
	case CHIP_CAYMAN:
		rdev->config.cayman.max_shader_engines = 2;
		rdev->config.cayman.max_pipes_per_simd = 4;
		rdev->config.cayman.max_tile_pipes = 8;
		rdev->config.cayman.max_simds_per_se = 12;
		rdev->config.cayman.max_backends_per_se = 4;
		rdev->config.cayman.max_texture_channel_caches = 8;
		rdev->config.cayman.max_gprs = 256;
		rdev->config.cayman.max_threads = 256;
		rdev->config.cayman.max_gs_threads = 32;
		rdev->config.cayman.max_stack_entries = 512;
		rdev->config.cayman.sx_num_of_sets = 8;
		rdev->config.cayman.sx_max_export_size = 256;
		rdev->config.cayman.sx_max_export_pos_size = 64;
		rdev->config.cayman.sx_max_export_smx_size = 192;
		rdev->config.cayman.max_hw_contexts = 8;
		rdev->config.cayman.sq_num_cf_insts = 2;

		rdev->config.cayman.sc_prim_fifo_size = 0x100;
		rdev->config.cayman.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.cayman.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = CAYMAN_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_ARUBA:
	default:
		rdev->config.cayman.max_shader_engines = 1;
		rdev->config.cayman.max_pipes_per_simd = 4;
		rdev->config.cayman.max_tile_pipes = 2;
		if ((rdev->pdev->device == 0x9900) ||
		    (rdev->pdev->device == 0x9901) ||
		    (rdev->pdev->device == 0x9905) ||
		    (rdev->pdev->device == 0x9906) ||
		    (rdev->pdev->device == 0x9907) ||
		    (rdev->pdev->device == 0x9908) ||
		    (rdev->pdev->device == 0x9909) ||
		    (rdev->pdev->device == 0x990B) ||
		    (rdev->pdev->device == 0x990C) ||
		    (rdev->pdev->device == 0x990F) ||
		    (rdev->pdev->device == 0x9910) ||
		    (rdev->pdev->device == 0x9917) ||
		    (rdev->pdev->device == 0x9999)) {
			rdev->config.cayman.max_simds_per_se = 6;
			rdev->config.cayman.max_backends_per_se = 2;
		} else if ((rdev->pdev->device == 0x9903) ||
			   (rdev->pdev->device == 0x9904) ||
			   (rdev->pdev->device == 0x990A) ||
			   (rdev->pdev->device == 0x990D) ||
			   (rdev->pdev->device == 0x990E) ||
			   (rdev->pdev->device == 0x9913) ||
			   (rdev->pdev->device == 0x9918)) {
			rdev->config.cayman.max_simds_per_se = 4;
			rdev->config.cayman.max_backends_per_se = 2;
		} else if ((rdev->pdev->device == 0x9919) ||
			   (rdev->pdev->device == 0x9990) ||
			   (rdev->pdev->device == 0x9991) ||
			   (rdev->pdev->device == 0x9994) ||
			   (rdev->pdev->device == 0x9995) ||
			   (rdev->pdev->device == 0x9996) ||
			   (rdev->pdev->device == 0x999A) ||
			   (rdev->pdev->device == 0x99A0)) {
			rdev->config.cayman.max_simds_per_se = 3;
			rdev->config.cayman.max_backends_per_se = 1;
		} else {
			rdev->config.cayman.max_simds_per_se = 2;
			rdev->config.cayman.max_backends_per_se = 1;
		}
		rdev->config.cayman.max_texture_channel_caches = 2;
		rdev->config.cayman.max_gprs = 256;
		rdev->config.cayman.max_threads = 256;
		rdev->config.cayman.max_gs_threads = 32;
		rdev->config.cayman.max_stack_entries = 512;
		rdev->config.cayman.sx_num_of_sets = 8;
		rdev->config.cayman.sx_max_export_size = 256;
		rdev->config.cayman.sx_max_export_pos_size = 64;
		rdev->config.cayman.sx_max_export_smx_size = 192;
		rdev->config.cayman.max_hw_contexts = 8;
		rdev->config.cayman.sq_num_cf_insts = 2;

		rdev->config.cayman.sc_prim_fifo_size = 0x40;
		rdev->config.cayman.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.cayman.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = ARUBA_GB_ADDR_CONFIG_GOLDEN;
		break;
	}

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}

	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));

	evergreen_fix_pci_max_read_req_size(rdev);

	mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
	mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);

	tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT;
	rdev->config.cayman.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
	if (rdev->config.cayman.mem_row_size_in_kb > 4)
		rdev->config.cayman.mem_row_size_in_kb = 4;
	/* XXX use MC settings? */
	rdev->config.cayman.shader_engine_tile_size = 32;
	rdev->config.cayman.num_gpus = 1;
	rdev->config.cayman.multi_gpu_tile_size = 64;

	tmp = (gb_addr_config & NUM_PIPES_MASK) >> NUM_PIPES_SHIFT;
	rdev->config.cayman.num_tile_pipes = (1 << tmp);
	tmp = (gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT;
	rdev->config.cayman.mem_max_burst_length_bytes = (tmp + 1) * 256;
	tmp = (gb_addr_config & NUM_SHADER_ENGINES_MASK) >> NUM_SHADER_ENGINES_SHIFT;
	rdev->config.cayman.num_shader_engines = tmp + 1;
	tmp = (gb_addr_config & NUM_GPUS_MASK) >> NUM_GPUS_SHIFT;
	rdev->config.cayman.num_gpus = tmp + 1;
	tmp = (gb_addr_config & MULTI_GPU_TILE_SIZE_MASK) >> MULTI_GPU_TILE_SIZE_SHIFT;
	rdev->config.cayman.multi_gpu_tile_size = 1 << tmp;
	tmp = (gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT;
	rdev->config.cayman.mem_row_size_in_kb = 1 << tmp;


	/* setup tiling info dword.  gb_addr_config is not adequate since it does
	 * not have bank info, so create a custom tiling dword.
	 * bits 3:0   num_pipes
	 * bits 7:4   num_banks
	 * bits 11:8  group_size
	 * bits 15:12 row_size
	 */
	rdev->config.cayman.tile_config = 0;
	switch (rdev->config.cayman.num_tile_pipes) {
	case 1:
	default:
		rdev->config.cayman.tile_config |= (0 << 0);
		break;
	case 2:
		rdev->config.cayman.tile_config |= (1 << 0);
		break;
	case 4:
		rdev->config.cayman.tile_config |= (2 << 0);
		break;
	case 8:
		rdev->config.cayman.tile_config |= (3 << 0);
		break;
	}

	/* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */
	if (rdev->flags & RADEON_IS_IGP)
		rdev->config.cayman.tile_config |= 1 << 4;
	else {
		switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) {
		case 0: /* four banks */
			rdev->config.cayman.tile_config |= 0 << 4;
			break;
		case 1: /* eight banks */
			rdev->config.cayman.tile_config |= 1 << 4;
			break;
		case 2: /* sixteen banks */
		default:
			rdev->config.cayman.tile_config |= 2 << 4;
			break;
		}
	}
	rdev->config.cayman.tile_config |=
		((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
	rdev->config.cayman.tile_config |=
		((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12;

	tmp = 0;
	for (i = (rdev->config.cayman.max_shader_engines - 1); i >= 0; i--) {
		u32 rb_disable_bitmap;

		WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
		WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
		rb_disable_bitmap = (RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000) >> 16;
		tmp <<= 4;
		tmp |= rb_disable_bitmap;
	}
	/* enabled rb are just the one not disabled :) */
	disabled_rb_mask = tmp;

	WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
	WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);

	WREG32(GB_ADDR_CONFIG, gb_addr_config);
	WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
	if (ASIC_IS_DCE6(rdev))
		WREG32(DMIF_ADDR_CALC, gb_addr_config);
	WREG32(HDP_ADDR_CONFIG, gb_addr_config);
	WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config);
	WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config);

	if ((rdev->config.cayman.max_backends_per_se == 1) &&
	    (rdev->flags & RADEON_IS_IGP)) {
		if ((disabled_rb_mask & 3) == 1) {
			/* RB0 disabled, RB1 enabled */
			tmp = 0x11111111;
		} else {
			/* RB1 disabled, RB0 enabled */
			tmp = 0x00000000;
		}
	} else {
		tmp = gb_addr_config & NUM_PIPES_MASK;
		tmp = r6xx_remap_render_backend(rdev, tmp,
						rdev->config.cayman.max_backends_per_se *
						rdev->config.cayman.max_shader_engines,
						CAYMAN_MAX_BACKENDS, disabled_rb_mask);
	}
	WREG32(GB_BACKEND_MAP, tmp);

	cgts_tcc_disable = 0xffff0000;
	for (i = 0; i < rdev->config.cayman.max_texture_channel_caches; i++)
		cgts_tcc_disable &= ~(1 << (16 + i));
	WREG32(CGTS_TCC_DISABLE, cgts_tcc_disable);
	WREG32(CGTS_SYS_TCC_DISABLE, cgts_tcc_disable);
	WREG32(CGTS_USER_SYS_TCC_DISABLE, cgts_tcc_disable);
	WREG32(CGTS_USER_TCC_DISABLE, cgts_tcc_disable);

	/* reprogram the shader complex */
	cgts_sm_ctrl_reg = RREG32(CGTS_SM_CTRL_REG);
	for (i = 0; i < 16; i++)
		WREG32(CGTS_SM_CTRL_REG, OVERRIDE);
	WREG32(CGTS_SM_CTRL_REG, cgts_sm_ctrl_reg);

	/* set HW defaults for 3D engine */
	WREG32(CP_MEQ_THRESHOLDS, MEQ1_START(0x30) | MEQ2_START(0x60));

	sx_debug_1 = RREG32(SX_DEBUG_1);
	sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
	WREG32(SX_DEBUG_1, sx_debug_1);

	smx_dc_ctl0 = RREG32(SMX_DC_CTL0);
	smx_dc_ctl0 &= ~NUMBER_OF_SETS(0x1ff);
	smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.cayman.sx_num_of_sets);
	WREG32(SMX_DC_CTL0, smx_dc_ctl0);

	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4) | CRC_SIMD_ID_WADDR_DISABLE);

	/* need to be explicitly zero-ed */
	WREG32(VGT_OFFCHIP_LDS_BASE, 0);
	WREG32(SQ_LSTMP_RING_BASE, 0);
	WREG32(SQ_HSTMP_RING_BASE, 0);
	WREG32(SQ_ESTMP_RING_BASE, 0);
	WREG32(SQ_GSTMP_RING_BASE, 0);
	WREG32(SQ_VSTMP_RING_BASE, 0);
	WREG32(SQ_PSTMP_RING_BASE, 0);

	WREG32(TA_CNTL_AUX, DISABLE_CUBE_ANISO);

	WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.cayman.sx_max_export_size / 4) - 1) |
					POSITION_BUFFER_SIZE((rdev->config.cayman.sx_max_export_pos_size / 4) - 1) |
					SMX_BUFFER_SIZE((rdev->config.cayman.sx_max_export_smx_size / 4) - 1)));

	WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.cayman.sc_prim_fifo_size) |
				 SC_HIZ_TILE_FIFO_SIZE(rdev->config.cayman.sc_hiz_tile_fifo_size) |
				 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.cayman.sc_earlyz_tile_fifo_size)));


	WREG32(VGT_NUM_INSTANCES, 1);

	WREG32(CP_PERFMON_CNTL, 0);

	WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.cayman.sq_num_cf_insts) |
				  FETCH_FIFO_HIWATER(0x4) |
				  DONE_FIFO_HIWATER(0xe0) |
				  ALU_UPDATE_FIFO_HIWATER(0x8)));

	WREG32(SQ_GPR_RESOURCE_MGMT_1, NUM_CLAUSE_TEMP_GPRS(4));
	WREG32(SQ_CONFIG, (VC_ENABLE |
			   EXPORT_SRC_C |
			   GFX_PRIO(0) |
			   CS1_PRIO(0) |
			   CS2_PRIO(1)));
	WREG32(SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, DYN_GPR_ENABLE);

	WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
					  FORCE_EOV_MAX_REZ_CNT(255)));

	WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC) |
	       AUTO_INVLD_EN(ES_AND_GS_AUTO));

	WREG32(VGT_GS_VERTEX_REUSE, 16);
	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);

	WREG32(CB_PERF_CTR0_SEL_0, 0);
	WREG32(CB_PERF_CTR0_SEL_1, 0);
	WREG32(CB_PERF_CTR1_SEL_0, 0);
	WREG32(CB_PERF_CTR1_SEL_1, 0);
	WREG32(CB_PERF_CTR2_SEL_0, 0);
	WREG32(CB_PERF_CTR2_SEL_1, 0);
	WREG32(CB_PERF_CTR3_SEL_0, 0);
	WREG32(CB_PERF_CTR3_SEL_1, 0);

	tmp = RREG32(HDP_MISC_CNTL);
	tmp |= HDP_FLUSH_INVALIDATE_CACHE;
	WREG32(HDP_MISC_CNTL, tmp);

	hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
	WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);

	WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));

	udelay(50);
}

/*
 * GART
 */
void cayman_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	/* flush hdp cache */
	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);

	/* bits 0-7 are the VM contexts0-7 */
	WREG32(VM_INVALIDATE_REQUEST, 1);
}

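/**
 * cayman_pcie_gart_enable - set up and enable the PCIE GART
 *
 * @rdev: radeon_device pointer
 *
 * Program the TLB and L2 cache control registers, set up VM context0
 * for the GTT aperture and enable the remaining VM contexts.
 * Returns 0 on success, error on failure.
 */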
static int cayman_pcie_gart_enable(struct radeon_device *rdev)
{
	int i, r;

	if (rdev->gart.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	radeon_gart_restore(rdev);
	/* Setup TLB control */
	WREG32(MC_VM_MX_L1_TLB_CNTL,
	       (0xA << 7) |
	       ENABLE_L1_TLB |
	       ENABLE_L1_FRAGMENT_PROCESSING |
	       SYSTEM_ACCESS_MODE_NOT_IN_SYS |
	       ENABLE_ADVANCED_DRIVER_MODEL |
	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
	       ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
	       EFFECTIVE_L2_QUEUE_SIZE(7) |
	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
	WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE);
	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
	       L2_CACHE_BIGK_FRAGMENT_SIZE(6));
	/* setup context0 */
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
			(u32)(rdev->dummy_page.addr >> 12));
	WREG32(VM_CONTEXT0_CNTL2, 0);
	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);

	WREG32(0x15D4, 0);
	WREG32(0x15D8, 0);
	WREG32(0x15DC, 0);

	/* empty context1-7 */
	/* Assign the pt base to something valid for now; the pts used for
	 * the VMs are determined by the application and setup and assigned
	 * on the fly in the vm part of radeon_gart.c
	 */
	for (i = 1; i < 8; i++) {
		WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (i << 2), 0);
		WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (i << 2), rdev->vm_manager.max_pfn);
		WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
			rdev->gart.table_addr >> 12);
	}

	/* enable context1-7 */
	WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(rdev->dummy_page.addr >> 12));
	WREG32(VM_CONTEXT1_CNTL2, 4);
	WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
				RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
				DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
				DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT |
				PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT |
				PDE0_PROTECTION_FAULT_ENABLE_DEFAULT |
				VALID_PROTECTION_FAULT_ENABLE_INTERRUPT |
				VALID_PROTECTION_FAULT_ENABLE_DEFAULT |
				READ_PROTECTION_FAULT_ENABLE_INTERRUPT |
				READ_PROTECTION_FAULT_ENABLE_DEFAULT |
				WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT |
				WRITE_PROTECTION_FAULT_ENABLE_DEFAULT);

	cayman_pcie_gart_tlb_flush(rdev);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20),
		 (unsigned long long)rdev->gart.table_addr);
	rdev->gart.ready = true;
	return 0;
}

static void cayman_pcie_gart_disable(struct radeon_device *rdev)
{
	/* Disable all tables */
	WREG32(VM_CONTEXT0_CNTL, 0);
	WREG32(VM_CONTEXT1_CNTL, 0);
	/* Setup TLB control */
	WREG32(MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING |
	       SYSTEM_ACCESS_MODE_NOT_IN_SYS |
	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
	       EFFECTIVE_L2_QUEUE_SIZE(7) |
	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
	       L2_CACHE_BIGK_FRAGMENT_SIZE(6));
	radeon_gart_table_vram_unpin(rdev);
}

static void cayman_pcie_gart_fini(struct radeon_device *rdev)
{
	cayman_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
	radeon_gart_fini(rdev);
}

void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
			      int ring, u32 cp_int_cntl)
{
	u32 srbm_gfx_cntl = RREG32(SRBM_GFX_CNTL) & ~3;

	WREG32(SRBM_GFX_CNTL, srbm_gfx_cntl | (ring & 3));
	WREG32(CP_INT_CNTL, cp_int_cntl);
}

/*
 * CP.
 */
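/**
 * cayman_fence_ring_emit - emit a fence on the gfx ring
 *
 * @rdev: radeon_device pointer
 * @fence: radeon fence object
 *
 * Flush the read caches and emit an EVENT_WRITE_EOP packet that writes
 * the fence sequence number and triggers an interrupt.
 */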
void cayman_fence_ring_emit(struct radeon_device *rdev,
			    struct radeon_fence *fence)
{
	struct radeon_ring *ring = &rdev->ring[fence->ring];
	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;

	/* flush read cache over gart for this vmid */
	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
	radeon_ring_write(ring, PACKET3_TC_ACTION_ENA | PACKET3_SH_ACTION_ENA);
	radeon_ring_write(ring, 0xFFFFFFFF);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 10); /* poll interval */
	/* EVENT_WRITE_EOP - flush caches, send int */
	radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
	radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
	radeon_ring_write(ring, addr & 0xffffffff);
	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
	radeon_ring_write(ring, fence->seq);
	radeon_ring_write(ring, 0);
}

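/**
 * cayman_ring_ib_execute - emit an IB on the gfx ring
 *
 * @rdev: radeon_device pointer
 * @ib: IB object to schedule
 *
 * Emit an INDIRECT_BUFFER packet for the IB and flush the read caches
 * for the vmid the IB belongs to.
 */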
void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->ring];

	/* set to DX10/11 mode */
	radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
	radeon_ring_write(ring, 1);

	if (ring->rptr_save_reg) {
		uint32_t next_rptr = ring->wptr + 3 + 4 + 8;
		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
		radeon_ring_write(ring, ((ring->rptr_save_reg -
					  PACKET3_SET_CONFIG_REG_START) >> 2));
		radeon_ring_write(ring, next_rptr);
	}

	radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
	radeon_ring_write(ring,
#ifdef __BIG_ENDIAN
			  (2 << 0) |
#endif
			  (ib->gpu_addr & 0xFFFFFFFC));
	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
	radeon_ring_write(ring, ib->length_dw |
			  (ib->vm ? (ib->vm->id << 24) : 0));

	/* flush read cache over gart for this vmid */
	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
	radeon_ring_write(ring, ib->vm ? ib->vm->id : 0);
	radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
	radeon_ring_write(ring, PACKET3_TC_ACTION_ENA | PACKET3_SH_ACTION_ENA);
	radeon_ring_write(ring, 0xFFFFFFFF);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 10); /* poll interval */
}

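/**
 * cayman_uvd_semaphore_emit - emit a semaphore command on the UVD ring
 *
 * @rdev: radeon_device pointer
 * @ring: radeon ring pointer
 * @semaphore: semaphore object
 * @emit_wait: true to emit a semaphore wait, false to emit a signal
 */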
void cayman_uvd_semaphore_emit(struct radeon_device *rdev,
			       struct radeon_ring *ring,
			       struct radeon_semaphore *semaphore,
			       bool emit_wait)
{
	uint64_t addr = semaphore->gpu_addr;

	radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0));
	radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF);

	radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0));
	radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF);

	radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
	radeon_ring_write(ring, 0x80 | (emit_wait ? 1 : 0));
}

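/* enable or halt the CP; while the CP is halted only the CPU-visible
 * part of VRAM is usable, so shrink the active VRAM size accordingly.
 */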
static void cayman_cp_enable(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32(CP_ME_CNTL, 0);
	else {
		radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
		WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
		WREG32(SCRATCH_UMSK, 0);
		rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
	}
}

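/* load the PFP and ME microcode into the CP with the CP halted */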
static int cayman_cp_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	int i;

	if (!rdev->me_fw || !rdev->pfp_fw)
		return -EINVAL;

	cayman_cp_enable(rdev, false);

	fw_data = (const __be32 *)rdev->pfp_fw->data;
	WREG32(CP_PFP_UCODE_ADDR, 0);
	for (i = 0; i < CAYMAN_PFP_UCODE_SIZE; i++)
		WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
	WREG32(CP_PFP_UCODE_ADDR, 0);

	fw_data = (const __be32 *)rdev->me_fw->data;
	WREG32(CP_ME_RAM_WADDR, 0);
	for (i = 0; i < CAYMAN_PM4_UCODE_SIZE; i++)
		WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));

	WREG32(CP_PFP_UCODE_ADDR, 0);
	WREG32(CP_ME_RAM_WADDR, 0);
	WREG32(CP_ME_RAM_RADDR, 0);
	return 0;
}

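/* initialize the CP: emit ME_INITIALIZE and the default clear state */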
static int cayman_cp_start(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	int r, i;

	r = radeon_ring_lock(rdev, ring, 7);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}
	radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
	radeon_ring_write(ring, 0x1);
	radeon_ring_write(ring, 0x0);
	radeon_ring_write(ring, rdev->config.cayman.max_hw_contexts - 1);
	radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 0);
	radeon_ring_unlock_commit(rdev, ring);

	cayman_cp_enable(rdev, true);

	r = radeon_ring_lock(rdev, ring, cayman_default_size + 19);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}

	/* setup clear context state */
	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	for (i = 0; i < cayman_default_size; i++)
		radeon_ring_write(ring, cayman_default_state[i]);

	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);

	/* set clear context state */
	radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
	radeon_ring_write(ring, 0);

	/* SQ_VTX_BASE_VTX_LOC */
	radeon_ring_write(ring, 0xc0026f00);
	radeon_ring_write(ring, 0x00000000);
	radeon_ring_write(ring, 0x00000000);
	radeon_ring_write(ring, 0x00000000);

	/* Clear consts */
	radeon_ring_write(ring, 0xc0036f00);
	radeon_ring_write(ring, 0x00000bc4);
	radeon_ring_write(ring, 0xffffffff);
	radeon_ring_write(ring, 0xffffffff);
	radeon_ring_write(ring, 0xffffffff);

	radeon_ring_write(ring, 0xc0026900);
	radeon_ring_write(ring, 0x00000316);
	radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
	radeon_ring_write(ring, 0x00000010); /*  */

	radeon_ring_unlock_commit(rdev, ring);

	/* XXX init other rings */

	return 0;
}

static void cayman_cp_fini(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	cayman_cp_enable(rdev, false);
	radeon_ring_fini(rdev, ring);
	radeon_scratch_free(rdev, ring->rptr_save_reg);
}

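/* reset the CP, program the ring buffer registers for all three rings
 * and restart the CP; only ring 0 (gfx) is tested here.
 */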
static int cayman_cp_resume(struct radeon_device *rdev)
{
	static const int ridx[] = {
		RADEON_RING_TYPE_GFX_INDEX,
		CAYMAN_RING_TYPE_CP1_INDEX,
		CAYMAN_RING_TYPE_CP2_INDEX
	};
	static const unsigned cp_rb_cntl[] = {
		CP_RB0_CNTL,
		CP_RB1_CNTL,
		CP_RB2_CNTL,
	};
	static const unsigned cp_rb_rptr_addr[] = {
		CP_RB0_RPTR_ADDR,
		CP_RB1_RPTR_ADDR,
		CP_RB2_RPTR_ADDR
	};
	static const unsigned cp_rb_rptr_addr_hi[] = {
		CP_RB0_RPTR_ADDR_HI,
		CP_RB1_RPTR_ADDR_HI,
		CP_RB2_RPTR_ADDR_HI
	};
	static const unsigned cp_rb_base[] = {
		CP_RB0_BASE,
		CP_RB1_BASE,
		CP_RB2_BASE
	};
	struct radeon_ring *ring;
	int i, r;

	/* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
	WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
				 SOFT_RESET_PA |
				 SOFT_RESET_SH |
				 SOFT_RESET_VGT |
				 SOFT_RESET_SPI |
				 SOFT_RESET_SX));
	RREG32(GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(GRBM_SOFT_RESET, 0);
	RREG32(GRBM_SOFT_RESET);

	WREG32(CP_SEM_WAIT_TIMER, 0x0);
	WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);

	/* Set the write pointer delay */
	WREG32(CP_RB_WPTR_DELAY, 0);

	WREG32(CP_DEBUG, (1 << 27));

	/* set the wb address whether it's enabled or not */
	WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
	WREG32(SCRATCH_UMSK, 0xff);

	for (i = 0; i < 3; ++i) {
		uint32_t rb_cntl;
		uint64_t addr;

		/* Set ring buffer size */
		ring = &rdev->ring[ridx[i]];
		rb_cntl = drm_order(ring->ring_size / 8);
		rb_cntl |= drm_order(RADEON_GPU_PAGE_SIZE/8) << 8;
#ifdef __BIG_ENDIAN
		rb_cntl |= BUF_SWAP_32BIT;
#endif
		WREG32(cp_rb_cntl[i], rb_cntl);

		/* set the wb address whether it's enabled or not */
		addr = rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET;
		WREG32(cp_rb_rptr_addr[i], addr & 0xFFFFFFFC);
		WREG32(cp_rb_rptr_addr_hi[i], upper_32_bits(addr) & 0xFF);
	}

	/* set the rb base addr, this causes an internal reset of ALL rings */
	for (i = 0; i < 3; ++i) {
		ring = &rdev->ring[ridx[i]];
		WREG32(cp_rb_base[i], ring->gpu_addr >> 8);
	}

	for (i = 0; i < 3; ++i) {
		/* Initialize the ring buffer's read and write pointers */
		ring = &rdev->ring[ridx[i]];
		WREG32_P(cp_rb_cntl[i], RB_RPTR_WR_ENA, ~RB_RPTR_WR_ENA);

		ring->rptr = ring->wptr = 0;
		WREG32(ring->rptr_reg, ring->rptr);
		WREG32(ring->wptr_reg, ring->wptr);

		mdelay(1);
		WREG32_P(cp_rb_cntl[i], 0, ~RB_RPTR_WR_ENA);
	}

	/* start the rings */
	cayman_cp_start(rdev);
	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = true;
	rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
	rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
	/* this only test cp0 */
	r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
	if (r) {
		rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
		rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
		rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
		return r;
	}

	return 0;
}

/*
 * DMA
 * Starting with R600, the GPU has an asynchronous
 * DMA engine.  The programming model is very similar
 * to the 3D engine (ring buffer, IBs, etc.), but the
 * DMA controller has its own packet format that is
 * different from the PM4 format used by the 3D engine.
 * It supports copying data, writing embedded data,
 * solid fills, and a number of other things.  It also
 * has support for tiling/detiling of buffers.
 * Cayman and newer support two asynchronous DMA engines.
 */
/**
 * cayman_dma_ring_ib_execute - Schedule an IB on the DMA engine
 *
 * @rdev: radeon_device pointer
 * @ib: IB object to schedule
 *
 * Schedule an IB in the DMA ring (cayman-SI).
 */
void cayman_dma_ring_ib_execute(struct radeon_device *rdev,
				struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->ring];

	if (rdev->wb.enabled) {
		u32 next_rptr = ring->wptr + 4;
		while ((next_rptr & 7) != 5)
			next_rptr++;
		next_rptr += 3;
		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
		radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
		radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
		radeon_ring_write(ring, next_rptr);
	}

	/* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
	 * Pad as necessary with NOPs.
	 */
	while ((ring->wptr & 7) != 5)
		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
	radeon_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, ib->vm ? ib->vm->id : 0, 0));
	radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
	radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));

}

/**
 * cayman_dma_stop - stop the async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Stop the async dma engines (cayman-SI).
 */
void cayman_dma_stop(struct radeon_device *rdev)
{
	u32 rb_cntl;

	radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);

	/* dma0 */
	rb_cntl = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
	rb_cntl &= ~DMA_RB_ENABLE;
	WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, rb_cntl);

	/* dma1 */
	rb_cntl = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
	rb_cntl &= ~DMA_RB_ENABLE;
	WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, rb_cntl);

	rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
	rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false;
}

/**
 * cayman_dma_resume - setup and start the async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Set up the DMA ring buffers and enable them. (cayman-SI).
 * Returns 0 for success, error for failure.
 */
int cayman_dma_resume(struct radeon_device *rdev)
{
	struct radeon_ring *ring;
	u32 rb_cntl, dma_cntl, ib_cntl;
	u32 rb_bufsz;
	u32 reg_offset, wb_offset;
	int i, r;

	/* Reset dma */
	WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA | SOFT_RESET_DMA1);
	RREG32(SRBM_SOFT_RESET);
	udelay(50);
	WREG32(SRBM_SOFT_RESET, 0);

	for (i = 0; i < 2; i++) {
		if (i == 0) {
			ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
			reg_offset = DMA0_REGISTER_OFFSET;
			wb_offset = R600_WB_DMA_RPTR_OFFSET;
		} else {
			ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
			reg_offset = DMA1_REGISTER_OFFSET;
			wb_offset = CAYMAN_WB_DMA1_RPTR_OFFSET;
		}

		WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL + reg_offset, 0);
		WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL + reg_offset, 0);

		/* Set ring buffer size in dwords */
		rb_bufsz = drm_order(ring->ring_size / 4);
		rb_cntl = rb_bufsz << 1;
#ifdef __BIG_ENDIAN
		rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
#endif
		WREG32(DMA_RB_CNTL + reg_offset, rb_cntl);

		/* Initialize the ring buffer's read and write pointers */
		WREG32(DMA_RB_RPTR + reg_offset, 0);
		WREG32(DMA_RB_WPTR + reg_offset, 0);

		/* set the wb address whether it's enabled or not */
		WREG32(DMA_RB_RPTR_ADDR_HI + reg_offset,
		       upper_32_bits(rdev->wb.gpu_addr + wb_offset) & 0xFF);
		WREG32(DMA_RB_RPTR_ADDR_LO + reg_offset,
		       ((rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC));

		if (rdev->wb.enabled)
			rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;

		WREG32(DMA_RB_BASE + reg_offset, ring->gpu_addr >> 8);

		/* enable DMA IBs */
		ib_cntl = DMA_IB_ENABLE | CMD_VMID_FORCE;
#ifdef __BIG_ENDIAN
		ib_cntl |= DMA_IB_SWAP_ENABLE;
#endif
		WREG32(DMA_IB_CNTL + reg_offset, ib_cntl);

		dma_cntl = RREG32(DMA_CNTL + reg_offset);
		dma_cntl &= ~CTXEMPTY_INT_ENABLE;
		WREG32(DMA_CNTL + reg_offset, dma_cntl);

		ring->wptr = 0;
		WREG32(DMA_RB_WPTR + reg_offset, ring->wptr << 2);

		ring->rptr = RREG32(DMA_RB_RPTR + reg_offset) >> 2;

		WREG32(DMA_RB_CNTL + reg_offset, rb_cntl | DMA_RB_ENABLE);

		ring->ready = true;

		r = radeon_ring_test(rdev, ring->idx, ring);
		if (r) {
			ring->ready = false;
			return r;
		}
	}

	radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);

	return 0;
}

/**
 * cayman_dma_fini - tear down the async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Stop the async dma engines and free the rings (cayman-SI).
 */
void cayman_dma_fini(struct radeon_device *rdev)
{
	cayman_dma_stop(rdev);
	radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
	radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]);
}

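/* read the status registers and work out which blocks need a soft reset */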
Alex Deucher168757e2013-01-18 19:17:22 -05001354static u32 cayman_gpu_check_soft_reset(struct radeon_device *rdev)
1355{
1356 u32 reset_mask = 0;
1357 u32 tmp;
1358
1359 /* GRBM_STATUS */
1360 tmp = RREG32(GRBM_STATUS);
1361 if (tmp & (PA_BUSY | SC_BUSY |
1362 SH_BUSY | SX_BUSY |
1363 TA_BUSY | VGT_BUSY |
1364 DB_BUSY | CB_BUSY |
1365 GDS_BUSY | SPI_BUSY |
1366 IA_BUSY | IA_BUSY_NO_DMA))
1367 reset_mask |= RADEON_RESET_GFX;
1368
1369 if (tmp & (CF_RQ_PENDING | PF_RQ_PENDING |
1370 CP_BUSY | CP_COHERENCY_BUSY))
1371 reset_mask |= RADEON_RESET_CP;
1372
1373 if (tmp & GRBM_EE_BUSY)
1374 reset_mask |= RADEON_RESET_GRBM | RADEON_RESET_GFX | RADEON_RESET_CP;
1375
1376 /* DMA_STATUS_REG 0 */
1377 tmp = RREG32(DMA_STATUS_REG + DMA0_REGISTER_OFFSET);
1378 if (!(tmp & DMA_IDLE))
1379 reset_mask |= RADEON_RESET_DMA;
1380
1381 /* DMA_STATUS_REG 1 */
1382 tmp = RREG32(DMA_STATUS_REG + DMA1_REGISTER_OFFSET);
1383 if (!(tmp & DMA_IDLE))
1384 reset_mask |= RADEON_RESET_DMA1;
1385
1386 /* SRBM_STATUS2 */
1387 tmp = RREG32(SRBM_STATUS2);
1388 if (tmp & DMA_BUSY)
1389 reset_mask |= RADEON_RESET_DMA;
1390
1391 if (tmp & DMA1_BUSY)
1392 reset_mask |= RADEON_RESET_DMA1;
1393
1394 /* SRBM_STATUS */
1395 tmp = RREG32(SRBM_STATUS);
1396 if (tmp & (RLC_RQ_PENDING | RLC_BUSY))
1397 reset_mask |= RADEON_RESET_RLC;
1398
1399 if (tmp & IH_BUSY)
1400 reset_mask |= RADEON_RESET_IH;
1401
1402 if (tmp & SEM_BUSY)
1403 reset_mask |= RADEON_RESET_SEM;
1404
1405 if (tmp & GRBM_RQ_PENDING)
1406 reset_mask |= RADEON_RESET_GRBM;
1407
1408 if (tmp & VMC_BUSY)
1409 reset_mask |= RADEON_RESET_VMC;
1410
1411 if (tmp & (MCB_BUSY | MCB_NON_DISPLAY_BUSY |
1412 MCC_BUSY | MCD_BUSY))
1413 reset_mask |= RADEON_RESET_MC;
1414
1415 if (evergreen_is_display_hung(rdev))
1416 reset_mask |= RADEON_RESET_DISPLAY;
1417
1418 /* VM_L2_STATUS */
1419 tmp = RREG32(VM_L2_STATUS);
1420 if (tmp & L2_BUSY)
1421 reset_mask |= RADEON_RESET_VMC;
1422
Alex Deucherd808fc82013-02-28 10:03:08 -05001423	/* Skip MC reset as it's most likely not hung, just busy */
1424 if (reset_mask & RADEON_RESET_MC) {
1425 DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
1426 reset_mask &= ~RADEON_RESET_MC;
1427 }
1428
Alex Deucher168757e2013-01-18 19:17:22 -05001429 return reset_mask;
1430}
1431
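/**
 * cayman_gpu_soft_reset - soft reset the blocks named in the reset mask
 *
 * @rdev: radeon_device pointer
 * @reset_mask: RADEON_RESET_* mask of the blocks to reset
 *
 * Halt CP parsing and the DMA ring buffers, stop the MC, pulse the
 * matching GRBM/SRBM soft reset bits, then restore the MC (cayman/TN).
 */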
1432static void cayman_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
Alex Deucher271d6fed2013-01-03 12:48:05 -05001433{
1434 struct evergreen_mc_save save;
Alex Deucher187e3592013-01-18 14:51:38 -05001435 u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
1436 u32 tmp;
Alex Deucher19fc42e2013-01-14 11:04:39 -05001437
Alex Deucher271d6fed2013-01-03 12:48:05 -05001438 if (reset_mask == 0)
Alex Deucher168757e2013-01-18 19:17:22 -05001439 return;
Alex Deucher271d6fed2013-01-03 12:48:05 -05001440
1441 dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
1442
Alex Deucher187e3592013-01-18 14:51:38 -05001443 evergreen_print_gpu_status_regs(rdev);
Alex Deucher271d6fed2013-01-03 12:48:05 -05001444 dev_info(rdev->dev, " VM_CONTEXT0_PROTECTION_FAULT_ADDR 0x%08X\n",
1445 RREG32(0x14F8));
1446 dev_info(rdev->dev, " VM_CONTEXT0_PROTECTION_FAULT_STATUS 0x%08X\n",
1447 RREG32(0x14D8));
1448 dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
1449 RREG32(0x14FC));
1450 dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
1451 RREG32(0x14DC));
1452
Alex Deucher187e3592013-01-18 14:51:38 -05001453 /* Disable CP parsing/prefetching */
1454 WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
1455
1456 if (reset_mask & RADEON_RESET_DMA) {
1457 /* dma0 */
1458 tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
1459 tmp &= ~DMA_RB_ENABLE;
1460 WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp);
Alex Deucher168757e2013-01-18 19:17:22 -05001461 }
Alex Deucher187e3592013-01-18 14:51:38 -05001462
Alex Deucher168757e2013-01-18 19:17:22 -05001463 if (reset_mask & RADEON_RESET_DMA1) {
Alex Deucher187e3592013-01-18 14:51:38 -05001464 /* dma1 */
1465 tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
1466 tmp &= ~DMA_RB_ENABLE;
1467 WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp);
1468 }
1469
Alex Deucher90fb8772013-01-23 18:59:17 -05001470 udelay(50);
1471
1472 evergreen_mc_stop(rdev, &save);
1473 if (evergreen_mc_wait_for_idle(rdev)) {
 1474		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
1475 }
1476
Alex Deucher187e3592013-01-18 14:51:38 -05001477 if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE)) {
1478 grbm_soft_reset = SOFT_RESET_CB |
1479 SOFT_RESET_DB |
1480 SOFT_RESET_GDS |
1481 SOFT_RESET_PA |
1482 SOFT_RESET_SC |
1483 SOFT_RESET_SPI |
1484 SOFT_RESET_SH |
1485 SOFT_RESET_SX |
1486 SOFT_RESET_TC |
1487 SOFT_RESET_TA |
1488 SOFT_RESET_VGT |
1489 SOFT_RESET_IA;
1490 }
1491
1492 if (reset_mask & RADEON_RESET_CP) {
1493 grbm_soft_reset |= SOFT_RESET_CP | SOFT_RESET_VGT;
1494
1495 srbm_soft_reset |= SOFT_RESET_GRBM;
1496 }
Alex Deucher271d6fed2013-01-03 12:48:05 -05001497
1498 if (reset_mask & RADEON_RESET_DMA)
Alex Deucher168757e2013-01-18 19:17:22 -05001499 srbm_soft_reset |= SOFT_RESET_DMA;
1500
1501 if (reset_mask & RADEON_RESET_DMA1)
1502 srbm_soft_reset |= SOFT_RESET_DMA1;
1503
1504 if (reset_mask & RADEON_RESET_DISPLAY)
1505 srbm_soft_reset |= SOFT_RESET_DC;
1506
1507 if (reset_mask & RADEON_RESET_RLC)
1508 srbm_soft_reset |= SOFT_RESET_RLC;
1509
1510 if (reset_mask & RADEON_RESET_SEM)
1511 srbm_soft_reset |= SOFT_RESET_SEM;
1512
1513 if (reset_mask & RADEON_RESET_IH)
1514 srbm_soft_reset |= SOFT_RESET_IH;
1515
1516 if (reset_mask & RADEON_RESET_GRBM)
1517 srbm_soft_reset |= SOFT_RESET_GRBM;
1518
1519 if (reset_mask & RADEON_RESET_VMC)
1520 srbm_soft_reset |= SOFT_RESET_VMC;
1521
Alex Deucher24178ec2013-01-24 15:00:17 -05001522 if (!(rdev->flags & RADEON_IS_IGP)) {
1523 if (reset_mask & RADEON_RESET_MC)
1524 srbm_soft_reset |= SOFT_RESET_MC;
1525 }
Alex Deucher187e3592013-01-18 14:51:38 -05001526
1527 if (grbm_soft_reset) {
1528 tmp = RREG32(GRBM_SOFT_RESET);
1529 tmp |= grbm_soft_reset;
1530 dev_info(rdev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
1531 WREG32(GRBM_SOFT_RESET, tmp);
1532 tmp = RREG32(GRBM_SOFT_RESET);
1533
1534 udelay(50);
1535
1536 tmp &= ~grbm_soft_reset;
1537 WREG32(GRBM_SOFT_RESET, tmp);
1538 tmp = RREG32(GRBM_SOFT_RESET);
1539 }
1540
1541 if (srbm_soft_reset) {
1542 tmp = RREG32(SRBM_SOFT_RESET);
1543 tmp |= srbm_soft_reset;
1544 dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
1545 WREG32(SRBM_SOFT_RESET, tmp);
1546 tmp = RREG32(SRBM_SOFT_RESET);
1547
1548 udelay(50);
1549
1550 tmp &= ~srbm_soft_reset;
1551 WREG32(SRBM_SOFT_RESET, tmp);
1552 tmp = RREG32(SRBM_SOFT_RESET);
1553 }
Alex Deucher271d6fed2013-01-03 12:48:05 -05001554
1555 /* Wait a little for things to settle down */
1556 udelay(50);
1557
Alex Deucherb9952a82011-03-02 20:07:33 -05001558 evergreen_mc_resume(rdev, &save);
Alex Deucher187e3592013-01-18 14:51:38 -05001559 udelay(50);
Alex Deucher410a3412013-01-18 13:05:39 -05001560
Alex Deucher187e3592013-01-18 14:51:38 -05001561 evergreen_print_gpu_status_regs(rdev);
Alex Deucherb9952a82011-03-02 20:07:33 -05001562}
1563
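/**
 * cayman_asic_reset - soft reset the GPU
 *
 * @rdev: radeon_device pointer
 *
 * Determine which blocks are hung, mark the engine as hung in the BIOS
 * scratch registers, perform the soft reset and clear the hung flag again
 * if the blocks come back idle (cayman/TN).  Always returns 0.
 */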
1564int cayman_asic_reset(struct radeon_device *rdev)
1565{
Alex Deucher168757e2013-01-18 19:17:22 -05001566 u32 reset_mask;
1567
1568 reset_mask = cayman_gpu_check_soft_reset(rdev);
1569
1570 if (reset_mask)
1571 r600_set_bios_scratch_engine_hung(rdev, true);
1572
1573 cayman_gpu_soft_reset(rdev, reset_mask);
1574
1575 reset_mask = cayman_gpu_check_soft_reset(rdev);
1576
1577 if (!reset_mask)
1578 r600_set_bios_scratch_engine_hung(rdev, false);
1579
1580 return 0;
Alex Deucherb9952a82011-03-02 20:07:33 -05001581}
1582
Alex Deucherf60cbd12012-12-04 15:27:33 -05001583/**
Alex Deucher123bc182013-01-24 11:37:19 -05001584 * cayman_gfx_is_lockup - Check if the GFX engine is locked up
1585 *
1586 * @rdev: radeon_device pointer
1587 * @ring: radeon_ring structure holding ring information
1588 *
1589 * Check if the GFX engine is locked up.
1590 * Returns true if the engine appears to be locked up, false if not.
1591 */
1592bool cayman_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
1593{
1594 u32 reset_mask = cayman_gpu_check_soft_reset(rdev);
1595
1596 if (!(reset_mask & (RADEON_RESET_GFX |
1597 RADEON_RESET_COMPUTE |
1598 RADEON_RESET_CP))) {
1599 radeon_ring_lockup_update(ring);
1600 return false;
1601 }
1602 /* force CP activities */
1603 radeon_ring_force_activity(rdev, ring);
1604 return radeon_ring_test_lockup(rdev, ring);
1605}
1606
1607/**
Alex Deucherf60cbd12012-12-04 15:27:33 -05001608 * cayman_dma_is_lockup - Check if the DMA engine is locked up
1609 *
1610 * @rdev: radeon_device pointer
1611 * @ring: radeon_ring structure holding ring information
1612 *
Alex Deucher123bc182013-01-24 11:37:19 -05001613 * Check if the async DMA engine is locked up.
Alex Deucherf60cbd12012-12-04 15:27:33 -05001614 * Returns true if the engine appears to be locked up, false if not.
1615 */
1616bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
1617{
Alex Deucher123bc182013-01-24 11:37:19 -05001618 u32 reset_mask = cayman_gpu_check_soft_reset(rdev);
1619 u32 mask;
Alex Deucherf60cbd12012-12-04 15:27:33 -05001620
1621 if (ring->idx == R600_RING_TYPE_DMA_INDEX)
Alex Deucher123bc182013-01-24 11:37:19 -05001622 mask = RADEON_RESET_DMA;
Alex Deucherf60cbd12012-12-04 15:27:33 -05001623 else
Alex Deucher123bc182013-01-24 11:37:19 -05001624 mask = RADEON_RESET_DMA1;
1625
1626 if (!(reset_mask & mask)) {
Alex Deucherf60cbd12012-12-04 15:27:33 -05001627 radeon_ring_lockup_update(ring);
1628 return false;
1629 }
1630 /* force ring activities */
1631 radeon_ring_force_activity(rdev, ring);
1632 return radeon_ring_test_lockup(rdev, ring);
1633}
1634
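/**
 * cayman_startup - program the GPU and bring up all the rings
 *
 * @rdev: radeon_device pointer
 *
 * Load the microcode, program the MC and GART, initialize the CP, DMA
 * and (when present) UVD rings, and start the IB pool, VM manager and
 * audio (cayman/TN).  Used by both init and resume.
 * Returns 0 on success, negative error code on failure.
 */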
Alex Deucher755d8192011-03-02 20:07:34 -05001635static int cayman_startup(struct radeon_device *rdev)
1636{
Christian Könige32eb502011-10-23 12:56:27 +02001637 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
Alex Deucher755d8192011-03-02 20:07:34 -05001638 int r;
1639
Ilija Hadzicb07759b2011-09-20 10:22:58 -04001640 /* enable pcie gen2 link */
1641 evergreen_pcie_gen2_enable(rdev);
1642
Alex Deucherc420c742012-03-20 17:18:39 -04001643 if (rdev->flags & RADEON_IS_IGP) {
1644 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
1645 r = ni_init_microcode(rdev);
1646 if (r) {
1647 DRM_ERROR("Failed to load firmware!\n");
1648 return r;
1649 }
1650 }
1651 } else {
1652 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
1653 r = ni_init_microcode(rdev);
1654 if (r) {
1655 DRM_ERROR("Failed to load firmware!\n");
1656 return r;
1657 }
1658 }
1659
1660 r = ni_mc_load_microcode(rdev);
Alex Deucher755d8192011-03-02 20:07:34 -05001661 if (r) {
Alex Deucherc420c742012-03-20 17:18:39 -04001662 DRM_ERROR("Failed to load MC firmware!\n");
Alex Deucher755d8192011-03-02 20:07:34 -05001663 return r;
1664 }
1665 }
Alex Deucher755d8192011-03-02 20:07:34 -05001666
Alex Deucher16cdf042011-10-28 10:30:02 -04001667 r = r600_vram_scratch_init(rdev);
1668 if (r)
1669 return r;
1670
Alex Deucher755d8192011-03-02 20:07:34 -05001671 evergreen_mc_program(rdev);
1672 r = cayman_pcie_gart_enable(rdev);
1673 if (r)
1674 return r;
1675 cayman_gpu_init(rdev);
1676
Alex Deuchercb92d452011-05-25 16:39:00 -04001677 r = evergreen_blit_init(rdev);
Alex Deucher755d8192011-03-02 20:07:34 -05001678 if (r) {
Ilija Hadzicfb3d9e92011-10-12 23:29:41 -04001679 r600_blit_fini(rdev);
Alex Deucher27cd7762012-02-23 17:53:42 -05001680 rdev->asic->copy.copy = NULL;
Alex Deucher755d8192011-03-02 20:07:34 -05001681 dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
1682 }
Alex Deucher755d8192011-03-02 20:07:34 -05001683
Alex Deucherc420c742012-03-20 17:18:39 -04001684 /* allocate rlc buffers */
1685 if (rdev->flags & RADEON_IS_IGP) {
1686 r = si_rlc_init(rdev);
1687 if (r) {
1688 DRM_ERROR("Failed to init rlc BOs!\n");
1689 return r;
1690 }
1691 }
1692
Alex Deucher755d8192011-03-02 20:07:34 -05001693 /* allocate wb buffer */
1694 r = radeon_wb_init(rdev);
1695 if (r)
1696 return r;
1697
Jerome Glisse30eb77f2011-11-20 20:45:34 +00001698 r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
1699 if (r) {
1700 dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
1701 return r;
1702 }
1703
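	/* UVD is optional: if resume or fence setup fails, just disable
	 * the UVD ring instead of failing the whole startup.
	 */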
Christian Königf2ba57b2013-04-08 12:41:29 +02001704 r = rv770_uvd_resume(rdev);
1705 if (!r) {
1706 r = radeon_fence_driver_start_ring(rdev,
1707 R600_RING_TYPE_UVD_INDEX);
1708 if (r)
1709 dev_err(rdev->dev, "UVD fences init error (%d).\n", r);
1710 }
1711 if (r)
1712 rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
1713
Jerome Glisse30eb77f2011-11-20 20:45:34 +00001714 r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
1715 if (r) {
1716 dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
1717 return r;
1718 }
1719
1720 r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
1721 if (r) {
1722 dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
1723 return r;
1724 }
1725
Alex Deucherf60cbd12012-12-04 15:27:33 -05001726 r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
1727 if (r) {
1728 dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
1729 return r;
1730 }
1731
1732 r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
1733 if (r) {
1734 dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
1735 return r;
1736 }
1737
Alex Deucher755d8192011-03-02 20:07:34 -05001738 /* Enable IRQ */
1739 r = r600_irq_init(rdev);
1740 if (r) {
1741 DRM_ERROR("radeon: IH init failed (%d).\n", r);
1742 radeon_irq_kms_fini(rdev);
1743 return r;
1744 }
1745 evergreen_irq_set(rdev);
1746
Christian Könige32eb502011-10-23 12:56:27 +02001747 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
Alex Deucher78c55602011-11-17 14:25:56 -05001748 CP_RB0_RPTR, CP_RB0_WPTR,
1749 0, 0xfffff, RADEON_CP_PACKET2);
Alex Deucher755d8192011-03-02 20:07:34 -05001750 if (r)
1751 return r;
Alex Deucherf60cbd12012-12-04 15:27:33 -05001752
1753 ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
1754 r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
1755 DMA_RB_RPTR + DMA0_REGISTER_OFFSET,
1756 DMA_RB_WPTR + DMA0_REGISTER_OFFSET,
1757 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
1758 if (r)
1759 return r;
1760
1761 ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
1762 r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
1763 DMA_RB_RPTR + DMA1_REGISTER_OFFSET,
1764 DMA_RB_WPTR + DMA1_REGISTER_OFFSET,
1765 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
1766 if (r)
1767 return r;
1768
Alex Deucher755d8192011-03-02 20:07:34 -05001769 r = cayman_cp_load_microcode(rdev);
1770 if (r)
1771 return r;
1772 r = cayman_cp_resume(rdev);
1773 if (r)
1774 return r;
1775
Alex Deucherf60cbd12012-12-04 15:27:33 -05001776 r = cayman_dma_resume(rdev);
1777 if (r)
1778 return r;
1779
Christian Königf2ba57b2013-04-08 12:41:29 +02001780 ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
1781 if (ring->ring_size) {
1782 r = radeon_ring_init(rdev, ring, ring->ring_size,
1783 R600_WB_UVD_RPTR_OFFSET,
1784 UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
1785 0, 0xfffff, RADEON_CP_PACKET2);
1786 if (!r)
1787 r = r600_uvd_init(rdev);
1788 if (r)
1789 DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
1790 }
1791
Christian König2898c342012-07-05 11:55:34 +02001792 r = radeon_ib_pool_init(rdev);
1793 if (r) {
1794 dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
Jerome Glisseb15ba512011-11-15 11:48:34 -05001795 return r;
Christian König2898c342012-07-05 11:55:34 +02001796 }
Jerome Glisseb15ba512011-11-15 11:48:34 -05001797
Christian Königc6105f22012-07-05 14:32:00 +02001798 r = radeon_vm_manager_init(rdev);
1799 if (r) {
1800 dev_err(rdev->dev, "vm manager initialization failed (%d).\n", r);
Jerome Glisse721604a2012-01-05 22:11:05 -05001801 return r;
Christian Königc6105f22012-07-05 14:32:00 +02001802 }
Jerome Glisse721604a2012-01-05 22:11:05 -05001803
Rafał Miłecki6b53a052012-06-11 12:34:01 +02001804 r = r600_audio_init(rdev);
1805 if (r)
1806 return r;
1807
Alex Deucher755d8192011-03-02 20:07:34 -05001808 return 0;
1809}
1810
1811int cayman_resume(struct radeon_device *rdev)
1812{
1813 int r;
1814
 1815	/* Do not reset the GPU before posting; on rv770 hw, unlike r500 hw,
 1816	 * posting will perform the tasks necessary to bring the GPU back
 1817	 * into good shape.
 1818	 */
1819 /* post card */
1820 atom_asic_init(rdev->mode_info.atom_context);
1821
Jerome Glisseb15ba512011-11-15 11:48:34 -05001822 rdev->accel_working = true;
Alex Deucher755d8192011-03-02 20:07:34 -05001823 r = cayman_startup(rdev);
1824 if (r) {
1825 DRM_ERROR("cayman startup failed on resume\n");
Jerome Glisse6b7746e2012-02-20 17:57:20 -05001826 rdev->accel_working = false;
Alex Deucher755d8192011-03-02 20:07:34 -05001827 return r;
1828 }
Alex Deucher755d8192011-03-02 20:07:34 -05001829 return r;
Alex Deucher755d8192011-03-02 20:07:34 -05001830}
1831
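/**
 * cayman_suspend - prepare the asic for suspend
 *
 * @rdev: radeon_device pointer
 *
 * Stop audio, the VM manager and the CP, DMA and UVD engines, disable
 * interrupts and writeback, and shut down the GART (cayman/TN).
 */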
1832int cayman_suspend(struct radeon_device *rdev)
1833{
Rafał Miłecki6b53a052012-06-11 12:34:01 +02001834 r600_audio_fini(rdev);
Alex Deucherfa3daf92013-03-11 15:32:26 -04001835 radeon_vm_manager_fini(rdev);
Alex Deucher755d8192011-03-02 20:07:34 -05001836 cayman_cp_enable(rdev, false);
Alex Deucherf60cbd12012-12-04 15:27:33 -05001837 cayman_dma_stop(rdev);
Christian Königf2ba57b2013-04-08 12:41:29 +02001838 r600_uvd_rbc_stop(rdev);
1839 radeon_uvd_suspend(rdev);
Alex Deucher755d8192011-03-02 20:07:34 -05001840 evergreen_irq_suspend(rdev);
1841 radeon_wb_disable(rdev);
1842 cayman_pcie_gart_disable(rdev);
Alex Deucher755d8192011-03-02 20:07:34 -05001843 return 0;
1844}
1845
 1846/* The plan is to move initialization into this function and to use
 1847 * helper functions so that radeon_device_init does little more than
 1848 * call the asic-specific functions.  Doing so should also make it
 1849 * possible to remove a bunch of callback functions, such as
 1850 * vram_info.
 1851 */
1852int cayman_init(struct radeon_device *rdev)
1853{
Christian Könige32eb502011-10-23 12:56:27 +02001854 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
Alex Deucher755d8192011-03-02 20:07:34 -05001855 int r;
1856
Alex Deucher755d8192011-03-02 20:07:34 -05001857 /* Read BIOS */
1858 if (!radeon_get_bios(rdev)) {
1859 if (ASIC_IS_AVIVO(rdev))
1860 return -EINVAL;
1861 }
1862 /* Must be an ATOMBIOS */
1863 if (!rdev->is_atom_bios) {
1864 dev_err(rdev->dev, "Expecting atombios for cayman GPU\n");
1865 return -EINVAL;
1866 }
1867 r = radeon_atombios_init(rdev);
1868 if (r)
1869 return r;
1870
1871 /* Post card if necessary */
1872 if (!radeon_card_posted(rdev)) {
1873 if (!rdev->bios) {
1874 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
1875 return -EINVAL;
1876 }
1877 DRM_INFO("GPU not posted. posting now...\n");
1878 atom_asic_init(rdev->mode_info.atom_context);
1879 }
1880 /* Initialize scratch registers */
1881 r600_scratch_init(rdev);
1882 /* Initialize surface registers */
1883 radeon_surface_init(rdev);
1884 /* Initialize clocks */
1885 radeon_get_clock_info(rdev->ddev);
1886 /* Fence driver */
Jerome Glisse30eb77f2011-11-20 20:45:34 +00001887 r = radeon_fence_driver_init(rdev);
Alex Deucher755d8192011-03-02 20:07:34 -05001888 if (r)
1889 return r;
1890 /* initialize memory controller */
1891 r = evergreen_mc_init(rdev);
1892 if (r)
1893 return r;
1894 /* Memory manager */
1895 r = radeon_bo_init(rdev);
1896 if (r)
1897 return r;
1898
1899 r = radeon_irq_kms_init(rdev);
1900 if (r)
1901 return r;
1902
Christian Könige32eb502011-10-23 12:56:27 +02001903 ring->ring_obj = NULL;
1904 r600_ring_init(rdev, ring, 1024 * 1024);
Alex Deucher755d8192011-03-02 20:07:34 -05001905
Alex Deucherf60cbd12012-12-04 15:27:33 -05001906 ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
1907 ring->ring_obj = NULL;
1908 r600_ring_init(rdev, ring, 64 * 1024);
1909
1910 ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
1911 ring->ring_obj = NULL;
1912 r600_ring_init(rdev, ring, 64 * 1024);
1913
Christian Königf2ba57b2013-04-08 12:41:29 +02001914 r = radeon_uvd_init(rdev);
1915 if (!r) {
1916 ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
1917 ring->ring_obj = NULL;
1918 r600_ring_init(rdev, ring, 4096);
1919 }
1920
Alex Deucher755d8192011-03-02 20:07:34 -05001921 rdev->ih.ring_obj = NULL;
1922 r600_ih_ring_init(rdev, 64 * 1024);
1923
1924 r = r600_pcie_gart_init(rdev);
1925 if (r)
1926 return r;
1927
1928 rdev->accel_working = true;
1929 r = cayman_startup(rdev);
1930 if (r) {
1931 dev_err(rdev->dev, "disabling GPU acceleration\n");
1932 cayman_cp_fini(rdev);
Alex Deucherf60cbd12012-12-04 15:27:33 -05001933 cayman_dma_fini(rdev);
Alex Deucher755d8192011-03-02 20:07:34 -05001934 r600_irq_fini(rdev);
Alex Deucherc420c742012-03-20 17:18:39 -04001935 if (rdev->flags & RADEON_IS_IGP)
1936 si_rlc_fini(rdev);
Alex Deucher755d8192011-03-02 20:07:34 -05001937 radeon_wb_fini(rdev);
Christian König2898c342012-07-05 11:55:34 +02001938 radeon_ib_pool_fini(rdev);
Jerome Glisse721604a2012-01-05 22:11:05 -05001939 radeon_vm_manager_fini(rdev);
Alex Deucher755d8192011-03-02 20:07:34 -05001940 radeon_irq_kms_fini(rdev);
1941 cayman_pcie_gart_fini(rdev);
1942 rdev->accel_working = false;
1943 }
Alex Deucher755d8192011-03-02 20:07:34 -05001944
1945 /* Don't start up if the MC ucode is missing.
1946 * The default clocks and voltages before the MC ucode
 1947	 * is loaded are not sufficient for advanced operations.
Alex Deucherc420c742012-03-20 17:18:39 -04001948 *
1949 * We can skip this check for TN, because there is no MC
1950 * ucode.
Alex Deucher755d8192011-03-02 20:07:34 -05001951 */
Alex Deucherc420c742012-03-20 17:18:39 -04001952 if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) {
Alex Deucher755d8192011-03-02 20:07:34 -05001953 DRM_ERROR("radeon: MC ucode required for NI+.\n");
1954 return -EINVAL;
1955 }
1956
1957 return 0;
1958}
1959
1960void cayman_fini(struct radeon_device *rdev)
1961{
Ilija Hadzicfb3d9e92011-10-12 23:29:41 -04001962 r600_blit_fini(rdev);
Alex Deucher755d8192011-03-02 20:07:34 -05001963 cayman_cp_fini(rdev);
Alex Deucherf60cbd12012-12-04 15:27:33 -05001964 cayman_dma_fini(rdev);
Alex Deucher755d8192011-03-02 20:07:34 -05001965 r600_irq_fini(rdev);
Alex Deucherc420c742012-03-20 17:18:39 -04001966 if (rdev->flags & RADEON_IS_IGP)
1967 si_rlc_fini(rdev);
Alex Deucher755d8192011-03-02 20:07:34 -05001968 radeon_wb_fini(rdev);
Jerome Glisse721604a2012-01-05 22:11:05 -05001969 radeon_vm_manager_fini(rdev);
Christian König2898c342012-07-05 11:55:34 +02001970 radeon_ib_pool_fini(rdev);
Alex Deucher755d8192011-03-02 20:07:34 -05001971 radeon_irq_kms_fini(rdev);
Christian Königf2ba57b2013-04-08 12:41:29 +02001972 radeon_uvd_fini(rdev);
Alex Deucher755d8192011-03-02 20:07:34 -05001973 cayman_pcie_gart_fini(rdev);
Alex Deucher16cdf042011-10-28 10:30:02 -04001974 r600_vram_scratch_fini(rdev);
Alex Deucher755d8192011-03-02 20:07:34 -05001975 radeon_gem_fini(rdev);
1976 radeon_fence_driver_fini(rdev);
1977 radeon_bo_fini(rdev);
1978 radeon_atombios_fini(rdev);
1979 kfree(rdev->bios);
1980 rdev->bios = NULL;
1981}
1982
Jerome Glisse721604a2012-01-05 22:11:05 -05001983/*
1984 * vm
1985 */
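/**
 * cayman_vm_init - set up the VM manager parameters
 *
 * @rdev: radeon_device pointer
 *
 * Program the number of hardware VM contexts (8) and the VRAM base
 * offset used in page table entries; on IGPs the offset comes from
 * FUS_MC_VM_FB_OFFSET (cayman/TN).  Returns 0.
 */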
1986int cayman_vm_init(struct radeon_device *rdev)
1987{
1988 /* number of VMs */
1989 rdev->vm_manager.nvm = 8;
1990 /* base offset of vram pages */
Alex Deuchere71270f2012-03-20 17:18:38 -04001991 if (rdev->flags & RADEON_IS_IGP) {
1992 u64 tmp = RREG32(FUS_MC_VM_FB_OFFSET);
1993 tmp <<= 22;
1994 rdev->vm_manager.vram_base_offset = tmp;
1995 } else
1996 rdev->vm_manager.vram_base_offset = 0;
Jerome Glisse721604a2012-01-05 22:11:05 -05001997 return 0;
1998}
1999
2000void cayman_vm_fini(struct radeon_device *rdev)
2001{
2002}
2003
Christian Königdce34bf2012-09-17 19:36:18 +02002004#define R600_ENTRY_VALID (1 << 0)
Jerome Glisse721604a2012-01-05 22:11:05 -05002005#define R600_PTE_SYSTEM (1 << 1)
2006#define R600_PTE_SNOOPED (1 << 2)
2007#define R600_PTE_READABLE (1 << 5)
2008#define R600_PTE_WRITEABLE (1 << 6)
2009
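/**
 * cayman_vm_page_flags - translate radeon VM page flags into hw PTE bits
 *
 * @rdev: radeon_device pointer
 * @flags: RADEON_VM_PAGE_* flags
 *
 * Convert the generic RADEON_VM_PAGE_* flags into the R600_PTE_* bits
 * written into the hardware page table entries (cayman/TN).
 */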
Christian König089a7862012-08-11 11:54:05 +02002010uint32_t cayman_vm_page_flags(struct radeon_device *rdev, uint32_t flags)
Jerome Glisse721604a2012-01-05 22:11:05 -05002011{
2012 uint32_t r600_flags = 0;
Christian Königdce34bf2012-09-17 19:36:18 +02002013 r600_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_ENTRY_VALID : 0;
Jerome Glisse721604a2012-01-05 22:11:05 -05002014 r600_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0;
2015 r600_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0;
2016 if (flags & RADEON_VM_PAGE_SYSTEM) {
2017 r600_flags |= R600_PTE_SYSTEM;
2018 r600_flags |= (flags & RADEON_VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0;
2019 }
2020 return r600_flags;
2021}
2022
Alex Deucher7a083292012-08-31 13:51:21 -04002023/**
2024 * cayman_vm_set_page - update the page tables using the CP
2025 *
2026 * @rdev: radeon_device pointer
Alex Deucher43f12142013-02-01 17:32:42 +01002027 * @ib: indirect buffer to fill with commands
Christian Königdce34bf2012-09-17 19:36:18 +02002028 * @pe: addr of the page entry
2029 * @addr: dst addr to write into pe
2030 * @count: number of page entries to update
2031 * @incr: increase next addr by incr bytes
2032 * @flags: access flags
Alex Deucher7a083292012-08-31 13:51:21 -04002033 *
Alex Deucher43f12142013-02-01 17:32:42 +01002034 * Update the page tables using the CP or DMA engine (cayman/TN).
Alex Deucher7a083292012-08-31 13:51:21 -04002035 */
Alex Deucher43f12142013-02-01 17:32:42 +01002036void cayman_vm_set_page(struct radeon_device *rdev,
2037 struct radeon_ib *ib,
2038 uint64_t pe,
Christian Königdce34bf2012-09-17 19:36:18 +02002039 uint64_t addr, unsigned count,
2040 uint32_t incr, uint32_t flags)
Jerome Glisse721604a2012-01-05 22:11:05 -05002041{
Christian Königdce34bf2012-09-17 19:36:18 +02002042 uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
Alex Deucher3b6b59b2012-10-22 12:19:01 -04002043 uint64_t value;
2044 unsigned ndw;
Jerome Glisse721604a2012-01-05 22:11:05 -05002045
Alex Deucher3b6b59b2012-10-22 12:19:01 -04002046 if (rdev->asic->vm.pt_ring_index == RADEON_RING_TYPE_GFX_INDEX) {
2047 while (count) {
2048 ndw = 1 + count * 2;
2049 if (ndw > 0x3FFF)
2050 ndw = 0x3FFF;
Christian König089a7862012-08-11 11:54:05 +02002051
Alex Deucher43f12142013-02-01 17:32:42 +01002052 ib->ptr[ib->length_dw++] = PACKET3(PACKET3_ME_WRITE, ndw);
2053 ib->ptr[ib->length_dw++] = pe;
2054 ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
Alex Deucher3b6b59b2012-10-22 12:19:01 -04002055 for (; ndw > 1; ndw -= 2, --count, pe += 8) {
2056 if (flags & RADEON_VM_PAGE_SYSTEM) {
2057 value = radeon_vm_map_gart(rdev, addr);
2058 value &= 0xFFFFFFFFFFFFF000ULL;
2059 } else if (flags & RADEON_VM_PAGE_VALID) {
2060 value = addr;
2061 } else {
2062 value = 0;
2063 }
Christian Königf9fdffa2012-10-22 17:42:36 +02002064 addr += incr;
Alex Deucher3b6b59b2012-10-22 12:19:01 -04002065 value |= r600_flags;
Alex Deucher43f12142013-02-01 17:32:42 +01002066 ib->ptr[ib->length_dw++] = value;
2067 ib->ptr[ib->length_dw++] = upper_32_bits(value);
Christian Königf9fdffa2012-10-22 17:42:36 +02002068 }
Alex Deucher3b6b59b2012-10-22 12:19:01 -04002069 }
2070 } else {
2071 while (count) {
2072 ndw = count * 2;
2073 if (ndw > 0xFFFFE)
2074 ndw = 0xFFFFE;
Christian Königf9fdffa2012-10-22 17:42:36 +02002075
Alex Deucher3b6b59b2012-10-22 12:19:01 -04002076 /* for non-physically contiguous pages (system) */
Alex Deucher43f12142013-02-01 17:32:42 +01002077 ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, ndw);
2078 ib->ptr[ib->length_dw++] = pe;
2079 ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
Alex Deucher3b6b59b2012-10-22 12:19:01 -04002080 for (; ndw > 0; ndw -= 2, --count, pe += 8) {
2081 if (flags & RADEON_VM_PAGE_SYSTEM) {
2082 value = radeon_vm_map_gart(rdev, addr);
2083 value &= 0xFFFFFFFFFFFFF000ULL;
2084 } else if (flags & RADEON_VM_PAGE_VALID) {
2085 value = addr;
2086 } else {
2087 value = 0;
2088 }
2089 addr += incr;
2090 value |= r600_flags;
Alex Deucher43f12142013-02-01 17:32:42 +01002091 ib->ptr[ib->length_dw++] = value;
2092 ib->ptr[ib->length_dw++] = upper_32_bits(value);
Alex Deucher3b6b59b2012-10-22 12:19:01 -04002093 }
Christian König2a6f1ab2012-08-11 15:00:30 +02002094 }
Alex Deucher43f12142013-02-01 17:32:42 +01002095 while (ib->length_dw & 0x7)
2096 ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0);
Christian König2a6f1ab2012-08-11 15:00:30 +02002097 }
Jerome Glisse721604a2012-01-05 22:11:05 -05002098}
Christian König9b40e5d2012-08-08 12:22:43 +02002099
Alex Deucher7a083292012-08-31 13:51:21 -04002100/**
2101 * cayman_vm_flush - vm flush using the CP
2102 *
 2103 * @rdev: radeon_device pointer
 * @ridx: index of the ring to emit the flush on
 * @vm: vm to flush
2104 *
2105 * Update the page table base and flush the VM TLB
2106 * using the CP (cayman-si).
2107 */
Alex Deucher498522b2012-10-02 14:43:38 -04002108void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
Christian König9b40e5d2012-08-08 12:22:43 +02002109{
Alex Deucher498522b2012-10-02 14:43:38 -04002110 struct radeon_ring *ring = &rdev->ring[ridx];
Christian König9b40e5d2012-08-08 12:22:43 +02002111
Christian Königee60e292012-08-09 16:21:08 +02002112 if (vm == NULL)
Christian König9b40e5d2012-08-08 12:22:43 +02002113 return;
2114
Christian Königee60e292012-08-09 16:21:08 +02002115 radeon_ring_write(ring, PACKET0(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2), 0));
Dmitry Cherkasovfa87e622012-09-17 19:36:19 +02002116 radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
Christian Königee60e292012-08-09 16:21:08 +02002117
Christian König9b40e5d2012-08-08 12:22:43 +02002118 /* flush hdp cache */
2119 radeon_ring_write(ring, PACKET0(HDP_MEM_COHERENCY_FLUSH_CNTL, 0));
2120 radeon_ring_write(ring, 0x1);
2121
 2122	/* bits 0-7 are the VM contexts 0-7 */
2123 radeon_ring_write(ring, PACKET0(VM_INVALIDATE_REQUEST, 0));
Alex Deucher498522b2012-10-02 14:43:38 -04002124 radeon_ring_write(ring, 1 << vm->id);
Christian König58f8cf52012-10-22 17:42:35 +02002125
2126 /* sync PFP to ME, otherwise we might get invalid PFP reads */
2127 radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
2128 radeon_ring_write(ring, 0x0);
Alex Deucher0af62b02011-01-06 21:19:31 -05002129}
Alex Deucherf60cbd12012-12-04 15:27:33 -05002130
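/**
 * cayman_dma_vm_flush - vm flush using the async DMA engine
 *
 * @rdev: radeon_device pointer
 * @ridx: index of the DMA ring to emit the flush on
 * @vm: vm to flush
 *
 * Update the page table base and flush the VM TLB with SRBM writes
 * emitted on the DMA ring (cayman/TN).
 */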
2131void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
2132{
2133 struct radeon_ring *ring = &rdev->ring[ridx];
2134
2135 if (vm == NULL)
2136 return;
2137
2138 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
2139 radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2));
2140 radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
2141
2142 /* flush hdp cache */
2143 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
2144 radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
2145 radeon_ring_write(ring, 1);
2146
 2147	/* bits 0-7 are the VM contexts 0-7 */
2148 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
2149 radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
2150 radeon_ring_write(ring, 1 << vm->id);
2151}
2152