blob: 0b9c3c95a6be2198308f8c9eae4b2fb2940f53fb [file] [log] [blame]
Alex Deucher8cc1a532013-04-09 12:41:24 -04001/*
2 * Copyright 2012 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Alex Deucher
23 */
24#include <linux/firmware.h>
25#include <linux/platform_device.h>
26#include <linux/slab.h>
27#include <linux/module.h>
28#include "drmP.h"
29#include "radeon.h"
Alex Deucher6f2043c2013-04-09 12:43:41 -040030#include "radeon_asic.h"
Alex Deucher8cc1a532013-04-09 12:41:24 -040031#include "cikd.h"
32#include "atom.h"
Alex Deucher841cf442012-12-18 21:47:44 -050033#include "cik_blit_shaders.h"
Alex Deucher8cc1a532013-04-09 12:41:24 -040034
Alex Deucher02c81322012-12-18 21:43:07 -050035/* GFX */
36#define CIK_PFP_UCODE_SIZE 2144
37#define CIK_ME_UCODE_SIZE 2144
38#define CIK_CE_UCODE_SIZE 2144
39/* compute */
40#define CIK_MEC_UCODE_SIZE 4192
41/* interrupts */
42#define BONAIRE_RLC_UCODE_SIZE 2048
43#define KB_RLC_UCODE_SIZE 2560
44#define KV_RLC_UCODE_SIZE 2560
45/* gddr controller */
46#define CIK_MC_UCODE_SIZE 7866
47
48MODULE_FIRMWARE("radeon/BONAIRE_pfp.bin");
49MODULE_FIRMWARE("radeon/BONAIRE_me.bin");
50MODULE_FIRMWARE("radeon/BONAIRE_ce.bin");
51MODULE_FIRMWARE("radeon/BONAIRE_mec.bin");
52MODULE_FIRMWARE("radeon/BONAIRE_mc.bin");
53MODULE_FIRMWARE("radeon/BONAIRE_rlc.bin");
54MODULE_FIRMWARE("radeon/KAVERI_pfp.bin");
55MODULE_FIRMWARE("radeon/KAVERI_me.bin");
56MODULE_FIRMWARE("radeon/KAVERI_ce.bin");
57MODULE_FIRMWARE("radeon/KAVERI_mec.bin");
58MODULE_FIRMWARE("radeon/KAVERI_rlc.bin");
59MODULE_FIRMWARE("radeon/KABINI_pfp.bin");
60MODULE_FIRMWARE("radeon/KABINI_me.bin");
61MODULE_FIRMWARE("radeon/KABINI_ce.bin");
62MODULE_FIRMWARE("radeon/KABINI_mec.bin");
63MODULE_FIRMWARE("radeon/KABINI_rlc.bin");
64
Alex Deucher6f2043c2013-04-09 12:43:41 -040065extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save);
66extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save);
Alex Deucher1c491652013-04-09 12:45:26 -040067extern void si_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
Alex Deucher6f2043c2013-04-09 12:43:41 -040068
#define BONAIRE_IO_MC_REGS_SIZE 36

/* MC IO debug register (index, value) pairs programmed into
 * MC_SEQ_IO_DEBUG_INDEX/MC_SEQ_IO_DEBUG_DATA by ci_mc_load_microcode()
 * before the MC ucode itself is uploaded.  Values are hardware golden
 * settings for Bonaire; do not reorder — they are written in sequence.
 */
static const u32 bonaire_io_mc_regs[BONAIRE_IO_MC_REGS_SIZE][2] =
{
	{0x00000070, 0x04400000},
	{0x00000071, 0x80c01803},
	{0x00000072, 0x00004004},
	{0x00000073, 0x00000100},
	{0x00000074, 0x00ff0000},
	{0x00000075, 0x34000000},
	{0x00000076, 0x08000014},
	{0x00000077, 0x00cc08ec},
	{0x00000078, 0x00000400},
	{0x00000079, 0x00000000},
	{0x0000007a, 0x04090000},
	{0x0000007c, 0x00000000},
	{0x0000007e, 0x4408a8e8},
	{0x0000007f, 0x00000304},
	{0x00000080, 0x00000000},
	{0x00000082, 0x00000001},
	{0x00000083, 0x00000002},
	{0x00000084, 0xf3e4f400},
	{0x00000085, 0x052024e3},
	{0x00000087, 0x00000000},
	{0x00000088, 0x01000000},
	{0x0000008a, 0x1c0a0000},
	{0x0000008b, 0xff010000},
	{0x0000008d, 0xffffefff},
	{0x0000008e, 0xfff3efff},
	{0x0000008f, 0xfff3efbf},
	{0x00000092, 0xf7ffffff},
	{0x00000093, 0xffffff7f},
	{0x00000095, 0x00101101},
	{0x00000096, 0x00000fff},
	{0x00000097, 0x00116fff},
	{0x00000098, 0x60010000},
	{0x00000099, 0x10010000},
	{0x0000009a, 0x00006000},
	{0x0000009b, 0x00001000},
	{0x0000009f, 0x00b48000}
};
110
111/* ucode loading */
112/**
113 * ci_mc_load_microcode - load MC ucode into the hw
114 *
115 * @rdev: radeon_device pointer
116 *
117 * Load the GDDR MC ucode into the hw (CIK).
118 * Returns 0 on success, error on failure.
119 */
120static int ci_mc_load_microcode(struct radeon_device *rdev)
121{
122 const __be32 *fw_data;
123 u32 running, blackout = 0;
124 u32 *io_mc_regs;
125 int i, ucode_size, regs_size;
126
127 if (!rdev->mc_fw)
128 return -EINVAL;
129
130 switch (rdev->family) {
131 case CHIP_BONAIRE:
132 default:
133 io_mc_regs = (u32 *)&bonaire_io_mc_regs;
134 ucode_size = CIK_MC_UCODE_SIZE;
135 regs_size = BONAIRE_IO_MC_REGS_SIZE;
136 break;
137 }
138
139 running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;
140
141 if (running == 0) {
142 if (running) {
143 blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
144 WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1);
145 }
146
147 /* reset the engine and set to writable */
148 WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
149 WREG32(MC_SEQ_SUP_CNTL, 0x00000010);
150
151 /* load mc io regs */
152 for (i = 0; i < regs_size; i++) {
153 WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]);
154 WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
155 }
156 /* load the MC ucode */
157 fw_data = (const __be32 *)rdev->mc_fw->data;
158 for (i = 0; i < ucode_size; i++)
159 WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++));
160
161 /* put the engine back into the active state */
162 WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
163 WREG32(MC_SEQ_SUP_CNTL, 0x00000004);
164 WREG32(MC_SEQ_SUP_CNTL, 0x00000001);
165
166 /* wait for training to complete */
167 for (i = 0; i < rdev->usec_timeout; i++) {
168 if (RREG32(MC_SEQ_TRAIN_WAKEUP_CNTL) & TRAIN_DONE_D0)
169 break;
170 udelay(1);
171 }
172 for (i = 0; i < rdev->usec_timeout; i++) {
173 if (RREG32(MC_SEQ_TRAIN_WAKEUP_CNTL) & TRAIN_DONE_D1)
174 break;
175 udelay(1);
176 }
177
178 if (running)
179 WREG32(MC_SHARED_BLACKOUT_CNTL, blackout);
180 }
181
182 return 0;
183}
184
Alex Deucher02c81322012-12-18 21:43:07 -0500185/**
186 * cik_init_microcode - load ucode images from disk
187 *
188 * @rdev: radeon_device pointer
189 *
190 * Use the firmware interface to load the ucode images into
191 * the driver (not loaded into hw).
192 * Returns 0 on success, error on failure.
193 */
194static int cik_init_microcode(struct radeon_device *rdev)
195{
196 struct platform_device *pdev;
197 const char *chip_name;
198 size_t pfp_req_size, me_req_size, ce_req_size,
199 mec_req_size, rlc_req_size, mc_req_size;
200 char fw_name[30];
201 int err;
202
203 DRM_DEBUG("\n");
204
205 pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
206 err = IS_ERR(pdev);
207 if (err) {
208 printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
209 return -EINVAL;
210 }
211
212 switch (rdev->family) {
213 case CHIP_BONAIRE:
214 chip_name = "BONAIRE";
215 pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
216 me_req_size = CIK_ME_UCODE_SIZE * 4;
217 ce_req_size = CIK_CE_UCODE_SIZE * 4;
218 mec_req_size = CIK_MEC_UCODE_SIZE * 4;
219 rlc_req_size = BONAIRE_RLC_UCODE_SIZE * 4;
220 mc_req_size = CIK_MC_UCODE_SIZE * 4;
221 break;
222 case CHIP_KAVERI:
223 chip_name = "KAVERI";
224 pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
225 me_req_size = CIK_ME_UCODE_SIZE * 4;
226 ce_req_size = CIK_CE_UCODE_SIZE * 4;
227 mec_req_size = CIK_MEC_UCODE_SIZE * 4;
228 rlc_req_size = KV_RLC_UCODE_SIZE * 4;
229 break;
230 case CHIP_KABINI:
231 chip_name = "KABINI";
232 pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
233 me_req_size = CIK_ME_UCODE_SIZE * 4;
234 ce_req_size = CIK_CE_UCODE_SIZE * 4;
235 mec_req_size = CIK_MEC_UCODE_SIZE * 4;
236 rlc_req_size = KB_RLC_UCODE_SIZE * 4;
237 break;
238 default: BUG();
239 }
240
241 DRM_INFO("Loading %s Microcode\n", chip_name);
242
243 snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
244 err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
245 if (err)
246 goto out;
247 if (rdev->pfp_fw->size != pfp_req_size) {
248 printk(KERN_ERR
249 "cik_cp: Bogus length %zu in firmware \"%s\"\n",
250 rdev->pfp_fw->size, fw_name);
251 err = -EINVAL;
252 goto out;
253 }
254
255 snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
256 err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
257 if (err)
258 goto out;
259 if (rdev->me_fw->size != me_req_size) {
260 printk(KERN_ERR
261 "cik_cp: Bogus length %zu in firmware \"%s\"\n",
262 rdev->me_fw->size, fw_name);
263 err = -EINVAL;
264 }
265
266 snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name);
267 err = request_firmware(&rdev->ce_fw, fw_name, &pdev->dev);
268 if (err)
269 goto out;
270 if (rdev->ce_fw->size != ce_req_size) {
271 printk(KERN_ERR
272 "cik_cp: Bogus length %zu in firmware \"%s\"\n",
273 rdev->ce_fw->size, fw_name);
274 err = -EINVAL;
275 }
276
277 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec.bin", chip_name);
278 err = request_firmware(&rdev->mec_fw, fw_name, &pdev->dev);
279 if (err)
280 goto out;
281 if (rdev->mec_fw->size != mec_req_size) {
282 printk(KERN_ERR
283 "cik_cp: Bogus length %zu in firmware \"%s\"\n",
284 rdev->mec_fw->size, fw_name);
285 err = -EINVAL;
286 }
287
288 snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", chip_name);
289 err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev);
290 if (err)
291 goto out;
292 if (rdev->rlc_fw->size != rlc_req_size) {
293 printk(KERN_ERR
294 "cik_rlc: Bogus length %zu in firmware \"%s\"\n",
295 rdev->rlc_fw->size, fw_name);
296 err = -EINVAL;
297 }
298
299 /* No MC ucode on APUs */
300 if (!(rdev->flags & RADEON_IS_IGP)) {
301 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
302 err = request_firmware(&rdev->mc_fw, fw_name, &pdev->dev);
303 if (err)
304 goto out;
305 if (rdev->mc_fw->size != mc_req_size) {
306 printk(KERN_ERR
307 "cik_mc: Bogus length %zu in firmware \"%s\"\n",
308 rdev->mc_fw->size, fw_name);
309 err = -EINVAL;
310 }
311 }
312
313out:
314 platform_device_unregister(pdev);
315
316 if (err) {
317 if (err != -EINVAL)
318 printk(KERN_ERR
319 "cik_cp: Failed to load firmware \"%s\"\n",
320 fw_name);
321 release_firmware(rdev->pfp_fw);
322 rdev->pfp_fw = NULL;
323 release_firmware(rdev->me_fw);
324 rdev->me_fw = NULL;
325 release_firmware(rdev->ce_fw);
326 rdev->ce_fw = NULL;
327 release_firmware(rdev->rlc_fw);
328 rdev->rlc_fw = NULL;
329 release_firmware(rdev->mc_fw);
330 rdev->mc_fw = NULL;
331 }
332 return err;
333}
334
Alex Deucher8cc1a532013-04-09 12:41:24 -0400335/*
336 * Core functions
337 */
338/**
339 * cik_tiling_mode_table_init - init the hw tiling table
340 *
341 * @rdev: radeon_device pointer
342 *
343 * Starting with SI, the tiling setup is done globally in a
344 * set of 32 tiling modes. Rather than selecting each set of
345 * parameters per surface as on older asics, we just select
346 * which index in the tiling table we want to use, and the
347 * surface uses those parameters (CIK).
348 */
349static void cik_tiling_mode_table_init(struct radeon_device *rdev)
350{
351 const u32 num_tile_mode_states = 32;
352 const u32 num_secondary_tile_mode_states = 16;
353 u32 reg_offset, gb_tile_moden, split_equal_to_row_size;
354 u32 num_pipe_configs;
355 u32 num_rbs = rdev->config.cik.max_backends_per_se *
356 rdev->config.cik.max_shader_engines;
357
358 switch (rdev->config.cik.mem_row_size_in_kb) {
359 case 1:
360 split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_1KB;
361 break;
362 case 2:
363 default:
364 split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_2KB;
365 break;
366 case 4:
367 split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_4KB;
368 break;
369 }
370
371 num_pipe_configs = rdev->config.cik.max_tile_pipes;
372 if (num_pipe_configs > 8)
373 num_pipe_configs = 8; /* ??? */
374
375 if (num_pipe_configs == 8) {
376 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
377 switch (reg_offset) {
378 case 0:
379 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
380 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
381 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
382 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B));
383 break;
384 case 1:
385 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
386 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
387 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
388 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B));
389 break;
390 case 2:
391 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
392 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
393 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
394 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
395 break;
396 case 3:
397 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
398 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
399 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
400 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B));
401 break;
402 case 4:
403 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
404 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
405 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
406 TILE_SPLIT(split_equal_to_row_size));
407 break;
408 case 5:
409 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
410 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
411 break;
412 case 6:
413 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
414 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
415 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
416 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
417 break;
418 case 7:
419 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
420 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
421 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
422 TILE_SPLIT(split_equal_to_row_size));
423 break;
424 case 8:
425 gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
426 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16));
427 break;
428 case 9:
429 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
430 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
431 break;
432 case 10:
433 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
434 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
435 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
436 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
437 break;
438 case 11:
439 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
440 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
441 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
442 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
443 break;
444 case 12:
445 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
446 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
447 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
448 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
449 break;
450 case 13:
451 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
452 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
453 break;
454 case 14:
455 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
456 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
457 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
458 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
459 break;
460 case 16:
461 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
462 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
463 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
464 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
465 break;
466 case 17:
467 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
468 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
469 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
470 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
471 break;
472 case 27:
473 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
474 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
475 break;
476 case 28:
477 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
478 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
479 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
480 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
481 break;
482 case 29:
483 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
484 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
485 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
486 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
487 break;
488 case 30:
489 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
490 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
491 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
492 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
493 break;
494 default:
495 gb_tile_moden = 0;
496 break;
497 }
498 WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
499 }
500 for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) {
501 switch (reg_offset) {
502 case 0:
503 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
504 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
505 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
506 NUM_BANKS(ADDR_SURF_16_BANK));
507 break;
508 case 1:
509 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
510 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
511 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
512 NUM_BANKS(ADDR_SURF_16_BANK));
513 break;
514 case 2:
515 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
516 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
517 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
518 NUM_BANKS(ADDR_SURF_16_BANK));
519 break;
520 case 3:
521 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
522 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
523 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
524 NUM_BANKS(ADDR_SURF_16_BANK));
525 break;
526 case 4:
527 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
528 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
529 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
530 NUM_BANKS(ADDR_SURF_8_BANK));
531 break;
532 case 5:
533 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
534 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
535 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
536 NUM_BANKS(ADDR_SURF_4_BANK));
537 break;
538 case 6:
539 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
540 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
541 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
542 NUM_BANKS(ADDR_SURF_2_BANK));
543 break;
544 case 8:
545 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
546 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
547 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
548 NUM_BANKS(ADDR_SURF_16_BANK));
549 break;
550 case 9:
551 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
552 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
553 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
554 NUM_BANKS(ADDR_SURF_16_BANK));
555 break;
556 case 10:
557 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
558 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
559 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
560 NUM_BANKS(ADDR_SURF_16_BANK));
561 break;
562 case 11:
563 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
564 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
565 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
566 NUM_BANKS(ADDR_SURF_16_BANK));
567 break;
568 case 12:
569 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
570 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
571 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
572 NUM_BANKS(ADDR_SURF_8_BANK));
573 break;
574 case 13:
575 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
576 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
577 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
578 NUM_BANKS(ADDR_SURF_4_BANK));
579 break;
580 case 14:
581 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
582 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
583 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
584 NUM_BANKS(ADDR_SURF_2_BANK));
585 break;
586 default:
587 gb_tile_moden = 0;
588 break;
589 }
590 WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
591 }
592 } else if (num_pipe_configs == 4) {
593 if (num_rbs == 4) {
594 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
595 switch (reg_offset) {
596 case 0:
597 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
598 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
599 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
600 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B));
601 break;
602 case 1:
603 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
604 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
605 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
606 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B));
607 break;
608 case 2:
609 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
610 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
611 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
612 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
613 break;
614 case 3:
615 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
616 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
617 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
618 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B));
619 break;
620 case 4:
621 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
622 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
623 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
624 TILE_SPLIT(split_equal_to_row_size));
625 break;
626 case 5:
627 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
628 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
629 break;
630 case 6:
631 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
632 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
633 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
634 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
635 break;
636 case 7:
637 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
638 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
639 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
640 TILE_SPLIT(split_equal_to_row_size));
641 break;
642 case 8:
643 gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
644 PIPE_CONFIG(ADDR_SURF_P4_16x16));
645 break;
646 case 9:
647 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
648 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
649 break;
650 case 10:
651 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
652 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
653 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
654 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
655 break;
656 case 11:
657 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
658 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
659 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
660 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
661 break;
662 case 12:
663 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
664 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
665 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
666 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
667 break;
668 case 13:
669 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
670 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
671 break;
672 case 14:
673 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
674 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
675 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
676 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
677 break;
678 case 16:
679 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
680 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
681 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
682 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
683 break;
684 case 17:
685 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
686 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
687 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
688 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
689 break;
690 case 27:
691 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
692 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
693 break;
694 case 28:
695 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
696 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
697 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
698 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
699 break;
700 case 29:
701 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
702 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
703 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
704 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
705 break;
706 case 30:
707 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
708 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
709 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
710 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
711 break;
712 default:
713 gb_tile_moden = 0;
714 break;
715 }
716 WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
717 }
718 } else if (num_rbs < 4) {
719 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
720 switch (reg_offset) {
721 case 0:
722 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
723 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
724 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
725 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B));
726 break;
727 case 1:
728 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
729 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
730 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
731 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B));
732 break;
733 case 2:
734 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
735 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
736 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
737 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
738 break;
739 case 3:
740 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
741 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
742 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
743 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B));
744 break;
745 case 4:
746 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
747 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
748 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
749 TILE_SPLIT(split_equal_to_row_size));
750 break;
751 case 5:
752 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
753 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
754 break;
755 case 6:
756 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
757 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
758 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
759 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
760 break;
761 case 7:
762 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
763 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
764 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
765 TILE_SPLIT(split_equal_to_row_size));
766 break;
767 case 8:
768 gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
769 PIPE_CONFIG(ADDR_SURF_P4_8x16));
770 break;
771 case 9:
772 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
773 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
774 break;
775 case 10:
776 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
777 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
778 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
779 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
780 break;
781 case 11:
782 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
783 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
784 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
785 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
786 break;
787 case 12:
788 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
789 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
790 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
791 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
792 break;
793 case 13:
794 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
795 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
796 break;
797 case 14:
798 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
799 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
800 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
801 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
802 break;
803 case 16:
804 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
805 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
806 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
807 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
808 break;
809 case 17:
810 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
811 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
812 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
813 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
814 break;
815 case 27:
816 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
817 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
818 break;
819 case 28:
820 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
821 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
822 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
823 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
824 break;
825 case 29:
826 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
827 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
828 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
829 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
830 break;
831 case 30:
832 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
833 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
834 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
835 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
836 break;
837 default:
838 gb_tile_moden = 0;
839 break;
840 }
841 WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
842 }
843 }
844 for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) {
845 switch (reg_offset) {
846 case 0:
847 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
848 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
849 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
850 NUM_BANKS(ADDR_SURF_16_BANK));
851 break;
852 case 1:
853 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
854 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
855 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
856 NUM_BANKS(ADDR_SURF_16_BANK));
857 break;
858 case 2:
859 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
860 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
861 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
862 NUM_BANKS(ADDR_SURF_16_BANK));
863 break;
864 case 3:
865 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
866 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
867 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
868 NUM_BANKS(ADDR_SURF_16_BANK));
869 break;
870 case 4:
871 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
872 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
873 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
874 NUM_BANKS(ADDR_SURF_16_BANK));
875 break;
876 case 5:
877 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
878 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
879 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
880 NUM_BANKS(ADDR_SURF_8_BANK));
881 break;
882 case 6:
883 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
884 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
885 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
886 NUM_BANKS(ADDR_SURF_4_BANK));
887 break;
888 case 8:
889 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
890 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
891 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
892 NUM_BANKS(ADDR_SURF_16_BANK));
893 break;
894 case 9:
895 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
896 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
897 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
898 NUM_BANKS(ADDR_SURF_16_BANK));
899 break;
900 case 10:
901 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
902 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
903 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
904 NUM_BANKS(ADDR_SURF_16_BANK));
905 break;
906 case 11:
907 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
908 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
909 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
910 NUM_BANKS(ADDR_SURF_16_BANK));
911 break;
912 case 12:
913 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
914 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
915 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
916 NUM_BANKS(ADDR_SURF_16_BANK));
917 break;
918 case 13:
919 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
920 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
921 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
922 NUM_BANKS(ADDR_SURF_8_BANK));
923 break;
924 case 14:
925 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
926 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
927 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
928 NUM_BANKS(ADDR_SURF_4_BANK));
929 break;
930 default:
931 gb_tile_moden = 0;
932 break;
933 }
934 WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
935 }
936 } else if (num_pipe_configs == 2) {
937 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
938 switch (reg_offset) {
939 case 0:
940 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
941 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
942 PIPE_CONFIG(ADDR_SURF_P2) |
943 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B));
944 break;
945 case 1:
946 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
947 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
948 PIPE_CONFIG(ADDR_SURF_P2) |
949 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B));
950 break;
951 case 2:
952 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
953 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
954 PIPE_CONFIG(ADDR_SURF_P2) |
955 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
956 break;
957 case 3:
958 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
959 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
960 PIPE_CONFIG(ADDR_SURF_P2) |
961 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B));
962 break;
963 case 4:
964 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
965 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
966 PIPE_CONFIG(ADDR_SURF_P2) |
967 TILE_SPLIT(split_equal_to_row_size));
968 break;
969 case 5:
970 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
971 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
972 break;
973 case 6:
974 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
975 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
976 PIPE_CONFIG(ADDR_SURF_P2) |
977 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
978 break;
979 case 7:
980 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
981 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
982 PIPE_CONFIG(ADDR_SURF_P2) |
983 TILE_SPLIT(split_equal_to_row_size));
984 break;
985 case 8:
986 gb_tile_moden = ARRAY_MODE(ARRAY_LINEAR_ALIGNED);
987 break;
988 case 9:
989 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
990 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
991 break;
992 case 10:
993 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
994 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
995 PIPE_CONFIG(ADDR_SURF_P2) |
996 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
997 break;
998 case 11:
999 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1000 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
1001 PIPE_CONFIG(ADDR_SURF_P2) |
1002 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1003 break;
1004 case 12:
1005 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
1006 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
1007 PIPE_CONFIG(ADDR_SURF_P2) |
1008 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1009 break;
1010 case 13:
1011 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1012 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
1013 break;
1014 case 14:
1015 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1016 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1017 PIPE_CONFIG(ADDR_SURF_P2) |
1018 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1019 break;
1020 case 16:
1021 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1022 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1023 PIPE_CONFIG(ADDR_SURF_P2) |
1024 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1025 break;
1026 case 17:
1027 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
1028 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1029 PIPE_CONFIG(ADDR_SURF_P2) |
1030 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1031 break;
1032 case 27:
1033 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1034 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
1035 break;
1036 case 28:
1037 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
1038 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
1039 PIPE_CONFIG(ADDR_SURF_P2) |
1040 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1041 break;
1042 case 29:
1043 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1044 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
1045 PIPE_CONFIG(ADDR_SURF_P2) |
1046 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1047 break;
1048 case 30:
1049 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
1050 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
1051 PIPE_CONFIG(ADDR_SURF_P2) |
1052 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1053 break;
1054 default:
1055 gb_tile_moden = 0;
1056 break;
1057 }
1058 WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
1059 }
1060 for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) {
1061 switch (reg_offset) {
1062 case 0:
1063 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
1064 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1065 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1066 NUM_BANKS(ADDR_SURF_16_BANK));
1067 break;
1068 case 1:
1069 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
1070 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
1071 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1072 NUM_BANKS(ADDR_SURF_16_BANK));
1073 break;
1074 case 2:
1075 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1076 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
1077 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1078 NUM_BANKS(ADDR_SURF_16_BANK));
1079 break;
1080 case 3:
1081 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1082 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1083 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1084 NUM_BANKS(ADDR_SURF_16_BANK));
1085 break;
1086 case 4:
1087 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1088 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1089 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1090 NUM_BANKS(ADDR_SURF_16_BANK));
1091 break;
1092 case 5:
1093 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1094 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1095 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1096 NUM_BANKS(ADDR_SURF_16_BANK));
1097 break;
1098 case 6:
1099 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1100 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1101 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1102 NUM_BANKS(ADDR_SURF_8_BANK));
1103 break;
1104 case 8:
1105 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
1106 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
1107 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1108 NUM_BANKS(ADDR_SURF_16_BANK));
1109 break;
1110 case 9:
1111 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
1112 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1113 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1114 NUM_BANKS(ADDR_SURF_16_BANK));
1115 break;
1116 case 10:
1117 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
1118 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1119 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1120 NUM_BANKS(ADDR_SURF_16_BANK));
1121 break;
1122 case 11:
1123 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
1124 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
1125 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1126 NUM_BANKS(ADDR_SURF_16_BANK));
1127 break;
1128 case 12:
1129 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1130 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
1131 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1132 NUM_BANKS(ADDR_SURF_16_BANK));
1133 break;
1134 case 13:
1135 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1136 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1137 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1138 NUM_BANKS(ADDR_SURF_16_BANK));
1139 break;
1140 case 14:
1141 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1142 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1143 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1144 NUM_BANKS(ADDR_SURF_8_BANK));
1145 break;
1146 default:
1147 gb_tile_moden = 0;
1148 break;
1149 }
1150 WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
1151 }
1152 } else
1153 DRM_ERROR("unknown num pipe config: 0x%x\n", num_pipe_configs);
1154}
1155
1156/**
1157 * cik_select_se_sh - select which SE, SH to address
1158 *
1159 * @rdev: radeon_device pointer
1160 * @se_num: shader engine to address
1161 * @sh_num: sh block to address
1162 *
1163 * Select which SE, SH combinations to address. Certain
1164 * registers are instanced per SE or SH. 0xffffffff means
1165 * broadcast to all SEs or SHs (CIK).
1166 */
1167static void cik_select_se_sh(struct radeon_device *rdev,
1168 u32 se_num, u32 sh_num)
1169{
1170 u32 data = INSTANCE_BROADCAST_WRITES;
1171
1172 if ((se_num == 0xffffffff) && (sh_num == 0xffffffff))
1173 data = SH_BROADCAST_WRITES | SE_BROADCAST_WRITES;
1174 else if (se_num == 0xffffffff)
1175 data |= SE_BROADCAST_WRITES | SH_INDEX(sh_num);
1176 else if (sh_num == 0xffffffff)
1177 data |= SH_BROADCAST_WRITES | SE_INDEX(se_num);
1178 else
1179 data |= SH_INDEX(sh_num) | SE_INDEX(se_num);
1180 WREG32(GRBM_GFX_INDEX, data);
1181}
1182
1183/**
1184 * cik_create_bitmask - create a bitmask
1185 *
1186 * @bit_width: length of the mask
1187 *
1188 * create a variable length bit mask (CIK).
1189 * Returns the bitmask.
1190 */
1191static u32 cik_create_bitmask(u32 bit_width)
1192{
1193 u32 i, mask = 0;
1194
1195 for (i = 0; i < bit_width; i++) {
1196 mask <<= 1;
1197 mask |= 1;
1198 }
1199 return mask;
1200}
1201
/**
 * cik_get_rb_disabled - compute the bitmask of disabled RBs
 *
 * @rdev: radeon_device pointer
 * @max_rb_num: max RBs (render backends) for the asic
 * @se_num: number of SEs (shader engines) for the asic
 * @sh_per_se: number of SH blocks per SE for the asic
 *
 * Calculates the bitmask of disabled RBs (CIK).
 * Returns the disabled RB bitmask.
 */
static u32 cik_get_rb_disabled(struct radeon_device *rdev,
			       u32 max_rb_num, u32 se_num,
			       u32 sh_per_se)
{
	u32 data, mask;

	/* hw fuse info; only honored when bit 0 is set — presumably a
	 * valid/enable bit, TODO confirm against the register spec */
	data = RREG32(CC_RB_BACKEND_DISABLE);
	if (data & 1)
		data &= BACKEND_DISABLE_MASK;
	else
		data = 0;
	/* merge in the driver/user requested disables */
	data |= RREG32(GC_USER_RB_BACKEND_DISABLE);

	data >>= BACKEND_DISABLE_SHIFT;

	/* mask off bits beyond the number of RBs actually present per SH */
	mask = cik_create_bitmask(max_rb_num / se_num / sh_per_se);

	return data & mask;
}
1232
/**
 * cik_setup_rb - setup the RBs on the asic
 *
 * @rdev: radeon_device pointer
 * @se_num: number of SEs (shader engines) for the asic
 * @sh_per_se: number of SH blocks per SE for the asic
 * @max_rb_num: max RBs (render backends) for the asic
 *
 * Configures per-SE/SH RB registers (CIK).
 */
static void cik_setup_rb(struct radeon_device *rdev,
			 u32 se_num, u32 sh_per_se,
			 u32 max_rb_num)
{
	int i, j;
	u32 data, mask;
	u32 disabled_rbs = 0;
	u32 enabled_rbs = 0;

	/* gather the per-SE/SH disabled-RB info into one packed mask,
	 * CIK_RB_BITMAP_WIDTH_PER_SH bits per SH slot */
	for (i = 0; i < se_num; i++) {
		for (j = 0; j < sh_per_se; j++) {
			cik_select_se_sh(rdev, i, j);
			data = cik_get_rb_disabled(rdev, max_rb_num, se_num, sh_per_se);
			disabled_rbs |= data << ((i * sh_per_se + j) * CIK_RB_BITMAP_WIDTH_PER_SH);
		}
	}
	/* restore broadcast so subsequent writes hit all instances */
	cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);

	/* invert: build the mask of RBs that are actually usable */
	mask = 1;
	for (i = 0; i < max_rb_num; i++) {
		if (!(disabled_rbs & mask))
			enabled_rbs |= mask;
		mask <<= 1;
	}

	/* program each SE's raster config from its enabled-RB bits,
	 * consuming 2 bits of enabled_rbs per SH */
	for (i = 0; i < se_num; i++) {
		cik_select_se_sh(rdev, i, 0xffffffff);
		data = 0;
		for (j = 0; j < sh_per_se; j++) {
			switch (enabled_rbs & 3) {
			case 1:	/* only the first RB usable */
				data |= (RASTER_CONFIG_RB_MAP_0 << (i * sh_per_se + j) * 2);
				break;
			case 2:	/* only the second RB usable */
				data |= (RASTER_CONFIG_RB_MAP_3 << (i * sh_per_se + j) * 2);
				break;
			case 3:	/* both usable */
			default:
				data |= (RASTER_CONFIG_RB_MAP_2 << (i * sh_per_se + j) * 2);
				break;
			}
			enabled_rbs >>= 2;
		}
		WREG32(PA_SC_RASTER_CONFIG, data);
	}
	/* back to broadcast for any later instanced register writes */
	cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
}
1290
/**
 * cik_gpu_init - setup the 3D engine
 *
 * @rdev: radeon_device pointer
 *
 * Configures the 3D engine and tiling configuration
 * registers so that the 3D engine is usable.
 */
static void cik_gpu_init(struct radeon_device *rdev)
{
	u32 gb_addr_config = RREG32(GB_ADDR_CONFIG);
	u32 mc_shared_chmap, mc_arb_ramcfg;
	u32 hdp_host_path_cntl;
	u32 tmp;
	int i, j;

	/* per-asic shader/pipe limits; KAVERI values not filled in yet */
	switch (rdev->family) {
	case CHIP_BONAIRE:
		rdev->config.cik.max_shader_engines = 2;
		rdev->config.cik.max_tile_pipes = 4;
		rdev->config.cik.max_cu_per_sh = 7;
		rdev->config.cik.max_sh_per_se = 1;
		rdev->config.cik.max_backends_per_se = 2;
		rdev->config.cik.max_texture_channel_caches = 4;
		rdev->config.cik.max_gprs = 256;
		rdev->config.cik.max_gs_threads = 32;
		rdev->config.cik.max_hw_contexts = 8;

		rdev->config.cik.sc_prim_fifo_size_frontend = 0x20;
		rdev->config.cik.sc_prim_fifo_size_backend = 0x100;
		rdev->config.cik.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.cik.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_KAVERI:
		/* TODO */
		break;
	case CHIP_KABINI:
	default:
		rdev->config.cik.max_shader_engines = 1;
		rdev->config.cik.max_tile_pipes = 2;
		rdev->config.cik.max_cu_per_sh = 2;
		rdev->config.cik.max_sh_per_se = 1;
		rdev->config.cik.max_backends_per_se = 1;
		rdev->config.cik.max_texture_channel_caches = 2;
		rdev->config.cik.max_gprs = 256;
		rdev->config.cik.max_gs_threads = 16;
		rdev->config.cik.max_hw_contexts = 8;

		rdev->config.cik.sc_prim_fifo_size_frontend = 0x20;
		rdev->config.cik.sc_prim_fifo_size_backend = 0x100;
		rdev->config.cik.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.cik.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
		break;
	}

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}

	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));

	/* enable frame buffer reads and writes */
	WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN);

	/* NOTE(review): mc_shared_chmap is read but never consumed below */
	mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
	mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);

	rdev->config.cik.num_tile_pipes = rdev->config.cik.max_tile_pipes;
	rdev->config.cik.mem_max_burst_length_bytes = 256;
	/* derive the DRAM row size (in KB) from the column-count fuses,
	 * capped at 4 KB */
	tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT;
	rdev->config.cik.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
	if (rdev->config.cik.mem_row_size_in_kb > 4)
		rdev->config.cik.mem_row_size_in_kb = 4;
	/* XXX use MC settings? */
	rdev->config.cik.shader_engine_tile_size = 32;
	rdev->config.cik.num_gpus = 1;
	rdev->config.cik.multi_gpu_tile_size = 64;

	/* fix up row size */
	gb_addr_config &= ~ROW_SIZE_MASK;
	switch (rdev->config.cik.mem_row_size_in_kb) {
	case 1:
	default:
		gb_addr_config |= ROW_SIZE(0);
		break;
	case 2:
		gb_addr_config |= ROW_SIZE(1);
		break;
	case 4:
		gb_addr_config |= ROW_SIZE(2);
		break;
	}

	/* setup tiling info dword. gb_addr_config is not adequate since it does
	 * not have bank info, so create a custom tiling dword.
	 * bits 3:0   num_pipes
	 * bits 7:4   num_banks
	 * bits 11:8  group_size
	 * bits 15:12 row_size
	 */
	rdev->config.cik.tile_config = 0;
	switch (rdev->config.cik.num_tile_pipes) {
	case 1:
		rdev->config.cik.tile_config |= (0 << 0);
		break;
	case 2:
		rdev->config.cik.tile_config |= (1 << 0);
		break;
	case 4:
		rdev->config.cik.tile_config |= (2 << 0);
		break;
	case 8:
	default:
		/* XXX what about 12? */
		rdev->config.cik.tile_config |= (3 << 0);
		break;
	}
	if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT)
		rdev->config.cik.tile_config |= 1 << 4;
	else
		rdev->config.cik.tile_config |= 0 << 4;
	rdev->config.cik.tile_config |=
		((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
	rdev->config.cik.tile_config |=
		((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12;

	/* the same address-config value feeds GB, HDP and DMIF */
	WREG32(GB_ADDR_CONFIG, gb_addr_config);
	WREG32(HDP_ADDR_CONFIG, gb_addr_config);
	WREG32(DMIF_ADDR_CALC, gb_addr_config);

	cik_tiling_mode_table_init(rdev);

	cik_setup_rb(rdev, rdev->config.cik.max_shader_engines,
		     rdev->config.cik.max_sh_per_se,
		     rdev->config.cik.max_backends_per_se);

	/* set HW defaults for 3D engine */
	WREG32(CP_MEQ_THRESHOLDS, MEQ1_START(0x30) | MEQ2_START(0x60));

	WREG32(SX_DEBUG_1, 0x20);

	WREG32(TA_CNTL_AUX, 0x00010000);

	tmp = RREG32(SPI_CONFIG_CNTL);
	tmp |= 0x03000000;
	WREG32(SPI_CONFIG_CNTL, tmp);

	WREG32(SQ_CONFIG, 1);

	WREG32(DB_DEBUG, 0);

	/* read-modify-write the DB/CB debug and control registers,
	 * preserving the bits outside the masks below */
	tmp = RREG32(DB_DEBUG2) & ~0xf00fffff;
	tmp |= 0x00000400;
	WREG32(DB_DEBUG2, tmp);

	tmp = RREG32(DB_DEBUG3) & ~0x0002021c;
	tmp |= 0x00020200;
	WREG32(DB_DEBUG3, tmp);

	tmp = RREG32(CB_HW_CONTROL) & ~0x00010000;
	tmp |= 0x00018208;
	WREG32(CB_HW_CONTROL, tmp);

	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));

	/* scan-converter FIFO sizes chosen per-asic above */
	WREG32(PA_SC_FIFO_SIZE, (SC_FRONTEND_PRIM_FIFO_SIZE(rdev->config.cik.sc_prim_fifo_size_frontend) |
				 SC_BACKEND_PRIM_FIFO_SIZE(rdev->config.cik.sc_prim_fifo_size_backend) |
				 SC_HIZ_TILE_FIFO_SIZE(rdev->config.cik.sc_hiz_tile_fifo_size) |
				 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.cik.sc_earlyz_tile_fifo_size)));

	WREG32(VGT_NUM_INSTANCES, 1);

	WREG32(CP_PERFMON_CNTL, 0);

	WREG32(SQ_CONFIG, 0);

	WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
					  FORCE_EOV_MAX_REZ_CNT(255)));

	WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC) |
	       AUTO_INVLD_EN(ES_AND_GS_AUTO));

	WREG32(VGT_GS_VERTEX_REUSE, 16);
	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);

	tmp = RREG32(HDP_MISC_CNTL);
	tmp |= HDP_FLUSH_INVALIDATE_CACHE;
	WREG32(HDP_MISC_CNTL, tmp);

	/* read back and rewrite HDP_HOST_PATH_CNTL unchanged */
	hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
	WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);

	WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));
	WREG32(PA_SC_ENHANCE, ENABLE_PA_SC_OUT_OF_ORDER);

	/* let the config settle before the CP is started */
	udelay(50);
}
1494
Alex Deucher841cf442012-12-18 21:47:44 -05001495/*
Alex Deucher2cae3bc2012-07-05 11:45:40 -04001496 * GPU scratch registers helpers function.
1497 */
1498/**
1499 * cik_scratch_init - setup driver info for CP scratch regs
1500 *
1501 * @rdev: radeon_device pointer
1502 *
1503 * Set up the number and offset of the CP scratch registers.
1504 * NOTE: use of CP scratch registers is a legacy inferface and
1505 * is not used by default on newer asics (r6xx+). On newer asics,
1506 * memory buffers are used for fences rather than scratch regs.
1507 */
1508static void cik_scratch_init(struct radeon_device *rdev)
1509{
1510 int i;
1511
1512 rdev->scratch.num_reg = 7;
1513 rdev->scratch.reg_base = SCRATCH_REG0;
1514 for (i = 0; i < rdev->scratch.num_reg; i++) {
1515 rdev->scratch.free[i] = true;
1516 rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
1517 }
1518}
1519
/**
 * cik_fence_ring_emit - emit a fence on the gfx ring
 *
 * @rdev: radeon_device pointer
 * @fence: radeon fence object
 *
 * Emits a fence sequence number on the gfx ring and flushes
 * GPU caches.
 */
void cik_fence_ring_emit(struct radeon_device *rdev,
			 struct radeon_fence *fence)
{
	struct radeon_ring *ring = &rdev->ring[fence->ring];
	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;

	/* EVENT_WRITE_EOP - flush caches, send int */
	radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
	radeon_ring_write(ring, (EOP_TCL1_ACTION_EN |
				 EOP_TC_ACTION_EN |
				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
				 EVENT_INDEX(5)));
	/* 64-bit fence address; low dword is 4-byte aligned */
	radeon_ring_write(ring, addr & 0xfffffffc);
	/* DATA_SEL(1): write the seq value; INT_SEL(2): raise an interrupt */
	radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) | DATA_SEL(1) | INT_SEL(2));
	radeon_ring_write(ring, fence->seq);
	radeon_ring_write(ring, 0);
	/* HDP flush */
	/* We should be using the new WAIT_REG_MEM special op packet here
	 * but it causes the CP to hang
	 */
	radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
				 WRITE_DATA_DST_SEL(0)));
	/* writing 0 to HDP_MEM_COHERENCY_FLUSH_CNTL triggers the flush */
	radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 0);
}
1556
1557void cik_semaphore_ring_emit(struct radeon_device *rdev,
1558 struct radeon_ring *ring,
1559 struct radeon_semaphore *semaphore,
1560 bool emit_wait)
1561{
1562 uint64_t addr = semaphore->gpu_addr;
1563 unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;
1564
1565 radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
1566 radeon_ring_write(ring, addr & 0xffffffff);
1567 radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) | sel);
1568}
1569
1570/*
1571 * IB stuff
1572 */
/**
 * cik_ring_ib_execute - emit an IB (Indirect Buffer) on the gfx ring
 *
 * @rdev: radeon_device pointer
 * @ib: radeon indirect buffer object
 *
 * Emits a DE (drawing engine) or CE (constant engine) IB
 * on the gfx ring.  IBs are usually generated by userspace
 * acceleration drivers and submitted to the kernel for
 * scheduling on the ring.  This function schedules the IB
 * on the gfx ring for execution by the GPU.
 */
void cik_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->ring];
	u32 header, control = INDIRECT_BUFFER_VALID;

	if (ib->is_const_ib) {
		/* set switch buffer packet before const IB */
		radeon_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
		radeon_ring_write(ring, 0);

		header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
	} else {
		u32 next_rptr;
		if (ring->rptr_save_reg) {
			/* 3 + 4 = dwords of this packet plus the IB packet below */
			next_rptr = ring->wptr + 3 + 4;
			radeon_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
			radeon_ring_write(ring, ((ring->rptr_save_reg -
						  PACKET3_SET_UCONFIG_REG_START) >> 2));
			radeon_ring_write(ring, next_rptr);
		} else if (rdev->wb.enabled) {
			/* 5 + 4 = dwords of this packet plus the IB packet below */
			next_rptr = ring->wptr + 5 + 4;
			radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
			radeon_ring_write(ring, WRITE_DATA_DST_SEL(1));
			radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
			radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
			radeon_ring_write(ring, next_rptr);
		}

		header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
	}

	/* length in dwords plus the vm id in bits 31:24 (0 when no VM bound) */
	control |= ib->length_dw |
		(ib->vm ? (ib->vm->id << 24) : 0);

	radeon_ring_write(ring, header);
	radeon_ring_write(ring,
#ifdef __BIG_ENDIAN
			  (2 << 0) |
#endif
			  (ib->gpu_addr & 0xFFFFFFFC));
	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
	radeon_ring_write(ring, control);
}
1628
1629/*
Alex Deucher841cf442012-12-18 21:47:44 -05001630 * CP.
 * On CIK, gfx and compute now have independent command processors.
1632 *
1633 * GFX
1634 * Gfx consists of a single ring and can process both gfx jobs and
1635 * compute jobs. The gfx CP consists of three microengines (ME):
1636 * PFP - Pre-Fetch Parser
1637 * ME - Micro Engine
1638 * CE - Constant Engine
1639 * The PFP and ME make up what is considered the Drawing Engine (DE).
 * The CE is an asynchronous engine used for updating buffer descriptors
1641 * used by the DE so that they can be loaded into cache in parallel
1642 * while the DE is processing state update packets.
1643 *
1644 * Compute
1645 * The compute CP consists of two microengines (ME):
1646 * MEC1 - Compute MicroEngine 1
1647 * MEC2 - Compute MicroEngine 2
1648 * Each MEC supports 4 compute pipes and each pipe supports 8 queues.
1649 * The queues are exposed to userspace and are programmed directly
1650 * by the compute runtime.
1651 */
1652/**
1653 * cik_cp_gfx_enable - enable/disable the gfx CP MEs
1654 *
1655 * @rdev: radeon_device pointer
1656 * @enable: enable or disable the MEs
1657 *
1658 * Halts or unhalts the gfx MEs.
1659 */
1660static void cik_cp_gfx_enable(struct radeon_device *rdev, bool enable)
1661{
1662 if (enable)
1663 WREG32(CP_ME_CNTL, 0);
1664 else {
1665 WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT));
1666 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
1667 }
1668 udelay(50);
1669}
1670
1671/**
1672 * cik_cp_gfx_load_microcode - load the gfx CP ME ucode
1673 *
1674 * @rdev: radeon_device pointer
1675 *
1676 * Loads the gfx PFP, ME, and CE ucode.
1677 * Returns 0 for success, -EINVAL if the ucode is not available.
1678 */
1679static int cik_cp_gfx_load_microcode(struct radeon_device *rdev)
1680{
1681 const __be32 *fw_data;
1682 int i;
1683
1684 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw)
1685 return -EINVAL;
1686
1687 cik_cp_gfx_enable(rdev, false);
1688
1689 /* PFP */
1690 fw_data = (const __be32 *)rdev->pfp_fw->data;
1691 WREG32(CP_PFP_UCODE_ADDR, 0);
1692 for (i = 0; i < CIK_PFP_UCODE_SIZE; i++)
1693 WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
1694 WREG32(CP_PFP_UCODE_ADDR, 0);
1695
1696 /* CE */
1697 fw_data = (const __be32 *)rdev->ce_fw->data;
1698 WREG32(CP_CE_UCODE_ADDR, 0);
1699 for (i = 0; i < CIK_CE_UCODE_SIZE; i++)
1700 WREG32(CP_CE_UCODE_DATA, be32_to_cpup(fw_data++));
1701 WREG32(CP_CE_UCODE_ADDR, 0);
1702
1703 /* ME */
1704 fw_data = (const __be32 *)rdev->me_fw->data;
1705 WREG32(CP_ME_RAM_WADDR, 0);
1706 for (i = 0; i < CIK_ME_UCODE_SIZE; i++)
1707 WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
1708 WREG32(CP_ME_RAM_WADDR, 0);
1709
1710 WREG32(CP_PFP_UCODE_ADDR, 0);
1711 WREG32(CP_CE_UCODE_ADDR, 0);
1712 WREG32(CP_ME_RAM_WADDR, 0);
1713 WREG32(CP_ME_RAM_RADDR, 0);
1714 return 0;
1715}
1716
/**
 * cik_cp_gfx_start - start the gfx ring
 *
 * @rdev: radeon_device pointer
 *
 * Enables the ring and loads the clear state context and other
 * packets required to init the ring.
 * Returns 0 for success, error for failure.
 */
static int cik_cp_gfx_start(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	int r, i;

	/* init the CP */
	WREG32(CP_MAX_CONTEXT, rdev->config.cik.max_hw_contexts - 1);
	WREG32(CP_ENDIAN_SWAP, 0);
	WREG32(CP_DEVICE_ID, 1);

	cik_cp_gfx_enable(rdev, true);

	/* 17 = dwords of the fixed packets emitted below, in addition to
	 * the cik_default_size dwords of golden context state */
	r = radeon_ring_lock(rdev, ring, cik_default_size + 17);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}

	/* init the CE partitions.  CE only used for gfx on CIK */
	radeon_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
	radeon_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
	radeon_ring_write(ring, 0xc000);
	radeon_ring_write(ring, 0xc000);

	/* setup clear context state */
	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	radeon_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
	radeon_ring_write(ring, 0x80000000);
	radeon_ring_write(ring, 0x80000000);

	/* emit the golden register state from cik_blit_shaders */
	for (i = 0; i < cik_default_size; i++)
		radeon_ring_write(ring, cik_default_state[i]);

	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);

	/* set clear context state */
	radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
	radeon_ring_write(ring, 0);

	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
	radeon_ring_write(ring, 0x00000316);
	radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
	radeon_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */

	radeon_ring_unlock_commit(rdev, ring);

	return 0;
}
1777
/**
 * cik_cp_gfx_fini - stop the gfx ring
 *
 * @rdev: radeon_device pointer
 *
 * Stop the gfx ring and tear down the driver ring
 * info.
 */
static void cik_cp_gfx_fini(struct radeon_device *rdev)
{
	/* halt the gfx MEs before releasing the ring */
	cik_cp_gfx_enable(rdev, false);
	radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
}
1791
/**
 * cik_cp_gfx_resume - setup the gfx ring buffer registers
 *
 * @rdev: radeon_device pointer
 *
 * Program the location and size of the gfx ring buffer
 * and test it to make sure it's working.
 * Returns 0 for success, error for failure.
 */
static int cik_cp_gfx_resume(struct radeon_device *rdev)
{
	struct radeon_ring *ring;
	u32 tmp;
	u32 rb_bufsz;
	u64 rb_addr;
	int r;

	WREG32(CP_SEM_WAIT_TIMER, 0x0);
	WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);

	/* Set the write pointer delay */
	WREG32(CP_RB_WPTR_DELAY, 0);

	/* set the RB to use vmid 0 */
	WREG32(CP_RB_VMID, 0);

	WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);

	/* ring 0 - compute and gfx */
	/* Set ring buffer size (log2 of the size in dwords-ish units,
	 * plus the min read-ahead field in bits 15:8) */
	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	rb_bufsz = drm_order(ring->ring_size / 8);
	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
	tmp |= BUF_SWAP_32BIT;
#endif
	WREG32(CP_RB0_CNTL, tmp);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA);
	ring->wptr = 0;
	WREG32(CP_RB0_WPTR, ring->wptr);

	/* set the wb address whether it's enabled or not */
	WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
	WREG32(CP_RB0_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);

	/* scratch register shadowing is no longer supported */
	WREG32(SCRATCH_UMSK, 0);

	/* without writeback, disable rptr updates via memory */
	if (!rdev->wb.enabled)
		tmp |= RB_NO_UPDATE;

	mdelay(1);
	WREG32(CP_RB0_CNTL, tmp);

	/* ring base address, 256-byte aligned */
	rb_addr = ring->gpu_addr >> 8;
	WREG32(CP_RB0_BASE, rb_addr);
	WREG32(CP_RB0_BASE_HI, upper_32_bits(rb_addr));

	ring->rptr = RREG32(CP_RB0_RPTR);

	/* start the ring */
	cik_cp_gfx_start(rdev);
	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = true;
	r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
	if (r) {
		rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
		return r;
	}
	return 0;
}
1864
1865/**
1866 * cik_cp_compute_enable - enable/disable the compute CP MEs
1867 *
1868 * @rdev: radeon_device pointer
1869 * @enable: enable or disable the MEs
1870 *
1871 * Halts or unhalts the compute MEs.
1872 */
1873static void cik_cp_compute_enable(struct radeon_device *rdev, bool enable)
1874{
1875 if (enable)
1876 WREG32(CP_MEC_CNTL, 0);
1877 else
1878 WREG32(CP_MEC_CNTL, (MEC_ME1_HALT | MEC_ME2_HALT));
1879 udelay(50);
1880}
1881
1882/**
1883 * cik_cp_compute_load_microcode - load the compute CP ME ucode
1884 *
1885 * @rdev: radeon_device pointer
1886 *
1887 * Loads the compute MEC1&2 ucode.
1888 * Returns 0 for success, -EINVAL if the ucode is not available.
1889 */
1890static int cik_cp_compute_load_microcode(struct radeon_device *rdev)
1891{
1892 const __be32 *fw_data;
1893 int i;
1894
1895 if (!rdev->mec_fw)
1896 return -EINVAL;
1897
1898 cik_cp_compute_enable(rdev, false);
1899
1900 /* MEC1 */
1901 fw_data = (const __be32 *)rdev->mec_fw->data;
1902 WREG32(CP_MEC_ME1_UCODE_ADDR, 0);
1903 for (i = 0; i < CIK_MEC_UCODE_SIZE; i++)
1904 WREG32(CP_MEC_ME1_UCODE_DATA, be32_to_cpup(fw_data++));
1905 WREG32(CP_MEC_ME1_UCODE_ADDR, 0);
1906
1907 if (rdev->family == CHIP_KAVERI) {
1908 /* MEC2 */
1909 fw_data = (const __be32 *)rdev->mec_fw->data;
1910 WREG32(CP_MEC_ME2_UCODE_ADDR, 0);
1911 for (i = 0; i < CIK_MEC_UCODE_SIZE; i++)
1912 WREG32(CP_MEC_ME2_UCODE_DATA, be32_to_cpup(fw_data++));
1913 WREG32(CP_MEC_ME2_UCODE_ADDR, 0);
1914 }
1915
1916 return 0;
1917}
1918
/**
 * cik_cp_compute_start - start the compute queues
 *
 * @rdev: radeon_device pointer
 *
 * Enable the compute queues.
 * Returns 0 for success, error for failure.
 */
static int cik_cp_compute_start(struct radeon_device *rdev)
{
	/* TODO: queue bring-up not implemented yet; stub returns success */
	return 0;
}
1932
/**
 * cik_cp_compute_fini - stop the compute queues
 *
 * @rdev: radeon_device pointer
 *
 * Stop the compute queues and tear down the driver queue
 * info.
 */
static void cik_cp_compute_fini(struct radeon_device *rdev)
{
	/* halt both compute MEs */
	cik_cp_compute_enable(rdev, false);
	/* XXX: todo - driver queue state teardown not implemented yet */
}
1946
/**
 * cik_cp_compute_resume - setup the compute queue registers
 *
 * @rdev: radeon_device pointer
 *
 * Program the compute queues and test them to make sure they
 * are working.
 * Returns 0 for success, error for failure.
 */
static int cik_cp_compute_resume(struct radeon_device *rdev)
{
	/* XXX: todo - queue register programming not implemented yet;
	 * for now just forward whatever cik_cp_compute_start() says. */
	return cik_cp_compute_start(rdev);
}
1966
/* XXX temporary wrappers to handle both compute and gfx */

/**
 * cik_cp_enable - enable/disable the gfx and compute CPs together
 *
 * @rdev: radeon_device pointer
 * @enable: enable or disable the command processors
 *
 * Toggles the gfx CP first, then the compute MEs.
 */
static void cik_cp_enable(struct radeon_device *rdev, bool enable)
{
	cik_cp_gfx_enable(rdev, enable);
	cik_cp_compute_enable(rdev, enable);
}
1974
/**
 * cik_cp_load_microcode - load gfx and compute CP ucode
 *
 * @rdev: radeon_device pointer
 *
 * Loads the gfx CP ucode, then the compute MEC ucode.
 * Returns 0 for success, the first failing loader's error otherwise.
 */
static int cik_cp_load_microcode(struct radeon_device *rdev)
{
	int r;

	r = cik_cp_gfx_load_microcode(rdev);
	if (r)
		return r;

	return cik_cp_compute_load_microcode(rdev);
}
1989
/**
 * cik_cp_fini - tear down the gfx and compute CPs
 *
 * @rdev: radeon_device pointer
 *
 * Stops the gfx ring, then the compute queues, freeing the
 * associated driver state.
 */
static void cik_cp_fini(struct radeon_device *rdev)
{
	cik_cp_gfx_fini(rdev);
	cik_cp_compute_fini(rdev);
}
1996
1997/* XXX */
1998static int cik_cp_resume(struct radeon_device *rdev)
1999{
2000 int r;
2001
2002 /* Reset all cp blocks */
2003 WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
2004 RREG32(GRBM_SOFT_RESET);
2005 mdelay(15);
2006 WREG32(GRBM_SOFT_RESET, 0);
2007 RREG32(GRBM_SOFT_RESET);
2008
2009 r = cik_cp_load_microcode(rdev);
2010 if (r)
2011 return r;
2012
2013 r = cik_cp_gfx_resume(rdev);
2014 if (r)
2015 return r;
2016 r = cik_cp_compute_resume(rdev);
2017 if (r)
2018 return r;
2019
2020 return 0;
2021}
2022
Alex Deucher6f2043c2013-04-09 12:43:41 -04002023/**
2024 * cik_gpu_is_lockup - check if the 3D engine is locked up
2025 *
2026 * @rdev: radeon_device pointer
2027 * @ring: radeon_ring structure holding ring information
2028 *
2029 * Check if the 3D engine is locked up (CIK).
2030 * Returns true if the engine is locked, false if not.
2031 */
2032bool cik_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
2033{
2034 u32 srbm_status, srbm_status2;
2035 u32 grbm_status, grbm_status2;
2036 u32 grbm_status_se0, grbm_status_se1, grbm_status_se2, grbm_status_se3;
2037
2038 srbm_status = RREG32(SRBM_STATUS);
2039 srbm_status2 = RREG32(SRBM_STATUS2);
2040 grbm_status = RREG32(GRBM_STATUS);
2041 grbm_status2 = RREG32(GRBM_STATUS2);
2042 grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
2043 grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
2044 grbm_status_se2 = RREG32(GRBM_STATUS_SE2);
2045 grbm_status_se3 = RREG32(GRBM_STATUS_SE3);
2046 if (!(grbm_status & GUI_ACTIVE)) {
2047 radeon_ring_lockup_update(ring);
2048 return false;
2049 }
2050 /* force CP activities */
2051 radeon_ring_force_activity(rdev, ring);
2052 return radeon_ring_test_lockup(rdev, ring);
2053}
2054
/**
 * cik_gfx_gpu_soft_reset - soft reset the 3D engine and CPG
 *
 * @rdev: radeon_device pointer
 *
 * Soft reset the GFX engine and CPG blocks (CIK).  Dumps the
 * GRBM/SRBM status registers before and after the reset so a
 * lockup can be diagnosed from the kernel log.
 * XXX: deal with resetting RLC and CPF
 * Returns 0 for success.
 */
static int cik_gfx_gpu_soft_reset(struct radeon_device *rdev)
{
	struct evergreen_mc_save save;
	u32 grbm_reset = 0;

	/* nothing to do if the GFX engine is already idle */
	if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
		return 0;

	dev_info(rdev->dev, "GPU GFX softreset \n");
	dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
		RREG32(GRBM_STATUS));
	dev_info(rdev->dev, " GRBM_STATUS2=0x%08X\n",
		RREG32(GRBM_STATUS2));
	dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
		RREG32(GRBM_STATUS_SE0));
	dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n",
		RREG32(GRBM_STATUS_SE1));
	dev_info(rdev->dev, " GRBM_STATUS_SE2=0x%08X\n",
		RREG32(GRBM_STATUS_SE2));
	dev_info(rdev->dev, " GRBM_STATUS_SE3=0x%08X\n",
		RREG32(GRBM_STATUS_SE3));
	dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
		RREG32(SRBM_STATUS));
	dev_info(rdev->dev, " SRBM_STATUS2=0x%08X\n",
		RREG32(SRBM_STATUS2));
	/* stop MC traffic while the engine is reset */
	evergreen_mc_stop(rdev, &save);
	if (radeon_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
	}
	/* Disable CP parsing/prefetching */
	WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT);

	/* reset all the gfx block and all CPG blocks */
	grbm_reset = SOFT_RESET_CPG | SOFT_RESET_GFX;

	dev_info(rdev->dev, "  GRBM_SOFT_RESET=0x%08X\n", grbm_reset);
	/* the readbacks flush the posted writes before/after the delay */
	WREG32(GRBM_SOFT_RESET, grbm_reset);
	(void)RREG32(GRBM_SOFT_RESET);
	udelay(50);
	WREG32(GRBM_SOFT_RESET, 0);
	(void)RREG32(GRBM_SOFT_RESET);
	/* Wait a little for things to settle down */
	udelay(50);
	dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
		RREG32(GRBM_STATUS));
	dev_info(rdev->dev, " GRBM_STATUS2=0x%08X\n",
		RREG32(GRBM_STATUS2));
	dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
		RREG32(GRBM_STATUS_SE0));
	dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n",
		RREG32(GRBM_STATUS_SE1));
	dev_info(rdev->dev, " GRBM_STATUS_SE2=0x%08X\n",
		RREG32(GRBM_STATUS_SE2));
	dev_info(rdev->dev, " GRBM_STATUS_SE3=0x%08X\n",
		RREG32(GRBM_STATUS_SE3));
	dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
		RREG32(SRBM_STATUS));
	dev_info(rdev->dev, " SRBM_STATUS2=0x%08X\n",
		RREG32(SRBM_STATUS2));
	evergreen_mc_resume(rdev, &save);
	return 0;
}
2126
/**
 * cik_compute_gpu_soft_reset - soft reset CPC
 *
 * @rdev: radeon_device pointer
 *
 * Soft reset the CPC blocks (CIK).  Dumps the GRBM/SRBM status
 * registers before and after the reset for diagnosis.
 * XXX: deal with resetting RLC and CPF
 * Returns 0 for success.
 */
static int cik_compute_gpu_soft_reset(struct radeon_device *rdev)
{
	struct evergreen_mc_save save;
	u32 grbm_reset = 0;

	dev_info(rdev->dev, "GPU compute softreset \n");
	dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
		RREG32(GRBM_STATUS));
	dev_info(rdev->dev, " GRBM_STATUS2=0x%08X\n",
		RREG32(GRBM_STATUS2));
	dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
		RREG32(GRBM_STATUS_SE0));
	dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n",
		RREG32(GRBM_STATUS_SE1));
	dev_info(rdev->dev, " GRBM_STATUS_SE2=0x%08X\n",
		RREG32(GRBM_STATUS_SE2));
	dev_info(rdev->dev, " GRBM_STATUS_SE3=0x%08X\n",
		RREG32(GRBM_STATUS_SE3));
	dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
		RREG32(SRBM_STATUS));
	dev_info(rdev->dev, " SRBM_STATUS2=0x%08X\n",
		RREG32(SRBM_STATUS2));
	/* stop MC traffic while the block is reset */
	evergreen_mc_stop(rdev, &save);
	if (radeon_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
	}
	/* Disable CP parsing/prefetching */
	WREG32(CP_MEC_CNTL, MEC_ME1_HALT | MEC_ME2_HALT);

	/* reset all the CPC blocks */
	/* NOTE(review): the comment says CPC but the code asserts
	 * SOFT_RESET_CPG - confirm whether SOFT_RESET_CPC was intended */
	grbm_reset = SOFT_RESET_CPG;

	dev_info(rdev->dev, "  GRBM_SOFT_RESET=0x%08X\n", grbm_reset);
	/* the readbacks flush the posted writes before/after the delay */
	WREG32(GRBM_SOFT_RESET, grbm_reset);
	(void)RREG32(GRBM_SOFT_RESET);
	udelay(50);
	WREG32(GRBM_SOFT_RESET, 0);
	(void)RREG32(GRBM_SOFT_RESET);
	/* Wait a little for things to settle down */
	udelay(50);
	dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
		RREG32(GRBM_STATUS));
	dev_info(rdev->dev, " GRBM_STATUS2=0x%08X\n",
		RREG32(GRBM_STATUS2));
	dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
		RREG32(GRBM_STATUS_SE0));
	dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n",
		RREG32(GRBM_STATUS_SE1));
	dev_info(rdev->dev, " GRBM_STATUS_SE2=0x%08X\n",
		RREG32(GRBM_STATUS_SE2));
	dev_info(rdev->dev, " GRBM_STATUS_SE3=0x%08X\n",
		RREG32(GRBM_STATUS_SE3));
	dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
		RREG32(SRBM_STATUS));
	dev_info(rdev->dev, " SRBM_STATUS2=0x%08X\n",
		RREG32(SRBM_STATUS2));
	evergreen_mc_resume(rdev, &save);
	return 0;
}
2195
2196/**
2197 * cik_asic_reset - soft reset compute and gfx
2198 *
2199 * @rdev: radeon_device pointer
2200 *
2201 * Soft reset the CPC blocks (CIK).
2202 * XXX: make this more fine grained and only reset
2203 * what is necessary.
2204 * Returns 0 for success.
2205 */
2206int cik_asic_reset(struct radeon_device *rdev)
2207{
2208 int r;
2209
2210 r = cik_compute_gpu_soft_reset(rdev);
2211 if (r)
2212 dev_info(rdev->dev, "Compute reset failed!\n");
2213
2214 return cik_gfx_gpu_soft_reset(rdev);
2215}
Alex Deucher1c491652013-04-09 12:45:26 -04002216
/* MC */
/**
 * cik_mc_program - program the GPU memory controller
 *
 * @rdev: radeon_device pointer
 *
 * Set the location of vram, gart, and AGP in the GPU's
 * physical address space (CIK).
 */
static void cik_mc_program(struct radeon_device *rdev)
{
	struct evergreen_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	/* NOTE(review): 0x2c14..0x2c24 look like per-client HDP regs,
	 * 32 clients with a 0x18 stride - confirm against register docs */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}
	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);

	/* the MC must be idle while the apertures are reprogrammed */
	evergreen_mc_stop(rdev, &save);
	if (radeon_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
	}
	/* Lockout access through VGA aperture*/
	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
	/* Update configuration */
	WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
	       rdev->mc.vram_start >> 12);
	WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
	       rdev->mc.vram_end >> 12);
	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
	       rdev->vram_scratch.gpu_addr >> 12);
	/* FB_LOCATION packs vram start (low half) and end (high half),
	 * both shifted down by 24 bits (16MB granularity) */
	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(MC_VM_FB_LOCATION, tmp);
	/* XXX double check these! */
	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
	WREG32(HDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	/* AGP range: base 0, top below bottom (empty/disabled range) */
	WREG32(MC_VM_AGP_BASE, 0);
	WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
	WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
	if (radeon_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
	}
	evergreen_mc_resume(rdev, &save);
	/* we need to own VRAM, so turn off the VGA renderer here
	 * to stop it overwriting our objects */
	rv515_vga_render_disable(rdev);
}
2273
2274/**
2275 * cik_mc_init - initialize the memory controller driver params
2276 *
2277 * @rdev: radeon_device pointer
2278 *
2279 * Look up the amount of vram, vram width, and decide how to place
2280 * vram and gart within the GPU's physical address space (CIK).
2281 * Returns 0 for success.
2282 */
2283static int cik_mc_init(struct radeon_device *rdev)
2284{
2285 u32 tmp;
2286 int chansize, numchan;
2287
2288 /* Get VRAM informations */
2289 rdev->mc.vram_is_ddr = true;
2290 tmp = RREG32(MC_ARB_RAMCFG);
2291 if (tmp & CHANSIZE_MASK) {
2292 chansize = 64;
2293 } else {
2294 chansize = 32;
2295 }
2296 tmp = RREG32(MC_SHARED_CHMAP);
2297 switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
2298 case 0:
2299 default:
2300 numchan = 1;
2301 break;
2302 case 1:
2303 numchan = 2;
2304 break;
2305 case 2:
2306 numchan = 4;
2307 break;
2308 case 3:
2309 numchan = 8;
2310 break;
2311 case 4:
2312 numchan = 3;
2313 break;
2314 case 5:
2315 numchan = 6;
2316 break;
2317 case 6:
2318 numchan = 10;
2319 break;
2320 case 7:
2321 numchan = 12;
2322 break;
2323 case 8:
2324 numchan = 16;
2325 break;
2326 }
2327 rdev->mc.vram_width = numchan * chansize;
2328 /* Could aper size report 0 ? */
2329 rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
2330 rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
2331 /* size in MB on si */
2332 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
2333 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
2334 rdev->mc.visible_vram_size = rdev->mc.aper_size;
2335 si_vram_gtt_location(rdev, &rdev->mc);
2336 radeon_update_bandwidth_info(rdev);
2337
2338 return 0;
2339}
2340
/*
 * GART
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the radeon vm/hsa code.
 */
/**
 * cik_pcie_gart_tlb_flush - gart tlb flush callback
 *
 * @rdev: radeon_device pointer
 *
 * Flush the TLB for the VMID 0 page table (CIK).
 */
void cik_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	/* flush hdp cache */
	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0);

	/* bits 0-15 are the VM contexts0-15; only bit 0 is set,
	 * so only the VMID 0 context is invalidated here */
	WREG32(VM_INVALIDATE_REQUEST, 0x1);
}
2362
/**
 * cik_pcie_gart_enable - gart enable
 *
 * @rdev: radeon_device pointer
 *
 * This sets up the TLBs, programs the page tables for VMID0,
 * sets up the hw for VMIDs 1-15 which are allocated on
 * demand, and sets up the global locations for the LDS, GDS,
 * and GPUVM for FSA64 clients (CIK).
 * Returns 0 for success, errors for failure.
 */
static int cik_pcie_gart_enable(struct radeon_device *rdev)
{
	int r, i;

	if (rdev->gart.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	/* re-populate the hw table from the driver's GART state */
	radeon_gart_restore(rdev);
	/* Setup TLB control */
	WREG32(MC_VM_MX_L1_TLB_CNTL,
	       (0xA << 7) |
	       ENABLE_L1_TLB |
	       SYSTEM_ACCESS_MODE_NOT_IN_SYS |
	       ENABLE_ADVANCED_DRIVER_MODEL |
	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
	       ENABLE_L2_FRAGMENT_PROCESSING |
	       ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
	       EFFECTIVE_L2_QUEUE_SIZE(7) |
	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
	WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE);
	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
	       L2_CACHE_BIGK_FRAGMENT_SIZE(6));
	/* setup context0: flat page table covering the GART aperture,
	 * backed by the pinned table in vram; faults fall back to the
	 * dummy page */
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(rdev->dummy_page.addr >> 12));
	WREG32(VM_CONTEXT0_CNTL2, 0);
	WREG32(VM_CONTEXT0_CNTL, (ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
				  RANGE_PROTECTION_FAULT_ENABLE_DEFAULT));

	/* NOTE(review): undocumented registers, purpose unknown -
	 * confirm against the register spec */
	WREG32(0x15D4, 0);
	WREG32(0x15D8, 0);
	WREG32(0x15DC, 0);

	/* empty context1-15 */
	/* FIXME start with 4G, once using 2 level pt switch to full
	 * vm size space
	 */
	/* set vm size, must be a multiple of 4 */
	WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
	WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn);
	/* contexts 1-7 and 8-15 have their base-address registers in two
	 * separate banks, hence the split */
	for (i = 1; i < 16; i++) {
		if (i < 8)
			WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
			       rdev->gart.table_addr >> 12);
		else
			WREG32(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((i - 8) << 2),
			       rdev->gart.table_addr >> 12);
	}

	/* enable context1-15 */
	WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(rdev->dummy_page.addr >> 12));
	WREG32(VM_CONTEXT1_CNTL2, 4);
	WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
				RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
				DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
				DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT |
				PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT |
				PDE0_PROTECTION_FAULT_ENABLE_DEFAULT |
				VALID_PROTECTION_FAULT_ENABLE_INTERRUPT |
				VALID_PROTECTION_FAULT_ENABLE_DEFAULT |
				READ_PROTECTION_FAULT_ENABLE_INTERRUPT |
				READ_PROTECTION_FAULT_ENABLE_DEFAULT |
				WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT |
				WRITE_PROTECTION_FAULT_ENABLE_DEFAULT);

	/* TC cache setup ??? */
	WREG32(TC_CFG_L1_LOAD_POLICY0, 0);
	WREG32(TC_CFG_L1_LOAD_POLICY1, 0);
	WREG32(TC_CFG_L1_STORE_POLICY, 0);

	WREG32(TC_CFG_L2_LOAD_POLICY0, 0);
	WREG32(TC_CFG_L2_LOAD_POLICY1, 0);
	WREG32(TC_CFG_L2_STORE_POLICY0, 0);
	WREG32(TC_CFG_L2_STORE_POLICY1, 0);
	WREG32(TC_CFG_L2_ATOMIC_POLICY, 0);

	WREG32(TC_CFG_L1_VOLATILE, 0);
	WREG32(TC_CFG_L2_VOLATILE, 0);

	if (rdev->family == CHIP_KAVERI) {
		/* KV APU: make sure the VM is not bypassed */
		u32 tmp = RREG32(CHUB_CONTROL);
		tmp &= ~BYPASS_VM;
		WREG32(CHUB_CONTROL, tmp);
	}

	/* XXX SH_MEM regs */
	/* where to put LDS, scratch, GPUVM in FSA64 space */
	for (i = 0; i < 16; i++) {
		/* select the VMID whose SH_MEM regs are being written */
		WREG32(SRBM_GFX_CNTL, VMID(i));
		WREG32(SH_MEM_CONFIG, 0);
		WREG32(SH_MEM_APE1_BASE, 1);
		WREG32(SH_MEM_APE1_LIMIT, 0);
		WREG32(SH_MEM_BASES, 0);
	}
	WREG32(SRBM_GFX_CNTL, 0);

	cik_pcie_gart_tlb_flush(rdev);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20),
		 (unsigned long long)rdev->gart.table_addr);
	rdev->gart.ready = true;
	return 0;
}
2489
/**
 * cik_pcie_gart_disable - gart disable
 *
 * @rdev: radeon_device pointer
 *
 * This disables all VM page table (CIK).
 */
static void cik_pcie_gart_disable(struct radeon_device *rdev)
{
	/* Disable all tables */
	WREG32(VM_CONTEXT0_CNTL, 0);
	WREG32(VM_CONTEXT1_CNTL, 0);
	/* Setup TLB control: note ENABLE_L1_TLB is deliberately absent */
	WREG32(MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE_NOT_IN_SYS |
	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
	/* Setup L2 cache: note ENABLE_L2_CACHE is deliberately absent */
	WREG32(VM_L2_CNTL,
	       ENABLE_L2_FRAGMENT_PROCESSING |
	       ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
	       EFFECTIVE_L2_QUEUE_SIZE(7) |
	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
	       L2_CACHE_BIGK_FRAGMENT_SIZE(6));
	/* hw no longer references the table; safe to unpin it */
	radeon_gart_table_vram_unpin(rdev);
}
2517
/**
 * cik_pcie_gart_fini - vm fini callback
 *
 * @rdev: radeon_device pointer
 *
 * Tears down the driver GART/VM setup (CIK).
 */
static void cik_pcie_gart_fini(struct radeon_device *rdev)
{
	/* disable the hw GART first, then release the table backing it */
	cik_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
	radeon_gart_fini(rdev);
}
2531
/* vm parser */
/**
 * cik_ib_parse - vm ib_parse callback
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer pointer
 *
 * CIK uses hw IB checking so this is a nop (CIK).
 * Always returns 0 (success).
 */
int cik_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
{
	return 0;
}
2545
2546/*
2547 * vm
2548 * VMID 0 is the physical GPU addresses as used by the kernel.
2549 * VMIDs 1-15 are used for userspace clients and are handled
2550 * by the radeon vm/hsa code.
2551 */
2552/**
2553 * cik_vm_init - cik vm init callback
2554 *
2555 * @rdev: radeon_device pointer
2556 *
2557 * Inits cik specific vm parameters (number of VMs, base of vram for
2558 * VMIDs 1-15) (CIK).
2559 * Returns 0 for success.
2560 */
2561int cik_vm_init(struct radeon_device *rdev)
2562{
2563 /* number of VMs */
2564 rdev->vm_manager.nvm = 16;
2565 /* base offset of vram pages */
2566 if (rdev->flags & RADEON_IS_IGP) {
2567 u64 tmp = RREG32(MC_VM_FB_OFFSET);
2568 tmp <<= 22;
2569 rdev->vm_manager.vram_base_offset = tmp;
2570 } else
2571 rdev->vm_manager.vram_base_offset = 0;
2572
2573 return 0;
2574}
2575
/**
 * cik_vm_fini - cik vm fini callback
 *
 * @rdev: radeon_device pointer
 *
 * Tear down any asic specific VM setup (CIK).
 * Currently a no-op: cik_vm_init() allocates no resources.
 */
void cik_vm_fini(struct radeon_device *rdev)
{
}
2586