blob: 5712526a446866a63ad10943eaf9ec7b2083c5d1 [file] [log] [blame]
Alex Deucher8cc1a532013-04-09 12:41:24 -04001/*
2 * Copyright 2012 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Alex Deucher
23 */
24#include <linux/firmware.h>
25#include <linux/platform_device.h>
26#include <linux/slab.h>
27#include <linux/module.h>
28#include "drmP.h"
29#include "radeon.h"
Alex Deucher6f2043c2013-04-09 12:43:41 -040030#include "radeon_asic.h"
Alex Deucher8cc1a532013-04-09 12:41:24 -040031#include "cikd.h"
32#include "atom.h"
Alex Deucher841cf442012-12-18 21:47:44 -050033#include "cik_blit_shaders.h"
Alex Deucher8cc1a532013-04-09 12:41:24 -040034
Alex Deucher02c81322012-12-18 21:43:07 -050035/* GFX */
36#define CIK_PFP_UCODE_SIZE 2144
37#define CIK_ME_UCODE_SIZE 2144
38#define CIK_CE_UCODE_SIZE 2144
39/* compute */
40#define CIK_MEC_UCODE_SIZE 4192
41/* interrupts */
42#define BONAIRE_RLC_UCODE_SIZE 2048
43#define KB_RLC_UCODE_SIZE 2560
44#define KV_RLC_UCODE_SIZE 2560
45/* gddr controller */
46#define CIK_MC_UCODE_SIZE 7866
47
48MODULE_FIRMWARE("radeon/BONAIRE_pfp.bin");
49MODULE_FIRMWARE("radeon/BONAIRE_me.bin");
50MODULE_FIRMWARE("radeon/BONAIRE_ce.bin");
51MODULE_FIRMWARE("radeon/BONAIRE_mec.bin");
52MODULE_FIRMWARE("radeon/BONAIRE_mc.bin");
53MODULE_FIRMWARE("radeon/BONAIRE_rlc.bin");
54MODULE_FIRMWARE("radeon/KAVERI_pfp.bin");
55MODULE_FIRMWARE("radeon/KAVERI_me.bin");
56MODULE_FIRMWARE("radeon/KAVERI_ce.bin");
57MODULE_FIRMWARE("radeon/KAVERI_mec.bin");
58MODULE_FIRMWARE("radeon/KAVERI_rlc.bin");
59MODULE_FIRMWARE("radeon/KABINI_pfp.bin");
60MODULE_FIRMWARE("radeon/KABINI_me.bin");
61MODULE_FIRMWARE("radeon/KABINI_ce.bin");
62MODULE_FIRMWARE("radeon/KABINI_mec.bin");
63MODULE_FIRMWARE("radeon/KABINI_rlc.bin");
64
Alex Deucher6f2043c2013-04-09 12:43:41 -040065extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save);
66extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save);
Alex Deucher1c491652013-04-09 12:45:26 -040067extern void si_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
Alex Deucher6f2043c2013-04-09 12:43:41 -040068
#define BONAIRE_IO_MC_REGS_SIZE 36

/* Table of MC IO debug register (index, value) pairs for Bonaire.
 * Each row is written through MC_SEQ_IO_DEBUG_INDEX/MC_SEQ_IO_DEBUG_DATA
 * by ci_mc_load_microcode() before the MC ucode itself is programmed.
 */
static const u32 bonaire_io_mc_regs[BONAIRE_IO_MC_REGS_SIZE][2] =
{
	{0x00000070, 0x04400000},
	{0x00000071, 0x80c01803},
	{0x00000072, 0x00004004},
	{0x00000073, 0x00000100},
	{0x00000074, 0x00ff0000},
	{0x00000075, 0x34000000},
	{0x00000076, 0x08000014},
	{0x00000077, 0x00cc08ec},
	{0x00000078, 0x00000400},
	{0x00000079, 0x00000000},
	{0x0000007a, 0x04090000},
	{0x0000007c, 0x00000000},
	{0x0000007e, 0x4408a8e8},
	{0x0000007f, 0x00000304},
	{0x00000080, 0x00000000},
	{0x00000082, 0x00000001},
	{0x00000083, 0x00000002},
	{0x00000084, 0xf3e4f400},
	{0x00000085, 0x052024e3},
	{0x00000087, 0x00000000},
	{0x00000088, 0x01000000},
	{0x0000008a, 0x1c0a0000},
	{0x0000008b, 0xff010000},
	{0x0000008d, 0xffffefff},
	{0x0000008e, 0xfff3efff},
	{0x0000008f, 0xfff3efbf},
	{0x00000092, 0xf7ffffff},
	{0x00000093, 0xffffff7f},
	{0x00000095, 0x00101101},
	{0x00000096, 0x00000fff},
	{0x00000097, 0x00116fff},
	{0x00000098, 0x60010000},
	{0x00000099, 0x10010000},
	{0x0000009a, 0x00006000},
	{0x0000009b, 0x00001000},
	{0x0000009f, 0x00b48000}
};
110
111/* ucode loading */
112/**
113 * ci_mc_load_microcode - load MC ucode into the hw
114 *
115 * @rdev: radeon_device pointer
116 *
117 * Load the GDDR MC ucode into the hw (CIK).
118 * Returns 0 on success, error on failure.
119 */
120static int ci_mc_load_microcode(struct radeon_device *rdev)
121{
122 const __be32 *fw_data;
123 u32 running, blackout = 0;
124 u32 *io_mc_regs;
125 int i, ucode_size, regs_size;
126
127 if (!rdev->mc_fw)
128 return -EINVAL;
129
130 switch (rdev->family) {
131 case CHIP_BONAIRE:
132 default:
133 io_mc_regs = (u32 *)&bonaire_io_mc_regs;
134 ucode_size = CIK_MC_UCODE_SIZE;
135 regs_size = BONAIRE_IO_MC_REGS_SIZE;
136 break;
137 }
138
139 running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;
140
141 if (running == 0) {
142 if (running) {
143 blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
144 WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1);
145 }
146
147 /* reset the engine and set to writable */
148 WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
149 WREG32(MC_SEQ_SUP_CNTL, 0x00000010);
150
151 /* load mc io regs */
152 for (i = 0; i < regs_size; i++) {
153 WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]);
154 WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
155 }
156 /* load the MC ucode */
157 fw_data = (const __be32 *)rdev->mc_fw->data;
158 for (i = 0; i < ucode_size; i++)
159 WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++));
160
161 /* put the engine back into the active state */
162 WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
163 WREG32(MC_SEQ_SUP_CNTL, 0x00000004);
164 WREG32(MC_SEQ_SUP_CNTL, 0x00000001);
165
166 /* wait for training to complete */
167 for (i = 0; i < rdev->usec_timeout; i++) {
168 if (RREG32(MC_SEQ_TRAIN_WAKEUP_CNTL) & TRAIN_DONE_D0)
169 break;
170 udelay(1);
171 }
172 for (i = 0; i < rdev->usec_timeout; i++) {
173 if (RREG32(MC_SEQ_TRAIN_WAKEUP_CNTL) & TRAIN_DONE_D1)
174 break;
175 udelay(1);
176 }
177
178 if (running)
179 WREG32(MC_SHARED_BLACKOUT_CNTL, blackout);
180 }
181
182 return 0;
183}
184
Alex Deucher02c81322012-12-18 21:43:07 -0500185/**
186 * cik_init_microcode - load ucode images from disk
187 *
188 * @rdev: radeon_device pointer
189 *
190 * Use the firmware interface to load the ucode images into
191 * the driver (not loaded into hw).
192 * Returns 0 on success, error on failure.
193 */
194static int cik_init_microcode(struct radeon_device *rdev)
195{
196 struct platform_device *pdev;
197 const char *chip_name;
198 size_t pfp_req_size, me_req_size, ce_req_size,
199 mec_req_size, rlc_req_size, mc_req_size;
200 char fw_name[30];
201 int err;
202
203 DRM_DEBUG("\n");
204
205 pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
206 err = IS_ERR(pdev);
207 if (err) {
208 printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
209 return -EINVAL;
210 }
211
212 switch (rdev->family) {
213 case CHIP_BONAIRE:
214 chip_name = "BONAIRE";
215 pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
216 me_req_size = CIK_ME_UCODE_SIZE * 4;
217 ce_req_size = CIK_CE_UCODE_SIZE * 4;
218 mec_req_size = CIK_MEC_UCODE_SIZE * 4;
219 rlc_req_size = BONAIRE_RLC_UCODE_SIZE * 4;
220 mc_req_size = CIK_MC_UCODE_SIZE * 4;
221 break;
222 case CHIP_KAVERI:
223 chip_name = "KAVERI";
224 pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
225 me_req_size = CIK_ME_UCODE_SIZE * 4;
226 ce_req_size = CIK_CE_UCODE_SIZE * 4;
227 mec_req_size = CIK_MEC_UCODE_SIZE * 4;
228 rlc_req_size = KV_RLC_UCODE_SIZE * 4;
229 break;
230 case CHIP_KABINI:
231 chip_name = "KABINI";
232 pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
233 me_req_size = CIK_ME_UCODE_SIZE * 4;
234 ce_req_size = CIK_CE_UCODE_SIZE * 4;
235 mec_req_size = CIK_MEC_UCODE_SIZE * 4;
236 rlc_req_size = KB_RLC_UCODE_SIZE * 4;
237 break;
238 default: BUG();
239 }
240
241 DRM_INFO("Loading %s Microcode\n", chip_name);
242
243 snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
244 err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
245 if (err)
246 goto out;
247 if (rdev->pfp_fw->size != pfp_req_size) {
248 printk(KERN_ERR
249 "cik_cp: Bogus length %zu in firmware \"%s\"\n",
250 rdev->pfp_fw->size, fw_name);
251 err = -EINVAL;
252 goto out;
253 }
254
255 snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
256 err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
257 if (err)
258 goto out;
259 if (rdev->me_fw->size != me_req_size) {
260 printk(KERN_ERR
261 "cik_cp: Bogus length %zu in firmware \"%s\"\n",
262 rdev->me_fw->size, fw_name);
263 err = -EINVAL;
264 }
265
266 snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name);
267 err = request_firmware(&rdev->ce_fw, fw_name, &pdev->dev);
268 if (err)
269 goto out;
270 if (rdev->ce_fw->size != ce_req_size) {
271 printk(KERN_ERR
272 "cik_cp: Bogus length %zu in firmware \"%s\"\n",
273 rdev->ce_fw->size, fw_name);
274 err = -EINVAL;
275 }
276
277 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec.bin", chip_name);
278 err = request_firmware(&rdev->mec_fw, fw_name, &pdev->dev);
279 if (err)
280 goto out;
281 if (rdev->mec_fw->size != mec_req_size) {
282 printk(KERN_ERR
283 "cik_cp: Bogus length %zu in firmware \"%s\"\n",
284 rdev->mec_fw->size, fw_name);
285 err = -EINVAL;
286 }
287
288 snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", chip_name);
289 err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev);
290 if (err)
291 goto out;
292 if (rdev->rlc_fw->size != rlc_req_size) {
293 printk(KERN_ERR
294 "cik_rlc: Bogus length %zu in firmware \"%s\"\n",
295 rdev->rlc_fw->size, fw_name);
296 err = -EINVAL;
297 }
298
299 /* No MC ucode on APUs */
300 if (!(rdev->flags & RADEON_IS_IGP)) {
301 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
302 err = request_firmware(&rdev->mc_fw, fw_name, &pdev->dev);
303 if (err)
304 goto out;
305 if (rdev->mc_fw->size != mc_req_size) {
306 printk(KERN_ERR
307 "cik_mc: Bogus length %zu in firmware \"%s\"\n",
308 rdev->mc_fw->size, fw_name);
309 err = -EINVAL;
310 }
311 }
312
313out:
314 platform_device_unregister(pdev);
315
316 if (err) {
317 if (err != -EINVAL)
318 printk(KERN_ERR
319 "cik_cp: Failed to load firmware \"%s\"\n",
320 fw_name);
321 release_firmware(rdev->pfp_fw);
322 rdev->pfp_fw = NULL;
323 release_firmware(rdev->me_fw);
324 rdev->me_fw = NULL;
325 release_firmware(rdev->ce_fw);
326 rdev->ce_fw = NULL;
327 release_firmware(rdev->rlc_fw);
328 rdev->rlc_fw = NULL;
329 release_firmware(rdev->mc_fw);
330 rdev->mc_fw = NULL;
331 }
332 return err;
333}
334
Alex Deucher8cc1a532013-04-09 12:41:24 -0400335/*
336 * Core functions
337 */
338/**
339 * cik_tiling_mode_table_init - init the hw tiling table
340 *
341 * @rdev: radeon_device pointer
342 *
343 * Starting with SI, the tiling setup is done globally in a
344 * set of 32 tiling modes. Rather than selecting each set of
345 * parameters per surface as on older asics, we just select
346 * which index in the tiling table we want to use, and the
347 * surface uses those parameters (CIK).
348 */
349static void cik_tiling_mode_table_init(struct radeon_device *rdev)
350{
351 const u32 num_tile_mode_states = 32;
352 const u32 num_secondary_tile_mode_states = 16;
353 u32 reg_offset, gb_tile_moden, split_equal_to_row_size;
354 u32 num_pipe_configs;
355 u32 num_rbs = rdev->config.cik.max_backends_per_se *
356 rdev->config.cik.max_shader_engines;
357
358 switch (rdev->config.cik.mem_row_size_in_kb) {
359 case 1:
360 split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_1KB;
361 break;
362 case 2:
363 default:
364 split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_2KB;
365 break;
366 case 4:
367 split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_4KB;
368 break;
369 }
370
371 num_pipe_configs = rdev->config.cik.max_tile_pipes;
372 if (num_pipe_configs > 8)
373 num_pipe_configs = 8; /* ??? */
374
375 if (num_pipe_configs == 8) {
376 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
377 switch (reg_offset) {
378 case 0:
379 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
380 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
381 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
382 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B));
383 break;
384 case 1:
385 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
386 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
387 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
388 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B));
389 break;
390 case 2:
391 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
392 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
393 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
394 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
395 break;
396 case 3:
397 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
398 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
399 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
400 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B));
401 break;
402 case 4:
403 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
404 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
405 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
406 TILE_SPLIT(split_equal_to_row_size));
407 break;
408 case 5:
409 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
410 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
411 break;
412 case 6:
413 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
414 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
415 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
416 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
417 break;
418 case 7:
419 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
420 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
421 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
422 TILE_SPLIT(split_equal_to_row_size));
423 break;
424 case 8:
425 gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
426 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16));
427 break;
428 case 9:
429 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
430 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
431 break;
432 case 10:
433 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
434 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
435 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
436 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
437 break;
438 case 11:
439 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
440 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
441 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
442 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
443 break;
444 case 12:
445 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
446 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
447 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
448 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
449 break;
450 case 13:
451 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
452 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
453 break;
454 case 14:
455 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
456 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
457 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
458 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
459 break;
460 case 16:
461 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
462 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
463 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
464 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
465 break;
466 case 17:
467 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
468 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
469 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
470 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
471 break;
472 case 27:
473 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
474 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
475 break;
476 case 28:
477 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
478 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
479 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
480 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
481 break;
482 case 29:
483 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
484 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
485 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
486 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
487 break;
488 case 30:
489 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
490 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
491 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
492 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
493 break;
494 default:
495 gb_tile_moden = 0;
496 break;
497 }
498 WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
499 }
500 for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) {
501 switch (reg_offset) {
502 case 0:
503 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
504 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
505 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
506 NUM_BANKS(ADDR_SURF_16_BANK));
507 break;
508 case 1:
509 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
510 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
511 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
512 NUM_BANKS(ADDR_SURF_16_BANK));
513 break;
514 case 2:
515 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
516 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
517 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
518 NUM_BANKS(ADDR_SURF_16_BANK));
519 break;
520 case 3:
521 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
522 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
523 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
524 NUM_BANKS(ADDR_SURF_16_BANK));
525 break;
526 case 4:
527 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
528 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
529 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
530 NUM_BANKS(ADDR_SURF_8_BANK));
531 break;
532 case 5:
533 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
534 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
535 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
536 NUM_BANKS(ADDR_SURF_4_BANK));
537 break;
538 case 6:
539 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
540 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
541 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
542 NUM_BANKS(ADDR_SURF_2_BANK));
543 break;
544 case 8:
545 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
546 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
547 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
548 NUM_BANKS(ADDR_SURF_16_BANK));
549 break;
550 case 9:
551 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
552 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
553 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
554 NUM_BANKS(ADDR_SURF_16_BANK));
555 break;
556 case 10:
557 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
558 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
559 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
560 NUM_BANKS(ADDR_SURF_16_BANK));
561 break;
562 case 11:
563 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
564 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
565 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
566 NUM_BANKS(ADDR_SURF_16_BANK));
567 break;
568 case 12:
569 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
570 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
571 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
572 NUM_BANKS(ADDR_SURF_8_BANK));
573 break;
574 case 13:
575 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
576 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
577 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
578 NUM_BANKS(ADDR_SURF_4_BANK));
579 break;
580 case 14:
581 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
582 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
583 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
584 NUM_BANKS(ADDR_SURF_2_BANK));
585 break;
586 default:
587 gb_tile_moden = 0;
588 break;
589 }
590 WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
591 }
592 } else if (num_pipe_configs == 4) {
593 if (num_rbs == 4) {
594 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
595 switch (reg_offset) {
596 case 0:
597 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
598 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
599 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
600 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B));
601 break;
602 case 1:
603 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
604 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
605 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
606 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B));
607 break;
608 case 2:
609 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
610 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
611 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
612 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
613 break;
614 case 3:
615 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
616 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
617 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
618 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B));
619 break;
620 case 4:
621 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
622 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
623 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
624 TILE_SPLIT(split_equal_to_row_size));
625 break;
626 case 5:
627 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
628 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
629 break;
630 case 6:
631 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
632 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
633 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
634 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
635 break;
636 case 7:
637 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
638 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
639 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
640 TILE_SPLIT(split_equal_to_row_size));
641 break;
642 case 8:
643 gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
644 PIPE_CONFIG(ADDR_SURF_P4_16x16));
645 break;
646 case 9:
647 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
648 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
649 break;
650 case 10:
651 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
652 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
653 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
654 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
655 break;
656 case 11:
657 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
658 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
659 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
660 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
661 break;
662 case 12:
663 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
664 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
665 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
666 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
667 break;
668 case 13:
669 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
670 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
671 break;
672 case 14:
673 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
674 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
675 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
676 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
677 break;
678 case 16:
679 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
680 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
681 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
682 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
683 break;
684 case 17:
685 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
686 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
687 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
688 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
689 break;
690 case 27:
691 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
692 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
693 break;
694 case 28:
695 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
696 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
697 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
698 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
699 break;
700 case 29:
701 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
702 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
703 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
704 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
705 break;
706 case 30:
707 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
708 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
709 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
710 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
711 break;
712 default:
713 gb_tile_moden = 0;
714 break;
715 }
716 WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
717 }
718 } else if (num_rbs < 4) {
719 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
720 switch (reg_offset) {
721 case 0:
722 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
723 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
724 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
725 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B));
726 break;
727 case 1:
728 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
729 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
730 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
731 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B));
732 break;
733 case 2:
734 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
735 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
736 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
737 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
738 break;
739 case 3:
740 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
741 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
742 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
743 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B));
744 break;
745 case 4:
746 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
747 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
748 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
749 TILE_SPLIT(split_equal_to_row_size));
750 break;
751 case 5:
752 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
753 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
754 break;
755 case 6:
756 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
757 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
758 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
759 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
760 break;
761 case 7:
762 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
763 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
764 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
765 TILE_SPLIT(split_equal_to_row_size));
766 break;
767 case 8:
768 gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
769 PIPE_CONFIG(ADDR_SURF_P4_8x16));
770 break;
771 case 9:
772 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
773 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
774 break;
775 case 10:
776 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
777 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
778 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
779 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
780 break;
781 case 11:
782 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
783 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
784 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
785 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
786 break;
787 case 12:
788 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
789 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
790 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
791 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
792 break;
793 case 13:
794 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
795 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
796 break;
797 case 14:
798 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
799 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
800 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
801 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
802 break;
803 case 16:
804 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
805 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
806 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
807 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
808 break;
809 case 17:
810 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
811 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
812 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
813 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
814 break;
815 case 27:
816 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
817 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
818 break;
819 case 28:
820 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
821 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
822 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
823 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
824 break;
825 case 29:
826 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
827 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
828 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
829 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
830 break;
831 case 30:
832 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
833 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
834 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
835 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
836 break;
837 default:
838 gb_tile_moden = 0;
839 break;
840 }
841 WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
842 }
843 }
844 for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) {
845 switch (reg_offset) {
846 case 0:
847 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
848 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
849 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
850 NUM_BANKS(ADDR_SURF_16_BANK));
851 break;
852 case 1:
853 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
854 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
855 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
856 NUM_BANKS(ADDR_SURF_16_BANK));
857 break;
858 case 2:
859 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
860 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
861 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
862 NUM_BANKS(ADDR_SURF_16_BANK));
863 break;
864 case 3:
865 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
866 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
867 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
868 NUM_BANKS(ADDR_SURF_16_BANK));
869 break;
870 case 4:
871 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
872 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
873 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
874 NUM_BANKS(ADDR_SURF_16_BANK));
875 break;
876 case 5:
877 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
878 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
879 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
880 NUM_BANKS(ADDR_SURF_8_BANK));
881 break;
882 case 6:
883 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
884 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
885 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
886 NUM_BANKS(ADDR_SURF_4_BANK));
887 break;
888 case 8:
889 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
890 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
891 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
892 NUM_BANKS(ADDR_SURF_16_BANK));
893 break;
894 case 9:
895 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
896 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
897 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
898 NUM_BANKS(ADDR_SURF_16_BANK));
899 break;
900 case 10:
901 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
902 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
903 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
904 NUM_BANKS(ADDR_SURF_16_BANK));
905 break;
906 case 11:
907 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
908 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
909 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
910 NUM_BANKS(ADDR_SURF_16_BANK));
911 break;
912 case 12:
913 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
914 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
915 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
916 NUM_BANKS(ADDR_SURF_16_BANK));
917 break;
918 case 13:
919 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
920 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
921 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
922 NUM_BANKS(ADDR_SURF_8_BANK));
923 break;
924 case 14:
925 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
926 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
927 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
928 NUM_BANKS(ADDR_SURF_4_BANK));
929 break;
930 default:
931 gb_tile_moden = 0;
932 break;
933 }
934 WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
935 }
936 } else if (num_pipe_configs == 2) {
937 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
938 switch (reg_offset) {
939 case 0:
940 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
941 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
942 PIPE_CONFIG(ADDR_SURF_P2) |
943 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B));
944 break;
945 case 1:
946 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
947 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
948 PIPE_CONFIG(ADDR_SURF_P2) |
949 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B));
950 break;
951 case 2:
952 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
953 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
954 PIPE_CONFIG(ADDR_SURF_P2) |
955 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
956 break;
957 case 3:
958 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
959 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
960 PIPE_CONFIG(ADDR_SURF_P2) |
961 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B));
962 break;
963 case 4:
964 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
965 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
966 PIPE_CONFIG(ADDR_SURF_P2) |
967 TILE_SPLIT(split_equal_to_row_size));
968 break;
969 case 5:
970 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
971 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
972 break;
973 case 6:
974 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
975 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
976 PIPE_CONFIG(ADDR_SURF_P2) |
977 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
978 break;
979 case 7:
980 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
981 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
982 PIPE_CONFIG(ADDR_SURF_P2) |
983 TILE_SPLIT(split_equal_to_row_size));
984 break;
985 case 8:
986 gb_tile_moden = ARRAY_MODE(ARRAY_LINEAR_ALIGNED);
987 break;
988 case 9:
989 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
990 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
991 break;
992 case 10:
993 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
994 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
995 PIPE_CONFIG(ADDR_SURF_P2) |
996 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
997 break;
998 case 11:
999 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1000 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
1001 PIPE_CONFIG(ADDR_SURF_P2) |
1002 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1003 break;
1004 case 12:
1005 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
1006 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
1007 PIPE_CONFIG(ADDR_SURF_P2) |
1008 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1009 break;
1010 case 13:
1011 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1012 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
1013 break;
1014 case 14:
1015 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1016 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1017 PIPE_CONFIG(ADDR_SURF_P2) |
1018 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1019 break;
1020 case 16:
1021 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1022 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1023 PIPE_CONFIG(ADDR_SURF_P2) |
1024 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1025 break;
1026 case 17:
1027 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
1028 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1029 PIPE_CONFIG(ADDR_SURF_P2) |
1030 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1031 break;
1032 case 27:
1033 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1034 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
1035 break;
1036 case 28:
1037 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
1038 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
1039 PIPE_CONFIG(ADDR_SURF_P2) |
1040 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1041 break;
1042 case 29:
1043 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1044 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
1045 PIPE_CONFIG(ADDR_SURF_P2) |
1046 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1047 break;
1048 case 30:
1049 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
1050 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
1051 PIPE_CONFIG(ADDR_SURF_P2) |
1052 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1053 break;
1054 default:
1055 gb_tile_moden = 0;
1056 break;
1057 }
1058 WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
1059 }
1060 for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) {
1061 switch (reg_offset) {
1062 case 0:
1063 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
1064 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1065 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1066 NUM_BANKS(ADDR_SURF_16_BANK));
1067 break;
1068 case 1:
1069 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
1070 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
1071 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1072 NUM_BANKS(ADDR_SURF_16_BANK));
1073 break;
1074 case 2:
1075 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1076 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
1077 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1078 NUM_BANKS(ADDR_SURF_16_BANK));
1079 break;
1080 case 3:
1081 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1082 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1083 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1084 NUM_BANKS(ADDR_SURF_16_BANK));
1085 break;
1086 case 4:
1087 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1088 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1089 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1090 NUM_BANKS(ADDR_SURF_16_BANK));
1091 break;
1092 case 5:
1093 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1094 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1095 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1096 NUM_BANKS(ADDR_SURF_16_BANK));
1097 break;
1098 case 6:
1099 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1100 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1101 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1102 NUM_BANKS(ADDR_SURF_8_BANK));
1103 break;
1104 case 8:
1105 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
1106 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
1107 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1108 NUM_BANKS(ADDR_SURF_16_BANK));
1109 break;
1110 case 9:
1111 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
1112 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1113 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1114 NUM_BANKS(ADDR_SURF_16_BANK));
1115 break;
1116 case 10:
1117 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
1118 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1119 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1120 NUM_BANKS(ADDR_SURF_16_BANK));
1121 break;
1122 case 11:
1123 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
1124 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
1125 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1126 NUM_BANKS(ADDR_SURF_16_BANK));
1127 break;
1128 case 12:
1129 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1130 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
1131 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1132 NUM_BANKS(ADDR_SURF_16_BANK));
1133 break;
1134 case 13:
1135 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1136 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1137 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1138 NUM_BANKS(ADDR_SURF_16_BANK));
1139 break;
1140 case 14:
1141 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1142 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1143 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1144 NUM_BANKS(ADDR_SURF_8_BANK));
1145 break;
1146 default:
1147 gb_tile_moden = 0;
1148 break;
1149 }
1150 WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
1151 }
1152 } else
1153 DRM_ERROR("unknown num pipe config: 0x%x\n", num_pipe_configs);
1154}
1155
1156/**
1157 * cik_select_se_sh - select which SE, SH to address
1158 *
1159 * @rdev: radeon_device pointer
1160 * @se_num: shader engine to address
1161 * @sh_num: sh block to address
1162 *
1163 * Select which SE, SH combinations to address. Certain
1164 * registers are instanced per SE or SH. 0xffffffff means
1165 * broadcast to all SEs or SHs (CIK).
1166 */
1167static void cik_select_se_sh(struct radeon_device *rdev,
1168 u32 se_num, u32 sh_num)
1169{
1170 u32 data = INSTANCE_BROADCAST_WRITES;
1171
1172 if ((se_num == 0xffffffff) && (sh_num == 0xffffffff))
1173 data = SH_BROADCAST_WRITES | SE_BROADCAST_WRITES;
1174 else if (se_num == 0xffffffff)
1175 data |= SE_BROADCAST_WRITES | SH_INDEX(sh_num);
1176 else if (sh_num == 0xffffffff)
1177 data |= SH_BROADCAST_WRITES | SE_INDEX(se_num);
1178 else
1179 data |= SH_INDEX(sh_num) | SE_INDEX(se_num);
1180 WREG32(GRBM_GFX_INDEX, data);
1181}
1182
1183/**
1184 * cik_create_bitmask - create a bitmask
1185 *
1186 * @bit_width: length of the mask
1187 *
1188 * create a variable length bit mask (CIK).
1189 * Returns the bitmask.
1190 */
static u32 cik_create_bitmask(u32 bit_width)
{
	u32 bit, mask = 0;

	/* build a mask of bit_width consecutive low bits; the iterative
	 * form is safe for bit_width == 32, where (1 << 32) would be UB */
	for (bit = 0; bit < bit_width; bit++)
		mask = (mask << 1) | 1;

	return mask;
}
1201
/**
 * cik_get_rb_disabled - compute the bitmask of disabled RBs
 *
 * @rdev: radeon_device pointer
 * @max_rb_num: max RBs (render backends) for the asic
 * @se_num: number of SEs (shader engines) for the asic
 * @sh_per_se: number of SH blocks per SE for the asic
 *
 * Calculates the bitmask of disabled RBs (CIK).
 * Returns the disabled RB bitmask.
 */
1213static u32 cik_get_rb_disabled(struct radeon_device *rdev,
1214 u32 max_rb_num, u32 se_num,
1215 u32 sh_per_se)
1216{
1217 u32 data, mask;
1218
1219 data = RREG32(CC_RB_BACKEND_DISABLE);
1220 if (data & 1)
1221 data &= BACKEND_DISABLE_MASK;
1222 else
1223 data = 0;
1224 data |= RREG32(GC_USER_RB_BACKEND_DISABLE);
1225
1226 data >>= BACKEND_DISABLE_SHIFT;
1227
1228 mask = cik_create_bitmask(max_rb_num / se_num / sh_per_se);
1229
1230 return data & mask;
1231}
1232
1233/**
1234 * cik_setup_rb - setup the RBs on the asic
1235 *
1236 * @rdev: radeon_device pointer
1237 * @se_num: number of SEs (shader engines) for the asic
1238 * @sh_per_se: number of SH blocks per SE for the asic
1239 * @max_rb_num: max RBs (render backends) for the asic
1240 *
1241 * Configures per-SE/SH RB registers (CIK).
1242 */
static void cik_setup_rb(struct radeon_device *rdev,
			 u32 se_num, u32 sh_per_se,
			 u32 max_rb_num)
{
	int i, j;
	u32 data, mask;
	u32 disabled_rbs = 0;
	u32 enabled_rbs = 0;

	/* gather the per-SE/SH disabled-RB bits into one packed bitmask,
	 * CIK_RB_BITMAP_WIDTH_PER_SH bits per SH */
	/* NOTE(review): the caller (cik_gpu_init) passes
	 * max_backends_per_se as max_rb_num, i.e. a per-SE count, yet
	 * cik_get_rb_disabled divides it by se_num and sh_per_se again —
	 * confirm the intended units of max_rb_num */
	for (i = 0; i < se_num; i++) {
		for (j = 0; j < sh_per_se; j++) {
			cik_select_se_sh(rdev, i, j);
			data = cik_get_rb_disabled(rdev, max_rb_num, se_num, sh_per_se);
			disabled_rbs |= data << ((i * sh_per_se + j) * CIK_RB_BITMAP_WIDTH_PER_SH);
		}
	}
	/* restore broadcast addressing */
	cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);

	/* invert: a set bit in enabled_rbs means the RB is usable */
	mask = 1;
	for (i = 0; i < max_rb_num; i++) {
		if (!(disabled_rbs & mask))
			enabled_rbs |= mask;
		mask <<= 1;
	}

	/* program the raster config per SE, consuming enabled_rbs two
	 * bits at a time (one pair per SH) */
	for (i = 0; i < se_num; i++) {
		cik_select_se_sh(rdev, i, 0xffffffff);
		data = 0;
		for (j = 0; j < sh_per_se; j++) {
			switch (enabled_rbs & 3) {
			case 1:
				data |= (RASTER_CONFIG_RB_MAP_0 << (i * sh_per_se + j) * 2);
				break;
			case 2:
				data |= (RASTER_CONFIG_RB_MAP_3 << (i * sh_per_se + j) * 2);
				break;
			case 3:
			default:
				data |= (RASTER_CONFIG_RB_MAP_2 << (i * sh_per_se + j) * 2);
				break;
			}
			enabled_rbs >>= 2;
		}
		WREG32(PA_SC_RASTER_CONFIG, data);
	}
	cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
}
1290
1291/**
1292 * cik_gpu_init - setup the 3D engine
1293 *
1294 * @rdev: radeon_device pointer
1295 *
1296 * Configures the 3D engine and tiling configuration
1297 * registers so that the 3D engine is usable.
1298 */
static void cik_gpu_init(struct radeon_device *rdev)
{
	u32 gb_addr_config = RREG32(GB_ADDR_CONFIG);
	u32 mc_shared_chmap, mc_arb_ramcfg;
	u32 hdp_host_path_cntl;
	u32 tmp;
	int i, j;

	/* per-asic shader/pipe limits and golden GB_ADDR_CONFIG */
	switch (rdev->family) {
	case CHIP_BONAIRE:
		rdev->config.cik.max_shader_engines = 2;
		rdev->config.cik.max_tile_pipes = 4;
		rdev->config.cik.max_cu_per_sh = 7;
		rdev->config.cik.max_sh_per_se = 1;
		rdev->config.cik.max_backends_per_se = 2;
		rdev->config.cik.max_texture_channel_caches = 4;
		rdev->config.cik.max_gprs = 256;
		rdev->config.cik.max_gs_threads = 32;
		rdev->config.cik.max_hw_contexts = 8;

		rdev->config.cik.sc_prim_fifo_size_frontend = 0x20;
		rdev->config.cik.sc_prim_fifo_size_backend = 0x100;
		rdev->config.cik.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.cik.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_KAVERI:
		/* TODO */
		break;
	case CHIP_KABINI:
	default:
		rdev->config.cik.max_shader_engines = 1;
		rdev->config.cik.max_tile_pipes = 2;
		rdev->config.cik.max_cu_per_sh = 2;
		rdev->config.cik.max_sh_per_se = 1;
		rdev->config.cik.max_backends_per_se = 1;
		rdev->config.cik.max_texture_channel_caches = 2;
		rdev->config.cik.max_gprs = 256;
		rdev->config.cik.max_gs_threads = 16;
		rdev->config.cik.max_hw_contexts = 8;

		rdev->config.cik.sc_prim_fifo_size_frontend = 0x20;
		rdev->config.cik.sc_prim_fifo_size_backend = 0x100;
		rdev->config.cik.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.cik.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
		break;
	}

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}

	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));

	WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN);

	/* NOTE(review): mc_shared_chmap is read but never consulted below —
	 * presumably kept for future use; confirm */
	mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
	mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);

	rdev->config.cik.num_tile_pipes = rdev->config.cik.max_tile_pipes;
	rdev->config.cik.mem_max_burst_length_bytes = 256;
	/* derive DRAM row size (in kb) from the column count, capped at 4 */
	tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT;
	rdev->config.cik.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
	if (rdev->config.cik.mem_row_size_in_kb > 4)
		rdev->config.cik.mem_row_size_in_kb = 4;
	/* XXX use MC settings? */
	rdev->config.cik.shader_engine_tile_size = 32;
	rdev->config.cik.num_gpus = 1;
	rdev->config.cik.multi_gpu_tile_size = 64;

	/* fix up row size */
	gb_addr_config &= ~ROW_SIZE_MASK;
	switch (rdev->config.cik.mem_row_size_in_kb) {
	case 1:
	default:
		gb_addr_config |= ROW_SIZE(0);
		break;
	case 2:
		gb_addr_config |= ROW_SIZE(1);
		break;
	case 4:
		gb_addr_config |= ROW_SIZE(2);
		break;
	}

	/* setup tiling info dword. gb_addr_config is not adequate since it does
	 * not have bank info, so create a custom tiling dword.
	 * bits 3:0 num_pipes
	 * bits 7:4 num_banks
	 * bits 11:8 group_size
	 * bits 15:12 row_size
	 */
	rdev->config.cik.tile_config = 0;
	switch (rdev->config.cik.num_tile_pipes) {
	case 1:
		rdev->config.cik.tile_config |= (0 << 0);
		break;
	case 2:
		rdev->config.cik.tile_config |= (1 << 0);
		break;
	case 4:
		rdev->config.cik.tile_config |= (2 << 0);
		break;
	case 8:
	default:
		/* XXX what about 12? */
		rdev->config.cik.tile_config |= (3 << 0);
		break;
	}
	if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT)
		rdev->config.cik.tile_config |= 1 << 4;
	else
		rdev->config.cik.tile_config |= 0 << 4;
	rdev->config.cik.tile_config |=
		((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
	rdev->config.cik.tile_config |=
		((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12;

	WREG32(GB_ADDR_CONFIG, gb_addr_config);
	WREG32(HDP_ADDR_CONFIG, gb_addr_config);
	WREG32(DMIF_ADDR_CALC, gb_addr_config);

	cik_tiling_mode_table_init(rdev);

	/* NOTE(review): third argument is the per-SE backend count while the
	 * cik_setup_rb parameter is named max_rb_num — confirm units */
	cik_setup_rb(rdev, rdev->config.cik.max_shader_engines,
		     rdev->config.cik.max_sh_per_se,
		     rdev->config.cik.max_backends_per_se);

	/* set HW defaults for 3D engine */
	WREG32(CP_MEQ_THRESHOLDS, MEQ1_START(0x30) | MEQ2_START(0x60));

	WREG32(SX_DEBUG_1, 0x20);

	WREG32(TA_CNTL_AUX, 0x00010000);

	tmp = RREG32(SPI_CONFIG_CNTL);
	tmp |= 0x03000000;
	WREG32(SPI_CONFIG_CNTL, tmp);

	/* NOTE(review): SQ_CONFIG is written to 1 here and cleared to 0
	 * further below — presumably a deliberate init sequence; confirm */
	WREG32(SQ_CONFIG, 1);

	WREG32(DB_DEBUG, 0);

	tmp = RREG32(DB_DEBUG2) & ~0xf00fffff;
	tmp |= 0x00000400;
	WREG32(DB_DEBUG2, tmp);

	tmp = RREG32(DB_DEBUG3) & ~0x0002021c;
	tmp |= 0x00020200;
	WREG32(DB_DEBUG3, tmp);

	tmp = RREG32(CB_HW_CONTROL) & ~0x00010000;
	tmp |= 0x00018208;
	WREG32(CB_HW_CONTROL, tmp);

	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));

	WREG32(PA_SC_FIFO_SIZE, (SC_FRONTEND_PRIM_FIFO_SIZE(rdev->config.cik.sc_prim_fifo_size_frontend) |
				 SC_BACKEND_PRIM_FIFO_SIZE(rdev->config.cik.sc_prim_fifo_size_backend) |
				 SC_HIZ_TILE_FIFO_SIZE(rdev->config.cik.sc_hiz_tile_fifo_size) |
				 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.cik.sc_earlyz_tile_fifo_size)));

	WREG32(VGT_NUM_INSTANCES, 1);

	WREG32(CP_PERFMON_CNTL, 0);

	WREG32(SQ_CONFIG, 0);

	WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
					  FORCE_EOV_MAX_REZ_CNT(255)));

	WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC) |
	       AUTO_INVLD_EN(ES_AND_GS_AUTO));

	WREG32(VGT_GS_VERTEX_REUSE, 16);
	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);

	/* read-modify-write without changing any bits flushes/settles HDP
	 * host path config — TODO confirm intent of the no-op write */
	tmp = RREG32(HDP_MISC_CNTL);
	tmp |= HDP_FLUSH_INVALIDATE_CACHE;
	WREG32(HDP_MISC_CNTL, tmp);

	hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
	WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);

	WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));
	WREG32(PA_SC_ENHANCE, ENABLE_PA_SC_OUT_OF_ORDER);

	udelay(50);
}
1494
Alex Deucher841cf442012-12-18 21:47:44 -05001495/*
1496 * CP.
 * On CIK, gfx and compute now have independent command processors.
1498 *
1499 * GFX
1500 * Gfx consists of a single ring and can process both gfx jobs and
1501 * compute jobs. The gfx CP consists of three microengines (ME):
1502 * PFP - Pre-Fetch Parser
1503 * ME - Micro Engine
1504 * CE - Constant Engine
1505 * The PFP and ME make up what is considered the Drawing Engine (DE).
 * The CE is an asynchronous engine used for updating buffer descriptors
1507 * used by the DE so that they can be loaded into cache in parallel
1508 * while the DE is processing state update packets.
1509 *
1510 * Compute
1511 * The compute CP consists of two microengines (ME):
1512 * MEC1 - Compute MicroEngine 1
1513 * MEC2 - Compute MicroEngine 2
1514 * Each MEC supports 4 compute pipes and each pipe supports 8 queues.
1515 * The queues are exposed to userspace and are programmed directly
1516 * by the compute runtime.
1517 */
1518/**
1519 * cik_cp_gfx_enable - enable/disable the gfx CP MEs
1520 *
1521 * @rdev: radeon_device pointer
1522 * @enable: enable or disable the MEs
1523 *
1524 * Halts or unhalts the gfx MEs.
1525 */
1526static void cik_cp_gfx_enable(struct radeon_device *rdev, bool enable)
1527{
1528 if (enable)
1529 WREG32(CP_ME_CNTL, 0);
1530 else {
1531 WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT));
1532 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
1533 }
1534 udelay(50);
1535}
1536
1537/**
1538 * cik_cp_gfx_load_microcode - load the gfx CP ME ucode
1539 *
1540 * @rdev: radeon_device pointer
1541 *
1542 * Loads the gfx PFP, ME, and CE ucode.
1543 * Returns 0 for success, -EINVAL if the ucode is not available.
1544 */
1545static int cik_cp_gfx_load_microcode(struct radeon_device *rdev)
1546{
1547 const __be32 *fw_data;
1548 int i;
1549
1550 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw)
1551 return -EINVAL;
1552
1553 cik_cp_gfx_enable(rdev, false);
1554
1555 /* PFP */
1556 fw_data = (const __be32 *)rdev->pfp_fw->data;
1557 WREG32(CP_PFP_UCODE_ADDR, 0);
1558 for (i = 0; i < CIK_PFP_UCODE_SIZE; i++)
1559 WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
1560 WREG32(CP_PFP_UCODE_ADDR, 0);
1561
1562 /* CE */
1563 fw_data = (const __be32 *)rdev->ce_fw->data;
1564 WREG32(CP_CE_UCODE_ADDR, 0);
1565 for (i = 0; i < CIK_CE_UCODE_SIZE; i++)
1566 WREG32(CP_CE_UCODE_DATA, be32_to_cpup(fw_data++));
1567 WREG32(CP_CE_UCODE_ADDR, 0);
1568
1569 /* ME */
1570 fw_data = (const __be32 *)rdev->me_fw->data;
1571 WREG32(CP_ME_RAM_WADDR, 0);
1572 for (i = 0; i < CIK_ME_UCODE_SIZE; i++)
1573 WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
1574 WREG32(CP_ME_RAM_WADDR, 0);
1575
1576 WREG32(CP_PFP_UCODE_ADDR, 0);
1577 WREG32(CP_CE_UCODE_ADDR, 0);
1578 WREG32(CP_ME_RAM_WADDR, 0);
1579 WREG32(CP_ME_RAM_RADDR, 0);
1580 return 0;
1581}
1582
1583/**
1584 * cik_cp_gfx_start - start the gfx ring
1585 *
1586 * @rdev: radeon_device pointer
1587 *
1588 * Enables the ring and loads the clear state context and other
1589 * packets required to init the ring.
1590 * Returns 0 for success, error for failure.
1591 */
static int cik_cp_gfx_start(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	int r, i;

	/* init the CP */
	WREG32(CP_MAX_CONTEXT, rdev->config.cik.max_hw_contexts - 1);
	WREG32(CP_ENDIAN_SWAP, 0);
	WREG32(CP_DEVICE_ID, 1);

	cik_cp_gfx_enable(rdev, true);

	/* 17 = fixed dwords emitted below around the clear-state block
	 * (4 SET_BASE + 2 + 3 + 2 preamble/context + 2 CLEAR_STATE +
	 * 4 SET_CONTEXT_REG) */
	r = radeon_ring_lock(rdev, ring, cik_default_size + 17);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}

	/* init the CE partitions. CE only used for gfx on CIK */
	radeon_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
	radeon_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
	radeon_ring_write(ring, 0xc000);
	radeon_ring_write(ring, 0xc000);

	/* setup clear context state */
	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	radeon_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
	radeon_ring_write(ring, 0x80000000);
	radeon_ring_write(ring, 0x80000000);

	/* stream the golden default state between the preamble markers */
	for (i = 0; i < cik_default_size; i++)
		radeon_ring_write(ring, cik_default_state[i]);

	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);

	/* set clear context state */
	radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
	radeon_ring_write(ring, 0);

	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
	radeon_ring_write(ring, 0x00000316);
	radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
	radeon_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */

	radeon_ring_unlock_commit(rdev, ring);

	return 0;
}
1643
1644/**
1645 * cik_cp_gfx_fini - stop the gfx ring
1646 *
1647 * @rdev: radeon_device pointer
1648 *
1649 * Stop the gfx ring and tear down the driver ring
1650 * info.
1651 */
1652static void cik_cp_gfx_fini(struct radeon_device *rdev)
1653{
1654 cik_cp_gfx_enable(rdev, false);
1655 radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
1656}
1657
1658/**
1659 * cik_cp_gfx_resume - setup the gfx ring buffer registers
1660 *
1661 * @rdev: radeon_device pointer
1662 *
1663 * Program the location and size of the gfx ring buffer
1664 * and test it to make sure it's working.
1665 * Returns 0 for success, error for failure.
1666 */
static int cik_cp_gfx_resume(struct radeon_device *rdev)
{
	struct radeon_ring *ring;
	u32 tmp;
	u32 rb_bufsz;
	u64 rb_addr;
	int r;

	WREG32(CP_SEM_WAIT_TIMER, 0x0);
	WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);

	/* Set the write pointer delay */
	WREG32(CP_RB_WPTR_DELAY, 0);

	/* set the RB to use vmid 0 */
	WREG32(CP_RB_VMID, 0);

	WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);

	/* ring 0 - compute and gfx */
	/* Set ring buffer size */
	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	rb_bufsz = drm_order(ring->ring_size / 8);
	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
	tmp |= BUF_SWAP_32BIT;
#endif
	WREG32(CP_RB0_CNTL, tmp);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA);
	ring->wptr = 0;
	WREG32(CP_RB0_WPTR, ring->wptr);

	/* set the wb address whether it's enabled or not */
	WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
	WREG32(CP_RB0_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);

	/* scratch register shadowing is no longer supported */
	WREG32(SCRATCH_UMSK, 0);

	/* without writeback, disable rptr updates in the CNTL value
	 * rewritten below (which also drops RB_RPTR_WR_ENA again) */
	if (!rdev->wb.enabled)
		tmp |= RB_NO_UPDATE;

	mdelay(1);
	WREG32(CP_RB0_CNTL, tmp);

	/* program the ring base (256-byte aligned, hence >> 8) */
	rb_addr = ring->gpu_addr >> 8;
	WREG32(CP_RB0_BASE, rb_addr);
	WREG32(CP_RB0_BASE_HI, upper_32_bits(rb_addr));

	ring->rptr = RREG32(CP_RB0_RPTR);

	/* start the ring */
	cik_cp_gfx_start(rdev);
	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = true;
	r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
	if (r) {
		rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
		return r;
	}
	return 0;
}
1730
1731/**
1732 * cik_cp_compute_enable - enable/disable the compute CP MEs
1733 *
1734 * @rdev: radeon_device pointer
1735 * @enable: enable or disable the MEs
1736 *
1737 * Halts or unhalts the compute MEs.
1738 */
1739static void cik_cp_compute_enable(struct radeon_device *rdev, bool enable)
1740{
1741 if (enable)
1742 WREG32(CP_MEC_CNTL, 0);
1743 else
1744 WREG32(CP_MEC_CNTL, (MEC_ME1_HALT | MEC_ME2_HALT));
1745 udelay(50);
1746}
1747
1748/**
1749 * cik_cp_compute_load_microcode - load the compute CP ME ucode
1750 *
1751 * @rdev: radeon_device pointer
1752 *
1753 * Loads the compute MEC1&2 ucode.
1754 * Returns 0 for success, -EINVAL if the ucode is not available.
1755 */
1756static int cik_cp_compute_load_microcode(struct radeon_device *rdev)
1757{
1758 const __be32 *fw_data;
1759 int i;
1760
1761 if (!rdev->mec_fw)
1762 return -EINVAL;
1763
1764 cik_cp_compute_enable(rdev, false);
1765
1766 /* MEC1 */
1767 fw_data = (const __be32 *)rdev->mec_fw->data;
1768 WREG32(CP_MEC_ME1_UCODE_ADDR, 0);
1769 for (i = 0; i < CIK_MEC_UCODE_SIZE; i++)
1770 WREG32(CP_MEC_ME1_UCODE_DATA, be32_to_cpup(fw_data++));
1771 WREG32(CP_MEC_ME1_UCODE_ADDR, 0);
1772
1773 if (rdev->family == CHIP_KAVERI) {
1774 /* MEC2 */
1775 fw_data = (const __be32 *)rdev->mec_fw->data;
1776 WREG32(CP_MEC_ME2_UCODE_ADDR, 0);
1777 for (i = 0; i < CIK_MEC_UCODE_SIZE; i++)
1778 WREG32(CP_MEC_ME2_UCODE_DATA, be32_to_cpup(fw_data++));
1779 WREG32(CP_MEC_ME2_UCODE_ADDR, 0);
1780 }
1781
1782 return 0;
1783}
1784
1785/**
1786 * cik_cp_compute_start - start the compute queues
1787 *
1788 * @rdev: radeon_device pointer
1789 *
1790 * Enable the compute queues.
1791 * Returns 0 for success, error for failure.
1792 */
static int cik_cp_compute_start(struct radeon_device *rdev)
{
	/* XXX: compute queue bring-up not implemented yet */
	return 0;
}
1798
1799/**
1800 * cik_cp_compute_fini - stop the compute queues
1801 *
1802 * @rdev: radeon_device pointer
1803 *
1804 * Stop the compute queues and tear down the driver queue
1805 * info.
1806 */
1807static void cik_cp_compute_fini(struct radeon_device *rdev)
1808{
1809 cik_cp_compute_enable(rdev, false);
1810 //todo
1811}
1812
1813/**
1814 * cik_cp_compute_resume - setup the compute queue registers
1815 *
1816 * @rdev: radeon_device pointer
1817 *
1818 * Program the compute queues and test them to make sure they
1819 * are working.
1820 * Returns 0 for success, error for failure.
1821 */
static int cik_cp_compute_resume(struct radeon_device *rdev)
{
	/* XXX: no queue programming here yet; just run the (stub) start
	 * and propagate its result */
	return cik_cp_compute_start(rdev);
}
1832
1833/* XXX temporary wrappers to handle both compute and gfx */
1834/* XXX */
1835static void cik_cp_enable(struct radeon_device *rdev, bool enable)
1836{
1837 cik_cp_gfx_enable(rdev, enable);
1838 cik_cp_compute_enable(rdev, enable);
1839}
1840
1841/* XXX */
/* Load the gfx ucode first, then the compute ucode; stop at the
 * first failure and return its error code. */
static int cik_cp_load_microcode(struct radeon_device *rdev)
{
	int r;

	r = cik_cp_gfx_load_microcode(rdev);
	if (!r)
		r = cik_cp_compute_load_microcode(rdev);

	return r;
}
1855
1856/* XXX */
/* Tear down both command processors: gfx first, then compute. */
static void cik_cp_fini(struct radeon_device *rdev)
{
	cik_cp_gfx_fini(rdev);
	cik_cp_compute_fini(rdev);
}
1862
1863/* XXX */
1864static int cik_cp_resume(struct radeon_device *rdev)
1865{
1866 int r;
1867
1868 /* Reset all cp blocks */
1869 WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
1870 RREG32(GRBM_SOFT_RESET);
1871 mdelay(15);
1872 WREG32(GRBM_SOFT_RESET, 0);
1873 RREG32(GRBM_SOFT_RESET);
1874
1875 r = cik_cp_load_microcode(rdev);
1876 if (r)
1877 return r;
1878
1879 r = cik_cp_gfx_resume(rdev);
1880 if (r)
1881 return r;
1882 r = cik_cp_compute_resume(rdev);
1883 if (r)
1884 return r;
1885
1886 return 0;
1887}
1888
Alex Deucher6f2043c2013-04-09 12:43:41 -04001889/**
1890 * cik_gpu_is_lockup - check if the 3D engine is locked up
1891 *
1892 * @rdev: radeon_device pointer
1893 * @ring: radeon_ring structure holding ring information
1894 *
1895 * Check if the 3D engine is locked up (CIK).
1896 * Returns true if the engine is locked, false if not.
1897 */
bool cik_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
	u32 srbm_status, srbm_status2;
	u32 grbm_status, grbm_status2;
	u32 grbm_status_se0, grbm_status_se1, grbm_status_se2, grbm_status_se3;

	/* NOTE(review): only grbm_status is consulted below; the other
	 * status reads are unused here — presumably kept for debugging
	 * or future checks; confirm */
	srbm_status = RREG32(SRBM_STATUS);
	srbm_status2 = RREG32(SRBM_STATUS2);
	grbm_status = RREG32(GRBM_STATUS);
	grbm_status2 = RREG32(GRBM_STATUS2);
	grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
	grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
	grbm_status_se2 = RREG32(GRBM_STATUS_SE2);
	grbm_status_se3 = RREG32(GRBM_STATUS_SE3);
	/* GUI idle -> no lockup; refresh the lockup tracking state */
	if (!(grbm_status & GUI_ACTIVE)) {
		radeon_ring_lockup_update(ring);
		return false;
	}
	/* force CP activities */
	radeon_ring_force_activity(rdev, ring);
	return radeon_ring_test_lockup(rdev, ring);
}
1920
1921/**
1922 * cik_gfx_gpu_soft_reset - soft reset the 3D engine and CPG
1923 *
1924 * @rdev: radeon_device pointer
1925 *
1926 * Soft reset the GFX engine and CPG blocks (CIK).
 * XXX: deal with resetting RLC and CPF
1928 * Returns 0 for success.
1929 */
static int cik_gfx_gpu_soft_reset(struct radeon_device *rdev)
{
	struct evergreen_mc_save save;
	u32 grbm_reset = 0;

	/* nothing to do if the GUI is already idle */
	if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
		return 0;

	/* dump pre-reset status for debugging */
	dev_info(rdev->dev, "GPU GFX softreset \n");
	dev_info(rdev->dev, "  GRBM_STATUS=0x%08X\n",
		RREG32(GRBM_STATUS));
	dev_info(rdev->dev, "  GRBM_STATUS2=0x%08X\n",
		RREG32(GRBM_STATUS2));
	dev_info(rdev->dev, "  GRBM_STATUS_SE0=0x%08X\n",
		RREG32(GRBM_STATUS_SE0));
	dev_info(rdev->dev, "  GRBM_STATUS_SE1=0x%08X\n",
		RREG32(GRBM_STATUS_SE1));
	dev_info(rdev->dev, "  GRBM_STATUS_SE2=0x%08X\n",
		RREG32(GRBM_STATUS_SE2));
	dev_info(rdev->dev, "  GRBM_STATUS_SE3=0x%08X\n",
		RREG32(GRBM_STATUS_SE3));
	dev_info(rdev->dev, "  SRBM_STATUS=0x%08X\n",
		RREG32(SRBM_STATUS));
	dev_info(rdev->dev, "  SRBM_STATUS2=0x%08X\n",
		RREG32(SRBM_STATUS2));
	/* stop MC traffic and wait for it to drain before resetting */
	evergreen_mc_stop(rdev, &save);
	if (radeon_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
	}
	/* Disable CP parsing/prefetching */
	WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT);

	/* reset all the gfx block and all CPG blocks */
	grbm_reset = SOFT_RESET_CPG | SOFT_RESET_GFX;

	dev_info(rdev->dev, "  GRBM_SOFT_RESET=0x%08X\n", grbm_reset);
	WREG32(GRBM_SOFT_RESET, grbm_reset);
	(void)RREG32(GRBM_SOFT_RESET);	/* post the write */
	udelay(50);
	WREG32(GRBM_SOFT_RESET, 0);
	(void)RREG32(GRBM_SOFT_RESET);
	/* Wait a little for things to settle down */
	udelay(50);
	/* dump post-reset status */
	dev_info(rdev->dev, "  GRBM_STATUS=0x%08X\n",
		RREG32(GRBM_STATUS));
	dev_info(rdev->dev, "  GRBM_STATUS2=0x%08X\n",
		RREG32(GRBM_STATUS2));
	dev_info(rdev->dev, "  GRBM_STATUS_SE0=0x%08X\n",
		RREG32(GRBM_STATUS_SE0));
	dev_info(rdev->dev, "  GRBM_STATUS_SE1=0x%08X\n",
		RREG32(GRBM_STATUS_SE1));
	dev_info(rdev->dev, "  GRBM_STATUS_SE2=0x%08X\n",
		RREG32(GRBM_STATUS_SE2));
	dev_info(rdev->dev, "  GRBM_STATUS_SE3=0x%08X\n",
		RREG32(GRBM_STATUS_SE3));
	dev_info(rdev->dev, "  SRBM_STATUS=0x%08X\n",
		RREG32(SRBM_STATUS));
	dev_info(rdev->dev, "  SRBM_STATUS2=0x%08X\n",
		RREG32(SRBM_STATUS2));
	/* restore MC state */
	evergreen_mc_resume(rdev, &save);
	return 0;
}
1992
1993/**
1994 * cik_compute_gpu_soft_reset - soft reset CPC
1995 *
1996 * @rdev: radeon_device pointer
1997 *
1998 * Soft reset the CPC blocks (CIK).
1999 * XXX: deal with reseting RLC and CPF
2000 * Returns 0 for success.
2001 */
2002static int cik_compute_gpu_soft_reset(struct radeon_device *rdev)
2003{
2004 struct evergreen_mc_save save;
2005 u32 grbm_reset = 0;
2006
2007 dev_info(rdev->dev, "GPU compute softreset \n");
2008 dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
2009 RREG32(GRBM_STATUS));
2010 dev_info(rdev->dev, " GRBM_STATUS2=0x%08X\n",
2011 RREG32(GRBM_STATUS2));
2012 dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
2013 RREG32(GRBM_STATUS_SE0));
2014 dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n",
2015 RREG32(GRBM_STATUS_SE1));
2016 dev_info(rdev->dev, " GRBM_STATUS_SE2=0x%08X\n",
2017 RREG32(GRBM_STATUS_SE2));
2018 dev_info(rdev->dev, " GRBM_STATUS_SE3=0x%08X\n",
2019 RREG32(GRBM_STATUS_SE3));
2020 dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
2021 RREG32(SRBM_STATUS));
2022 dev_info(rdev->dev, " SRBM_STATUS2=0x%08X\n",
2023 RREG32(SRBM_STATUS2));
2024 evergreen_mc_stop(rdev, &save);
2025 if (radeon_mc_wait_for_idle(rdev)) {
2026 dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
2027 }
2028 /* Disable CP parsing/prefetching */
2029 WREG32(CP_MEC_CNTL, MEC_ME1_HALT | MEC_ME2_HALT);
2030
2031 /* reset all the CPC blocks */
2032 grbm_reset = SOFT_RESET_CPG;
2033
2034 dev_info(rdev->dev, " GRBM_SOFT_RESET=0x%08X\n", grbm_reset);
2035 WREG32(GRBM_SOFT_RESET, grbm_reset);
2036 (void)RREG32(GRBM_SOFT_RESET);
2037 udelay(50);
2038 WREG32(GRBM_SOFT_RESET, 0);
2039 (void)RREG32(GRBM_SOFT_RESET);
2040 /* Wait a little for things to settle down */
2041 udelay(50);
2042 dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
2043 RREG32(GRBM_STATUS));
2044 dev_info(rdev->dev, " GRBM_STATUS2=0x%08X\n",
2045 RREG32(GRBM_STATUS2));
2046 dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
2047 RREG32(GRBM_STATUS_SE0));
2048 dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n",
2049 RREG32(GRBM_STATUS_SE1));
2050 dev_info(rdev->dev, " GRBM_STATUS_SE2=0x%08X\n",
2051 RREG32(GRBM_STATUS_SE2));
2052 dev_info(rdev->dev, " GRBM_STATUS_SE3=0x%08X\n",
2053 RREG32(GRBM_STATUS_SE3));
2054 dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
2055 RREG32(SRBM_STATUS));
2056 dev_info(rdev->dev, " SRBM_STATUS2=0x%08X\n",
2057 RREG32(SRBM_STATUS2));
2058 evergreen_mc_resume(rdev, &save);
2059 return 0;
2060}
2061
2062/**
2063 * cik_asic_reset - soft reset compute and gfx
2064 *
2065 * @rdev: radeon_device pointer
2066 *
2067 * Soft reset the CPC blocks (CIK).
2068 * XXX: make this more fine grained and only reset
2069 * what is necessary.
2070 * Returns 0 for success.
2071 */
2072int cik_asic_reset(struct radeon_device *rdev)
2073{
2074 int r;
2075
2076 r = cik_compute_gpu_soft_reset(rdev);
2077 if (r)
2078 dev_info(rdev->dev, "Compute reset failed!\n");
2079
2080 return cik_gfx_gpu_soft_reset(rdev);
2081}
Alex Deucher1c491652013-04-09 12:45:26 -04002082
/* MC */
/**
 * cik_mc_program - program the GPU memory controller
 *
 * @rdev: radeon_device pointer
 *
 * Set the location of vram, gart, and AGP in the GPU's
 * physical address space (CIK).
 */
static void cik_mc_program(struct radeon_device *rdev)
{
	struct evergreen_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	/* zero 32 groups of 5 dwords each, stride 0x18, starting at
	 * 0x2c14 (NOTE(review): raw offsets, presumably HDP surface
	 * registers -- confirm against the register spec) */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}
	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);

	/* stop MC clients while the apertures are reprogrammed */
	evergreen_mc_stop(rdev, &save);
	if (radeon_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
	}
	/* Lockout access through VGA aperture*/
	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
	/* Update configuration */
	/* system aperture covers VRAM; addresses programmed in 4K pages */
	WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
		rdev->mc.vram_start >> 12);
	WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
		rdev->mc.vram_end >> 12);
	/* out-of-range accesses are redirected to the scratch page */
	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
		rdev->vram_scratch.gpu_addr >> 12);
	/* FB_LOCATION: end in the high 16 bits, base in the low 16,
	 * both in 16MB (>> 24) units */
	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(MC_VM_FB_LOCATION, tmp);
	/* XXX double check these! */
	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
	WREG32(HDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	/* effectively disable the AGP aperture (BOT above TOP range) */
	WREG32(MC_VM_AGP_BASE, 0);
	WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
	WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
	if (radeon_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
	}
	evergreen_mc_resume(rdev, &save);
	/* we need to own VRAM, so turn off the VGA renderer here
	 * to stop it overwriting our objects */
	rv515_vga_render_disable(rdev);
}
2139
2140/**
2141 * cik_mc_init - initialize the memory controller driver params
2142 *
2143 * @rdev: radeon_device pointer
2144 *
2145 * Look up the amount of vram, vram width, and decide how to place
2146 * vram and gart within the GPU's physical address space (CIK).
2147 * Returns 0 for success.
2148 */
2149static int cik_mc_init(struct radeon_device *rdev)
2150{
2151 u32 tmp;
2152 int chansize, numchan;
2153
2154 /* Get VRAM informations */
2155 rdev->mc.vram_is_ddr = true;
2156 tmp = RREG32(MC_ARB_RAMCFG);
2157 if (tmp & CHANSIZE_MASK) {
2158 chansize = 64;
2159 } else {
2160 chansize = 32;
2161 }
2162 tmp = RREG32(MC_SHARED_CHMAP);
2163 switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
2164 case 0:
2165 default:
2166 numchan = 1;
2167 break;
2168 case 1:
2169 numchan = 2;
2170 break;
2171 case 2:
2172 numchan = 4;
2173 break;
2174 case 3:
2175 numchan = 8;
2176 break;
2177 case 4:
2178 numchan = 3;
2179 break;
2180 case 5:
2181 numchan = 6;
2182 break;
2183 case 6:
2184 numchan = 10;
2185 break;
2186 case 7:
2187 numchan = 12;
2188 break;
2189 case 8:
2190 numchan = 16;
2191 break;
2192 }
2193 rdev->mc.vram_width = numchan * chansize;
2194 /* Could aper size report 0 ? */
2195 rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
2196 rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
2197 /* size in MB on si */
2198 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
2199 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
2200 rdev->mc.visible_vram_size = rdev->mc.aper_size;
2201 si_vram_gtt_location(rdev, &rdev->mc);
2202 radeon_update_bandwidth_info(rdev);
2203
2204 return 0;
2205}
2206
2207/*
2208 * GART
2209 * VMID 0 is the physical GPU addresses as used by the kernel.
2210 * VMIDs 1-15 are used for userspace clients and are handled
2211 * by the radeon vm/hsa code.
2212 */
/**
 * cik_pcie_gart_tlb_flush - gart tlb flush callback
 *
 * @rdev: radeon_device pointer
 *
 * Flush the TLB for the VMID 0 page table (CIK).
 */
void cik_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	/* flush hdp cache so CPU updates to the page table reach
	 * memory before the TLB invalidate */
	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0);

	/* bits 0-15 are the VM contexts0-15;
	 * only context 0 (the kernel VMID) is invalidated here */
	WREG32(VM_INVALIDATE_REQUEST, 0x1);
}
2228
/**
 * cik_pcie_gart_enable - gart enable
 *
 * @rdev: radeon_device pointer
 *
 * This sets up the TLBs, programs the page tables for VMID0,
 * sets up the hw for VMIDs 1-15 which are allocated on
 * demand, and sets up the global locations for the LDS, GDS,
 * and GPUVM for FSA64 clients (CIK).
 * Returns 0 for success, errors for failure.
 */
static int cik_pcie_gart_enable(struct radeon_device *rdev)
{
	int r, i;

	/* the GART table must already have been allocated in VRAM */
	if (rdev->gart.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	/* re-write any existing gart entries into the fresh table */
	radeon_gart_restore(rdev);
	/* Setup TLB control */
	/* NOTE(review): (0xA << 7) is an undocumented field in
	 * MC_VM_MX_L1_TLB_CNTL -- confirm against the register spec */
	WREG32(MC_VM_MX_L1_TLB_CNTL,
	       (0xA << 7) |
	       ENABLE_L1_TLB |
	       SYSTEM_ACCESS_MODE_NOT_IN_SYS |
	       ENABLE_ADVANCED_DRIVER_MODEL |
	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
	       ENABLE_L2_FRAGMENT_PROCESSING |
	       ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
	       EFFECTIVE_L2_QUEUE_SIZE(7) |
	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
	WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE);
	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
	       L2_CACHE_BIGK_FRAGMENT_SIZE(6));
	/* setup context0 */
	/* context0 maps the GTT aperture through the kernel's GART
	 * table; all addresses are in 4K pages */
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	/* faults in context0 resolve to the dummy page */
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
			(u32)(rdev->dummy_page.addr >> 12));
	WREG32(VM_CONTEXT0_CNTL2, 0);
	WREG32(VM_CONTEXT0_CNTL, (ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
				  RANGE_PROTECTION_FAULT_ENABLE_DEFAULT));

	/* NOTE(review): raw offsets 0x15D4-0x15DC, purpose not
	 * visible here -- confirm against the register spec */
	WREG32(0x15D4, 0);
	WREG32(0x15D8, 0);
	WREG32(0x15DC, 0);

	/* empty context1-15 */
	/* FIXME start with 4G, once using 2 level pt switch to full
	 * vm size space
	 */
	/* set vm size, must be a multiple of 4 */
	WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
	WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn);
	/* contexts 1-7 share context0's base-address register bank;
	 * contexts 8-15 have their own bank.  Point them all at the
	 * kernel GART table for now. */
	for (i = 1; i < 16; i++) {
		if (i < 8)
			WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
			       rdev->gart.table_addr >> 12);
		else
			WREG32(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((i - 8) << 2),
			       rdev->gart.table_addr >> 12);
	}

	/* enable context1-15 */
	WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(rdev->dummy_page.addr >> 12));
	WREG32(VM_CONTEXT1_CNTL2, 4);
	/* two-level page tables; fault interrupts enabled for every
	 * fault class, with default (dummy page) redirection */
	WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
				RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
				DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
				DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT |
				PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT |
				PDE0_PROTECTION_FAULT_ENABLE_DEFAULT |
				VALID_PROTECTION_FAULT_ENABLE_INTERRUPT |
				VALID_PROTECTION_FAULT_ENABLE_DEFAULT |
				READ_PROTECTION_FAULT_ENABLE_INTERRUPT |
				READ_PROTECTION_FAULT_ENABLE_DEFAULT |
				WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT |
				WRITE_PROTECTION_FAULT_ENABLE_DEFAULT);

	/* TC cache setup ??? */
	WREG32(TC_CFG_L1_LOAD_POLICY0, 0);
	WREG32(TC_CFG_L1_LOAD_POLICY1, 0);
	WREG32(TC_CFG_L1_STORE_POLICY, 0);

	WREG32(TC_CFG_L2_LOAD_POLICY0, 0);
	WREG32(TC_CFG_L2_LOAD_POLICY1, 0);
	WREG32(TC_CFG_L2_STORE_POLICY0, 0);
	WREG32(TC_CFG_L2_STORE_POLICY1, 0);
	WREG32(TC_CFG_L2_ATOMIC_POLICY, 0);

	WREG32(TC_CFG_L1_VOLATILE, 0);
	WREG32(TC_CFG_L2_VOLATILE, 0);

	/* APUs route memory through the VM on Kaveri */
	if (rdev->family == CHIP_KAVERI) {
		u32 tmp = RREG32(CHUB_CONTROL);
		tmp &= ~BYPASS_VM;
		WREG32(CHUB_CONTROL, tmp);
	}

	/* XXX SH_MEM regs */
	/* where to put LDS, scratch, GPUVM in FSA64 space */
	/* select each VMID via SRBM_GFX_CNTL, program its SH_MEM
	 * apertures, then switch back to VMID 0 */
	for (i = 0; i < 16; i++) {
		WREG32(SRBM_GFX_CNTL, VMID(i));
		WREG32(SH_MEM_CONFIG, 0);
		WREG32(SH_MEM_APE1_BASE, 1);
		WREG32(SH_MEM_APE1_LIMIT, 0);
		WREG32(SH_MEM_BASES, 0);
	}
	WREG32(SRBM_GFX_CNTL, 0);

	cik_pcie_gart_tlb_flush(rdev);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20),
		 (unsigned long long)rdev->gart.table_addr);
	rdev->gart.ready = true;
	return 0;
}
2355
/**
 * cik_pcie_gart_disable - gart disable
 *
 * @rdev: radeon_device pointer
 *
 * This disables all VM page table (CIK).
 */
static void cik_pcie_gart_disable(struct radeon_device *rdev)
{
	/* Disable all tables */
	WREG32(VM_CONTEXT0_CNTL, 0);
	WREG32(VM_CONTEXT1_CNTL, 0);
	/* Setup TLB control */
	/* ENABLE_L1_TLB deliberately omitted: the L1 TLB is left off */
	WREG32(MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE_NOT_IN_SYS |
	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
	/* Setup L2 cache */
	/* ENABLE_L2_CACHE deliberately omitted: L2 caching is off */
	WREG32(VM_L2_CNTL,
	       ENABLE_L2_FRAGMENT_PROCESSING |
	       ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
	       EFFECTIVE_L2_QUEUE_SIZE(7) |
	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
	       L2_CACHE_BIGK_FRAGMENT_SIZE(6));
	/* release the pinned table only after the hw stops using it */
	radeon_gart_table_vram_unpin(rdev);
}
2383
/**
 * cik_pcie_gart_fini - vm fini callback
 *
 * @rdev: radeon_device pointer
 *
 * Tears down the driver GART/VM setup (CIK).
 * Order matters: the hw page tables are disabled before the
 * table memory they reference is freed.
 */
static void cik_pcie_gart_fini(struct radeon_device *rdev)
{
	cik_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
	radeon_gart_fini(rdev);
}
2397
/* vm parser */
/**
 * cik_ib_parse - vm ib_parse callback
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer pointer
 *
 * CIK uses hw IB checking so this is a nop (CIK).
 * Always returns 0 (success); the IB is accepted unmodified.
 */
int cik_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
{
	return 0;
}
2411
2412/*
2413 * vm
2414 * VMID 0 is the physical GPU addresses as used by the kernel.
2415 * VMIDs 1-15 are used for userspace clients and are handled
2416 * by the radeon vm/hsa code.
2417 */
2418/**
2419 * cik_vm_init - cik vm init callback
2420 *
2421 * @rdev: radeon_device pointer
2422 *
2423 * Inits cik specific vm parameters (number of VMs, base of vram for
2424 * VMIDs 1-15) (CIK).
2425 * Returns 0 for success.
2426 */
2427int cik_vm_init(struct radeon_device *rdev)
2428{
2429 /* number of VMs */
2430 rdev->vm_manager.nvm = 16;
2431 /* base offset of vram pages */
2432 if (rdev->flags & RADEON_IS_IGP) {
2433 u64 tmp = RREG32(MC_VM_FB_OFFSET);
2434 tmp <<= 22;
2435 rdev->vm_manager.vram_base_offset = tmp;
2436 } else
2437 rdev->vm_manager.vram_base_offset = 0;
2438
2439 return 0;
2440}
2441
/**
 * cik_vm_fini - cik vm fini callback
 *
 * @rdev: radeon_device pointer
 *
 * Tear down any asic specific VM setup (CIK).
 * Body intentionally empty: nothing asic-specific to release here;
 * kept as a stub so the asic callback table has a valid entry.
 */
void cik_vm_fini(struct radeon_device *rdev)
{
}
2452