blob: b406a48ef202c1bf118479a94c112b592251f656 [file] [log] [blame]
Alex Deucher43b3cd92012-03-20 17:18:00 -04001/*
2 * Copyright 2011 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Alex Deucher
23 */
Alex Deucher0f0de062012-03-20 17:18:17 -040024#include <linux/firmware.h>
Alex Deucher0f0de062012-03-20 17:18:17 -040025#include <linux/slab.h>
26#include <linux/module.h>
David Howells760285e2012-10-02 18:01:07 +010027#include <drm/drmP.h>
Alex Deucher43b3cd92012-03-20 17:18:00 -040028#include "radeon.h"
29#include "radeon_asic.h"
David Howells760285e2012-10-02 18:01:07 +010030#include <drm/radeon_drm.h>
Alex Deucher43b3cd92012-03-20 17:18:00 -040031#include "sid.h"
32#include "atom.h"
Alex Deucher48c0c902012-03-20 17:18:19 -040033#include "si_blit_shaders.h"
Alex Deucherbd8cd532013-04-12 16:48:21 -040034#include "clearstate_si.h"
Alex Deuchera0ceada2013-03-27 15:18:04 -040035#include "radeon_ucode.h"
Alex Deucher43b3cd92012-03-20 17:18:00 -040036
Alex Deucher0f0de062012-03-20 17:18:17 -040037
/*
 * Firmware images this driver may request at init time, one set per
 * supported SI ASIC (Tahiti, Pitcairn, Verde, Oland, Hainan).  Each set
 * covers the pfp/me/ce command-processor blocks plus mc, rlc and smc
 * microcode.  MODULE_FIRMWARE() only records the name in module metadata;
 * the actual load happens elsewhere via request_firmware().
 */
MODULE_FIRMWARE("radeon/TAHITI_pfp.bin");
MODULE_FIRMWARE("radeon/TAHITI_me.bin");
MODULE_FIRMWARE("radeon/TAHITI_ce.bin");
MODULE_FIRMWARE("radeon/TAHITI_mc.bin");
MODULE_FIRMWARE("radeon/TAHITI_rlc.bin");
MODULE_FIRMWARE("radeon/TAHITI_smc.bin");
MODULE_FIRMWARE("radeon/PITCAIRN_pfp.bin");
MODULE_FIRMWARE("radeon/PITCAIRN_me.bin");
MODULE_FIRMWARE("radeon/PITCAIRN_ce.bin");
MODULE_FIRMWARE("radeon/PITCAIRN_mc.bin");
MODULE_FIRMWARE("radeon/PITCAIRN_rlc.bin");
MODULE_FIRMWARE("radeon/PITCAIRN_smc.bin");
MODULE_FIRMWARE("radeon/VERDE_pfp.bin");
MODULE_FIRMWARE("radeon/VERDE_me.bin");
MODULE_FIRMWARE("radeon/VERDE_ce.bin");
MODULE_FIRMWARE("radeon/VERDE_mc.bin");
MODULE_FIRMWARE("radeon/VERDE_rlc.bin");
MODULE_FIRMWARE("radeon/VERDE_smc.bin");
MODULE_FIRMWARE("radeon/OLAND_pfp.bin");
MODULE_FIRMWARE("radeon/OLAND_me.bin");
MODULE_FIRMWARE("radeon/OLAND_ce.bin");
MODULE_FIRMWARE("radeon/OLAND_mc.bin");
MODULE_FIRMWARE("radeon/OLAND_rlc.bin");
MODULE_FIRMWARE("radeon/OLAND_smc.bin");
MODULE_FIRMWARE("radeon/HAINAN_pfp.bin");
MODULE_FIRMWARE("radeon/HAINAN_me.bin");
MODULE_FIRMWARE("radeon/HAINAN_ce.bin");
MODULE_FIRMWARE("radeon/HAINAN_mc.bin");
MODULE_FIRMWARE("radeon/HAINAN_rlc.bin");
MODULE_FIRMWARE("radeon/HAINAN_smc.bin");

/* Forward declarations for SI-local helpers defined later in this file. */
static void si_pcie_gen3_enable(struct radeon_device *rdev);
static void si_program_aspm(struct radeon_device *rdev);
/*
 * Cross-file helpers borrowed from the sumo/r600/evergreen code paths.
 * NOTE(review): these externs would be cleaner as shared-header
 * declarations — confirm whether a suitable header exists.
 */
extern void sumo_rlc_fini(struct radeon_device *rdev);
extern int sumo_rlc_init(struct radeon_device *rdev);
extern int r600_ih_ring_alloc(struct radeon_device *rdev);
extern void r600_ih_ring_fini(struct radeon_device *rdev);
extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev);
extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save);
extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save);
extern u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev);
extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev);
extern bool evergreen_is_display_hung(struct radeon_device *rdev);
/* Clock-gating / power-gating setup and teardown, defined below. */
static void si_enable_gui_idle_interrupt(struct radeon_device *rdev,
					 bool enable);
static void si_init_pg(struct radeon_device *rdev);
static void si_init_cg(struct radeon_device *rdev);
static void si_fini_pg(struct radeon_device *rdev);
static void si_fini_cg(struct radeon_device *rdev);
static void si_rlc_stop(struct radeon_device *rdev);
Alex Deucher0a96d722012-03-20 17:18:11 -040088
Alex Deucher6d8cf002013-03-06 18:48:05 -050089static const u32 verde_rlc_save_restore_register_list[] =
90{
91 (0x8000 << 16) | (0x98f4 >> 2),
92 0x00000000,
93 (0x8040 << 16) | (0x98f4 >> 2),
94 0x00000000,
95 (0x8000 << 16) | (0xe80 >> 2),
96 0x00000000,
97 (0x8040 << 16) | (0xe80 >> 2),
98 0x00000000,
99 (0x8000 << 16) | (0x89bc >> 2),
100 0x00000000,
101 (0x8040 << 16) | (0x89bc >> 2),
102 0x00000000,
103 (0x8000 << 16) | (0x8c1c >> 2),
104 0x00000000,
105 (0x8040 << 16) | (0x8c1c >> 2),
106 0x00000000,
107 (0x9c00 << 16) | (0x98f0 >> 2),
108 0x00000000,
109 (0x9c00 << 16) | (0xe7c >> 2),
110 0x00000000,
111 (0x8000 << 16) | (0x9148 >> 2),
112 0x00000000,
113 (0x8040 << 16) | (0x9148 >> 2),
114 0x00000000,
115 (0x9c00 << 16) | (0x9150 >> 2),
116 0x00000000,
117 (0x9c00 << 16) | (0x897c >> 2),
118 0x00000000,
119 (0x9c00 << 16) | (0x8d8c >> 2),
120 0x00000000,
121 (0x9c00 << 16) | (0xac54 >> 2),
122 0X00000000,
123 0x3,
124 (0x9c00 << 16) | (0x98f8 >> 2),
125 0x00000000,
126 (0x9c00 << 16) | (0x9910 >> 2),
127 0x00000000,
128 (0x9c00 << 16) | (0x9914 >> 2),
129 0x00000000,
130 (0x9c00 << 16) | (0x9918 >> 2),
131 0x00000000,
132 (0x9c00 << 16) | (0x991c >> 2),
133 0x00000000,
134 (0x9c00 << 16) | (0x9920 >> 2),
135 0x00000000,
136 (0x9c00 << 16) | (0x9924 >> 2),
137 0x00000000,
138 (0x9c00 << 16) | (0x9928 >> 2),
139 0x00000000,
140 (0x9c00 << 16) | (0x992c >> 2),
141 0x00000000,
142 (0x9c00 << 16) | (0x9930 >> 2),
143 0x00000000,
144 (0x9c00 << 16) | (0x9934 >> 2),
145 0x00000000,
146 (0x9c00 << 16) | (0x9938 >> 2),
147 0x00000000,
148 (0x9c00 << 16) | (0x993c >> 2),
149 0x00000000,
150 (0x9c00 << 16) | (0x9940 >> 2),
151 0x00000000,
152 (0x9c00 << 16) | (0x9944 >> 2),
153 0x00000000,
154 (0x9c00 << 16) | (0x9948 >> 2),
155 0x00000000,
156 (0x9c00 << 16) | (0x994c >> 2),
157 0x00000000,
158 (0x9c00 << 16) | (0x9950 >> 2),
159 0x00000000,
160 (0x9c00 << 16) | (0x9954 >> 2),
161 0x00000000,
162 (0x9c00 << 16) | (0x9958 >> 2),
163 0x00000000,
164 (0x9c00 << 16) | (0x995c >> 2),
165 0x00000000,
166 (0x9c00 << 16) | (0x9960 >> 2),
167 0x00000000,
168 (0x9c00 << 16) | (0x9964 >> 2),
169 0x00000000,
170 (0x9c00 << 16) | (0x9968 >> 2),
171 0x00000000,
172 (0x9c00 << 16) | (0x996c >> 2),
173 0x00000000,
174 (0x9c00 << 16) | (0x9970 >> 2),
175 0x00000000,
176 (0x9c00 << 16) | (0x9974 >> 2),
177 0x00000000,
178 (0x9c00 << 16) | (0x9978 >> 2),
179 0x00000000,
180 (0x9c00 << 16) | (0x997c >> 2),
181 0x00000000,
182 (0x9c00 << 16) | (0x9980 >> 2),
183 0x00000000,
184 (0x9c00 << 16) | (0x9984 >> 2),
185 0x00000000,
186 (0x9c00 << 16) | (0x9988 >> 2),
187 0x00000000,
188 (0x9c00 << 16) | (0x998c >> 2),
189 0x00000000,
190 (0x9c00 << 16) | (0x8c00 >> 2),
191 0x00000000,
192 (0x9c00 << 16) | (0x8c14 >> 2),
193 0x00000000,
194 (0x9c00 << 16) | (0x8c04 >> 2),
195 0x00000000,
196 (0x9c00 << 16) | (0x8c08 >> 2),
197 0x00000000,
198 (0x8000 << 16) | (0x9b7c >> 2),
199 0x00000000,
200 (0x8040 << 16) | (0x9b7c >> 2),
201 0x00000000,
202 (0x8000 << 16) | (0xe84 >> 2),
203 0x00000000,
204 (0x8040 << 16) | (0xe84 >> 2),
205 0x00000000,
206 (0x8000 << 16) | (0x89c0 >> 2),
207 0x00000000,
208 (0x8040 << 16) | (0x89c0 >> 2),
209 0x00000000,
210 (0x8000 << 16) | (0x914c >> 2),
211 0x00000000,
212 (0x8040 << 16) | (0x914c >> 2),
213 0x00000000,
214 (0x8000 << 16) | (0x8c20 >> 2),
215 0x00000000,
216 (0x8040 << 16) | (0x8c20 >> 2),
217 0x00000000,
218 (0x8000 << 16) | (0x9354 >> 2),
219 0x00000000,
220 (0x8040 << 16) | (0x9354 >> 2),
221 0x00000000,
222 (0x9c00 << 16) | (0x9060 >> 2),
223 0x00000000,
224 (0x9c00 << 16) | (0x9364 >> 2),
225 0x00000000,
226 (0x9c00 << 16) | (0x9100 >> 2),
227 0x00000000,
228 (0x9c00 << 16) | (0x913c >> 2),
229 0x00000000,
230 (0x8000 << 16) | (0x90e0 >> 2),
231 0x00000000,
232 (0x8000 << 16) | (0x90e4 >> 2),
233 0x00000000,
234 (0x8000 << 16) | (0x90e8 >> 2),
235 0x00000000,
236 (0x8040 << 16) | (0x90e0 >> 2),
237 0x00000000,
238 (0x8040 << 16) | (0x90e4 >> 2),
239 0x00000000,
240 (0x8040 << 16) | (0x90e8 >> 2),
241 0x00000000,
242 (0x9c00 << 16) | (0x8bcc >> 2),
243 0x00000000,
244 (0x9c00 << 16) | (0x8b24 >> 2),
245 0x00000000,
246 (0x9c00 << 16) | (0x88c4 >> 2),
247 0x00000000,
248 (0x9c00 << 16) | (0x8e50 >> 2),
249 0x00000000,
250 (0x9c00 << 16) | (0x8c0c >> 2),
251 0x00000000,
252 (0x9c00 << 16) | (0x8e58 >> 2),
253 0x00000000,
254 (0x9c00 << 16) | (0x8e5c >> 2),
255 0x00000000,
256 (0x9c00 << 16) | (0x9508 >> 2),
257 0x00000000,
258 (0x9c00 << 16) | (0x950c >> 2),
259 0x00000000,
260 (0x9c00 << 16) | (0x9494 >> 2),
261 0x00000000,
262 (0x9c00 << 16) | (0xac0c >> 2),
263 0x00000000,
264 (0x9c00 << 16) | (0xac10 >> 2),
265 0x00000000,
266 (0x9c00 << 16) | (0xac14 >> 2),
267 0x00000000,
268 (0x9c00 << 16) | (0xae00 >> 2),
269 0x00000000,
270 (0x9c00 << 16) | (0xac08 >> 2),
271 0x00000000,
272 (0x9c00 << 16) | (0x88d4 >> 2),
273 0x00000000,
274 (0x9c00 << 16) | (0x88c8 >> 2),
275 0x00000000,
276 (0x9c00 << 16) | (0x88cc >> 2),
277 0x00000000,
278 (0x9c00 << 16) | (0x89b0 >> 2),
279 0x00000000,
280 (0x9c00 << 16) | (0x8b10 >> 2),
281 0x00000000,
282 (0x9c00 << 16) | (0x8a14 >> 2),
283 0x00000000,
284 (0x9c00 << 16) | (0x9830 >> 2),
285 0x00000000,
286 (0x9c00 << 16) | (0x9834 >> 2),
287 0x00000000,
288 (0x9c00 << 16) | (0x9838 >> 2),
289 0x00000000,
290 (0x9c00 << 16) | (0x9a10 >> 2),
291 0x00000000,
292 (0x8000 << 16) | (0x9870 >> 2),
293 0x00000000,
294 (0x8000 << 16) | (0x9874 >> 2),
295 0x00000000,
296 (0x8001 << 16) | (0x9870 >> 2),
297 0x00000000,
298 (0x8001 << 16) | (0x9874 >> 2),
299 0x00000000,
300 (0x8040 << 16) | (0x9870 >> 2),
301 0x00000000,
302 (0x8040 << 16) | (0x9874 >> 2),
303 0x00000000,
304 (0x8041 << 16) | (0x9870 >> 2),
305 0x00000000,
306 (0x8041 << 16) | (0x9874 >> 2),
307 0x00000000,
308 0x00000000
309};
310
Alex Deucher205996c2013-03-01 17:08:42 -0500311static const u32 tahiti_golden_rlc_registers[] =
312{
313 0xc424, 0xffffffff, 0x00601005,
314 0xc47c, 0xffffffff, 0x10104040,
315 0xc488, 0xffffffff, 0x0100000a,
316 0xc314, 0xffffffff, 0x00000800,
317 0xc30c, 0xffffffff, 0x800000f4,
318 0xf4a8, 0xffffffff, 0x00000000
319};
320
/*
 * Golden register settings for Tahiti: {offset, mask, value} triples.
 * Only the bits set in the mask are updated with the new value.
 */
static const u32 tahiti_golden_registers[] =
{
	0x9a10, 0x00010000, 0x00018208,
	0x9830, 0xffffffff, 0x00000000,
	0x9834, 0xf00fffff, 0x00000400,
	0x9838, 0x0002021c, 0x00020200,
	0xc78, 0x00000080, 0x00000000,
	0xd030, 0x000300c0, 0x00800040,
	0xd830, 0x000300c0, 0x00800040,
	0x5bb0, 0x000000f0, 0x00000070,
	0x5bc0, 0x00200000, 0x50100000,
	0x7030, 0x31000311, 0x00000011,
	0x277c, 0x00000003, 0x000007ff,
	0x240c, 0x000007ff, 0x00000000,
	0x8a14, 0xf000001f, 0x00000007,
	0x8b24, 0xffffffff, 0x00ffffff,
	0x8b10, 0x0000ff0f, 0x00000000,
	0x28a4c, 0x07ffffff, 0x4e000000,
	0x28350, 0x3f3f3fff, 0x2a00126a,
	0x30, 0x000000ff, 0x0040,
	0x34, 0x00000040, 0x00004040,
	0x9100, 0x07ffffff, 0x03000000,
	0x8e88, 0x01ff1f3f, 0x00000000,
	0x8e84, 0x01ff1f3f, 0x00000000,
	0x9060, 0x0000007f, 0x00000020,
	0x9508, 0x00010000, 0x00010000,
	0xac14, 0x00000200, 0x000002fb,
	0xac10, 0xffffffff, 0x0000543b,
	0xac0c, 0xffffffff, 0xa9210876,
	0x88d0, 0xffffffff, 0x000fff40,
	0x88d4, 0x0000001f, 0x00000010,
	0x1410, 0x20000000, 0x20fffed8,
	0x15c0, 0x000c0fc0, 0x000c0400
};
355
/* Additional Tahiti-only golden setting: {offset, mask, value} triple. */
static const u32 tahiti_golden_registers2[] =
{
	0xc64, 0x00000001, 0x00000001
};
360
/* Golden RLC register settings for Pitcairn: {offset, mask, value} triples. */
static const u32 pitcairn_golden_rlc_registers[] =
{
	0xc424, 0xffffffff, 0x00601004,
	0xc47c, 0xffffffff, 0x10102020,
	0xc488, 0xffffffff, 0x01000020,
	0xc314, 0xffffffff, 0x00000800,
	0xc30c, 0xffffffff, 0x800000a4
};
369
/* Golden register settings for Pitcairn: {offset, mask, value} triples. */
static const u32 pitcairn_golden_registers[] =
{
	0x9a10, 0x00010000, 0x00018208,
	0x9830, 0xffffffff, 0x00000000,
	0x9834, 0xf00fffff, 0x00000400,
	0x9838, 0x0002021c, 0x00020200,
	0xc78, 0x00000080, 0x00000000,
	0xd030, 0x000300c0, 0x00800040,
	0xd830, 0x000300c0, 0x00800040,
	0x5bb0, 0x000000f0, 0x00000070,
	0x5bc0, 0x00200000, 0x50100000,
	0x7030, 0x31000311, 0x00000011,
	0x2ae4, 0x00073ffe, 0x000022a2,
	0x240c, 0x000007ff, 0x00000000,
	0x8a14, 0xf000001f, 0x00000007,
	0x8b24, 0xffffffff, 0x00ffffff,
	0x8b10, 0x0000ff0f, 0x00000000,
	0x28a4c, 0x07ffffff, 0x4e000000,
	0x28350, 0x3f3f3fff, 0x2a00126a,
	0x30, 0x000000ff, 0x0040,
	0x34, 0x00000040, 0x00004040,
	0x9100, 0x07ffffff, 0x03000000,
	0x9060, 0x0000007f, 0x00000020,
	0x9508, 0x00010000, 0x00010000,
	0xac14, 0x000003ff, 0x000000f7,
	0xac10, 0xffffffff, 0x00000000,
	0xac0c, 0xffffffff, 0x32761054,
	0x88d4, 0x0000001f, 0x00000010,
	0x15c0, 0x000c0fc0, 0x000c0400
};
400
/* Golden RLC register settings for Verde: {offset, mask, value} triples. */
static const u32 verde_golden_rlc_registers[] =
{
	0xc424, 0xffffffff, 0x033f1005,
	0xc47c, 0xffffffff, 0x10808020,
	0xc488, 0xffffffff, 0x00800008,
	0xc314, 0xffffffff, 0x00001000,
	0xc30c, 0xffffffff, 0x80010014
};
409
/*
 * Golden register settings for Verde: {offset, mask, value} triples.
 * NOTE(review): several triples appear two or three times verbatim
 * (0xd030/0xd830, 0x2ae4, 0x240c, 0x8a14, 0x28350, 0x9100, 0x8e88,
 * 0x8e84, 0xac14, 0xac10, 0xac0c, 0x88d4).  Repeated identical writes
 * are harmless, but this looks like a copy-paste of per-instance
 * tables — confirm before deduplicating.
 */
static const u32 verde_golden_registers[] =
{
	0x9a10, 0x00010000, 0x00018208,
	0x9830, 0xffffffff, 0x00000000,
	0x9834, 0xf00fffff, 0x00000400,
	0x9838, 0x0002021c, 0x00020200,
	0xc78, 0x00000080, 0x00000000,
	0xd030, 0x000300c0, 0x00800040,
	0xd030, 0x000300c0, 0x00800040,
	0xd830, 0x000300c0, 0x00800040,
	0xd830, 0x000300c0, 0x00800040,
	0x5bb0, 0x000000f0, 0x00000070,
	0x5bc0, 0x00200000, 0x50100000,
	0x7030, 0x31000311, 0x00000011,
	0x2ae4, 0x00073ffe, 0x000022a2,
	0x2ae4, 0x00073ffe, 0x000022a2,
	0x2ae4, 0x00073ffe, 0x000022a2,
	0x240c, 0x000007ff, 0x00000000,
	0x240c, 0x000007ff, 0x00000000,
	0x240c, 0x000007ff, 0x00000000,
	0x8a14, 0xf000001f, 0x00000007,
	0x8a14, 0xf000001f, 0x00000007,
	0x8a14, 0xf000001f, 0x00000007,
	0x8b24, 0xffffffff, 0x00ffffff,
	0x8b10, 0x0000ff0f, 0x00000000,
	0x28a4c, 0x07ffffff, 0x4e000000,
	0x28350, 0x3f3f3fff, 0x0000124a,
	0x28350, 0x3f3f3fff, 0x0000124a,
	0x28350, 0x3f3f3fff, 0x0000124a,
	0x30, 0x000000ff, 0x0040,
	0x34, 0x00000040, 0x00004040,
	0x9100, 0x07ffffff, 0x03000000,
	0x9100, 0x07ffffff, 0x03000000,
	0x8e88, 0x01ff1f3f, 0x00000000,
	0x8e88, 0x01ff1f3f, 0x00000000,
	0x8e88, 0x01ff1f3f, 0x00000000,
	0x8e84, 0x01ff1f3f, 0x00000000,
	0x8e84, 0x01ff1f3f, 0x00000000,
	0x8e84, 0x01ff1f3f, 0x00000000,
	0x9060, 0x0000007f, 0x00000020,
	0x9508, 0x00010000, 0x00010000,
	0xac14, 0x000003ff, 0x00000003,
	0xac14, 0x000003ff, 0x00000003,
	0xac14, 0x000003ff, 0x00000003,
	0xac10, 0xffffffff, 0x00000000,
	0xac10, 0xffffffff, 0x00000000,
	0xac10, 0xffffffff, 0x00000000,
	0xac0c, 0xffffffff, 0x00001032,
	0xac0c, 0xffffffff, 0x00001032,
	0xac0c, 0xffffffff, 0x00001032,
	0x88d4, 0x0000001f, 0x00000010,
	0x88d4, 0x0000001f, 0x00000010,
	0x88d4, 0x0000001f, 0x00000010,
	0x15c0, 0x000c0fc0, 0x000c0400
};
465
/* Golden RLC register settings for Oland: {offset, mask, value} triples. */
static const u32 oland_golden_rlc_registers[] =
{
	0xc424, 0xffffffff, 0x00601005,
	0xc47c, 0xffffffff, 0x10104040,
	0xc488, 0xffffffff, 0x0100000a,
	0xc314, 0xffffffff, 0x00000800,
	0xc30c, 0xffffffff, 0x800000f4
};
474
/* Golden register settings for Oland: {offset, mask, value} triples. */
static const u32 oland_golden_registers[] =
{
	0x9a10, 0x00010000, 0x00018208,
	0x9830, 0xffffffff, 0x00000000,
	0x9834, 0xf00fffff, 0x00000400,
	0x9838, 0x0002021c, 0x00020200,
	0xc78, 0x00000080, 0x00000000,
	0xd030, 0x000300c0, 0x00800040,
	0xd830, 0x000300c0, 0x00800040,
	0x5bb0, 0x000000f0, 0x00000070,
	0x5bc0, 0x00200000, 0x50100000,
	0x7030, 0x31000311, 0x00000011,
	0x2ae4, 0x00073ffe, 0x000022a2,
	0x240c, 0x000007ff, 0x00000000,
	0x8a14, 0xf000001f, 0x00000007,
	0x8b24, 0xffffffff, 0x00ffffff,
	0x8b10, 0x0000ff0f, 0x00000000,
	0x28a4c, 0x07ffffff, 0x4e000000,
	0x28350, 0x3f3f3fff, 0x00000082,
	0x30, 0x000000ff, 0x0040,
	0x34, 0x00000040, 0x00004040,
	0x9100, 0x07ffffff, 0x03000000,
	0x9060, 0x0000007f, 0x00000020,
	0x9508, 0x00010000, 0x00010000,
	0xac14, 0x000003ff, 0x000000f3,
	0xac10, 0xffffffff, 0x00000000,
	0xac0c, 0xffffffff, 0x00003210,
	0x88d4, 0x0000001f, 0x00000010,
	0x15c0, 0x000c0fc0, 0x000c0400
};
505
Alex Deucherfffbdda2013-05-13 13:36:23 -0400506static const u32 hainan_golden_registers[] =
507{
508 0x9a10, 0x00010000, 0x00018208,
509 0x9830, 0xffffffff, 0x00000000,
510 0x9834, 0xf00fffff, 0x00000400,
511 0x9838, 0x0002021c, 0x00020200,
512 0xd0c0, 0xff000fff, 0x00000100,
513 0xd030, 0x000300c0, 0x00800040,
514 0xd8c0, 0xff000fff, 0x00000100,
515 0xd830, 0x000300c0, 0x00800040,
516 0x2ae4, 0x00073ffe, 0x000022a2,
517 0x240c, 0x000007ff, 0x00000000,
518 0x8a14, 0xf000001f, 0x00000007,
519 0x8b24, 0xffffffff, 0x00ffffff,
520 0x8b10, 0x0000ff0f, 0x00000000,
521 0x28a4c, 0x07ffffff, 0x4e000000,
522 0x28350, 0x3f3f3fff, 0x00000000,
523 0x30, 0x000000ff, 0x0040,
524 0x34, 0x00000040, 0x00004040,
525 0x9100, 0x03e00000, 0x03600000,
526 0x9060, 0x0000007f, 0x00000020,
527 0x9508, 0x00010000, 0x00010000,
528 0xac14, 0x000003ff, 0x000000f1,
529 0xac10, 0xffffffff, 0x00000000,
530 0xac0c, 0xffffffff, 0x00003210,
531 0x88d4, 0x0000001f, 0x00000010,
532 0x15c0, 0x000c0fc0, 0x000c0400
533};
534
/* Additional Hainan-only golden setting: {offset, mask, value} triple. */
static const u32 hainan_golden_registers2[] =
{
	0x98f8, 0xffffffff, 0x02010001
};
539
Alex Deucher205996c2013-03-01 17:08:42 -0500540static const u32 tahiti_mgcg_cgcg_init[] =
541{
542 0xc400, 0xffffffff, 0xfffffffc,
543 0x802c, 0xffffffff, 0xe0000000,
544 0x9a60, 0xffffffff, 0x00000100,
545 0x92a4, 0xffffffff, 0x00000100,
546 0xc164, 0xffffffff, 0x00000100,
547 0x9774, 0xffffffff, 0x00000100,
548 0x8984, 0xffffffff, 0x06000100,
549 0x8a18, 0xffffffff, 0x00000100,
550 0x92a0, 0xffffffff, 0x00000100,
551 0xc380, 0xffffffff, 0x00000100,
552 0x8b28, 0xffffffff, 0x00000100,
553 0x9144, 0xffffffff, 0x00000100,
554 0x8d88, 0xffffffff, 0x00000100,
555 0x8d8c, 0xffffffff, 0x00000100,
556 0x9030, 0xffffffff, 0x00000100,
557 0x9034, 0xffffffff, 0x00000100,
558 0x9038, 0xffffffff, 0x00000100,
559 0x903c, 0xffffffff, 0x00000100,
560 0xad80, 0xffffffff, 0x00000100,
561 0xac54, 0xffffffff, 0x00000100,
562 0x897c, 0xffffffff, 0x06000100,
563 0x9868, 0xffffffff, 0x00000100,
564 0x9510, 0xffffffff, 0x00000100,
565 0xaf04, 0xffffffff, 0x00000100,
566 0xae04, 0xffffffff, 0x00000100,
567 0x949c, 0xffffffff, 0x00000100,
568 0x802c, 0xffffffff, 0xe0000000,
569 0x9160, 0xffffffff, 0x00010000,
570 0x9164, 0xffffffff, 0x00030002,
571 0x9168, 0xffffffff, 0x00040007,
572 0x916c, 0xffffffff, 0x00060005,
573 0x9170, 0xffffffff, 0x00090008,
574 0x9174, 0xffffffff, 0x00020001,
575 0x9178, 0xffffffff, 0x00040003,
576 0x917c, 0xffffffff, 0x00000007,
577 0x9180, 0xffffffff, 0x00060005,
578 0x9184, 0xffffffff, 0x00090008,
579 0x9188, 0xffffffff, 0x00030002,
580 0x918c, 0xffffffff, 0x00050004,
581 0x9190, 0xffffffff, 0x00000008,
582 0x9194, 0xffffffff, 0x00070006,
583 0x9198, 0xffffffff, 0x000a0009,
584 0x919c, 0xffffffff, 0x00040003,
585 0x91a0, 0xffffffff, 0x00060005,
586 0x91a4, 0xffffffff, 0x00000009,
587 0x91a8, 0xffffffff, 0x00080007,
588 0x91ac, 0xffffffff, 0x000b000a,
589 0x91b0, 0xffffffff, 0x00050004,
590 0x91b4, 0xffffffff, 0x00070006,
591 0x91b8, 0xffffffff, 0x0008000b,
592 0x91bc, 0xffffffff, 0x000a0009,
593 0x91c0, 0xffffffff, 0x000d000c,
594 0x91c4, 0xffffffff, 0x00060005,
595 0x91c8, 0xffffffff, 0x00080007,
596 0x91cc, 0xffffffff, 0x0000000b,
597 0x91d0, 0xffffffff, 0x000a0009,
598 0x91d4, 0xffffffff, 0x000d000c,
599 0x91d8, 0xffffffff, 0x00070006,
600 0x91dc, 0xffffffff, 0x00090008,
601 0x91e0, 0xffffffff, 0x0000000c,
602 0x91e4, 0xffffffff, 0x000b000a,
603 0x91e8, 0xffffffff, 0x000e000d,
604 0x91ec, 0xffffffff, 0x00080007,
605 0x91f0, 0xffffffff, 0x000a0009,
606 0x91f4, 0xffffffff, 0x0000000d,
607 0x91f8, 0xffffffff, 0x000c000b,
608 0x91fc, 0xffffffff, 0x000f000e,
609 0x9200, 0xffffffff, 0x00090008,
610 0x9204, 0xffffffff, 0x000b000a,
611 0x9208, 0xffffffff, 0x000c000f,
612 0x920c, 0xffffffff, 0x000e000d,
613 0x9210, 0xffffffff, 0x00110010,
614 0x9214, 0xffffffff, 0x000a0009,
615 0x9218, 0xffffffff, 0x000c000b,
616 0x921c, 0xffffffff, 0x0000000f,
617 0x9220, 0xffffffff, 0x000e000d,
618 0x9224, 0xffffffff, 0x00110010,
619 0x9228, 0xffffffff, 0x000b000a,
620 0x922c, 0xffffffff, 0x000d000c,
621 0x9230, 0xffffffff, 0x00000010,
622 0x9234, 0xffffffff, 0x000f000e,
623 0x9238, 0xffffffff, 0x00120011,
624 0x923c, 0xffffffff, 0x000c000b,
625 0x9240, 0xffffffff, 0x000e000d,
626 0x9244, 0xffffffff, 0x00000011,
627 0x9248, 0xffffffff, 0x0010000f,
628 0x924c, 0xffffffff, 0x00130012,
629 0x9250, 0xffffffff, 0x000d000c,
630 0x9254, 0xffffffff, 0x000f000e,
631 0x9258, 0xffffffff, 0x00100013,
632 0x925c, 0xffffffff, 0x00120011,
633 0x9260, 0xffffffff, 0x00150014,
634 0x9264, 0xffffffff, 0x000e000d,
635 0x9268, 0xffffffff, 0x0010000f,
636 0x926c, 0xffffffff, 0x00000013,
637 0x9270, 0xffffffff, 0x00120011,
638 0x9274, 0xffffffff, 0x00150014,
639 0x9278, 0xffffffff, 0x000f000e,
640 0x927c, 0xffffffff, 0x00110010,
641 0x9280, 0xffffffff, 0x00000014,
642 0x9284, 0xffffffff, 0x00130012,
643 0x9288, 0xffffffff, 0x00160015,
644 0x928c, 0xffffffff, 0x0010000f,
645 0x9290, 0xffffffff, 0x00120011,
646 0x9294, 0xffffffff, 0x00000015,
647 0x9298, 0xffffffff, 0x00140013,
648 0x929c, 0xffffffff, 0x00170016,
649 0x9150, 0xffffffff, 0x96940200,
650 0x8708, 0xffffffff, 0x00900100,
651 0xc478, 0xffffffff, 0x00000080,
652 0xc404, 0xffffffff, 0x0020003f,
653 0x30, 0xffffffff, 0x0000001c,
654 0x34, 0x000f0000, 0x000f0000,
655 0x160c, 0xffffffff, 0x00000100,
656 0x1024, 0xffffffff, 0x00000100,
657 0x102c, 0x00000101, 0x00000000,
658 0x20a8, 0xffffffff, 0x00000104,
659 0x264c, 0x000c0000, 0x000c0000,
660 0x2648, 0x000c0000, 0x000c0000,
661 0x55e4, 0xff000fff, 0x00000100,
662 0x55e8, 0x00000001, 0x00000001,
663 0x2f50, 0x00000001, 0x00000001,
664 0x30cc, 0xc0000fff, 0x00000104,
665 0xc1e4, 0x00000001, 0x00000001,
666 0xd0c0, 0xfffffff0, 0x00000100,
667 0xd8c0, 0xfffffff0, 0x00000100
668};
669
/*
 * Medium-grain / coarse-grain clock-gating init sequence for Pitcairn:
 * {offset, mask, value} triples.  Same shape as the Tahiti table but
 * with a shorter 0x9160-0x9260 run (fewer shader engines/units).
 */
static const u32 pitcairn_mgcg_cgcg_init[] =
{
	0xc400, 0xffffffff, 0xfffffffc,
	0x802c, 0xffffffff, 0xe0000000,
	0x9a60, 0xffffffff, 0x00000100,
	0x92a4, 0xffffffff, 0x00000100,
	0xc164, 0xffffffff, 0x00000100,
	0x9774, 0xffffffff, 0x00000100,
	0x8984, 0xffffffff, 0x06000100,
	0x8a18, 0xffffffff, 0x00000100,
	0x92a0, 0xffffffff, 0x00000100,
	0xc380, 0xffffffff, 0x00000100,
	0x8b28, 0xffffffff, 0x00000100,
	0x9144, 0xffffffff, 0x00000100,
	0x8d88, 0xffffffff, 0x00000100,
	0x8d8c, 0xffffffff, 0x00000100,
	0x9030, 0xffffffff, 0x00000100,
	0x9034, 0xffffffff, 0x00000100,
	0x9038, 0xffffffff, 0x00000100,
	0x903c, 0xffffffff, 0x00000100,
	0xad80, 0xffffffff, 0x00000100,
	0xac54, 0xffffffff, 0x00000100,
	0x897c, 0xffffffff, 0x06000100,
	0x9868, 0xffffffff, 0x00000100,
	0x9510, 0xffffffff, 0x00000100,
	0xaf04, 0xffffffff, 0x00000100,
	0xae04, 0xffffffff, 0x00000100,
	0x949c, 0xffffffff, 0x00000100,
	0x802c, 0xffffffff, 0xe0000000,
	0x9160, 0xffffffff, 0x00010000,
	0x9164, 0xffffffff, 0x00030002,
	0x9168, 0xffffffff, 0x00040007,
	0x916c, 0xffffffff, 0x00060005,
	0x9170, 0xffffffff, 0x00090008,
	0x9174, 0xffffffff, 0x00020001,
	0x9178, 0xffffffff, 0x00040003,
	0x917c, 0xffffffff, 0x00000007,
	0x9180, 0xffffffff, 0x00060005,
	0x9184, 0xffffffff, 0x00090008,
	0x9188, 0xffffffff, 0x00030002,
	0x918c, 0xffffffff, 0x00050004,
	0x9190, 0xffffffff, 0x00000008,
	0x9194, 0xffffffff, 0x00070006,
	0x9198, 0xffffffff, 0x000a0009,
	0x919c, 0xffffffff, 0x00040003,
	0x91a0, 0xffffffff, 0x00060005,
	0x91a4, 0xffffffff, 0x00000009,
	0x91a8, 0xffffffff, 0x00080007,
	0x91ac, 0xffffffff, 0x000b000a,
	0x91b0, 0xffffffff, 0x00050004,
	0x91b4, 0xffffffff, 0x00070006,
	0x91b8, 0xffffffff, 0x0008000b,
	0x91bc, 0xffffffff, 0x000a0009,
	0x91c0, 0xffffffff, 0x000d000c,
	0x9200, 0xffffffff, 0x00090008,
	0x9204, 0xffffffff, 0x000b000a,
	0x9208, 0xffffffff, 0x000c000f,
	0x920c, 0xffffffff, 0x000e000d,
	0x9210, 0xffffffff, 0x00110010,
	0x9214, 0xffffffff, 0x000a0009,
	0x9218, 0xffffffff, 0x000c000b,
	0x921c, 0xffffffff, 0x0000000f,
	0x9220, 0xffffffff, 0x000e000d,
	0x9224, 0xffffffff, 0x00110010,
	0x9228, 0xffffffff, 0x000b000a,
	0x922c, 0xffffffff, 0x000d000c,
	0x9230, 0xffffffff, 0x00000010,
	0x9234, 0xffffffff, 0x000f000e,
	0x9238, 0xffffffff, 0x00120011,
	0x923c, 0xffffffff, 0x000c000b,
	0x9240, 0xffffffff, 0x000e000d,
	0x9244, 0xffffffff, 0x00000011,
	0x9248, 0xffffffff, 0x0010000f,
	0x924c, 0xffffffff, 0x00130012,
	0x9250, 0xffffffff, 0x000d000c,
	0x9254, 0xffffffff, 0x000f000e,
	0x9258, 0xffffffff, 0x00100013,
	0x925c, 0xffffffff, 0x00120011,
	0x9260, 0xffffffff, 0x00150014,
	0x9150, 0xffffffff, 0x96940200,
	0x8708, 0xffffffff, 0x00900100,
	0xc478, 0xffffffff, 0x00000080,
	0xc404, 0xffffffff, 0x0020003f,
	0x30, 0xffffffff, 0x0000001c,
	0x34, 0x000f0000, 0x000f0000,
	0x160c, 0xffffffff, 0x00000100,
	0x1024, 0xffffffff, 0x00000100,
	0x102c, 0x00000101, 0x00000000,
	0x20a8, 0xffffffff, 0x00000104,
	0x55e4, 0xff000fff, 0x00000100,
	0x55e8, 0x00000001, 0x00000001,
	0x2f50, 0x00000001, 0x00000001,
	0x30cc, 0xc0000fff, 0x00000104,
	0xc1e4, 0x00000001, 0x00000001,
	0xd0c0, 0xfffffff0, 0x00000100,
	0xd8c0, 0xfffffff0, 0x00000100
};
767
/*
 * Medium-grain / coarse-grain clock-gating init sequence for Verde:
 * {offset, mask, value} triples.  Identical to the Pitcairn table plus
 * the 0x264c/0x2648 entries.
 */
static const u32 verde_mgcg_cgcg_init[] =
{
	0xc400, 0xffffffff, 0xfffffffc,
	0x802c, 0xffffffff, 0xe0000000,
	0x9a60, 0xffffffff, 0x00000100,
	0x92a4, 0xffffffff, 0x00000100,
	0xc164, 0xffffffff, 0x00000100,
	0x9774, 0xffffffff, 0x00000100,
	0x8984, 0xffffffff, 0x06000100,
	0x8a18, 0xffffffff, 0x00000100,
	0x92a0, 0xffffffff, 0x00000100,
	0xc380, 0xffffffff, 0x00000100,
	0x8b28, 0xffffffff, 0x00000100,
	0x9144, 0xffffffff, 0x00000100,
	0x8d88, 0xffffffff, 0x00000100,
	0x8d8c, 0xffffffff, 0x00000100,
	0x9030, 0xffffffff, 0x00000100,
	0x9034, 0xffffffff, 0x00000100,
	0x9038, 0xffffffff, 0x00000100,
	0x903c, 0xffffffff, 0x00000100,
	0xad80, 0xffffffff, 0x00000100,
	0xac54, 0xffffffff, 0x00000100,
	0x897c, 0xffffffff, 0x06000100,
	0x9868, 0xffffffff, 0x00000100,
	0x9510, 0xffffffff, 0x00000100,
	0xaf04, 0xffffffff, 0x00000100,
	0xae04, 0xffffffff, 0x00000100,
	0x949c, 0xffffffff, 0x00000100,
	0x802c, 0xffffffff, 0xe0000000,
	0x9160, 0xffffffff, 0x00010000,
	0x9164, 0xffffffff, 0x00030002,
	0x9168, 0xffffffff, 0x00040007,
	0x916c, 0xffffffff, 0x00060005,
	0x9170, 0xffffffff, 0x00090008,
	0x9174, 0xffffffff, 0x00020001,
	0x9178, 0xffffffff, 0x00040003,
	0x917c, 0xffffffff, 0x00000007,
	0x9180, 0xffffffff, 0x00060005,
	0x9184, 0xffffffff, 0x00090008,
	0x9188, 0xffffffff, 0x00030002,
	0x918c, 0xffffffff, 0x00050004,
	0x9190, 0xffffffff, 0x00000008,
	0x9194, 0xffffffff, 0x00070006,
	0x9198, 0xffffffff, 0x000a0009,
	0x919c, 0xffffffff, 0x00040003,
	0x91a0, 0xffffffff, 0x00060005,
	0x91a4, 0xffffffff, 0x00000009,
	0x91a8, 0xffffffff, 0x00080007,
	0x91ac, 0xffffffff, 0x000b000a,
	0x91b0, 0xffffffff, 0x00050004,
	0x91b4, 0xffffffff, 0x00070006,
	0x91b8, 0xffffffff, 0x0008000b,
	0x91bc, 0xffffffff, 0x000a0009,
	0x91c0, 0xffffffff, 0x000d000c,
	0x9200, 0xffffffff, 0x00090008,
	0x9204, 0xffffffff, 0x000b000a,
	0x9208, 0xffffffff, 0x000c000f,
	0x920c, 0xffffffff, 0x000e000d,
	0x9210, 0xffffffff, 0x00110010,
	0x9214, 0xffffffff, 0x000a0009,
	0x9218, 0xffffffff, 0x000c000b,
	0x921c, 0xffffffff, 0x0000000f,
	0x9220, 0xffffffff, 0x000e000d,
	0x9224, 0xffffffff, 0x00110010,
	0x9228, 0xffffffff, 0x000b000a,
	0x922c, 0xffffffff, 0x000d000c,
	0x9230, 0xffffffff, 0x00000010,
	0x9234, 0xffffffff, 0x000f000e,
	0x9238, 0xffffffff, 0x00120011,
	0x923c, 0xffffffff, 0x000c000b,
	0x9240, 0xffffffff, 0x000e000d,
	0x9244, 0xffffffff, 0x00000011,
	0x9248, 0xffffffff, 0x0010000f,
	0x924c, 0xffffffff, 0x00130012,
	0x9250, 0xffffffff, 0x000d000c,
	0x9254, 0xffffffff, 0x000f000e,
	0x9258, 0xffffffff, 0x00100013,
	0x925c, 0xffffffff, 0x00120011,
	0x9260, 0xffffffff, 0x00150014,
	0x9150, 0xffffffff, 0x96940200,
	0x8708, 0xffffffff, 0x00900100,
	0xc478, 0xffffffff, 0x00000080,
	0xc404, 0xffffffff, 0x0020003f,
	0x30, 0xffffffff, 0x0000001c,
	0x34, 0x000f0000, 0x000f0000,
	0x160c, 0xffffffff, 0x00000100,
	0x1024, 0xffffffff, 0x00000100,
	0x102c, 0x00000101, 0x00000000,
	0x20a8, 0xffffffff, 0x00000104,
	0x264c, 0x000c0000, 0x000c0000,
	0x2648, 0x000c0000, 0x000c0000,
	0x55e4, 0xff000fff, 0x00000100,
	0x55e8, 0x00000001, 0x00000001,
	0x2f50, 0x00000001, 0x00000001,
	0x30cc, 0xc0000fff, 0x00000104,
	0xc1e4, 0x00000001, 0x00000001,
	0xd0c0, 0xfffffff0, 0x00000100,
	0xd8c0, 0xfffffff0, 0x00000100
};
867
/*
 * Medium-grain / coarse-grain clock-gating init sequence for Oland:
 * {offset, mask, value} triples.  Shorter 0x9160-0x91d4 run than the
 * larger ASICs.
 */
static const u32 oland_mgcg_cgcg_init[] =
{
	0xc400, 0xffffffff, 0xfffffffc,
	0x802c, 0xffffffff, 0xe0000000,
	0x9a60, 0xffffffff, 0x00000100,
	0x92a4, 0xffffffff, 0x00000100,
	0xc164, 0xffffffff, 0x00000100,
	0x9774, 0xffffffff, 0x00000100,
	0x8984, 0xffffffff, 0x06000100,
	0x8a18, 0xffffffff, 0x00000100,
	0x92a0, 0xffffffff, 0x00000100,
	0xc380, 0xffffffff, 0x00000100,
	0x8b28, 0xffffffff, 0x00000100,
	0x9144, 0xffffffff, 0x00000100,
	0x8d88, 0xffffffff, 0x00000100,
	0x8d8c, 0xffffffff, 0x00000100,
	0x9030, 0xffffffff, 0x00000100,
	0x9034, 0xffffffff, 0x00000100,
	0x9038, 0xffffffff, 0x00000100,
	0x903c, 0xffffffff, 0x00000100,
	0xad80, 0xffffffff, 0x00000100,
	0xac54, 0xffffffff, 0x00000100,
	0x897c, 0xffffffff, 0x06000100,
	0x9868, 0xffffffff, 0x00000100,
	0x9510, 0xffffffff, 0x00000100,
	0xaf04, 0xffffffff, 0x00000100,
	0xae04, 0xffffffff, 0x00000100,
	0x949c, 0xffffffff, 0x00000100,
	0x802c, 0xffffffff, 0xe0000000,
	0x9160, 0xffffffff, 0x00010000,
	0x9164, 0xffffffff, 0x00030002,
	0x9168, 0xffffffff, 0x00040007,
	0x916c, 0xffffffff, 0x00060005,
	0x9170, 0xffffffff, 0x00090008,
	0x9174, 0xffffffff, 0x00020001,
	0x9178, 0xffffffff, 0x00040003,
	0x917c, 0xffffffff, 0x00000007,
	0x9180, 0xffffffff, 0x00060005,
	0x9184, 0xffffffff, 0x00090008,
	0x9188, 0xffffffff, 0x00030002,
	0x918c, 0xffffffff, 0x00050004,
	0x9190, 0xffffffff, 0x00000008,
	0x9194, 0xffffffff, 0x00070006,
	0x9198, 0xffffffff, 0x000a0009,
	0x919c, 0xffffffff, 0x00040003,
	0x91a0, 0xffffffff, 0x00060005,
	0x91a4, 0xffffffff, 0x00000009,
	0x91a8, 0xffffffff, 0x00080007,
	0x91ac, 0xffffffff, 0x000b000a,
	0x91b0, 0xffffffff, 0x00050004,
	0x91b4, 0xffffffff, 0x00070006,
	0x91b8, 0xffffffff, 0x0008000b,
	0x91bc, 0xffffffff, 0x000a0009,
	0x91c0, 0xffffffff, 0x000d000c,
	0x91c4, 0xffffffff, 0x00060005,
	0x91c8, 0xffffffff, 0x00080007,
	0x91cc, 0xffffffff, 0x0000000b,
	0x91d0, 0xffffffff, 0x000a0009,
	0x91d4, 0xffffffff, 0x000d000c,
	0x9150, 0xffffffff, 0x96940200,
	0x8708, 0xffffffff, 0x00900100,
	0xc478, 0xffffffff, 0x00000080,
	0xc404, 0xffffffff, 0x0020003f,
	0x30, 0xffffffff, 0x0000001c,
	0x34, 0x000f0000, 0x000f0000,
	0x160c, 0xffffffff, 0x00000100,
	0x1024, 0xffffffff, 0x00000100,
	0x102c, 0x00000101, 0x00000000,
	0x20a8, 0xffffffff, 0x00000104,
	0x264c, 0x000c0000, 0x000c0000,
	0x2648, 0x000c0000, 0x000c0000,
	0x55e4, 0xff000fff, 0x00000100,
	0x55e8, 0x00000001, 0x00000001,
	0x2f50, 0x00000001, 0x00000001,
	0x30cc, 0xc0000fff, 0x00000104,
	0xc1e4, 0x00000001, 0x00000001,
	0xd0c0, 0xfffffff0, 0x00000100,
	0xd8c0, 0xfffffff0, 0x00000100
};
947
Alex Deucherfffbdda2013-05-13 13:36:23 -0400948static const u32 hainan_mgcg_cgcg_init[] =
949{
950 0xc400, 0xffffffff, 0xfffffffc,
951 0x802c, 0xffffffff, 0xe0000000,
952 0x9a60, 0xffffffff, 0x00000100,
953 0x92a4, 0xffffffff, 0x00000100,
954 0xc164, 0xffffffff, 0x00000100,
955 0x9774, 0xffffffff, 0x00000100,
956 0x8984, 0xffffffff, 0x06000100,
957 0x8a18, 0xffffffff, 0x00000100,
958 0x92a0, 0xffffffff, 0x00000100,
959 0xc380, 0xffffffff, 0x00000100,
960 0x8b28, 0xffffffff, 0x00000100,
961 0x9144, 0xffffffff, 0x00000100,
962 0x8d88, 0xffffffff, 0x00000100,
963 0x8d8c, 0xffffffff, 0x00000100,
964 0x9030, 0xffffffff, 0x00000100,
965 0x9034, 0xffffffff, 0x00000100,
966 0x9038, 0xffffffff, 0x00000100,
967 0x903c, 0xffffffff, 0x00000100,
968 0xad80, 0xffffffff, 0x00000100,
969 0xac54, 0xffffffff, 0x00000100,
970 0x897c, 0xffffffff, 0x06000100,
971 0x9868, 0xffffffff, 0x00000100,
972 0x9510, 0xffffffff, 0x00000100,
973 0xaf04, 0xffffffff, 0x00000100,
974 0xae04, 0xffffffff, 0x00000100,
975 0x949c, 0xffffffff, 0x00000100,
976 0x802c, 0xffffffff, 0xe0000000,
977 0x9160, 0xffffffff, 0x00010000,
978 0x9164, 0xffffffff, 0x00030002,
979 0x9168, 0xffffffff, 0x00040007,
980 0x916c, 0xffffffff, 0x00060005,
981 0x9170, 0xffffffff, 0x00090008,
982 0x9174, 0xffffffff, 0x00020001,
983 0x9178, 0xffffffff, 0x00040003,
984 0x917c, 0xffffffff, 0x00000007,
985 0x9180, 0xffffffff, 0x00060005,
986 0x9184, 0xffffffff, 0x00090008,
987 0x9188, 0xffffffff, 0x00030002,
988 0x918c, 0xffffffff, 0x00050004,
989 0x9190, 0xffffffff, 0x00000008,
990 0x9194, 0xffffffff, 0x00070006,
991 0x9198, 0xffffffff, 0x000a0009,
992 0x919c, 0xffffffff, 0x00040003,
993 0x91a0, 0xffffffff, 0x00060005,
994 0x91a4, 0xffffffff, 0x00000009,
995 0x91a8, 0xffffffff, 0x00080007,
996 0x91ac, 0xffffffff, 0x000b000a,
997 0x91b0, 0xffffffff, 0x00050004,
998 0x91b4, 0xffffffff, 0x00070006,
999 0x91b8, 0xffffffff, 0x0008000b,
1000 0x91bc, 0xffffffff, 0x000a0009,
1001 0x91c0, 0xffffffff, 0x000d000c,
1002 0x91c4, 0xffffffff, 0x00060005,
1003 0x91c8, 0xffffffff, 0x00080007,
1004 0x91cc, 0xffffffff, 0x0000000b,
1005 0x91d0, 0xffffffff, 0x000a0009,
1006 0x91d4, 0xffffffff, 0x000d000c,
1007 0x9150, 0xffffffff, 0x96940200,
1008 0x8708, 0xffffffff, 0x00900100,
1009 0xc478, 0xffffffff, 0x00000080,
1010 0xc404, 0xffffffff, 0x0020003f,
1011 0x30, 0xffffffff, 0x0000001c,
1012 0x34, 0x000f0000, 0x000f0000,
1013 0x160c, 0xffffffff, 0x00000100,
1014 0x1024, 0xffffffff, 0x00000100,
1015 0x20a8, 0xffffffff, 0x00000104,
1016 0x264c, 0x000c0000, 0x000c0000,
1017 0x2648, 0x000c0000, 0x000c0000,
1018 0x2f50, 0x00000001, 0x00000001,
1019 0x30cc, 0xc0000fff, 0x00000104,
1020 0xc1e4, 0x00000001, 0x00000001,
1021 0xd0c0, 0xfffffff0, 0x00000100,
1022 0xd8c0, 0xfffffff0, 0x00000100
1023};
1024
Alex Deucher205996c2013-03-01 17:08:42 -05001025static u32 verde_pg_init[] =
1026{
1027 0x353c, 0xffffffff, 0x40000,
1028 0x3538, 0xffffffff, 0x200010ff,
1029 0x353c, 0xffffffff, 0x0,
1030 0x353c, 0xffffffff, 0x0,
1031 0x353c, 0xffffffff, 0x0,
1032 0x353c, 0xffffffff, 0x0,
1033 0x353c, 0xffffffff, 0x0,
1034 0x353c, 0xffffffff, 0x7007,
1035 0x3538, 0xffffffff, 0x300010ff,
1036 0x353c, 0xffffffff, 0x0,
1037 0x353c, 0xffffffff, 0x0,
1038 0x353c, 0xffffffff, 0x0,
1039 0x353c, 0xffffffff, 0x0,
1040 0x353c, 0xffffffff, 0x0,
1041 0x353c, 0xffffffff, 0x400000,
1042 0x3538, 0xffffffff, 0x100010ff,
1043 0x353c, 0xffffffff, 0x0,
1044 0x353c, 0xffffffff, 0x0,
1045 0x353c, 0xffffffff, 0x0,
1046 0x353c, 0xffffffff, 0x0,
1047 0x353c, 0xffffffff, 0x0,
1048 0x353c, 0xffffffff, 0x120200,
1049 0x3538, 0xffffffff, 0x500010ff,
1050 0x353c, 0xffffffff, 0x0,
1051 0x353c, 0xffffffff, 0x0,
1052 0x353c, 0xffffffff, 0x0,
1053 0x353c, 0xffffffff, 0x0,
1054 0x353c, 0xffffffff, 0x0,
1055 0x353c, 0xffffffff, 0x1e1e16,
1056 0x3538, 0xffffffff, 0x600010ff,
1057 0x353c, 0xffffffff, 0x0,
1058 0x353c, 0xffffffff, 0x0,
1059 0x353c, 0xffffffff, 0x0,
1060 0x353c, 0xffffffff, 0x0,
1061 0x353c, 0xffffffff, 0x0,
1062 0x353c, 0xffffffff, 0x171f1e,
1063 0x3538, 0xffffffff, 0x700010ff,
1064 0x353c, 0xffffffff, 0x0,
1065 0x353c, 0xffffffff, 0x0,
1066 0x353c, 0xffffffff, 0x0,
1067 0x353c, 0xffffffff, 0x0,
1068 0x353c, 0xffffffff, 0x0,
1069 0x353c, 0xffffffff, 0x0,
1070 0x3538, 0xffffffff, 0x9ff,
1071 0x3500, 0xffffffff, 0x0,
1072 0x3504, 0xffffffff, 0x10000800,
1073 0x3504, 0xffffffff, 0xf,
1074 0x3504, 0xffffffff, 0xf,
1075 0x3500, 0xffffffff, 0x4,
1076 0x3504, 0xffffffff, 0x1000051e,
1077 0x3504, 0xffffffff, 0xffff,
1078 0x3504, 0xffffffff, 0xffff,
1079 0x3500, 0xffffffff, 0x8,
1080 0x3504, 0xffffffff, 0x80500,
1081 0x3500, 0xffffffff, 0x12,
1082 0x3504, 0xffffffff, 0x9050c,
1083 0x3500, 0xffffffff, 0x1d,
1084 0x3504, 0xffffffff, 0xb052c,
1085 0x3500, 0xffffffff, 0x2a,
1086 0x3504, 0xffffffff, 0x1053e,
1087 0x3500, 0xffffffff, 0x2d,
1088 0x3504, 0xffffffff, 0x10546,
1089 0x3500, 0xffffffff, 0x30,
1090 0x3504, 0xffffffff, 0xa054e,
1091 0x3500, 0xffffffff, 0x3c,
1092 0x3504, 0xffffffff, 0x1055f,
1093 0x3500, 0xffffffff, 0x3f,
1094 0x3504, 0xffffffff, 0x10567,
1095 0x3500, 0xffffffff, 0x42,
1096 0x3504, 0xffffffff, 0x1056f,
1097 0x3500, 0xffffffff, 0x45,
1098 0x3504, 0xffffffff, 0x10572,
1099 0x3500, 0xffffffff, 0x48,
1100 0x3504, 0xffffffff, 0x20575,
1101 0x3500, 0xffffffff, 0x4c,
1102 0x3504, 0xffffffff, 0x190801,
1103 0x3500, 0xffffffff, 0x67,
1104 0x3504, 0xffffffff, 0x1082a,
1105 0x3500, 0xffffffff, 0x6a,
1106 0x3504, 0xffffffff, 0x1b082d,
1107 0x3500, 0xffffffff, 0x87,
1108 0x3504, 0xffffffff, 0x310851,
1109 0x3500, 0xffffffff, 0xba,
1110 0x3504, 0xffffffff, 0x891,
1111 0x3500, 0xffffffff, 0xbc,
1112 0x3504, 0xffffffff, 0x893,
1113 0x3500, 0xffffffff, 0xbe,
1114 0x3504, 0xffffffff, 0x20895,
1115 0x3500, 0xffffffff, 0xc2,
1116 0x3504, 0xffffffff, 0x20899,
1117 0x3500, 0xffffffff, 0xc6,
1118 0x3504, 0xffffffff, 0x2089d,
1119 0x3500, 0xffffffff, 0xca,
1120 0x3504, 0xffffffff, 0x8a1,
1121 0x3500, 0xffffffff, 0xcc,
1122 0x3504, 0xffffffff, 0x8a3,
1123 0x3500, 0xffffffff, 0xce,
1124 0x3504, 0xffffffff, 0x308a5,
1125 0x3500, 0xffffffff, 0xd3,
1126 0x3504, 0xffffffff, 0x6d08cd,
1127 0x3500, 0xffffffff, 0x142,
1128 0x3504, 0xffffffff, 0x2000095a,
1129 0x3504, 0xffffffff, 0x1,
1130 0x3500, 0xffffffff, 0x144,
1131 0x3504, 0xffffffff, 0x301f095b,
1132 0x3500, 0xffffffff, 0x165,
1133 0x3504, 0xffffffff, 0xc094d,
1134 0x3500, 0xffffffff, 0x173,
1135 0x3504, 0xffffffff, 0xf096d,
1136 0x3500, 0xffffffff, 0x184,
1137 0x3504, 0xffffffff, 0x15097f,
1138 0x3500, 0xffffffff, 0x19b,
1139 0x3504, 0xffffffff, 0xc0998,
1140 0x3500, 0xffffffff, 0x1a9,
1141 0x3504, 0xffffffff, 0x409a7,
1142 0x3500, 0xffffffff, 0x1af,
1143 0x3504, 0xffffffff, 0xcdc,
1144 0x3500, 0xffffffff, 0x1b1,
1145 0x3504, 0xffffffff, 0x800,
1146 0x3508, 0xffffffff, 0x6c9b2000,
1147 0x3510, 0xfc00, 0x2000,
1148 0x3544, 0xffffffff, 0xfc0,
1149 0x28d4, 0x00000100, 0x100
1150};
1151
1152static void si_init_golden_registers(struct radeon_device *rdev)
1153{
1154 switch (rdev->family) {
1155 case CHIP_TAHITI:
1156 radeon_program_register_sequence(rdev,
1157 tahiti_golden_registers,
1158 (const u32)ARRAY_SIZE(tahiti_golden_registers));
1159 radeon_program_register_sequence(rdev,
1160 tahiti_golden_rlc_registers,
1161 (const u32)ARRAY_SIZE(tahiti_golden_rlc_registers));
1162 radeon_program_register_sequence(rdev,
1163 tahiti_mgcg_cgcg_init,
1164 (const u32)ARRAY_SIZE(tahiti_mgcg_cgcg_init));
1165 radeon_program_register_sequence(rdev,
1166 tahiti_golden_registers2,
1167 (const u32)ARRAY_SIZE(tahiti_golden_registers2));
1168 break;
1169 case CHIP_PITCAIRN:
1170 radeon_program_register_sequence(rdev,
1171 pitcairn_golden_registers,
1172 (const u32)ARRAY_SIZE(pitcairn_golden_registers));
1173 radeon_program_register_sequence(rdev,
1174 pitcairn_golden_rlc_registers,
1175 (const u32)ARRAY_SIZE(pitcairn_golden_rlc_registers));
1176 radeon_program_register_sequence(rdev,
1177 pitcairn_mgcg_cgcg_init,
1178 (const u32)ARRAY_SIZE(pitcairn_mgcg_cgcg_init));
1179 break;
1180 case CHIP_VERDE:
1181 radeon_program_register_sequence(rdev,
1182 verde_golden_registers,
1183 (const u32)ARRAY_SIZE(verde_golden_registers));
1184 radeon_program_register_sequence(rdev,
1185 verde_golden_rlc_registers,
1186 (const u32)ARRAY_SIZE(verde_golden_rlc_registers));
1187 radeon_program_register_sequence(rdev,
1188 verde_mgcg_cgcg_init,
1189 (const u32)ARRAY_SIZE(verde_mgcg_cgcg_init));
1190 radeon_program_register_sequence(rdev,
1191 verde_pg_init,
1192 (const u32)ARRAY_SIZE(verde_pg_init));
1193 break;
1194 case CHIP_OLAND:
1195 radeon_program_register_sequence(rdev,
1196 oland_golden_registers,
1197 (const u32)ARRAY_SIZE(oland_golden_registers));
1198 radeon_program_register_sequence(rdev,
1199 oland_golden_rlc_registers,
1200 (const u32)ARRAY_SIZE(oland_golden_rlc_registers));
1201 radeon_program_register_sequence(rdev,
1202 oland_mgcg_cgcg_init,
1203 (const u32)ARRAY_SIZE(oland_mgcg_cgcg_init));
1204 break;
Alex Deucherfffbdda2013-05-13 13:36:23 -04001205 case CHIP_HAINAN:
1206 radeon_program_register_sequence(rdev,
1207 hainan_golden_registers,
1208 (const u32)ARRAY_SIZE(hainan_golden_registers));
1209 radeon_program_register_sequence(rdev,
1210 hainan_golden_registers2,
1211 (const u32)ARRAY_SIZE(hainan_golden_registers2));
1212 radeon_program_register_sequence(rdev,
1213 hainan_mgcg_cgcg_init,
1214 (const u32)ARRAY_SIZE(hainan_mgcg_cgcg_init));
1215 break;
Alex Deucher205996c2013-03-01 17:08:42 -05001216 default:
1217 break;
1218 }
1219}
1220
Alex Deucher454d2e22013-02-14 10:04:02 -05001221#define PCIE_BUS_CLK 10000
1222#define TCLK (PCIE_BUS_CLK / 10)
1223
1224/**
1225 * si_get_xclk - get the xclk
1226 *
1227 * @rdev: radeon_device pointer
1228 *
1229 * Returns the reference clock used by the gfx engine
1230 * (SI).
1231 */
1232u32 si_get_xclk(struct radeon_device *rdev)
1233{
1234 u32 reference_clock = rdev->clock.spll.reference_freq;
1235 u32 tmp;
1236
1237 tmp = RREG32(CG_CLKPIN_CNTL_2);
1238 if (tmp & MUX_TCLK_TO_XCLK)
1239 return TCLK;
1240
1241 tmp = RREG32(CG_CLKPIN_CNTL);
1242 if (tmp & XTALIN_DIVIDE)
1243 return reference_clock / 4;
1244
1245 return reference_clock;
1246}
1247
Alex Deucher1bd47d22012-03-20 17:18:10 -04001248/* get temperature in millidegrees */
1249int si_get_temp(struct radeon_device *rdev)
1250{
1251 u32 temp;
1252 int actual_temp = 0;
1253
1254 temp = (RREG32(CG_MULT_THERMAL_STATUS) & CTF_TEMP_MASK) >>
1255 CTF_TEMP_SHIFT;
1256
1257 if (temp & 0x200)
1258 actual_temp = 255;
1259 else
1260 actual_temp = temp & 0x1ff;
1261
1262 actual_temp = (actual_temp * 1000);
1263
1264 return actual_temp;
1265}
1266
Alex Deucher8b074dd2012-03-20 17:18:18 -04001267#define TAHITI_IO_MC_REGS_SIZE 36
1268
1269static const u32 tahiti_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
1270 {0x0000006f, 0x03044000},
1271 {0x00000070, 0x0480c018},
1272 {0x00000071, 0x00000040},
1273 {0x00000072, 0x01000000},
1274 {0x00000074, 0x000000ff},
1275 {0x00000075, 0x00143400},
1276 {0x00000076, 0x08ec0800},
1277 {0x00000077, 0x040000cc},
1278 {0x00000079, 0x00000000},
1279 {0x0000007a, 0x21000409},
1280 {0x0000007c, 0x00000000},
1281 {0x0000007d, 0xe8000000},
1282 {0x0000007e, 0x044408a8},
1283 {0x0000007f, 0x00000003},
1284 {0x00000080, 0x00000000},
1285 {0x00000081, 0x01000000},
1286 {0x00000082, 0x02000000},
1287 {0x00000083, 0x00000000},
1288 {0x00000084, 0xe3f3e4f4},
1289 {0x00000085, 0x00052024},
1290 {0x00000087, 0x00000000},
1291 {0x00000088, 0x66036603},
1292 {0x00000089, 0x01000000},
1293 {0x0000008b, 0x1c0a0000},
1294 {0x0000008c, 0xff010000},
1295 {0x0000008e, 0xffffefff},
1296 {0x0000008f, 0xfff3efff},
1297 {0x00000090, 0xfff3efbf},
1298 {0x00000094, 0x00101101},
1299 {0x00000095, 0x00000fff},
1300 {0x00000096, 0x00116fff},
1301 {0x00000097, 0x60010000},
1302 {0x00000098, 0x10010000},
1303 {0x00000099, 0x00006000},
1304 {0x0000009a, 0x00001000},
1305 {0x0000009f, 0x00a77400}
1306};
1307
/* Pitcairn MC io debug register pairs — identical to the Tahiti table
 * except for the final 0x9f entry. */
static const u32 pitcairn_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
	{0x0000006f, 0x03044000},
	{0x00000070, 0x0480c018},
	{0x00000071, 0x00000040},
	{0x00000072, 0x01000000},
	{0x00000074, 0x000000ff},
	{0x00000075, 0x00143400},
	{0x00000076, 0x08ec0800},
	{0x00000077, 0x040000cc},
	{0x00000079, 0x00000000},
	{0x0000007a, 0x21000409},
	{0x0000007c, 0x00000000},
	{0x0000007d, 0xe8000000},
	{0x0000007e, 0x044408a8},
	{0x0000007f, 0x00000003},
	{0x00000080, 0x00000000},
	{0x00000081, 0x01000000},
	{0x00000082, 0x02000000},
	{0x00000083, 0x00000000},
	{0x00000084, 0xe3f3e4f4},
	{0x00000085, 0x00052024},
	{0x00000087, 0x00000000},
	{0x00000088, 0x66036603},
	{0x00000089, 0x01000000},
	{0x0000008b, 0x1c0a0000},
	{0x0000008c, 0xff010000},
	{0x0000008e, 0xffffefff},
	{0x0000008f, 0xfff3efff},
	{0x00000090, 0xfff3efbf},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00a47400}
};
1346
/* Verde MC io debug register pairs — identical to the Tahiti table
 * except for the final 0x9f entry. */
static const u32 verde_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
	{0x0000006f, 0x03044000},
	{0x00000070, 0x0480c018},
	{0x00000071, 0x00000040},
	{0x00000072, 0x01000000},
	{0x00000074, 0x000000ff},
	{0x00000075, 0x00143400},
	{0x00000076, 0x08ec0800},
	{0x00000077, 0x040000cc},
	{0x00000079, 0x00000000},
	{0x0000007a, 0x21000409},
	{0x0000007c, 0x00000000},
	{0x0000007d, 0xe8000000},
	{0x0000007e, 0x044408a8},
	{0x0000007f, 0x00000003},
	{0x00000080, 0x00000000},
	{0x00000081, 0x01000000},
	{0x00000082, 0x02000000},
	{0x00000083, 0x00000000},
	{0x00000084, 0xe3f3e4f4},
	{0x00000085, 0x00052024},
	{0x00000087, 0x00000000},
	{0x00000088, 0x66036603},
	{0x00000089, 0x01000000},
	{0x0000008b, 0x1c0a0000},
	{0x0000008c, 0xff010000},
	{0x0000008e, 0xffffefff},
	{0x0000008f, 0xfff3efff},
	{0x00000090, 0xfff3efbf},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00a37400}
};
1385
Alex Deucherbcc7f5d2012-07-26 18:36:28 -04001386static const u32 oland_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
1387 {0x0000006f, 0x03044000},
1388 {0x00000070, 0x0480c018},
1389 {0x00000071, 0x00000040},
1390 {0x00000072, 0x01000000},
1391 {0x00000074, 0x000000ff},
1392 {0x00000075, 0x00143400},
1393 {0x00000076, 0x08ec0800},
1394 {0x00000077, 0x040000cc},
1395 {0x00000079, 0x00000000},
1396 {0x0000007a, 0x21000409},
1397 {0x0000007c, 0x00000000},
1398 {0x0000007d, 0xe8000000},
1399 {0x0000007e, 0x044408a8},
1400 {0x0000007f, 0x00000003},
1401 {0x00000080, 0x00000000},
1402 {0x00000081, 0x01000000},
1403 {0x00000082, 0x02000000},
1404 {0x00000083, 0x00000000},
1405 {0x00000084, 0xe3f3e4f4},
1406 {0x00000085, 0x00052024},
1407 {0x00000087, 0x00000000},
1408 {0x00000088, 0x66036603},
1409 {0x00000089, 0x01000000},
1410 {0x0000008b, 0x1c0a0000},
1411 {0x0000008c, 0xff010000},
1412 {0x0000008e, 0xffffefff},
1413 {0x0000008f, 0xfff3efff},
1414 {0x00000090, 0xfff3efbf},
1415 {0x00000094, 0x00101101},
1416 {0x00000095, 0x00000fff},
1417 {0x00000096, 0x00116fff},
1418 {0x00000097, 0x60010000},
1419 {0x00000098, 0x10010000},
1420 {0x00000099, 0x00006000},
1421 {0x0000009a, 0x00001000},
1422 {0x0000009f, 0x00a17730}
1423};
1424
Alex Deucherc04c00b2012-07-31 12:57:45 -04001425static const u32 hainan_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
1426 {0x0000006f, 0x03044000},
1427 {0x00000070, 0x0480c018},
1428 {0x00000071, 0x00000040},
1429 {0x00000072, 0x01000000},
1430 {0x00000074, 0x000000ff},
1431 {0x00000075, 0x00143400},
1432 {0x00000076, 0x08ec0800},
1433 {0x00000077, 0x040000cc},
1434 {0x00000079, 0x00000000},
1435 {0x0000007a, 0x21000409},
1436 {0x0000007c, 0x00000000},
1437 {0x0000007d, 0xe8000000},
1438 {0x0000007e, 0x044408a8},
1439 {0x0000007f, 0x00000003},
1440 {0x00000080, 0x00000000},
1441 {0x00000081, 0x01000000},
1442 {0x00000082, 0x02000000},
1443 {0x00000083, 0x00000000},
1444 {0x00000084, 0xe3f3e4f4},
1445 {0x00000085, 0x00052024},
1446 {0x00000087, 0x00000000},
1447 {0x00000088, 0x66036603},
1448 {0x00000089, 0x01000000},
1449 {0x0000008b, 0x1c0a0000},
1450 {0x0000008c, 0xff010000},
1451 {0x0000008e, 0xffffefff},
1452 {0x0000008f, 0xfff3efff},
1453 {0x00000090, 0xfff3efbf},
1454 {0x00000094, 0x00101101},
1455 {0x00000095, 0x00000fff},
1456 {0x00000096, 0x00116fff},
1457 {0x00000097, 0x60010000},
1458 {0x00000098, 0x10010000},
1459 {0x00000099, 0x00006000},
1460 {0x0000009a, 0x00001000},
1461 {0x0000009f, 0x00a07730}
1462};
1463
Alex Deucher8b074dd2012-03-20 17:18:18 -04001464/* ucode loading */
Alex Deucher6c7bcce2013-12-18 14:07:14 -05001465int si_mc_load_microcode(struct radeon_device *rdev)
Alex Deucher8b074dd2012-03-20 17:18:18 -04001466{
1467 const __be32 *fw_data;
1468 u32 running, blackout = 0;
1469 u32 *io_mc_regs;
1470 int i, ucode_size, regs_size;
1471
1472 if (!rdev->mc_fw)
1473 return -EINVAL;
1474
1475 switch (rdev->family) {
1476 case CHIP_TAHITI:
1477 io_mc_regs = (u32 *)&tahiti_io_mc_regs;
1478 ucode_size = SI_MC_UCODE_SIZE;
1479 regs_size = TAHITI_IO_MC_REGS_SIZE;
1480 break;
1481 case CHIP_PITCAIRN:
1482 io_mc_regs = (u32 *)&pitcairn_io_mc_regs;
1483 ucode_size = SI_MC_UCODE_SIZE;
1484 regs_size = TAHITI_IO_MC_REGS_SIZE;
1485 break;
1486 case CHIP_VERDE:
1487 default:
1488 io_mc_regs = (u32 *)&verde_io_mc_regs;
1489 ucode_size = SI_MC_UCODE_SIZE;
1490 regs_size = TAHITI_IO_MC_REGS_SIZE;
1491 break;
Alex Deucherbcc7f5d2012-07-26 18:36:28 -04001492 case CHIP_OLAND:
1493 io_mc_regs = (u32 *)&oland_io_mc_regs;
1494 ucode_size = OLAND_MC_UCODE_SIZE;
1495 regs_size = TAHITI_IO_MC_REGS_SIZE;
1496 break;
Alex Deucherc04c00b2012-07-31 12:57:45 -04001497 case CHIP_HAINAN:
1498 io_mc_regs = (u32 *)&hainan_io_mc_regs;
1499 ucode_size = OLAND_MC_UCODE_SIZE;
1500 regs_size = TAHITI_IO_MC_REGS_SIZE;
1501 break;
Alex Deucher8b074dd2012-03-20 17:18:18 -04001502 }
1503
1504 running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;
1505
1506 if (running == 0) {
1507 if (running) {
1508 blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
1509 WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1);
1510 }
1511
1512 /* reset the engine and set to writable */
1513 WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
1514 WREG32(MC_SEQ_SUP_CNTL, 0x00000010);
1515
1516 /* load mc io regs */
1517 for (i = 0; i < regs_size; i++) {
1518 WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]);
1519 WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
1520 }
1521 /* load the MC ucode */
1522 fw_data = (const __be32 *)rdev->mc_fw->data;
1523 for (i = 0; i < ucode_size; i++)
1524 WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++));
1525
1526 /* put the engine back into the active state */
1527 WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
1528 WREG32(MC_SEQ_SUP_CNTL, 0x00000004);
1529 WREG32(MC_SEQ_SUP_CNTL, 0x00000001);
1530
1531 /* wait for training to complete */
1532 for (i = 0; i < rdev->usec_timeout; i++) {
1533 if (RREG32(MC_SEQ_TRAIN_WAKEUP_CNTL) & TRAIN_DONE_D0)
1534 break;
1535 udelay(1);
1536 }
1537 for (i = 0; i < rdev->usec_timeout; i++) {
1538 if (RREG32(MC_SEQ_TRAIN_WAKEUP_CNTL) & TRAIN_DONE_D1)
1539 break;
1540 udelay(1);
1541 }
1542
1543 if (running)
1544 WREG32(MC_SHARED_BLACKOUT_CNTL, blackout);
1545 }
1546
1547 return 0;
1548}
1549
Alex Deucher0f0de062012-03-20 17:18:17 -04001550static int si_init_microcode(struct radeon_device *rdev)
1551{
Alex Deucher0f0de062012-03-20 17:18:17 -04001552 const char *chip_name;
1553 const char *rlc_chip_name;
1554 size_t pfp_req_size, me_req_size, ce_req_size, rlc_req_size, mc_req_size;
Alex Deuchera9e61412013-06-25 17:56:16 -04001555 size_t smc_req_size;
Alex Deucher0f0de062012-03-20 17:18:17 -04001556 char fw_name[30];
1557 int err;
1558
1559 DRM_DEBUG("\n");
1560
Alex Deucher0f0de062012-03-20 17:18:17 -04001561 switch (rdev->family) {
1562 case CHIP_TAHITI:
1563 chip_name = "TAHITI";
1564 rlc_chip_name = "TAHITI";
1565 pfp_req_size = SI_PFP_UCODE_SIZE * 4;
1566 me_req_size = SI_PM4_UCODE_SIZE * 4;
1567 ce_req_size = SI_CE_UCODE_SIZE * 4;
1568 rlc_req_size = SI_RLC_UCODE_SIZE * 4;
1569 mc_req_size = SI_MC_UCODE_SIZE * 4;
Alex Deuchera9e61412013-06-25 17:56:16 -04001570 smc_req_size = ALIGN(TAHITI_SMC_UCODE_SIZE, 4);
Alex Deucher0f0de062012-03-20 17:18:17 -04001571 break;
1572 case CHIP_PITCAIRN:
1573 chip_name = "PITCAIRN";
1574 rlc_chip_name = "PITCAIRN";
1575 pfp_req_size = SI_PFP_UCODE_SIZE * 4;
1576 me_req_size = SI_PM4_UCODE_SIZE * 4;
1577 ce_req_size = SI_CE_UCODE_SIZE * 4;
1578 rlc_req_size = SI_RLC_UCODE_SIZE * 4;
1579 mc_req_size = SI_MC_UCODE_SIZE * 4;
Alex Deuchera9e61412013-06-25 17:56:16 -04001580 smc_req_size = ALIGN(PITCAIRN_SMC_UCODE_SIZE, 4);
Alex Deucher0f0de062012-03-20 17:18:17 -04001581 break;
1582 case CHIP_VERDE:
1583 chip_name = "VERDE";
1584 rlc_chip_name = "VERDE";
1585 pfp_req_size = SI_PFP_UCODE_SIZE * 4;
1586 me_req_size = SI_PM4_UCODE_SIZE * 4;
1587 ce_req_size = SI_CE_UCODE_SIZE * 4;
1588 rlc_req_size = SI_RLC_UCODE_SIZE * 4;
1589 mc_req_size = SI_MC_UCODE_SIZE * 4;
Alex Deuchera9e61412013-06-25 17:56:16 -04001590 smc_req_size = ALIGN(VERDE_SMC_UCODE_SIZE, 4);
Alex Deucher0f0de062012-03-20 17:18:17 -04001591 break;
Alex Deucherbcc7f5d2012-07-26 18:36:28 -04001592 case CHIP_OLAND:
1593 chip_name = "OLAND";
1594 rlc_chip_name = "OLAND";
1595 pfp_req_size = SI_PFP_UCODE_SIZE * 4;
1596 me_req_size = SI_PM4_UCODE_SIZE * 4;
1597 ce_req_size = SI_CE_UCODE_SIZE * 4;
1598 rlc_req_size = SI_RLC_UCODE_SIZE * 4;
1599 mc_req_size = OLAND_MC_UCODE_SIZE * 4;
Alex Deuchera9e61412013-06-25 17:56:16 -04001600 smc_req_size = ALIGN(OLAND_SMC_UCODE_SIZE, 4);
Alex Deucherbcc7f5d2012-07-26 18:36:28 -04001601 break;
Alex Deucherc04c00b2012-07-31 12:57:45 -04001602 case CHIP_HAINAN:
1603 chip_name = "HAINAN";
1604 rlc_chip_name = "HAINAN";
1605 pfp_req_size = SI_PFP_UCODE_SIZE * 4;
1606 me_req_size = SI_PM4_UCODE_SIZE * 4;
1607 ce_req_size = SI_CE_UCODE_SIZE * 4;
1608 rlc_req_size = SI_RLC_UCODE_SIZE * 4;
1609 mc_req_size = OLAND_MC_UCODE_SIZE * 4;
Alex Deuchera9e61412013-06-25 17:56:16 -04001610 smc_req_size = ALIGN(HAINAN_SMC_UCODE_SIZE, 4);
Alex Deucherc04c00b2012-07-31 12:57:45 -04001611 break;
Alex Deucher0f0de062012-03-20 17:18:17 -04001612 default: BUG();
1613 }
1614
1615 DRM_INFO("Loading %s Microcode\n", chip_name);
1616
1617 snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
Jerome Glisse0a168932013-07-11 15:53:01 -04001618 err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
Alex Deucher0f0de062012-03-20 17:18:17 -04001619 if (err)
1620 goto out;
1621 if (rdev->pfp_fw->size != pfp_req_size) {
1622 printk(KERN_ERR
1623 "si_cp: Bogus length %zu in firmware \"%s\"\n",
1624 rdev->pfp_fw->size, fw_name);
1625 err = -EINVAL;
1626 goto out;
1627 }
1628
1629 snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
Jerome Glisse0a168932013-07-11 15:53:01 -04001630 err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
Alex Deucher0f0de062012-03-20 17:18:17 -04001631 if (err)
1632 goto out;
1633 if (rdev->me_fw->size != me_req_size) {
1634 printk(KERN_ERR
1635 "si_cp: Bogus length %zu in firmware \"%s\"\n",
1636 rdev->me_fw->size, fw_name);
1637 err = -EINVAL;
1638 }
1639
1640 snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name);
Jerome Glisse0a168932013-07-11 15:53:01 -04001641 err = request_firmware(&rdev->ce_fw, fw_name, rdev->dev);
Alex Deucher0f0de062012-03-20 17:18:17 -04001642 if (err)
1643 goto out;
1644 if (rdev->ce_fw->size != ce_req_size) {
1645 printk(KERN_ERR
1646 "si_cp: Bogus length %zu in firmware \"%s\"\n",
1647 rdev->ce_fw->size, fw_name);
1648 err = -EINVAL;
1649 }
1650
1651 snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
Jerome Glisse0a168932013-07-11 15:53:01 -04001652 err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
Alex Deucher0f0de062012-03-20 17:18:17 -04001653 if (err)
1654 goto out;
1655 if (rdev->rlc_fw->size != rlc_req_size) {
1656 printk(KERN_ERR
1657 "si_rlc: Bogus length %zu in firmware \"%s\"\n",
1658 rdev->rlc_fw->size, fw_name);
1659 err = -EINVAL;
1660 }
1661
1662 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
Jerome Glisse0a168932013-07-11 15:53:01 -04001663 err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
Alex Deucher0f0de062012-03-20 17:18:17 -04001664 if (err)
1665 goto out;
1666 if (rdev->mc_fw->size != mc_req_size) {
1667 printk(KERN_ERR
1668 "si_mc: Bogus length %zu in firmware \"%s\"\n",
1669 rdev->mc_fw->size, fw_name);
1670 err = -EINVAL;
1671 }
1672
Alex Deuchera9e61412013-06-25 17:56:16 -04001673 snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
Jerome Glisse0a168932013-07-11 15:53:01 -04001674 err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
Alex Deucher8a53fa22013-08-07 16:09:08 -04001675 if (err) {
1676 printk(KERN_ERR
1677 "smc: error loading firmware \"%s\"\n",
1678 fw_name);
1679 release_firmware(rdev->smc_fw);
1680 rdev->smc_fw = NULL;
Alex Deucherd8367112013-10-16 11:36:30 -04001681 err = 0;
Alex Deucher8a53fa22013-08-07 16:09:08 -04001682 } else if (rdev->smc_fw->size != smc_req_size) {
Alex Deuchera9e61412013-06-25 17:56:16 -04001683 printk(KERN_ERR
1684 "si_smc: Bogus length %zu in firmware \"%s\"\n",
1685 rdev->smc_fw->size, fw_name);
1686 err = -EINVAL;
1687 }
1688
Alex Deucher0f0de062012-03-20 17:18:17 -04001689out:
Alex Deucher0f0de062012-03-20 17:18:17 -04001690 if (err) {
1691 if (err != -EINVAL)
1692 printk(KERN_ERR
1693 "si_cp: Failed to load firmware \"%s\"\n",
1694 fw_name);
1695 release_firmware(rdev->pfp_fw);
1696 rdev->pfp_fw = NULL;
1697 release_firmware(rdev->me_fw);
1698 rdev->me_fw = NULL;
1699 release_firmware(rdev->ce_fw);
1700 rdev->ce_fw = NULL;
1701 release_firmware(rdev->rlc_fw);
1702 rdev->rlc_fw = NULL;
1703 release_firmware(rdev->mc_fw);
1704 rdev->mc_fw = NULL;
Alex Deuchera9e61412013-06-25 17:56:16 -04001705 release_firmware(rdev->smc_fw);
1706 rdev->smc_fw = NULL;
Alex Deucher0f0de062012-03-20 17:18:17 -04001707 }
1708 return err;
1709}
1710
Alex Deucher43b3cd92012-03-20 17:18:00 -04001711/* watermark setup */
1712static u32 dce6_line_buffer_adjust(struct radeon_device *rdev,
1713 struct radeon_crtc *radeon_crtc,
1714 struct drm_display_mode *mode,
1715 struct drm_display_mode *other_mode)
1716{
Alex Deucher290d2452013-08-19 11:15:43 -04001717 u32 tmp, buffer_alloc, i;
1718 u32 pipe_offset = radeon_crtc->crtc_id * 0x20;
Alex Deucher43b3cd92012-03-20 17:18:00 -04001719 /*
1720 * Line Buffer Setup
1721 * There are 3 line buffers, each one shared by 2 display controllers.
1722 * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between
1723 * the display controllers. The paritioning is done via one of four
1724 * preset allocations specified in bits 21:20:
1725 * 0 - half lb
1726 * 2 - whole lb, other crtc must be disabled
1727 */
1728 /* this can get tricky if we have two large displays on a paired group
1729 * of crtcs. Ideally for multiple large displays we'd assign them to
1730 * non-linked crtcs for maximum line buffer allocation.
1731 */
1732 if (radeon_crtc->base.enabled && mode) {
Alex Deucher290d2452013-08-19 11:15:43 -04001733 if (other_mode) {
Alex Deucher43b3cd92012-03-20 17:18:00 -04001734 tmp = 0; /* 1/2 */
Alex Deucher290d2452013-08-19 11:15:43 -04001735 buffer_alloc = 1;
1736 } else {
Alex Deucher43b3cd92012-03-20 17:18:00 -04001737 tmp = 2; /* whole */
Alex Deucher290d2452013-08-19 11:15:43 -04001738 buffer_alloc = 2;
1739 }
1740 } else {
Alex Deucher43b3cd92012-03-20 17:18:00 -04001741 tmp = 0;
Alex Deucher290d2452013-08-19 11:15:43 -04001742 buffer_alloc = 0;
1743 }
Alex Deucher43b3cd92012-03-20 17:18:00 -04001744
1745 WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset,
1746 DC_LB_MEMORY_CONFIG(tmp));
1747
Alex Deucher290d2452013-08-19 11:15:43 -04001748 WREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
1749 DMIF_BUFFERS_ALLOCATED(buffer_alloc));
1750 for (i = 0; i < rdev->usec_timeout; i++) {
1751 if (RREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
1752 DMIF_BUFFERS_ALLOCATED_COMPLETED)
1753 break;
1754 udelay(1);
1755 }
1756
Alex Deucher43b3cd92012-03-20 17:18:00 -04001757 if (radeon_crtc->base.enabled && mode) {
1758 switch (tmp) {
1759 case 0:
1760 default:
1761 return 4096 * 2;
1762 case 2:
1763 return 8192 * 2;
1764 }
1765 }
1766
1767 /* controller not enabled, so no lb used */
1768 return 0;
1769}
1770
Alex Deucherca7db222012-03-20 17:18:30 -04001771static u32 si_get_number_of_dram_channels(struct radeon_device *rdev)
Alex Deucher43b3cd92012-03-20 17:18:00 -04001772{
1773 u32 tmp = RREG32(MC_SHARED_CHMAP);
1774
1775 switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
1776 case 0:
1777 default:
1778 return 1;
1779 case 1:
1780 return 2;
1781 case 2:
1782 return 4;
1783 case 3:
1784 return 8;
1785 case 4:
1786 return 3;
1787 case 5:
1788 return 6;
1789 case 6:
1790 return 10;
1791 case 7:
1792 return 12;
1793 case 8:
1794 return 16;
1795 }
1796}
1797
/* Inputs to the DCE6 display watermark calculations below; filled in from
 * the current display mode and memory configuration. */
struct dce6_wm_params {
	u32 dram_channels; /* number of dram channels */
	u32 yclk;          /* bandwidth per dram data pin in kHz */
	u32 sclk;          /* engine clock in kHz */
	u32 disp_clk;      /* display clock in kHz */
	u32 src_width;     /* viewport width */
	u32 active_time;   /* active display time in ns */
	u32 blank_time;    /* blank time in ns */
	bool interlaced;    /* mode is interlaced */
	fixed20_12 vsc;    /* vertical scale ratio */
	u32 num_heads;     /* number of active crtcs */
	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
	u32 lb_size;       /* line buffer allocated to pipe */
	u32 vtaps;         /* vertical scaler taps */
};
1813
1814static u32 dce6_dram_bandwidth(struct dce6_wm_params *wm)
1815{
1816 /* Calculate raw DRAM Bandwidth */
1817 fixed20_12 dram_efficiency; /* 0.7 */
1818 fixed20_12 yclk, dram_channels, bandwidth;
1819 fixed20_12 a;
1820
1821 a.full = dfixed_const(1000);
1822 yclk.full = dfixed_const(wm->yclk);
1823 yclk.full = dfixed_div(yclk, a);
1824 dram_channels.full = dfixed_const(wm->dram_channels * 4);
1825 a.full = dfixed_const(10);
1826 dram_efficiency.full = dfixed_const(7);
1827 dram_efficiency.full = dfixed_div(dram_efficiency, a);
1828 bandwidth.full = dfixed_mul(dram_channels, yclk);
1829 bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);
1830
1831 return dfixed_trunc(bandwidth);
1832}
1833
1834static u32 dce6_dram_bandwidth_for_display(struct dce6_wm_params *wm)
1835{
1836 /* Calculate DRAM Bandwidth and the part allocated to display. */
1837 fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
1838 fixed20_12 yclk, dram_channels, bandwidth;
1839 fixed20_12 a;
1840
1841 a.full = dfixed_const(1000);
1842 yclk.full = dfixed_const(wm->yclk);
1843 yclk.full = dfixed_div(yclk, a);
1844 dram_channels.full = dfixed_const(wm->dram_channels * 4);
1845 a.full = dfixed_const(10);
1846 disp_dram_allocation.full = dfixed_const(3); /* XXX worse case value 0.3 */
1847 disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
1848 bandwidth.full = dfixed_mul(dram_channels, yclk);
1849 bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);
1850
1851 return dfixed_trunc(bandwidth);
1852}
1853
1854static u32 dce6_data_return_bandwidth(struct dce6_wm_params *wm)
1855{
1856 /* Calculate the display Data return Bandwidth */
1857 fixed20_12 return_efficiency; /* 0.8 */
1858 fixed20_12 sclk, bandwidth;
1859 fixed20_12 a;
1860
1861 a.full = dfixed_const(1000);
1862 sclk.full = dfixed_const(wm->sclk);
1863 sclk.full = dfixed_div(sclk, a);
1864 a.full = dfixed_const(10);
1865 return_efficiency.full = dfixed_const(8);
1866 return_efficiency.full = dfixed_div(return_efficiency, a);
1867 a.full = dfixed_const(32);
1868 bandwidth.full = dfixed_mul(a, sclk);
1869 bandwidth.full = dfixed_mul(bandwidth, return_efficiency);
1870
1871 return dfixed_trunc(bandwidth);
1872}
1873
/* Bytes transferred per DMIF request; fixed at 32 on DCE6 (wm is unused). */
static u32 dce6_get_dmif_bytes_per_request(struct dce6_wm_params *wm)
{
	return 32;
}
1878
1879static u32 dce6_dmif_request_bandwidth(struct dce6_wm_params *wm)
1880{
1881 /* Calculate the DMIF Request Bandwidth */
1882 fixed20_12 disp_clk_request_efficiency; /* 0.8 */
1883 fixed20_12 disp_clk, sclk, bandwidth;
1884 fixed20_12 a, b1, b2;
1885 u32 min_bandwidth;
1886
1887 a.full = dfixed_const(1000);
1888 disp_clk.full = dfixed_const(wm->disp_clk);
1889 disp_clk.full = dfixed_div(disp_clk, a);
1890 a.full = dfixed_const(dce6_get_dmif_bytes_per_request(wm) / 2);
1891 b1.full = dfixed_mul(a, disp_clk);
1892
1893 a.full = dfixed_const(1000);
1894 sclk.full = dfixed_const(wm->sclk);
1895 sclk.full = dfixed_div(sclk, a);
1896 a.full = dfixed_const(dce6_get_dmif_bytes_per_request(wm));
1897 b2.full = dfixed_mul(a, sclk);
1898
1899 a.full = dfixed_const(10);
1900 disp_clk_request_efficiency.full = dfixed_const(8);
1901 disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
1902
1903 min_bandwidth = min(dfixed_trunc(b1), dfixed_trunc(b2));
1904
1905 a.full = dfixed_const(min_bandwidth);
1906 bandwidth.full = dfixed_mul(a, disp_clk_request_efficiency);
1907
1908 return dfixed_trunc(bandwidth);
1909}
1910
1911static u32 dce6_available_bandwidth(struct dce6_wm_params *wm)
1912{
1913 /* Calculate the Available bandwidth. Display can use this temporarily but not in average. */
1914 u32 dram_bandwidth = dce6_dram_bandwidth(wm);
1915 u32 data_return_bandwidth = dce6_data_return_bandwidth(wm);
1916 u32 dmif_req_bandwidth = dce6_dmif_request_bandwidth(wm);
1917
1918 return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
1919}
1920
1921static u32 dce6_average_bandwidth(struct dce6_wm_params *wm)
1922{
1923 /* Calculate the display mode Average Bandwidth
1924 * DisplayMode should contain the source and destination dimensions,
1925 * timing, etc.
1926 */
1927 fixed20_12 bpp;
1928 fixed20_12 line_time;
1929 fixed20_12 src_width;
1930 fixed20_12 bandwidth;
1931 fixed20_12 a;
1932
1933 a.full = dfixed_const(1000);
1934 line_time.full = dfixed_const(wm->active_time + wm->blank_time);
1935 line_time.full = dfixed_div(line_time, a);
1936 bpp.full = dfixed_const(wm->bytes_per_pixel);
1937 src_width.full = dfixed_const(wm->src_width);
1938 bandwidth.full = dfixed_mul(src_width, bpp);
1939 bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
1940 bandwidth.full = dfixed_div(bandwidth, line_time);
1941
1942 return dfixed_trunc(bandwidth);
1943}
1944
/* Compute the latency watermark (in ns) for one head: the worst-case
 * delay before display data arrives, extended by any extra time needed
 * to fill a line of the line buffer.
 */
static u32 dce6_latency_watermark(struct dce6_wm_params *wm)
{
	/* First calculate the latency in ns */
	u32 mc_latency = 2000; /* 2000 ns. */
	u32 available_bandwidth = dce6_available_bandwidth(wm);
	/* time to return one 512-byte-chunk burst at the available bandwidth */
	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
	/* time to return a cursor line pair */
	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
	/* latency contributed by the other active heads' outstanding requests */
	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
		(wm->num_heads * cursor_line_pair_return_time);
	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
	u32 tmp, dmif_size = 12288;
	fixed20_12 a, b, c;

	if (wm->num_heads == 0)
		return 0;

	/* Heavy downscaling, many vertical taps, or interlaced modes need
	 * up to 4 source lines per destination line; otherwise 2 suffice.
	 */
	a.full = dfixed_const(2);
	b.full = dfixed_const(1);
	if ((wm->vsc.full > a.full) ||
	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
	    (wm->vtaps >= 5) ||
	    ((wm->vsc.full >= a.full) && wm->interlaced))
		max_src_lines_per_dst_line = 4;
	else
		max_src_lines_per_dst_line = 2;

	/* a = this head's share of the available bandwidth */
	a.full = dfixed_const(available_bandwidth);
	b.full = dfixed_const(wm->num_heads);
	a.full = dfixed_div(a, b);

	/* b = rate at which the DMIF buffer can absorb data:
	 * dmif_size / ((mc_latency + 512) / disp_clk)
	 */
	b.full = dfixed_const(mc_latency + 512);
	c.full = dfixed_const(wm->disp_clk);
	b.full = dfixed_div(b, c);

	c.full = dfixed_const(dmif_size);
	b.full = dfixed_div(c, b);

	tmp = min(dfixed_trunc(a), dfixed_trunc(b));

	/* b = bandwidth the display pipe itself can consume:
	 * disp_clk (MHz) * bytes_per_pixel
	 */
	b.full = dfixed_const(1000);
	c.full = dfixed_const(wm->disp_clk);
	b.full = dfixed_div(c, b);
	c.full = dfixed_const(wm->bytes_per_pixel);
	b.full = dfixed_mul(b, c);

	/* line buffer fill rate is the tightest of the above limits */
	lb_fill_bw = min(tmp, dfixed_trunc(b));

	/* time to fill the needed source lines at lb_fill_bw */
	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
	b.full = dfixed_const(1000);
	c.full = dfixed_const(lb_fill_bw);
	b.full = dfixed_div(c, b);
	a.full = dfixed_div(a, b);
	line_fill_time = dfixed_trunc(a);

	/* if filling a line takes longer than the active period, the
	 * overrun adds to the watermark
	 */
	if (line_fill_time < wm->active_time)
		return latency;
	else
		return latency + (line_fill_time - wm->active_time);

}
2007
2008static bool dce6_average_bandwidth_vs_dram_bandwidth_for_display(struct dce6_wm_params *wm)
2009{
2010 if (dce6_average_bandwidth(wm) <=
2011 (dce6_dram_bandwidth_for_display(wm) / wm->num_heads))
2012 return true;
2013 else
2014 return false;
2015};
2016
2017static bool dce6_average_bandwidth_vs_available_bandwidth(struct dce6_wm_params *wm)
2018{
2019 if (dce6_average_bandwidth(wm) <=
2020 (dce6_available_bandwidth(wm) / wm->num_heads))
2021 return true;
2022 else
2023 return false;
2024};
2025
2026static bool dce6_check_latency_hiding(struct dce6_wm_params *wm)
2027{
2028 u32 lb_partitions = wm->lb_size / wm->src_width;
2029 u32 line_time = wm->active_time + wm->blank_time;
2030 u32 latency_tolerant_lines;
2031 u32 latency_hiding;
2032 fixed20_12 a;
2033
2034 a.full = dfixed_const(1);
2035 if (wm->vsc.full > a.full)
2036 latency_tolerant_lines = 1;
2037 else {
2038 if (lb_partitions <= (wm->vtaps + 1))
2039 latency_tolerant_lines = 1;
2040 else
2041 latency_tolerant_lines = 2;
2042 }
2043
2044 latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);
2045
2046 if (dce6_latency_watermark(wm) <= latency_hiding)
2047 return true;
2048 else
2049 return false;
2050}
2051
/* Compute and program the display watermarks and priority marks for one
 * crtc.  Two watermark sets are written: set A for high clocks and set B
 * for low clocks (when DPM is enabled these use the DPM min/max mclk and
 * sclk; otherwise both use the current clocks).
 */
static void dce6_program_watermarks(struct radeon_device *rdev,
					 struct radeon_crtc *radeon_crtc,
					 u32 lb_size, u32 num_heads)
{
	struct drm_display_mode *mode = &radeon_crtc->base.mode;
	struct dce6_wm_params wm_low, wm_high;
	u32 dram_channels;
	u32 pixel_period;
	u32 line_time = 0;
	u32 latency_watermark_a = 0, latency_watermark_b = 0;
	u32 priority_a_mark = 0, priority_b_mark = 0;
	u32 priority_a_cnt = PRIORITY_OFF;
	u32 priority_b_cnt = PRIORITY_OFF;
	u32 tmp, arb_control3;
	fixed20_12 a, b, c;

	if (radeon_crtc->base.enabled && num_heads && mode) {
		/* pixel period in ns; line_time is capped to the 16-bit
		 * register field
		 */
		pixel_period = 1000000 / (u32)mode->clock;
		line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
		priority_a_cnt = 0;
		priority_b_cnt = 0;

		/* ARUBA (TN) uses the evergreen MC; other SI parts use their own */
		if (rdev->family == CHIP_ARUBA)
			dram_channels = evergreen_get_number_of_dram_channels(rdev);
		else
			dram_channels = si_get_number_of_dram_channels(rdev);

		/* watermark for high clocks */
		if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
			/* DPM clocks are in 10 kHz units; convert to kHz */
			wm_high.yclk =
				radeon_dpm_get_mclk(rdev, false) * 10;
			wm_high.sclk =
				radeon_dpm_get_sclk(rdev, false) * 10;
		} else {
			wm_high.yclk = rdev->pm.current_mclk * 10;
			wm_high.sclk = rdev->pm.current_sclk * 10;
		}

		wm_high.disp_clk = mode->clock;
		wm_high.src_width = mode->crtc_hdisplay;
		wm_high.active_time = mode->crtc_hdisplay * pixel_period;
		wm_high.blank_time = line_time - wm_high.active_time;
		wm_high.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm_high.interlaced = true;
		wm_high.vsc = radeon_crtc->vsc;
		wm_high.vtaps = 1;
		if (radeon_crtc->rmx_type != RMX_OFF)
			wm_high.vtaps = 2;
		wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
		wm_high.lb_size = lb_size;
		wm_high.dram_channels = dram_channels;
		wm_high.num_heads = num_heads;

		/* watermark for low clocks */
		if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
			wm_low.yclk =
				radeon_dpm_get_mclk(rdev, true) * 10;
			wm_low.sclk =
				radeon_dpm_get_sclk(rdev, true) * 10;
		} else {
			wm_low.yclk = rdev->pm.current_mclk * 10;
			wm_low.sclk = rdev->pm.current_sclk * 10;
		}

		wm_low.disp_clk = mode->clock;
		wm_low.src_width = mode->crtc_hdisplay;
		wm_low.active_time = mode->crtc_hdisplay * pixel_period;
		wm_low.blank_time = line_time - wm_low.active_time;
		wm_low.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm_low.interlaced = true;
		wm_low.vsc = radeon_crtc->vsc;
		wm_low.vtaps = 1;
		if (radeon_crtc->rmx_type != RMX_OFF)
			wm_low.vtaps = 2;
		wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
		wm_low.lb_size = lb_size;
		wm_low.dram_channels = dram_channels;
		wm_low.num_heads = num_heads;

		/* set for high clocks */
		latency_watermark_a = min(dce6_latency_watermark(&wm_high), (u32)65535);
		/* set for low clocks */
		latency_watermark_b = min(dce6_latency_watermark(&wm_low), (u32)65535);

		/* possibly force display priority to high */
		/* should really do this at mode validation time... */
		if (!dce6_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
		    !dce6_average_bandwidth_vs_available_bandwidth(&wm_high) ||
		    !dce6_check_latency_hiding(&wm_high) ||
		    (rdev->disp_priority == 2)) {
			DRM_DEBUG_KMS("force priority to high\n");
			priority_a_cnt |= PRIORITY_ALWAYS_ON;
			priority_b_cnt |= PRIORITY_ALWAYS_ON;
		}
		if (!dce6_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
		    !dce6_average_bandwidth_vs_available_bandwidth(&wm_low) ||
		    !dce6_check_latency_hiding(&wm_low) ||
		    (rdev->disp_priority == 2)) {
			DRM_DEBUG_KMS("force priority to high\n");
			priority_a_cnt |= PRIORITY_ALWAYS_ON;
			priority_b_cnt |= PRIORITY_ALWAYS_ON;
		}

		/* priority mark A = watermark_a * pclk(MHz) * hsc / 16 */
		a.full = dfixed_const(1000);
		b.full = dfixed_const(mode->clock);
		b.full = dfixed_div(b, a);
		c.full = dfixed_const(latency_watermark_a);
		c.full = dfixed_mul(c, b);
		c.full = dfixed_mul(c, radeon_crtc->hsc);
		c.full = dfixed_div(c, a);
		a.full = dfixed_const(16);
		c.full = dfixed_div(c, a);
		priority_a_mark = dfixed_trunc(c);
		priority_a_cnt |= priority_a_mark & PRIORITY_MARK_MASK;

		/* priority mark B = watermark_b * pclk(MHz) * hsc / 16 */
		a.full = dfixed_const(1000);
		b.full = dfixed_const(mode->clock);
		b.full = dfixed_div(b, a);
		c.full = dfixed_const(latency_watermark_b);
		c.full = dfixed_mul(c, b);
		c.full = dfixed_mul(c, radeon_crtc->hsc);
		c.full = dfixed_div(c, a);
		a.full = dfixed_const(16);
		c.full = dfixed_div(c, a);
		priority_b_mark = dfixed_trunc(c);
		priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
	}

	/* select wm A */
	arb_control3 = RREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset);
	tmp = arb_control3;
	tmp &= ~LATENCY_WATERMARK_MASK(3);
	tmp |= LATENCY_WATERMARK_MASK(1);
	WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset, tmp);
	WREG32(DPG_PIPE_LATENCY_CONTROL + radeon_crtc->crtc_offset,
	       (LATENCY_LOW_WATERMARK(latency_watermark_a) |
		LATENCY_HIGH_WATERMARK(line_time)));
	/* select wm B */
	tmp = RREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset);
	tmp &= ~LATENCY_WATERMARK_MASK(3);
	tmp |= LATENCY_WATERMARK_MASK(2);
	WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset, tmp);
	WREG32(DPG_PIPE_LATENCY_CONTROL + radeon_crtc->crtc_offset,
	       (LATENCY_LOW_WATERMARK(latency_watermark_b) |
		LATENCY_HIGH_WATERMARK(line_time)));
	/* restore original selection */
	WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset, arb_control3);

	/* write the priority marks */
	WREG32(PRIORITY_A_CNT + radeon_crtc->crtc_offset, priority_a_cnt);
	WREG32(PRIORITY_B_CNT + radeon_crtc->crtc_offset, priority_b_cnt);

	/* save values for DPM */
	radeon_crtc->line_time = line_time;
	radeon_crtc->wm_high = latency_watermark_a;
	radeon_crtc->wm_low = latency_watermark_b;
}
2211
2212void dce6_bandwidth_update(struct radeon_device *rdev)
2213{
2214 struct drm_display_mode *mode0 = NULL;
2215 struct drm_display_mode *mode1 = NULL;
2216 u32 num_heads = 0, lb_size;
2217 int i;
2218
2219 radeon_update_display_priority(rdev);
2220
2221 for (i = 0; i < rdev->num_crtc; i++) {
2222 if (rdev->mode_info.crtcs[i]->base.enabled)
2223 num_heads++;
2224 }
2225 for (i = 0; i < rdev->num_crtc; i += 2) {
2226 mode0 = &rdev->mode_info.crtcs[i]->base.mode;
2227 mode1 = &rdev->mode_info.crtcs[i+1]->base.mode;
2228 lb_size = dce6_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i], mode0, mode1);
2229 dce6_program_watermarks(rdev, rdev->mode_info.crtcs[i], lb_size, num_heads);
2230 lb_size = dce6_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i+1], mode1, mode0);
2231 dce6_program_watermarks(rdev, rdev->mode_info.crtcs[i+1], lb_size, num_heads);
2232 }
2233}
2234
Alex Deucher0a96d722012-03-20 17:18:11 -04002235/*
2236 * Core functions
2237 */
Alex Deucher0a96d722012-03-20 17:18:11 -04002238static void si_tiling_mode_table_init(struct radeon_device *rdev)
2239{
2240 const u32 num_tile_mode_states = 32;
2241 u32 reg_offset, gb_tile_moden, split_equal_to_row_size;
2242
2243 switch (rdev->config.si.mem_row_size_in_kb) {
2244 case 1:
2245 split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_1KB;
2246 break;
2247 case 2:
2248 default:
2249 split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_2KB;
2250 break;
2251 case 4:
2252 split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_4KB;
2253 break;
2254 }
2255
2256 if ((rdev->family == CHIP_TAHITI) ||
2257 (rdev->family == CHIP_PITCAIRN)) {
2258 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
2259 switch (reg_offset) {
2260 case 0: /* non-AA compressed depth or any compressed stencil */
2261 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2262 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2263 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2264 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2265 NUM_BANKS(ADDR_SURF_16_BANK) |
2266 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2267 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2268 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2269 break;
2270 case 1: /* 2xAA/4xAA compressed depth only */
2271 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2272 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2273 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2274 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
2275 NUM_BANKS(ADDR_SURF_16_BANK) |
2276 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2277 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2278 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2279 break;
2280 case 2: /* 8xAA compressed depth only */
2281 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2282 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2283 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2284 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2285 NUM_BANKS(ADDR_SURF_16_BANK) |
2286 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2287 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2288 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2289 break;
2290 case 3: /* 2xAA/4xAA compressed depth with stencil (for depth buffer) */
2291 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2292 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2293 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2294 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
2295 NUM_BANKS(ADDR_SURF_16_BANK) |
2296 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2297 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2298 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2299 break;
2300 case 4: /* Maps w/ a dimension less than the 2D macro-tile dimensions (for mipmapped depth textures) */
2301 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2302 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2303 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2304 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2305 NUM_BANKS(ADDR_SURF_16_BANK) |
2306 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2307 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2308 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2309 break;
2310 case 5: /* Uncompressed 16bpp depth - and stencil buffer allocated with it */
2311 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2312 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2313 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2314 TILE_SPLIT(split_equal_to_row_size) |
2315 NUM_BANKS(ADDR_SURF_16_BANK) |
2316 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2317 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2318 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2319 break;
2320 case 6: /* Uncompressed 32bpp depth - and stencil buffer allocated with it */
2321 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2322 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2323 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2324 TILE_SPLIT(split_equal_to_row_size) |
2325 NUM_BANKS(ADDR_SURF_16_BANK) |
2326 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2327 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2328 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
2329 break;
2330 case 7: /* Uncompressed 8bpp stencil without depth (drivers typically do not use) */
2331 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2332 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2333 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2334 TILE_SPLIT(split_equal_to_row_size) |
2335 NUM_BANKS(ADDR_SURF_16_BANK) |
2336 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2337 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2338 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2339 break;
2340 case 8: /* 1D and 1D Array Surfaces */
2341 gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
2342 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
2343 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2344 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2345 NUM_BANKS(ADDR_SURF_16_BANK) |
2346 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2347 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2348 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2349 break;
2350 case 9: /* Displayable maps. */
2351 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2352 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
2353 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2354 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2355 NUM_BANKS(ADDR_SURF_16_BANK) |
2356 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2357 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2358 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2359 break;
2360 case 10: /* Display 8bpp. */
2361 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2362 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
2363 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2364 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2365 NUM_BANKS(ADDR_SURF_16_BANK) |
2366 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2367 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2368 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2369 break;
2370 case 11: /* Display 16bpp. */
2371 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2372 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
2373 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2374 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2375 NUM_BANKS(ADDR_SURF_16_BANK) |
2376 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2377 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2378 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2379 break;
2380 case 12: /* Display 32bpp. */
2381 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2382 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
2383 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2384 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
2385 NUM_BANKS(ADDR_SURF_16_BANK) |
2386 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2387 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2388 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
2389 break;
2390 case 13: /* Thin. */
2391 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2392 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2393 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2394 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2395 NUM_BANKS(ADDR_SURF_16_BANK) |
2396 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2397 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2398 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2399 break;
2400 case 14: /* Thin 8 bpp. */
2401 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2402 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2403 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2404 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2405 NUM_BANKS(ADDR_SURF_16_BANK) |
2406 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2407 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2408 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
2409 break;
2410 case 15: /* Thin 16 bpp. */
2411 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2412 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2413 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2414 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2415 NUM_BANKS(ADDR_SURF_16_BANK) |
2416 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2417 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2418 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
2419 break;
2420 case 16: /* Thin 32 bpp. */
2421 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2422 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2423 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2424 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
2425 NUM_BANKS(ADDR_SURF_16_BANK) |
2426 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2427 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2428 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
2429 break;
2430 case 17: /* Thin 64 bpp. */
2431 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2432 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2433 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2434 TILE_SPLIT(split_equal_to_row_size) |
2435 NUM_BANKS(ADDR_SURF_16_BANK) |
2436 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2437 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2438 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
2439 break;
2440 case 21: /* 8 bpp PRT. */
2441 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2442 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2443 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2444 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2445 NUM_BANKS(ADDR_SURF_16_BANK) |
2446 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
2447 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2448 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2449 break;
2450 case 22: /* 16 bpp PRT */
2451 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2452 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2453 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2454 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2455 NUM_BANKS(ADDR_SURF_16_BANK) |
2456 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2457 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2458 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
2459 break;
2460 case 23: /* 32 bpp PRT */
2461 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2462 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2463 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2464 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2465 NUM_BANKS(ADDR_SURF_16_BANK) |
2466 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2467 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2468 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2469 break;
2470 case 24: /* 64 bpp PRT */
2471 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2472 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2473 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2474 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
2475 NUM_BANKS(ADDR_SURF_16_BANK) |
2476 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2477 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2478 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2479 break;
2480 case 25: /* 128 bpp PRT */
2481 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2482 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2483 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2484 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
2485 NUM_BANKS(ADDR_SURF_8_BANK) |
2486 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2487 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2488 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
2489 break;
2490 default:
2491 gb_tile_moden = 0;
2492 break;
2493 }
Jerome Glisse64d7b8b2013-04-09 11:17:08 -04002494 rdev->config.si.tile_mode_array[reg_offset] = gb_tile_moden;
Alex Deucher0a96d722012-03-20 17:18:11 -04002495 WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
2496 }
Alex Deucherd0ae7fc2012-07-26 17:42:25 -04002497 } else if ((rdev->family == CHIP_VERDE) ||
Alex Deucher8b028592012-07-31 12:42:48 -04002498 (rdev->family == CHIP_OLAND) ||
2499 (rdev->family == CHIP_HAINAN)) {
Alex Deucher0a96d722012-03-20 17:18:11 -04002500 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
2501 switch (reg_offset) {
2502 case 0: /* non-AA compressed depth or any compressed stencil */
2503 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2504 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2505 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2506 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2507 NUM_BANKS(ADDR_SURF_16_BANK) |
2508 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2509 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2510 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
2511 break;
2512 case 1: /* 2xAA/4xAA compressed depth only */
2513 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2514 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2515 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2516 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
2517 NUM_BANKS(ADDR_SURF_16_BANK) |
2518 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2519 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2520 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
2521 break;
2522 case 2: /* 8xAA compressed depth only */
2523 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2524 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2525 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2526 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2527 NUM_BANKS(ADDR_SURF_16_BANK) |
2528 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2529 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2530 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
2531 break;
2532 case 3: /* 2xAA/4xAA compressed depth with stencil (for depth buffer) */
2533 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2534 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2535 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2536 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
2537 NUM_BANKS(ADDR_SURF_16_BANK) |
2538 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2539 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2540 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
2541 break;
2542 case 4: /* Maps w/ a dimension less than the 2D macro-tile dimensions (for mipmapped depth textures) */
2543 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2544 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2545 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2546 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2547 NUM_BANKS(ADDR_SURF_16_BANK) |
2548 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2549 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2550 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2551 break;
2552 case 5: /* Uncompressed 16bpp depth - and stencil buffer allocated with it */
2553 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2554 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2555 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2556 TILE_SPLIT(split_equal_to_row_size) |
2557 NUM_BANKS(ADDR_SURF_16_BANK) |
2558 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2559 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2560 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2561 break;
2562 case 6: /* Uncompressed 32bpp depth - and stencil buffer allocated with it */
2563 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2564 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2565 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2566 TILE_SPLIT(split_equal_to_row_size) |
2567 NUM_BANKS(ADDR_SURF_16_BANK) |
2568 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2569 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2570 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2571 break;
2572 case 7: /* Uncompressed 8bpp stencil without depth (drivers typically do not use) */
2573 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2574 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2575 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2576 TILE_SPLIT(split_equal_to_row_size) |
2577 NUM_BANKS(ADDR_SURF_16_BANK) |
2578 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2579 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2580 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
2581 break;
2582 case 8: /* 1D and 1D Array Surfaces */
2583 gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
2584 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
2585 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2586 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2587 NUM_BANKS(ADDR_SURF_16_BANK) |
2588 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2589 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2590 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2591 break;
2592 case 9: /* Displayable maps. */
2593 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2594 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
2595 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2596 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2597 NUM_BANKS(ADDR_SURF_16_BANK) |
2598 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2599 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2600 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2601 break;
2602 case 10: /* Display 8bpp. */
2603 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2604 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
2605 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2606 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2607 NUM_BANKS(ADDR_SURF_16_BANK) |
2608 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2609 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2610 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
2611 break;
2612 case 11: /* Display 16bpp. */
2613 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2614 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
2615 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2616 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2617 NUM_BANKS(ADDR_SURF_16_BANK) |
2618 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2619 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2620 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2621 break;
2622 case 12: /* Display 32bpp. */
2623 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2624 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
2625 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2626 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
2627 NUM_BANKS(ADDR_SURF_16_BANK) |
2628 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2629 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2630 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2631 break;
2632 case 13: /* Thin. */
2633 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2634 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2635 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2636 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2637 NUM_BANKS(ADDR_SURF_16_BANK) |
2638 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2639 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2640 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2641 break;
2642 case 14: /* Thin 8 bpp. */
2643 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2644 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2645 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2646 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2647 NUM_BANKS(ADDR_SURF_16_BANK) |
2648 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2649 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2650 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2651 break;
2652 case 15: /* Thin 16 bpp. */
2653 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2654 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2655 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2656 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2657 NUM_BANKS(ADDR_SURF_16_BANK) |
2658 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2659 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2660 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2661 break;
2662 case 16: /* Thin 32 bpp. */
2663 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2664 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2665 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2666 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
2667 NUM_BANKS(ADDR_SURF_16_BANK) |
2668 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2669 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2670 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2671 break;
2672 case 17: /* Thin 64 bpp. */
2673 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2674 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2675 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2676 TILE_SPLIT(split_equal_to_row_size) |
2677 NUM_BANKS(ADDR_SURF_16_BANK) |
2678 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2679 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2680 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2681 break;
2682 case 21: /* 8 bpp PRT. */
2683 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2684 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2685 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2686 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2687 NUM_BANKS(ADDR_SURF_16_BANK) |
2688 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
2689 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2690 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2691 break;
2692 case 22: /* 16 bpp PRT */
2693 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2694 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2695 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2696 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2697 NUM_BANKS(ADDR_SURF_16_BANK) |
2698 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2699 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2700 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
2701 break;
2702 case 23: /* 32 bpp PRT */
2703 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2704 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2705 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2706 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2707 NUM_BANKS(ADDR_SURF_16_BANK) |
2708 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2709 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2710 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2711 break;
2712 case 24: /* 64 bpp PRT */
2713 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2714 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2715 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2716 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
2717 NUM_BANKS(ADDR_SURF_16_BANK) |
2718 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2719 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2720 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2721 break;
2722 case 25: /* 128 bpp PRT */
2723 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2724 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2725 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2726 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
2727 NUM_BANKS(ADDR_SURF_8_BANK) |
2728 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2729 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2730 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
2731 break;
2732 default:
2733 gb_tile_moden = 0;
2734 break;
2735 }
Jerome Glisse64d7b8b2013-04-09 11:17:08 -04002736 rdev->config.si.tile_mode_array[reg_offset] = gb_tile_moden;
Alex Deucher0a96d722012-03-20 17:18:11 -04002737 WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
2738 }
2739 } else
2740 DRM_ERROR("unknown asic: 0x%x\n", rdev->family);
2741}
2742
Alex Deucher1a8ca752012-06-01 18:58:22 -04002743static void si_select_se_sh(struct radeon_device *rdev,
2744 u32 se_num, u32 sh_num)
2745{
2746 u32 data = INSTANCE_BROADCAST_WRITES;
2747
2748 if ((se_num == 0xffffffff) && (sh_num == 0xffffffff))
Alex Deucher79b52d62013-04-18 16:26:36 -04002749 data |= SH_BROADCAST_WRITES | SE_BROADCAST_WRITES;
Alex Deucher1a8ca752012-06-01 18:58:22 -04002750 else if (se_num == 0xffffffff)
2751 data |= SE_BROADCAST_WRITES | SH_INDEX(sh_num);
2752 else if (sh_num == 0xffffffff)
2753 data |= SH_BROADCAST_WRITES | SE_INDEX(se_num);
2754 else
2755 data |= SH_INDEX(sh_num) | SE_INDEX(se_num);
2756 WREG32(GRBM_GFX_INDEX, data);
2757}
2758
2759static u32 si_create_bitmask(u32 bit_width)
2760{
2761 u32 i, mask = 0;
2762
2763 for (i = 0; i < bit_width; i++) {
2764 mask <<= 1;
2765 mask |= 1;
2766 }
2767 return mask;
2768}
2769
2770static u32 si_get_cu_enabled(struct radeon_device *rdev, u32 cu_per_sh)
2771{
2772 u32 data, mask;
2773
2774 data = RREG32(CC_GC_SHADER_ARRAY_CONFIG);
2775 if (data & 1)
2776 data &= INACTIVE_CUS_MASK;
2777 else
2778 data = 0;
2779 data |= RREG32(GC_USER_SHADER_ARRAY_CONFIG);
2780
2781 data >>= INACTIVE_CUS_SHIFT;
2782
2783 mask = si_create_bitmask(cu_per_sh);
2784
2785 return ~data & mask;
2786}
2787
2788static void si_setup_spi(struct radeon_device *rdev,
2789 u32 se_num, u32 sh_per_se,
2790 u32 cu_per_sh)
2791{
2792 int i, j, k;
2793 u32 data, mask, active_cu;
2794
2795 for (i = 0; i < se_num; i++) {
2796 for (j = 0; j < sh_per_se; j++) {
2797 si_select_se_sh(rdev, i, j);
2798 data = RREG32(SPI_STATIC_THREAD_MGMT_3);
2799 active_cu = si_get_cu_enabled(rdev, cu_per_sh);
2800
2801 mask = 1;
2802 for (k = 0; k < 16; k++) {
2803 mask <<= k;
2804 if (active_cu & mask) {
2805 data &= ~mask;
2806 WREG32(SPI_STATIC_THREAD_MGMT_3, data);
2807 break;
2808 }
2809 }
2810 }
2811 }
2812 si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
2813}
2814
2815static u32 si_get_rb_disabled(struct radeon_device *rdev,
Marek Olšák9fadb352013-12-22 02:18:00 +01002816 u32 max_rb_num_per_se,
Alex Deucher1a8ca752012-06-01 18:58:22 -04002817 u32 sh_per_se)
2818{
2819 u32 data, mask;
2820
2821 data = RREG32(CC_RB_BACKEND_DISABLE);
2822 if (data & 1)
2823 data &= BACKEND_DISABLE_MASK;
2824 else
2825 data = 0;
2826 data |= RREG32(GC_USER_RB_BACKEND_DISABLE);
2827
2828 data >>= BACKEND_DISABLE_SHIFT;
2829
Marek Olšák9fadb352013-12-22 02:18:00 +01002830 mask = si_create_bitmask(max_rb_num_per_se / sh_per_se);
Alex Deucher1a8ca752012-06-01 18:58:22 -04002831
2832 return data & mask;
2833}
2834
/*
 * Gather the per-SE/SH render backend (RB) disable masks, compute the
 * global enabled-RB mask, and program PA_SC_RASTER_CONFIG for each
 * shader engine so rasterization only targets enabled backends.
 */
static void si_setup_rb(struct radeon_device *rdev,
			u32 se_num, u32 sh_per_se,
			u32 max_rb_num_per_se)
{
	int i, j;
	u32 data, mask;
	u32 disabled_rbs = 0;
	u32 enabled_rbs = 0;

	/* collect the disabled-RB bits of every shader array into one word,
	 * TAHITI_RB_BITMAP_WIDTH_PER_SH bits per SE/SH slot */
	for (i = 0; i < se_num; i++) {
		for (j = 0; j < sh_per_se; j++) {
			si_select_se_sh(rdev, i, j);
			data = si_get_rb_disabled(rdev, max_rb_num_per_se, sh_per_se);
			disabled_rbs |= data << ((i * sh_per_se + j) * TAHITI_RB_BITMAP_WIDTH_PER_SH);
		}
	}
	si_select_se_sh(rdev, 0xffffffff, 0xffffffff);

	/* invert: every RB not marked disabled is enabled */
	mask = 1;
	for (i = 0; i < max_rb_num_per_se * se_num; i++) {
		if (!(disabled_rbs & mask))
			enabled_rbs |= mask;
		mask <<= 1;
	}

	/* exported so userspace/other code can see the backend topology */
	rdev->config.si.backend_enable_mask = enabled_rbs;

	/* per SE: pick a raster config RB mapping from the low 2 bits of
	 * the (consumed) enabled mask for each shader array */
	for (i = 0; i < se_num; i++) {
		si_select_se_sh(rdev, i, 0xffffffff);
		data = 0;
		for (j = 0; j < sh_per_se; j++) {
			switch (enabled_rbs & 3) {
			case 1:
				data |= (RASTER_CONFIG_RB_MAP_0 << (i * sh_per_se + j) * 2);
				break;
			case 2:
				data |= (RASTER_CONFIG_RB_MAP_3 << (i * sh_per_se + j) * 2);
				break;
			case 3:
			default:
				data |= (RASTER_CONFIG_RB_MAP_2 << (i * sh_per_se + j) * 2);
				break;
			}
			enabled_rbs >>= 2;
		}
		WREG32(PA_SC_RASTER_CONFIG, data);
	}
	/* restore broadcast addressing */
	si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
}
2884
/*
 * One-time graphics block initialization: record the per-ASIC shader
 * topology limits, program the address/tiling configuration registers,
 * initialize the tiling mode table, RBs and SPI, and set 3D engine
 * hardware defaults.
 */
static void si_gpu_init(struct radeon_device *rdev)
{
	u32 gb_addr_config = 0;
	u32 mc_shared_chmap, mc_arb_ramcfg;
	u32 sx_debug_1;
	u32 hdp_host_path_cntl;
	u32 tmp;
	int i, j;

	/* per-family shader topology and FIFO sizing; values come from the
	 * hardware teams' golden settings for each ASIC */
	switch (rdev->family) {
	case CHIP_TAHITI:
		rdev->config.si.max_shader_engines = 2;
		rdev->config.si.max_tile_pipes = 12;
		rdev->config.si.max_cu_per_sh = 8;
		rdev->config.si.max_sh_per_se = 2;
		rdev->config.si.max_backends_per_se = 4;
		rdev->config.si.max_texture_channel_caches = 12;
		rdev->config.si.max_gprs = 256;
		rdev->config.si.max_gs_threads = 32;
		rdev->config.si.max_hw_contexts = 8;

		rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
		rdev->config.si.sc_prim_fifo_size_backend = 0x100;
		rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = TAHITI_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_PITCAIRN:
		rdev->config.si.max_shader_engines = 2;
		rdev->config.si.max_tile_pipes = 8;
		rdev->config.si.max_cu_per_sh = 5;
		rdev->config.si.max_sh_per_se = 2;
		rdev->config.si.max_backends_per_se = 4;
		rdev->config.si.max_texture_channel_caches = 8;
		rdev->config.si.max_gprs = 256;
		rdev->config.si.max_gs_threads = 32;
		rdev->config.si.max_hw_contexts = 8;

		rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
		rdev->config.si.sc_prim_fifo_size_backend = 0x100;
		rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = TAHITI_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_VERDE:
	default:
		rdev->config.si.max_shader_engines = 1;
		rdev->config.si.max_tile_pipes = 4;
		rdev->config.si.max_cu_per_sh = 5;
		rdev->config.si.max_sh_per_se = 2;
		rdev->config.si.max_backends_per_se = 4;
		rdev->config.si.max_texture_channel_caches = 4;
		rdev->config.si.max_gprs = 256;
		rdev->config.si.max_gs_threads = 32;
		rdev->config.si.max_hw_contexts = 8;

		rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
		rdev->config.si.sc_prim_fifo_size_backend = 0x40;
		rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = VERDE_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_OLAND:
		rdev->config.si.max_shader_engines = 1;
		rdev->config.si.max_tile_pipes = 4;
		rdev->config.si.max_cu_per_sh = 6;
		rdev->config.si.max_sh_per_se = 1;
		rdev->config.si.max_backends_per_se = 2;
		rdev->config.si.max_texture_channel_caches = 4;
		rdev->config.si.max_gprs = 256;
		rdev->config.si.max_gs_threads = 16;
		rdev->config.si.max_hw_contexts = 8;

		rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
		rdev->config.si.sc_prim_fifo_size_backend = 0x40;
		rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = VERDE_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_HAINAN:
		rdev->config.si.max_shader_engines = 1;
		rdev->config.si.max_tile_pipes = 4;
		rdev->config.si.max_cu_per_sh = 5;
		rdev->config.si.max_sh_per_se = 1;
		rdev->config.si.max_backends_per_se = 1;
		rdev->config.si.max_texture_channel_caches = 2;
		rdev->config.si.max_gprs = 256;
		rdev->config.si.max_gs_threads = 16;
		rdev->config.si.max_hw_contexts = 8;

		rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
		rdev->config.si.sc_prim_fifo_size_backend = 0x40;
		rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = HAINAN_GB_ADDR_CONFIG_GOLDEN;
		break;
	}

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}

	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));

	evergreen_fix_pci_max_read_req_size(rdev);

	WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN);

	mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
	mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);

	rdev->config.si.num_tile_pipes = rdev->config.si.max_tile_pipes;
	rdev->config.si.mem_max_burst_length_bytes = 256;
	/* derive memory row size (in KB) from the column-count field,
	 * clamped to the 4KB maximum the tiling hardware supports */
	tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT;
	rdev->config.si.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
	if (rdev->config.si.mem_row_size_in_kb > 4)
		rdev->config.si.mem_row_size_in_kb = 4;
	/* XXX use MC settings? */
	rdev->config.si.shader_engine_tile_size = 32;
	rdev->config.si.num_gpus = 1;
	rdev->config.si.multi_gpu_tile_size = 64;

	/* fix up row size */
	gb_addr_config &= ~ROW_SIZE_MASK;
	switch (rdev->config.si.mem_row_size_in_kb) {
	case 1:
	default:
		gb_addr_config |= ROW_SIZE(0);
		break;
	case 2:
		gb_addr_config |= ROW_SIZE(1);
		break;
	case 4:
		gb_addr_config |= ROW_SIZE(2);
		break;
	}

	/* setup tiling info dword.  gb_addr_config is not adequate since it does
	 * not have bank info, so create a custom tiling dword.
	 * bits 3:0   num_pipes
	 * bits 7:4   num_banks
	 * bits 11:8  group_size
	 * bits 15:12 row_size
	 */
	rdev->config.si.tile_config = 0;
	switch (rdev->config.si.num_tile_pipes) {
	case 1:
		rdev->config.si.tile_config |= (0 << 0);
		break;
	case 2:
		rdev->config.si.tile_config |= (1 << 0);
		break;
	case 4:
		rdev->config.si.tile_config |= (2 << 0);
		break;
	case 8:
	default:
		/* XXX what about 12? */
		rdev->config.si.tile_config |= (3 << 0);
		break;
	}
	switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) {
	case 0: /* four banks */
		rdev->config.si.tile_config |= 0 << 4;
		break;
	case 1: /* eight banks */
		rdev->config.si.tile_config |= 1 << 4;
		break;
	case 2: /* sixteen banks */
	default:
		rdev->config.si.tile_config |= 2 << 4;
		break;
	}
	rdev->config.si.tile_config |=
		((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
	rdev->config.si.tile_config |=
		((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12;

	/* broadcast the address config to every block that needs it */
	WREG32(GB_ADDR_CONFIG, gb_addr_config);
	WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
	WREG32(DMIF_ADDR_CALC, gb_addr_config);
	WREG32(HDP_ADDR_CONFIG, gb_addr_config);
	WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config);
	WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config);
	if (rdev->has_uvd) {
		WREG32(UVD_UDEC_ADDR_CONFIG, gb_addr_config);
		WREG32(UVD_UDEC_DB_ADDR_CONFIG, gb_addr_config);
		WREG32(UVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config);
	}

	si_tiling_mode_table_init(rdev);

	si_setup_rb(rdev, rdev->config.si.max_shader_engines,
		    rdev->config.si.max_sh_per_se,
		    rdev->config.si.max_backends_per_se);

	si_setup_spi(rdev, rdev->config.si.max_shader_engines,
		     rdev->config.si.max_sh_per_se,
		     rdev->config.si.max_cu_per_sh);


	/* set HW defaults for 3D engine */
	WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
				     ROQ_IB2_START(0x2b)));
	WREG32(CP_MEQ_THRESHOLDS, MEQ1_START(0x30) | MEQ2_START(0x60));

	/* read-modify-write with no modification; presumably just a
	 * posting/latch of the default value — NOTE(review): confirm intent */
	sx_debug_1 = RREG32(SX_DEBUG_1);
	WREG32(SX_DEBUG_1, sx_debug_1);

	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));

	WREG32(PA_SC_FIFO_SIZE, (SC_FRONTEND_PRIM_FIFO_SIZE(rdev->config.si.sc_prim_fifo_size_frontend) |
				 SC_BACKEND_PRIM_FIFO_SIZE(rdev->config.si.sc_prim_fifo_size_backend) |
				 SC_HIZ_TILE_FIFO_SIZE(rdev->config.si.sc_hiz_tile_fifo_size) |
				 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.si.sc_earlyz_tile_fifo_size)));

	WREG32(VGT_NUM_INSTANCES, 1);

	WREG32(CP_PERFMON_CNTL, 0);

	WREG32(SQ_CONFIG, 0);

	WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
					  FORCE_EOV_MAX_REZ_CNT(255)));

	WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC) |
	       AUTO_INVLD_EN(ES_AND_GS_AUTO));

	WREG32(VGT_GS_VERTEX_REUSE, 16);
	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);

	/* disable the CB performance counters */
	WREG32(CB_PERFCOUNTER0_SELECT0, 0);
	WREG32(CB_PERFCOUNTER0_SELECT1, 0);
	WREG32(CB_PERFCOUNTER1_SELECT0, 0);
	WREG32(CB_PERFCOUNTER1_SELECT1, 0);
	WREG32(CB_PERFCOUNTER2_SELECT0, 0);
	WREG32(CB_PERFCOUNTER2_SELECT1, 0);
	WREG32(CB_PERFCOUNTER3_SELECT0, 0);
	WREG32(CB_PERFCOUNTER3_SELECT1, 0);

	tmp = RREG32(HDP_MISC_CNTL);
	tmp |= HDP_FLUSH_INVALIDATE_CACHE;
	WREG32(HDP_MISC_CNTL, tmp);

	hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
	WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);

	WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));

	/* let the programmed state settle before continuing */
	udelay(50);
}
Alex Deucherc476dde2012-03-20 17:18:12 -04003141
/*
 * GPU scratch register helper functions.
 */
3145static void si_scratch_init(struct radeon_device *rdev)
3146{
3147 int i;
3148
3149 rdev->scratch.num_reg = 7;
3150 rdev->scratch.reg_base = SCRATCH_REG0;
3151 for (i = 0; i < rdev->scratch.num_reg; i++) {
3152 rdev->scratch.free[i] = true;
3153 rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
3154 }
3155}
3156
/*
 * Emit the command stream for a fence on the given ring: flush the GPU
 * read caches over GART, then write the fence sequence number to the
 * fence driver's GPU address and raise an interrupt via an
 * EVENT_WRITE_EOP packet.  Packet contents and ordering follow the SI
 * CP packet format and must not be reordered.
 */
void si_fence_ring_emit(struct radeon_device *rdev,
			struct radeon_fence *fence)
{
	struct radeon_ring *ring = &rdev->ring[fence->ring];
	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;

	/* flush read cache over gart */
	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
	radeon_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
			  PACKET3_TC_ACTION_ENA |
			  PACKET3_SH_KCACHE_ACTION_ENA |
			  PACKET3_SH_ICACHE_ACTION_ENA);
	radeon_ring_write(ring, 0xFFFFFFFF); /* full surface size */
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 10); /* poll interval */
	/* EVENT_WRITE_EOP - flush caches, send int */
	radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
	radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5));
	radeon_ring_write(ring, addr & 0xffffffff);
	/* DATA_SEL(1): write 32-bit fence value; INT_SEL(2): interrupt on write */
	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
	radeon_ring_write(ring, fence->seq);
	radeon_ring_write(ring, 0);
}
3183
3184/*
3185 * IB stuff
3186 */
/*
 * Schedule an indirect buffer (IB) for execution on @ib->ring.
 * Const IBs are prefixed with a SWITCH_BUFFER packet and dispatched via
 * INDIRECT_BUFFER_CONST; normal IBs first record the post-IB read
 * pointer (either into the ring's rptr save register or via writeback
 * memory) and are followed by a GART read-cache flush for the IB's VM.
 */
void si_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->ring];
	u32 header;

	if (ib->is_const_ib) {
		/* set switch buffer packet before const IB */
		radeon_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
		radeon_ring_write(ring, 0);

		header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
	} else {
		u32 next_rptr;
		if (ring->rptr_save_reg) {
			/* 3 + 4 + 8: dwords this packet, the IB packet, and the
			 * trailing flush occupy — presumably sized to the emitted
			 * stream; confirm if packet sizes change */
			next_rptr = ring->wptr + 3 + 4 + 8;
			radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
			radeon_ring_write(ring, ((ring->rptr_save_reg -
						  PACKET3_SET_CONFIG_REG_START) >> 2));
			radeon_ring_write(ring, next_rptr);
		} else if (rdev->wb.enabled) {
			next_rptr = ring->wptr + 5 + 4 + 8;
			radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
			radeon_ring_write(ring, (1 << 8)); /* write confirm */
			radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
			radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
			radeon_ring_write(ring, next_rptr);
		}

		header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
	}

	radeon_ring_write(ring, header);
	radeon_ring_write(ring,
#ifdef __BIG_ENDIAN
			  (2 << 0) |
#endif
			  (ib->gpu_addr & 0xFFFFFFFC));
	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
	/* VM id rides in bits 31:24 of the size dword; 0 = no VM */
	radeon_ring_write(ring, ib->length_dw |
			  (ib->vm ? (ib->vm->id << 24) : 0));

	if (!ib->is_const_ib) {
		/* flush read cache over gart for this vmid */
		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
		radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
		radeon_ring_write(ring, ib->vm ? ib->vm->id : 0);
		radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
		radeon_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
				  PACKET3_TC_ACTION_ENA |
				  PACKET3_SH_KCACHE_ACTION_ENA |
				  PACKET3_SH_ICACHE_ACTION_ENA);
		radeon_ring_write(ring, 0xFFFFFFFF); /* full surface size */
		radeon_ring_write(ring, 0);
		radeon_ring_write(ring, 10); /* poll interval */
	}
}
3243
3244/*
Alex Deucher48c0c902012-03-20 17:18:19 -04003245 * CP.
3246 */
3247static void si_cp_enable(struct radeon_device *rdev, bool enable)
3248{
3249 if (enable)
3250 WREG32(CP_ME_CNTL, 0);
3251 else {
Alex Deucher50efa512014-01-27 11:26:33 -05003252 if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
3253 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
Alex Deucher48c0c902012-03-20 17:18:19 -04003254 WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT));
3255 WREG32(SCRATCH_UMSK, 0);
Alex Deucher8c5fd7e2012-12-04 15:28:18 -05003256 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
3257 rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
3258 rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
Alex Deucher48c0c902012-03-20 17:18:19 -04003259 }
3260 udelay(50);
3261}
3262
3263static int si_cp_load_microcode(struct radeon_device *rdev)
3264{
3265 const __be32 *fw_data;
3266 int i;
3267
3268 if (!rdev->me_fw || !rdev->pfp_fw)
3269 return -EINVAL;
3270
3271 si_cp_enable(rdev, false);
3272
3273 /* PFP */
3274 fw_data = (const __be32 *)rdev->pfp_fw->data;
3275 WREG32(CP_PFP_UCODE_ADDR, 0);
3276 for (i = 0; i < SI_PFP_UCODE_SIZE; i++)
3277 WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
3278 WREG32(CP_PFP_UCODE_ADDR, 0);
3279
3280 /* CE */
3281 fw_data = (const __be32 *)rdev->ce_fw->data;
3282 WREG32(CP_CE_UCODE_ADDR, 0);
3283 for (i = 0; i < SI_CE_UCODE_SIZE; i++)
3284 WREG32(CP_CE_UCODE_DATA, be32_to_cpup(fw_data++));
3285 WREG32(CP_CE_UCODE_ADDR, 0);
3286
3287 /* ME */
3288 fw_data = (const __be32 *)rdev->me_fw->data;
3289 WREG32(CP_ME_RAM_WADDR, 0);
3290 for (i = 0; i < SI_PM4_UCODE_SIZE; i++)
3291 WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
3292 WREG32(CP_ME_RAM_WADDR, 0);
3293
3294 WREG32(CP_PFP_UCODE_ADDR, 0);
3295 WREG32(CP_CE_UCODE_ADDR, 0);
3296 WREG32(CP_ME_RAM_WADDR, 0);
3297 WREG32(CP_ME_RAM_RADDR, 0);
3298 return 0;
3299}
3300
3301static int si_cp_start(struct radeon_device *rdev)
3302{
3303 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
3304 int r, i;
3305
3306 r = radeon_ring_lock(rdev, ring, 7 + 4);
3307 if (r) {
3308 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
3309 return r;
3310 }
3311 /* init the CP */
3312 radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
3313 radeon_ring_write(ring, 0x1);
3314 radeon_ring_write(ring, 0x0);
3315 radeon_ring_write(ring, rdev->config.si.max_hw_contexts - 1);
3316 radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
3317 radeon_ring_write(ring, 0);
3318 radeon_ring_write(ring, 0);
3319
3320 /* init the CE partitions */
3321 radeon_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
3322 radeon_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
3323 radeon_ring_write(ring, 0xc000);
3324 radeon_ring_write(ring, 0xe000);
3325 radeon_ring_unlock_commit(rdev, ring);
3326
3327 si_cp_enable(rdev, true);
3328
3329 r = radeon_ring_lock(rdev, ring, si_default_size + 10);
3330 if (r) {
3331 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
3332 return r;
3333 }
3334
3335 /* setup clear context state */
3336 radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
3337 radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
3338
3339 for (i = 0; i < si_default_size; i++)
3340 radeon_ring_write(ring, si_default_state[i]);
3341
3342 radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
3343 radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
3344
3345 /* set clear context state */
3346 radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
3347 radeon_ring_write(ring, 0);
3348
3349 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
3350 radeon_ring_write(ring, 0x00000316);
3351 radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
3352 radeon_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */
3353
3354 radeon_ring_unlock_commit(rdev, ring);
3355
3356 for (i = RADEON_RING_TYPE_GFX_INDEX; i <= CAYMAN_RING_TYPE_CP2_INDEX; ++i) {
3357 ring = &rdev->ring[i];
3358 r = radeon_ring_lock(rdev, ring, 2);
3359
3360 /* clear the compute context state */
3361 radeon_ring_write(ring, PACKET3_COMPUTE(PACKET3_CLEAR_STATE, 0));
3362 radeon_ring_write(ring, 0);
3363
3364 radeon_ring_unlock_commit(rdev, ring);
3365 }
3366
3367 return 0;
3368}
3369
3370static void si_cp_fini(struct radeon_device *rdev)
3371{
Christian König45df6802012-07-06 16:22:55 +02003372 struct radeon_ring *ring;
Alex Deucher48c0c902012-03-20 17:18:19 -04003373 si_cp_enable(rdev, false);
Christian König45df6802012-07-06 16:22:55 +02003374
3375 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
3376 radeon_ring_fini(rdev, ring);
3377 radeon_scratch_free(rdev, ring->rptr_save_reg);
3378
3379 ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
3380 radeon_ring_fini(rdev, ring);
3381 radeon_scratch_free(rdev, ring->rptr_save_reg);
3382
3383 ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
3384 radeon_ring_fini(rdev, ring);
3385 radeon_scratch_free(rdev, ring->rptr_save_reg);
Alex Deucher48c0c902012-03-20 17:18:19 -04003386}
3387
3388static int si_cp_resume(struct radeon_device *rdev)
3389{
3390 struct radeon_ring *ring;
3391 u32 tmp;
3392 u32 rb_bufsz;
3393 int r;
3394
Alex Deucher811e4d52013-09-03 13:31:33 -04003395 si_enable_gui_idle_interrupt(rdev, false);
3396
Alex Deucher48c0c902012-03-20 17:18:19 -04003397 WREG32(CP_SEM_WAIT_TIMER, 0x0);
3398 WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
3399
3400 /* Set the write pointer delay */
3401 WREG32(CP_RB_WPTR_DELAY, 0);
3402
3403 WREG32(CP_DEBUG, 0);
3404 WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
3405
3406 /* ring 0 - compute and gfx */
3407 /* Set ring buffer size */
3408 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
Daniel Vetterb72a8922013-07-10 14:11:59 +02003409 rb_bufsz = order_base_2(ring->ring_size / 8);
3410 tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
Alex Deucher48c0c902012-03-20 17:18:19 -04003411#ifdef __BIG_ENDIAN
3412 tmp |= BUF_SWAP_32BIT;
3413#endif
3414 WREG32(CP_RB0_CNTL, tmp);
3415
3416 /* Initialize the ring buffer's read and write pointers */
3417 WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA);
3418 ring->wptr = 0;
3419 WREG32(CP_RB0_WPTR, ring->wptr);
3420
Adam Buchbinder48fc7f72012-09-19 21:48:00 -04003421 /* set the wb address whether it's enabled or not */
Alex Deucher48c0c902012-03-20 17:18:19 -04003422 WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
3423 WREG32(CP_RB0_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
3424
3425 if (rdev->wb.enabled)
3426 WREG32(SCRATCH_UMSK, 0xff);
3427 else {
3428 tmp |= RB_NO_UPDATE;
3429 WREG32(SCRATCH_UMSK, 0);
3430 }
3431
3432 mdelay(1);
3433 WREG32(CP_RB0_CNTL, tmp);
3434
3435 WREG32(CP_RB0_BASE, ring->gpu_addr >> 8);
3436
Alex Deucher48c0c902012-03-20 17:18:19 -04003437 /* ring1 - compute only */
3438 /* Set ring buffer size */
3439 ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
Daniel Vetterb72a8922013-07-10 14:11:59 +02003440 rb_bufsz = order_base_2(ring->ring_size / 8);
3441 tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
Alex Deucher48c0c902012-03-20 17:18:19 -04003442#ifdef __BIG_ENDIAN
3443 tmp |= BUF_SWAP_32BIT;
3444#endif
3445 WREG32(CP_RB1_CNTL, tmp);
3446
3447 /* Initialize the ring buffer's read and write pointers */
3448 WREG32(CP_RB1_CNTL, tmp | RB_RPTR_WR_ENA);
3449 ring->wptr = 0;
3450 WREG32(CP_RB1_WPTR, ring->wptr);
3451
Adam Buchbinder48fc7f72012-09-19 21:48:00 -04003452 /* set the wb address whether it's enabled or not */
Alex Deucher48c0c902012-03-20 17:18:19 -04003453 WREG32(CP_RB1_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFFFFFFFC);
3454 WREG32(CP_RB1_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFF);
3455
3456 mdelay(1);
3457 WREG32(CP_RB1_CNTL, tmp);
3458
3459 WREG32(CP_RB1_BASE, ring->gpu_addr >> 8);
3460
Alex Deucher48c0c902012-03-20 17:18:19 -04003461 /* ring2 - compute only */
3462 /* Set ring buffer size */
3463 ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
Daniel Vetterb72a8922013-07-10 14:11:59 +02003464 rb_bufsz = order_base_2(ring->ring_size / 8);
3465 tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
Alex Deucher48c0c902012-03-20 17:18:19 -04003466#ifdef __BIG_ENDIAN
3467 tmp |= BUF_SWAP_32BIT;
3468#endif
3469 WREG32(CP_RB2_CNTL, tmp);
3470
3471 /* Initialize the ring buffer's read and write pointers */
3472 WREG32(CP_RB2_CNTL, tmp | RB_RPTR_WR_ENA);
3473 ring->wptr = 0;
3474 WREG32(CP_RB2_WPTR, ring->wptr);
3475
Adam Buchbinder48fc7f72012-09-19 21:48:00 -04003476 /* set the wb address whether it's enabled or not */
Alex Deucher48c0c902012-03-20 17:18:19 -04003477 WREG32(CP_RB2_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFFFFFFFC);
3478 WREG32(CP_RB2_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFF);
3479
3480 mdelay(1);
3481 WREG32(CP_RB2_CNTL, tmp);
3482
3483 WREG32(CP_RB2_BASE, ring->gpu_addr >> 8);
3484
Alex Deucher48c0c902012-03-20 17:18:19 -04003485 /* start the rings */
3486 si_cp_start(rdev);
3487 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = true;
3488 rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = true;
3489 rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = true;
3490 r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
3491 if (r) {
3492 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
3493 rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
3494 rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
3495 return r;
3496 }
3497 r = radeon_ring_test(rdev, CAYMAN_RING_TYPE_CP1_INDEX, &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]);
3498 if (r) {
3499 rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
3500 }
3501 r = radeon_ring_test(rdev, CAYMAN_RING_TYPE_CP2_INDEX, &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]);
3502 if (r) {
3503 rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
3504 }
3505
Alex Deucher811e4d52013-09-03 13:31:33 -04003506 si_enable_gui_idle_interrupt(rdev, true);
3507
Alex Deucher50efa512014-01-27 11:26:33 -05003508 if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
3509 radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
3510
Alex Deucher48c0c902012-03-20 17:18:19 -04003511 return 0;
3512}
3513
/**
 * si_gpu_check_soft_reset - check which GPU blocks are busy or hung
 *
 * @rdev: radeon_device pointer
 *
 * Reads the engine status registers (GRBM, SRBM, DMA, VM L2) and builds
 * a bitmask of RADEON_RESET_* flags for the blocks that are still busy.
 * The result is consumed by si_gpu_soft_reset() and si_gfx_is_lockup().
 * Returns the reset mask (0 means all checked blocks are idle).
 */
u32 si_gpu_check_soft_reset(struct radeon_device *rdev)
{
	u32 reset_mask = 0;
	u32 tmp;

	/* GRBM_STATUS */
	tmp = RREG32(GRBM_STATUS);
	if (tmp & (PA_BUSY | SC_BUSY |
		   BCI_BUSY | SX_BUSY |
		   TA_BUSY | VGT_BUSY |
		   DB_BUSY | CB_BUSY |
		   GDS_BUSY | SPI_BUSY |
		   IA_BUSY | IA_BUSY_NO_DMA))
		reset_mask |= RADEON_RESET_GFX;

	if (tmp & (CF_RQ_PENDING | PF_RQ_PENDING |
		   CP_BUSY | CP_COHERENCY_BUSY))
		reset_mask |= RADEON_RESET_CP;

	/* GRBM event engine busy implicates CP/GFX as well */
	if (tmp & GRBM_EE_BUSY)
		reset_mask |= RADEON_RESET_GRBM | RADEON_RESET_GFX | RADEON_RESET_CP;

	/* GRBM_STATUS2 */
	tmp = RREG32(GRBM_STATUS2);
	if (tmp & (RLC_RQ_PENDING | RLC_BUSY))
		reset_mask |= RADEON_RESET_RLC;

	/* DMA_STATUS_REG 0 */
	tmp = RREG32(DMA_STATUS_REG + DMA0_REGISTER_OFFSET);
	if (!(tmp & DMA_IDLE))
		reset_mask |= RADEON_RESET_DMA;

	/* DMA_STATUS_REG 1 */
	tmp = RREG32(DMA_STATUS_REG + DMA1_REGISTER_OFFSET);
	if (!(tmp & DMA_IDLE))
		reset_mask |= RADEON_RESET_DMA1;

	/* SRBM_STATUS2 */
	tmp = RREG32(SRBM_STATUS2);
	if (tmp & DMA_BUSY)
		reset_mask |= RADEON_RESET_DMA;

	if (tmp & DMA1_BUSY)
		reset_mask |= RADEON_RESET_DMA1;

	/* SRBM_STATUS */
	tmp = RREG32(SRBM_STATUS);

	if (tmp & IH_BUSY)
		reset_mask |= RADEON_RESET_IH;

	if (tmp & SEM_BUSY)
		reset_mask |= RADEON_RESET_SEM;

	if (tmp & GRBM_RQ_PENDING)
		reset_mask |= RADEON_RESET_GRBM;

	if (tmp & VMC_BUSY)
		reset_mask |= RADEON_RESET_VMC;

	if (tmp & (MCB_BUSY | MCB_NON_DISPLAY_BUSY |
		   MCC_BUSY | MCD_BUSY))
		reset_mask |= RADEON_RESET_MC;

	if (evergreen_is_display_hung(rdev))
		reset_mask |= RADEON_RESET_DISPLAY;

	/* VM_L2_STATUS */
	tmp = RREG32(VM_L2_STATUS);
	if (tmp & L2_BUSY)
		reset_mask |= RADEON_RESET_VMC;

	/* Skip MC reset as it's mostly likely not hung, just busy */
	if (reset_mask & RADEON_RESET_MC) {
		DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
		reset_mask &= ~RADEON_RESET_MC;
	}

	return reset_mask;
}
3594
/**
 * si_gpu_soft_reset - soft reset the blocks named in the reset mask
 *
 * @rdev: radeon_device pointer
 * @reset_mask: bitmask of RADEON_RESET_* flags (from si_gpu_check_soft_reset())
 *
 * Quiesces the engines (PG/CG, RLC, CP, DMA), stops MC access, then pulses
 * the appropriate GRBM/SRBM soft-reset bits and restores the MC state.
 * The ordering of the register writes and delays below follows the
 * hardware reset procedure and must not be rearranged.
 */
static void si_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
{
	struct evergreen_mc_save save;
	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
	u32 tmp;

	/* nothing flagged as hung - nothing to do */
	if (reset_mask == 0)
		return;

	dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);

	evergreen_print_gpu_status_regs(rdev);
	dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
		 RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR));
	dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
		 RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));

	/* disable PG/CG */
	si_fini_pg(rdev);
	si_fini_cg(rdev);

	/* stop the rlc */
	si_rlc_stop(rdev);

	/* Disable CP parsing/prefetching */
	WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT);

	if (reset_mask & RADEON_RESET_DMA) {
		/* dma0 */
		tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
		tmp &= ~DMA_RB_ENABLE;
		WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp);
	}
	if (reset_mask & RADEON_RESET_DMA1) {
		/* dma1 */
		tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
		tmp &= ~DMA_RB_ENABLE;
		WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp);
	}

	udelay(50);

	/* halt memory controller traffic before pulsing reset bits */
	evergreen_mc_stop(rdev, &save);
	if (evergreen_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
	}

	/* build the GRBM reset mask: a GFX/compute/CP hang resets the
	 * whole graphics pipeline front- and back-end */
	if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE | RADEON_RESET_CP)) {
		grbm_soft_reset = SOFT_RESET_CB |
			SOFT_RESET_DB |
			SOFT_RESET_GDS |
			SOFT_RESET_PA |
			SOFT_RESET_SC |
			SOFT_RESET_BCI |
			SOFT_RESET_SPI |
			SOFT_RESET_SX |
			SOFT_RESET_TC |
			SOFT_RESET_TA |
			SOFT_RESET_VGT |
			SOFT_RESET_IA;
	}

	if (reset_mask & RADEON_RESET_CP) {
		grbm_soft_reset |= SOFT_RESET_CP | SOFT_RESET_VGT;

		srbm_soft_reset |= SOFT_RESET_GRBM;
	}

	if (reset_mask & RADEON_RESET_DMA)
		srbm_soft_reset |= SOFT_RESET_DMA;

	if (reset_mask & RADEON_RESET_DMA1)
		srbm_soft_reset |= SOFT_RESET_DMA1;

	if (reset_mask & RADEON_RESET_DISPLAY)
		srbm_soft_reset |= SOFT_RESET_DC;

	if (reset_mask & RADEON_RESET_RLC)
		grbm_soft_reset |= SOFT_RESET_RLC;

	if (reset_mask & RADEON_RESET_SEM)
		srbm_soft_reset |= SOFT_RESET_SEM;

	if (reset_mask & RADEON_RESET_IH)
		srbm_soft_reset |= SOFT_RESET_IH;

	if (reset_mask & RADEON_RESET_GRBM)
		srbm_soft_reset |= SOFT_RESET_GRBM;

	if (reset_mask & RADEON_RESET_VMC)
		srbm_soft_reset |= SOFT_RESET_VMC;

	if (reset_mask & RADEON_RESET_MC)
		srbm_soft_reset |= SOFT_RESET_MC;

	if (grbm_soft_reset) {
		tmp = RREG32(GRBM_SOFT_RESET);
		tmp |= grbm_soft_reset;
		dev_info(rdev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(GRBM_SOFT_RESET, tmp);
		/* read back - presumably to post the write before the delay;
		 * NOTE(review): pattern used throughout radeon resets */
		tmp = RREG32(GRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~grbm_soft_reset;
		WREG32(GRBM_SOFT_RESET, tmp);
		tmp = RREG32(GRBM_SOFT_RESET);
	}

	if (srbm_soft_reset) {
		tmp = RREG32(SRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(SRBM_SOFT_RESET, tmp);
		tmp = RREG32(SRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(SRBM_SOFT_RESET, tmp);
		tmp = RREG32(SRBM_SOFT_RESET);
	}

	/* Wait a little for things to settle down */
	udelay(50);

	evergreen_mc_resume(rdev, &save);
	udelay(50);

	evergreen_print_gpu_status_regs(rdev);
}
3726
Alex Deucher4a5c8ea2013-11-15 16:35:55 -05003727static void si_set_clk_bypass_mode(struct radeon_device *rdev)
3728{
3729 u32 tmp, i;
3730
3731 tmp = RREG32(CG_SPLL_FUNC_CNTL);
3732 tmp |= SPLL_BYPASS_EN;
3733 WREG32(CG_SPLL_FUNC_CNTL, tmp);
3734
3735 tmp = RREG32(CG_SPLL_FUNC_CNTL_2);
3736 tmp |= SPLL_CTLREQ_CHG;
3737 WREG32(CG_SPLL_FUNC_CNTL_2, tmp);
3738
3739 for (i = 0; i < rdev->usec_timeout; i++) {
3740 if (RREG32(SPLL_STATUS) & SPLL_CHG_STATUS)
3741 break;
3742 udelay(1);
3743 }
3744
3745 tmp = RREG32(CG_SPLL_FUNC_CNTL_2);
3746 tmp &= ~(SPLL_CTLREQ_CHG | SCLK_MUX_UPDATE);
3747 WREG32(CG_SPLL_FUNC_CNTL_2, tmp);
3748
3749 tmp = RREG32(MPLL_CNTL_MODE);
3750 tmp &= ~MPLL_MCLK_SEL;
3751 WREG32(MPLL_CNTL_MODE, tmp);
3752}
3753
/**
 * si_spll_powerdown - power down the SPLL
 *
 * @rdev: radeon_device pointer
 *
 * Takes software control of the SPLL, asserts reset and sleep, then
 * releases software control.  Called after si_set_clk_bypass_mode()
 * as part of the PCI config reset sequence; the write order matters.
 */
static void si_spll_powerdown(struct radeon_device *rdev)
{
	u32 tmp;

	tmp = RREG32(SPLL_CNTL_MODE);
	tmp |= SPLL_SW_DIR_CONTROL;
	WREG32(SPLL_CNTL_MODE, tmp);

	tmp = RREG32(CG_SPLL_FUNC_CNTL);
	tmp |= SPLL_RESET;
	WREG32(CG_SPLL_FUNC_CNTL, tmp);

	tmp = RREG32(CG_SPLL_FUNC_CNTL);
	tmp |= SPLL_SLEEP;
	WREG32(CG_SPLL_FUNC_CNTL, tmp);

	tmp = RREG32(SPLL_CNTL_MODE);
	tmp &= ~SPLL_SW_DIR_CONTROL;
	WREG32(SPLL_CNTL_MODE, tmp);
}
3774
/**
 * si_gpu_pci_config_reset - reset the asic via the PCI config space
 *
 * @rdev: radeon_device pointer
 *
 * Heavier-weight fallback used when soft reset fails: quiesces all
 * engines, stops MC access, drops the clocks to bypass mode, powers down
 * the SPLL, disables bus mastering and then triggers a reset through
 * PCI config space.  Polls CONFIG_MEMSIZE (bounded by usec_timeout)
 * to detect when the asic has come back.
 */
static void si_gpu_pci_config_reset(struct radeon_device *rdev)
{
	struct evergreen_mc_save save;
	u32 tmp, i;

	dev_info(rdev->dev, "GPU pci config reset\n");

	/* disable dpm? */

	/* disable cg/pg */
	si_fini_pg(rdev);
	si_fini_cg(rdev);

	/* Disable CP parsing/prefetching */
	WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT);
	/* dma0 */
	tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
	tmp &= ~DMA_RB_ENABLE;
	WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp);
	/* dma1 */
	tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
	tmp &= ~DMA_RB_ENABLE;
	WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp);
	/* XXX other engines? */

	/* halt the rlc, disable cp internal ints */
	si_rlc_stop(rdev);

	udelay(50);

	/* disable mem access */
	evergreen_mc_stop(rdev, &save);
	if (evergreen_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out !\n");
	}

	/* set mclk/sclk to bypass */
	si_set_clk_bypass_mode(rdev);
	/* powerdown spll */
	si_spll_powerdown(rdev);
	/* disable BM */
	pci_clear_master(rdev->pdev);
	/* reset */
	radeon_pci_config_reset(rdev);
	/* wait for asic to come out of reset */
	for (i = 0; i < rdev->usec_timeout; i++) {
		/* CONFIG_MEMSIZE reads all-ones while the asic is in reset */
		if (RREG32(CONFIG_MEMSIZE) != 0xffffffff)
			break;
		udelay(1);
	}
}
3826
Alex Deucherc476dde2012-03-20 17:18:12 -04003827int si_asic_reset(struct radeon_device *rdev)
3828{
Alex Deucher014bb202013-01-18 19:36:20 -05003829 u32 reset_mask;
3830
3831 reset_mask = si_gpu_check_soft_reset(rdev);
3832
3833 if (reset_mask)
3834 r600_set_bios_scratch_engine_hung(rdev, true);
3835
Alex Deucher4a5c8ea2013-11-15 16:35:55 -05003836 /* try soft reset */
Alex Deucher014bb202013-01-18 19:36:20 -05003837 si_gpu_soft_reset(rdev, reset_mask);
3838
3839 reset_mask = si_gpu_check_soft_reset(rdev);
3840
Alex Deucher4a5c8ea2013-11-15 16:35:55 -05003841 /* try pci config reset */
3842 if (reset_mask && radeon_hard_reset)
3843 si_gpu_pci_config_reset(rdev);
3844
3845 reset_mask = si_gpu_check_soft_reset(rdev);
3846
Alex Deucher014bb202013-01-18 19:36:20 -05003847 if (!reset_mask)
3848 r600_set_bios_scratch_engine_hung(rdev, false);
3849
3850 return 0;
Alex Deucherc476dde2012-03-20 17:18:12 -04003851}
3852
Alex Deucher123bc182013-01-24 11:37:19 -05003853/**
3854 * si_gfx_is_lockup - Check if the GFX engine is locked up
3855 *
3856 * @rdev: radeon_device pointer
3857 * @ring: radeon_ring structure holding ring information
3858 *
3859 * Check if the GFX engine is locked up.
3860 * Returns true if the engine appears to be locked up, false if not.
3861 */
3862bool si_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
3863{
3864 u32 reset_mask = si_gpu_check_soft_reset(rdev);
3865
3866 if (!(reset_mask & (RADEON_RESET_GFX |
3867 RADEON_RESET_COMPUTE |
3868 RADEON_RESET_CP))) {
Christian Königff212f22014-02-18 14:52:33 +01003869 radeon_ring_lockup_update(rdev, ring);
Alex Deucher123bc182013-01-24 11:37:19 -05003870 return false;
3871 }
3872 /* force CP activities */
3873 radeon_ring_force_activity(rdev, ring);
3874 return radeon_ring_test_lockup(rdev, ring);
3875}
3876
Alex Deucherd2800ee2012-03-20 17:18:13 -04003877/* MC */
/**
 * si_mc_program - program the memory controller address layout
 *
 * @rdev: radeon_device pointer
 *
 * Clears the HDP tiling registers, stops the MC, programs the system
 * aperture and FB location registers to match rdev->mc, disables the
 * AGP aperture, and resumes the MC.  On boards with display hardware
 * it also locks out VGA accesses and disables the VGA renderer.
 */
static void si_mc_program(struct radeon_device *rdev)
{
	struct evergreen_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}
	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);

	evergreen_mc_stop(rdev, &save);
	if (radeon_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
	}
	if (!ASIC_IS_NODCE(rdev))
		/* Lockout access through VGA aperture*/
		WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
	/* Update configuration */
	WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
	       rdev->mc.vram_start >> 12);
	WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
	       rdev->mc.vram_end >> 12);
	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
	       rdev->vram_scratch.gpu_addr >> 12);
	/* FB_LOCATION packs top 16 bits of end | bottom 16 bits of start */
	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(MC_VM_FB_LOCATION, tmp);
	/* XXX double check these! */
	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
	WREG32(HDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	/* disable the AGP aperture (top below bottom) */
	WREG32(MC_VM_AGP_BASE, 0);
	WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
	WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
	if (radeon_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
	}
	evergreen_mc_resume(rdev, &save);
	if (!ASIC_IS_NODCE(rdev)) {
		/* we need to own VRAM, so turn off the VGA renderer here
		 * to stop it overwriting our objects */
		rv515_vga_render_disable(rdev);
	}
}
3928
Alex Deucher1c491652013-04-09 12:45:26 -04003929void si_vram_gtt_location(struct radeon_device *rdev,
3930 struct radeon_mc *mc)
Alex Deucherd2800ee2012-03-20 17:18:13 -04003931{
3932 if (mc->mc_vram_size > 0xFFC0000000ULL) {
3933 /* leave room for at least 1024M GTT */
3934 dev_warn(rdev->dev, "limiting VRAM\n");
3935 mc->real_vram_size = 0xFFC0000000ULL;
3936 mc->mc_vram_size = 0xFFC0000000ULL;
3937 }
Alex Deucher9ed8b1f2013-04-08 11:13:01 -04003938 radeon_vram_location(rdev, &rdev->mc, 0);
Alex Deucherd2800ee2012-03-20 17:18:13 -04003939 rdev->mc.gtt_base_align = 0;
Alex Deucher9ed8b1f2013-04-08 11:13:01 -04003940 radeon_gtt_location(rdev, mc);
Alex Deucherd2800ee2012-03-20 17:18:13 -04003941}
3942
/**
 * si_mc_init - probe VRAM configuration
 *
 * @rdev: radeon_device pointer
 *
 * Determines the memory channel size and count from MC_ARB_RAMCFG /
 * MC_SHARED_CHMAP, reads the VRAM size from CONFIG_MEMSIZE (in MB),
 * fills in the rdev->mc fields and computes the VRAM/GTT layout.
 * Returns 0 (cannot fail).
 */
static int si_mc_init(struct radeon_device *rdev)
{
	u32 tmp;
	int chansize, numchan;

	/* Get VRAM informations */
	rdev->mc.vram_is_ddr = true;
	tmp = RREG32(MC_ARB_RAMCFG);
	if (tmp & CHANSIZE_OVERRIDE) {
		chansize = 16;
	} else if (tmp & CHANSIZE_MASK) {
		chansize = 64;
	} else {
		chansize = 32;
	}
	tmp = RREG32(MC_SHARED_CHMAP);
	/* map the NOOFCHAN field to the actual channel count */
	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
	case 0:
	default:
		numchan = 1;
		break;
	case 1:
		numchan = 2;
		break;
	case 2:
		numchan = 4;
		break;
	case 3:
		numchan = 8;
		break;
	case 4:
		numchan = 3;
		break;
	case 5:
		numchan = 6;
		break;
	case 6:
		numchan = 10;
		break;
	case 7:
		numchan = 12;
		break;
	case 8:
		numchan = 16;
		break;
	}
	rdev->mc.vram_width = numchan * chansize;
	/* Could aper size report 0 ? */
	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
	/* size in MB on si */
	tmp = RREG32(CONFIG_MEMSIZE);
	/* some boards may have garbage in the upper 16 bits */
	if (tmp & 0xffff0000) {
		DRM_INFO("Probable bad vram size: 0x%08x\n", tmp);
		/* keep the low 16 bits only if they are non-zero */
		if (tmp & 0xffff)
			tmp &= 0xffff;
	}
	rdev->mc.mc_vram_size = tmp * 1024ULL * 1024ULL;
	rdev->mc.real_vram_size = rdev->mc.mc_vram_size;
	rdev->mc.visible_vram_size = rdev->mc.aper_size;
	si_vram_gtt_location(rdev, &rdev->mc);
	radeon_update_bandwidth_info(rdev);

	return 0;
}
4009
4010/*
4011 * GART
4012 */
/**
 * si_pcie_gart_tlb_flush - flush the HDP cache and VM TLBs
 *
 * @rdev: radeon_device pointer
 *
 * Flushes the HDP cache and then invalidates the TLB for VM context 0
 * so updated GART page table entries take effect.
 */
void si_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	/* flush hdp cache */
	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);

	/* bits 0-15 are the VM contexts0-15 */
	WREG32(VM_INVALIDATE_REQUEST, 1);
}
4021
/**
 * si_pcie_gart_enable - set up and enable the GART
 *
 * @rdev: radeon_device pointer
 *
 * Pins the GART table in VRAM, programs the L1 TLB and L2 cache,
 * sets up VM context 0 for the GART aperture and enables VM
 * contexts 1-15 (used for per-process page tables) with fault
 * interrupts.  Finishes with a TLB flush.
 * Returns 0 on success, negative error code on failure.
 */
static int si_pcie_gart_enable(struct radeon_device *rdev)
{
	int r, i;

	if (rdev->gart.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	radeon_gart_restore(rdev);
	/* Setup TLB control */
	WREG32(MC_VM_MX_L1_TLB_CNTL,
	       (0xA << 7) |
	       ENABLE_L1_TLB |
	       SYSTEM_ACCESS_MODE_NOT_IN_SYS |
	       ENABLE_ADVANCED_DRIVER_MODEL |
	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
	       ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
	       EFFECTIVE_L2_QUEUE_SIZE(7) |
	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
	WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE);
	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
	       L2_CACHE_BIGK_FRAGMENT_SIZE(0));
	/* setup context0 - covers the GART aperture */
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(rdev->dummy_page.addr >> 12));
	WREG32(VM_CONTEXT0_CNTL2, 0);
	WREG32(VM_CONTEXT0_CNTL, (ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
				  RANGE_PROTECTION_FAULT_ENABLE_DEFAULT));

	/* NOTE(review): undocumented registers - purpose not visible here */
	WREG32(0x15D4, 0);
	WREG32(0x15D8, 0);
	WREG32(0x15DC, 0);

	/* empty context1-15 */
	/* set vm size, must be a multiple of 4 */
	WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
	WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn);
	/* Assign the pt base to something valid for now; the pts used for
	 * the VMs are determined by the application and setup and assigned
	 * on the fly in the vm part of radeon_gart.c
	 */
	for (i = 1; i < 16; i++) {
		if (i < 8)
			WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
			       rdev->gart.table_addr >> 12);
		else
			WREG32(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((i - 8) << 2),
			       rdev->gart.table_addr >> 12);
	}

	/* enable context1-15 */
	WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(rdev->dummy_page.addr >> 12));
	WREG32(VM_CONTEXT1_CNTL2, 4);
	WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
				RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
				DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
				DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT |
				PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT |
				PDE0_PROTECTION_FAULT_ENABLE_DEFAULT |
				VALID_PROTECTION_FAULT_ENABLE_INTERRUPT |
				VALID_PROTECTION_FAULT_ENABLE_DEFAULT |
				READ_PROTECTION_FAULT_ENABLE_INTERRUPT |
				READ_PROTECTION_FAULT_ENABLE_DEFAULT |
				WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT |
				WRITE_PROTECTION_FAULT_ENABLE_DEFAULT);

	si_pcie_gart_tlb_flush(rdev);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20),
		 (unsigned long long)rdev->gart.table_addr);
	rdev->gart.ready = true;
	return 0;
}
4106
/**
 * si_pcie_gart_disable - disable the GART and unpin the table
 *
 * @rdev: radeon_device pointer
 *
 * Disables all VM contexts, puts the TLB/L2 into a pass-through
 * configuration and unpins the GART table from VRAM.
 */
static void si_pcie_gart_disable(struct radeon_device *rdev)
{
	/* Disable all tables */
	WREG32(VM_CONTEXT0_CNTL, 0);
	WREG32(VM_CONTEXT1_CNTL, 0);
	/* Setup TLB control */
	WREG32(MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE_NOT_IN_SYS |
	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
	       EFFECTIVE_L2_QUEUE_SIZE(7) |
	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
	       L2_CACHE_BIGK_FRAGMENT_SIZE(0));
	radeon_gart_table_vram_unpin(rdev);
}
4125
/**
 * si_pcie_gart_fini - tear down the GART
 *
 * @rdev: radeon_device pointer
 *
 * Disables the GART, frees the table VRAM and releases the GART state.
 */
static void si_pcie_gart_fini(struct radeon_device *rdev)
{
	si_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
	radeon_gart_fini(rdev);
}
4132
Alex Deucher498dd8b2012-03-20 17:18:15 -04004133/* vm parser */
4134static bool si_vm_reg_valid(u32 reg)
4135{
4136 /* context regs are fine */
4137 if (reg >= 0x28000)
4138 return true;
4139
4140 /* check config regs */
4141 switch (reg) {
4142 case GRBM_GFX_INDEX:
Alex Deucherf418b882012-11-08 10:13:24 -05004143 case CP_STRMOUT_CNTL:
Alex Deucher498dd8b2012-03-20 17:18:15 -04004144 case VGT_VTX_VECT_EJECT_REG:
4145 case VGT_CACHE_INVALIDATION:
4146 case VGT_ESGS_RING_SIZE:
4147 case VGT_GSVS_RING_SIZE:
4148 case VGT_GS_VERTEX_REUSE:
4149 case VGT_PRIMITIVE_TYPE:
4150 case VGT_INDEX_TYPE:
4151 case VGT_NUM_INDICES:
4152 case VGT_NUM_INSTANCES:
4153 case VGT_TF_RING_SIZE:
4154 case VGT_HS_OFFCHIP_PARAM:
4155 case VGT_TF_MEMORY_BASE:
4156 case PA_CL_ENHANCE:
4157 case PA_SU_LINE_STIPPLE_VALUE:
4158 case PA_SC_LINE_STIPPLE_STATE:
4159 case PA_SC_ENHANCE:
4160 case SQC_CACHES:
4161 case SPI_STATIC_THREAD_MGMT_1:
4162 case SPI_STATIC_THREAD_MGMT_2:
4163 case SPI_STATIC_THREAD_MGMT_3:
4164 case SPI_PS_MAX_WAVE_ID:
4165 case SPI_CONFIG_CNTL:
4166 case SPI_CONFIG_CNTL_1:
4167 case TA_CNTL_AUX:
4168 return true;
4169 default:
4170 DRM_ERROR("Invalid register 0x%x in CS\n", reg);
4171 return false;
4172 }
4173}
4174
4175static int si_vm_packet3_ce_check(struct radeon_device *rdev,
4176 u32 *ib, struct radeon_cs_packet *pkt)
4177{
4178 switch (pkt->opcode) {
4179 case PACKET3_NOP:
4180 case PACKET3_SET_BASE:
4181 case PACKET3_SET_CE_DE_COUNTERS:
4182 case PACKET3_LOAD_CONST_RAM:
4183 case PACKET3_WRITE_CONST_RAM:
4184 case PACKET3_WRITE_CONST_RAM_OFFSET:
4185 case PACKET3_DUMP_CONST_RAM:
4186 case PACKET3_INCREMENT_CE_COUNTER:
4187 case PACKET3_WAIT_ON_DE_COUNTER:
4188 case PACKET3_CE_WRITE:
4189 break;
4190 default:
4191 DRM_ERROR("Invalid CE packet3: 0x%x\n", pkt->opcode);
4192 return -EINVAL;
4193 }
4194 return 0;
4195}
4196
/**
 * si_vm_packet3_cp_dma_check - validate a PACKET3_CP_DMA packet
 *
 * @ib: indirect buffer contents
 * @idx: index of the first dword after the packet header
 *
 * When the CP DMA source and/or destination address space is a
 * register (SAS/DAS with the corresponding select field 0), every
 * register covered by the transfer must pass si_vm_reg_valid().
 * With SAIC/DAIC set the address does not increment, so only the
 * single start register is checked.
 * Returns 0 if all touched registers are allowed, -EINVAL otherwise.
 */
static int si_vm_packet3_cp_dma_check(u32 *ib, u32 idx)
{
	u32 start_reg, reg, i;
	u32 command = ib[idx + 4];	/* COMMAND dword */
	u32 info = ib[idx + 1];		/* src address hi / select bits */
	u32 idx_value = ib[idx];	/* src address lo */
	if (command & PACKET3_CP_DMA_CMD_SAS) {
		/* src address space is register */
		if (((info & 0x60000000) >> 29) == 0) {
			start_reg = idx_value << 2;
			if (command & PACKET3_CP_DMA_CMD_SAIC) {
				/* no-increment: only one register is read */
				reg = start_reg;
				if (!si_vm_reg_valid(reg)) {
					DRM_ERROR("CP DMA Bad SRC register\n");
					return -EINVAL;
				}
			} else {
				/* NOTE(review): the 0x1fffff COMMAND count
				 * appears to be a byte count, yet the loop
				 * steps one register (4 bytes) per count -
				 * confirm against the PM4 spec */
				for (i = 0; i < (command & 0x1fffff); i++) {
					reg = start_reg + (4 * i);
					if (!si_vm_reg_valid(reg)) {
						DRM_ERROR("CP DMA Bad SRC register\n");
						return -EINVAL;
					}
				}
			}
		}
	}
	if (command & PACKET3_CP_DMA_CMD_DAS) {
		/* dst address space is register */
		if (((info & 0x00300000) >> 20) == 0) {
			start_reg = ib[idx + 2];
			if (command & PACKET3_CP_DMA_CMD_DAIC) {
				/* no-increment: only one register is written */
				reg = start_reg;
				if (!si_vm_reg_valid(reg)) {
					DRM_ERROR("CP DMA Bad DST register\n");
					return -EINVAL;
				}
			} else {
				for (i = 0; i < (command & 0x1fffff); i++) {
					reg = start_reg + (4 * i);
					if (!si_vm_reg_valid(reg)) {
						DRM_ERROR("CP DMA Bad DST register\n");
						return -EINVAL;
					}
				}
			}
		}
	}
	return 0;
}
4247
Alex Deucher498dd8b2012-03-20 17:18:15 -04004248static int si_vm_packet3_gfx_check(struct radeon_device *rdev,
4249 u32 *ib, struct radeon_cs_packet *pkt)
4250{
Tom Stellarde5b9e752013-08-16 17:47:39 -04004251 int r;
Alex Deucher498dd8b2012-03-20 17:18:15 -04004252 u32 idx = pkt->idx + 1;
4253 u32 idx_value = ib[idx];
4254 u32 start_reg, end_reg, reg, i;
4255
4256 switch (pkt->opcode) {
4257 case PACKET3_NOP:
4258 case PACKET3_SET_BASE:
4259 case PACKET3_CLEAR_STATE:
4260 case PACKET3_INDEX_BUFFER_SIZE:
4261 case PACKET3_DISPATCH_DIRECT:
4262 case PACKET3_DISPATCH_INDIRECT:
4263 case PACKET3_ALLOC_GDS:
4264 case PACKET3_WRITE_GDS_RAM:
4265 case PACKET3_ATOMIC_GDS:
4266 case PACKET3_ATOMIC:
4267 case PACKET3_OCCLUSION_QUERY:
4268 case PACKET3_SET_PREDICATION:
4269 case PACKET3_COND_EXEC:
4270 case PACKET3_PRED_EXEC:
4271 case PACKET3_DRAW_INDIRECT:
4272 case PACKET3_DRAW_INDEX_INDIRECT:
4273 case PACKET3_INDEX_BASE:
4274 case PACKET3_DRAW_INDEX_2:
4275 case PACKET3_CONTEXT_CONTROL:
4276 case PACKET3_INDEX_TYPE:
4277 case PACKET3_DRAW_INDIRECT_MULTI:
4278 case PACKET3_DRAW_INDEX_AUTO:
4279 case PACKET3_DRAW_INDEX_IMMD:
4280 case PACKET3_NUM_INSTANCES:
4281 case PACKET3_DRAW_INDEX_MULTI_AUTO:
4282 case PACKET3_STRMOUT_BUFFER_UPDATE:
4283 case PACKET3_DRAW_INDEX_OFFSET_2:
4284 case PACKET3_DRAW_INDEX_MULTI_ELEMENT:
4285 case PACKET3_DRAW_INDEX_INDIRECT_MULTI:
4286 case PACKET3_MPEG_INDEX:
4287 case PACKET3_WAIT_REG_MEM:
4288 case PACKET3_MEM_WRITE:
4289 case PACKET3_PFP_SYNC_ME:
4290 case PACKET3_SURFACE_SYNC:
4291 case PACKET3_EVENT_WRITE:
4292 case PACKET3_EVENT_WRITE_EOP:
4293 case PACKET3_EVENT_WRITE_EOS:
4294 case PACKET3_SET_CONTEXT_REG:
4295 case PACKET3_SET_CONTEXT_REG_INDIRECT:
4296 case PACKET3_SET_SH_REG:
4297 case PACKET3_SET_SH_REG_OFFSET:
4298 case PACKET3_INCREMENT_DE_COUNTER:
4299 case PACKET3_WAIT_ON_CE_COUNTER:
4300 case PACKET3_WAIT_ON_AVAIL_BUFFER:
4301 case PACKET3_ME_WRITE:
4302 break;
4303 case PACKET3_COPY_DATA:
4304 if ((idx_value & 0xf00) == 0) {
4305 reg = ib[idx + 3] * 4;
4306 if (!si_vm_reg_valid(reg))
4307 return -EINVAL;
4308 }
4309 break;
4310 case PACKET3_WRITE_DATA:
4311 if ((idx_value & 0xf00) == 0) {
4312 start_reg = ib[idx + 1] * 4;
4313 if (idx_value & 0x10000) {
4314 if (!si_vm_reg_valid(start_reg))
4315 return -EINVAL;
4316 } else {
4317 for (i = 0; i < (pkt->count - 2); i++) {
4318 reg = start_reg + (4 * i);
4319 if (!si_vm_reg_valid(reg))
4320 return -EINVAL;
4321 }
4322 }
4323 }
4324 break;
4325 case PACKET3_COND_WRITE:
4326 if (idx_value & 0x100) {
4327 reg = ib[idx + 5] * 4;
4328 if (!si_vm_reg_valid(reg))
4329 return -EINVAL;
4330 }
4331 break;
4332 case PACKET3_COPY_DW:
4333 if (idx_value & 0x2) {
4334 reg = ib[idx + 3] * 4;
4335 if (!si_vm_reg_valid(reg))
4336 return -EINVAL;
4337 }
4338 break;
4339 case PACKET3_SET_CONFIG_REG:
4340 start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_START;
4341 end_reg = 4 * pkt->count + start_reg - 4;
4342 if ((start_reg < PACKET3_SET_CONFIG_REG_START) ||
4343 (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
4344 (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
4345 DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
4346 return -EINVAL;
4347 }
4348 for (i = 0; i < pkt->count; i++) {
4349 reg = start_reg + (4 * i);
4350 if (!si_vm_reg_valid(reg))
4351 return -EINVAL;
4352 }
4353 break;
Alex Deucher5aa709b2012-12-03 19:42:37 -05004354 case PACKET3_CP_DMA:
Tom Stellarde5b9e752013-08-16 17:47:39 -04004355 r = si_vm_packet3_cp_dma_check(ib, idx);
4356 if (r)
4357 return r;
Alex Deucher5aa709b2012-12-03 19:42:37 -05004358 break;
Alex Deucher498dd8b2012-03-20 17:18:15 -04004359 default:
4360 DRM_ERROR("Invalid GFX packet3: 0x%x\n", pkt->opcode);
4361 return -EINVAL;
4362 }
4363 return 0;
4364}
4365
/**
 * si_vm_packet3_compute_check - validate a PACKET3 on a compute ring
 *
 * @rdev: radeon_device pointer (unused here)
 * @ib: indirect buffer contents
 * @pkt: decoded packet header
 *
 * Compute-ring counterpart of si_vm_packet3_gfx_check(): the allowed
 * opcode list omits the draw-related packets, while the
 * register-writing opcodes get the same si_vm_reg_valid() screening.
 * Returns 0 if the packet is allowed, -EINVAL otherwise.
 */
static int si_vm_packet3_compute_check(struct radeon_device *rdev,
				       u32 *ib, struct radeon_cs_packet *pkt)
{
	int r;
	u32 idx = pkt->idx + 1;
	u32 idx_value = ib[idx];
	u32 start_reg, reg, i;

	switch (pkt->opcode) {
	/* opcodes with no register side effects - always allowed */
	case PACKET3_NOP:
	case PACKET3_SET_BASE:
	case PACKET3_CLEAR_STATE:
	case PACKET3_DISPATCH_DIRECT:
	case PACKET3_DISPATCH_INDIRECT:
	case PACKET3_ALLOC_GDS:
	case PACKET3_WRITE_GDS_RAM:
	case PACKET3_ATOMIC_GDS:
	case PACKET3_ATOMIC:
	case PACKET3_OCCLUSION_QUERY:
	case PACKET3_SET_PREDICATION:
	case PACKET3_COND_EXEC:
	case PACKET3_PRED_EXEC:
	case PACKET3_CONTEXT_CONTROL:
	case PACKET3_STRMOUT_BUFFER_UPDATE:
	case PACKET3_WAIT_REG_MEM:
	case PACKET3_MEM_WRITE:
	case PACKET3_PFP_SYNC_ME:
	case PACKET3_SURFACE_SYNC:
	case PACKET3_EVENT_WRITE:
	case PACKET3_EVENT_WRITE_EOP:
	case PACKET3_EVENT_WRITE_EOS:
	case PACKET3_SET_CONTEXT_REG:
	case PACKET3_SET_CONTEXT_REG_INDIRECT:
	case PACKET3_SET_SH_REG:
	case PACKET3_SET_SH_REG_OFFSET:
	case PACKET3_INCREMENT_DE_COUNTER:
	case PACKET3_WAIT_ON_CE_COUNTER:
	case PACKET3_WAIT_ON_AVAIL_BUFFER:
	case PACKET3_ME_WRITE:
		break;
	case PACKET3_COPY_DATA:
		/* dst-select 0 means the destination is a register */
		if ((idx_value & 0xf00) == 0) {
			reg = ib[idx + 3] * 4;
			if (!si_vm_reg_valid(reg))
				return -EINVAL;
		}
		break;
	case PACKET3_WRITE_DATA:
		if ((idx_value & 0xf00) == 0) {
			start_reg = ib[idx + 1] * 4;
			/* bit 16: single register vs. consecutive range */
			if (idx_value & 0x10000) {
				if (!si_vm_reg_valid(start_reg))
					return -EINVAL;
			} else {
				for (i = 0; i < (pkt->count - 2); i++) {
					reg = start_reg + (4 * i);
					if (!si_vm_reg_valid(reg))
						return -EINVAL;
				}
			}
		}
		break;
	case PACKET3_COND_WRITE:
		/* bit 8: write destination is a register */
		if (idx_value & 0x100) {
			reg = ib[idx + 5] * 4;
			if (!si_vm_reg_valid(reg))
				return -EINVAL;
		}
		break;
	case PACKET3_COPY_DW:
		/* bit 1: destination is a register */
		if (idx_value & 0x2) {
			reg = ib[idx + 3] * 4;
			if (!si_vm_reg_valid(reg))
				return -EINVAL;
		}
		break;
	case PACKET3_CP_DMA:
		r = si_vm_packet3_cp_dma_check(ib, idx);
		if (r)
			return r;
		break;
	default:
		DRM_ERROR("Invalid Compute packet3: 0x%x\n", pkt->opcode);
		return -EINVAL;
	}
	return 0;
}
4453
/**
 * si_ib_parse - validate a userspace indirect buffer (SI)
 *
 * @rdev: radeon_device pointer
 * @ib: IB to validate
 *
 * Walks the IB packet by packet and dispatches each PACKET3 to the CE,
 * GFX or compute checker depending on the target ring.  Type 0 packets
 * (raw register writes) are rejected outright; type 2 packets are
 * single-dword padding and are simply skipped.
 *
 * Returns 0 if every packet is allowed, -EINVAL otherwise.
 */
int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
{
	int ret = 0;
	u32 idx = 0;
	struct radeon_cs_packet pkt;

	do {
		pkt.idx = idx;
		pkt.type = RADEON_CP_PACKET_GET_TYPE(ib->ptr[idx]);
		pkt.count = RADEON_CP_PACKET_GET_COUNT(ib->ptr[idx]);
		pkt.one_reg_wr = 0;
		switch (pkt.type) {
		case RADEON_PACKET_TYPE0:
			dev_err(rdev->dev, "Packet0 not allowed!\n");
			ret = -EINVAL;
			break;
		case RADEON_PACKET_TYPE2:
			/* type 2 packets are one dword of padding */
			idx += 1;
			break;
		case RADEON_PACKET_TYPE3:
			pkt.opcode = RADEON_CP_PACKET3_GET_OPCODE(ib->ptr[idx]);
			if (ib->is_const_ib)
				ret = si_vm_packet3_ce_check(rdev, ib->ptr, &pkt);
			else {
				switch (ib->ring) {
				case RADEON_RING_TYPE_GFX_INDEX:
					ret = si_vm_packet3_gfx_check(rdev, ib->ptr, &pkt);
					break;
				case CAYMAN_RING_TYPE_CP1_INDEX:
				case CAYMAN_RING_TYPE_CP2_INDEX:
					ret = si_vm_packet3_compute_check(rdev, ib->ptr, &pkt);
					break;
				default:
					dev_err(rdev->dev, "Non-PM4 ring %d !\n", ib->ring);
					ret = -EINVAL;
					break;
				}
			}
			/* advance past header dword + count field + payload */
			idx += pkt.count + 2;
			break;
		default:
			dev_err(rdev->dev, "Unknown packet type %d !\n", pkt.type);
			ret = -EINVAL;
			break;
		}
		if (ret)
			break;
	} while (idx < ib->length_dw);

	return ret;
}
4505
Alex Deucherd2800ee2012-03-20 17:18:13 -04004506/*
4507 * vm
4508 */
4509int si_vm_init(struct radeon_device *rdev)
4510{
4511 /* number of VMs */
4512 rdev->vm_manager.nvm = 16;
4513 /* base offset of vram pages */
4514 rdev->vm_manager.vram_base_offset = 0;
4515
4516 return 0;
4517}
4518
/**
 * si_vm_fini - tear down the VM manager (SI)
 *
 * @rdev: radeon_device pointer
 *
 * Intentionally empty: SI has no per-ASIC VM state to release; generic
 * VM teardown happens in common code.
 */
void si_vm_fini(struct radeon_device *rdev)
{
}
4522
Alex Deucher82ffd922012-10-02 14:47:46 -04004523/**
Alex Deucherfbf6dc72013-06-13 18:47:58 -04004524 * si_vm_decode_fault - print human readable fault info
4525 *
4526 * @rdev: radeon_device pointer
4527 * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
4528 * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
4529 *
4530 * Print human readable fault information (SI).
4531 */
4532static void si_vm_decode_fault(struct radeon_device *rdev,
4533 u32 status, u32 addr)
4534{
4535 u32 mc_id = (status & MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT;
4536 u32 vmid = (status & FAULT_VMID_MASK) >> FAULT_VMID_SHIFT;
4537 u32 protections = (status & PROTECTIONS_MASK) >> PROTECTIONS_SHIFT;
4538 char *block;
4539
4540 if (rdev->family == CHIP_TAHITI) {
4541 switch (mc_id) {
4542 case 160:
4543 case 144:
4544 case 96:
4545 case 80:
4546 case 224:
4547 case 208:
4548 case 32:
4549 case 16:
4550 block = "CB";
4551 break;
4552 case 161:
4553 case 145:
4554 case 97:
4555 case 81:
4556 case 225:
4557 case 209:
4558 case 33:
4559 case 17:
4560 block = "CB_FMASK";
4561 break;
4562 case 162:
4563 case 146:
4564 case 98:
4565 case 82:
4566 case 226:
4567 case 210:
4568 case 34:
4569 case 18:
4570 block = "CB_CMASK";
4571 break;
4572 case 163:
4573 case 147:
4574 case 99:
4575 case 83:
4576 case 227:
4577 case 211:
4578 case 35:
4579 case 19:
4580 block = "CB_IMMED";
4581 break;
4582 case 164:
4583 case 148:
4584 case 100:
4585 case 84:
4586 case 228:
4587 case 212:
4588 case 36:
4589 case 20:
4590 block = "DB";
4591 break;
4592 case 165:
4593 case 149:
4594 case 101:
4595 case 85:
4596 case 229:
4597 case 213:
4598 case 37:
4599 case 21:
4600 block = "DB_HTILE";
4601 break;
4602 case 167:
4603 case 151:
4604 case 103:
4605 case 87:
4606 case 231:
4607 case 215:
4608 case 39:
4609 case 23:
4610 block = "DB_STEN";
4611 break;
4612 case 72:
4613 case 68:
4614 case 64:
4615 case 8:
4616 case 4:
4617 case 0:
4618 case 136:
4619 case 132:
4620 case 128:
4621 case 200:
4622 case 196:
4623 case 192:
4624 block = "TC";
4625 break;
4626 case 112:
4627 case 48:
4628 block = "CP";
4629 break;
4630 case 49:
4631 case 177:
4632 case 50:
4633 case 178:
4634 block = "SH";
4635 break;
4636 case 53:
4637 case 190:
4638 block = "VGT";
4639 break;
4640 case 117:
4641 block = "IH";
4642 break;
4643 case 51:
4644 case 115:
4645 block = "RLC";
4646 break;
4647 case 119:
4648 case 183:
4649 block = "DMA0";
4650 break;
4651 case 61:
4652 block = "DMA1";
4653 break;
4654 case 248:
4655 case 120:
4656 block = "HDP";
4657 break;
4658 default:
4659 block = "unknown";
4660 break;
4661 }
4662 } else {
4663 switch (mc_id) {
4664 case 32:
4665 case 16:
4666 case 96:
4667 case 80:
4668 case 160:
4669 case 144:
4670 case 224:
4671 case 208:
4672 block = "CB";
4673 break;
4674 case 33:
4675 case 17:
4676 case 97:
4677 case 81:
4678 case 161:
4679 case 145:
4680 case 225:
4681 case 209:
4682 block = "CB_FMASK";
4683 break;
4684 case 34:
4685 case 18:
4686 case 98:
4687 case 82:
4688 case 162:
4689 case 146:
4690 case 226:
4691 case 210:
4692 block = "CB_CMASK";
4693 break;
4694 case 35:
4695 case 19:
4696 case 99:
4697 case 83:
4698 case 163:
4699 case 147:
4700 case 227:
4701 case 211:
4702 block = "CB_IMMED";
4703 break;
4704 case 36:
4705 case 20:
4706 case 100:
4707 case 84:
4708 case 164:
4709 case 148:
4710 case 228:
4711 case 212:
4712 block = "DB";
4713 break;
4714 case 37:
4715 case 21:
4716 case 101:
4717 case 85:
4718 case 165:
4719 case 149:
4720 case 229:
4721 case 213:
4722 block = "DB_HTILE";
4723 break;
4724 case 39:
4725 case 23:
4726 case 103:
4727 case 87:
4728 case 167:
4729 case 151:
4730 case 231:
4731 case 215:
4732 block = "DB_STEN";
4733 break;
4734 case 72:
4735 case 68:
4736 case 8:
4737 case 4:
4738 case 136:
4739 case 132:
4740 case 200:
4741 case 196:
4742 block = "TC";
4743 break;
4744 case 112:
4745 case 48:
4746 block = "CP";
4747 break;
4748 case 49:
4749 case 177:
4750 case 50:
4751 case 178:
4752 block = "SH";
4753 break;
4754 case 53:
4755 block = "VGT";
4756 break;
4757 case 117:
4758 block = "IH";
4759 break;
4760 case 51:
4761 case 115:
4762 block = "RLC";
4763 break;
4764 case 119:
4765 case 183:
4766 block = "DMA0";
4767 break;
4768 case 61:
4769 block = "DMA1";
4770 break;
4771 case 248:
4772 case 120:
4773 block = "HDP";
4774 break;
4775 default:
4776 block = "unknown";
4777 break;
4778 }
4779 }
4780
4781 printk("VM fault (0x%02x, vmid %d) at page %u, %s from %s (%d)\n",
4782 protections, vmid, addr,
4783 (status & MEMORY_CLIENT_RW_MASK) ? "write" : "read",
4784 block, mc_id);
4785}
4786
Alex Deucher498522b2012-10-02 14:43:38 -04004787void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
Alex Deucherd2800ee2012-03-20 17:18:13 -04004788{
Alex Deucher498522b2012-10-02 14:43:38 -04004789 struct radeon_ring *ring = &rdev->ring[ridx];
Alex Deucherd2800ee2012-03-20 17:18:13 -04004790
Christian Königee60e292012-08-09 16:21:08 +02004791 if (vm == NULL)
Alex Deucherd2800ee2012-03-20 17:18:13 -04004792 return;
4793
Alex Deucher76c44f22012-10-02 14:39:18 -04004794 /* write new base address */
4795 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4796 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
4797 WRITE_DATA_DST_SEL(0)));
4798
Christian Königee60e292012-08-09 16:21:08 +02004799 if (vm->id < 8) {
Alex Deucher76c44f22012-10-02 14:39:18 -04004800 radeon_ring_write(ring,
4801 (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2);
Christian Königee60e292012-08-09 16:21:08 +02004802 } else {
Alex Deucher76c44f22012-10-02 14:39:18 -04004803 radeon_ring_write(ring,
4804 (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2);
Christian Königee60e292012-08-09 16:21:08 +02004805 }
Alex Deucher76c44f22012-10-02 14:39:18 -04004806 radeon_ring_write(ring, 0);
Dmitry Cherkasovfa87e622012-09-17 19:36:19 +02004807 radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
Christian Königee60e292012-08-09 16:21:08 +02004808
Alex Deucherd2800ee2012-03-20 17:18:13 -04004809 /* flush hdp cache */
Alex Deucher76c44f22012-10-02 14:39:18 -04004810 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4811 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
4812 WRITE_DATA_DST_SEL(0)));
4813 radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
4814 radeon_ring_write(ring, 0);
Christian Königee60e292012-08-09 16:21:08 +02004815 radeon_ring_write(ring, 0x1);
4816
Alex Deucherd2800ee2012-03-20 17:18:13 -04004817 /* bits 0-15 are the VM contexts0-15 */
Alex Deucher76c44f22012-10-02 14:39:18 -04004818 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4819 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
4820 WRITE_DATA_DST_SEL(0)));
4821 radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
4822 radeon_ring_write(ring, 0);
Alex Deucher498522b2012-10-02 14:43:38 -04004823 radeon_ring_write(ring, 1 << vm->id);
Christian König58f8cf52012-10-22 17:42:35 +02004824
4825 /* sync PFP to ME, otherwise we might get invalid PFP reads */
4826 radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
4827 radeon_ring_write(ring, 0x0);
Alex Deucherd2800ee2012-03-20 17:18:13 -04004828}
4829
Alex Deucher347e7592012-03-20 17:18:21 -04004830/*
Alex Deucherf8f84ac2013-03-07 12:56:35 -05004831 * Power and clock gating
4832 */
4833static void si_wait_for_rlc_serdes(struct radeon_device *rdev)
4834{
4835 int i;
4836
4837 for (i = 0; i < rdev->usec_timeout; i++) {
4838 if (RREG32(RLC_SERDES_MASTER_BUSY_0) == 0)
4839 break;
4840 udelay(1);
4841 }
4842
4843 for (i = 0; i < rdev->usec_timeout; i++) {
4844 if (RREG32(RLC_SERDES_MASTER_BUSY_1) == 0)
4845 break;
4846 udelay(1);
4847 }
4848}
4849
/**
 * si_enable_gui_idle_interrupt - toggle the GUI idle interrupt sources
 *
 * @rdev: radeon_device pointer
 * @enable: enable/disable the CNTX busy/empty interrupts
 *
 * When disabling, additionally waits for the RLC status to settle to
 * "clocked and powered" so clockgating state transitions are complete.
 */
static void si_enable_gui_idle_interrupt(struct radeon_device *rdev,
					 bool enable)
{
	u32 tmp = RREG32(CP_INT_CNTL_RING0);
	u32 mask;
	int i;

	if (enable)
		tmp |= (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
	else
		tmp &= ~(CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
	WREG32(CP_INT_CNTL_RING0, tmp);

	if (!enable) {
		/* read a gfx register */
		tmp = RREG32(DB_DEPTH_INFO);

		/* wait for RLC to report clocks/power on and busy/LS off */
		mask = RLC_BUSY_STATUS | GFX_POWER_STATUS | GFX_CLOCK_STATUS | GFX_LS_STATUS;
		for (i = 0; i < rdev->usec_timeout; i++) {
			if ((RREG32(RLC_STAT) & mask) == (GFX_CLOCK_STATUS | GFX_POWER_STATUS))
				break;
			udelay(1);
		}
	}
}
4875
4876static void si_set_uvd_dcm(struct radeon_device *rdev,
4877 bool sw_mode)
4878{
4879 u32 tmp, tmp2;
4880
4881 tmp = RREG32(UVD_CGC_CTRL);
4882 tmp &= ~(CLK_OD_MASK | CG_DT_MASK);
4883 tmp |= DCM | CG_DT(1) | CLK_OD(4);
4884
4885 if (sw_mode) {
4886 tmp &= ~0x7ffff800;
4887 tmp2 = DYN_OR_EN | DYN_RR_EN | G_DIV_ID(7);
4888 } else {
4889 tmp |= 0x7ffff800;
4890 tmp2 = 0;
4891 }
4892
4893 WREG32(UVD_CGC_CTRL, tmp);
4894 WREG32_UVD_CTX(UVD_CGC_CTRL2, tmp2);
4895}
4896
Alex Deucher22c775c2013-07-23 09:41:05 -04004897void si_init_uvd_internal_cg(struct radeon_device *rdev)
Alex Deucherf8f84ac2013-03-07 12:56:35 -05004898{
4899 bool hw_mode = true;
4900
4901 if (hw_mode) {
4902 si_set_uvd_dcm(rdev, false);
4903 } else {
4904 u32 tmp = RREG32(UVD_CGC_CTRL);
4905 tmp &= ~DCM;
4906 WREG32(UVD_CGC_CTRL, tmp);
4907 }
4908}
4909
/**
 * si_halt_rlc - stop the RLC if it is currently running
 *
 * @rdev: radeon_device pointer
 *
 * Returns the original RLC_CNTL value so the caller can restore the
 * previous state later via si_update_rlc().
 */
static u32 si_halt_rlc(struct radeon_device *rdev)
{
	u32 data, orig;

	orig = data = RREG32(RLC_CNTL);

	if (data & RLC_ENABLE) {
		data &= ~RLC_ENABLE;
		WREG32(RLC_CNTL, data);

		/* make sure the serdes units are idle before proceeding */
		si_wait_for_rlc_serdes(rdev);
	}

	return orig;
}
4925
4926static void si_update_rlc(struct radeon_device *rdev, u32 rlc)
4927{
4928 u32 tmp;
4929
4930 tmp = RREG32(RLC_CNTL);
4931 if (tmp != rlc)
4932 WREG32(RLC_CNTL, rlc);
4933}
4934
4935static void si_enable_dma_pg(struct radeon_device *rdev, bool enable)
4936{
4937 u32 data, orig;
4938
4939 orig = data = RREG32(DMA_PG);
Alex Deuchere16866e2013-08-08 19:34:07 -04004940 if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_SDMA))
Alex Deucherf8f84ac2013-03-07 12:56:35 -05004941 data |= PG_CNTL_ENABLE;
4942 else
4943 data &= ~PG_CNTL_ENABLE;
4944 if (orig != data)
4945 WREG32(DMA_PG, data);
4946}
4947
/**
 * si_init_dma_pg - initialize the DMA power-gating state machine
 *
 * @rdev: radeon_device pointer
 *
 * Programs the DMA PGFSM with its initial configuration.  The magic
 * values come from AMD's power-gating init sequence for SI.
 */
static void si_init_dma_pg(struct radeon_device *rdev)
{
	u32 tmp;

	WREG32(DMA_PGFSM_WRITE,  0x00002000);
	WREG32(DMA_PGFSM_CONFIG, 0x100010ff);

	/* the FSM requires five zero writes to latch the config */
	for (tmp = 0; tmp < 5; tmp++)
		WREG32(DMA_PGFSM_WRITE, 0);
}
4958
/**
 * si_enable_gfx_cgpg - toggle graphics clock/power gating
 *
 * @rdev: radeon_device pointer
 * @enable: requested state (only honoured if GFX PG is supported)
 *
 * Enables automatic gfx powergating via the RLC, or disables the
 * auto-PG controller.
 */
static void si_enable_gfx_cgpg(struct radeon_device *rdev,
			       bool enable)
{
	u32 tmp;

	if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG)) {
		/* program the PG thresholds */
		tmp = RLC_PUD(0x10) | RLC_PDD(0x10) | RLC_TTPD(0x10) | RLC_MSD(0x10);
		WREG32(RLC_TTOP_D, tmp);

		tmp = RREG32(RLC_PG_CNTL);
		tmp |= GFX_PG_ENABLE;
		WREG32(RLC_PG_CNTL, tmp);

		tmp = RREG32(RLC_AUTO_PG_CTRL);
		tmp |= AUTO_PG_EN;
		WREG32(RLC_AUTO_PG_CTRL, tmp);
	} else {
		tmp = RREG32(RLC_AUTO_PG_CTRL);
		tmp &= ~AUTO_PG_EN;
		WREG32(RLC_AUTO_PG_CTRL, tmp);

		/* NOTE(review): value is discarded - presumably a wake-up/
		 * posting read of a gfx register; confirm before removing */
		tmp = RREG32(DB_RENDER_CONTROL);
	}
}
4983
/**
 * si_init_gfx_cgpg - initialize graphics powergating state
 *
 * @rdev: radeon_device pointer
 *
 * Points the RLC at the save/restore and clear-state buffers and
 * programs the auto-PG idle thresholds.
 */
static void si_init_gfx_cgpg(struct radeon_device *rdev)
{
	u32 tmp;

	WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);

	/* save/restore state lives in a GPU buffer, not SRAM */
	tmp = RREG32(RLC_PG_CNTL);
	tmp |= GFX_PG_SRC;
	WREG32(RLC_PG_CNTL, tmp);

	WREG32(RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8);

	tmp = RREG32(RLC_AUTO_PG_CTRL);

	/* idle threshold before the RLC power-gates the gfx block */
	tmp &= ~GRBM_REG_SGIT_MASK;
	tmp |= GRBM_REG_SGIT(0x700);
	tmp &= ~PG_AFTER_GRBM_REG_ST_MASK;
	WREG32(RLC_AUTO_PG_CTRL, tmp);
}
5003
Alex Deucherba190312013-04-17 16:27:40 -04005004static u32 si_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh)
Alex Deucherf8f84ac2013-03-07 12:56:35 -05005005{
5006 u32 mask = 0, tmp, tmp1;
5007 int i;
5008
5009 si_select_se_sh(rdev, se, sh);
5010 tmp = RREG32(CC_GC_SHADER_ARRAY_CONFIG);
5011 tmp1 = RREG32(GC_USER_SHADER_ARRAY_CONFIG);
5012 si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
5013
5014 tmp &= 0xffff0000;
5015
5016 tmp |= tmp1;
5017 tmp >>= 16;
5018
5019 for (i = 0; i < rdev->config.si.max_cu_per_sh; i ++) {
5020 mask <<= 1;
5021 mask |= 1;
5022 }
5023
5024 return (~tmp) & mask;
5025}
5026
5027static void si_init_ao_cu_mask(struct radeon_device *rdev)
5028{
5029 u32 i, j, k, active_cu_number = 0;
5030 u32 mask, counter, cu_bitmap;
5031 u32 tmp = 0;
5032
5033 for (i = 0; i < rdev->config.si.max_shader_engines; i++) {
5034 for (j = 0; j < rdev->config.si.max_sh_per_se; j++) {
5035 mask = 1;
5036 cu_bitmap = 0;
5037 counter = 0;
5038 for (k = 0; k < rdev->config.si.max_cu_per_sh; k++) {
Alex Deucherba190312013-04-17 16:27:40 -04005039 if (si_get_cu_active_bitmap(rdev, i, j) & mask) {
Alex Deucherf8f84ac2013-03-07 12:56:35 -05005040 if (counter < 2)
5041 cu_bitmap |= mask;
5042 counter++;
5043 }
5044 mask <<= 1;
5045 }
5046
5047 active_cu_number += counter;
5048 tmp |= (cu_bitmap << (i * 16 + j * 8));
5049 }
5050 }
5051
5052 WREG32(RLC_PG_AO_CU_MASK, tmp);
5053
5054 tmp = RREG32(RLC_MAX_PG_CU);
5055 tmp &= ~MAX_PU_CU_MASK;
5056 tmp |= MAX_PU_CU(active_cu_number);
5057 WREG32(RLC_MAX_PG_CU, tmp);
5058}
5059
/**
 * si_enable_cgcg - toggle coarse-grain clockgating
 *
 * @rdev: radeon_device pointer
 * @enable: requested state (only honoured if GFX CGCG is supported)
 *
 * The enable path halts the RLC, programs the serdes write masks, waits
 * for the serdes units and restores the RLC - this sequence is required
 * by the hardware, do not reorder.
 */
static void si_enable_cgcg(struct radeon_device *rdev,
			   bool enable)
{
	u32 data, orig, tmp;

	orig = data = RREG32(RLC_CGCG_CGLS_CTRL);

	if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CGCG)) {
		si_enable_gui_idle_interrupt(rdev, true);

		WREG32(RLC_GCPM_GENERAL_3, 0x00000080);

		tmp = si_halt_rlc(rdev);

		/* broadcast the serdes write to every unit */
		WREG32(RLC_SERDES_WR_MASTER_MASK_0, 0xffffffff);
		WREG32(RLC_SERDES_WR_MASTER_MASK_1, 0xffffffff);
		WREG32(RLC_SERDES_WR_CTRL, 0x00b000ff);

		si_wait_for_rlc_serdes(rdev);

		si_update_rlc(rdev, tmp);

		WREG32(RLC_SERDES_WR_CTRL, 0x007000ff);

		data |= CGCG_EN | CGLS_EN;
	} else {
		si_enable_gui_idle_interrupt(rdev, false);

		/* repeated reads flush outstanding CB clock transactions */
		RREG32(CB_CGTT_SCLK_CTRL);
		RREG32(CB_CGTT_SCLK_CTRL);
		RREG32(CB_CGTT_SCLK_CTRL);
		RREG32(CB_CGTT_SCLK_CTRL);

		data &= ~(CGCG_EN | CGLS_EN);
	}

	if (orig != data)
		WREG32(RLC_CGCG_CGLS_CTRL, data);
}
5099
/**
 * si_enable_mgcg - toggle medium-grain clockgating
 *
 * @rdev: radeon_device pointer
 * @enable: requested state (only honoured if GFX MGCG is supported)
 *
 * Programs the CGTS/CP memory light-sleep and MGCG override registers,
 * then updates the serdes units with the RLC halted.  The halt /
 * serdes-write / restore sequence mirrors si_enable_cgcg().
 */
static void si_enable_mgcg(struct radeon_device *rdev,
			   bool enable)
{
	u32 data, orig, tmp = 0;

	if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_MGCG)) {
		orig = data = RREG32(CGTS_SM_CTRL_REG);
		data = 0x96940200;
		if (orig != data)
			WREG32(CGTS_SM_CTRL_REG, data);

		if (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CP_LS) {
			orig = data = RREG32(CP_MEM_SLP_CNTL);
			data |= CP_MEM_LS_EN;
			if (orig != data)
				WREG32(CP_MEM_SLP_CNTL, data);
		}

		/* clear the override bits so the hardware may gate clocks */
		orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
		data &= 0xffffffc0;
		if (orig != data)
			WREG32(RLC_CGTT_MGCG_OVERRIDE, data);

		tmp = si_halt_rlc(rdev);

		WREG32(RLC_SERDES_WR_MASTER_MASK_0, 0xffffffff);
		WREG32(RLC_SERDES_WR_MASTER_MASK_1, 0xffffffff);
		WREG32(RLC_SERDES_WR_CTRL, 0x00d000ff);

		si_update_rlc(rdev, tmp);
	} else {
		/* force the override bits on to disable gating */
		orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
		data |= 0x00000003;
		if (orig != data)
			WREG32(RLC_CGTT_MGCG_OVERRIDE, data);

		data = RREG32(CP_MEM_SLP_CNTL);
		if (data & CP_MEM_LS_EN) {
			data &= ~CP_MEM_LS_EN;
			WREG32(CP_MEM_SLP_CNTL, data);
		}
		orig = data = RREG32(CGTS_SM_CTRL_REG);
		data |= LS_OVERRIDE | OVERRIDE;
		if (orig != data)
			WREG32(CGTS_SM_CTRL_REG, data);

		tmp = si_halt_rlc(rdev);

		WREG32(RLC_SERDES_WR_MASTER_MASK_0, 0xffffffff);
		WREG32(RLC_SERDES_WR_MASTER_MASK_1, 0xffffffff);
		WREG32(RLC_SERDES_WR_CTRL, 0x00e000ff);

		si_update_rlc(rdev, tmp);
	}
}
5155
/**
 * si_enable_uvd_mgcg - toggle UVD medium-grain clockgating
 *
 * @rdev: radeon_device pointer
 * @enable: requested state (only honoured if UVD MGCG is supported)
 *
 * Programs the UVD context-register memory gating bits, the DCM bit in
 * UVD_CGC_CTRL and the SMC-side CGTT local registers.
 */
static void si_enable_uvd_mgcg(struct radeon_device *rdev,
			       bool enable)
{
	u32 orig, data, tmp;

	if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_UVD_MGCG)) {
		tmp = RREG32_UVD_CTX(UVD_CGC_MEM_CTRL);
		tmp |= 0x3fff;
		WREG32_UVD_CTX(UVD_CGC_MEM_CTRL, tmp);

		orig = data = RREG32(UVD_CGC_CTRL);
		data |= DCM;
		if (orig != data)
			WREG32(UVD_CGC_CTRL, data);

		/* zero = no clocks forced on */
		WREG32_SMC(SMC_CG_IND_START + CG_CGTT_LOCAL_0, 0);
		WREG32_SMC(SMC_CG_IND_START + CG_CGTT_LOCAL_1, 0);
	} else {
		tmp = RREG32_UVD_CTX(UVD_CGC_MEM_CTRL);
		tmp &= ~0x3fff;
		WREG32_UVD_CTX(UVD_CGC_MEM_CTRL, tmp);

		orig = data = RREG32(UVD_CGC_CTRL);
		data &= ~DCM;
		if (orig != data)
			WREG32(UVD_CGC_CTRL, data);

		/* all ones = force every clock on */
		WREG32_SMC(SMC_CG_IND_START + CG_CGTT_LOCAL_0, 0xffffffff);
		WREG32_SMC(SMC_CG_IND_START + CG_CGTT_LOCAL_1, 0xffffffff);
	}
}
5187
/* memory-controller registers that carry per-block CG/LS enable bits;
 * walked by si_enable_mc_ls() and si_enable_mc_mgcg() */
static const u32 mc_cg_registers[] =
{
	MC_HUB_MISC_HUB_CG,
	MC_HUB_MISC_SIP_CG,
	MC_HUB_MISC_VM_CG,
	MC_XPB_CLK_GAT,
	ATC_MISC_CG,
	MC_CITF_MISC_WR_CG,
	MC_CITF_MISC_RD_CG,
	MC_CITF_MISC_VM_CG,
	VM_L2_CG,
};
5200
5201static void si_enable_mc_ls(struct radeon_device *rdev,
5202 bool enable)
5203{
5204 int i;
5205 u32 orig, data;
5206
5207 for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
5208 orig = data = RREG32(mc_cg_registers[i]);
Alex Deuchere16866e2013-08-08 19:34:07 -04005209 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_MC_LS))
Alex Deucherf8f84ac2013-03-07 12:56:35 -05005210 data |= MC_LS_ENABLE;
5211 else
5212 data &= ~MC_LS_ENABLE;
5213 if (data != orig)
5214 WREG32(mc_cg_registers[i], data);
5215 }
5216}
5217
Alex Deuchere16866e2013-08-08 19:34:07 -04005218static void si_enable_mc_mgcg(struct radeon_device *rdev,
5219 bool enable)
5220{
5221 int i;
5222 u32 orig, data;
5223
5224 for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
5225 orig = data = RREG32(mc_cg_registers[i]);
5226 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_MC_MGCG))
5227 data |= MC_CG_ENABLE;
5228 else
5229 data &= ~MC_CG_ENABLE;
5230 if (data != orig)
5231 WREG32(mc_cg_registers[i], data);
5232 }
5233}
5234
5235static void si_enable_dma_mgcg(struct radeon_device *rdev,
5236 bool enable)
5237{
5238 u32 orig, data, offset;
5239 int i;
5240
5241 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_SDMA_MGCG)) {
5242 for (i = 0; i < 2; i++) {
5243 if (i == 0)
5244 offset = DMA0_REGISTER_OFFSET;
5245 else
5246 offset = DMA1_REGISTER_OFFSET;
5247 orig = data = RREG32(DMA_POWER_CNTL + offset);
5248 data &= ~MEM_POWER_OVERRIDE;
5249 if (data != orig)
5250 WREG32(DMA_POWER_CNTL + offset, data);
5251 WREG32(DMA_CLK_CTRL + offset, 0x00000100);
5252 }
5253 } else {
5254 for (i = 0; i < 2; i++) {
5255 if (i == 0)
5256 offset = DMA0_REGISTER_OFFSET;
5257 else
5258 offset = DMA1_REGISTER_OFFSET;
5259 orig = data = RREG32(DMA_POWER_CNTL + offset);
5260 data |= MEM_POWER_OVERRIDE;
5261 if (data != orig)
5262 WREG32(DMA_POWER_CNTL + offset, data);
5263
5264 orig = data = RREG32(DMA_CLK_CTRL + offset);
5265 data = 0xff000000;
5266 if (data != orig)
5267 WREG32(DMA_CLK_CTRL + offset, data);
5268 }
5269 }
5270}
5271
5272static void si_enable_bif_mgls(struct radeon_device *rdev,
5273 bool enable)
5274{
5275 u32 orig, data;
5276
5277 orig = data = RREG32_PCIE(PCIE_CNTL2);
5278
5279 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_BIF_LS))
5280 data |= SLV_MEM_LS_EN | MST_MEM_LS_EN |
5281 REPLAY_MEM_LS_EN | SLV_MEM_AGGRESSIVE_LS_EN;
5282 else
5283 data &= ~(SLV_MEM_LS_EN | MST_MEM_LS_EN |
5284 REPLAY_MEM_LS_EN | SLV_MEM_AGGRESSIVE_LS_EN);
5285
5286 if (orig != data)
5287 WREG32_PCIE(PCIE_CNTL2, data);
5288}
5289
5290static void si_enable_hdp_mgcg(struct radeon_device *rdev,
5291 bool enable)
5292{
5293 u32 orig, data;
5294
5295 orig = data = RREG32(HDP_HOST_PATH_CNTL);
5296
5297 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_HDP_MGCG))
5298 data &= ~CLOCK_GATING_DIS;
5299 else
5300 data |= CLOCK_GATING_DIS;
5301
5302 if (orig != data)
5303 WREG32(HDP_HOST_PATH_CNTL, data);
5304}
5305
5306static void si_enable_hdp_ls(struct radeon_device *rdev,
5307 bool enable)
5308{
5309 u32 orig, data;
5310
5311 orig = data = RREG32(HDP_MEM_POWER_LS);
5312
5313 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_HDP_LS))
5314 data |= HDP_LS_ENABLE;
5315 else
5316 data &= ~HDP_LS_ENABLE;
5317
5318 if (orig != data)
5319 WREG32(HDP_MEM_POWER_LS, data);
5320}
5321
Alex Deucher68e3a092013-12-18 14:11:40 -05005322static void si_update_cg(struct radeon_device *rdev,
5323 u32 block, bool enable)
Alex Deuchere16866e2013-08-08 19:34:07 -04005324{
5325 if (block & RADEON_CG_BLOCK_GFX) {
Alex Deucher811e4d52013-09-03 13:31:33 -04005326 si_enable_gui_idle_interrupt(rdev, false);
Alex Deuchere16866e2013-08-08 19:34:07 -04005327 /* order matters! */
5328 if (enable) {
5329 si_enable_mgcg(rdev, true);
5330 si_enable_cgcg(rdev, true);
5331 } else {
5332 si_enable_cgcg(rdev, false);
5333 si_enable_mgcg(rdev, false);
5334 }
Alex Deucher811e4d52013-09-03 13:31:33 -04005335 si_enable_gui_idle_interrupt(rdev, true);
Alex Deuchere16866e2013-08-08 19:34:07 -04005336 }
5337
5338 if (block & RADEON_CG_BLOCK_MC) {
5339 si_enable_mc_mgcg(rdev, enable);
5340 si_enable_mc_ls(rdev, enable);
5341 }
5342
5343 if (block & RADEON_CG_BLOCK_SDMA) {
5344 si_enable_dma_mgcg(rdev, enable);
5345 }
5346
5347 if (block & RADEON_CG_BLOCK_BIF) {
5348 si_enable_bif_mgls(rdev, enable);
5349 }
5350
5351 if (block & RADEON_CG_BLOCK_UVD) {
5352 if (rdev->has_uvd) {
5353 si_enable_uvd_mgcg(rdev, enable);
5354 }
5355 }
5356
5357 if (block & RADEON_CG_BLOCK_HDP) {
5358 si_enable_hdp_mgcg(rdev, enable);
5359 si_enable_hdp_ls(rdev, enable);
5360 }
5361}
Alex Deucherf8f84ac2013-03-07 12:56:35 -05005362
/**
 * si_init_cg - enable clockgating on all supported blocks at init
 *
 * @rdev: radeon_device pointer
 *
 * UVD gating is handled separately because it also needs the internal
 * UVD CG setup.
 */
static void si_init_cg(struct radeon_device *rdev)
{
	si_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
			    RADEON_CG_BLOCK_MC |
			    RADEON_CG_BLOCK_SDMA |
			    RADEON_CG_BLOCK_BIF |
			    RADEON_CG_BLOCK_HDP), true);
	if (rdev->has_uvd) {
		si_update_cg(rdev, RADEON_CG_BLOCK_UVD, true);
		si_init_uvd_internal_cg(rdev);
	}
}
5375
/**
 * si_fini_cg - disable clockgating on all blocks at teardown
 *
 * @rdev: radeon_device pointer
 *
 * UVD is disabled first, mirroring the reverse of si_init_cg().
 */
static void si_fini_cg(struct radeon_device *rdev)
{
	if (rdev->has_uvd) {
		si_update_cg(rdev, RADEON_CG_BLOCK_UVD, false);
	}
	si_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
			    RADEON_CG_BLOCK_MC |
			    RADEON_CG_BLOCK_SDMA |
			    RADEON_CG_BLOCK_BIF |
			    RADEON_CG_BLOCK_HDP), false);
}
Alex Deucherf8f84ac2013-03-07 12:56:35 -05005387
Alex Deucher59a82d02013-08-13 12:48:06 -04005388u32 si_get_csb_size(struct radeon_device *rdev)
5389{
5390 u32 count = 0;
5391 const struct cs_section_def *sect = NULL;
5392 const struct cs_extent_def *ext = NULL;
5393
5394 if (rdev->rlc.cs_data == NULL)
5395 return 0;
5396
5397 /* begin clear state */
5398 count += 2;
5399 /* context control state */
5400 count += 3;
5401
5402 for (sect = rdev->rlc.cs_data; sect->section != NULL; ++sect) {
5403 for (ext = sect->section; ext->extent != NULL; ++ext) {
5404 if (sect->id == SECT_CONTEXT)
5405 count += 2 + ext->reg_count;
5406 else
5407 return 0;
5408 }
5409 }
5410 /* pa_sc_raster_config */
5411 count += 3;
5412 /* end clear state */
5413 count += 2;
5414 /* clear state */
5415 count += 2;
5416
5417 return count;
5418}
5419
/**
 * si_get_csb_buffer - fill the clear-state indirect buffer
 *
 * @rdev: radeon_device pointer
 * @buffer: destination buffer (sized per si_get_csb_size()), may be NULL
 *
 * Emits the PM4 stream the RLC replays to restore golden context state:
 * preamble begin, context control, every SECT_CONTEXT register extent,
 * the per-family PA_SC_RASTER_CONFIG value, preamble end and a final
 * CLEAR_STATE.  All dwords are emitted little-endian (cpu_to_le32) as
 * the CP consumes them in LE order.
 */
void si_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer)
{
	u32 count = 0, i;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;

	if (rdev->rlc.cs_data == NULL)
		return;
	if (buffer == NULL)
		return;

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
	buffer[count++] = cpu_to_le32(0x80000000);
	buffer[count++] = cpu_to_le32(0x80000000);

	for (sect = rdev->rlc.cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT) {
				buffer[count++] =
					cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
				buffer[count++] = cpu_to_le32(ext->reg_index - 0xa000);
				for (i = 0; i < ext->reg_count; i++)
					buffer[count++] = cpu_to_le32(ext->extent[i]);
			} else {
				return;
			}
		}
	}

	/* raster config is family specific */
	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	buffer[count++] = cpu_to_le32(PA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
	switch (rdev->family) {
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
		buffer[count++] = cpu_to_le32(0x2a00126a);
		break;
	case CHIP_VERDE:
		buffer[count++] = cpu_to_le32(0x0000124a);
		break;
	case CHIP_OLAND:
		buffer[count++] = cpu_to_le32(0x00000082);
		break;
	case CHIP_HAINAN:
		buffer[count++] = cpu_to_le32(0x00000000);
		break;
	default:
		buffer[count++] = cpu_to_le32(0x00000000);
		break;
	}

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
	buffer[count++] = cpu_to_le32(0);
}
5479
/**
 * si_init_pg - initialize powergating
 *
 * @rdev: radeon_device pointer
 *
 * When powergating is supported, sets up DMA PG, the always-on CU mask
 * and gfx CG/PG, then enables them.  The RLC save/restore and
 * clear-state base registers are programmed in every path because the
 * RLC needs them regardless of PG support.
 */
static void si_init_pg(struct radeon_device *rdev)
{
	if (rdev->pg_flags) {
		if (rdev->pg_flags & RADEON_PG_SUPPORT_SDMA) {
			si_init_dma_pg(rdev);
		}
		si_init_ao_cu_mask(rdev);
		if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG) {
			si_init_gfx_cgpg(rdev);
		} else {
			WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
			WREG32(RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8);
		}
		si_enable_dma_pg(rdev, true);
		si_enable_gfx_cgpg(rdev, true);
	} else {
		WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
		WREG32(RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8);
	}
}
5500
5501static void si_fini_pg(struct radeon_device *rdev)
5502{
Alex Deucher0116e1e2013-08-08 18:00:10 -04005503 if (rdev->pg_flags) {
Alex Deucherf8f84ac2013-03-07 12:56:35 -05005504 si_enable_dma_pg(rdev, false);
5505 si_enable_gfx_cgpg(rdev, false);
5506 }
5507}
5508
5509/*
Alex Deucher347e7592012-03-20 17:18:21 -04005510 * RLC
5511 */
Alex Deucher866d83d2013-04-15 17:13:29 -04005512void si_rlc_reset(struct radeon_device *rdev)
Alex Deucherd719cef2013-02-15 16:49:59 -05005513{
Alex Deucherf8f84ac2013-03-07 12:56:35 -05005514 u32 tmp = RREG32(GRBM_SOFT_RESET);
Alex Deucherd719cef2013-02-15 16:49:59 -05005515
Alex Deucherf8f84ac2013-03-07 12:56:35 -05005516 tmp |= SOFT_RESET_RLC;
5517 WREG32(GRBM_SOFT_RESET, tmp);
5518 udelay(50);
5519 tmp &= ~SOFT_RESET_RLC;
5520 WREG32(GRBM_SOFT_RESET, tmp);
5521 udelay(50);
Alex Deucherd719cef2013-02-15 16:49:59 -05005522}
5523
/**
 * si_rlc_stop - halt the RLC (RunList Controller)
 * @rdev: radeon_device pointer
 *
 * Disables the RLC, masks the GUI idle interrupt, then waits for the
 * RLC serdes to go idle before returning.
 */
static void si_rlc_stop(struct radeon_device *rdev)
{
	WREG32(RLC_CNTL, 0);

	si_enable_gui_idle_interrupt(rdev, false);

	si_wait_for_rlc_serdes(rdev);
}
5532
/**
 * si_rlc_start - start the RLC (RunList Controller)
 * @rdev: radeon_device pointer
 *
 * Enables the RLC, unmasks the GUI idle interrupt, and gives the
 * block a short delay to come up.
 */
static void si_rlc_start(struct radeon_device *rdev)
{
	WREG32(RLC_CNTL, RLC_ENABLE);

	si_enable_gui_idle_interrupt(rdev, true);

	udelay(50);
}
5541
5542static bool si_lbpw_supported(struct radeon_device *rdev)
5543{
5544 u32 tmp;
5545
5546 /* Enable LBPW only for DDR3 */
5547 tmp = RREG32(MC_SEQ_MISC0);
5548 if ((tmp & 0xF0000000) == 0xB0000000)
5549 return true;
5550 return false;
5551}
5552
5553static void si_enable_lbpw(struct radeon_device *rdev, bool enable)
5554{
5555 u32 tmp;
5556
5557 tmp = RREG32(RLC_LB_CNTL);
5558 if (enable)
5559 tmp |= LOAD_BALANCE_ENABLE;
5560 else
5561 tmp &= ~LOAD_BALANCE_ENABLE;
5562 WREG32(RLC_LB_CNTL, tmp);
5563
5564 if (!enable) {
5565 si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
5566 WREG32(SPI_LB_CU_MASK, 0x00ff);
5567 }
Alex Deucher347e7592012-03-20 17:18:21 -04005568}
5569
/**
 * si_rlc_resume - load the RLC microcode and start the RLC
 * @rdev: radeon_device pointer
 *
 * Stops and resets the RLC, (re)initializes PG and CG, clears the RLC
 * runlist/load-balance state, uploads the RLC ucode word by word, then
 * enables LBPW (if supported) and starts the RLC.
 *
 * Returns 0 on success, -EINVAL if the RLC firmware was never loaded.
 */
static int si_rlc_resume(struct radeon_device *rdev)
{
	u32 i;
	const __be32 *fw_data;

	if (!rdev->rlc_fw)
		return -EINVAL;

	si_rlc_stop(rdev);

	si_rlc_reset(rdev);

	si_init_pg(rdev);

	si_init_cg(rdev);

	/* clear runlist and load-balance state */
	WREG32(RLC_RL_BASE, 0);
	WREG32(RLC_RL_SIZE, 0);
	WREG32(RLC_LB_CNTL, 0);
	WREG32(RLC_LB_CNTR_MAX, 0xffffffff);
	WREG32(RLC_LB_CNTR_INIT, 0);
	WREG32(RLC_LB_INIT_CU_MASK, 0xffffffff);

	WREG32(RLC_MC_CNTL, 0);
	WREG32(RLC_UCODE_CNTL, 0);

	/* the ucode image is stored big-endian; swap each word on upload */
	fw_data = (const __be32 *)rdev->rlc_fw->data;
	for (i = 0; i < SI_RLC_UCODE_SIZE; i++) {
		WREG32(RLC_UCODE_ADDR, i);
		WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
	}
	/* reset the ucode address register after the upload */
	WREG32(RLC_UCODE_ADDR, 0);

	si_enable_lbpw(rdev, si_lbpw_supported(rdev));

	si_rlc_start(rdev);

	return 0;
}
5609
Alex Deucher25a857f2012-03-20 17:18:22 -04005610static void si_enable_interrupts(struct radeon_device *rdev)
5611{
5612 u32 ih_cntl = RREG32(IH_CNTL);
5613 u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
5614
5615 ih_cntl |= ENABLE_INTR;
5616 ih_rb_cntl |= IH_RB_ENABLE;
5617 WREG32(IH_CNTL, ih_cntl);
5618 WREG32(IH_RB_CNTL, ih_rb_cntl);
5619 rdev->ih.enabled = true;
5620}
5621
5622static void si_disable_interrupts(struct radeon_device *rdev)
5623{
5624 u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
5625 u32 ih_cntl = RREG32(IH_CNTL);
5626
5627 ih_rb_cntl &= ~IH_RB_ENABLE;
5628 ih_cntl &= ~ENABLE_INTR;
5629 WREG32(IH_RB_CNTL, ih_rb_cntl);
5630 WREG32(IH_CNTL, ih_cntl);
5631 /* set rptr, wptr to 0 */
5632 WREG32(IH_RB_RPTR, 0);
5633 WREG32(IH_RB_WPTR, 0);
5634 rdev->ih.enabled = false;
Alex Deucher25a857f2012-03-20 17:18:22 -04005635 rdev->ih.rptr = 0;
5636}
5637
/**
 * si_disable_interrupt_state - mask every interrupt source
 * @rdev: radeon_device pointer
 *
 * Disables CP, DMA, GRBM, crtc vblank, pageflip, DAC and HPD interrupt
 * sources, while preserving the bits that are not enable bits (CP
 * context bits, HPD polarity).  Display sources are skipped entirely
 * on ASICs without a display engine.
 */
static void si_disable_interrupt_state(struct radeon_device *rdev)
{
	u32 tmp;

	/* keep only the context busy/empty bits; clears all CP irq enables */
	tmp = RREG32(CP_INT_CNTL_RING0) &
		(CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
	WREG32(CP_INT_CNTL_RING0, tmp);
	WREG32(CP_INT_CNTL_RING1, 0);
	WREG32(CP_INT_CNTL_RING2, 0);
	/* mask the trap interrupt on both DMA engines */
	tmp = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
	WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, tmp);
	tmp = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;
	WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, tmp);
	WREG32(GRBM_INT_CNTL, 0);
	/* vblank/vline masks, per populated crtc pair */
	if (rdev->num_crtc >= 2) {
		WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
		WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
	}
	if (rdev->num_crtc >= 4) {
		WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
		WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
	}
	if (rdev->num_crtc >= 6) {
		WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
		WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
	}

	/* pageflip interrupt masks, per populated crtc pair */
	if (rdev->num_crtc >= 2) {
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
	}
	if (rdev->num_crtc >= 4) {
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
	}
	if (rdev->num_crtc >= 6) {
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
	}

	if (!ASIC_IS_NODCE(rdev)) {
		WREG32(DAC_AUTODETECT_INT_CONTROL, 0);

		/* clear HPD interrupt enables but keep the polarity bit */
		tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD1_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD2_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD3_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD4_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD5_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD6_INT_CONTROL, tmp);
	}
}
5695
/**
 * si_irq_init - initialize interrupt support
 * @rdev: radeon_device pointer
 *
 * Allocates the IH ring buffer, brings up the RLC, programs the IH
 * ring registers (base, size, writeback address, pointers), masks all
 * interrupt sources, enables PCI bus mastering and finally turns the
 * IH ring on.
 *
 * Returns 0 on success, an error code on failure.
 */
static int si_irq_init(struct radeon_device *rdev)
{
	int ret = 0;
	int rb_bufsz;
	u32 interrupt_cntl, ih_cntl, ih_rb_cntl;

	/* allocate ring */
	ret = r600_ih_ring_alloc(rdev);
	if (ret)
		return ret;

	/* disable irqs */
	si_disable_interrupts(rdev);

	/* init rlc */
	ret = si_rlc_resume(rdev);
	if (ret) {
		/* undo the ring allocation on failure */
		r600_ih_ring_fini(rdev);
		return ret;
	}

	/* setup interrupt control */
	/* set dummy read address to ring address */
	WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
	interrupt_cntl = RREG32(INTERRUPT_CNTL);
	/* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
	 * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
	 */
	interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
	/* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
	interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
	WREG32(INTERRUPT_CNTL, interrupt_cntl);

	WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
	/* ring size field is log2 of the ring size in dwords */
	rb_bufsz = order_base_2(rdev->ih.ring_size / 4);

	ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
		      IH_WPTR_OVERFLOW_CLEAR |
		      (rb_bufsz << 1));

	if (rdev->wb.enabled)
		ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;

	/* set the writeback address whether it's enabled or not */
	WREG32(IH_RB_WPTR_ADDR_LO, (rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFFFFFFFC);
	WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFF);

	WREG32(IH_RB_CNTL, ih_rb_cntl);

	/* set rptr, wptr to 0 */
	WREG32(IH_RB_RPTR, 0);
	WREG32(IH_RB_WPTR, 0);

	/* Default settings for IH_CNTL (disabled at first) */
	ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10) | MC_VMID(0);
	/* RPTR_REARM only works if msi's are enabled */
	if (rdev->msi_enabled)
		ih_cntl |= RPTR_REARM;
	WREG32(IH_CNTL, ih_cntl);

	/* force the active interrupt state to all disabled */
	si_disable_interrupt_state(rdev);

	pci_set_master(rdev->pdev);

	/* enable irqs */
	si_enable_interrupts(rdev);

	return ret;
}
5766
/**
 * si_irq_set - program the interrupt enable registers
 * @rdev: radeon_device pointer
 *
 * Builds the enable masks for the CP rings, DMA engines, crtc
 * vblank/pageflip, HPD and thermal interrupt sources from the current
 * rdev->irq state, then writes them all to the hardware.  If the IH is
 * not enabled everything is forced to the disabled state instead.
 *
 * Returns 0 on success, -EINVAL if no irq handler is installed.
 */
int si_irq_set(struct radeon_device *rdev)
{
	u32 cp_int_cntl;
	u32 cp_int_cntl1 = 0, cp_int_cntl2 = 0;
	u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
	u32 hpd1 = 0, hpd2 = 0, hpd3 = 0, hpd4 = 0, hpd5 = 0, hpd6 = 0;
	u32 grbm_int_cntl = 0;
	u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;
	u32 dma_cntl, dma_cntl1;
	u32 thermal_int = 0;

	if (!rdev->irq.installed) {
		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
		return -EINVAL;
	}
	/* don't enable anything if the ih is disabled */
	if (!rdev->ih.enabled) {
		si_disable_interrupts(rdev);
		/* force the active interrupt state to all disabled */
		si_disable_interrupt_state(rdev);
		return 0;
	}

	/* start from the current register values with the enable bits cleared */
	cp_int_cntl = RREG32(CP_INT_CNTL_RING0) &
		(CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);

	if (!ASIC_IS_NODCE(rdev)) {
		hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
	}

	dma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
	dma_cntl1 = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;

	thermal_int = RREG32(CG_THERMAL_INT) &
		~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);

	/* enable CP interrupts on all rings */
	if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
		DRM_DEBUG("si_irq_set: sw int gfx\n");
		cp_int_cntl |= TIME_STAMP_INT_ENABLE;
	}
	if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP1_INDEX])) {
		DRM_DEBUG("si_irq_set: sw int cp1\n");
		cp_int_cntl1 |= TIME_STAMP_INT_ENABLE;
	}
	if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP2_INDEX])) {
		DRM_DEBUG("si_irq_set: sw int cp2\n");
		cp_int_cntl2 |= TIME_STAMP_INT_ENABLE;
	}
	if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) {
		DRM_DEBUG("si_irq_set: sw int dma\n");
		dma_cntl |= TRAP_ENABLE;
	}

	if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_DMA1_INDEX])) {
		DRM_DEBUG("si_irq_set: sw int dma1\n");
		dma_cntl1 |= TRAP_ENABLE;
	}
	/* vblank interrupts are needed for both explicit vblank waits
	 * and pending page flips */
	if (rdev->irq.crtc_vblank_int[0] ||
	    atomic_read(&rdev->irq.pflip[0])) {
		DRM_DEBUG("si_irq_set: vblank 0\n");
		crtc1 |= VBLANK_INT_MASK;
	}
	if (rdev->irq.crtc_vblank_int[1] ||
	    atomic_read(&rdev->irq.pflip[1])) {
		DRM_DEBUG("si_irq_set: vblank 1\n");
		crtc2 |= VBLANK_INT_MASK;
	}
	if (rdev->irq.crtc_vblank_int[2] ||
	    atomic_read(&rdev->irq.pflip[2])) {
		DRM_DEBUG("si_irq_set: vblank 2\n");
		crtc3 |= VBLANK_INT_MASK;
	}
	if (rdev->irq.crtc_vblank_int[3] ||
	    atomic_read(&rdev->irq.pflip[3])) {
		DRM_DEBUG("si_irq_set: vblank 3\n");
		crtc4 |= VBLANK_INT_MASK;
	}
	if (rdev->irq.crtc_vblank_int[4] ||
	    atomic_read(&rdev->irq.pflip[4])) {
		DRM_DEBUG("si_irq_set: vblank 4\n");
		crtc5 |= VBLANK_INT_MASK;
	}
	if (rdev->irq.crtc_vblank_int[5] ||
	    atomic_read(&rdev->irq.pflip[5])) {
		DRM_DEBUG("si_irq_set: vblank 5\n");
		crtc6 |= VBLANK_INT_MASK;
	}
	if (rdev->irq.hpd[0]) {
		DRM_DEBUG("si_irq_set: hpd 1\n");
		hpd1 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[1]) {
		DRM_DEBUG("si_irq_set: hpd 2\n");
		hpd2 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[2]) {
		DRM_DEBUG("si_irq_set: hpd 3\n");
		hpd3 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[3]) {
		DRM_DEBUG("si_irq_set: hpd 4\n");
		hpd4 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[4]) {
		DRM_DEBUG("si_irq_set: hpd 5\n");
		hpd5 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[5]) {
		DRM_DEBUG("si_irq_set: hpd 6\n");
		hpd6 |= DC_HPDx_INT_EN;
	}

	/* write all the accumulated masks out to the hardware */
	WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
	WREG32(CP_INT_CNTL_RING1, cp_int_cntl1);
	WREG32(CP_INT_CNTL_RING2, cp_int_cntl2);

	WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, dma_cntl);
	WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, dma_cntl1);

	WREG32(GRBM_INT_CNTL, grbm_int_cntl);

	if (rdev->irq.dpm_thermal) {
		DRM_DEBUG("dpm thermal\n");
		thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
	}

	if (rdev->num_crtc >= 2) {
		WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
		WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2);
	}
	if (rdev->num_crtc >= 4) {
		WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3);
		WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4);
	}
	if (rdev->num_crtc >= 6) {
		WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5);
		WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
	}

	if (rdev->num_crtc >= 2) {
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1);
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2);
	}
	if (rdev->num_crtc >= 4) {
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3);
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4);
	}
	if (rdev->num_crtc >= 6) {
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, grph5);
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6);
	}

	if (!ASIC_IS_NODCE(rdev)) {
		WREG32(DC_HPD1_INT_CONTROL, hpd1);
		WREG32(DC_HPD2_INT_CONTROL, hpd2);
		WREG32(DC_HPD3_INT_CONTROL, hpd3);
		WREG32(DC_HPD4_INT_CONTROL, hpd4);
		WREG32(DC_HPD5_INT_CONTROL, hpd5);
		WREG32(DC_HPD6_INT_CONTROL, hpd6);
	}

	WREG32(CG_THERMAL_INT, thermal_int);

	return 0;
}
5938
5939static inline void si_irq_ack(struct radeon_device *rdev)
5940{
5941 u32 tmp;
5942
Alex Deucher51535502012-08-30 14:34:30 -04005943 if (ASIC_IS_NODCE(rdev))
5944 return;
5945
Alex Deucher25a857f2012-03-20 17:18:22 -04005946 rdev->irq.stat_regs.evergreen.disp_int = RREG32(DISP_INTERRUPT_STATUS);
5947 rdev->irq.stat_regs.evergreen.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
5948 rdev->irq.stat_regs.evergreen.disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2);
5949 rdev->irq.stat_regs.evergreen.disp_int_cont3 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE3);
5950 rdev->irq.stat_regs.evergreen.disp_int_cont4 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE4);
5951 rdev->irq.stat_regs.evergreen.disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5);
5952 rdev->irq.stat_regs.evergreen.d1grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET);
5953 rdev->irq.stat_regs.evergreen.d2grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET);
5954 if (rdev->num_crtc >= 4) {
5955 rdev->irq.stat_regs.evergreen.d3grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET);
5956 rdev->irq.stat_regs.evergreen.d4grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET);
5957 }
5958 if (rdev->num_crtc >= 6) {
5959 rdev->irq.stat_regs.evergreen.d5grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET);
5960 rdev->irq.stat_regs.evergreen.d6grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET);
5961 }
5962
5963 if (rdev->irq.stat_regs.evergreen.d1grph_int & GRPH_PFLIP_INT_OCCURRED)
5964 WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
5965 if (rdev->irq.stat_regs.evergreen.d2grph_int & GRPH_PFLIP_INT_OCCURRED)
5966 WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
5967 if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT)
5968 WREG32(VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK);
5969 if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT)
5970 WREG32(VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VLINE_ACK);
5971 if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT)
5972 WREG32(VBLANK_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VBLANK_ACK);
5973 if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT)
5974 WREG32(VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK);
5975
5976 if (rdev->num_crtc >= 4) {
5977 if (rdev->irq.stat_regs.evergreen.d3grph_int & GRPH_PFLIP_INT_OCCURRED)
5978 WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
5979 if (rdev->irq.stat_regs.evergreen.d4grph_int & GRPH_PFLIP_INT_OCCURRED)
5980 WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
5981 if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)
5982 WREG32(VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK);
5983 if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT)
5984 WREG32(VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VLINE_ACK);
5985 if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT)
5986 WREG32(VBLANK_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VBLANK_ACK);
5987 if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT)
5988 WREG32(VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VLINE_ACK);
5989 }
5990
5991 if (rdev->num_crtc >= 6) {
5992 if (rdev->irq.stat_regs.evergreen.d5grph_int & GRPH_PFLIP_INT_OCCURRED)
5993 WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
5994 if (rdev->irq.stat_regs.evergreen.d6grph_int & GRPH_PFLIP_INT_OCCURRED)
5995 WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
5996 if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)
5997 WREG32(VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK);
5998 if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT)
5999 WREG32(VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VLINE_ACK);
6000 if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT)
6001 WREG32(VBLANK_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VBLANK_ACK);
6002 if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT)
6003 WREG32(VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VLINE_ACK);
6004 }
6005
6006 if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
6007 tmp = RREG32(DC_HPD1_INT_CONTROL);
6008 tmp |= DC_HPDx_INT_ACK;
6009 WREG32(DC_HPD1_INT_CONTROL, tmp);
6010 }
6011 if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
6012 tmp = RREG32(DC_HPD2_INT_CONTROL);
6013 tmp |= DC_HPDx_INT_ACK;
6014 WREG32(DC_HPD2_INT_CONTROL, tmp);
6015 }
6016 if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
6017 tmp = RREG32(DC_HPD3_INT_CONTROL);
6018 tmp |= DC_HPDx_INT_ACK;
6019 WREG32(DC_HPD3_INT_CONTROL, tmp);
6020 }
6021 if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
6022 tmp = RREG32(DC_HPD4_INT_CONTROL);
6023 tmp |= DC_HPDx_INT_ACK;
6024 WREG32(DC_HPD4_INT_CONTROL, tmp);
6025 }
6026 if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
6027 tmp = RREG32(DC_HPD5_INT_CONTROL);
6028 tmp |= DC_HPDx_INT_ACK;
6029 WREG32(DC_HPD5_INT_CONTROL, tmp);
6030 }
6031 if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
6032 tmp = RREG32(DC_HPD5_INT_CONTROL);
6033 tmp |= DC_HPDx_INT_ACK;
6034 WREG32(DC_HPD6_INT_CONTROL, tmp);
6035 }
6036}
6037
/**
 * si_irq_disable - disable interrupt generation and clear pending state
 * @rdev: radeon_device pointer
 *
 * Disables the IH ring, waits briefly for in-flight interrupts, acks
 * whatever is still latched, and masks all interrupt sources.
 */
static void si_irq_disable(struct radeon_device *rdev)
{
	si_disable_interrupts(rdev);
	/* Wait and acknowledge irq */
	mdelay(1);
	si_irq_ack(rdev);
	si_disable_interrupt_state(rdev);
}
6046
/**
 * si_irq_suspend - quiesce interrupts for suspend
 * @rdev: radeon_device pointer
 *
 * Disables interrupt handling and stops the RLC.
 */
static void si_irq_suspend(struct radeon_device *rdev)
{
	si_irq_disable(rdev);
	si_rlc_stop(rdev);
}
6052
/**
 * si_irq_fini - tear down interrupt support
 * @rdev: radeon_device pointer
 *
 * Suspends interrupts and frees the IH ring buffer.
 */
static void si_irq_fini(struct radeon_device *rdev)
{
	si_irq_suspend(rdev);
	r600_ih_ring_fini(rdev);
}
6058
Alex Deucher25a857f2012-03-20 17:18:22 -04006059static inline u32 si_get_ih_wptr(struct radeon_device *rdev)
6060{
6061 u32 wptr, tmp;
6062
6063 if (rdev->wb.enabled)
6064 wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
6065 else
6066 wptr = RREG32(IH_RB_WPTR);
6067
6068 if (wptr & RB_OVERFLOW) {
6069 /* When a ring buffer overflow happen start parsing interrupt
6070 * from the last not overwritten vector (wptr + 16). Hopefully
6071 * this should allow us to catchup.
6072 */
6073 dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
6074 wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask);
6075 rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
6076 tmp = RREG32(IH_RB_CNTL);
6077 tmp |= IH_WPTR_OVERFLOW_CLEAR;
6078 WREG32(IH_RB_CNTL, tmp);
6079 }
6080 return (wptr & rdev->ih.ptr_mask);
6081}
6082
6083/* SI IV Ring
6084 * Each IV ring entry is 128 bits:
6085 * [7:0] - interrupt source id
6086 * [31:8] - reserved
6087 * [59:32] - interrupt source data
6088 * [63:60] - reserved
6089 * [71:64] - RINGID
6090 * [79:72] - VMID
6091 * [127:80] - reserved
6092 */
6093int si_irq_process(struct radeon_device *rdev)
6094{
6095 u32 wptr;
6096 u32 rptr;
6097 u32 src_id, src_data, ring_id;
6098 u32 ring_index;
Alex Deucher25a857f2012-03-20 17:18:22 -04006099 bool queue_hotplug = false;
Alex Deuchera9e61412013-06-25 17:56:16 -04006100 bool queue_thermal = false;
Alex Deucherfbf6dc72013-06-13 18:47:58 -04006101 u32 status, addr;
Alex Deucher25a857f2012-03-20 17:18:22 -04006102
6103 if (!rdev->ih.enabled || rdev->shutdown)
6104 return IRQ_NONE;
6105
6106 wptr = si_get_ih_wptr(rdev);
Christian Koenigc20dc362012-05-16 21:45:24 +02006107
6108restart_ih:
6109 /* is somebody else already processing irqs? */
6110 if (atomic_xchg(&rdev->ih.lock, 1))
6111 return IRQ_NONE;
6112
Alex Deucher25a857f2012-03-20 17:18:22 -04006113 rptr = rdev->ih.rptr;
6114 DRM_DEBUG("si_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
6115
Alex Deucher25a857f2012-03-20 17:18:22 -04006116 /* Order reading of wptr vs. reading of IH ring data */
6117 rmb();
6118
6119 /* display interrupts */
6120 si_irq_ack(rdev);
6121
Alex Deucher25a857f2012-03-20 17:18:22 -04006122 while (rptr != wptr) {
6123 /* wptr/rptr are in bytes! */
6124 ring_index = rptr / 4;
6125 src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
6126 src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
6127 ring_id = le32_to_cpu(rdev->ih.ring[ring_index + 2]) & 0xff;
6128
6129 switch (src_id) {
6130 case 1: /* D1 vblank/vline */
6131 switch (src_data) {
6132 case 0: /* D1 vblank */
6133 if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) {
6134 if (rdev->irq.crtc_vblank_int[0]) {
6135 drm_handle_vblank(rdev->ddev, 0);
6136 rdev->pm.vblank_sync = true;
6137 wake_up(&rdev->irq.vblank_queue);
6138 }
Christian Koenig736fc372012-05-17 19:52:00 +02006139 if (atomic_read(&rdev->irq.pflip[0]))
Alex Deucher25a857f2012-03-20 17:18:22 -04006140 radeon_crtc_handle_flip(rdev, 0);
6141 rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
6142 DRM_DEBUG("IH: D1 vblank\n");
6143 }
6144 break;
6145 case 1: /* D1 vline */
6146 if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT) {
6147 rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
6148 DRM_DEBUG("IH: D1 vline\n");
6149 }
6150 break;
6151 default:
6152 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
6153 break;
6154 }
6155 break;
6156 case 2: /* D2 vblank/vline */
6157 switch (src_data) {
6158 case 0: /* D2 vblank */
6159 if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
6160 if (rdev->irq.crtc_vblank_int[1]) {
6161 drm_handle_vblank(rdev->ddev, 1);
6162 rdev->pm.vblank_sync = true;
6163 wake_up(&rdev->irq.vblank_queue);
6164 }
Christian Koenig736fc372012-05-17 19:52:00 +02006165 if (atomic_read(&rdev->irq.pflip[1]))
Alex Deucher25a857f2012-03-20 17:18:22 -04006166 radeon_crtc_handle_flip(rdev, 1);
6167 rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
6168 DRM_DEBUG("IH: D2 vblank\n");
6169 }
6170 break;
6171 case 1: /* D2 vline */
6172 if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT) {
6173 rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
6174 DRM_DEBUG("IH: D2 vline\n");
6175 }
6176 break;
6177 default:
6178 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
6179 break;
6180 }
6181 break;
6182 case 3: /* D3 vblank/vline */
6183 switch (src_data) {
6184 case 0: /* D3 vblank */
6185 if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
6186 if (rdev->irq.crtc_vblank_int[2]) {
6187 drm_handle_vblank(rdev->ddev, 2);
6188 rdev->pm.vblank_sync = true;
6189 wake_up(&rdev->irq.vblank_queue);
6190 }
Christian Koenig736fc372012-05-17 19:52:00 +02006191 if (atomic_read(&rdev->irq.pflip[2]))
Alex Deucher25a857f2012-03-20 17:18:22 -04006192 radeon_crtc_handle_flip(rdev, 2);
6193 rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
6194 DRM_DEBUG("IH: D3 vblank\n");
6195 }
6196 break;
6197 case 1: /* D3 vline */
6198 if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
6199 rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
6200 DRM_DEBUG("IH: D3 vline\n");
6201 }
6202 break;
6203 default:
6204 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
6205 break;
6206 }
6207 break;
6208 case 4: /* D4 vblank/vline */
6209 switch (src_data) {
6210 case 0: /* D4 vblank */
6211 if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
6212 if (rdev->irq.crtc_vblank_int[3]) {
6213 drm_handle_vblank(rdev->ddev, 3);
6214 rdev->pm.vblank_sync = true;
6215 wake_up(&rdev->irq.vblank_queue);
6216 }
Christian Koenig736fc372012-05-17 19:52:00 +02006217 if (atomic_read(&rdev->irq.pflip[3]))
Alex Deucher25a857f2012-03-20 17:18:22 -04006218 radeon_crtc_handle_flip(rdev, 3);
6219 rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
6220 DRM_DEBUG("IH: D4 vblank\n");
6221 }
6222 break;
6223 case 1: /* D4 vline */
6224 if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
6225 rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
6226 DRM_DEBUG("IH: D4 vline\n");
6227 }
6228 break;
6229 default:
6230 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
6231 break;
6232 }
6233 break;
6234 case 5: /* D5 vblank/vline */
6235 switch (src_data) {
6236 case 0: /* D5 vblank */
6237 if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
6238 if (rdev->irq.crtc_vblank_int[4]) {
6239 drm_handle_vblank(rdev->ddev, 4);
6240 rdev->pm.vblank_sync = true;
6241 wake_up(&rdev->irq.vblank_queue);
6242 }
Christian Koenig736fc372012-05-17 19:52:00 +02006243 if (atomic_read(&rdev->irq.pflip[4]))
Alex Deucher25a857f2012-03-20 17:18:22 -04006244 radeon_crtc_handle_flip(rdev, 4);
6245 rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
6246 DRM_DEBUG("IH: D5 vblank\n");
6247 }
6248 break;
6249 case 1: /* D5 vline */
6250 if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
6251 rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
6252 DRM_DEBUG("IH: D5 vline\n");
6253 }
6254 break;
6255 default:
6256 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
6257 break;
6258 }
6259 break;
6260 case 6: /* D6 vblank/vline */
6261 switch (src_data) {
6262 case 0: /* D6 vblank */
6263 if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
6264 if (rdev->irq.crtc_vblank_int[5]) {
6265 drm_handle_vblank(rdev->ddev, 5);
6266 rdev->pm.vblank_sync = true;
6267 wake_up(&rdev->irq.vblank_queue);
6268 }
Christian Koenig736fc372012-05-17 19:52:00 +02006269 if (atomic_read(&rdev->irq.pflip[5]))
Alex Deucher25a857f2012-03-20 17:18:22 -04006270 radeon_crtc_handle_flip(rdev, 5);
6271 rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
6272 DRM_DEBUG("IH: D6 vblank\n");
6273 }
6274 break;
6275 case 1: /* D6 vline */
6276 if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
6277 rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
6278 DRM_DEBUG("IH: D6 vline\n");
6279 }
6280 break;
6281 default:
6282 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
6283 break;
6284 }
6285 break;
6286 case 42: /* HPD hotplug */
6287 switch (src_data) {
6288 case 0:
6289 if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
6290 rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
6291 queue_hotplug = true;
6292 DRM_DEBUG("IH: HPD1\n");
6293 }
6294 break;
6295 case 1:
6296 if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
6297 rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
6298 queue_hotplug = true;
6299 DRM_DEBUG("IH: HPD2\n");
6300 }
6301 break;
6302 case 2:
6303 if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
6304 rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
6305 queue_hotplug = true;
6306 DRM_DEBUG("IH: HPD3\n");
6307 }
6308 break;
6309 case 3:
6310 if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
6311 rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
6312 queue_hotplug = true;
6313 DRM_DEBUG("IH: HPD4\n");
6314 }
6315 break;
6316 case 4:
6317 if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
6318 rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
6319 queue_hotplug = true;
6320 DRM_DEBUG("IH: HPD5\n");
6321 }
6322 break;
6323 case 5:
6324 if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
6325 rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
6326 queue_hotplug = true;
6327 DRM_DEBUG("IH: HPD6\n");
6328 }
6329 break;
6330 default:
6331 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
6332 break;
6333 }
6334 break;
Christian Königb927e1c2014-01-30 19:01:16 +01006335 case 124: /* UVD */
6336 DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
6337 radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
6338 break;
Christian Königae133a12012-09-18 15:30:44 -04006339 case 146:
6340 case 147:
Alex Deucherfbf6dc72013-06-13 18:47:58 -04006341 addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR);
6342 status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS);
Christian Königae133a12012-09-18 15:30:44 -04006343 dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
6344 dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
Alex Deucherfbf6dc72013-06-13 18:47:58 -04006345 addr);
Christian Königae133a12012-09-18 15:30:44 -04006346 dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
Alex Deucherfbf6dc72013-06-13 18:47:58 -04006347 status);
6348 si_vm_decode_fault(rdev, status, addr);
Christian Königae133a12012-09-18 15:30:44 -04006349 /* reset addr and status */
6350 WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
6351 break;
Alex Deucher25a857f2012-03-20 17:18:22 -04006352 case 176: /* RINGID0 CP_INT */
6353 radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
6354 break;
6355 case 177: /* RINGID1 CP_INT */
6356 radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
6357 break;
6358 case 178: /* RINGID2 CP_INT */
6359 radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
6360 break;
6361 case 181: /* CP EOP event */
6362 DRM_DEBUG("IH: CP EOP\n");
6363 switch (ring_id) {
6364 case 0:
6365 radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
6366 break;
6367 case 1:
6368 radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
6369 break;
6370 case 2:
6371 radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
6372 break;
6373 }
6374 break;
Alex Deucher8c5fd7e2012-12-04 15:28:18 -05006375 case 224: /* DMA trap event */
6376 DRM_DEBUG("IH: DMA trap\n");
6377 radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
6378 break;
Alex Deuchera9e61412013-06-25 17:56:16 -04006379 case 230: /* thermal low to high */
6380 DRM_DEBUG("IH: thermal low to high\n");
6381 rdev->pm.dpm.thermal.high_to_low = false;
6382 queue_thermal = true;
6383 break;
6384 case 231: /* thermal high to low */
6385 DRM_DEBUG("IH: thermal high to low\n");
6386 rdev->pm.dpm.thermal.high_to_low = true;
6387 queue_thermal = true;
6388 break;
Alex Deucher25a857f2012-03-20 17:18:22 -04006389 case 233: /* GUI IDLE */
6390 DRM_DEBUG("IH: GUI idle\n");
Alex Deucher25a857f2012-03-20 17:18:22 -04006391 break;
Alex Deucher8c5fd7e2012-12-04 15:28:18 -05006392 case 244: /* DMA trap event */
6393 DRM_DEBUG("IH: DMA1 trap\n");
6394 radeon_fence_process(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
6395 break;
Alex Deucher25a857f2012-03-20 17:18:22 -04006396 default:
6397 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
6398 break;
6399 }
6400
6401 /* wptr/rptr are in bytes! */
6402 rptr += 16;
6403 rptr &= rdev->ih.ptr_mask;
6404 }
Alex Deucher25a857f2012-03-20 17:18:22 -04006405 if (queue_hotplug)
6406 schedule_work(&rdev->hotplug_work);
Alex Deuchera9e61412013-06-25 17:56:16 -04006407 if (queue_thermal && rdev->pm.dpm_enabled)
6408 schedule_work(&rdev->pm.dpm.thermal.work);
Alex Deucher25a857f2012-03-20 17:18:22 -04006409 rdev->ih.rptr = rptr;
6410 WREG32(IH_RB_RPTR, rdev->ih.rptr);
Christian Koenigc20dc362012-05-16 21:45:24 +02006411 atomic_set(&rdev->ih.lock, 0);
6412
6413 /* make sure wptr hasn't changed while processing */
6414 wptr = si_get_ih_wptr(rdev);
6415 if (wptr != rptr)
6416 goto restart_ih;
6417
Alex Deucher25a857f2012-03-20 17:18:22 -04006418 return IRQ_HANDLED;
6419}
6420
/*
 * startup/shutdown callbacks
 */

/**
 * si_startup - bring the SI asic to a usable state
 *
 * @rdev: radeon_device pointer
 *
 * Called from si_init() and si_resume().  Programs the MC, loads
 * microcode, enables the gart, initializes the rings, interrupt
 * handling, the IB pool, the VM manager and audio.  The ordering of
 * the steps below is significant (e.g. the VRAM scratch page must
 * exist before the MC is programmed, and the rings are only started
 * after IRQs are set up); do not reorder casually.
 * Returns 0 on success, negative error code on failure.
 */
static int si_startup(struct radeon_device *rdev)
{
	struct radeon_ring *ring;
	int r;

	/* enable pcie gen2/3 link */
	si_pcie_gen3_enable(rdev);
	/* enable aspm */
	si_program_aspm(rdev);

	/* scratch needs to be initialized before MC */
	r = r600_vram_scratch_init(rdev);
	if (r)
		return r;

	si_mc_program(rdev);

	/* NOTE(review): when dpm is enabled the MC ucode is presumably
	 * already loaded by the dpm init path, hence the skip here —
	 * confirm against radeon_pm_init()/dpm code. */
	if (!rdev->pm.dpm_enabled) {
		r = si_mc_load_microcode(rdev);
		if (r) {
			DRM_ERROR("Failed to load MC firmware!\n");
			return r;
		}
	}

	r = si_pcie_gart_enable(rdev);
	if (r)
		return r;
	si_gpu_init(rdev);

	/* allocate rlc buffers */
	if (rdev->family == CHIP_VERDE) {
		/* only Verde gets an explicit save/restore register list */
		rdev->rlc.reg_list = verde_rlc_save_restore_register_list;
		rdev->rlc.reg_list_size =
			(u32)ARRAY_SIZE(verde_rlc_save_restore_register_list);
	}
	rdev->rlc.cs_data = si_cs_data;
	r = sumo_rlc_init(rdev);
	if (r) {
		DRM_ERROR("Failed to init rlc BOs!\n");
		return r;
	}

	/* allocate wb buffer */
	r = radeon_wb_init(rdev);
	if (r)
		return r;

	/* start fence handling on each ring: 3 CP rings, 2 DMA rings */
	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
		return r;
	}

	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
		return r;
	}

	/* UVD failure is non-fatal: on any error the UVD ring size is
	 * zeroed so the ring init below is skipped and the rest of the
	 * asic still comes up */
	if (rdev->has_uvd) {
		r = uvd_v2_2_resume(rdev);
		if (!r) {
			r = radeon_fence_driver_start_ring(rdev,
							   R600_RING_TYPE_UVD_INDEX);
			if (r)
				dev_err(rdev->dev, "UVD fences init error (%d).\n", r);
		}
		if (r)
			rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
	}

	/* Enable IRQ */
	if (!rdev->irq.installed) {
		r = radeon_irq_kms_init(rdev);
		if (r)
			return r;
	}

	r = si_irq_init(rdev);
	if (r) {
		DRM_ERROR("radeon: IH init failed (%d).\n", r);
		radeon_irq_kms_fini(rdev);
		return r;
	}
	si_irq_set(rdev);

	/* ring buffers: CP rings use CP_PACKET2 as the nop filler,
	 * DMA rings use a DMA NOP packet */
	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
			     RADEON_CP_PACKET2);
	if (r)
		return r;

	ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP1_RPTR_OFFSET,
			     RADEON_CP_PACKET2);
	if (r)
		return r;

	ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP2_RPTR_OFFSET,
			     RADEON_CP_PACKET2);
	if (r)
		return r;

	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
	r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
			     DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
	if (r)
		return r;

	ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
	r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
			     DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
	if (r)
		return r;

	r = si_cp_load_microcode(rdev);
	if (r)
		return r;
	r = si_cp_resume(rdev);
	if (r)
		return r;

	r = cayman_dma_resume(rdev);
	if (r)
		return r;

	/* ring_size is 0 if UVD resume failed above */
	if (rdev->has_uvd) {
		ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
		if (ring->ring_size) {
			r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
					     RADEON_CP_PACKET2);
			if (!r)
				r = uvd_v1_0_init(rdev);
			if (r)
				DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
		}
	}

	r = radeon_ib_pool_init(rdev);
	if (r) {
		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
		return r;
	}

	r = radeon_vm_manager_init(rdev);
	if (r) {
		dev_err(rdev->dev, "vm manager initialization failed (%d).\n", r);
		return r;
	}

	r = dce6_audio_init(rdev);
	if (r)
		return r;

	return 0;
}
6600
6601int si_resume(struct radeon_device *rdev)
6602{
6603 int r;
6604
6605 /* Do not reset GPU before posting, on rv770 hw unlike on r500 hw,
6606 * posting will perform necessary task to bring back GPU into good
6607 * shape.
6608 */
6609 /* post card */
6610 atom_asic_init(rdev->mode_info.atom_context);
6611
Alex Deucher205996c2013-03-01 17:08:42 -05006612 /* init golden registers */
6613 si_init_golden_registers(rdev);
6614
Alex Deucher6c7bcce2013-12-18 14:07:14 -05006615 radeon_pm_resume(rdev);
6616
Alex Deucher9b136d52012-03-20 17:18:23 -04006617 rdev->accel_working = true;
6618 r = si_startup(rdev);
6619 if (r) {
6620 DRM_ERROR("si startup failed on resume\n");
6621 rdev->accel_working = false;
6622 return r;
6623 }
6624
6625 return r;
6626
6627}
6628
/**
 * si_suspend - quiesce the asic for suspend
 *
 * @rdev: radeon_device pointer
 *
 * Stops all engines (CP, DMA, UVD), tears down power management,
 * audio, VM manager, clock/power gating, interrupts, writeback and
 * finally the gart.  The teardown order mirrors (roughly reversed)
 * the bring-up order in si_startup(); do not reorder casually.
 * Always returns 0.
 */
int si_suspend(struct radeon_device *rdev)
{
	radeon_pm_suspend(rdev);
	dce6_audio_fini(rdev);
	radeon_vm_manager_fini(rdev);
	/* halt command processors before touching anything else */
	si_cp_enable(rdev, false);
	cayman_dma_stop(rdev);
	if (rdev->has_uvd) {
		uvd_v1_0_fini(rdev);
		radeon_uvd_suspend(rdev);
	}
	/* disable power/clock gating while the engines are down */
	si_fini_pg(rdev);
	si_fini_cg(rdev);
	si_irq_suspend(rdev);
	radeon_wb_disable(rdev);
	si_pcie_gart_disable(rdev);
	return 0;
}
6647
/* Plan is to move initialization into this function and to use
 * helper functions so that radeon_device_init does little more
 * than call the asic-specific functions.  This should also allow
 * us to remove a bunch of callback functions like vram_info.
 */
/**
 * si_init - one-time asic init at driver load
 *
 * @rdev: radeon_device pointer
 *
 * Reads and validates the (ATOM) BIOS, posts the card if needed,
 * initializes clocks, the fence driver, the memory controller and
 * memory manager, loads firmware, sets up all ring structures and
 * then runs si_startup().  A startup failure disables acceleration
 * but is not fatal here; a missing MC ucode is.
 * Returns 0 on success, negative error code on failure.
 */
int si_init(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	int r;

	/* Read BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	/* Must be an ATOMBIOS */
	if (!rdev->is_atom_bios) {
		dev_err(rdev->dev, "Expecting atombios for cayman GPU\n");
		return -EINVAL;
	}
	r = radeon_atombios_init(rdev);
	if (r)
		return r;

	/* Post card if necessary */
	if (!radeon_card_posted(rdev)) {
		if (!rdev->bios) {
			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
			return -EINVAL;
		}
		DRM_INFO("GPU not posted. posting now...\n");
		atom_asic_init(rdev->mode_info.atom_context);
	}
	/* init golden registers */
	si_init_golden_registers(rdev);
	/* Initialize scratch registers */
	si_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);

	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;

	/* initialize memory controller */
	r = si_mc_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;

	/* load all required firmware images unless they are already
	 * present (e.g. after a previous init attempt) */
	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
	    !rdev->rlc_fw || !rdev->mc_fw) {
		r = si_init_microcode(rdev);
		if (r) {
			DRM_ERROR("Failed to load firmware!\n");
			return r;
		}
	}

	/* Initialize power management */
	radeon_pm_init(rdev);

	/* set up ring structures: 1 MB for each CP ring, 64 KB for each
	 * DMA ring; actual ring buffers are allocated in si_startup() */
	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	ring->ring_obj = NULL;
	r600_ring_init(rdev, ring, 1024 * 1024);

	ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
	ring->ring_obj = NULL;
	r600_ring_init(rdev, ring, 1024 * 1024);

	ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
	ring->ring_obj = NULL;
	r600_ring_init(rdev, ring, 1024 * 1024);

	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
	ring->ring_obj = NULL;
	r600_ring_init(rdev, ring, 64 * 1024);

	ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
	ring->ring_obj = NULL;
	r600_ring_init(rdev, ring, 64 * 1024);

	/* UVD init failure just skips the UVD ring setup */
	if (rdev->has_uvd) {
		r = radeon_uvd_init(rdev);
		if (!r) {
			ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
			ring->ring_obj = NULL;
			r600_ring_init(rdev, ring, 4096);
		}
	}

	rdev->ih.ring_obj = NULL;
	r600_ih_ring_init(rdev, 64 * 1024);

	r = r600_pcie_gart_init(rdev);
	if (r)
		return r;

	rdev->accel_working = true;
	r = si_startup(rdev);
	if (r) {
		/* startup failure is non-fatal: tear the acceleration
		 * paths back down and continue without accel */
		dev_err(rdev->dev, "disabling GPU acceleration\n");
		si_cp_fini(rdev);
		cayman_dma_fini(rdev);
		si_irq_fini(rdev);
		sumo_rlc_fini(rdev);
		radeon_wb_fini(rdev);
		radeon_ib_pool_fini(rdev);
		radeon_vm_manager_fini(rdev);
		radeon_irq_kms_fini(rdev);
		si_pcie_gart_fini(rdev);
		rdev->accel_working = false;
	}

	/* Don't start up if the MC ucode is missing.
	 * The default clocks and voltages before the MC ucode
	 * is loaded are not sufficient for advanced operations.
	 */
	if (!rdev->mc_fw) {
		DRM_ERROR("radeon: MC ucode required for NI+.\n");
		return -EINVAL;
	}

	return 0;
}
6780
/**
 * si_fini - final asic teardown at driver unload
 *
 * @rdev: radeon_device pointer
 *
 * Tears down everything si_init()/si_startup() set up, in reverse
 * dependency order, and frees the cached BIOS image.  The teardown
 * order matters (engines before IRQs before gart etc.); do not
 * reorder casually.
 */
void si_fini(struct radeon_device *rdev)
{
	radeon_pm_fini(rdev);
	si_cp_fini(rdev);
	cayman_dma_fini(rdev);
	si_fini_pg(rdev);
	si_fini_cg(rdev);
	si_irq_fini(rdev);
	sumo_rlc_fini(rdev);
	radeon_wb_fini(rdev);
	radeon_vm_manager_fini(rdev);
	radeon_ib_pool_fini(rdev);
	radeon_irq_kms_fini(rdev);
	if (rdev->has_uvd) {
		uvd_v1_0_fini(rdev);
		radeon_uvd_fini(rdev);
	}
	si_pcie_gart_fini(rdev);
	r600_vram_scratch_fini(rdev);
	radeon_gem_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	/* drop the cached BIOS copy; NULL the pointer since rdev lives on */
	kfree(rdev->bios);
	rdev->bios = NULL;
}
6807
Marek Olšák6759a0a2012-08-09 16:34:17 +02006808/**
Alex Deucherd0418892013-01-24 10:35:23 -05006809 * si_get_gpu_clock_counter - return GPU clock counter snapshot
Marek Olšák6759a0a2012-08-09 16:34:17 +02006810 *
6811 * @rdev: radeon_device pointer
6812 *
6813 * Fetches a GPU clock counter snapshot (SI).
6814 * Returns the 64 bit clock counter snapshot.
6815 */
Alex Deucherd0418892013-01-24 10:35:23 -05006816uint64_t si_get_gpu_clock_counter(struct radeon_device *rdev)
Marek Olšák6759a0a2012-08-09 16:34:17 +02006817{
6818 uint64_t clock;
6819
6820 mutex_lock(&rdev->gpu_clock_mutex);
6821 WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1);
6822 clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) |
6823 ((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
6824 mutex_unlock(&rdev->gpu_clock_mutex);
6825 return clock;
6826}
Christian König2539eb02013-04-08 12:41:34 +02006827
/**
 * si_set_uvd_clocks - program the UVD PLL for the requested clocks
 *
 * @rdev: radeon_device pointer
 * @vclk: requested UVD video clock (0 puts the PLL to sleep)
 * @dclk: requested UVD decode clock (0 puts the PLL to sleep)
 *
 * Switches VCLK/DCLK to bypass, reprograms the UPLL dividers for the
 * requested frequencies and switches back to PLL output.  The exact
 * sequence of register writes, control-request handshakes and delays
 * follows the hardware programming procedure; do not reorder.
 * Returns 0 on success, negative error code on failure.
 */
int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
{
	unsigned fb_div = 0, vclk_div = 0, dclk_div = 0;
	int r;

	/* bypass vclk and dclk with bclk */
	WREG32_P(CG_UPLL_FUNC_CNTL_2,
		 VCLK_SRC_SEL(1) | DCLK_SRC_SEL(1),
		 ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));

	/* put PLL in bypass mode */
	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~UPLL_BYPASS_EN_MASK);

	if (!vclk || !dclk) {
		/* keep the Bypass mode, put PLL to sleep */
		WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
		return 0;
	}

	/* compute feedback and post dividers for the requested clocks;
	 * the numeric arguments are the PLL constraint parameters
	 * (VCO range, fb-div limits etc.) for this asic */
	r = radeon_uvd_calc_upll_dividers(rdev, vclk, dclk, 125000, 250000,
					  16384, 0x03FFFFFF, 0, 128, 5,
					  &fb_div, &vclk_div, &dclk_div);
	if (r)
		return r;

	/* set RESET_ANTI_MUX to 0 */
	WREG32_P(CG_UPLL_FUNC_CNTL_5, 0, ~RESET_ANTI_MUX_MASK);

	/* set VCO_MODE to 1 */
	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_VCO_MODE_MASK, ~UPLL_VCO_MODE_MASK);

	/* toggle UPLL_SLEEP to 1 then back to 0 */
	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_SLEEP_MASK);

	/* deassert UPLL_RESET */
	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);

	mdelay(1);

	/* wait for the SMC to acknowledge the PLL change request */
	r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
	if (r)
		return r;

	/* assert UPLL_RESET again */
	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_RESET_MASK, ~UPLL_RESET_MASK);

	/* disable spread spectrum. */
	WREG32_P(CG_UPLL_SPREAD_SPECTRUM, 0, ~SSEN_MASK);

	/* set feedback divider */
	WREG32_P(CG_UPLL_FUNC_CNTL_3, UPLL_FB_DIV(fb_div), ~UPLL_FB_DIV_MASK);

	/* set ref divider to 0 */
	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_REF_DIV_MASK);

	/* NOTE(review): ISPARE9 appears to select a VCO range based on
	 * the feedback divider — threshold per the hw procedure */
	if (fb_div < 307200)
		WREG32_P(CG_UPLL_FUNC_CNTL_4, 0, ~UPLL_SPARE_ISPARE9);
	else
		WREG32_P(CG_UPLL_FUNC_CNTL_4, UPLL_SPARE_ISPARE9, ~UPLL_SPARE_ISPARE9);

	/* set PDIV_A and PDIV_B */
	WREG32_P(CG_UPLL_FUNC_CNTL_2,
		 UPLL_PDIV_A(vclk_div) | UPLL_PDIV_B(dclk_div),
		 ~(UPLL_PDIV_A_MASK | UPLL_PDIV_B_MASK));

	/* give the PLL some time to settle */
	mdelay(15);

	/* deassert PLL_RESET */
	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);

	mdelay(15);

	/* switch from bypass mode to normal mode */
	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_BYPASS_EN_MASK);

	r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
	if (r)
		return r;

	/* switch VCLK and DCLK selection */
	WREG32_P(CG_UPLL_FUNC_CNTL_2,
		 VCLK_SRC_SEL(2) | DCLK_SRC_SEL(2),
		 ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));

	mdelay(100);

	return 0;
}
Alex Deucherb9d305d2013-02-14 17:16:51 -05006918
/**
 * si_pcie_gen3_enable - upgrade the PCIe link to gen2/gen3 speeds
 *
 * @rdev: radeon_device pointer
 *
 * Checks the speed capabilities of the link, and if a faster data
 * rate than the current one is supported, retrains the link (with a
 * gen3 equalization retry sequence when targeting 8 GT/s).  The
 * config-space choreography between the root port and the GPU is
 * order-sensitive; do not reorder.  Disabled via the
 * radeon.pcie_gen2=0 module parameter.  No-op on IGP or non-PCIE
 * parts, or when the link already runs at the best supported rate.
 */
static void si_pcie_gen3_enable(struct radeon_device *rdev)
{
	struct pci_dev *root = rdev->pdev->bus->self;
	int bridge_pos, gpu_pos;
	u32 speed_cntl, mask, current_data_rate;
	int ret, i;
	u16 tmp16;

	if (radeon_pcie_gen2 == 0)
		return;

	if (rdev->flags & RADEON_IS_IGP)
		return;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return;

	ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
	if (ret != 0)
		return;

	if (!(mask & (DRM_PCIE_SPEED_50 | DRM_PCIE_SPEED_80)))
		return;

	/* current_data_rate: 0 = gen1, 1 = gen2, 2 = gen3 */
	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
	current_data_rate = (speed_cntl & LC_CURRENT_DATA_RATE_MASK) >>
		LC_CURRENT_DATA_RATE_SHIFT;
	if (mask & DRM_PCIE_SPEED_80) {
		if (current_data_rate == 2) {
			DRM_INFO("PCIE gen 3 link speeds already enabled\n");
			return;
		}
		DRM_INFO("enabling PCIE gen 3 link speeds, disable with radeon.pcie_gen2=0\n");
	} else if (mask & DRM_PCIE_SPEED_50) {
		if (current_data_rate == 1) {
			DRM_INFO("PCIE gen 2 link speeds already enabled\n");
			return;
		}
		DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
	}

	/* both endpoints must expose a PCIe capability */
	bridge_pos = pci_pcie_cap(root);
	if (!bridge_pos)
		return;

	gpu_pos = pci_pcie_cap(rdev->pdev);
	if (!gpu_pos)
		return;

	if (mask & DRM_PCIE_SPEED_80) {
		/* re-try equalization if gen3 is not already enabled */
		if (current_data_rate != 2) {
			u16 bridge_cfg, gpu_cfg;
			u16 bridge_cfg2, gpu_cfg2;
			u32 max_lw, current_lw, tmp;

			/* save the HAWD state on both sides, then force it on */
			pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
			pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);

			tmp16 = bridge_cfg | PCI_EXP_LNKCTL_HAWD;
			pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);

			tmp16 = gpu_cfg | PCI_EXP_LNKCTL_HAWD;
			pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);

			tmp = RREG32_PCIE(PCIE_LC_STATUS1);
			max_lw = (tmp & LC_DETECTED_LINK_WIDTH_MASK) >> LC_DETECTED_LINK_WIDTH_SHIFT;
			current_lw = (tmp & LC_OPERATING_LINK_WIDTH_MASK) >> LC_OPERATING_LINK_WIDTH_SHIFT;

			/* widen the link to the maximum detected width first */
			if (current_lw < max_lw) {
				tmp = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
				if (tmp & LC_RENEGOTIATION_SUPPORT) {
					tmp &= ~(LC_LINK_WIDTH_MASK | LC_UPCONFIGURE_DIS);
					tmp |= (max_lw << LC_LINK_WIDTH_SHIFT);
					tmp |= LC_UPCONFIGURE_SUPPORT | LC_RENEGOTIATE_EN | LC_RECONFIG_NOW;
					WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, tmp);
				}
			}

			/* up to 10 equalization retries */
			for (i = 0; i < 10; i++) {
				/* check status */
				pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_DEVSTA, &tmp16);
				if (tmp16 & PCI_EXP_DEVSTA_TRPND)
					break;

				/* snapshot link control state on both ends */
				pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
				pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);

				pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &bridge_cfg2);
				pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &gpu_cfg2);

				/* quiesce the link and redo equalization */
				tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
				tmp |= LC_SET_QUIESCE;
				WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);

				tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
				tmp |= LC_REDO_EQ;
				WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);

				mdelay(100);

				/* linkctl */
				pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &tmp16);
				tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
				tmp16 |= (bridge_cfg & PCI_EXP_LNKCTL_HAWD);
				pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);

				pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &tmp16);
				tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
				tmp16 |= (gpu_cfg & PCI_EXP_LNKCTL_HAWD);
				pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);

				/* linkctl2: restore target speed and compliance bits
				 * ((1 << 4) | (7 << 9)) */
				pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &tmp16);
				tmp16 &= ~((1 << 4) | (7 << 9));
				tmp16 |= (bridge_cfg2 & ((1 << 4) | (7 << 9)));
				pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, tmp16);

				pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
				tmp16 &= ~((1 << 4) | (7 << 9));
				tmp16 |= (gpu_cfg2 & ((1 << 4) | (7 << 9)));
				pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);

				tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
				tmp &= ~LC_SET_QUIESCE;
				WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
			}
		}
	}

	/* set the link speed */
	speed_cntl |= LC_FORCE_EN_SW_SPEED_CHANGE | LC_FORCE_DIS_HW_SPEED_CHANGE;
	speed_cntl &= ~LC_FORCE_DIS_SW_SPEED_CHANGE;
	WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);

	/* program the target link speed in the low nibble of LNKCTL2 */
	pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
	tmp16 &= ~0xf;
	if (mask & DRM_PCIE_SPEED_80)
		tmp16 |= 3; /* gen3 */
	else if (mask & DRM_PCIE_SPEED_50)
		tmp16 |= 2; /* gen2 */
	else
		tmp16 |= 1; /* gen1 */
	pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);

	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
	speed_cntl |= LC_INITIATE_LINK_SPEED_CHANGE;
	WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);

	/* wait for the hardware to clear the speed-change request */
	for (i = 0; i < rdev->usec_timeout; i++) {
		speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
		if ((speed_cntl & LC_INITIATE_LINK_SPEED_CHANGE) == 0)
			break;
		udelay(1);
	}
}
7075
Alex Deuchere0bcf1652013-02-15 11:56:59 -05007076static void si_program_aspm(struct radeon_device *rdev)
7077{
7078 u32 data, orig;
7079 bool disable_l0s = false, disable_l1 = false, disable_plloff_in_l1 = false;
7080 bool disable_clkreq = false;
7081
Alex Deucher1294d4a2013-07-16 15:58:50 -04007082 if (radeon_aspm == 0)
7083 return;
7084
Alex Deuchere0bcf1652013-02-15 11:56:59 -05007085 if (!(rdev->flags & RADEON_IS_PCIE))
7086 return;
7087
7088 orig = data = RREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL);
7089 data &= ~LC_XMIT_N_FTS_MASK;
7090 data |= LC_XMIT_N_FTS(0x24) | LC_XMIT_N_FTS_OVERRIDE_EN;
7091 if (orig != data)
7092 WREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL, data);
7093
7094 orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL3);
7095 data |= LC_GO_TO_RECOVERY;
7096 if (orig != data)
7097 WREG32_PCIE_PORT(PCIE_LC_CNTL3, data);
7098
7099 orig = data = RREG32_PCIE(PCIE_P_CNTL);
7100 data |= P_IGNORE_EDB_ERR;
7101 if (orig != data)
7102 WREG32_PCIE(PCIE_P_CNTL, data);
7103
7104 orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL);
7105 data &= ~(LC_L0S_INACTIVITY_MASK | LC_L1_INACTIVITY_MASK);
7106 data |= LC_PMI_TO_L1_DIS;
7107 if (!disable_l0s)
7108 data |= LC_L0S_INACTIVITY(7);
7109
7110 if (!disable_l1) {
7111 data |= LC_L1_INACTIVITY(7);
7112 data &= ~LC_PMI_TO_L1_DIS;
7113 if (orig != data)
7114 WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
7115
7116 if (!disable_plloff_in_l1) {
7117 bool clk_req_support;
7118
7119 orig = data = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_0);
7120 data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
7121 data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
7122 if (orig != data)
7123 WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_0, data);
7124
7125 orig = data = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_1);
7126 data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
7127 data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
7128 if (orig != data)
7129 WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_1, data);
7130
7131 orig = data = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_0);
7132 data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
7133 data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
7134 if (orig != data)
7135 WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_0, data);
7136
7137 orig = data = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_1);
7138 data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
7139 data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
7140 if (orig != data)
7141 WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_1, data);
7142
7143 if ((rdev->family != CHIP_OLAND) && (rdev->family != CHIP_HAINAN)) {
7144 orig = data = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_0);
7145 data &= ~PLL_RAMP_UP_TIME_0_MASK;
7146 if (orig != data)
7147 WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_0, data);
7148
7149 orig = data = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_1);
7150 data &= ~PLL_RAMP_UP_TIME_1_MASK;
7151 if (orig != data)
7152 WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_1, data);
7153
7154 orig = data = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_2);
7155 data &= ~PLL_RAMP_UP_TIME_2_MASK;
7156 if (orig != data)
7157 WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_2, data);
7158
7159 orig = data = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_3);
7160 data &= ~PLL_RAMP_UP_TIME_3_MASK;
7161 if (orig != data)
7162 WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_3, data);
7163
7164 orig = data = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_0);
7165 data &= ~PLL_RAMP_UP_TIME_0_MASK;
7166 if (orig != data)
7167 WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_0, data);
7168
7169 orig = data = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_1);
7170 data &= ~PLL_RAMP_UP_TIME_1_MASK;
7171 if (orig != data)
7172 WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_1, data);
7173
7174 orig = data = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_2);
7175 data &= ~PLL_RAMP_UP_TIME_2_MASK;
7176 if (orig != data)
7177 WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_2, data);
7178
7179 orig = data = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_3);
7180 data &= ~PLL_RAMP_UP_TIME_3_MASK;
7181 if (orig != data)
7182 WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_3, data);
7183 }
7184 orig = data = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
7185 data &= ~LC_DYN_LANES_PWR_STATE_MASK;
7186 data |= LC_DYN_LANES_PWR_STATE(3);
7187 if (orig != data)
7188 WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, data);
7189
7190 orig = data = RREG32_PIF_PHY0(PB0_PIF_CNTL);
7191 data &= ~LS2_EXIT_TIME_MASK;
7192 if ((rdev->family == CHIP_OLAND) || (rdev->family == CHIP_HAINAN))
7193 data |= LS2_EXIT_TIME(5);
7194 if (orig != data)
7195 WREG32_PIF_PHY0(PB0_PIF_CNTL, data);
7196
7197 orig = data = RREG32_PIF_PHY1(PB1_PIF_CNTL);
7198 data &= ~LS2_EXIT_TIME_MASK;
7199 if ((rdev->family == CHIP_OLAND) || (rdev->family == CHIP_HAINAN))
7200 data |= LS2_EXIT_TIME(5);
7201 if (orig != data)
7202 WREG32_PIF_PHY1(PB1_PIF_CNTL, data);
7203
7204 if (!disable_clkreq) {
7205 struct pci_dev *root = rdev->pdev->bus->self;
7206 u32 lnkcap;
7207
7208 clk_req_support = false;
7209 pcie_capability_read_dword(root, PCI_EXP_LNKCAP, &lnkcap);
7210 if (lnkcap & PCI_EXP_LNKCAP_CLKPM)
7211 clk_req_support = true;
7212 } else {
7213 clk_req_support = false;
7214 }
7215
7216 if (clk_req_support) {
7217 orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL2);
7218 data |= LC_ALLOW_PDWN_IN_L1 | LC_ALLOW_PDWN_IN_L23;
7219 if (orig != data)
7220 WREG32_PCIE_PORT(PCIE_LC_CNTL2, data);
7221
7222 orig = data = RREG32(THM_CLK_CNTL);
7223 data &= ~(CMON_CLK_SEL_MASK | TMON_CLK_SEL_MASK);
7224 data |= CMON_CLK_SEL(1) | TMON_CLK_SEL(1);
7225 if (orig != data)
7226 WREG32(THM_CLK_CNTL, data);
7227
7228 orig = data = RREG32(MISC_CLK_CNTL);
7229 data &= ~(DEEP_SLEEP_CLK_SEL_MASK | ZCLK_SEL_MASK);
7230 data |= DEEP_SLEEP_CLK_SEL(1) | ZCLK_SEL(1);
7231 if (orig != data)
7232 WREG32(MISC_CLK_CNTL, data);
7233
7234 orig = data = RREG32(CG_CLKPIN_CNTL);
7235 data &= ~BCLK_AS_XCLK;
7236 if (orig != data)
7237 WREG32(CG_CLKPIN_CNTL, data);
7238
7239 orig = data = RREG32(CG_CLKPIN_CNTL_2);
7240 data &= ~FORCE_BIF_REFCLK_EN;
7241 if (orig != data)
7242 WREG32(CG_CLKPIN_CNTL_2, data);
7243
7244 orig = data = RREG32(MPLL_BYPASSCLK_SEL);
7245 data &= ~MPLL_CLKOUT_SEL_MASK;
7246 data |= MPLL_CLKOUT_SEL(4);
7247 if (orig != data)
7248 WREG32(MPLL_BYPASSCLK_SEL, data);
7249
7250 orig = data = RREG32(SPLL_CNTL_MODE);
7251 data &= ~SPLL_REFCLK_SEL_MASK;
7252 if (orig != data)
7253 WREG32(SPLL_CNTL_MODE, data);
7254 }
7255 }
7256 } else {
7257 if (orig != data)
7258 WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
7259 }
7260
7261 orig = data = RREG32_PCIE(PCIE_CNTL2);
7262 data |= SLV_MEM_LS_EN | MST_MEM_LS_EN | REPLAY_MEM_LS_EN;
7263 if (orig != data)
7264 WREG32_PCIE(PCIE_CNTL2, data);
7265
7266 if (!disable_l0s) {
7267 data = RREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL);
7268 if((data & LC_N_FTS_MASK) == LC_N_FTS_MASK) {
7269 data = RREG32_PCIE(PCIE_LC_STATUS1);
7270 if ((data & LC_REVERSE_XMIT) && (data & LC_REVERSE_RCVR)) {
7271 orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL);
7272 data &= ~LC_L0S_INACTIVITY_MASK;
7273 if (orig != data)
7274 WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
7275 }
7276 }
7277 }
7278}