blob: 22ecbc07e9a67f2c14f9b283002900fbff257395 [file] [log] [blame]
Alex Deucher43b3cd92012-03-20 17:18:00 -04001/*
2 * Copyright 2011 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Alex Deucher
23 */
Alex Deucher0f0de062012-03-20 17:18:17 -040024#include <linux/firmware.h>
Alex Deucher0f0de062012-03-20 17:18:17 -040025#include <linux/slab.h>
26#include <linux/module.h>
David Howells760285e2012-10-02 18:01:07 +010027#include <drm/drmP.h>
Alex Deucher43b3cd92012-03-20 17:18:00 -040028#include "radeon.h"
29#include "radeon_asic.h"
David Howells760285e2012-10-02 18:01:07 +010030#include <drm/radeon_drm.h>
Alex Deucher43b3cd92012-03-20 17:18:00 -040031#include "sid.h"
32#include "atom.h"
Alex Deucher48c0c902012-03-20 17:18:19 -040033#include "si_blit_shaders.h"
Alex Deucherbd8cd532013-04-12 16:48:21 -040034#include "clearstate_si.h"
Alex Deuchera0ceada2013-03-27 15:18:04 -040035#include "radeon_ucode.h"
Alex Deucher43b3cd92012-03-20 17:18:00 -040036
Alex Deucher0f0de062012-03-20 17:18:17 -040037
/*
 * Firmware images this driver may request at runtime, one set per
 * Southern Islands ASIC (TAHITI, PITCAIRN, VERDE, OLAND, HAINAN).
 * Suffixes: pfp/me/ce = command-processor microcode, mc/mc2 = memory
 * controller ucode (mc2 is a later revision), rlc = run-list controller,
 * smc = power-management controller — presumably matching the usual
 * radeon firmware naming; names must match the files shipped in
 * linux-firmware under radeon/.
 */
MODULE_FIRMWARE("radeon/TAHITI_pfp.bin");
MODULE_FIRMWARE("radeon/TAHITI_me.bin");
MODULE_FIRMWARE("radeon/TAHITI_ce.bin");
MODULE_FIRMWARE("radeon/TAHITI_mc.bin");
MODULE_FIRMWARE("radeon/TAHITI_mc2.bin");
MODULE_FIRMWARE("radeon/TAHITI_rlc.bin");
MODULE_FIRMWARE("radeon/TAHITI_smc.bin");
MODULE_FIRMWARE("radeon/PITCAIRN_pfp.bin");
MODULE_FIRMWARE("radeon/PITCAIRN_me.bin");
MODULE_FIRMWARE("radeon/PITCAIRN_ce.bin");
MODULE_FIRMWARE("radeon/PITCAIRN_mc.bin");
MODULE_FIRMWARE("radeon/PITCAIRN_mc2.bin");
MODULE_FIRMWARE("radeon/PITCAIRN_rlc.bin");
MODULE_FIRMWARE("radeon/PITCAIRN_smc.bin");
MODULE_FIRMWARE("radeon/VERDE_pfp.bin");
MODULE_FIRMWARE("radeon/VERDE_me.bin");
MODULE_FIRMWARE("radeon/VERDE_ce.bin");
MODULE_FIRMWARE("radeon/VERDE_mc.bin");
MODULE_FIRMWARE("radeon/VERDE_mc2.bin");
MODULE_FIRMWARE("radeon/VERDE_rlc.bin");
MODULE_FIRMWARE("radeon/VERDE_smc.bin");
MODULE_FIRMWARE("radeon/OLAND_pfp.bin");
MODULE_FIRMWARE("radeon/OLAND_me.bin");
MODULE_FIRMWARE("radeon/OLAND_ce.bin");
MODULE_FIRMWARE("radeon/OLAND_mc.bin");
MODULE_FIRMWARE("radeon/OLAND_mc2.bin");
MODULE_FIRMWARE("radeon/OLAND_rlc.bin");
MODULE_FIRMWARE("radeon/OLAND_smc.bin");
MODULE_FIRMWARE("radeon/HAINAN_pfp.bin");
MODULE_FIRMWARE("radeon/HAINAN_me.bin");
MODULE_FIRMWARE("radeon/HAINAN_ce.bin");
MODULE_FIRMWARE("radeon/HAINAN_mc.bin");
MODULE_FIRMWARE("radeon/HAINAN_mc2.bin");
MODULE_FIRMWARE("radeon/HAINAN_rlc.bin");
MODULE_FIRMWARE("radeon/HAINAN_smc.bin");
Alex Deucher0f0de062012-03-20 17:18:17 -040073
/*
 * Forward declarations: the static si_* helpers are defined later in this
 * file; the extern sumo_*/r600_*/evergreen_* routines live in the
 * corresponding older-ASIC files and are reused here.
 */
static void si_pcie_gen3_enable(struct radeon_device *rdev);
static void si_program_aspm(struct radeon_device *rdev);
extern void sumo_rlc_fini(struct radeon_device *rdev);
extern int sumo_rlc_init(struct radeon_device *rdev);
extern int r600_ih_ring_alloc(struct radeon_device *rdev);
extern void r600_ih_ring_fini(struct radeon_device *rdev);
extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev);
extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save);
extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save);
extern u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev);
extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev);
extern bool evergreen_is_display_hung(struct radeon_device *rdev);
static void si_enable_gui_idle_interrupt(struct radeon_device *rdev,
					 bool enable);
static void si_init_pg(struct radeon_device *rdev);
static void si_init_cg(struct radeon_device *rdev);
static void si_fini_pg(struct radeon_device *rdev);
static void si_fini_cg(struct radeon_device *rdev);
static void si_rlc_stop(struct radeon_device *rdev);
Alex Deucher6d8cf002013-03-06 18:48:05 -050094static const u32 verde_rlc_save_restore_register_list[] =
95{
96 (0x8000 << 16) | (0x98f4 >> 2),
97 0x00000000,
98 (0x8040 << 16) | (0x98f4 >> 2),
99 0x00000000,
100 (0x8000 << 16) | (0xe80 >> 2),
101 0x00000000,
102 (0x8040 << 16) | (0xe80 >> 2),
103 0x00000000,
104 (0x8000 << 16) | (0x89bc >> 2),
105 0x00000000,
106 (0x8040 << 16) | (0x89bc >> 2),
107 0x00000000,
108 (0x8000 << 16) | (0x8c1c >> 2),
109 0x00000000,
110 (0x8040 << 16) | (0x8c1c >> 2),
111 0x00000000,
112 (0x9c00 << 16) | (0x98f0 >> 2),
113 0x00000000,
114 (0x9c00 << 16) | (0xe7c >> 2),
115 0x00000000,
116 (0x8000 << 16) | (0x9148 >> 2),
117 0x00000000,
118 (0x8040 << 16) | (0x9148 >> 2),
119 0x00000000,
120 (0x9c00 << 16) | (0x9150 >> 2),
121 0x00000000,
122 (0x9c00 << 16) | (0x897c >> 2),
123 0x00000000,
124 (0x9c00 << 16) | (0x8d8c >> 2),
125 0x00000000,
126 (0x9c00 << 16) | (0xac54 >> 2),
127 0X00000000,
128 0x3,
129 (0x9c00 << 16) | (0x98f8 >> 2),
130 0x00000000,
131 (0x9c00 << 16) | (0x9910 >> 2),
132 0x00000000,
133 (0x9c00 << 16) | (0x9914 >> 2),
134 0x00000000,
135 (0x9c00 << 16) | (0x9918 >> 2),
136 0x00000000,
137 (0x9c00 << 16) | (0x991c >> 2),
138 0x00000000,
139 (0x9c00 << 16) | (0x9920 >> 2),
140 0x00000000,
141 (0x9c00 << 16) | (0x9924 >> 2),
142 0x00000000,
143 (0x9c00 << 16) | (0x9928 >> 2),
144 0x00000000,
145 (0x9c00 << 16) | (0x992c >> 2),
146 0x00000000,
147 (0x9c00 << 16) | (0x9930 >> 2),
148 0x00000000,
149 (0x9c00 << 16) | (0x9934 >> 2),
150 0x00000000,
151 (0x9c00 << 16) | (0x9938 >> 2),
152 0x00000000,
153 (0x9c00 << 16) | (0x993c >> 2),
154 0x00000000,
155 (0x9c00 << 16) | (0x9940 >> 2),
156 0x00000000,
157 (0x9c00 << 16) | (0x9944 >> 2),
158 0x00000000,
159 (0x9c00 << 16) | (0x9948 >> 2),
160 0x00000000,
161 (0x9c00 << 16) | (0x994c >> 2),
162 0x00000000,
163 (0x9c00 << 16) | (0x9950 >> 2),
164 0x00000000,
165 (0x9c00 << 16) | (0x9954 >> 2),
166 0x00000000,
167 (0x9c00 << 16) | (0x9958 >> 2),
168 0x00000000,
169 (0x9c00 << 16) | (0x995c >> 2),
170 0x00000000,
171 (0x9c00 << 16) | (0x9960 >> 2),
172 0x00000000,
173 (0x9c00 << 16) | (0x9964 >> 2),
174 0x00000000,
175 (0x9c00 << 16) | (0x9968 >> 2),
176 0x00000000,
177 (0x9c00 << 16) | (0x996c >> 2),
178 0x00000000,
179 (0x9c00 << 16) | (0x9970 >> 2),
180 0x00000000,
181 (0x9c00 << 16) | (0x9974 >> 2),
182 0x00000000,
183 (0x9c00 << 16) | (0x9978 >> 2),
184 0x00000000,
185 (0x9c00 << 16) | (0x997c >> 2),
186 0x00000000,
187 (0x9c00 << 16) | (0x9980 >> 2),
188 0x00000000,
189 (0x9c00 << 16) | (0x9984 >> 2),
190 0x00000000,
191 (0x9c00 << 16) | (0x9988 >> 2),
192 0x00000000,
193 (0x9c00 << 16) | (0x998c >> 2),
194 0x00000000,
195 (0x9c00 << 16) | (0x8c00 >> 2),
196 0x00000000,
197 (0x9c00 << 16) | (0x8c14 >> 2),
198 0x00000000,
199 (0x9c00 << 16) | (0x8c04 >> 2),
200 0x00000000,
201 (0x9c00 << 16) | (0x8c08 >> 2),
202 0x00000000,
203 (0x8000 << 16) | (0x9b7c >> 2),
204 0x00000000,
205 (0x8040 << 16) | (0x9b7c >> 2),
206 0x00000000,
207 (0x8000 << 16) | (0xe84 >> 2),
208 0x00000000,
209 (0x8040 << 16) | (0xe84 >> 2),
210 0x00000000,
211 (0x8000 << 16) | (0x89c0 >> 2),
212 0x00000000,
213 (0x8040 << 16) | (0x89c0 >> 2),
214 0x00000000,
215 (0x8000 << 16) | (0x914c >> 2),
216 0x00000000,
217 (0x8040 << 16) | (0x914c >> 2),
218 0x00000000,
219 (0x8000 << 16) | (0x8c20 >> 2),
220 0x00000000,
221 (0x8040 << 16) | (0x8c20 >> 2),
222 0x00000000,
223 (0x8000 << 16) | (0x9354 >> 2),
224 0x00000000,
225 (0x8040 << 16) | (0x9354 >> 2),
226 0x00000000,
227 (0x9c00 << 16) | (0x9060 >> 2),
228 0x00000000,
229 (0x9c00 << 16) | (0x9364 >> 2),
230 0x00000000,
231 (0x9c00 << 16) | (0x9100 >> 2),
232 0x00000000,
233 (0x9c00 << 16) | (0x913c >> 2),
234 0x00000000,
235 (0x8000 << 16) | (0x90e0 >> 2),
236 0x00000000,
237 (0x8000 << 16) | (0x90e4 >> 2),
238 0x00000000,
239 (0x8000 << 16) | (0x90e8 >> 2),
240 0x00000000,
241 (0x8040 << 16) | (0x90e0 >> 2),
242 0x00000000,
243 (0x8040 << 16) | (0x90e4 >> 2),
244 0x00000000,
245 (0x8040 << 16) | (0x90e8 >> 2),
246 0x00000000,
247 (0x9c00 << 16) | (0x8bcc >> 2),
248 0x00000000,
249 (0x9c00 << 16) | (0x8b24 >> 2),
250 0x00000000,
251 (0x9c00 << 16) | (0x88c4 >> 2),
252 0x00000000,
253 (0x9c00 << 16) | (0x8e50 >> 2),
254 0x00000000,
255 (0x9c00 << 16) | (0x8c0c >> 2),
256 0x00000000,
257 (0x9c00 << 16) | (0x8e58 >> 2),
258 0x00000000,
259 (0x9c00 << 16) | (0x8e5c >> 2),
260 0x00000000,
261 (0x9c00 << 16) | (0x9508 >> 2),
262 0x00000000,
263 (0x9c00 << 16) | (0x950c >> 2),
264 0x00000000,
265 (0x9c00 << 16) | (0x9494 >> 2),
266 0x00000000,
267 (0x9c00 << 16) | (0xac0c >> 2),
268 0x00000000,
269 (0x9c00 << 16) | (0xac10 >> 2),
270 0x00000000,
271 (0x9c00 << 16) | (0xac14 >> 2),
272 0x00000000,
273 (0x9c00 << 16) | (0xae00 >> 2),
274 0x00000000,
275 (0x9c00 << 16) | (0xac08 >> 2),
276 0x00000000,
277 (0x9c00 << 16) | (0x88d4 >> 2),
278 0x00000000,
279 (0x9c00 << 16) | (0x88c8 >> 2),
280 0x00000000,
281 (0x9c00 << 16) | (0x88cc >> 2),
282 0x00000000,
283 (0x9c00 << 16) | (0x89b0 >> 2),
284 0x00000000,
285 (0x9c00 << 16) | (0x8b10 >> 2),
286 0x00000000,
287 (0x9c00 << 16) | (0x8a14 >> 2),
288 0x00000000,
289 (0x9c00 << 16) | (0x9830 >> 2),
290 0x00000000,
291 (0x9c00 << 16) | (0x9834 >> 2),
292 0x00000000,
293 (0x9c00 << 16) | (0x9838 >> 2),
294 0x00000000,
295 (0x9c00 << 16) | (0x9a10 >> 2),
296 0x00000000,
297 (0x8000 << 16) | (0x9870 >> 2),
298 0x00000000,
299 (0x8000 << 16) | (0x9874 >> 2),
300 0x00000000,
301 (0x8001 << 16) | (0x9870 >> 2),
302 0x00000000,
303 (0x8001 << 16) | (0x9874 >> 2),
304 0x00000000,
305 (0x8040 << 16) | (0x9870 >> 2),
306 0x00000000,
307 (0x8040 << 16) | (0x9874 >> 2),
308 0x00000000,
309 (0x8041 << 16) | (0x9870 >> 2),
310 0x00000000,
311 (0x8041 << 16) | (0x9874 >> 2),
312 0x00000000,
313 0x00000000
314};
315
/*
 * "Golden" RLC register overrides for TAHITI.
 * { offset, and_mask, or_value } triples — presumably applied as
 * reg = (reg & ~and_mask) | or_value by the common golden-register
 * programmer; confirm against the init path that consumes these tables.
 */
static const u32 tahiti_golden_rlc_registers[] =
{
	0xc424, 0xffffffff, 0x00601005,
	0xc47c, 0xffffffff, 0x10104040,
	0xc488, 0xffffffff, 0x0100000a,
	0xc314, 0xffffffff, 0x00000800,
	0xc30c, 0xffffffff, 0x800000f4,
	0xf4a8, 0xffffffff, 0x00000000
};
325
/*
 * "Golden" register overrides for TAHITI.
 * { offset, and_mask, or_value } triples (see note on
 * tahiti_golden_rlc_registers for the presumed application semantics).
 */
static const u32 tahiti_golden_registers[] =
{
	0x9a10, 0x00010000, 0x00018208,
	0x9830, 0xffffffff, 0x00000000,
	0x9834, 0xf00fffff, 0x00000400,
	0x9838, 0x0002021c, 0x00020200,
	0xc78, 0x00000080, 0x00000000,
	0xd030, 0x000300c0, 0x00800040,
	0xd830, 0x000300c0, 0x00800040,
	0x5bb0, 0x000000f0, 0x00000070,
	0x5bc0, 0x00200000, 0x50100000,
	0x7030, 0x31000311, 0x00000011,
	0x277c, 0x00000003, 0x000007ff,
	0x240c, 0x000007ff, 0x00000000,
	0x8a14, 0xf000001f, 0x00000007,
	0x8b24, 0xffffffff, 0x00ffffff,
	0x8b10, 0x0000ff0f, 0x00000000,
	0x28a4c, 0x07ffffff, 0x4e000000,
	0x28350, 0x3f3f3fff, 0x2a00126a,
	0x30, 0x000000ff, 0x0040,
	0x34, 0x00000040, 0x00004040,
	0x9100, 0x07ffffff, 0x03000000,
	0x8e88, 0x01ff1f3f, 0x00000000,
	0x8e84, 0x01ff1f3f, 0x00000000,
	0x9060, 0x0000007f, 0x00000020,
	0x9508, 0x00010000, 0x00010000,
	0xac14, 0x00000200, 0x000002fb,
	0xac10, 0xffffffff, 0x0000543b,
	0xac0c, 0xffffffff, 0xa9210876,
	0x88d0, 0xffffffff, 0x000fff40,
	0x88d4, 0x0000001f, 0x00000010,
	0x1410, 0x20000000, 0x20fffed8,
	0x15c0, 0x000c0fc0, 0x000c0400
};
360
/* Second TAHITI golden-register table: a single { offset, mask, value }
 * triple (kept separate from tahiti_golden_registers upstream). */
static const u32 tahiti_golden_registers2[] =
{
	0xc64, 0x00000001, 0x00000001
};
365
/* "Golden" RLC register overrides for PITCAIRN:
 * { offset, and_mask, or_value } triples. */
static const u32 pitcairn_golden_rlc_registers[] =
{
	0xc424, 0xffffffff, 0x00601004,
	0xc47c, 0xffffffff, 0x10102020,
	0xc488, 0xffffffff, 0x01000020,
	0xc314, 0xffffffff, 0x00000800,
	0xc30c, 0xffffffff, 0x800000a4
};
374
/* "Golden" register overrides for PITCAIRN:
 * { offset, and_mask, or_value } triples. */
static const u32 pitcairn_golden_registers[] =
{
	0x9a10, 0x00010000, 0x00018208,
	0x9830, 0xffffffff, 0x00000000,
	0x9834, 0xf00fffff, 0x00000400,
	0x9838, 0x0002021c, 0x00020200,
	0xc78, 0x00000080, 0x00000000,
	0xd030, 0x000300c0, 0x00800040,
	0xd830, 0x000300c0, 0x00800040,
	0x5bb0, 0x000000f0, 0x00000070,
	0x5bc0, 0x00200000, 0x50100000,
	0x7030, 0x31000311, 0x00000011,
	0x2ae4, 0x00073ffe, 0x000022a2,
	0x240c, 0x000007ff, 0x00000000,
	0x8a14, 0xf000001f, 0x00000007,
	0x8b24, 0xffffffff, 0x00ffffff,
	0x8b10, 0x0000ff0f, 0x00000000,
	0x28a4c, 0x07ffffff, 0x4e000000,
	0x28350, 0x3f3f3fff, 0x2a00126a,
	0x30, 0x000000ff, 0x0040,
	0x34, 0x00000040, 0x00004040,
	0x9100, 0x07ffffff, 0x03000000,
	0x9060, 0x0000007f, 0x00000020,
	0x9508, 0x00010000, 0x00010000,
	0xac14, 0x000003ff, 0x000000f7,
	0xac10, 0xffffffff, 0x00000000,
	0xac0c, 0xffffffff, 0x32761054,
	0x88d4, 0x0000001f, 0x00000010,
	0x15c0, 0x000c0fc0, 0x000c0400
};
405
/* "Golden" RLC register overrides for VERDE:
 * { offset, and_mask, or_value } triples. */
static const u32 verde_golden_rlc_registers[] =
{
	0xc424, 0xffffffff, 0x033f1005,
	0xc47c, 0xffffffff, 0x10808020,
	0xc488, 0xffffffff, 0x00800008,
	0xc314, 0xffffffff, 0x00001000,
	0xc30c, 0xffffffff, 0x80010014
};
414
/*
 * "Golden" register overrides for VERDE:
 * { offset, and_mask, or_value } triples.
 * NOTE(review): several triples are repeated verbatim in the upstream
 * table (e.g. 0xd030, 0x2ae4, 0x28350); the repetition is preserved
 * here — possibly intentional multi-write, possibly historical; do not
 * dedupe without confirming against upstream.
 */
static const u32 verde_golden_registers[] =
{
	0x9a10, 0x00010000, 0x00018208,
	0x9830, 0xffffffff, 0x00000000,
	0x9834, 0xf00fffff, 0x00000400,
	0x9838, 0x0002021c, 0x00020200,
	0xc78, 0x00000080, 0x00000000,
	0xd030, 0x000300c0, 0x00800040,
	0xd030, 0x000300c0, 0x00800040,
	0xd830, 0x000300c0, 0x00800040,
	0xd830, 0x000300c0, 0x00800040,
	0x5bb0, 0x000000f0, 0x00000070,
	0x5bc0, 0x00200000, 0x50100000,
	0x7030, 0x31000311, 0x00000011,
	0x2ae4, 0x00073ffe, 0x000022a2,
	0x2ae4, 0x00073ffe, 0x000022a2,
	0x2ae4, 0x00073ffe, 0x000022a2,
	0x240c, 0x000007ff, 0x00000000,
	0x240c, 0x000007ff, 0x00000000,
	0x240c, 0x000007ff, 0x00000000,
	0x8a14, 0xf000001f, 0x00000007,
	0x8a14, 0xf000001f, 0x00000007,
	0x8a14, 0xf000001f, 0x00000007,
	0x8b24, 0xffffffff, 0x00ffffff,
	0x8b10, 0x0000ff0f, 0x00000000,
	0x28a4c, 0x07ffffff, 0x4e000000,
	0x28350, 0x3f3f3fff, 0x0000124a,
	0x28350, 0x3f3f3fff, 0x0000124a,
	0x28350, 0x3f3f3fff, 0x0000124a,
	0x30, 0x000000ff, 0x0040,
	0x34, 0x00000040, 0x00004040,
	0x9100, 0x07ffffff, 0x03000000,
	0x9100, 0x07ffffff, 0x03000000,
	0x8e88, 0x01ff1f3f, 0x00000000,
	0x8e88, 0x01ff1f3f, 0x00000000,
	0x8e88, 0x01ff1f3f, 0x00000000,
	0x8e84, 0x01ff1f3f, 0x00000000,
	0x8e84, 0x01ff1f3f, 0x00000000,
	0x8e84, 0x01ff1f3f, 0x00000000,
	0x9060, 0x0000007f, 0x00000020,
	0x9508, 0x00010000, 0x00010000,
	0xac14, 0x000003ff, 0x00000003,
	0xac14, 0x000003ff, 0x00000003,
	0xac14, 0x000003ff, 0x00000003,
	0xac10, 0xffffffff, 0x00000000,
	0xac10, 0xffffffff, 0x00000000,
	0xac10, 0xffffffff, 0x00000000,
	0xac0c, 0xffffffff, 0x00001032,
	0xac0c, 0xffffffff, 0x00001032,
	0xac0c, 0xffffffff, 0x00001032,
	0x88d4, 0x0000001f, 0x00000010,
	0x88d4, 0x0000001f, 0x00000010,
	0x88d4, 0x0000001f, 0x00000010,
	0x15c0, 0x000c0fc0, 0x000c0400
};
470
/* "Golden" RLC register overrides for OLAND:
 * { offset, and_mask, or_value } triples. */
static const u32 oland_golden_rlc_registers[] =
{
	0xc424, 0xffffffff, 0x00601005,
	0xc47c, 0xffffffff, 0x10104040,
	0xc488, 0xffffffff, 0x0100000a,
	0xc314, 0xffffffff, 0x00000800,
	0xc30c, 0xffffffff, 0x800000f4
};
479
/* "Golden" register overrides for OLAND:
 * { offset, and_mask, or_value } triples. */
static const u32 oland_golden_registers[] =
{
	0x9a10, 0x00010000, 0x00018208,
	0x9830, 0xffffffff, 0x00000000,
	0x9834, 0xf00fffff, 0x00000400,
	0x9838, 0x0002021c, 0x00020200,
	0xc78, 0x00000080, 0x00000000,
	0xd030, 0x000300c0, 0x00800040,
	0xd830, 0x000300c0, 0x00800040,
	0x5bb0, 0x000000f0, 0x00000070,
	0x5bc0, 0x00200000, 0x50100000,
	0x7030, 0x31000311, 0x00000011,
	0x2ae4, 0x00073ffe, 0x000022a2,
	0x240c, 0x000007ff, 0x00000000,
	0x8a14, 0xf000001f, 0x00000007,
	0x8b24, 0xffffffff, 0x00ffffff,
	0x8b10, 0x0000ff0f, 0x00000000,
	0x28a4c, 0x07ffffff, 0x4e000000,
	0x28350, 0x3f3f3fff, 0x00000082,
	0x30, 0x000000ff, 0x0040,
	0x34, 0x00000040, 0x00004040,
	0x9100, 0x07ffffff, 0x03000000,
	0x9060, 0x0000007f, 0x00000020,
	0x9508, 0x00010000, 0x00010000,
	0xac14, 0x000003ff, 0x000000f3,
	0xac10, 0xffffffff, 0x00000000,
	0xac0c, 0xffffffff, 0x00003210,
	0x88d4, 0x0000001f, 0x00000010,
	0x15c0, 0x000c0fc0, 0x000c0400
};
510
/* "Golden" register overrides for HAINAN:
 * { offset, and_mask, or_value } triples. */
static const u32 hainan_golden_registers[] =
{
	0x9a10, 0x00010000, 0x00018208,
	0x9830, 0xffffffff, 0x00000000,
	0x9834, 0xf00fffff, 0x00000400,
	0x9838, 0x0002021c, 0x00020200,
	0xd0c0, 0xff000fff, 0x00000100,
	0xd030, 0x000300c0, 0x00800040,
	0xd8c0, 0xff000fff, 0x00000100,
	0xd830, 0x000300c0, 0x00800040,
	0x2ae4, 0x00073ffe, 0x000022a2,
	0x240c, 0x000007ff, 0x00000000,
	0x8a14, 0xf000001f, 0x00000007,
	0x8b24, 0xffffffff, 0x00ffffff,
	0x8b10, 0x0000ff0f, 0x00000000,
	0x28a4c, 0x07ffffff, 0x4e000000,
	0x28350, 0x3f3f3fff, 0x00000000,
	0x30, 0x000000ff, 0x0040,
	0x34, 0x00000040, 0x00004040,
	0x9100, 0x03e00000, 0x03600000,
	0x9060, 0x0000007f, 0x00000020,
	0x9508, 0x00010000, 0x00010000,
	0xac14, 0x000003ff, 0x000000f1,
	0xac10, 0xffffffff, 0x00000000,
	0xac0c, 0xffffffff, 0x00003210,
	0x88d4, 0x0000001f, 0x00000010,
	0x15c0, 0x000c0fc0, 0x000c0400
};
539
/* Second HAINAN golden-register table: a single { offset, mask, value }
 * triple. */
static const u32 hainan_golden_registers2[] =
{
	0x98f8, 0xffffffff, 0x02010001
};
544
/*
 * Clock-gating (MGCG/CGCG, per the table name) init sequence for TAHITI.
 * { offset, and_mask, or_value } triples, same presumed application
 * semantics as the golden-register tables above.
 */
static const u32 tahiti_mgcg_cgcg_init[] =
{
	0xc400, 0xffffffff, 0xfffffffc,
	0x802c, 0xffffffff, 0xe0000000,
	0x9a60, 0xffffffff, 0x00000100,
	0x92a4, 0xffffffff, 0x00000100,
	0xc164, 0xffffffff, 0x00000100,
	0x9774, 0xffffffff, 0x00000100,
	0x8984, 0xffffffff, 0x06000100,
	0x8a18, 0xffffffff, 0x00000100,
	0x92a0, 0xffffffff, 0x00000100,
	0xc380, 0xffffffff, 0x00000100,
	0x8b28, 0xffffffff, 0x00000100,
	0x9144, 0xffffffff, 0x00000100,
	0x8d88, 0xffffffff, 0x00000100,
	0x8d8c, 0xffffffff, 0x00000100,
	0x9030, 0xffffffff, 0x00000100,
	0x9034, 0xffffffff, 0x00000100,
	0x9038, 0xffffffff, 0x00000100,
	0x903c, 0xffffffff, 0x00000100,
	0xad80, 0xffffffff, 0x00000100,
	0xac54, 0xffffffff, 0x00000100,
	0x897c, 0xffffffff, 0x06000100,
	0x9868, 0xffffffff, 0x00000100,
	0x9510, 0xffffffff, 0x00000100,
	0xaf04, 0xffffffff, 0x00000100,
	0xae04, 0xffffffff, 0x00000100,
	0x949c, 0xffffffff, 0x00000100,
	0x802c, 0xffffffff, 0xe0000000,
	0x9160, 0xffffffff, 0x00010000,
	0x9164, 0xffffffff, 0x00030002,
	0x9168, 0xffffffff, 0x00040007,
	0x916c, 0xffffffff, 0x00060005,
	0x9170, 0xffffffff, 0x00090008,
	0x9174, 0xffffffff, 0x00020001,
	0x9178, 0xffffffff, 0x00040003,
	0x917c, 0xffffffff, 0x00000007,
	0x9180, 0xffffffff, 0x00060005,
	0x9184, 0xffffffff, 0x00090008,
	0x9188, 0xffffffff, 0x00030002,
	0x918c, 0xffffffff, 0x00050004,
	0x9190, 0xffffffff, 0x00000008,
	0x9194, 0xffffffff, 0x00070006,
	0x9198, 0xffffffff, 0x000a0009,
	0x919c, 0xffffffff, 0x00040003,
	0x91a0, 0xffffffff, 0x00060005,
	0x91a4, 0xffffffff, 0x00000009,
	0x91a8, 0xffffffff, 0x00080007,
	0x91ac, 0xffffffff, 0x000b000a,
	0x91b0, 0xffffffff, 0x00050004,
	0x91b4, 0xffffffff, 0x00070006,
	0x91b8, 0xffffffff, 0x0008000b,
	0x91bc, 0xffffffff, 0x000a0009,
	0x91c0, 0xffffffff, 0x000d000c,
	0x91c4, 0xffffffff, 0x00060005,
	0x91c8, 0xffffffff, 0x00080007,
	0x91cc, 0xffffffff, 0x0000000b,
	0x91d0, 0xffffffff, 0x000a0009,
	0x91d4, 0xffffffff, 0x000d000c,
	0x91d8, 0xffffffff, 0x00070006,
	0x91dc, 0xffffffff, 0x00090008,
	0x91e0, 0xffffffff, 0x0000000c,
	0x91e4, 0xffffffff, 0x000b000a,
	0x91e8, 0xffffffff, 0x000e000d,
	0x91ec, 0xffffffff, 0x00080007,
	0x91f0, 0xffffffff, 0x000a0009,
	0x91f4, 0xffffffff, 0x0000000d,
	0x91f8, 0xffffffff, 0x000c000b,
	0x91fc, 0xffffffff, 0x000f000e,
	0x9200, 0xffffffff, 0x00090008,
	0x9204, 0xffffffff, 0x000b000a,
	0x9208, 0xffffffff, 0x000c000f,
	0x920c, 0xffffffff, 0x000e000d,
	0x9210, 0xffffffff, 0x00110010,
	0x9214, 0xffffffff, 0x000a0009,
	0x9218, 0xffffffff, 0x000c000b,
	0x921c, 0xffffffff, 0x0000000f,
	0x9220, 0xffffffff, 0x000e000d,
	0x9224, 0xffffffff, 0x00110010,
	0x9228, 0xffffffff, 0x000b000a,
	0x922c, 0xffffffff, 0x000d000c,
	0x9230, 0xffffffff, 0x00000010,
	0x9234, 0xffffffff, 0x000f000e,
	0x9238, 0xffffffff, 0x00120011,
	0x923c, 0xffffffff, 0x000c000b,
	0x9240, 0xffffffff, 0x000e000d,
	0x9244, 0xffffffff, 0x00000011,
	0x9248, 0xffffffff, 0x0010000f,
	0x924c, 0xffffffff, 0x00130012,
	0x9250, 0xffffffff, 0x000d000c,
	0x9254, 0xffffffff, 0x000f000e,
	0x9258, 0xffffffff, 0x00100013,
	0x925c, 0xffffffff, 0x00120011,
	0x9260, 0xffffffff, 0x00150014,
	0x9264, 0xffffffff, 0x000e000d,
	0x9268, 0xffffffff, 0x0010000f,
	0x926c, 0xffffffff, 0x00000013,
	0x9270, 0xffffffff, 0x00120011,
	0x9274, 0xffffffff, 0x00150014,
	0x9278, 0xffffffff, 0x000f000e,
	0x927c, 0xffffffff, 0x00110010,
	0x9280, 0xffffffff, 0x00000014,
	0x9284, 0xffffffff, 0x00130012,
	0x9288, 0xffffffff, 0x00160015,
	0x928c, 0xffffffff, 0x0010000f,
	0x9290, 0xffffffff, 0x00120011,
	0x9294, 0xffffffff, 0x00000015,
	0x9298, 0xffffffff, 0x00140013,
	0x929c, 0xffffffff, 0x00170016,
	0x9150, 0xffffffff, 0x96940200,
	0x8708, 0xffffffff, 0x00900100,
	0xc478, 0xffffffff, 0x00000080,
	0xc404, 0xffffffff, 0x0020003f,
	0x30, 0xffffffff, 0x0000001c,
	0x34, 0x000f0000, 0x000f0000,
	0x160c, 0xffffffff, 0x00000100,
	0x1024, 0xffffffff, 0x00000100,
	0x102c, 0x00000101, 0x00000000,
	0x20a8, 0xffffffff, 0x00000104,
	0x264c, 0x000c0000, 0x000c0000,
	0x2648, 0x000c0000, 0x000c0000,
	0x55e4, 0xff000fff, 0x00000100,
	0x55e8, 0x00000001, 0x00000001,
	0x2f50, 0x00000001, 0x00000001,
	0x30cc, 0xc0000fff, 0x00000104,
	0xc1e4, 0x00000001, 0x00000001,
	0xd0c0, 0xfffffff0, 0x00000100,
	0xd8c0, 0xfffffff0, 0x00000100
};
674
/*
 * Clock-gating (MGCG/CGCG) init sequence for PITCAIRN.
 * { offset, and_mask, or_value } triples.
 */
static const u32 pitcairn_mgcg_cgcg_init[] =
{
	0xc400, 0xffffffff, 0xfffffffc,
	0x802c, 0xffffffff, 0xe0000000,
	0x9a60, 0xffffffff, 0x00000100,
	0x92a4, 0xffffffff, 0x00000100,
	0xc164, 0xffffffff, 0x00000100,
	0x9774, 0xffffffff, 0x00000100,
	0x8984, 0xffffffff, 0x06000100,
	0x8a18, 0xffffffff, 0x00000100,
	0x92a0, 0xffffffff, 0x00000100,
	0xc380, 0xffffffff, 0x00000100,
	0x8b28, 0xffffffff, 0x00000100,
	0x9144, 0xffffffff, 0x00000100,
	0x8d88, 0xffffffff, 0x00000100,
	0x8d8c, 0xffffffff, 0x00000100,
	0x9030, 0xffffffff, 0x00000100,
	0x9034, 0xffffffff, 0x00000100,
	0x9038, 0xffffffff, 0x00000100,
	0x903c, 0xffffffff, 0x00000100,
	0xad80, 0xffffffff, 0x00000100,
	0xac54, 0xffffffff, 0x00000100,
	0x897c, 0xffffffff, 0x06000100,
	0x9868, 0xffffffff, 0x00000100,
	0x9510, 0xffffffff, 0x00000100,
	0xaf04, 0xffffffff, 0x00000100,
	0xae04, 0xffffffff, 0x00000100,
	0x949c, 0xffffffff, 0x00000100,
	0x802c, 0xffffffff, 0xe0000000,
	0x9160, 0xffffffff, 0x00010000,
	0x9164, 0xffffffff, 0x00030002,
	0x9168, 0xffffffff, 0x00040007,
	0x916c, 0xffffffff, 0x00060005,
	0x9170, 0xffffffff, 0x00090008,
	0x9174, 0xffffffff, 0x00020001,
	0x9178, 0xffffffff, 0x00040003,
	0x917c, 0xffffffff, 0x00000007,
	0x9180, 0xffffffff, 0x00060005,
	0x9184, 0xffffffff, 0x00090008,
	0x9188, 0xffffffff, 0x00030002,
	0x918c, 0xffffffff, 0x00050004,
	0x9190, 0xffffffff, 0x00000008,
	0x9194, 0xffffffff, 0x00070006,
	0x9198, 0xffffffff, 0x000a0009,
	0x919c, 0xffffffff, 0x00040003,
	0x91a0, 0xffffffff, 0x00060005,
	0x91a4, 0xffffffff, 0x00000009,
	0x91a8, 0xffffffff, 0x00080007,
	0x91ac, 0xffffffff, 0x000b000a,
	0x91b0, 0xffffffff, 0x00050004,
	0x91b4, 0xffffffff, 0x00070006,
	0x91b8, 0xffffffff, 0x0008000b,
	0x91bc, 0xffffffff, 0x000a0009,
	0x91c0, 0xffffffff, 0x000d000c,
	0x9200, 0xffffffff, 0x00090008,
	0x9204, 0xffffffff, 0x000b000a,
	0x9208, 0xffffffff, 0x000c000f,
	0x920c, 0xffffffff, 0x000e000d,
	0x9210, 0xffffffff, 0x00110010,
	0x9214, 0xffffffff, 0x000a0009,
	0x9218, 0xffffffff, 0x000c000b,
	0x921c, 0xffffffff, 0x0000000f,
	0x9220, 0xffffffff, 0x000e000d,
	0x9224, 0xffffffff, 0x00110010,
	0x9228, 0xffffffff, 0x000b000a,
	0x922c, 0xffffffff, 0x000d000c,
	0x9230, 0xffffffff, 0x00000010,
	0x9234, 0xffffffff, 0x000f000e,
	0x9238, 0xffffffff, 0x00120011,
	0x923c, 0xffffffff, 0x000c000b,
	0x9240, 0xffffffff, 0x000e000d,
	0x9244, 0xffffffff, 0x00000011,
	0x9248, 0xffffffff, 0x0010000f,
	0x924c, 0xffffffff, 0x00130012,
	0x9250, 0xffffffff, 0x000d000c,
	0x9254, 0xffffffff, 0x000f000e,
	0x9258, 0xffffffff, 0x00100013,
	0x925c, 0xffffffff, 0x00120011,
	0x9260, 0xffffffff, 0x00150014,
	0x9150, 0xffffffff, 0x96940200,
	0x8708, 0xffffffff, 0x00900100,
	0xc478, 0xffffffff, 0x00000080,
	0xc404, 0xffffffff, 0x0020003f,
	0x30, 0xffffffff, 0x0000001c,
	0x34, 0x000f0000, 0x000f0000,
	0x160c, 0xffffffff, 0x00000100,
	0x1024, 0xffffffff, 0x00000100,
	0x102c, 0x00000101, 0x00000000,
	0x20a8, 0xffffffff, 0x00000104,
	0x55e4, 0xff000fff, 0x00000100,
	0x55e8, 0x00000001, 0x00000001,
	0x2f50, 0x00000001, 0x00000001,
	0x30cc, 0xc0000fff, 0x00000104,
	0xc1e4, 0x00000001, 0x00000001,
	0xd0c0, 0xfffffff0, 0x00000100,
	0xd8c0, 0xfffffff0, 0x00000100
};
772
/*
 * Clock-gating (MGCG/CGCG) init sequence for VERDE.
 * { offset, and_mask, or_value } triples.
 */
static const u32 verde_mgcg_cgcg_init[] =
{
	0xc400, 0xffffffff, 0xfffffffc,
	0x802c, 0xffffffff, 0xe0000000,
	0x9a60, 0xffffffff, 0x00000100,
	0x92a4, 0xffffffff, 0x00000100,
	0xc164, 0xffffffff, 0x00000100,
	0x9774, 0xffffffff, 0x00000100,
	0x8984, 0xffffffff, 0x06000100,
	0x8a18, 0xffffffff, 0x00000100,
	0x92a0, 0xffffffff, 0x00000100,
	0xc380, 0xffffffff, 0x00000100,
	0x8b28, 0xffffffff, 0x00000100,
	0x9144, 0xffffffff, 0x00000100,
	0x8d88, 0xffffffff, 0x00000100,
	0x8d8c, 0xffffffff, 0x00000100,
	0x9030, 0xffffffff, 0x00000100,
	0x9034, 0xffffffff, 0x00000100,
	0x9038, 0xffffffff, 0x00000100,
	0x903c, 0xffffffff, 0x00000100,
	0xad80, 0xffffffff, 0x00000100,
	0xac54, 0xffffffff, 0x00000100,
	0x897c, 0xffffffff, 0x06000100,
	0x9868, 0xffffffff, 0x00000100,
	0x9510, 0xffffffff, 0x00000100,
	0xaf04, 0xffffffff, 0x00000100,
	0xae04, 0xffffffff, 0x00000100,
	0x949c, 0xffffffff, 0x00000100,
	0x802c, 0xffffffff, 0xe0000000,
	0x9160, 0xffffffff, 0x00010000,
	0x9164, 0xffffffff, 0x00030002,
	0x9168, 0xffffffff, 0x00040007,
	0x916c, 0xffffffff, 0x00060005,
	0x9170, 0xffffffff, 0x00090008,
	0x9174, 0xffffffff, 0x00020001,
	0x9178, 0xffffffff, 0x00040003,
	0x917c, 0xffffffff, 0x00000007,
	0x9180, 0xffffffff, 0x00060005,
	0x9184, 0xffffffff, 0x00090008,
	0x9188, 0xffffffff, 0x00030002,
	0x918c, 0xffffffff, 0x00050004,
	0x9190, 0xffffffff, 0x00000008,
	0x9194, 0xffffffff, 0x00070006,
	0x9198, 0xffffffff, 0x000a0009,
	0x919c, 0xffffffff, 0x00040003,
	0x91a0, 0xffffffff, 0x00060005,
	0x91a4, 0xffffffff, 0x00000009,
	0x91a8, 0xffffffff, 0x00080007,
	0x91ac, 0xffffffff, 0x000b000a,
	0x91b0, 0xffffffff, 0x00050004,
	0x91b4, 0xffffffff, 0x00070006,
	0x91b8, 0xffffffff, 0x0008000b,
	0x91bc, 0xffffffff, 0x000a0009,
	0x91c0, 0xffffffff, 0x000d000c,
	0x9200, 0xffffffff, 0x00090008,
	0x9204, 0xffffffff, 0x000b000a,
	0x9208, 0xffffffff, 0x000c000f,
	0x920c, 0xffffffff, 0x000e000d,
	0x9210, 0xffffffff, 0x00110010,
	0x9214, 0xffffffff, 0x000a0009,
	0x9218, 0xffffffff, 0x000c000b,
	0x921c, 0xffffffff, 0x0000000f,
	0x9220, 0xffffffff, 0x000e000d,
	0x9224, 0xffffffff, 0x00110010,
	0x9228, 0xffffffff, 0x000b000a,
	0x922c, 0xffffffff, 0x000d000c,
	0x9230, 0xffffffff, 0x00000010,
	0x9234, 0xffffffff, 0x000f000e,
	0x9238, 0xffffffff, 0x00120011,
	0x923c, 0xffffffff, 0x000c000b,
	0x9240, 0xffffffff, 0x000e000d,
	0x9244, 0xffffffff, 0x00000011,
	0x9248, 0xffffffff, 0x0010000f,
	0x924c, 0xffffffff, 0x00130012,
	0x9250, 0xffffffff, 0x000d000c,
	0x9254, 0xffffffff, 0x000f000e,
	0x9258, 0xffffffff, 0x00100013,
	0x925c, 0xffffffff, 0x00120011,
	0x9260, 0xffffffff, 0x00150014,
	0x9150, 0xffffffff, 0x96940200,
	0x8708, 0xffffffff, 0x00900100,
	0xc478, 0xffffffff, 0x00000080,
	0xc404, 0xffffffff, 0x0020003f,
	0x30, 0xffffffff, 0x0000001c,
	0x34, 0x000f0000, 0x000f0000,
	0x160c, 0xffffffff, 0x00000100,
	0x1024, 0xffffffff, 0x00000100,
	0x102c, 0x00000101, 0x00000000,
	0x20a8, 0xffffffff, 0x00000104,
	0x264c, 0x000c0000, 0x000c0000,
	0x2648, 0x000c0000, 0x000c0000,
	0x55e4, 0xff000fff, 0x00000100,
	0x55e8, 0x00000001, 0x00000001,
	0x2f50, 0x00000001, 0x00000001,
	0x30cc, 0xc0000fff, 0x00000104,
	0xc1e4, 0x00000001, 0x00000001,
	0xd0c0, 0xfffffff0, 0x00000100,
	0xd8c0, 0xfffffff0, 0x00000100
};
872
/*
 * Clock-gating (MGCG/CGCG) init sequence for OLAND.
 * { offset, and_mask, or_value } triples.
 */
static const u32 oland_mgcg_cgcg_init[] =
{
	0xc400, 0xffffffff, 0xfffffffc,
	0x802c, 0xffffffff, 0xe0000000,
	0x9a60, 0xffffffff, 0x00000100,
	0x92a4, 0xffffffff, 0x00000100,
	0xc164, 0xffffffff, 0x00000100,
	0x9774, 0xffffffff, 0x00000100,
	0x8984, 0xffffffff, 0x06000100,
	0x8a18, 0xffffffff, 0x00000100,
	0x92a0, 0xffffffff, 0x00000100,
	0xc380, 0xffffffff, 0x00000100,
	0x8b28, 0xffffffff, 0x00000100,
	0x9144, 0xffffffff, 0x00000100,
	0x8d88, 0xffffffff, 0x00000100,
	0x8d8c, 0xffffffff, 0x00000100,
	0x9030, 0xffffffff, 0x00000100,
	0x9034, 0xffffffff, 0x00000100,
	0x9038, 0xffffffff, 0x00000100,
	0x903c, 0xffffffff, 0x00000100,
	0xad80, 0xffffffff, 0x00000100,
	0xac54, 0xffffffff, 0x00000100,
	0x897c, 0xffffffff, 0x06000100,
	0x9868, 0xffffffff, 0x00000100,
	0x9510, 0xffffffff, 0x00000100,
	0xaf04, 0xffffffff, 0x00000100,
	0xae04, 0xffffffff, 0x00000100,
	0x949c, 0xffffffff, 0x00000100,
	0x802c, 0xffffffff, 0xe0000000,
	0x9160, 0xffffffff, 0x00010000,
	0x9164, 0xffffffff, 0x00030002,
	0x9168, 0xffffffff, 0x00040007,
	0x916c, 0xffffffff, 0x00060005,
	0x9170, 0xffffffff, 0x00090008,
	0x9174, 0xffffffff, 0x00020001,
	0x9178, 0xffffffff, 0x00040003,
	0x917c, 0xffffffff, 0x00000007,
	0x9180, 0xffffffff, 0x00060005,
	0x9184, 0xffffffff, 0x00090008,
	0x9188, 0xffffffff, 0x00030002,
	0x918c, 0xffffffff, 0x00050004,
	0x9190, 0xffffffff, 0x00000008,
	0x9194, 0xffffffff, 0x00070006,
	0x9198, 0xffffffff, 0x000a0009,
	0x919c, 0xffffffff, 0x00040003,
	0x91a0, 0xffffffff, 0x00060005,
	0x91a4, 0xffffffff, 0x00000009,
	0x91a8, 0xffffffff, 0x00080007,
	0x91ac, 0xffffffff, 0x000b000a,
	0x91b0, 0xffffffff, 0x00050004,
	0x91b4, 0xffffffff, 0x00070006,
	0x91b8, 0xffffffff, 0x0008000b,
	0x91bc, 0xffffffff, 0x000a0009,
	0x91c0, 0xffffffff, 0x000d000c,
	0x91c4, 0xffffffff, 0x00060005,
	0x91c8, 0xffffffff, 0x00080007,
	0x91cc, 0xffffffff, 0x0000000b,
	0x91d0, 0xffffffff, 0x000a0009,
	0x91d4, 0xffffffff, 0x000d000c,
	0x9150, 0xffffffff, 0x96940200,
	0x8708, 0xffffffff, 0x00900100,
	0xc478, 0xffffffff, 0x00000080,
	0xc404, 0xffffffff, 0x0020003f,
	0x30, 0xffffffff, 0x0000001c,
	0x34, 0x000f0000, 0x000f0000,
	0x160c, 0xffffffff, 0x00000100,
	0x1024, 0xffffffff, 0x00000100,
	0x102c, 0x00000101, 0x00000000,
	0x20a8, 0xffffffff, 0x00000104,
	0x264c, 0x000c0000, 0x000c0000,
	0x2648, 0x000c0000, 0x000c0000,
	0x55e4, 0xff000fff, 0x00000100,
	0x55e8, 0x00000001, 0x00000001,
	0x2f50, 0x00000001, 0x00000001,
	0x30cc, 0xc0000fff, 0x00000104,
	0xc1e4, 0x00000001, 0x00000001,
	0xd0c0, 0xfffffff0, 0x00000100,
	0xd8c0, 0xfffffff0, 0x00000100
};
952
/* Hainan MGCG/CGCG (clock gating) init sequence: {reg offset, AND mask, value}
 * triplets, applied via radeon_program_register_sequence() from
 * si_init_golden_registers(). */
static const u32 hainan_mgcg_cgcg_init[] =
{
	0xc400, 0xffffffff, 0xfffffffc,
	0x802c, 0xffffffff, 0xe0000000,
	0x9a60, 0xffffffff, 0x00000100,
	0x92a4, 0xffffffff, 0x00000100,
	0xc164, 0xffffffff, 0x00000100,
	0x9774, 0xffffffff, 0x00000100,
	0x8984, 0xffffffff, 0x06000100,
	0x8a18, 0xffffffff, 0x00000100,
	0x92a0, 0xffffffff, 0x00000100,
	0xc380, 0xffffffff, 0x00000100,
	0x8b28, 0xffffffff, 0x00000100,
	0x9144, 0xffffffff, 0x00000100,
	0x8d88, 0xffffffff, 0x00000100,
	0x8d8c, 0xffffffff, 0x00000100,
	0x9030, 0xffffffff, 0x00000100,
	0x9034, 0xffffffff, 0x00000100,
	0x9038, 0xffffffff, 0x00000100,
	0x903c, 0xffffffff, 0x00000100,
	0xad80, 0xffffffff, 0x00000100,
	0xac54, 0xffffffff, 0x00000100,
	0x897c, 0xffffffff, 0x06000100,
	0x9868, 0xffffffff, 0x00000100,
	0x9510, 0xffffffff, 0x00000100,
	0xaf04, 0xffffffff, 0x00000100,
	0xae04, 0xffffffff, 0x00000100,
	0x949c, 0xffffffff, 0x00000100,
	0x802c, 0xffffffff, 0xe0000000,
	0x9160, 0xffffffff, 0x00010000,
	0x9164, 0xffffffff, 0x00030002,
	0x9168, 0xffffffff, 0x00040007,
	0x916c, 0xffffffff, 0x00060005,
	0x9170, 0xffffffff, 0x00090008,
	0x9174, 0xffffffff, 0x00020001,
	0x9178, 0xffffffff, 0x00040003,
	0x917c, 0xffffffff, 0x00000007,
	0x9180, 0xffffffff, 0x00060005,
	0x9184, 0xffffffff, 0x00090008,
	0x9188, 0xffffffff, 0x00030002,
	0x918c, 0xffffffff, 0x00050004,
	0x9190, 0xffffffff, 0x00000008,
	0x9194, 0xffffffff, 0x00070006,
	0x9198, 0xffffffff, 0x000a0009,
	0x919c, 0xffffffff, 0x00040003,
	0x91a0, 0xffffffff, 0x00060005,
	0x91a4, 0xffffffff, 0x00000009,
	0x91a8, 0xffffffff, 0x00080007,
	0x91ac, 0xffffffff, 0x000b000a,
	0x91b0, 0xffffffff, 0x00050004,
	0x91b4, 0xffffffff, 0x00070006,
	0x91b8, 0xffffffff, 0x0008000b,
	0x91bc, 0xffffffff, 0x000a0009,
	0x91c0, 0xffffffff, 0x000d000c,
	0x91c4, 0xffffffff, 0x00060005,
	0x91c8, 0xffffffff, 0x00080007,
	0x91cc, 0xffffffff, 0x0000000b,
	0x91d0, 0xffffffff, 0x000a0009,
	0x91d4, 0xffffffff, 0x000d000c,
	0x9150, 0xffffffff, 0x96940200,
	0x8708, 0xffffffff, 0x00900100,
	0xc478, 0xffffffff, 0x00000080,
	0xc404, 0xffffffff, 0x0020003f,
	0x30, 0xffffffff, 0x0000001c,
	0x34, 0x000f0000, 0x000f0000,
	0x160c, 0xffffffff, 0x00000100,
	0x1024, 0xffffffff, 0x00000100,
	0x20a8, 0xffffffff, 0x00000104,
	0x264c, 0x000c0000, 0x000c0000,
	0x2648, 0x000c0000, 0x000c0000,
	0x2f50, 0x00000001, 0x00000001,
	0x30cc, 0xc0000fff, 0x00000104,
	0xc1e4, 0x00000001, 0x00000001,
	0xd0c0, 0xfffffff0, 0x00000100,
	0xd8c0, 0xfffffff0, 0x00000100
};
1029
Alex Deucher205996c2013-03-01 17:08:42 -05001030static u32 verde_pg_init[] =
1031{
1032 0x353c, 0xffffffff, 0x40000,
1033 0x3538, 0xffffffff, 0x200010ff,
1034 0x353c, 0xffffffff, 0x0,
1035 0x353c, 0xffffffff, 0x0,
1036 0x353c, 0xffffffff, 0x0,
1037 0x353c, 0xffffffff, 0x0,
1038 0x353c, 0xffffffff, 0x0,
1039 0x353c, 0xffffffff, 0x7007,
1040 0x3538, 0xffffffff, 0x300010ff,
1041 0x353c, 0xffffffff, 0x0,
1042 0x353c, 0xffffffff, 0x0,
1043 0x353c, 0xffffffff, 0x0,
1044 0x353c, 0xffffffff, 0x0,
1045 0x353c, 0xffffffff, 0x0,
1046 0x353c, 0xffffffff, 0x400000,
1047 0x3538, 0xffffffff, 0x100010ff,
1048 0x353c, 0xffffffff, 0x0,
1049 0x353c, 0xffffffff, 0x0,
1050 0x353c, 0xffffffff, 0x0,
1051 0x353c, 0xffffffff, 0x0,
1052 0x353c, 0xffffffff, 0x0,
1053 0x353c, 0xffffffff, 0x120200,
1054 0x3538, 0xffffffff, 0x500010ff,
1055 0x353c, 0xffffffff, 0x0,
1056 0x353c, 0xffffffff, 0x0,
1057 0x353c, 0xffffffff, 0x0,
1058 0x353c, 0xffffffff, 0x0,
1059 0x353c, 0xffffffff, 0x0,
1060 0x353c, 0xffffffff, 0x1e1e16,
1061 0x3538, 0xffffffff, 0x600010ff,
1062 0x353c, 0xffffffff, 0x0,
1063 0x353c, 0xffffffff, 0x0,
1064 0x353c, 0xffffffff, 0x0,
1065 0x353c, 0xffffffff, 0x0,
1066 0x353c, 0xffffffff, 0x0,
1067 0x353c, 0xffffffff, 0x171f1e,
1068 0x3538, 0xffffffff, 0x700010ff,
1069 0x353c, 0xffffffff, 0x0,
1070 0x353c, 0xffffffff, 0x0,
1071 0x353c, 0xffffffff, 0x0,
1072 0x353c, 0xffffffff, 0x0,
1073 0x353c, 0xffffffff, 0x0,
1074 0x353c, 0xffffffff, 0x0,
1075 0x3538, 0xffffffff, 0x9ff,
1076 0x3500, 0xffffffff, 0x0,
1077 0x3504, 0xffffffff, 0x10000800,
1078 0x3504, 0xffffffff, 0xf,
1079 0x3504, 0xffffffff, 0xf,
1080 0x3500, 0xffffffff, 0x4,
1081 0x3504, 0xffffffff, 0x1000051e,
1082 0x3504, 0xffffffff, 0xffff,
1083 0x3504, 0xffffffff, 0xffff,
1084 0x3500, 0xffffffff, 0x8,
1085 0x3504, 0xffffffff, 0x80500,
1086 0x3500, 0xffffffff, 0x12,
1087 0x3504, 0xffffffff, 0x9050c,
1088 0x3500, 0xffffffff, 0x1d,
1089 0x3504, 0xffffffff, 0xb052c,
1090 0x3500, 0xffffffff, 0x2a,
1091 0x3504, 0xffffffff, 0x1053e,
1092 0x3500, 0xffffffff, 0x2d,
1093 0x3504, 0xffffffff, 0x10546,
1094 0x3500, 0xffffffff, 0x30,
1095 0x3504, 0xffffffff, 0xa054e,
1096 0x3500, 0xffffffff, 0x3c,
1097 0x3504, 0xffffffff, 0x1055f,
1098 0x3500, 0xffffffff, 0x3f,
1099 0x3504, 0xffffffff, 0x10567,
1100 0x3500, 0xffffffff, 0x42,
1101 0x3504, 0xffffffff, 0x1056f,
1102 0x3500, 0xffffffff, 0x45,
1103 0x3504, 0xffffffff, 0x10572,
1104 0x3500, 0xffffffff, 0x48,
1105 0x3504, 0xffffffff, 0x20575,
1106 0x3500, 0xffffffff, 0x4c,
1107 0x3504, 0xffffffff, 0x190801,
1108 0x3500, 0xffffffff, 0x67,
1109 0x3504, 0xffffffff, 0x1082a,
1110 0x3500, 0xffffffff, 0x6a,
1111 0x3504, 0xffffffff, 0x1b082d,
1112 0x3500, 0xffffffff, 0x87,
1113 0x3504, 0xffffffff, 0x310851,
1114 0x3500, 0xffffffff, 0xba,
1115 0x3504, 0xffffffff, 0x891,
1116 0x3500, 0xffffffff, 0xbc,
1117 0x3504, 0xffffffff, 0x893,
1118 0x3500, 0xffffffff, 0xbe,
1119 0x3504, 0xffffffff, 0x20895,
1120 0x3500, 0xffffffff, 0xc2,
1121 0x3504, 0xffffffff, 0x20899,
1122 0x3500, 0xffffffff, 0xc6,
1123 0x3504, 0xffffffff, 0x2089d,
1124 0x3500, 0xffffffff, 0xca,
1125 0x3504, 0xffffffff, 0x8a1,
1126 0x3500, 0xffffffff, 0xcc,
1127 0x3504, 0xffffffff, 0x8a3,
1128 0x3500, 0xffffffff, 0xce,
1129 0x3504, 0xffffffff, 0x308a5,
1130 0x3500, 0xffffffff, 0xd3,
1131 0x3504, 0xffffffff, 0x6d08cd,
1132 0x3500, 0xffffffff, 0x142,
1133 0x3504, 0xffffffff, 0x2000095a,
1134 0x3504, 0xffffffff, 0x1,
1135 0x3500, 0xffffffff, 0x144,
1136 0x3504, 0xffffffff, 0x301f095b,
1137 0x3500, 0xffffffff, 0x165,
1138 0x3504, 0xffffffff, 0xc094d,
1139 0x3500, 0xffffffff, 0x173,
1140 0x3504, 0xffffffff, 0xf096d,
1141 0x3500, 0xffffffff, 0x184,
1142 0x3504, 0xffffffff, 0x15097f,
1143 0x3500, 0xffffffff, 0x19b,
1144 0x3504, 0xffffffff, 0xc0998,
1145 0x3500, 0xffffffff, 0x1a9,
1146 0x3504, 0xffffffff, 0x409a7,
1147 0x3500, 0xffffffff, 0x1af,
1148 0x3504, 0xffffffff, 0xcdc,
1149 0x3500, 0xffffffff, 0x1b1,
1150 0x3504, 0xffffffff, 0x800,
1151 0x3508, 0xffffffff, 0x6c9b2000,
1152 0x3510, 0xfc00, 0x2000,
1153 0x3544, 0xffffffff, 0xfc0,
1154 0x28d4, 0x00000100, 0x100
1155};
1156
1157static void si_init_golden_registers(struct radeon_device *rdev)
1158{
1159 switch (rdev->family) {
1160 case CHIP_TAHITI:
1161 radeon_program_register_sequence(rdev,
1162 tahiti_golden_registers,
1163 (const u32)ARRAY_SIZE(tahiti_golden_registers));
1164 radeon_program_register_sequence(rdev,
1165 tahiti_golden_rlc_registers,
1166 (const u32)ARRAY_SIZE(tahiti_golden_rlc_registers));
1167 radeon_program_register_sequence(rdev,
1168 tahiti_mgcg_cgcg_init,
1169 (const u32)ARRAY_SIZE(tahiti_mgcg_cgcg_init));
1170 radeon_program_register_sequence(rdev,
1171 tahiti_golden_registers2,
1172 (const u32)ARRAY_SIZE(tahiti_golden_registers2));
1173 break;
1174 case CHIP_PITCAIRN:
1175 radeon_program_register_sequence(rdev,
1176 pitcairn_golden_registers,
1177 (const u32)ARRAY_SIZE(pitcairn_golden_registers));
1178 radeon_program_register_sequence(rdev,
1179 pitcairn_golden_rlc_registers,
1180 (const u32)ARRAY_SIZE(pitcairn_golden_rlc_registers));
1181 radeon_program_register_sequence(rdev,
1182 pitcairn_mgcg_cgcg_init,
1183 (const u32)ARRAY_SIZE(pitcairn_mgcg_cgcg_init));
1184 break;
1185 case CHIP_VERDE:
1186 radeon_program_register_sequence(rdev,
1187 verde_golden_registers,
1188 (const u32)ARRAY_SIZE(verde_golden_registers));
1189 radeon_program_register_sequence(rdev,
1190 verde_golden_rlc_registers,
1191 (const u32)ARRAY_SIZE(verde_golden_rlc_registers));
1192 radeon_program_register_sequence(rdev,
1193 verde_mgcg_cgcg_init,
1194 (const u32)ARRAY_SIZE(verde_mgcg_cgcg_init));
1195 radeon_program_register_sequence(rdev,
1196 verde_pg_init,
1197 (const u32)ARRAY_SIZE(verde_pg_init));
1198 break;
1199 case CHIP_OLAND:
1200 radeon_program_register_sequence(rdev,
1201 oland_golden_registers,
1202 (const u32)ARRAY_SIZE(oland_golden_registers));
1203 radeon_program_register_sequence(rdev,
1204 oland_golden_rlc_registers,
1205 (const u32)ARRAY_SIZE(oland_golden_rlc_registers));
1206 radeon_program_register_sequence(rdev,
1207 oland_mgcg_cgcg_init,
1208 (const u32)ARRAY_SIZE(oland_mgcg_cgcg_init));
1209 break;
Alex Deucherfffbdda2013-05-13 13:36:23 -04001210 case CHIP_HAINAN:
1211 radeon_program_register_sequence(rdev,
1212 hainan_golden_registers,
1213 (const u32)ARRAY_SIZE(hainan_golden_registers));
1214 radeon_program_register_sequence(rdev,
1215 hainan_golden_registers2,
1216 (const u32)ARRAY_SIZE(hainan_golden_registers2));
1217 radeon_program_register_sequence(rdev,
1218 hainan_mgcg_cgcg_init,
1219 (const u32)ARRAY_SIZE(hainan_mgcg_cgcg_init));
1220 break;
Alex Deucher205996c2013-03-01 17:08:42 -05001221 default:
1222 break;
1223 }
1224}
1225
Alex Deucher454d2e22013-02-14 10:04:02 -05001226#define PCIE_BUS_CLK 10000
1227#define TCLK (PCIE_BUS_CLK / 10)
1228
1229/**
1230 * si_get_xclk - get the xclk
1231 *
1232 * @rdev: radeon_device pointer
1233 *
1234 * Returns the reference clock used by the gfx engine
1235 * (SI).
1236 */
1237u32 si_get_xclk(struct radeon_device *rdev)
1238{
1239 u32 reference_clock = rdev->clock.spll.reference_freq;
1240 u32 tmp;
1241
1242 tmp = RREG32(CG_CLKPIN_CNTL_2);
1243 if (tmp & MUX_TCLK_TO_XCLK)
1244 return TCLK;
1245
1246 tmp = RREG32(CG_CLKPIN_CNTL);
1247 if (tmp & XTALIN_DIVIDE)
1248 return reference_clock / 4;
1249
1250 return reference_clock;
1251}
1252
Alex Deucher1bd47d22012-03-20 17:18:10 -04001253/* get temperature in millidegrees */
1254int si_get_temp(struct radeon_device *rdev)
1255{
1256 u32 temp;
1257 int actual_temp = 0;
1258
1259 temp = (RREG32(CG_MULT_THERMAL_STATUS) & CTF_TEMP_MASK) >>
1260 CTF_TEMP_SHIFT;
1261
1262 if (temp & 0x200)
1263 actual_temp = 255;
1264 else
1265 actual_temp = temp & 0x1ff;
1266
1267 actual_temp = (actual_temp * 1000);
1268
1269 return actual_temp;
1270}
1271
/* number of {index, data} pairs in each *_io_mc_regs table below */
#define TAHITI_IO_MC_REGS_SIZE 36

/* Tahiti MC IO init: {MC_SEQ_IO_DEBUG_INDEX, MC_SEQ_IO_DEBUG_DATA} pairs
 * written by si_mc_load_microcode() before streaming in the MC ucode. */
static const u32 tahiti_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
	{0x0000006f, 0x03044000},
	{0x00000070, 0x0480c018},
	{0x00000071, 0x00000040},
	{0x00000072, 0x01000000},
	{0x00000074, 0x000000ff},
	{0x00000075, 0x00143400},
	{0x00000076, 0x08ec0800},
	{0x00000077, 0x040000cc},
	{0x00000079, 0x00000000},
	{0x0000007a, 0x21000409},
	{0x0000007c, 0x00000000},
	{0x0000007d, 0xe8000000},
	{0x0000007e, 0x044408a8},
	{0x0000007f, 0x00000003},
	{0x00000080, 0x00000000},
	{0x00000081, 0x01000000},
	{0x00000082, 0x02000000},
	{0x00000083, 0x00000000},
	{0x00000084, 0xe3f3e4f4},
	{0x00000085, 0x00052024},
	{0x00000087, 0x00000000},
	{0x00000088, 0x66036603},
	{0x00000089, 0x01000000},
	{0x0000008b, 0x1c0a0000},
	{0x0000008c, 0xff010000},
	{0x0000008e, 0xffffefff},
	{0x0000008f, 0xfff3efff},
	{0x00000090, 0xfff3efbf},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00a77400}
};
1312
/* Pitcairn MC IO init: {MC_SEQ_IO_DEBUG_INDEX, MC_SEQ_IO_DEBUG_DATA} pairs
 * written by si_mc_load_microcode() before streaming in the MC ucode. */
static const u32 pitcairn_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
	{0x0000006f, 0x03044000},
	{0x00000070, 0x0480c018},
	{0x00000071, 0x00000040},
	{0x00000072, 0x01000000},
	{0x00000074, 0x000000ff},
	{0x00000075, 0x00143400},
	{0x00000076, 0x08ec0800},
	{0x00000077, 0x040000cc},
	{0x00000079, 0x00000000},
	{0x0000007a, 0x21000409},
	{0x0000007c, 0x00000000},
	{0x0000007d, 0xe8000000},
	{0x0000007e, 0x044408a8},
	{0x0000007f, 0x00000003},
	{0x00000080, 0x00000000},
	{0x00000081, 0x01000000},
	{0x00000082, 0x02000000},
	{0x00000083, 0x00000000},
	{0x00000084, 0xe3f3e4f4},
	{0x00000085, 0x00052024},
	{0x00000087, 0x00000000},
	{0x00000088, 0x66036603},
	{0x00000089, 0x01000000},
	{0x0000008b, 0x1c0a0000},
	{0x0000008c, 0xff010000},
	{0x0000008e, 0xffffefff},
	{0x0000008f, 0xfff3efff},
	{0x00000090, 0xfff3efbf},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00a47400}
};
1351
/* Verde MC IO init: {MC_SEQ_IO_DEBUG_INDEX, MC_SEQ_IO_DEBUG_DATA} pairs
 * written by si_mc_load_microcode() before streaming in the MC ucode. */
static const u32 verde_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
	{0x0000006f, 0x03044000},
	{0x00000070, 0x0480c018},
	{0x00000071, 0x00000040},
	{0x00000072, 0x01000000},
	{0x00000074, 0x000000ff},
	{0x00000075, 0x00143400},
	{0x00000076, 0x08ec0800},
	{0x00000077, 0x040000cc},
	{0x00000079, 0x00000000},
	{0x0000007a, 0x21000409},
	{0x0000007c, 0x00000000},
	{0x0000007d, 0xe8000000},
	{0x0000007e, 0x044408a8},
	{0x0000007f, 0x00000003},
	{0x00000080, 0x00000000},
	{0x00000081, 0x01000000},
	{0x00000082, 0x02000000},
	{0x00000083, 0x00000000},
	{0x00000084, 0xe3f3e4f4},
	{0x00000085, 0x00052024},
	{0x00000087, 0x00000000},
	{0x00000088, 0x66036603},
	{0x00000089, 0x01000000},
	{0x0000008b, 0x1c0a0000},
	{0x0000008c, 0xff010000},
	{0x0000008e, 0xffffefff},
	{0x0000008f, 0xfff3efff},
	{0x00000090, 0xfff3efbf},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00a37400}
};
1390
/* Oland MC IO init: {MC_SEQ_IO_DEBUG_INDEX, MC_SEQ_IO_DEBUG_DATA} pairs
 * written by si_mc_load_microcode() before streaming in the MC ucode. */
static const u32 oland_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
	{0x0000006f, 0x03044000},
	{0x00000070, 0x0480c018},
	{0x00000071, 0x00000040},
	{0x00000072, 0x01000000},
	{0x00000074, 0x000000ff},
	{0x00000075, 0x00143400},
	{0x00000076, 0x08ec0800},
	{0x00000077, 0x040000cc},
	{0x00000079, 0x00000000},
	{0x0000007a, 0x21000409},
	{0x0000007c, 0x00000000},
	{0x0000007d, 0xe8000000},
	{0x0000007e, 0x044408a8},
	{0x0000007f, 0x00000003},
	{0x00000080, 0x00000000},
	{0x00000081, 0x01000000},
	{0x00000082, 0x02000000},
	{0x00000083, 0x00000000},
	{0x00000084, 0xe3f3e4f4},
	{0x00000085, 0x00052024},
	{0x00000087, 0x00000000},
	{0x00000088, 0x66036603},
	{0x00000089, 0x01000000},
	{0x0000008b, 0x1c0a0000},
	{0x0000008c, 0xff010000},
	{0x0000008e, 0xffffefff},
	{0x0000008f, 0xfff3efff},
	{0x00000090, 0xfff3efbf},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00a17730}
};
1429
/* Hainan MC IO init: {MC_SEQ_IO_DEBUG_INDEX, MC_SEQ_IO_DEBUG_DATA} pairs
 * written by si_mc_load_microcode() before streaming in the MC ucode. */
static const u32 hainan_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
	{0x0000006f, 0x03044000},
	{0x00000070, 0x0480c018},
	{0x00000071, 0x00000040},
	{0x00000072, 0x01000000},
	{0x00000074, 0x000000ff},
	{0x00000075, 0x00143400},
	{0x00000076, 0x08ec0800},
	{0x00000077, 0x040000cc},
	{0x00000079, 0x00000000},
	{0x0000007a, 0x21000409},
	{0x0000007c, 0x00000000},
	{0x0000007d, 0xe8000000},
	{0x0000007e, 0x044408a8},
	{0x0000007f, 0x00000003},
	{0x00000080, 0x00000000},
	{0x00000081, 0x01000000},
	{0x00000082, 0x02000000},
	{0x00000083, 0x00000000},
	{0x00000084, 0xe3f3e4f4},
	{0x00000085, 0x00052024},
	{0x00000087, 0x00000000},
	{0x00000088, 0x66036603},
	{0x00000089, 0x01000000},
	{0x0000008b, 0x1c0a0000},
	{0x0000008c, 0xff010000},
	{0x0000008e, 0xffffefff},
	{0x0000008f, 0xfff3efff},
	{0x00000090, 0xfff3efbf},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00a07730}
};
1468
Alex Deucher8b074dd2012-03-20 17:18:18 -04001469/* ucode loading */
Alex Deucher6c7bcce2013-12-18 14:07:14 -05001470int si_mc_load_microcode(struct radeon_device *rdev)
Alex Deucher8b074dd2012-03-20 17:18:18 -04001471{
1472 const __be32 *fw_data;
1473 u32 running, blackout = 0;
1474 u32 *io_mc_regs;
Alex Deucher8c79bae2014-04-16 09:42:22 -04001475 int i, regs_size, ucode_size;
Alex Deucher8b074dd2012-03-20 17:18:18 -04001476
1477 if (!rdev->mc_fw)
1478 return -EINVAL;
1479
Alex Deucher8c79bae2014-04-16 09:42:22 -04001480 ucode_size = rdev->mc_fw->size / 4;
1481
Alex Deucher8b074dd2012-03-20 17:18:18 -04001482 switch (rdev->family) {
1483 case CHIP_TAHITI:
1484 io_mc_regs = (u32 *)&tahiti_io_mc_regs;
Alex Deucher8b074dd2012-03-20 17:18:18 -04001485 regs_size = TAHITI_IO_MC_REGS_SIZE;
1486 break;
1487 case CHIP_PITCAIRN:
1488 io_mc_regs = (u32 *)&pitcairn_io_mc_regs;
Alex Deucher8b074dd2012-03-20 17:18:18 -04001489 regs_size = TAHITI_IO_MC_REGS_SIZE;
1490 break;
1491 case CHIP_VERDE:
1492 default:
1493 io_mc_regs = (u32 *)&verde_io_mc_regs;
Alex Deucher8b074dd2012-03-20 17:18:18 -04001494 regs_size = TAHITI_IO_MC_REGS_SIZE;
1495 break;
Alex Deucherbcc7f5d2012-07-26 18:36:28 -04001496 case CHIP_OLAND:
1497 io_mc_regs = (u32 *)&oland_io_mc_regs;
Alex Deucherbcc7f5d2012-07-26 18:36:28 -04001498 regs_size = TAHITI_IO_MC_REGS_SIZE;
1499 break;
Alex Deucherc04c00b2012-07-31 12:57:45 -04001500 case CHIP_HAINAN:
1501 io_mc_regs = (u32 *)&hainan_io_mc_regs;
Alex Deucherc04c00b2012-07-31 12:57:45 -04001502 regs_size = TAHITI_IO_MC_REGS_SIZE;
1503 break;
Alex Deucher8b074dd2012-03-20 17:18:18 -04001504 }
1505
1506 running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;
1507
1508 if (running == 0) {
1509 if (running) {
1510 blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
1511 WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1);
1512 }
1513
1514 /* reset the engine and set to writable */
1515 WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
1516 WREG32(MC_SEQ_SUP_CNTL, 0x00000010);
1517
1518 /* load mc io regs */
1519 for (i = 0; i < regs_size; i++) {
1520 WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]);
1521 WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
1522 }
1523 /* load the MC ucode */
1524 fw_data = (const __be32 *)rdev->mc_fw->data;
1525 for (i = 0; i < ucode_size; i++)
1526 WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++));
1527
1528 /* put the engine back into the active state */
1529 WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
1530 WREG32(MC_SEQ_SUP_CNTL, 0x00000004);
1531 WREG32(MC_SEQ_SUP_CNTL, 0x00000001);
1532
1533 /* wait for training to complete */
1534 for (i = 0; i < rdev->usec_timeout; i++) {
1535 if (RREG32(MC_SEQ_TRAIN_WAKEUP_CNTL) & TRAIN_DONE_D0)
1536 break;
1537 udelay(1);
1538 }
1539 for (i = 0; i < rdev->usec_timeout; i++) {
1540 if (RREG32(MC_SEQ_TRAIN_WAKEUP_CNTL) & TRAIN_DONE_D1)
1541 break;
1542 udelay(1);
1543 }
1544
1545 if (running)
1546 WREG32(MC_SHARED_BLACKOUT_CNTL, blackout);
1547 }
1548
1549 return 0;
1550}
1551
Alex Deucher0f0de062012-03-20 17:18:17 -04001552static int si_init_microcode(struct radeon_device *rdev)
1553{
Alex Deucher0f0de062012-03-20 17:18:17 -04001554 const char *chip_name;
1555 const char *rlc_chip_name;
1556 size_t pfp_req_size, me_req_size, ce_req_size, rlc_req_size, mc_req_size;
Alex Deucher1ebe9282014-04-11 11:21:49 -04001557 size_t smc_req_size, mc2_req_size;
Alex Deucher0f0de062012-03-20 17:18:17 -04001558 char fw_name[30];
1559 int err;
1560
1561 DRM_DEBUG("\n");
1562
Alex Deucher0f0de062012-03-20 17:18:17 -04001563 switch (rdev->family) {
1564 case CHIP_TAHITI:
1565 chip_name = "TAHITI";
1566 rlc_chip_name = "TAHITI";
1567 pfp_req_size = SI_PFP_UCODE_SIZE * 4;
1568 me_req_size = SI_PM4_UCODE_SIZE * 4;
1569 ce_req_size = SI_CE_UCODE_SIZE * 4;
1570 rlc_req_size = SI_RLC_UCODE_SIZE * 4;
1571 mc_req_size = SI_MC_UCODE_SIZE * 4;
Alex Deucher1ebe9282014-04-11 11:21:49 -04001572 mc2_req_size = TAHITI_MC_UCODE_SIZE * 4;
Alex Deuchera9e61412013-06-25 17:56:16 -04001573 smc_req_size = ALIGN(TAHITI_SMC_UCODE_SIZE, 4);
Alex Deucher0f0de062012-03-20 17:18:17 -04001574 break;
1575 case CHIP_PITCAIRN:
1576 chip_name = "PITCAIRN";
1577 rlc_chip_name = "PITCAIRN";
1578 pfp_req_size = SI_PFP_UCODE_SIZE * 4;
1579 me_req_size = SI_PM4_UCODE_SIZE * 4;
1580 ce_req_size = SI_CE_UCODE_SIZE * 4;
1581 rlc_req_size = SI_RLC_UCODE_SIZE * 4;
1582 mc_req_size = SI_MC_UCODE_SIZE * 4;
Alex Deucher1ebe9282014-04-11 11:21:49 -04001583 mc2_req_size = PITCAIRN_MC_UCODE_SIZE * 4;
Alex Deuchera9e61412013-06-25 17:56:16 -04001584 smc_req_size = ALIGN(PITCAIRN_SMC_UCODE_SIZE, 4);
Alex Deucher0f0de062012-03-20 17:18:17 -04001585 break;
1586 case CHIP_VERDE:
1587 chip_name = "VERDE";
1588 rlc_chip_name = "VERDE";
1589 pfp_req_size = SI_PFP_UCODE_SIZE * 4;
1590 me_req_size = SI_PM4_UCODE_SIZE * 4;
1591 ce_req_size = SI_CE_UCODE_SIZE * 4;
1592 rlc_req_size = SI_RLC_UCODE_SIZE * 4;
1593 mc_req_size = SI_MC_UCODE_SIZE * 4;
Alex Deucher1ebe9282014-04-11 11:21:49 -04001594 mc2_req_size = VERDE_MC_UCODE_SIZE * 4;
Alex Deuchera9e61412013-06-25 17:56:16 -04001595 smc_req_size = ALIGN(VERDE_SMC_UCODE_SIZE, 4);
Alex Deucher0f0de062012-03-20 17:18:17 -04001596 break;
Alex Deucherbcc7f5d2012-07-26 18:36:28 -04001597 case CHIP_OLAND:
1598 chip_name = "OLAND";
1599 rlc_chip_name = "OLAND";
1600 pfp_req_size = SI_PFP_UCODE_SIZE * 4;
1601 me_req_size = SI_PM4_UCODE_SIZE * 4;
1602 ce_req_size = SI_CE_UCODE_SIZE * 4;
1603 rlc_req_size = SI_RLC_UCODE_SIZE * 4;
Alex Deucher1ebe9282014-04-11 11:21:49 -04001604 mc_req_size = mc2_req_size = OLAND_MC_UCODE_SIZE * 4;
Alex Deuchera9e61412013-06-25 17:56:16 -04001605 smc_req_size = ALIGN(OLAND_SMC_UCODE_SIZE, 4);
Alex Deucherbcc7f5d2012-07-26 18:36:28 -04001606 break;
Alex Deucherc04c00b2012-07-31 12:57:45 -04001607 case CHIP_HAINAN:
1608 chip_name = "HAINAN";
1609 rlc_chip_name = "HAINAN";
1610 pfp_req_size = SI_PFP_UCODE_SIZE * 4;
1611 me_req_size = SI_PM4_UCODE_SIZE * 4;
1612 ce_req_size = SI_CE_UCODE_SIZE * 4;
1613 rlc_req_size = SI_RLC_UCODE_SIZE * 4;
Alex Deucher1ebe9282014-04-11 11:21:49 -04001614 mc_req_size = mc2_req_size = OLAND_MC_UCODE_SIZE * 4;
Alex Deuchera9e61412013-06-25 17:56:16 -04001615 smc_req_size = ALIGN(HAINAN_SMC_UCODE_SIZE, 4);
Alex Deucherc04c00b2012-07-31 12:57:45 -04001616 break;
Alex Deucher0f0de062012-03-20 17:18:17 -04001617 default: BUG();
1618 }
1619
1620 DRM_INFO("Loading %s Microcode\n", chip_name);
1621
1622 snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
Jerome Glisse0a168932013-07-11 15:53:01 -04001623 err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
Alex Deucher0f0de062012-03-20 17:18:17 -04001624 if (err)
1625 goto out;
1626 if (rdev->pfp_fw->size != pfp_req_size) {
1627 printk(KERN_ERR
1628 "si_cp: Bogus length %zu in firmware \"%s\"\n",
1629 rdev->pfp_fw->size, fw_name);
1630 err = -EINVAL;
1631 goto out;
1632 }
1633
1634 snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
Jerome Glisse0a168932013-07-11 15:53:01 -04001635 err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
Alex Deucher0f0de062012-03-20 17:18:17 -04001636 if (err)
1637 goto out;
1638 if (rdev->me_fw->size != me_req_size) {
1639 printk(KERN_ERR
1640 "si_cp: Bogus length %zu in firmware \"%s\"\n",
1641 rdev->me_fw->size, fw_name);
1642 err = -EINVAL;
1643 }
1644
1645 snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name);
Jerome Glisse0a168932013-07-11 15:53:01 -04001646 err = request_firmware(&rdev->ce_fw, fw_name, rdev->dev);
Alex Deucher0f0de062012-03-20 17:18:17 -04001647 if (err)
1648 goto out;
1649 if (rdev->ce_fw->size != ce_req_size) {
1650 printk(KERN_ERR
1651 "si_cp: Bogus length %zu in firmware \"%s\"\n",
1652 rdev->ce_fw->size, fw_name);
1653 err = -EINVAL;
1654 }
1655
1656 snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
Jerome Glisse0a168932013-07-11 15:53:01 -04001657 err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
Alex Deucher0f0de062012-03-20 17:18:17 -04001658 if (err)
1659 goto out;
1660 if (rdev->rlc_fw->size != rlc_req_size) {
1661 printk(KERN_ERR
1662 "si_rlc: Bogus length %zu in firmware \"%s\"\n",
1663 rdev->rlc_fw->size, fw_name);
1664 err = -EINVAL;
1665 }
1666
Alex Deucher1ebe9282014-04-11 11:21:49 -04001667 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc2.bin", chip_name);
Jerome Glisse0a168932013-07-11 15:53:01 -04001668 err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
Alex Deucher1ebe9282014-04-11 11:21:49 -04001669 if (err) {
1670 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
1671 err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
1672 if (err)
1673 goto out;
1674 }
1675 if ((rdev->mc_fw->size != mc_req_size) &&
1676 (rdev->mc_fw->size != mc2_req_size)) {
Alex Deucher0f0de062012-03-20 17:18:17 -04001677 printk(KERN_ERR
1678 "si_mc: Bogus length %zu in firmware \"%s\"\n",
1679 rdev->mc_fw->size, fw_name);
1680 err = -EINVAL;
1681 }
Alex Deucher1ebe9282014-04-11 11:21:49 -04001682 DRM_INFO("%s: %zu bytes\n", fw_name, rdev->mc_fw->size);
Alex Deucher0f0de062012-03-20 17:18:17 -04001683
Alex Deuchera9e61412013-06-25 17:56:16 -04001684 snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
Jerome Glisse0a168932013-07-11 15:53:01 -04001685 err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
Alex Deucher8a53fa22013-08-07 16:09:08 -04001686 if (err) {
1687 printk(KERN_ERR
1688 "smc: error loading firmware \"%s\"\n",
1689 fw_name);
1690 release_firmware(rdev->smc_fw);
1691 rdev->smc_fw = NULL;
Alex Deucherd8367112013-10-16 11:36:30 -04001692 err = 0;
Alex Deucher8a53fa22013-08-07 16:09:08 -04001693 } else if (rdev->smc_fw->size != smc_req_size) {
Alex Deuchera9e61412013-06-25 17:56:16 -04001694 printk(KERN_ERR
1695 "si_smc: Bogus length %zu in firmware \"%s\"\n",
1696 rdev->smc_fw->size, fw_name);
1697 err = -EINVAL;
1698 }
1699
Alex Deucher0f0de062012-03-20 17:18:17 -04001700out:
Alex Deucher0f0de062012-03-20 17:18:17 -04001701 if (err) {
1702 if (err != -EINVAL)
1703 printk(KERN_ERR
1704 "si_cp: Failed to load firmware \"%s\"\n",
1705 fw_name);
1706 release_firmware(rdev->pfp_fw);
1707 rdev->pfp_fw = NULL;
1708 release_firmware(rdev->me_fw);
1709 rdev->me_fw = NULL;
1710 release_firmware(rdev->ce_fw);
1711 rdev->ce_fw = NULL;
1712 release_firmware(rdev->rlc_fw);
1713 rdev->rlc_fw = NULL;
1714 release_firmware(rdev->mc_fw);
1715 rdev->mc_fw = NULL;
Alex Deuchera9e61412013-06-25 17:56:16 -04001716 release_firmware(rdev->smc_fw);
1717 rdev->smc_fw = NULL;
Alex Deucher0f0de062012-03-20 17:18:17 -04001718 }
1719 return err;
1720}
1721
Alex Deucher43b3cd92012-03-20 17:18:00 -04001722/* watermark setup */
1723static u32 dce6_line_buffer_adjust(struct radeon_device *rdev,
1724 struct radeon_crtc *radeon_crtc,
1725 struct drm_display_mode *mode,
1726 struct drm_display_mode *other_mode)
1727{
Alex Deucher290d2452013-08-19 11:15:43 -04001728 u32 tmp, buffer_alloc, i;
1729 u32 pipe_offset = radeon_crtc->crtc_id * 0x20;
Alex Deucher43b3cd92012-03-20 17:18:00 -04001730 /*
1731 * Line Buffer Setup
1732 * There are 3 line buffers, each one shared by 2 display controllers.
1733 * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between
1734 * the display controllers. The paritioning is done via one of four
1735 * preset allocations specified in bits 21:20:
1736 * 0 - half lb
1737 * 2 - whole lb, other crtc must be disabled
1738 */
1739 /* this can get tricky if we have two large displays on a paired group
1740 * of crtcs. Ideally for multiple large displays we'd assign them to
1741 * non-linked crtcs for maximum line buffer allocation.
1742 */
1743 if (radeon_crtc->base.enabled && mode) {
Alex Deucher290d2452013-08-19 11:15:43 -04001744 if (other_mode) {
Alex Deucher43b3cd92012-03-20 17:18:00 -04001745 tmp = 0; /* 1/2 */
Alex Deucher290d2452013-08-19 11:15:43 -04001746 buffer_alloc = 1;
1747 } else {
Alex Deucher43b3cd92012-03-20 17:18:00 -04001748 tmp = 2; /* whole */
Alex Deucher290d2452013-08-19 11:15:43 -04001749 buffer_alloc = 2;
1750 }
1751 } else {
Alex Deucher43b3cd92012-03-20 17:18:00 -04001752 tmp = 0;
Alex Deucher290d2452013-08-19 11:15:43 -04001753 buffer_alloc = 0;
1754 }
Alex Deucher43b3cd92012-03-20 17:18:00 -04001755
1756 WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset,
1757 DC_LB_MEMORY_CONFIG(tmp));
1758
Alex Deucher290d2452013-08-19 11:15:43 -04001759 WREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
1760 DMIF_BUFFERS_ALLOCATED(buffer_alloc));
1761 for (i = 0; i < rdev->usec_timeout; i++) {
1762 if (RREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
1763 DMIF_BUFFERS_ALLOCATED_COMPLETED)
1764 break;
1765 udelay(1);
1766 }
1767
Alex Deucher43b3cd92012-03-20 17:18:00 -04001768 if (radeon_crtc->base.enabled && mode) {
1769 switch (tmp) {
1770 case 0:
1771 default:
1772 return 4096 * 2;
1773 case 2:
1774 return 8192 * 2;
1775 }
1776 }
1777
1778 /* controller not enabled, so no lb used */
1779 return 0;
1780}
1781
Alex Deucherca7db222012-03-20 17:18:30 -04001782static u32 si_get_number_of_dram_channels(struct radeon_device *rdev)
Alex Deucher43b3cd92012-03-20 17:18:00 -04001783{
1784 u32 tmp = RREG32(MC_SHARED_CHMAP);
1785
1786 switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
1787 case 0:
1788 default:
1789 return 1;
1790 case 1:
1791 return 2;
1792 case 2:
1793 return 4;
1794 case 3:
1795 return 8;
1796 case 4:
1797 return 3;
1798 case 5:
1799 return 6;
1800 case 6:
1801 return 10;
1802 case 7:
1803 return 12;
1804 case 8:
1805 return 16;
1806 }
1807}
1808
/* inputs to the dce6 display watermark calculations below */
struct dce6_wm_params {
	u32 dram_channels; /* number of dram channels */
	u32 yclk;          /* bandwidth per dram data pin in kHz */
	u32 sclk;          /* engine clock in kHz */
	u32 disp_clk;      /* display clock in kHz */
	u32 src_width;     /* viewport width */
	u32 active_time;   /* active display time in ns */
	u32 blank_time;    /* blank time in ns */
	bool interlaced;    /* mode is interlaced */
	fixed20_12 vsc;    /* vertical scale ratio */
	u32 num_heads;     /* number of active crtcs */
	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
	u32 lb_size;       /* line buffer allocated to pipe */
	u32 vtaps;         /* vertical scaler taps */
};
1824
1825static u32 dce6_dram_bandwidth(struct dce6_wm_params *wm)
1826{
1827 /* Calculate raw DRAM Bandwidth */
1828 fixed20_12 dram_efficiency; /* 0.7 */
1829 fixed20_12 yclk, dram_channels, bandwidth;
1830 fixed20_12 a;
1831
1832 a.full = dfixed_const(1000);
1833 yclk.full = dfixed_const(wm->yclk);
1834 yclk.full = dfixed_div(yclk, a);
1835 dram_channels.full = dfixed_const(wm->dram_channels * 4);
1836 a.full = dfixed_const(10);
1837 dram_efficiency.full = dfixed_const(7);
1838 dram_efficiency.full = dfixed_div(dram_efficiency, a);
1839 bandwidth.full = dfixed_mul(dram_channels, yclk);
1840 bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);
1841
1842 return dfixed_trunc(bandwidth);
1843}
1844
1845static u32 dce6_dram_bandwidth_for_display(struct dce6_wm_params *wm)
1846{
1847 /* Calculate DRAM Bandwidth and the part allocated to display. */
1848 fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
1849 fixed20_12 yclk, dram_channels, bandwidth;
1850 fixed20_12 a;
1851
1852 a.full = dfixed_const(1000);
1853 yclk.full = dfixed_const(wm->yclk);
1854 yclk.full = dfixed_div(yclk, a);
1855 dram_channels.full = dfixed_const(wm->dram_channels * 4);
1856 a.full = dfixed_const(10);
1857 disp_dram_allocation.full = dfixed_const(3); /* XXX worse case value 0.3 */
1858 disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
1859 bandwidth.full = dfixed_mul(dram_channels, yclk);
1860 bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);
1861
1862 return dfixed_trunc(bandwidth);
1863}
1864
1865static u32 dce6_data_return_bandwidth(struct dce6_wm_params *wm)
1866{
1867 /* Calculate the display Data return Bandwidth */
1868 fixed20_12 return_efficiency; /* 0.8 */
1869 fixed20_12 sclk, bandwidth;
1870 fixed20_12 a;
1871
1872 a.full = dfixed_const(1000);
1873 sclk.full = dfixed_const(wm->sclk);
1874 sclk.full = dfixed_div(sclk, a);
1875 a.full = dfixed_const(10);
1876 return_efficiency.full = dfixed_const(8);
1877 return_efficiency.full = dfixed_div(return_efficiency, a);
1878 a.full = dfixed_const(32);
1879 bandwidth.full = dfixed_mul(a, sclk);
1880 bandwidth.full = dfixed_mul(bandwidth, return_efficiency);
1881
1882 return dfixed_trunc(bandwidth);
1883}
1884
/* DMIF request size in bytes; fixed for DCE6 regardless of mode. */
static u32 dce6_get_dmif_bytes_per_request(struct dce6_wm_params *wm)
{
	return 32;
}
1889
1890static u32 dce6_dmif_request_bandwidth(struct dce6_wm_params *wm)
1891{
1892 /* Calculate the DMIF Request Bandwidth */
1893 fixed20_12 disp_clk_request_efficiency; /* 0.8 */
1894 fixed20_12 disp_clk, sclk, bandwidth;
1895 fixed20_12 a, b1, b2;
1896 u32 min_bandwidth;
1897
1898 a.full = dfixed_const(1000);
1899 disp_clk.full = dfixed_const(wm->disp_clk);
1900 disp_clk.full = dfixed_div(disp_clk, a);
1901 a.full = dfixed_const(dce6_get_dmif_bytes_per_request(wm) / 2);
1902 b1.full = dfixed_mul(a, disp_clk);
1903
1904 a.full = dfixed_const(1000);
1905 sclk.full = dfixed_const(wm->sclk);
1906 sclk.full = dfixed_div(sclk, a);
1907 a.full = dfixed_const(dce6_get_dmif_bytes_per_request(wm));
1908 b2.full = dfixed_mul(a, sclk);
1909
1910 a.full = dfixed_const(10);
1911 disp_clk_request_efficiency.full = dfixed_const(8);
1912 disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
1913
1914 min_bandwidth = min(dfixed_trunc(b1), dfixed_trunc(b2));
1915
1916 a.full = dfixed_const(min_bandwidth);
1917 bandwidth.full = dfixed_mul(a, disp_clk_request_efficiency);
1918
1919 return dfixed_trunc(bandwidth);
1920}
1921
1922static u32 dce6_available_bandwidth(struct dce6_wm_params *wm)
1923{
1924 /* Calculate the Available bandwidth. Display can use this temporarily but not in average. */
1925 u32 dram_bandwidth = dce6_dram_bandwidth(wm);
1926 u32 data_return_bandwidth = dce6_data_return_bandwidth(wm);
1927 u32 dmif_req_bandwidth = dce6_dmif_request_bandwidth(wm);
1928
1929 return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
1930}
1931
1932static u32 dce6_average_bandwidth(struct dce6_wm_params *wm)
1933{
1934 /* Calculate the display mode Average Bandwidth
1935 * DisplayMode should contain the source and destination dimensions,
1936 * timing, etc.
1937 */
1938 fixed20_12 bpp;
1939 fixed20_12 line_time;
1940 fixed20_12 src_width;
1941 fixed20_12 bandwidth;
1942 fixed20_12 a;
1943
1944 a.full = dfixed_const(1000);
1945 line_time.full = dfixed_const(wm->active_time + wm->blank_time);
1946 line_time.full = dfixed_div(line_time, a);
1947 bpp.full = dfixed_const(wm->bytes_per_pixel);
1948 src_width.full = dfixed_const(wm->src_width);
1949 bandwidth.full = dfixed_mul(src_width, bpp);
1950 bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
1951 bandwidth.full = dfixed_div(bandwidth, line_time);
1952
1953 return dfixed_trunc(bandwidth);
1954}
1955
/* Compute the latency watermark in ns for one head: the worst-case time the
 * display can wait for data before the line buffer underflows. Combines MC
 * latency, the time other heads' chunk/cursor requests occupy the return
 * path, and DC pipe latency, then extends it if the line buffer cannot be
 * refilled within the active display time.
 *
 * NOTE(review): worst_chunk_return_time and cursor_line_pair_return_time
 * divide by available_bandwidth before any validation — presumably the
 * callers guarantee it is non-zero for an enabled head; confirm.
 */
static u32 dce6_latency_watermark(struct dce6_wm_params *wm)
{
	/* First calculate the latency in ns */
	u32 mc_latency = 2000; /* 2000 ns. */
	u32 available_bandwidth = dce6_available_bandwidth(wm);
	/* time for one 512-dword chunk to come back, in ns */
	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
	/* time for one cursor line pair (128 * 4 bytes) to come back, in ns */
	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
	/* wait caused by the other heads' outstanding requests */
	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
		(wm->num_heads * cursor_line_pair_return_time);
	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
	u32 tmp, dmif_size = 12288;
	fixed20_12 a, b, c;

	/* no active heads: nothing to wait for */
	if (wm->num_heads == 0)
		return 0;

	/* scaling/interlacing determines how many source lines feed one
	 * destination line
	 */
	a.full = dfixed_const(2);
	b.full = dfixed_const(1);
	if ((wm->vsc.full > a.full) ||
	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
	    (wm->vtaps >= 5) ||
	    ((wm->vsc.full >= a.full) && wm->interlaced))
		max_src_lines_per_dst_line = 4;
	else
		max_src_lines_per_dst_line = 2;

	/* line buffer fill rate: min of this head's bandwidth share ... */
	a.full = dfixed_const(available_bandwidth);
	b.full = dfixed_const(wm->num_heads);
	a.full = dfixed_div(a, b);

	/* ... the DMIF buffer drain rate ... */
	b.full = dfixed_const(mc_latency + 512);
	c.full = dfixed_const(wm->disp_clk);
	b.full = dfixed_div(b, c);

	c.full = dfixed_const(dmif_size);
	b.full = dfixed_div(c, b);

	tmp = min(dfixed_trunc(a), dfixed_trunc(b));

	/* ... and the display clock consumption rate */
	b.full = dfixed_const(1000);
	c.full = dfixed_const(wm->disp_clk);
	b.full = dfixed_div(c, b);
	c.full = dfixed_const(wm->bytes_per_pixel);
	b.full = dfixed_mul(b, c);

	lb_fill_bw = min(tmp, dfixed_trunc(b));

	/* time to fill the worst-case number of source lines at that rate */
	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
	b.full = dfixed_const(1000);
	c.full = dfixed_const(lb_fill_bw);
	b.full = dfixed_div(c, b);
	a.full = dfixed_div(a, b);
	line_fill_time = dfixed_trunc(a);

	/* if the refill doesn't fit in the active time, the overflow adds
	 * to the latency the hardware must tolerate
	 */
	if (line_fill_time < wm->active_time)
		return latency;
	else
		return latency + (line_fill_time - wm->active_time);

}
2018
2019static bool dce6_average_bandwidth_vs_dram_bandwidth_for_display(struct dce6_wm_params *wm)
2020{
2021 if (dce6_average_bandwidth(wm) <=
2022 (dce6_dram_bandwidth_for_display(wm) / wm->num_heads))
2023 return true;
2024 else
2025 return false;
2026};
2027
2028static bool dce6_average_bandwidth_vs_available_bandwidth(struct dce6_wm_params *wm)
2029{
2030 if (dce6_average_bandwidth(wm) <=
2031 (dce6_available_bandwidth(wm) / wm->num_heads))
2032 return true;
2033 else
2034 return false;
2035};
2036
2037static bool dce6_check_latency_hiding(struct dce6_wm_params *wm)
2038{
2039 u32 lb_partitions = wm->lb_size / wm->src_width;
2040 u32 line_time = wm->active_time + wm->blank_time;
2041 u32 latency_tolerant_lines;
2042 u32 latency_hiding;
2043 fixed20_12 a;
2044
2045 a.full = dfixed_const(1);
2046 if (wm->vsc.full > a.full)
2047 latency_tolerant_lines = 1;
2048 else {
2049 if (lb_partitions <= (wm->vtaps + 1))
2050 latency_tolerant_lines = 1;
2051 else
2052 latency_tolerant_lines = 2;
2053 }
2054
2055 latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);
2056
2057 if (dce6_latency_watermark(wm) <= latency_hiding)
2058 return true;
2059 else
2060 return false;
2061}
2062
/* Program the DCE6 latency watermarks and priority marks for one crtc.
 *
 * Builds two parameter sets — wm_high for the highest clocks and wm_low for
 * the lowest (taken from DPM when enabled, otherwise the current clocks) —
 * computes a latency watermark for each, and writes them as watermark sets
 * A and B of DPG_PIPE_LATENCY_CONTROL. If either set cannot sustain the
 * mode's average bandwidth or hide the latency, display priority is forced
 * to always-on. The computed values are also cached on the crtc for DPM.
 *
 * @rdev: radeon_device pointer
 * @radeon_crtc: the crtc being programmed
 * @lb_size: line buffer size allocated to this crtc (from
 *           dce6_line_buffer_adjust())
 * @num_heads: total number of enabled crtcs
 */
static void dce6_program_watermarks(struct radeon_device *rdev,
					 struct radeon_crtc *radeon_crtc,
					 u32 lb_size, u32 num_heads)
{
	struct drm_display_mode *mode = &radeon_crtc->base.mode;
	struct dce6_wm_params wm_low, wm_high;
	u32 dram_channels;
	u32 pixel_period;
	u32 line_time = 0;
	u32 latency_watermark_a = 0, latency_watermark_b = 0;
	u32 priority_a_mark = 0, priority_b_mark = 0;
	u32 priority_a_cnt = PRIORITY_OFF;
	u32 priority_b_cnt = PRIORITY_OFF;
	u32 tmp, arb_control3;
	fixed20_12 a, b, c;

	if (radeon_crtc->base.enabled && num_heads && mode) {
		/* pixel period in ns; line_time capped to the 16-bit
		 * register field
		 */
		pixel_period = 1000000 / (u32)mode->clock;
		line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
		priority_a_cnt = 0;
		priority_b_cnt = 0;

		/* Aruba (TN) uses the evergreen memory controller */
		if (rdev->family == CHIP_ARUBA)
			dram_channels = evergreen_get_number_of_dram_channels(rdev);
		else
			dram_channels = si_get_number_of_dram_channels(rdev);

		/* watermark for high clocks */
		if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
			wm_high.yclk =
				radeon_dpm_get_mclk(rdev, false) * 10;
			wm_high.sclk =
				radeon_dpm_get_sclk(rdev, false) * 10;
		} else {
			wm_high.yclk = rdev->pm.current_mclk * 10;
			wm_high.sclk = rdev->pm.current_sclk * 10;
		}

		wm_high.disp_clk = mode->clock;
		wm_high.src_width = mode->crtc_hdisplay;
		wm_high.active_time = mode->crtc_hdisplay * pixel_period;
		wm_high.blank_time = line_time - wm_high.active_time;
		wm_high.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm_high.interlaced = true;
		wm_high.vsc = radeon_crtc->vsc;
		wm_high.vtaps = 1;
		if (radeon_crtc->rmx_type != RMX_OFF)
			wm_high.vtaps = 2;
		wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
		wm_high.lb_size = lb_size;
		wm_high.dram_channels = dram_channels;
		wm_high.num_heads = num_heads;

		/* watermark for low clocks */
		if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
			wm_low.yclk =
				radeon_dpm_get_mclk(rdev, true) * 10;
			wm_low.sclk =
				radeon_dpm_get_sclk(rdev, true) * 10;
		} else {
			wm_low.yclk = rdev->pm.current_mclk * 10;
			wm_low.sclk = rdev->pm.current_sclk * 10;
		}

		wm_low.disp_clk = mode->clock;
		wm_low.src_width = mode->crtc_hdisplay;
		wm_low.active_time = mode->crtc_hdisplay * pixel_period;
		wm_low.blank_time = line_time - wm_low.active_time;
		wm_low.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm_low.interlaced = true;
		wm_low.vsc = radeon_crtc->vsc;
		wm_low.vtaps = 1;
		if (radeon_crtc->rmx_type != RMX_OFF)
			wm_low.vtaps = 2;
		wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
		wm_low.lb_size = lb_size;
		wm_low.dram_channels = dram_channels;
		wm_low.num_heads = num_heads;

		/* set for high clocks */
		latency_watermark_a = min(dce6_latency_watermark(&wm_high), (u32)65535);
		/* set for low clocks */
		latency_watermark_b = min(dce6_latency_watermark(&wm_low), (u32)65535);

		/* possibly force display priority to high */
		/* should really do this at mode validation time... */
		if (!dce6_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
		    !dce6_average_bandwidth_vs_available_bandwidth(&wm_high) ||
		    !dce6_check_latency_hiding(&wm_high) ||
		    (rdev->disp_priority == 2)) {
			DRM_DEBUG_KMS("force priority to high\n");
			priority_a_cnt |= PRIORITY_ALWAYS_ON;
			priority_b_cnt |= PRIORITY_ALWAYS_ON;
		}
		if (!dce6_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
		    !dce6_average_bandwidth_vs_available_bandwidth(&wm_low) ||
		    !dce6_check_latency_hiding(&wm_low) ||
		    (rdev->disp_priority == 2)) {
			DRM_DEBUG_KMS("force priority to high\n");
			priority_a_cnt |= PRIORITY_ALWAYS_ON;
			priority_b_cnt |= PRIORITY_ALWAYS_ON;
		}

		/* priority mark A: watermark_a(ns) * pixel clock(MHz) *
		 * hsc / 16, in units of 16 pixels
		 */
		a.full = dfixed_const(1000);
		b.full = dfixed_const(mode->clock);
		b.full = dfixed_div(b, a);
		c.full = dfixed_const(latency_watermark_a);
		c.full = dfixed_mul(c, b);
		c.full = dfixed_mul(c, radeon_crtc->hsc);
		c.full = dfixed_div(c, a);
		a.full = dfixed_const(16);
		c.full = dfixed_div(c, a);
		priority_a_mark = dfixed_trunc(c);
		priority_a_cnt |= priority_a_mark & PRIORITY_MARK_MASK;

		/* priority mark B: same computation with watermark_b */
		a.full = dfixed_const(1000);
		b.full = dfixed_const(mode->clock);
		b.full = dfixed_div(b, a);
		c.full = dfixed_const(latency_watermark_b);
		c.full = dfixed_mul(c, b);
		c.full = dfixed_mul(c, radeon_crtc->hsc);
		c.full = dfixed_div(c, a);
		a.full = dfixed_const(16);
		c.full = dfixed_div(c, a);
		priority_b_mark = dfixed_trunc(c);
		priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
	}

	/* select wm A */
	arb_control3 = RREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset);
	tmp = arb_control3;
	tmp &= ~LATENCY_WATERMARK_MASK(3);
	tmp |= LATENCY_WATERMARK_MASK(1);
	WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset, tmp);
	WREG32(DPG_PIPE_LATENCY_CONTROL + radeon_crtc->crtc_offset,
	       (LATENCY_LOW_WATERMARK(latency_watermark_a) |
		LATENCY_HIGH_WATERMARK(line_time)));
	/* select wm B */
	tmp = RREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset);
	tmp &= ~LATENCY_WATERMARK_MASK(3);
	tmp |= LATENCY_WATERMARK_MASK(2);
	WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset, tmp);
	WREG32(DPG_PIPE_LATENCY_CONTROL + radeon_crtc->crtc_offset,
	       (LATENCY_LOW_WATERMARK(latency_watermark_b) |
		LATENCY_HIGH_WATERMARK(line_time)));
	/* restore original selection */
	WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset, arb_control3);

	/* save values for DPM */
	radeon_crtc->line_time = line_time;
	radeon_crtc->wm_high = latency_watermark_a;
	radeon_crtc->wm_low = latency_watermark_b;
}
2222
2223void dce6_bandwidth_update(struct radeon_device *rdev)
2224{
2225 struct drm_display_mode *mode0 = NULL;
2226 struct drm_display_mode *mode1 = NULL;
2227 u32 num_heads = 0, lb_size;
2228 int i;
2229
2230 radeon_update_display_priority(rdev);
2231
2232 for (i = 0; i < rdev->num_crtc; i++) {
2233 if (rdev->mode_info.crtcs[i]->base.enabled)
2234 num_heads++;
2235 }
2236 for (i = 0; i < rdev->num_crtc; i += 2) {
2237 mode0 = &rdev->mode_info.crtcs[i]->base.mode;
2238 mode1 = &rdev->mode_info.crtcs[i+1]->base.mode;
2239 lb_size = dce6_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i], mode0, mode1);
2240 dce6_program_watermarks(rdev, rdev->mode_info.crtcs[i], lb_size, num_heads);
2241 lb_size = dce6_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i+1], mode1, mode0);
2242 dce6_program_watermarks(rdev, rdev->mode_info.crtcs[i+1], lb_size, num_heads);
2243 }
2244}
2245
Alex Deucher0a96d722012-03-20 17:18:11 -04002246/*
2247 * Core functions
2248 */
Alex Deucher0a96d722012-03-20 17:18:11 -04002249static void si_tiling_mode_table_init(struct radeon_device *rdev)
2250{
2251 const u32 num_tile_mode_states = 32;
2252 u32 reg_offset, gb_tile_moden, split_equal_to_row_size;
2253
2254 switch (rdev->config.si.mem_row_size_in_kb) {
2255 case 1:
2256 split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_1KB;
2257 break;
2258 case 2:
2259 default:
2260 split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_2KB;
2261 break;
2262 case 4:
2263 split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_4KB;
2264 break;
2265 }
2266
2267 if ((rdev->family == CHIP_TAHITI) ||
2268 (rdev->family == CHIP_PITCAIRN)) {
2269 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
2270 switch (reg_offset) {
2271 case 0: /* non-AA compressed depth or any compressed stencil */
2272 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2273 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2274 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2275 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2276 NUM_BANKS(ADDR_SURF_16_BANK) |
2277 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2278 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2279 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2280 break;
2281 case 1: /* 2xAA/4xAA compressed depth only */
2282 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2283 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2284 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2285 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
2286 NUM_BANKS(ADDR_SURF_16_BANK) |
2287 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2288 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2289 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2290 break;
2291 case 2: /* 8xAA compressed depth only */
2292 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2293 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2294 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2295 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2296 NUM_BANKS(ADDR_SURF_16_BANK) |
2297 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2298 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2299 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2300 break;
2301 case 3: /* 2xAA/4xAA compressed depth with stencil (for depth buffer) */
2302 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2303 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2304 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2305 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
2306 NUM_BANKS(ADDR_SURF_16_BANK) |
2307 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2308 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2309 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2310 break;
2311 case 4: /* Maps w/ a dimension less than the 2D macro-tile dimensions (for mipmapped depth textures) */
2312 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2313 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2314 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2315 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2316 NUM_BANKS(ADDR_SURF_16_BANK) |
2317 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2318 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2319 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2320 break;
2321 case 5: /* Uncompressed 16bpp depth - and stencil buffer allocated with it */
2322 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2323 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2324 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2325 TILE_SPLIT(split_equal_to_row_size) |
2326 NUM_BANKS(ADDR_SURF_16_BANK) |
2327 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2328 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2329 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2330 break;
2331 case 6: /* Uncompressed 32bpp depth - and stencil buffer allocated with it */
2332 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2333 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2334 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2335 TILE_SPLIT(split_equal_to_row_size) |
2336 NUM_BANKS(ADDR_SURF_16_BANK) |
2337 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2338 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2339 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
2340 break;
2341 case 7: /* Uncompressed 8bpp stencil without depth (drivers typically do not use) */
2342 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2343 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2344 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2345 TILE_SPLIT(split_equal_to_row_size) |
2346 NUM_BANKS(ADDR_SURF_16_BANK) |
2347 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2348 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2349 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2350 break;
2351 case 8: /* 1D and 1D Array Surfaces */
2352 gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
2353 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
2354 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2355 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2356 NUM_BANKS(ADDR_SURF_16_BANK) |
2357 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2358 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2359 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2360 break;
2361 case 9: /* Displayable maps. */
2362 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2363 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
2364 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2365 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2366 NUM_BANKS(ADDR_SURF_16_BANK) |
2367 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2368 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2369 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2370 break;
2371 case 10: /* Display 8bpp. */
2372 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2373 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
2374 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2375 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2376 NUM_BANKS(ADDR_SURF_16_BANK) |
2377 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2378 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2379 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2380 break;
2381 case 11: /* Display 16bpp. */
2382 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2383 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
2384 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2385 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2386 NUM_BANKS(ADDR_SURF_16_BANK) |
2387 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2388 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2389 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2390 break;
2391 case 12: /* Display 32bpp. */
2392 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2393 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
2394 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2395 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
2396 NUM_BANKS(ADDR_SURF_16_BANK) |
2397 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2398 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2399 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
2400 break;
2401 case 13: /* Thin. */
2402 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2403 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2404 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2405 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2406 NUM_BANKS(ADDR_SURF_16_BANK) |
2407 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2408 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2409 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2410 break;
2411 case 14: /* Thin 8 bpp. */
2412 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2413 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2414 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2415 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2416 NUM_BANKS(ADDR_SURF_16_BANK) |
2417 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2418 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2419 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
2420 break;
2421 case 15: /* Thin 16 bpp. */
2422 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2423 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2424 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2425 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2426 NUM_BANKS(ADDR_SURF_16_BANK) |
2427 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2428 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2429 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
2430 break;
2431 case 16: /* Thin 32 bpp. */
2432 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2433 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2434 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2435 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
2436 NUM_BANKS(ADDR_SURF_16_BANK) |
2437 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2438 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2439 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
2440 break;
2441 case 17: /* Thin 64 bpp. */
2442 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2443 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2444 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2445 TILE_SPLIT(split_equal_to_row_size) |
2446 NUM_BANKS(ADDR_SURF_16_BANK) |
2447 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2448 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2449 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
2450 break;
2451 case 21: /* 8 bpp PRT. */
2452 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2453 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2454 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2455 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2456 NUM_BANKS(ADDR_SURF_16_BANK) |
2457 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
2458 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2459 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2460 break;
2461 case 22: /* 16 bpp PRT */
2462 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2463 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2464 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2465 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2466 NUM_BANKS(ADDR_SURF_16_BANK) |
2467 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2468 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2469 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
2470 break;
2471 case 23: /* 32 bpp PRT */
2472 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2473 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2474 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2475 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2476 NUM_BANKS(ADDR_SURF_16_BANK) |
2477 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2478 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2479 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2480 break;
2481 case 24: /* 64 bpp PRT */
2482 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2483 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2484 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2485 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
2486 NUM_BANKS(ADDR_SURF_16_BANK) |
2487 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2488 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2489 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2490 break;
2491 case 25: /* 128 bpp PRT */
2492 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2493 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2494 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2495 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
2496 NUM_BANKS(ADDR_SURF_8_BANK) |
2497 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2498 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2499 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
2500 break;
2501 default:
2502 gb_tile_moden = 0;
2503 break;
2504 }
Jerome Glisse64d7b8b2013-04-09 11:17:08 -04002505 rdev->config.si.tile_mode_array[reg_offset] = gb_tile_moden;
Alex Deucher0a96d722012-03-20 17:18:11 -04002506 WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
2507 }
Alex Deucherd0ae7fc2012-07-26 17:42:25 -04002508 } else if ((rdev->family == CHIP_VERDE) ||
Alex Deucher8b028592012-07-31 12:42:48 -04002509 (rdev->family == CHIP_OLAND) ||
2510 (rdev->family == CHIP_HAINAN)) {
Alex Deucher0a96d722012-03-20 17:18:11 -04002511 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
2512 switch (reg_offset) {
2513 case 0: /* non-AA compressed depth or any compressed stencil */
2514 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2515 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2516 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2517 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2518 NUM_BANKS(ADDR_SURF_16_BANK) |
2519 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2520 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2521 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
2522 break;
2523 case 1: /* 2xAA/4xAA compressed depth only */
2524 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2525 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2526 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2527 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
2528 NUM_BANKS(ADDR_SURF_16_BANK) |
2529 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2530 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2531 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
2532 break;
2533 case 2: /* 8xAA compressed depth only */
2534 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2535 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2536 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2537 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2538 NUM_BANKS(ADDR_SURF_16_BANK) |
2539 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2540 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2541 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
2542 break;
2543 case 3: /* 2xAA/4xAA compressed depth with stencil (for depth buffer) */
2544 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2545 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2546 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2547 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
2548 NUM_BANKS(ADDR_SURF_16_BANK) |
2549 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2550 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2551 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
2552 break;
2553 case 4: /* Maps w/ a dimension less than the 2D macro-tile dimensions (for mipmapped depth textures) */
2554 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2555 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2556 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2557 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2558 NUM_BANKS(ADDR_SURF_16_BANK) |
2559 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2560 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2561 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2562 break;
2563 case 5: /* Uncompressed 16bpp depth - and stencil buffer allocated with it */
2564 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2565 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2566 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2567 TILE_SPLIT(split_equal_to_row_size) |
2568 NUM_BANKS(ADDR_SURF_16_BANK) |
2569 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2570 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2571 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2572 break;
2573 case 6: /* Uncompressed 32bpp depth - and stencil buffer allocated with it */
2574 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2575 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2576 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2577 TILE_SPLIT(split_equal_to_row_size) |
2578 NUM_BANKS(ADDR_SURF_16_BANK) |
2579 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2580 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2581 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2582 break;
2583 case 7: /* Uncompressed 8bpp stencil without depth (drivers typically do not use) */
2584 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2585 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2586 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2587 TILE_SPLIT(split_equal_to_row_size) |
2588 NUM_BANKS(ADDR_SURF_16_BANK) |
2589 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2590 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2591 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
2592 break;
2593 case 8: /* 1D and 1D Array Surfaces */
2594 gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
2595 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
2596 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2597 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2598 NUM_BANKS(ADDR_SURF_16_BANK) |
2599 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2600 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2601 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2602 break;
2603 case 9: /* Displayable maps. */
2604 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2605 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
2606 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2607 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2608 NUM_BANKS(ADDR_SURF_16_BANK) |
2609 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2610 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2611 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2612 break;
2613 case 10: /* Display 8bpp. */
2614 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2615 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
2616 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2617 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2618 NUM_BANKS(ADDR_SURF_16_BANK) |
2619 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2620 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2621 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
2622 break;
2623 case 11: /* Display 16bpp. */
2624 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2625 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
2626 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2627 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2628 NUM_BANKS(ADDR_SURF_16_BANK) |
2629 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2630 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2631 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2632 break;
2633 case 12: /* Display 32bpp. */
2634 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2635 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
2636 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2637 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
2638 NUM_BANKS(ADDR_SURF_16_BANK) |
2639 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2640 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2641 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2642 break;
2643 case 13: /* Thin. */
2644 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2645 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2646 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2647 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2648 NUM_BANKS(ADDR_SURF_16_BANK) |
2649 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2650 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2651 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2652 break;
2653 case 14: /* Thin 8 bpp. */
2654 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2655 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2656 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2657 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2658 NUM_BANKS(ADDR_SURF_16_BANK) |
2659 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2660 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2661 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2662 break;
2663 case 15: /* Thin 16 bpp. */
2664 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2665 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2666 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2667 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2668 NUM_BANKS(ADDR_SURF_16_BANK) |
2669 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2670 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2671 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2672 break;
2673 case 16: /* Thin 32 bpp. */
2674 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2675 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2676 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2677 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
2678 NUM_BANKS(ADDR_SURF_16_BANK) |
2679 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2680 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2681 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2682 break;
2683 case 17: /* Thin 64 bpp. */
2684 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2685 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2686 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2687 TILE_SPLIT(split_equal_to_row_size) |
2688 NUM_BANKS(ADDR_SURF_16_BANK) |
2689 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2690 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2691 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2692 break;
2693 case 21: /* 8 bpp PRT. */
2694 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2695 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2696 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2697 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2698 NUM_BANKS(ADDR_SURF_16_BANK) |
2699 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
2700 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2701 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2702 break;
2703 case 22: /* 16 bpp PRT */
2704 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2705 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2706 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2707 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2708 NUM_BANKS(ADDR_SURF_16_BANK) |
2709 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2710 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2711 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
2712 break;
2713 case 23: /* 32 bpp PRT */
2714 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2715 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2716 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2717 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2718 NUM_BANKS(ADDR_SURF_16_BANK) |
2719 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2720 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2721 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2722 break;
2723 case 24: /* 64 bpp PRT */
2724 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2725 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2726 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2727 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
2728 NUM_BANKS(ADDR_SURF_16_BANK) |
2729 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2730 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2731 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2732 break;
2733 case 25: /* 128 bpp PRT */
2734 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2735 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2736 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2737 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
2738 NUM_BANKS(ADDR_SURF_8_BANK) |
2739 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2740 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2741 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
2742 break;
2743 default:
2744 gb_tile_moden = 0;
2745 break;
2746 }
Jerome Glisse64d7b8b2013-04-09 11:17:08 -04002747 rdev->config.si.tile_mode_array[reg_offset] = gb_tile_moden;
Alex Deucher0a96d722012-03-20 17:18:11 -04002748 WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
2749 }
2750 } else
2751 DRM_ERROR("unknown asic: 0x%x\n", rdev->family);
2752}
2753
Alex Deucher1a8ca752012-06-01 18:58:22 -04002754static void si_select_se_sh(struct radeon_device *rdev,
2755 u32 se_num, u32 sh_num)
2756{
2757 u32 data = INSTANCE_BROADCAST_WRITES;
2758
2759 if ((se_num == 0xffffffff) && (sh_num == 0xffffffff))
Alex Deucher79b52d62013-04-18 16:26:36 -04002760 data |= SH_BROADCAST_WRITES | SE_BROADCAST_WRITES;
Alex Deucher1a8ca752012-06-01 18:58:22 -04002761 else if (se_num == 0xffffffff)
2762 data |= SE_BROADCAST_WRITES | SH_INDEX(sh_num);
2763 else if (sh_num == 0xffffffff)
2764 data |= SH_BROADCAST_WRITES | SE_INDEX(se_num);
2765 else
2766 data |= SH_INDEX(sh_num) | SE_INDEX(se_num);
2767 WREG32(GRBM_GFX_INDEX, data);
2768}
2769
2770static u32 si_create_bitmask(u32 bit_width)
2771{
2772 u32 i, mask = 0;
2773
2774 for (i = 0; i < bit_width; i++) {
2775 mask <<= 1;
2776 mask |= 1;
2777 }
2778 return mask;
2779}
2780
2781static u32 si_get_cu_enabled(struct radeon_device *rdev, u32 cu_per_sh)
2782{
2783 u32 data, mask;
2784
2785 data = RREG32(CC_GC_SHADER_ARRAY_CONFIG);
2786 if (data & 1)
2787 data &= INACTIVE_CUS_MASK;
2788 else
2789 data = 0;
2790 data |= RREG32(GC_USER_SHADER_ARRAY_CONFIG);
2791
2792 data >>= INACTIVE_CUS_SHIFT;
2793
2794 mask = si_create_bitmask(cu_per_sh);
2795
2796 return ~data & mask;
2797}
2798
2799static void si_setup_spi(struct radeon_device *rdev,
2800 u32 se_num, u32 sh_per_se,
2801 u32 cu_per_sh)
2802{
2803 int i, j, k;
2804 u32 data, mask, active_cu;
2805
2806 for (i = 0; i < se_num; i++) {
2807 for (j = 0; j < sh_per_se; j++) {
2808 si_select_se_sh(rdev, i, j);
2809 data = RREG32(SPI_STATIC_THREAD_MGMT_3);
2810 active_cu = si_get_cu_enabled(rdev, cu_per_sh);
2811
2812 mask = 1;
2813 for (k = 0; k < 16; k++) {
2814 mask <<= k;
2815 if (active_cu & mask) {
2816 data &= ~mask;
2817 WREG32(SPI_STATIC_THREAD_MGMT_3, data);
2818 break;
2819 }
2820 }
2821 }
2822 }
2823 si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
2824}
2825
2826static u32 si_get_rb_disabled(struct radeon_device *rdev,
Marek Olšák9fadb352013-12-22 02:18:00 +01002827 u32 max_rb_num_per_se,
Alex Deucher1a8ca752012-06-01 18:58:22 -04002828 u32 sh_per_se)
2829{
2830 u32 data, mask;
2831
2832 data = RREG32(CC_RB_BACKEND_DISABLE);
2833 if (data & 1)
2834 data &= BACKEND_DISABLE_MASK;
2835 else
2836 data = 0;
2837 data |= RREG32(GC_USER_RB_BACKEND_DISABLE);
2838
2839 data >>= BACKEND_DISABLE_SHIFT;
2840
Marek Olšák9fadb352013-12-22 02:18:00 +01002841 mask = si_create_bitmask(max_rb_num_per_se / sh_per_se);
Alex Deucher1a8ca752012-06-01 18:58:22 -04002842
2843 return data & mask;
2844}
2845
/* si_setup_rb - discover enabled render backends and program the
 * rasterizer's RB mapping accordingly.
 *
 * Walks every SE/SH, gathers the per-array disabled-RB bits into one
 * global bitmap, inverts it into an enabled-RB mask (also exported via
 * rdev->config.si.backend_enable_mask for userspace queries), then
 * writes a PA_SC_RASTER_CONFIG RB_MAP value per shader engine based on
 * which of each SH's two RBs survive.
 */
static void si_setup_rb(struct radeon_device *rdev,
			u32 se_num, u32 sh_per_se,
			u32 max_rb_num_per_se)
{
	int i, j;
	u32 data, mask;
	u32 disabled_rbs = 0;
	u32 enabled_rbs = 0;

	/* Collect per-SH disabled bits; each SH contributes a
	 * TAHITI_RB_BITMAP_WIDTH_PER_SH-wide slice of the bitmap.
	 */
	for (i = 0; i < se_num; i++) {
		for (j = 0; j < sh_per_se; j++) {
			si_select_se_sh(rdev, i, j);
			data = si_get_rb_disabled(rdev, max_rb_num_per_se, sh_per_se);
			disabled_rbs |= data << ((i * sh_per_se + j) * TAHITI_RB_BITMAP_WIDTH_PER_SH);
		}
	}
	si_select_se_sh(rdev, 0xffffffff, 0xffffffff);

	/* Invert: a RB is enabled iff its disabled bit is clear. */
	mask = 1;
	for (i = 0; i < max_rb_num_per_se * se_num; i++) {
		if (!(disabled_rbs & mask))
			enabled_rbs |= mask;
		mask <<= 1;
	}

	rdev->config.si.backend_enable_mask = enabled_rbs;

	/* Program one RASTER_CONFIG per SE; consume two enabled-RB bits
	 * (one SH's worth) per inner iteration.  The RB_MAP_* choice
	 * depends on which of the pair of RBs is alive.
	 */
	for (i = 0; i < se_num; i++) {
		si_select_se_sh(rdev, i, 0xffffffff);
		data = 0;
		for (j = 0; j < sh_per_se; j++) {
			switch (enabled_rbs & 3) {
			case 1:
				data |= (RASTER_CONFIG_RB_MAP_0 << (i * sh_per_se + j) * 2);
				break;
			case 2:
				data |= (RASTER_CONFIG_RB_MAP_3 << (i * sh_per_se + j) * 2);
				break;
			case 3:
			default:
				data |= (RASTER_CONFIG_RB_MAP_2 << (i * sh_per_se + j) * 2);
				break;
			}
			enabled_rbs >>= 2;
		}
		WREG32(PA_SC_RASTER_CONFIG, data);
	}
	si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
}
2895
/* si_gpu_init - one-time GFX engine configuration for SI parts.
 *
 * Fills rdev->config.si with per-ASIC limits, derives the GB_ADDR_CONFIG
 * and the userspace-visible tile_config dword from the memory controller
 * strap registers, programs the tiling/RB/SPI tables, and finally sets
 * the static 3D-engine defaults.  The register write order below follows
 * the hardware bring-up sequence and should not be reordered.
 */
static void si_gpu_init(struct radeon_device *rdev)
{
	u32 gb_addr_config = 0;
	u32 mc_shared_chmap, mc_arb_ramcfg;
	u32 sx_debug_1;
	u32 hdp_host_path_cntl;
	u32 tmp;
	int i, j;

	/* Per-family shader/RB/cache limits and the "golden" address
	 * config value.  CHIP_VERDE doubles as the default case.
	 */
	switch (rdev->family) {
	case CHIP_TAHITI:
		rdev->config.si.max_shader_engines = 2;
		rdev->config.si.max_tile_pipes = 12;
		rdev->config.si.max_cu_per_sh = 8;
		rdev->config.si.max_sh_per_se = 2;
		rdev->config.si.max_backends_per_se = 4;
		rdev->config.si.max_texture_channel_caches = 12;
		rdev->config.si.max_gprs = 256;
		rdev->config.si.max_gs_threads = 32;
		rdev->config.si.max_hw_contexts = 8;

		rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
		rdev->config.si.sc_prim_fifo_size_backend = 0x100;
		rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = TAHITI_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_PITCAIRN:
		rdev->config.si.max_shader_engines = 2;
		rdev->config.si.max_tile_pipes = 8;
		rdev->config.si.max_cu_per_sh = 5;
		rdev->config.si.max_sh_per_se = 2;
		rdev->config.si.max_backends_per_se = 4;
		rdev->config.si.max_texture_channel_caches = 8;
		rdev->config.si.max_gprs = 256;
		rdev->config.si.max_gs_threads = 32;
		rdev->config.si.max_hw_contexts = 8;

		rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
		rdev->config.si.sc_prim_fifo_size_backend = 0x100;
		rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = TAHITI_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_VERDE:
	default:
		rdev->config.si.max_shader_engines = 1;
		rdev->config.si.max_tile_pipes = 4;
		rdev->config.si.max_cu_per_sh = 5;
		rdev->config.si.max_sh_per_se = 2;
		rdev->config.si.max_backends_per_se = 4;
		rdev->config.si.max_texture_channel_caches = 4;
		rdev->config.si.max_gprs = 256;
		rdev->config.si.max_gs_threads = 32;
		rdev->config.si.max_hw_contexts = 8;

		rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
		rdev->config.si.sc_prim_fifo_size_backend = 0x40;
		rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = VERDE_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_OLAND:
		rdev->config.si.max_shader_engines = 1;
		rdev->config.si.max_tile_pipes = 4;
		rdev->config.si.max_cu_per_sh = 6;
		rdev->config.si.max_sh_per_se = 1;
		rdev->config.si.max_backends_per_se = 2;
		rdev->config.si.max_texture_channel_caches = 4;
		rdev->config.si.max_gprs = 256;
		rdev->config.si.max_gs_threads = 16;
		rdev->config.si.max_hw_contexts = 8;

		rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
		rdev->config.si.sc_prim_fifo_size_backend = 0x40;
		rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = VERDE_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_HAINAN:
		rdev->config.si.max_shader_engines = 1;
		rdev->config.si.max_tile_pipes = 4;
		rdev->config.si.max_cu_per_sh = 5;
		rdev->config.si.max_sh_per_se = 1;
		rdev->config.si.max_backends_per_se = 1;
		rdev->config.si.max_texture_channel_caches = 2;
		rdev->config.si.max_gprs = 256;
		rdev->config.si.max_gs_threads = 16;
		rdev->config.si.max_hw_contexts = 8;

		rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
		rdev->config.si.sc_prim_fifo_size_backend = 0x40;
		rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = HAINAN_GB_ADDR_CONFIG_GOLDEN;
		break;
	}

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}

	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));

	evergreen_fix_pci_max_read_req_size(rdev);

	WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN);

	/* NOTE(review): mc_shared_chmap is read but never used below;
	 * kept to preserve behavior (register reads are assumed
	 * side-effect free here, but removing it is a separate change).
	 */
	mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
	mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);

	rdev->config.si.num_tile_pipes = rdev->config.si.max_tile_pipes;
	rdev->config.si.mem_max_burst_length_bytes = 256;
	/* Derive DRAM row size (in KB) from the column-count strap,
	 * capped at 4 KB.
	 */
	tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT;
	rdev->config.si.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
	if (rdev->config.si.mem_row_size_in_kb > 4)
		rdev->config.si.mem_row_size_in_kb = 4;
	/* XXX use MC settings? */
	rdev->config.si.shader_engine_tile_size = 32;
	rdev->config.si.num_gpus = 1;
	rdev->config.si.multi_gpu_tile_size = 64;

	/* fix up row size */
	gb_addr_config &= ~ROW_SIZE_MASK;
	switch (rdev->config.si.mem_row_size_in_kb) {
	case 1:
	default:
		gb_addr_config |= ROW_SIZE(0);
		break;
	case 2:
		gb_addr_config |= ROW_SIZE(1);
		break;
	case 4:
		gb_addr_config |= ROW_SIZE(2);
		break;
	}

	/* setup tiling info dword.  gb_addr_config is not adequate since it does
	 * not have bank info, so create a custom tiling dword.
	 * bits 3:0   num_pipes
	 * bits 7:4   num_banks
	 * bits 11:8  group_size
	 * bits 15:12 row_size
	 */
	rdev->config.si.tile_config = 0;
	switch (rdev->config.si.num_tile_pipes) {
	case 1:
		rdev->config.si.tile_config |= (0 << 0);
		break;
	case 2:
		rdev->config.si.tile_config |= (1 << 0);
		break;
	case 4:
		rdev->config.si.tile_config |= (2 << 0);
		break;
	case 8:
	default:
		/* XXX what about 12? */
		rdev->config.si.tile_config |= (3 << 0);
		break;
	}
	switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) {
	case 0: /* four banks */
		rdev->config.si.tile_config |= 0 << 4;
		break;
	case 1: /* eight banks */
		rdev->config.si.tile_config |= 1 << 4;
		break;
	case 2: /* sixteen banks */
	default:
		rdev->config.si.tile_config |= 2 << 4;
		break;
	}
	rdev->config.si.tile_config |=
		((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
	rdev->config.si.tile_config |=
		((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12;

	/* Mirror the address config into every block that needs to agree
	 * on the memory layout (display DMIF, HDP, both async DMA engines
	 * and, where present, UVD).
	 */
	WREG32(GB_ADDR_CONFIG, gb_addr_config);
	WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
	WREG32(DMIF_ADDR_CALC, gb_addr_config);
	WREG32(HDP_ADDR_CONFIG, gb_addr_config);
	WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config);
	WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config);
	if (rdev->has_uvd) {
		WREG32(UVD_UDEC_ADDR_CONFIG, gb_addr_config);
		WREG32(UVD_UDEC_DB_ADDR_CONFIG, gb_addr_config);
		WREG32(UVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config);
	}

	si_tiling_mode_table_init(rdev);

	si_setup_rb(rdev, rdev->config.si.max_shader_engines,
		    rdev->config.si.max_sh_per_se,
		    rdev->config.si.max_backends_per_se);

	si_setup_spi(rdev, rdev->config.si.max_shader_engines,
		     rdev->config.si.max_sh_per_se,
		     rdev->config.si.max_cu_per_sh);


	/* set HW defaults for 3D engine */
	WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
				     ROQ_IB2_START(0x2b)));
	WREG32(CP_MEQ_THRESHOLDS, MEQ1_START(0x30) | MEQ2_START(0x60));

	/* Read-modify-write with no modification: the write-back of the
	 * read value is intentional (kept from the original sequence).
	 */
	sx_debug_1 = RREG32(SX_DEBUG_1);
	WREG32(SX_DEBUG_1, sx_debug_1);

	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));

	/* Scan-converter FIFO sizes come from the per-family table above. */
	WREG32(PA_SC_FIFO_SIZE, (SC_FRONTEND_PRIM_FIFO_SIZE(rdev->config.si.sc_prim_fifo_size_frontend) |
				 SC_BACKEND_PRIM_FIFO_SIZE(rdev->config.si.sc_prim_fifo_size_backend) |
				 SC_HIZ_TILE_FIFO_SIZE(rdev->config.si.sc_hiz_tile_fifo_size) |
				 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.si.sc_earlyz_tile_fifo_size)));

	WREG32(VGT_NUM_INSTANCES, 1);

	WREG32(CP_PERFMON_CNTL, 0);

	WREG32(SQ_CONFIG, 0);

	WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
					  FORCE_EOV_MAX_REZ_CNT(255)));

	WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC) |
	       AUTO_INVLD_EN(ES_AND_GS_AUTO));

	WREG32(VGT_GS_VERTEX_REUSE, 16);
	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);

	/* Zero all CB performance-counter selects. */
	WREG32(CB_PERFCOUNTER0_SELECT0, 0);
	WREG32(CB_PERFCOUNTER0_SELECT1, 0);
	WREG32(CB_PERFCOUNTER1_SELECT0, 0);
	WREG32(CB_PERFCOUNTER1_SELECT1, 0);
	WREG32(CB_PERFCOUNTER2_SELECT0, 0);
	WREG32(CB_PERFCOUNTER2_SELECT1, 0);
	WREG32(CB_PERFCOUNTER3_SELECT0, 0);
	WREG32(CB_PERFCOUNTER3_SELECT1, 0);

	tmp = RREG32(HDP_MISC_CNTL);
	tmp |= HDP_FLUSH_INVALIDATE_CACHE;
	WREG32(HDP_MISC_CNTL, tmp);

	hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
	WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);

	WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));

	udelay(50);
}
Alex Deucherc476dde2012-03-20 17:18:12 -04003152
Alex Deucher48c0c902012-03-20 17:18:19 -04003153/*
Alex Deucher2ece2e82012-03-20 17:18:20 -04003154 * GPU scratch registers helpers function.
3155 */
3156static void si_scratch_init(struct radeon_device *rdev)
3157{
3158 int i;
3159
3160 rdev->scratch.num_reg = 7;
3161 rdev->scratch.reg_base = SCRATCH_REG0;
3162 for (i = 0; i < rdev->scratch.num_reg; i++) {
3163 rdev->scratch.free[i] = true;
3164 rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
3165 }
3166}
3167
/**
 * si_fence_ring_emit - emit a fence on the gfx/compute CP ring
 * @rdev: radeon device
 * @fence: fence to emit (identifies the ring and sequence number)
 *
 * Emits a cache-flush sequence followed by an EVENT_WRITE_EOP packet
 * that writes the fence sequence number to the fence GPU address and
 * raises an interrupt.  The exact dword order below is the packet
 * format contract with the CP microcode — do not reorder.
 */
void si_fence_ring_emit(struct radeon_device *rdev,
			struct radeon_fence *fence)
{
	struct radeon_ring *ring = &rdev->ring[fence->ring];
	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;

	/* flush read cache over gart */
	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
	radeon_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
			 PACKET3_TC_ACTION_ENA |
			 PACKET3_SH_KCACHE_ACTION_ENA |
			 PACKET3_SH_ICACHE_ACTION_ENA);
	radeon_ring_write(ring, 0xFFFFFFFF);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 10); /* poll interval */
	/* EVENT_WRITE_EOP - flush caches, send int */
	radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
	radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5));
	/* destination address split low/high; DATA_SEL/INT_SEL select the
	 * payload (fence->seq) and interrupt behavior — NOTE(review):
	 * exact field semantics per the SI PM4 packet spec.
	 */
	radeon_ring_write(ring, addr & 0xffffffff);
	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
	radeon_ring_write(ring, fence->seq);
	radeon_ring_write(ring, 0);
}
3194
3195/*
3196 * IB stuff
3197 */
/**
 * si_ring_ib_execute - schedule an indirect buffer on the CP ring
 * @rdev: radeon device
 * @ib: indirect buffer to execute
 *
 * Const IBs get a SWITCH_BUFFER preamble and use the CONST variant of
 * INDIRECT_BUFFER.  Normal IBs first emit a next-rptr update (via a
 * scratch register when one is reserved, otherwise via WRITE_DATA to
 * the writeback buffer), then the IB packet, then a GART read-cache
 * flush tagged with the IB's VM id.
 *
 * NOTE: the 3 + 4 + 8 / 5 + 4 + 8 dword offsets used for next_rptr are
 * coupled to the exact number of dwords emitted below — any change to
 * the emitted packets must update them in lockstep.
 */
void si_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->ring];
	u32 header;

	if (ib->is_const_ib) {
		/* set switch buffer packet before const IB */
		radeon_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
		radeon_ring_write(ring, 0);

		header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
	} else {
		u32 next_rptr;
		if (ring->rptr_save_reg) {
			/* 3 dwords for this update + 4 for the IB packet
			 * + 8 for the trailing cache flush.
			 */
			next_rptr = ring->wptr + 3 + 4 + 8;
			radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
			radeon_ring_write(ring, ((ring->rptr_save_reg -
						  PACKET3_SET_CONFIG_REG_START) >> 2));
			radeon_ring_write(ring, next_rptr);
		} else if (rdev->wb.enabled) {
			/* 5-dword WRITE_DATA variant of the same update. */
			next_rptr = ring->wptr + 5 + 4 + 8;
			radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
			radeon_ring_write(ring, (1 << 8));
			radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
			radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
			radeon_ring_write(ring, next_rptr);
		}

		header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
	}

	radeon_ring_write(ring, header);
	radeon_ring_write(ring,
#ifdef __BIG_ENDIAN
			  (2 << 0) |
#endif
			  (ib->gpu_addr & 0xFFFFFFFC));
	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
	/* Length plus the VM id in bits 31:24 (0 = no VM / kernel VM). */
	radeon_ring_write(ring, ib->length_dw |
			  (ib->vm ? (ib->vm->id << 24) : 0));

	if (!ib->is_const_ib) {
		/* flush read cache over gart for this vmid */
		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
		radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
		radeon_ring_write(ring, ib->vm ? ib->vm->id : 0);
		radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
		radeon_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
				 PACKET3_TC_ACTION_ENA |
				 PACKET3_SH_KCACHE_ACTION_ENA |
				 PACKET3_SH_ICACHE_ACTION_ENA);
		radeon_ring_write(ring, 0xFFFFFFFF);
		radeon_ring_write(ring, 0);
		radeon_ring_write(ring, 10); /* poll interval */
	}
}
3254
3255/*
Alex Deucher48c0c902012-03-20 17:18:19 -04003256 * CP.
3257 */
3258static void si_cp_enable(struct radeon_device *rdev, bool enable)
3259{
3260 if (enable)
3261 WREG32(CP_ME_CNTL, 0);
3262 else {
Alex Deucher50efa512014-01-27 11:26:33 -05003263 if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
3264 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
Alex Deucher48c0c902012-03-20 17:18:19 -04003265 WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT));
3266 WREG32(SCRATCH_UMSK, 0);
Alex Deucher8c5fd7e2012-12-04 15:28:18 -05003267 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
3268 rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
3269 rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
Alex Deucher48c0c902012-03-20 17:18:19 -04003270 }
3271 udelay(50);
3272}
3273
3274static int si_cp_load_microcode(struct radeon_device *rdev)
3275{
3276 const __be32 *fw_data;
3277 int i;
3278
3279 if (!rdev->me_fw || !rdev->pfp_fw)
3280 return -EINVAL;
3281
3282 si_cp_enable(rdev, false);
3283
3284 /* PFP */
3285 fw_data = (const __be32 *)rdev->pfp_fw->data;
3286 WREG32(CP_PFP_UCODE_ADDR, 0);
3287 for (i = 0; i < SI_PFP_UCODE_SIZE; i++)
3288 WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
3289 WREG32(CP_PFP_UCODE_ADDR, 0);
3290
3291 /* CE */
3292 fw_data = (const __be32 *)rdev->ce_fw->data;
3293 WREG32(CP_CE_UCODE_ADDR, 0);
3294 for (i = 0; i < SI_CE_UCODE_SIZE; i++)
3295 WREG32(CP_CE_UCODE_DATA, be32_to_cpup(fw_data++));
3296 WREG32(CP_CE_UCODE_ADDR, 0);
3297
3298 /* ME */
3299 fw_data = (const __be32 *)rdev->me_fw->data;
3300 WREG32(CP_ME_RAM_WADDR, 0);
3301 for (i = 0; i < SI_PM4_UCODE_SIZE; i++)
3302 WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
3303 WREG32(CP_ME_RAM_WADDR, 0);
3304
3305 WREG32(CP_PFP_UCODE_ADDR, 0);
3306 WREG32(CP_CE_UCODE_ADDR, 0);
3307 WREG32(CP_ME_RAM_WADDR, 0);
3308 WREG32(CP_ME_RAM_RADDR, 0);
3309 return 0;
3310}
3311
3312static int si_cp_start(struct radeon_device *rdev)
3313{
3314 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
3315 int r, i;
3316
3317 r = radeon_ring_lock(rdev, ring, 7 + 4);
3318 if (r) {
3319 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
3320 return r;
3321 }
3322 /* init the CP */
3323 radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
3324 radeon_ring_write(ring, 0x1);
3325 radeon_ring_write(ring, 0x0);
3326 radeon_ring_write(ring, rdev->config.si.max_hw_contexts - 1);
3327 radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
3328 radeon_ring_write(ring, 0);
3329 radeon_ring_write(ring, 0);
3330
3331 /* init the CE partitions */
3332 radeon_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
3333 radeon_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
3334 radeon_ring_write(ring, 0xc000);
3335 radeon_ring_write(ring, 0xe000);
3336 radeon_ring_unlock_commit(rdev, ring);
3337
3338 si_cp_enable(rdev, true);
3339
3340 r = radeon_ring_lock(rdev, ring, si_default_size + 10);
3341 if (r) {
3342 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
3343 return r;
3344 }
3345
3346 /* setup clear context state */
3347 radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
3348 radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
3349
3350 for (i = 0; i < si_default_size; i++)
3351 radeon_ring_write(ring, si_default_state[i]);
3352
3353 radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
3354 radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
3355
3356 /* set clear context state */
3357 radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
3358 radeon_ring_write(ring, 0);
3359
3360 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
3361 radeon_ring_write(ring, 0x00000316);
3362 radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
3363 radeon_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */
3364
3365 radeon_ring_unlock_commit(rdev, ring);
3366
3367 for (i = RADEON_RING_TYPE_GFX_INDEX; i <= CAYMAN_RING_TYPE_CP2_INDEX; ++i) {
3368 ring = &rdev->ring[i];
3369 r = radeon_ring_lock(rdev, ring, 2);
3370
3371 /* clear the compute context state */
3372 radeon_ring_write(ring, PACKET3_COMPUTE(PACKET3_CLEAR_STATE, 0));
3373 radeon_ring_write(ring, 0);
3374
3375 radeon_ring_unlock_commit(rdev, ring);
3376 }
3377
3378 return 0;
3379}
3380
3381static void si_cp_fini(struct radeon_device *rdev)
3382{
Christian König45df6802012-07-06 16:22:55 +02003383 struct radeon_ring *ring;
Alex Deucher48c0c902012-03-20 17:18:19 -04003384 si_cp_enable(rdev, false);
Christian König45df6802012-07-06 16:22:55 +02003385
3386 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
3387 radeon_ring_fini(rdev, ring);
3388 radeon_scratch_free(rdev, ring->rptr_save_reg);
3389
3390 ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
3391 radeon_ring_fini(rdev, ring);
3392 radeon_scratch_free(rdev, ring->rptr_save_reg);
3393
3394 ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
3395 radeon_ring_fini(rdev, ring);
3396 radeon_scratch_free(rdev, ring->rptr_save_reg);
Alex Deucher48c0c902012-03-20 17:18:19 -04003397}
3398
/**
 * si_cp_resume - program and start the CP ring buffers
 *
 * @rdev: radeon_device pointer
 *
 * Programs the ring buffer registers (size, read/write pointers,
 * writeback addresses, base) for the GFX ring and the two compute
 * rings, starts the CP and ring-tests all three rings.
 * Returns 0 on success, the GFX ring-test error otherwise (failures
 * on the compute rings only mark those rings not ready).
 */
static int si_cp_resume(struct radeon_device *rdev)
{
	struct radeon_ring *ring;
	u32 tmp;
	u32 rb_bufsz;
	int r;

	/* keep the GUI idle interrupt off while the rings are reprogrammed */
	si_enable_gui_idle_interrupt(rdev, false);

	WREG32(CP_SEM_WAIT_TIMER, 0x0);
	WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);

	/* Set the write pointer delay */
	WREG32(CP_RB_WPTR_DELAY, 0);

	WREG32(CP_DEBUG, 0);
	WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);

	/* ring 0 - compute and gfx */
	/* Set ring buffer size */
	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	rb_bufsz = order_base_2(ring->ring_size / 8);
	tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
	tmp |= BUF_SWAP_32BIT;
#endif
	WREG32(CP_RB0_CNTL, tmp);

	/* Initialize the ring buffer's read and write pointers.
	 * RB_RPTR_WR_ENA temporarily allows the rptr to be reset to 0.
	 */
	WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA);
	ring->wptr = 0;
	WREG32(CP_RB0_WPTR, ring->wptr);

	/* set the wb address whether it's enabled or not */
	WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
	WREG32(CP_RB0_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);

	if (rdev->wb.enabled)
		WREG32(SCRATCH_UMSK, 0xff);
	else {
		/* no writeback buffer: disable rptr writeback updates */
		tmp |= RB_NO_UPDATE;
		WREG32(SCRATCH_UMSK, 0);
	}

	mdelay(1);
	WREG32(CP_RB0_CNTL, tmp);

	WREG32(CP_RB0_BASE, ring->gpu_addr >> 8);

	/* ring1 - compute only */
	/* Set ring buffer size */
	ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
	rb_bufsz = order_base_2(ring->ring_size / 8);
	tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
	tmp |= BUF_SWAP_32BIT;
#endif
	WREG32(CP_RB1_CNTL, tmp);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(CP_RB1_CNTL, tmp | RB_RPTR_WR_ENA);
	ring->wptr = 0;
	WREG32(CP_RB1_WPTR, ring->wptr);

	/* set the wb address whether it's enabled or not */
	WREG32(CP_RB1_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFFFFFFFC);
	WREG32(CP_RB1_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFF);

	mdelay(1);
	WREG32(CP_RB1_CNTL, tmp);

	WREG32(CP_RB1_BASE, ring->gpu_addr >> 8);

	/* ring2 - compute only */
	/* Set ring buffer size */
	ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
	rb_bufsz = order_base_2(ring->ring_size / 8);
	tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
	tmp |= BUF_SWAP_32BIT;
#endif
	WREG32(CP_RB2_CNTL, tmp);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(CP_RB2_CNTL, tmp | RB_RPTR_WR_ENA);
	ring->wptr = 0;
	WREG32(CP_RB2_WPTR, ring->wptr);

	/* set the wb address whether it's enabled or not */
	WREG32(CP_RB2_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFFFFFFFC);
	WREG32(CP_RB2_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFF);

	mdelay(1);
	WREG32(CP_RB2_CNTL, tmp);

	WREG32(CP_RB2_BASE, ring->gpu_addr >> 8);

	/* start the rings */
	si_cp_start(rdev);
	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = true;
	rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = true;
	rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = true;
	r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
	if (r) {
		/* a dead GFX ring is fatal; compute rings can't be trusted either */
		rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
		rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
		rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
		return r;
	}
	r = radeon_ring_test(rdev, CAYMAN_RING_TYPE_CP1_INDEX, &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]);
	if (r) {
		rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
	}
	r = radeon_ring_test(rdev, CAYMAN_RING_TYPE_CP2_INDEX, &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]);
	if (r) {
		rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
	}

	si_enable_gui_idle_interrupt(rdev, true);

	/* buffer copies go through the GFX ring: full VRAM is usable again */
	if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
		radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);

	return 0;
}
3524
/**
 * si_gpu_check_soft_reset - determine which blocks are hung/busy
 *
 * @rdev: radeon_device pointer
 *
 * Reads the GRBM/SRBM/DMA/VM status registers and translates the busy
 * bits into a RADEON_RESET_* mask describing which blocks would need a
 * soft reset.  Returns 0 if everything is idle.  MC busy is reported
 * but then cleared from the mask since a busy MC is usually not hung.
 */
u32 si_gpu_check_soft_reset(struct radeon_device *rdev)
{
	u32 reset_mask = 0;
	u32 tmp;

	/* GRBM_STATUS */
	tmp = RREG32(GRBM_STATUS);
	if (tmp & (PA_BUSY | SC_BUSY |
		   BCI_BUSY | SX_BUSY |
		   TA_BUSY | VGT_BUSY |
		   DB_BUSY | CB_BUSY |
		   GDS_BUSY | SPI_BUSY |
		   IA_BUSY | IA_BUSY_NO_DMA))
		reset_mask |= RADEON_RESET_GFX;

	if (tmp & (CF_RQ_PENDING | PF_RQ_PENDING |
		   CP_BUSY | CP_COHERENCY_BUSY))
		reset_mask |= RADEON_RESET_CP;

	if (tmp & GRBM_EE_BUSY)
		reset_mask |= RADEON_RESET_GRBM | RADEON_RESET_GFX | RADEON_RESET_CP;

	/* GRBM_STATUS2 */
	tmp = RREG32(GRBM_STATUS2);
	if (tmp & (RLC_RQ_PENDING | RLC_BUSY))
		reset_mask |= RADEON_RESET_RLC;

	/* DMA_STATUS_REG 0 */
	tmp = RREG32(DMA_STATUS_REG + DMA0_REGISTER_OFFSET);
	if (!(tmp & DMA_IDLE))
		reset_mask |= RADEON_RESET_DMA;

	/* DMA_STATUS_REG 1 */
	tmp = RREG32(DMA_STATUS_REG + DMA1_REGISTER_OFFSET);
	if (!(tmp & DMA_IDLE))
		reset_mask |= RADEON_RESET_DMA1;

	/* SRBM_STATUS2 */
	tmp = RREG32(SRBM_STATUS2);
	if (tmp & DMA_BUSY)
		reset_mask |= RADEON_RESET_DMA;

	if (tmp & DMA1_BUSY)
		reset_mask |= RADEON_RESET_DMA1;

	/* SRBM_STATUS */
	tmp = RREG32(SRBM_STATUS);

	if (tmp & IH_BUSY)
		reset_mask |= RADEON_RESET_IH;

	if (tmp & SEM_BUSY)
		reset_mask |= RADEON_RESET_SEM;

	if (tmp & GRBM_RQ_PENDING)
		reset_mask |= RADEON_RESET_GRBM;

	if (tmp & VMC_BUSY)
		reset_mask |= RADEON_RESET_VMC;

	if (tmp & (MCB_BUSY | MCB_NON_DISPLAY_BUSY |
		   MCC_BUSY | MCD_BUSY))
		reset_mask |= RADEON_RESET_MC;

	if (evergreen_is_display_hung(rdev))
		reset_mask |= RADEON_RESET_DISPLAY;

	/* VM_L2_STATUS */
	tmp = RREG32(VM_L2_STATUS);
	if (tmp & L2_BUSY)
		reset_mask |= RADEON_RESET_VMC;

	/* Skip MC reset as it's mostly likely not hung, just busy */
	if (reset_mask & RADEON_RESET_MC) {
		DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
		reset_mask &= ~RADEON_RESET_MC;
	}

	return reset_mask;
}
3605
/**
 * si_gpu_soft_reset - soft-reset the blocks named in @reset_mask
 *
 * @rdev: radeon_device pointer
 * @reset_mask: RADEON_RESET_* mask from si_gpu_check_soft_reset()
 *
 * Quiesces the engines (PG/CG off, RLC stopped, CP halted, DMA rings
 * disabled), stops MC client access, then pulses the corresponding
 * GRBM/SRBM soft-reset bits and resumes the MC.  The statement order
 * and the udelay()s between steps are part of the reset procedure.
 */
static void si_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
{
	struct evergreen_mc_save save;
	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
	u32 tmp;

	if (reset_mask == 0)
		return;

	dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);

	evergreen_print_gpu_status_regs(rdev);
	dev_info(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
		 RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR));
	dev_info(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
		 RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));

	/* disable PG/CG */
	si_fini_pg(rdev);
	si_fini_cg(rdev);

	/* stop the rlc */
	si_rlc_stop(rdev);

	/* Disable CP parsing/prefetching */
	WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT);

	if (reset_mask & RADEON_RESET_DMA) {
		/* dma0 */
		tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
		tmp &= ~DMA_RB_ENABLE;
		WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp);
	}
	if (reset_mask & RADEON_RESET_DMA1) {
		/* dma1 */
		tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
		tmp &= ~DMA_RB_ENABLE;
		WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp);
	}

	udelay(50);

	/* stop MC client access before touching the reset bits */
	evergreen_mc_stop(rdev, &save);
	if (evergreen_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
	}

	/* translate the RADEON_RESET_* mask into GRBM/SRBM reset bits */
	if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE | RADEON_RESET_CP)) {
		grbm_soft_reset = SOFT_RESET_CB |
			SOFT_RESET_DB |
			SOFT_RESET_GDS |
			SOFT_RESET_PA |
			SOFT_RESET_SC |
			SOFT_RESET_BCI |
			SOFT_RESET_SPI |
			SOFT_RESET_SX |
			SOFT_RESET_TC |
			SOFT_RESET_TA |
			SOFT_RESET_VGT |
			SOFT_RESET_IA;
	}

	if (reset_mask & RADEON_RESET_CP) {
		grbm_soft_reset |= SOFT_RESET_CP | SOFT_RESET_VGT;

		srbm_soft_reset |= SOFT_RESET_GRBM;
	}

	if (reset_mask & RADEON_RESET_DMA)
		srbm_soft_reset |= SOFT_RESET_DMA;

	if (reset_mask & RADEON_RESET_DMA1)
		srbm_soft_reset |= SOFT_RESET_DMA1;

	if (reset_mask & RADEON_RESET_DISPLAY)
		srbm_soft_reset |= SOFT_RESET_DC;

	if (reset_mask & RADEON_RESET_RLC)
		grbm_soft_reset |= SOFT_RESET_RLC;

	if (reset_mask & RADEON_RESET_SEM)
		srbm_soft_reset |= SOFT_RESET_SEM;

	if (reset_mask & RADEON_RESET_IH)
		srbm_soft_reset |= SOFT_RESET_IH;

	if (reset_mask & RADEON_RESET_GRBM)
		srbm_soft_reset |= SOFT_RESET_GRBM;

	if (reset_mask & RADEON_RESET_VMC)
		srbm_soft_reset |= SOFT_RESET_VMC;

	if (reset_mask & RADEON_RESET_MC)
		srbm_soft_reset |= SOFT_RESET_MC;

	if (grbm_soft_reset) {
		/* assert, read back (posting read), wait, then de-assert */
		tmp = RREG32(GRBM_SOFT_RESET);
		tmp |= grbm_soft_reset;
		dev_info(rdev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(GRBM_SOFT_RESET, tmp);
		tmp = RREG32(GRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~grbm_soft_reset;
		WREG32(GRBM_SOFT_RESET, tmp);
		tmp = RREG32(GRBM_SOFT_RESET);
	}

	if (srbm_soft_reset) {
		/* same assert/de-assert dance for the SRBM-side blocks */
		tmp = RREG32(SRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(SRBM_SOFT_RESET, tmp);
		tmp = RREG32(SRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(SRBM_SOFT_RESET, tmp);
		tmp = RREG32(SRBM_SOFT_RESET);
	}

	/* Wait a little for things to settle down */
	udelay(50);

	evergreen_mc_resume(rdev, &save);
	udelay(50);

	evergreen_print_gpu_status_regs(rdev);
}
3737
/* Switch SCLK/MCLK to PLL bypass so the chip is on a safe, known clock
 * source before a PCI config reset.  The request/ack handshake order of
 * these register writes is part of the procedure.
 */
static void si_set_clk_bypass_mode(struct radeon_device *rdev)
{
	u32 tmp, i;

	/* put the SPLL output in bypass */
	tmp = RREG32(CG_SPLL_FUNC_CNTL);
	tmp |= SPLL_BYPASS_EN;
	WREG32(CG_SPLL_FUNC_CNTL, tmp);

	/* request the control change */
	tmp = RREG32(CG_SPLL_FUNC_CNTL_2);
	tmp |= SPLL_CTLREQ_CHG;
	WREG32(CG_SPLL_FUNC_CNTL_2, tmp);

	/* wait (bounded by usec_timeout) for the SPLL to acknowledge */
	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(SPLL_STATUS) & SPLL_CHG_STATUS)
			break;
		udelay(1);
	}

	/* drop the request and the sclk mux update bit */
	tmp = RREG32(CG_SPLL_FUNC_CNTL_2);
	tmp &= ~(SPLL_CTLREQ_CHG | SCLK_MUX_UPDATE);
	WREG32(CG_SPLL_FUNC_CNTL_2, tmp);

	/* take MCLK off the MPLL */
	tmp = RREG32(MPLL_CNTL_MODE);
	tmp &= ~MPLL_MCLK_SEL;
	WREG32(MPLL_CNTL_MODE, tmp);
}
3764
/* Power the SPLL down (reset + sleep) under temporary software direct
 * control.  Called during PCI config reset after clocks are in bypass.
 */
static void si_spll_powerdown(struct radeon_device *rdev)
{
	u32 tmp;

	/* take software control of the SPLL */
	tmp = RREG32(SPLL_CNTL_MODE);
	tmp |= SPLL_SW_DIR_CONTROL;
	WREG32(SPLL_CNTL_MODE, tmp);

	/* hold the SPLL in reset */
	tmp = RREG32(CG_SPLL_FUNC_CNTL);
	tmp |= SPLL_RESET;
	WREG32(CG_SPLL_FUNC_CNTL, tmp);

	/* then put it to sleep */
	tmp = RREG32(CG_SPLL_FUNC_CNTL);
	tmp |= SPLL_SLEEP;
	WREG32(CG_SPLL_FUNC_CNTL, tmp);

	/* hand control back to the hardware */
	tmp = RREG32(SPLL_CNTL_MODE);
	tmp &= ~SPLL_SW_DIR_CONTROL;
	WREG32(SPLL_CNTL_MODE, tmp);
}
3785
/* Full-ASIC reset through PCI config space, used when soft reset fails.
 * Quiesces the engines and MC, drops the clocks to bypass, powers the
 * SPLL down, disables bus mastering and triggers the config reset, then
 * polls CONFIG_MEMSIZE until the ASIC responds again.
 */
static void si_gpu_pci_config_reset(struct radeon_device *rdev)
{
	struct evergreen_mc_save save;
	u32 tmp, i;

	dev_info(rdev->dev, "GPU pci config reset\n");

	/* disable dpm? */

	/* disable cg/pg */
	si_fini_pg(rdev);
	si_fini_cg(rdev);

	/* Disable CP parsing/prefetching */
	WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT);
	/* dma0 */
	tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
	tmp &= ~DMA_RB_ENABLE;
	WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp);
	/* dma1 */
	tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
	tmp &= ~DMA_RB_ENABLE;
	WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp);
	/* XXX other engines? */

	/* halt the rlc, disable cp internal ints */
	si_rlc_stop(rdev);

	udelay(50);

	/* disable mem access */
	evergreen_mc_stop(rdev, &save);
	if (evergreen_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out !\n");
	}

	/* set mclk/sclk to bypass */
	si_set_clk_bypass_mode(rdev);
	/* powerdown spll */
	si_spll_powerdown(rdev);
	/* disable BM */
	pci_clear_master(rdev->pdev);
	/* reset */
	radeon_pci_config_reset(rdev);
	/* wait for asic to come out of reset; CONFIG_MEMSIZE reads as
	 * all-ones while the device is still in reset
	 */
	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(CONFIG_MEMSIZE) != 0xffffffff)
			break;
		udelay(1);
	}
}
3837
/**
 * si_asic_reset - try to reset a hung ASIC
 *
 * @rdev: radeon_device pointer
 *
 * First attempts a targeted soft reset of the hung blocks; if blocks
 * are still busy afterwards and hard reset is allowed, escalates to a
 * PCI config reset.  The BIOS scratch "engine hung" flag is set while
 * a hang is outstanding and cleared once the ASIC checks out idle.
 * Always returns 0.
 */
int si_asic_reset(struct radeon_device *rdev)
{
	u32 reset_mask;

	reset_mask = si_gpu_check_soft_reset(rdev);

	if (reset_mask)
		r600_set_bios_scratch_engine_hung(rdev, true);

	/* try soft reset */
	si_gpu_soft_reset(rdev, reset_mask);

	reset_mask = si_gpu_check_soft_reset(rdev);

	/* try pci config reset */
	if (reset_mask && radeon_hard_reset)
		si_gpu_pci_config_reset(rdev);

	reset_mask = si_gpu_check_soft_reset(rdev);

	if (!reset_mask)
		r600_set_bios_scratch_engine_hung(rdev, false);

	return 0;
}
3863
Alex Deucher123bc182013-01-24 11:37:19 -05003864/**
3865 * si_gfx_is_lockup - Check if the GFX engine is locked up
3866 *
3867 * @rdev: radeon_device pointer
3868 * @ring: radeon_ring structure holding ring information
3869 *
3870 * Check if the GFX engine is locked up.
3871 * Returns true if the engine appears to be locked up, false if not.
3872 */
3873bool si_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
3874{
3875 u32 reset_mask = si_gpu_check_soft_reset(rdev);
3876
3877 if (!(reset_mask & (RADEON_RESET_GFX |
3878 RADEON_RESET_COMPUTE |
3879 RADEON_RESET_CP))) {
Christian Königff212f22014-02-18 14:52:33 +01003880 radeon_ring_lockup_update(rdev, ring);
Alex Deucher123bc182013-01-24 11:37:19 -05003881 return false;
3882 }
Alex Deucher123bc182013-01-24 11:37:19 -05003883 return radeon_ring_test_lockup(rdev, ring);
3884}
3885
Alex Deucherd2800ee2012-03-20 17:18:13 -04003886/* MC */
/* Program the memory controller apertures (system aperture, FB
 * location, HDP non-surface range, AGP disabled) with MC clients
 * stopped.  Register order matters; both NODCE checks guard display
 * (VGA) accesses on ASICs without display hardware.
 */
static void si_mc_program(struct radeon_device *rdev)
{
	struct evergreen_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}
	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);

	evergreen_mc_stop(rdev, &save);
	if (radeon_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
	}
	if (!ASIC_IS_NODCE(rdev))
		/* Lockout access through VGA aperture*/
		WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
	/* Update configuration */
	WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
	       rdev->mc.vram_start >> 12);
	WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
	       rdev->mc.vram_end >> 12);
	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
	       rdev->vram_scratch.gpu_addr >> 12);
	/* FB location: top 16 bits = end, bottom 16 bits = start (in 16MB units) */
	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(MC_VM_FB_LOCATION, tmp);
	/* XXX double check these! */
	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
	WREG32(HDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	/* AGP is not used: base 0, top below bottom disables the aperture */
	WREG32(MC_VM_AGP_BASE, 0);
	WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
	WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
	if (radeon_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
	}
	evergreen_mc_resume(rdev, &save);
	if (!ASIC_IS_NODCE(rdev)) {
		/* we need to own VRAM, so turn off the VGA renderer here
		 * to stop it overwriting our objects */
		rv515_vga_render_disable(rdev);
	}
}
3937
Alex Deucher1c491652013-04-09 12:45:26 -04003938void si_vram_gtt_location(struct radeon_device *rdev,
3939 struct radeon_mc *mc)
Alex Deucherd2800ee2012-03-20 17:18:13 -04003940{
3941 if (mc->mc_vram_size > 0xFFC0000000ULL) {
3942 /* leave room for at least 1024M GTT */
3943 dev_warn(rdev->dev, "limiting VRAM\n");
3944 mc->real_vram_size = 0xFFC0000000ULL;
3945 mc->mc_vram_size = 0xFFC0000000ULL;
3946 }
Alex Deucher9ed8b1f2013-04-08 11:13:01 -04003947 radeon_vram_location(rdev, &rdev->mc, 0);
Alex Deucherd2800ee2012-03-20 17:18:13 -04003948 rdev->mc.gtt_base_align = 0;
Alex Deucher9ed8b1f2013-04-08 11:13:01 -04003949 radeon_gtt_location(rdev, mc);
Alex Deucherd2800ee2012-03-20 17:18:13 -04003950}
3951
3952static int si_mc_init(struct radeon_device *rdev)
3953{
3954 u32 tmp;
3955 int chansize, numchan;
3956
3957 /* Get VRAM informations */
3958 rdev->mc.vram_is_ddr = true;
3959 tmp = RREG32(MC_ARB_RAMCFG);
3960 if (tmp & CHANSIZE_OVERRIDE) {
3961 chansize = 16;
3962 } else if (tmp & CHANSIZE_MASK) {
3963 chansize = 64;
3964 } else {
3965 chansize = 32;
3966 }
3967 tmp = RREG32(MC_SHARED_CHMAP);
3968 switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
3969 case 0:
3970 default:
3971 numchan = 1;
3972 break;
3973 case 1:
3974 numchan = 2;
3975 break;
3976 case 2:
3977 numchan = 4;
3978 break;
3979 case 3:
3980 numchan = 8;
3981 break;
3982 case 4:
3983 numchan = 3;
3984 break;
3985 case 5:
3986 numchan = 6;
3987 break;
3988 case 6:
3989 numchan = 10;
3990 break;
3991 case 7:
3992 numchan = 12;
3993 break;
3994 case 8:
3995 numchan = 16;
3996 break;
3997 }
3998 rdev->mc.vram_width = numchan * chansize;
3999 /* Could aper size report 0 ? */
4000 rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
4001 rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
4002 /* size in MB on si */
Alex Deucher0ca223b2013-12-03 09:24:30 -05004003 tmp = RREG32(CONFIG_MEMSIZE);
4004 /* some boards may have garbage in the upper 16 bits */
4005 if (tmp & 0xffff0000) {
4006 DRM_INFO("Probable bad vram size: 0x%08x\n", tmp);
4007 if (tmp & 0xffff)
4008 tmp &= 0xffff;
4009 }
4010 rdev->mc.mc_vram_size = tmp * 1024ULL * 1024ULL;
4011 rdev->mc.real_vram_size = rdev->mc.mc_vram_size;
Alex Deucherd2800ee2012-03-20 17:18:13 -04004012 rdev->mc.visible_vram_size = rdev->mc.aper_size;
4013 si_vram_gtt_location(rdev, &rdev->mc);
4014 radeon_update_bandwidth_info(rdev);
4015
4016 return 0;
4017}
4018
4019/*
4020 * GART
4021 */
/* Flush the HDP cache and invalidate the GART TLB. */
void si_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	/* flush hdp cache */
	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);

	/* bits 0-15 are the VM contexts0-15; writing 1 invalidates context 0 */
	WREG32(VM_INVALIDATE_REQUEST, 1);
}
4030
/* Bring up the PCIE GART: pin the page table, program the L1 TLB and
 * L2 cache, set up VM context 0 for the GTT and enable contexts 1-15
 * for per-process VMs with fault interrupts.  Returns 0 on success or
 * a negative error code if the page table is missing or cannot be
 * pinned.
 */
static int si_pcie_gart_enable(struct radeon_device *rdev)
{
	int r, i;

	if (rdev->gart.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	radeon_gart_restore(rdev);
	/* Setup TLB control */
	WREG32(MC_VM_MX_L1_TLB_CNTL,
	       (0xA << 7) |
	       ENABLE_L1_TLB |
	       ENABLE_L1_FRAGMENT_PROCESSING |
	       SYSTEM_ACCESS_MODE_NOT_IN_SYS |
	       ENABLE_ADVANCED_DRIVER_MODEL |
	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
	       ENABLE_L2_FRAGMENT_PROCESSING |
	       ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
	       EFFECTIVE_L2_QUEUE_SIZE(7) |
	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
	WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE);
	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
	       BANK_SELECT(4) |
	       L2_CACHE_BIGK_FRAGMENT_SIZE(4));
	/* setup context0: flat page table covering the GTT aperture */
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(rdev->dummy_page.addr >> 12));
	WREG32(VM_CONTEXT0_CNTL2, 0);
	WREG32(VM_CONTEXT0_CNTL, (ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
				  RANGE_PROTECTION_FAULT_ENABLE_DEFAULT));

	/* undocumented registers, cleared here — left as magic offsets */
	WREG32(0x15D4, 0);
	WREG32(0x15D8, 0);
	WREG32(0x15DC, 0);

	/* empty context1-15 */
	/* set vm size, must be a multiple of 4 */
	WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
	WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn);
	/* Assign the pt base to something valid for now; the pts used for
	 * the VMs are determined by the application and setup and assigned
	 * on the fly in the vm part of radeon_gart.c
	 */
	for (i = 1; i < 16; i++) {
		if (i < 8)
			WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
			       rdev->gart.table_addr >> 12);
		else
			WREG32(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((i - 8) << 2),
			       rdev->gart.table_addr >> 12);
	}

	/* enable context1-15 */
	WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(rdev->dummy_page.addr >> 12));
	WREG32(VM_CONTEXT1_CNTL2, 4);
	WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
				RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
				DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
				DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT |
				PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT |
				PDE0_PROTECTION_FAULT_ENABLE_DEFAULT |
				VALID_PROTECTION_FAULT_ENABLE_INTERRUPT |
				VALID_PROTECTION_FAULT_ENABLE_DEFAULT |
				READ_PROTECTION_FAULT_ENABLE_INTERRUPT |
				READ_PROTECTION_FAULT_ENABLE_DEFAULT |
				WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT |
				WRITE_PROTECTION_FAULT_ENABLE_DEFAULT);

	si_pcie_gart_tlb_flush(rdev);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20),
		 (unsigned long long)rdev->gart.table_addr);
	rdev->gart.ready = true;
	return 0;
}
4118
/* Disable the PCIE GART: turn off all VM contexts and the L1 TLB,
 * put the L2 cache into a disabled configuration and unpin the page
 * table.
 */
static void si_pcie_gart_disable(struct radeon_device *rdev)
{
	/* Disable all tables */
	WREG32(VM_CONTEXT0_CNTL, 0);
	WREG32(VM_CONTEXT1_CNTL, 0);
	/* Setup TLB control */
	WREG32(MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE_NOT_IN_SYS |
	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
	       EFFECTIVE_L2_QUEUE_SIZE(7) |
	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
	       L2_CACHE_BIGK_FRAGMENT_SIZE(0));
	radeon_gart_table_vram_unpin(rdev);
}
4137
/* Final GART teardown: disable the hardware first, then free the page
 * table and the GART bookkeeping (order matters).
 */
static void si_pcie_gart_fini(struct radeon_device *rdev)
{
	si_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
	radeon_gart_fini(rdev);
}
4144
Alex Deucher498dd8b2012-03-20 17:18:15 -04004145/* vm parser */
4146static bool si_vm_reg_valid(u32 reg)
4147{
4148 /* context regs are fine */
4149 if (reg >= 0x28000)
4150 return true;
4151
4152 /* check config regs */
4153 switch (reg) {
4154 case GRBM_GFX_INDEX:
Alex Deucherf418b882012-11-08 10:13:24 -05004155 case CP_STRMOUT_CNTL:
Alex Deucher498dd8b2012-03-20 17:18:15 -04004156 case VGT_VTX_VECT_EJECT_REG:
4157 case VGT_CACHE_INVALIDATION:
4158 case VGT_ESGS_RING_SIZE:
4159 case VGT_GSVS_RING_SIZE:
4160 case VGT_GS_VERTEX_REUSE:
4161 case VGT_PRIMITIVE_TYPE:
4162 case VGT_INDEX_TYPE:
4163 case VGT_NUM_INDICES:
4164 case VGT_NUM_INSTANCES:
4165 case VGT_TF_RING_SIZE:
4166 case VGT_HS_OFFCHIP_PARAM:
4167 case VGT_TF_MEMORY_BASE:
4168 case PA_CL_ENHANCE:
4169 case PA_SU_LINE_STIPPLE_VALUE:
4170 case PA_SC_LINE_STIPPLE_STATE:
4171 case PA_SC_ENHANCE:
4172 case SQC_CACHES:
4173 case SPI_STATIC_THREAD_MGMT_1:
4174 case SPI_STATIC_THREAD_MGMT_2:
4175 case SPI_STATIC_THREAD_MGMT_3:
4176 case SPI_PS_MAX_WAVE_ID:
4177 case SPI_CONFIG_CNTL:
4178 case SPI_CONFIG_CNTL_1:
4179 case TA_CNTL_AUX:
4180 return true;
4181 default:
4182 DRM_ERROR("Invalid register 0x%x in CS\n", reg);
4183 return false;
4184 }
4185}
4186
4187static int si_vm_packet3_ce_check(struct radeon_device *rdev,
4188 u32 *ib, struct radeon_cs_packet *pkt)
4189{
4190 switch (pkt->opcode) {
4191 case PACKET3_NOP:
4192 case PACKET3_SET_BASE:
4193 case PACKET3_SET_CE_DE_COUNTERS:
4194 case PACKET3_LOAD_CONST_RAM:
4195 case PACKET3_WRITE_CONST_RAM:
4196 case PACKET3_WRITE_CONST_RAM_OFFSET:
4197 case PACKET3_DUMP_CONST_RAM:
4198 case PACKET3_INCREMENT_CE_COUNTER:
4199 case PACKET3_WAIT_ON_DE_COUNTER:
4200 case PACKET3_CE_WRITE:
4201 break;
4202 default:
4203 DRM_ERROR("Invalid CE packet3: 0x%x\n", pkt->opcode);
4204 return -EINVAL;
4205 }
4206 return 0;
4207}
4208
Tom Stellarde5b9e752013-08-16 17:47:39 -04004209static int si_vm_packet3_cp_dma_check(u32 *ib, u32 idx)
4210{
4211 u32 start_reg, reg, i;
4212 u32 command = ib[idx + 4];
4213 u32 info = ib[idx + 1];
4214 u32 idx_value = ib[idx];
4215 if (command & PACKET3_CP_DMA_CMD_SAS) {
4216 /* src address space is register */
4217 if (((info & 0x60000000) >> 29) == 0) {
4218 start_reg = idx_value << 2;
4219 if (command & PACKET3_CP_DMA_CMD_SAIC) {
4220 reg = start_reg;
4221 if (!si_vm_reg_valid(reg)) {
4222 DRM_ERROR("CP DMA Bad SRC register\n");
4223 return -EINVAL;
4224 }
4225 } else {
4226 for (i = 0; i < (command & 0x1fffff); i++) {
4227 reg = start_reg + (4 * i);
4228 if (!si_vm_reg_valid(reg)) {
4229 DRM_ERROR("CP DMA Bad SRC register\n");
4230 return -EINVAL;
4231 }
4232 }
4233 }
4234 }
4235 }
4236 if (command & PACKET3_CP_DMA_CMD_DAS) {
4237 /* dst address space is register */
4238 if (((info & 0x00300000) >> 20) == 0) {
4239 start_reg = ib[idx + 2];
4240 if (command & PACKET3_CP_DMA_CMD_DAIC) {
4241 reg = start_reg;
4242 if (!si_vm_reg_valid(reg)) {
4243 DRM_ERROR("CP DMA Bad DST register\n");
4244 return -EINVAL;
4245 }
4246 } else {
4247 for (i = 0; i < (command & 0x1fffff); i++) {
4248 reg = start_reg + (4 * i);
4249 if (!si_vm_reg_valid(reg)) {
4250 DRM_ERROR("CP DMA Bad DST register\n");
4251 return -EINVAL;
4252 }
4253 }
4254 }
4255 }
4256 }
4257 return 0;
4258}
4259
/* Validate a PACKET3 destined for the GFX ring in a VM IB.  Most
 * opcodes are allowed unconditionally; packets that can write
 * registers (COPY_DATA, WRITE_DATA, COND_WRITE, COPY_DW,
 * SET_CONFIG_REG, CP_DMA) have their destination registers checked
 * against si_vm_reg_valid().  Returns 0 if the packet is acceptable,
 * -EINVAL otherwise.
 */
static int si_vm_packet3_gfx_check(struct radeon_device *rdev,
				   u32 *ib, struct radeon_cs_packet *pkt)
{
	int r;
	u32 idx = pkt->idx + 1;
	u32 idx_value = ib[idx];
	u32 start_reg, end_reg, reg, i;

	switch (pkt->opcode) {
	/* opcodes that are always safe from a VM client */
	case PACKET3_NOP:
	case PACKET3_SET_BASE:
	case PACKET3_CLEAR_STATE:
	case PACKET3_INDEX_BUFFER_SIZE:
	case PACKET3_DISPATCH_DIRECT:
	case PACKET3_DISPATCH_INDIRECT:
	case PACKET3_ALLOC_GDS:
	case PACKET3_WRITE_GDS_RAM:
	case PACKET3_ATOMIC_GDS:
	case PACKET3_ATOMIC:
	case PACKET3_OCCLUSION_QUERY:
	case PACKET3_SET_PREDICATION:
	case PACKET3_COND_EXEC:
	case PACKET3_PRED_EXEC:
	case PACKET3_DRAW_INDIRECT:
	case PACKET3_DRAW_INDEX_INDIRECT:
	case PACKET3_INDEX_BASE:
	case PACKET3_DRAW_INDEX_2:
	case PACKET3_CONTEXT_CONTROL:
	case PACKET3_INDEX_TYPE:
	case PACKET3_DRAW_INDIRECT_MULTI:
	case PACKET3_DRAW_INDEX_AUTO:
	case PACKET3_DRAW_INDEX_IMMD:
	case PACKET3_NUM_INSTANCES:
	case PACKET3_DRAW_INDEX_MULTI_AUTO:
	case PACKET3_STRMOUT_BUFFER_UPDATE:
	case PACKET3_DRAW_INDEX_OFFSET_2:
	case PACKET3_DRAW_INDEX_MULTI_ELEMENT:
	case PACKET3_DRAW_INDEX_INDIRECT_MULTI:
	case PACKET3_MPEG_INDEX:
	case PACKET3_WAIT_REG_MEM:
	case PACKET3_MEM_WRITE:
	case PACKET3_PFP_SYNC_ME:
	case PACKET3_SURFACE_SYNC:
	case PACKET3_EVENT_WRITE:
	case PACKET3_EVENT_WRITE_EOP:
	case PACKET3_EVENT_WRITE_EOS:
	case PACKET3_SET_CONTEXT_REG:
	case PACKET3_SET_CONTEXT_REG_INDIRECT:
	case PACKET3_SET_SH_REG:
	case PACKET3_SET_SH_REG_OFFSET:
	case PACKET3_INCREMENT_DE_COUNTER:
	case PACKET3_WAIT_ON_CE_COUNTER:
	case PACKET3_WAIT_ON_AVAIL_BUFFER:
	case PACKET3_ME_WRITE:
		break;
	case PACKET3_COPY_DATA:
		/* dest-select 0 == register destination */
		if ((idx_value & 0xf00) == 0) {
			reg = ib[idx + 3] * 4;
			if (!si_vm_reg_valid(reg))
				return -EINVAL;
		}
		break;
	case PACKET3_WRITE_DATA:
		if ((idx_value & 0xf00) == 0) {
			start_reg = ib[idx + 1] * 4;
			/* bit 16: single register vs consecutive range */
			if (idx_value & 0x10000) {
				if (!si_vm_reg_valid(start_reg))
					return -EINVAL;
			} else {
				for (i = 0; i < (pkt->count - 2); i++) {
					reg = start_reg + (4 * i);
					if (!si_vm_reg_valid(reg))
						return -EINVAL;
				}
			}
		}
		break;
	case PACKET3_COND_WRITE:
		if (idx_value & 0x100) {
			reg = ib[idx + 5] * 4;
			if (!si_vm_reg_valid(reg))
				return -EINVAL;
		}
		break;
	case PACKET3_COPY_DW:
		if (idx_value & 0x2) {
			reg = ib[idx + 3] * 4;
			if (!si_vm_reg_valid(reg))
				return -EINVAL;
		}
		break;
	case PACKET3_SET_CONFIG_REG:
		/* range must lie entirely inside the config reg window */
		start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CONFIG_REG_START) ||
		    (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
		    (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
			DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
			return -EINVAL;
		}
		for (i = 0; i < pkt->count; i++) {
			reg = start_reg + (4 * i);
			if (!si_vm_reg_valid(reg))
				return -EINVAL;
		}
		break;
	case PACKET3_CP_DMA:
		r = si_vm_packet3_cp_dma_check(ib, idx);
		if (r)
			return r;
		break;
	default:
		DRM_ERROR("Invalid GFX packet3: 0x%x\n", pkt->opcode);
		return -EINVAL;
	}
	return 0;
}
4377
/**
 * si_vm_packet3_compute_check - validate a PACKET3 on the compute rings
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer dwords
 * @pkt: decoded packet header (idx, count, opcode)
 *
 * Allow-list check of PACKET3 opcodes submitted to the compute (CP1/CP2)
 * rings with VM enabled.  Opcodes that can write registers are only
 * accepted when every target register passes si_vm_reg_valid().
 * Returns 0 if the packet is acceptable, -EINVAL otherwise.
 */
static int si_vm_packet3_compute_check(struct radeon_device *rdev,
				       u32 *ib, struct radeon_cs_packet *pkt)
{
	int r;
	u32 idx = pkt->idx + 1;	/* first dword after the packet header */
	u32 idx_value = ib[idx];
	u32 start_reg, reg, i;

	switch (pkt->opcode) {
	/* these opcodes cannot reach registers; always allowed */
	case PACKET3_NOP:
	case PACKET3_SET_BASE:
	case PACKET3_CLEAR_STATE:
	case PACKET3_DISPATCH_DIRECT:
	case PACKET3_DISPATCH_INDIRECT:
	case PACKET3_ALLOC_GDS:
	case PACKET3_WRITE_GDS_RAM:
	case PACKET3_ATOMIC_GDS:
	case PACKET3_ATOMIC:
	case PACKET3_OCCLUSION_QUERY:
	case PACKET3_SET_PREDICATION:
	case PACKET3_COND_EXEC:
	case PACKET3_PRED_EXEC:
	case PACKET3_CONTEXT_CONTROL:
	case PACKET3_STRMOUT_BUFFER_UPDATE:
	case PACKET3_WAIT_REG_MEM:
	case PACKET3_MEM_WRITE:
	case PACKET3_PFP_SYNC_ME:
	case PACKET3_SURFACE_SYNC:
	case PACKET3_EVENT_WRITE:
	case PACKET3_EVENT_WRITE_EOP:
	case PACKET3_EVENT_WRITE_EOS:
	case PACKET3_SET_CONTEXT_REG:
	case PACKET3_SET_CONTEXT_REG_INDIRECT:
	case PACKET3_SET_SH_REG:
	case PACKET3_SET_SH_REG_OFFSET:
	case PACKET3_INCREMENT_DE_COUNTER:
	case PACKET3_WAIT_ON_CE_COUNTER:
	case PACKET3_WAIT_ON_AVAIL_BUFFER:
	case PACKET3_ME_WRITE:
		break;
	case PACKET3_COPY_DATA:
		/* dest-select bits [11:8] == 0 means a register destination */
		if ((idx_value & 0xf00) == 0) {
			reg = ib[idx + 3] * 4;
			if (!si_vm_reg_valid(reg))
				return -EINVAL;
		}
		break;
	case PACKET3_WRITE_DATA:
		/* dest-select bits [11:8] == 0 means a register destination */
		if ((idx_value & 0xf00) == 0) {
			start_reg = ib[idx + 1] * 4;
			if (idx_value & 0x10000) {
				/* one-register-write mode: only the base
				 * register is targeted repeatedly */
				if (!si_vm_reg_valid(start_reg))
					return -EINVAL;
			} else {
				/* payload is (count - 2) consecutive regs */
				for (i = 0; i < (pkt->count - 2); i++) {
					reg = start_reg + (4 * i);
					if (!si_vm_reg_valid(reg))
						return -EINVAL;
				}
			}
		}
		break;
	case PACKET3_COND_WRITE:
		/* bit 8 set: write target is a register */
		if (idx_value & 0x100) {
			reg = ib[idx + 5] * 4;
			if (!si_vm_reg_valid(reg))
				return -EINVAL;
		}
		break;
	case PACKET3_COPY_DW:
		/* bit 1 set: destination is a register */
		if (idx_value & 0x2) {
			reg = ib[idx + 3] * 4;
			if (!si_vm_reg_valid(reg))
				return -EINVAL;
		}
		break;
	case PACKET3_CP_DMA:
		r = si_vm_packet3_cp_dma_check(ib, idx);
		if (r)
			return r;
		break;
	default:
		DRM_ERROR("Invalid Compute packet3: 0x%x\n", pkt->opcode);
		return -EINVAL;
	}
	return 0;
}
4465
/**
 * si_ib_parse - validate an indirect buffer before VM submission
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to validate
 *
 * Walks the IB packet by packet and dispatches each PACKET3 to the
 * ring-specific checker (CE for const IBs, GFX or compute otherwise).
 * Type-0 packets and unknown packet types are rejected outright.
 * Returns 0 when every packet validates, -EINVAL on the first failure.
 */
int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
{
	int ret = 0;
	u32 idx = 0;
	struct radeon_cs_packet pkt;

	do {
		pkt.idx = idx;
		pkt.type = RADEON_CP_PACKET_GET_TYPE(ib->ptr[idx]);
		pkt.count = RADEON_CP_PACKET_GET_COUNT(ib->ptr[idx]);
		pkt.one_reg_wr = 0;
		switch (pkt.type) {
		case RADEON_PACKET_TYPE0:
			/* raw register writes are never allowed from a VM IB */
			dev_err(rdev->dev, "Packet0 not allowed!\n");
			ret = -EINVAL;
			break;
		case RADEON_PACKET_TYPE2:
			/* type-2 is a one-dword filler/NOP */
			idx += 1;
			break;
		case RADEON_PACKET_TYPE3:
			pkt.opcode = RADEON_CP_PACKET3_GET_OPCODE(ib->ptr[idx]);
			if (ib->is_const_ib)
				ret = si_vm_packet3_ce_check(rdev, ib->ptr, &pkt);
			else {
				switch (ib->ring) {
				case RADEON_RING_TYPE_GFX_INDEX:
					ret = si_vm_packet3_gfx_check(rdev, ib->ptr, &pkt);
					break;
				case CAYMAN_RING_TYPE_CP1_INDEX:
				case CAYMAN_RING_TYPE_CP2_INDEX:
					ret = si_vm_packet3_compute_check(rdev, ib->ptr, &pkt);
					break;
				default:
					dev_err(rdev->dev, "Non-PM4 ring %d !\n", ib->ring);
					ret = -EINVAL;
					break;
				}
			}
			/* header dword + count field encodes (count + 1) payload dwords */
			idx += pkt.count + 2;
			break;
		default:
			dev_err(rdev->dev, "Unknown packet type %d !\n", pkt.type);
			ret = -EINVAL;
			break;
		}
		if (ret)
			break;
	} while (idx < ib->length_dw);

	return ret;
}
4517
Alex Deucherd2800ee2012-03-20 17:18:13 -04004518/*
4519 * vm
4520 */
4521int si_vm_init(struct radeon_device *rdev)
4522{
4523 /* number of VMs */
4524 rdev->vm_manager.nvm = 16;
4525 /* base offset of vram pages */
4526 rdev->vm_manager.vram_base_offset = 0;
4527
4528 return 0;
4529}
4530
/* Nothing to tear down: si_vm_init() allocates no resources. */
void si_vm_fini(struct radeon_device *rdev)
{
}
4534
Alex Deucher82ffd922012-10-02 14:47:46 -04004535/**
Alex Deucherfbf6dc72013-06-13 18:47:58 -04004536 * si_vm_decode_fault - print human readable fault info
4537 *
4538 * @rdev: radeon_device pointer
4539 * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
4540 * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
4541 *
4542 * Print human readable fault information (SI).
4543 */
4544static void si_vm_decode_fault(struct radeon_device *rdev,
4545 u32 status, u32 addr)
4546{
4547 u32 mc_id = (status & MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT;
4548 u32 vmid = (status & FAULT_VMID_MASK) >> FAULT_VMID_SHIFT;
4549 u32 protections = (status & PROTECTIONS_MASK) >> PROTECTIONS_SHIFT;
4550 char *block;
4551
4552 if (rdev->family == CHIP_TAHITI) {
4553 switch (mc_id) {
4554 case 160:
4555 case 144:
4556 case 96:
4557 case 80:
4558 case 224:
4559 case 208:
4560 case 32:
4561 case 16:
4562 block = "CB";
4563 break;
4564 case 161:
4565 case 145:
4566 case 97:
4567 case 81:
4568 case 225:
4569 case 209:
4570 case 33:
4571 case 17:
4572 block = "CB_FMASK";
4573 break;
4574 case 162:
4575 case 146:
4576 case 98:
4577 case 82:
4578 case 226:
4579 case 210:
4580 case 34:
4581 case 18:
4582 block = "CB_CMASK";
4583 break;
4584 case 163:
4585 case 147:
4586 case 99:
4587 case 83:
4588 case 227:
4589 case 211:
4590 case 35:
4591 case 19:
4592 block = "CB_IMMED";
4593 break;
4594 case 164:
4595 case 148:
4596 case 100:
4597 case 84:
4598 case 228:
4599 case 212:
4600 case 36:
4601 case 20:
4602 block = "DB";
4603 break;
4604 case 165:
4605 case 149:
4606 case 101:
4607 case 85:
4608 case 229:
4609 case 213:
4610 case 37:
4611 case 21:
4612 block = "DB_HTILE";
4613 break;
4614 case 167:
4615 case 151:
4616 case 103:
4617 case 87:
4618 case 231:
4619 case 215:
4620 case 39:
4621 case 23:
4622 block = "DB_STEN";
4623 break;
4624 case 72:
4625 case 68:
4626 case 64:
4627 case 8:
4628 case 4:
4629 case 0:
4630 case 136:
4631 case 132:
4632 case 128:
4633 case 200:
4634 case 196:
4635 case 192:
4636 block = "TC";
4637 break;
4638 case 112:
4639 case 48:
4640 block = "CP";
4641 break;
4642 case 49:
4643 case 177:
4644 case 50:
4645 case 178:
4646 block = "SH";
4647 break;
4648 case 53:
4649 case 190:
4650 block = "VGT";
4651 break;
4652 case 117:
4653 block = "IH";
4654 break;
4655 case 51:
4656 case 115:
4657 block = "RLC";
4658 break;
4659 case 119:
4660 case 183:
4661 block = "DMA0";
4662 break;
4663 case 61:
4664 block = "DMA1";
4665 break;
4666 case 248:
4667 case 120:
4668 block = "HDP";
4669 break;
4670 default:
4671 block = "unknown";
4672 break;
4673 }
4674 } else {
4675 switch (mc_id) {
4676 case 32:
4677 case 16:
4678 case 96:
4679 case 80:
4680 case 160:
4681 case 144:
4682 case 224:
4683 case 208:
4684 block = "CB";
4685 break;
4686 case 33:
4687 case 17:
4688 case 97:
4689 case 81:
4690 case 161:
4691 case 145:
4692 case 225:
4693 case 209:
4694 block = "CB_FMASK";
4695 break;
4696 case 34:
4697 case 18:
4698 case 98:
4699 case 82:
4700 case 162:
4701 case 146:
4702 case 226:
4703 case 210:
4704 block = "CB_CMASK";
4705 break;
4706 case 35:
4707 case 19:
4708 case 99:
4709 case 83:
4710 case 163:
4711 case 147:
4712 case 227:
4713 case 211:
4714 block = "CB_IMMED";
4715 break;
4716 case 36:
4717 case 20:
4718 case 100:
4719 case 84:
4720 case 164:
4721 case 148:
4722 case 228:
4723 case 212:
4724 block = "DB";
4725 break;
4726 case 37:
4727 case 21:
4728 case 101:
4729 case 85:
4730 case 165:
4731 case 149:
4732 case 229:
4733 case 213:
4734 block = "DB_HTILE";
4735 break;
4736 case 39:
4737 case 23:
4738 case 103:
4739 case 87:
4740 case 167:
4741 case 151:
4742 case 231:
4743 case 215:
4744 block = "DB_STEN";
4745 break;
4746 case 72:
4747 case 68:
4748 case 8:
4749 case 4:
4750 case 136:
4751 case 132:
4752 case 200:
4753 case 196:
4754 block = "TC";
4755 break;
4756 case 112:
4757 case 48:
4758 block = "CP";
4759 break;
4760 case 49:
4761 case 177:
4762 case 50:
4763 case 178:
4764 block = "SH";
4765 break;
4766 case 53:
4767 block = "VGT";
4768 break;
4769 case 117:
4770 block = "IH";
4771 break;
4772 case 51:
4773 case 115:
4774 block = "RLC";
4775 break;
4776 case 119:
4777 case 183:
4778 block = "DMA0";
4779 break;
4780 case 61:
4781 block = "DMA1";
4782 break;
4783 case 248:
4784 case 120:
4785 block = "HDP";
4786 break;
4787 default:
4788 block = "unknown";
4789 break;
4790 }
4791 }
4792
4793 printk("VM fault (0x%02x, vmid %d) at page %u, %s from %s (%d)\n",
4794 protections, vmid, addr,
4795 (status & MEMORY_CLIENT_RW_MASK) ? "write" : "read",
4796 block, mc_id);
4797}
4798
Alex Deucher498522b2012-10-02 14:43:38 -04004799void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
Alex Deucherd2800ee2012-03-20 17:18:13 -04004800{
Alex Deucher498522b2012-10-02 14:43:38 -04004801 struct radeon_ring *ring = &rdev->ring[ridx];
Alex Deucherd2800ee2012-03-20 17:18:13 -04004802
Christian Königee60e292012-08-09 16:21:08 +02004803 if (vm == NULL)
Alex Deucherd2800ee2012-03-20 17:18:13 -04004804 return;
4805
Alex Deucher76c44f22012-10-02 14:39:18 -04004806 /* write new base address */
4807 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4808 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
4809 WRITE_DATA_DST_SEL(0)));
4810
Christian Königee60e292012-08-09 16:21:08 +02004811 if (vm->id < 8) {
Alex Deucher76c44f22012-10-02 14:39:18 -04004812 radeon_ring_write(ring,
4813 (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2);
Christian Königee60e292012-08-09 16:21:08 +02004814 } else {
Alex Deucher76c44f22012-10-02 14:39:18 -04004815 radeon_ring_write(ring,
4816 (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2);
Christian Königee60e292012-08-09 16:21:08 +02004817 }
Alex Deucher76c44f22012-10-02 14:39:18 -04004818 radeon_ring_write(ring, 0);
Dmitry Cherkasovfa87e622012-09-17 19:36:19 +02004819 radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
Christian Königee60e292012-08-09 16:21:08 +02004820
Alex Deucherd2800ee2012-03-20 17:18:13 -04004821 /* flush hdp cache */
Alex Deucher76c44f22012-10-02 14:39:18 -04004822 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4823 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
4824 WRITE_DATA_DST_SEL(0)));
4825 radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
4826 radeon_ring_write(ring, 0);
Christian Königee60e292012-08-09 16:21:08 +02004827 radeon_ring_write(ring, 0x1);
4828
Alex Deucherd2800ee2012-03-20 17:18:13 -04004829 /* bits 0-15 are the VM contexts0-15 */
Alex Deucher76c44f22012-10-02 14:39:18 -04004830 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4831 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
4832 WRITE_DATA_DST_SEL(0)));
4833 radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
4834 radeon_ring_write(ring, 0);
Alex Deucher498522b2012-10-02 14:43:38 -04004835 radeon_ring_write(ring, 1 << vm->id);
Christian König58f8cf52012-10-22 17:42:35 +02004836
4837 /* sync PFP to ME, otherwise we might get invalid PFP reads */
4838 radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
4839 radeon_ring_write(ring, 0x0);
Alex Deucherd2800ee2012-03-20 17:18:13 -04004840}
4841
Alex Deucher347e7592012-03-20 17:18:21 -04004842/*
Alex Deucherf8f84ac2013-03-07 12:56:35 -05004843 * Power and clock gating
4844 */
4845static void si_wait_for_rlc_serdes(struct radeon_device *rdev)
4846{
4847 int i;
4848
4849 for (i = 0; i < rdev->usec_timeout; i++) {
4850 if (RREG32(RLC_SERDES_MASTER_BUSY_0) == 0)
4851 break;
4852 udelay(1);
4853 }
4854
4855 for (i = 0; i < rdev->usec_timeout; i++) {
4856 if (RREG32(RLC_SERDES_MASTER_BUSY_1) == 0)
4857 break;
4858 udelay(1);
4859 }
4860}
4861
/* Enable/disable the CP ring0 context busy/empty interrupts.  On disable
 * we additionally wait until the RLC reports the GFX block clocked and
 * powered with no pending busy/LS state. */
static void si_enable_gui_idle_interrupt(struct radeon_device *rdev,
					 bool enable)
{
	u32 tmp = RREG32(CP_INT_CNTL_RING0);
	u32 mask;
	int i;

	if (enable)
		tmp |= (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
	else
		tmp &= ~(CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
	WREG32(CP_INT_CNTL_RING0, tmp);

	if (!enable) {
		/* read a gfx register */
		tmp = RREG32(DB_DEPTH_INFO);

		/* wait for RLC_STAT to settle to exactly clock+power on */
		mask = RLC_BUSY_STATUS | GFX_POWER_STATUS | GFX_CLOCK_STATUS | GFX_LS_STATUS;
		for (i = 0; i < rdev->usec_timeout; i++) {
			if ((RREG32(RLC_STAT) & mask) == (GFX_CLOCK_STATUS | GFX_POWER_STATUS))
				break;
			udelay(1);
		}
	}
}
4887
4888static void si_set_uvd_dcm(struct radeon_device *rdev,
4889 bool sw_mode)
4890{
4891 u32 tmp, tmp2;
4892
4893 tmp = RREG32(UVD_CGC_CTRL);
4894 tmp &= ~(CLK_OD_MASK | CG_DT_MASK);
4895 tmp |= DCM | CG_DT(1) | CLK_OD(4);
4896
4897 if (sw_mode) {
4898 tmp &= ~0x7ffff800;
4899 tmp2 = DYN_OR_EN | DYN_RR_EN | G_DIV_ID(7);
4900 } else {
4901 tmp |= 0x7ffff800;
4902 tmp2 = 0;
4903 }
4904
4905 WREG32(UVD_CGC_CTRL, tmp);
4906 WREG32_UVD_CTX(UVD_CGC_CTRL2, tmp2);
4907}
4908
Alex Deucher22c775c2013-07-23 09:41:05 -04004909void si_init_uvd_internal_cg(struct radeon_device *rdev)
Alex Deucherf8f84ac2013-03-07 12:56:35 -05004910{
4911 bool hw_mode = true;
4912
4913 if (hw_mode) {
4914 si_set_uvd_dcm(rdev, false);
4915 } else {
4916 u32 tmp = RREG32(UVD_CGC_CTRL);
4917 tmp &= ~DCM;
4918 WREG32(UVD_CGC_CTRL, tmp);
4919 }
4920}
4921
4922static u32 si_halt_rlc(struct radeon_device *rdev)
4923{
4924 u32 data, orig;
4925
4926 orig = data = RREG32(RLC_CNTL);
4927
4928 if (data & RLC_ENABLE) {
4929 data &= ~RLC_ENABLE;
4930 WREG32(RLC_CNTL, data);
4931
4932 si_wait_for_rlc_serdes(rdev);
4933 }
4934
4935 return orig;
4936}
4937
4938static void si_update_rlc(struct radeon_device *rdev, u32 rlc)
4939{
4940 u32 tmp;
4941
4942 tmp = RREG32(RLC_CNTL);
4943 if (tmp != rlc)
4944 WREG32(RLC_CNTL, rlc);
4945}
4946
4947static void si_enable_dma_pg(struct radeon_device *rdev, bool enable)
4948{
4949 u32 data, orig;
4950
4951 orig = data = RREG32(DMA_PG);
Alex Deuchere16866e2013-08-08 19:34:07 -04004952 if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_SDMA))
Alex Deucherf8f84ac2013-03-07 12:56:35 -05004953 data |= PG_CNTL_ENABLE;
4954 else
4955 data &= ~PG_CNTL_ENABLE;
4956 if (orig != data)
4957 WREG32(DMA_PG, data);
4958}
4959
4960static void si_init_dma_pg(struct radeon_device *rdev)
4961{
4962 u32 tmp;
4963
4964 WREG32(DMA_PGFSM_WRITE, 0x00002000);
4965 WREG32(DMA_PGFSM_CONFIG, 0x100010ff);
4966
4967 for (tmp = 0; tmp < 5; tmp++)
4968 WREG32(DMA_PGFSM_WRITE, 0);
4969}
4970
/* Enable/disable GFX clock- and power-gating via the RLC auto-PG path. */
static void si_enable_gfx_cgpg(struct radeon_device *rdev,
			       bool enable)
{
	u32 tmp;

	if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG)) {
		/* per-domain powergating thresholds */
		tmp = RLC_PUD(0x10) | RLC_PDD(0x10) | RLC_TTPD(0x10) | RLC_MSD(0x10);
		WREG32(RLC_TTOP_D, tmp);

		tmp = RREG32(RLC_PG_CNTL);
		tmp |= GFX_PG_ENABLE;
		WREG32(RLC_PG_CNTL, tmp);

		tmp = RREG32(RLC_AUTO_PG_CTRL);
		tmp |= AUTO_PG_EN;
		WREG32(RLC_AUTO_PG_CTRL, tmp);
	} else {
		tmp = RREG32(RLC_AUTO_PG_CTRL);
		tmp &= ~AUTO_PG_EN;
		WREG32(RLC_AUTO_PG_CTRL, tmp);

		/* result unused — presumably a posting read to flush the
		 * disable; NOTE(review): confirm before removing */
		tmp = RREG32(DB_RENDER_CONTROL);
	}
}
4995
/* Program the RLC save/restore and clear-state buffer addresses and the
 * auto-powergating idle thresholds used by GFX CGPG. */
static void si_init_gfx_cgpg(struct radeon_device *rdev)
{
	u32 tmp;

	/* addresses are programmed in units of 256 bytes (>> 8) */
	WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);

	tmp = RREG32(RLC_PG_CNTL);
	tmp |= GFX_PG_SRC;
	WREG32(RLC_PG_CNTL, tmp);

	WREG32(RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8);

	tmp = RREG32(RLC_AUTO_PG_CTRL);

	/* idle threshold before the RLC triggers powergating */
	tmp &= ~GRBM_REG_SGIT_MASK;
	tmp |= GRBM_REG_SGIT(0x700);
	tmp &= ~PG_AFTER_GRBM_REG_ST_MASK;
	WREG32(RLC_AUTO_PG_CTRL, tmp);
}
5015
Alex Deucherba190312013-04-17 16:27:40 -04005016static u32 si_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh)
Alex Deucherf8f84ac2013-03-07 12:56:35 -05005017{
5018 u32 mask = 0, tmp, tmp1;
5019 int i;
5020
5021 si_select_se_sh(rdev, se, sh);
5022 tmp = RREG32(CC_GC_SHADER_ARRAY_CONFIG);
5023 tmp1 = RREG32(GC_USER_SHADER_ARRAY_CONFIG);
5024 si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
5025
5026 tmp &= 0xffff0000;
5027
5028 tmp |= tmp1;
5029 tmp >>= 16;
5030
5031 for (i = 0; i < rdev->config.si.max_cu_per_sh; i ++) {
5032 mask <<= 1;
5033 mask |= 1;
5034 }
5035
5036 return (~tmp) & mask;
5037}
5038
5039static void si_init_ao_cu_mask(struct radeon_device *rdev)
5040{
5041 u32 i, j, k, active_cu_number = 0;
5042 u32 mask, counter, cu_bitmap;
5043 u32 tmp = 0;
5044
5045 for (i = 0; i < rdev->config.si.max_shader_engines; i++) {
5046 for (j = 0; j < rdev->config.si.max_sh_per_se; j++) {
5047 mask = 1;
5048 cu_bitmap = 0;
5049 counter = 0;
5050 for (k = 0; k < rdev->config.si.max_cu_per_sh; k++) {
Alex Deucherba190312013-04-17 16:27:40 -04005051 if (si_get_cu_active_bitmap(rdev, i, j) & mask) {
Alex Deucherf8f84ac2013-03-07 12:56:35 -05005052 if (counter < 2)
5053 cu_bitmap |= mask;
5054 counter++;
5055 }
5056 mask <<= 1;
5057 }
5058
5059 active_cu_number += counter;
5060 tmp |= (cu_bitmap << (i * 16 + j * 8));
5061 }
5062 }
5063
5064 WREG32(RLC_PG_AO_CU_MASK, tmp);
5065
5066 tmp = RREG32(RLC_MAX_PG_CU);
5067 tmp &= ~MAX_PU_CU_MASK;
5068 tmp |= MAX_PU_CU(active_cu_number);
5069 WREG32(RLC_MAX_PG_CU, tmp);
5070}
5071
/* Enable/disable GFX coarse-grain clock gating (CGCG) and clock/light
 * sleep (CGLS).  The RLC is halted around the serdes writes and restored
 * afterwards; statement order here is load-bearing. */
static void si_enable_cgcg(struct radeon_device *rdev,
			   bool enable)
{
	u32 data, orig, tmp;

	orig = data = RREG32(RLC_CGCG_CGLS_CTRL);

	if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CGCG)) {
		si_enable_gui_idle_interrupt(rdev, true);

		WREG32(RLC_GCPM_GENERAL_3, 0x00000080);

		tmp = si_halt_rlc(rdev);

		/* broadcast the serdes command to all instances */
		WREG32(RLC_SERDES_WR_MASTER_MASK_0, 0xffffffff);
		WREG32(RLC_SERDES_WR_MASTER_MASK_1, 0xffffffff);
		WREG32(RLC_SERDES_WR_CTRL, 0x00b000ff);

		si_wait_for_rlc_serdes(rdev);

		si_update_rlc(rdev, tmp);

		WREG32(RLC_SERDES_WR_CTRL, 0x007000ff);

		data |= CGCG_EN | CGLS_EN;
	} else {
		si_enable_gui_idle_interrupt(rdev, false);

		/* repeated reads, values discarded — presumably to settle
		 * the CB clock before turning gating off; confirm */
		RREG32(CB_CGTT_SCLK_CTRL);
		RREG32(CB_CGTT_SCLK_CTRL);
		RREG32(CB_CGTT_SCLK_CTRL);
		RREG32(CB_CGTT_SCLK_CTRL);

		data &= ~(CGCG_EN | CGLS_EN);
	}

	if (orig != data)
		WREG32(RLC_CGCG_CGLS_CTRL, data);
}
5111
/* Enable/disable GFX medium-grain clock gating (and CP light sleep when
 * supported).  Each path halts the RLC, issues a broadcast serdes
 * command and restores the RLC; statement order is load-bearing. */
static void si_enable_mgcg(struct radeon_device *rdev,
			   bool enable)
{
	u32 data, orig, tmp = 0;

	if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_MGCG)) {
		orig = data = RREG32(CGTS_SM_CTRL_REG);
		data = 0x96940200;
		if (orig != data)
			WREG32(CGTS_SM_CTRL_REG, data);

		if (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CP_LS) {
			orig = data = RREG32(CP_MEM_SLP_CNTL);
			data |= CP_MEM_LS_EN;
			if (orig != data)
				WREG32(CP_MEM_SLP_CNTL, data);
		}

		/* clear the low MGCG override bits */
		orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
		data &= 0xffffffc0;
		if (orig != data)
			WREG32(RLC_CGTT_MGCG_OVERRIDE, data);

		tmp = si_halt_rlc(rdev);

		WREG32(RLC_SERDES_WR_MASTER_MASK_0, 0xffffffff);
		WREG32(RLC_SERDES_WR_MASTER_MASK_1, 0xffffffff);
		WREG32(RLC_SERDES_WR_CTRL, 0x00d000ff);

		si_update_rlc(rdev, tmp);
	} else {
		/* force the override bits on to defeat gating */
		orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
		data |= 0x00000003;
		if (orig != data)
			WREG32(RLC_CGTT_MGCG_OVERRIDE, data);

		data = RREG32(CP_MEM_SLP_CNTL);
		if (data & CP_MEM_LS_EN) {
			data &= ~CP_MEM_LS_EN;
			WREG32(CP_MEM_SLP_CNTL, data);
		}
		orig = data = RREG32(CGTS_SM_CTRL_REG);
		data |= LS_OVERRIDE | OVERRIDE;
		if (orig != data)
			WREG32(CGTS_SM_CTRL_REG, data);

		tmp = si_halt_rlc(rdev);

		WREG32(RLC_SERDES_WR_MASTER_MASK_0, 0xffffffff);
		WREG32(RLC_SERDES_WR_MASTER_MASK_1, 0xffffffff);
		WREG32(RLC_SERDES_WR_CTRL, 0x00e000ff);

		si_update_rlc(rdev, tmp);
	}
}
5167
/* Enable/disable UVD medium-grain clock gating via the UVD context
 * registers, the DCM bit and the SMC-indirect CGTT locals. */
static void si_enable_uvd_mgcg(struct radeon_device *rdev,
			       bool enable)
{
	u32 orig, data, tmp;

	if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_UVD_MGCG)) {
		tmp = RREG32_UVD_CTX(UVD_CGC_MEM_CTRL);
		tmp |= 0x3fff;
		WREG32_UVD_CTX(UVD_CGC_MEM_CTRL, tmp);

		orig = data = RREG32(UVD_CGC_CTRL);
		data |= DCM;
		if (orig != data)
			WREG32(UVD_CGC_CTRL, data);

		/* 0 = no override, gating active */
		WREG32_SMC(SMC_CG_IND_START + CG_CGTT_LOCAL_0, 0);
		WREG32_SMC(SMC_CG_IND_START + CG_CGTT_LOCAL_1, 0);
	} else {
		tmp = RREG32_UVD_CTX(UVD_CGC_MEM_CTRL);
		tmp &= ~0x3fff;
		WREG32_UVD_CTX(UVD_CGC_MEM_CTRL, tmp);

		orig = data = RREG32(UVD_CGC_CTRL);
		data &= ~DCM;
		if (orig != data)
			WREG32(UVD_CGC_CTRL, data);

		/* all-ones = override everything, gating defeated */
		WREG32_SMC(SMC_CG_IND_START + CG_CGTT_LOCAL_0, 0xffffffff);
		WREG32_SMC(SMC_CG_IND_START + CG_CGTT_LOCAL_1, 0xffffffff);
	}
}
5199
/* MC/VM clock-gating control registers walked by both the MGCG
 * (si_enable_mc_mgcg) and light-sleep (si_enable_mc_ls) paths. */
static const u32 mc_cg_registers[] =
{
	MC_HUB_MISC_HUB_CG,
	MC_HUB_MISC_SIP_CG,
	MC_HUB_MISC_VM_CG,
	MC_XPB_CLK_GAT,
	ATC_MISC_CG,
	MC_CITF_MISC_WR_CG,
	MC_CITF_MISC_RD_CG,
	MC_CITF_MISC_VM_CG,
	VM_L2_CG,
};
5212
5213static void si_enable_mc_ls(struct radeon_device *rdev,
5214 bool enable)
5215{
5216 int i;
5217 u32 orig, data;
5218
5219 for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
5220 orig = data = RREG32(mc_cg_registers[i]);
Alex Deuchere16866e2013-08-08 19:34:07 -04005221 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_MC_LS))
Alex Deucherf8f84ac2013-03-07 12:56:35 -05005222 data |= MC_LS_ENABLE;
5223 else
5224 data &= ~MC_LS_ENABLE;
5225 if (data != orig)
5226 WREG32(mc_cg_registers[i], data);
5227 }
5228}
5229
Alex Deuchere16866e2013-08-08 19:34:07 -04005230static void si_enable_mc_mgcg(struct radeon_device *rdev,
5231 bool enable)
5232{
5233 int i;
5234 u32 orig, data;
5235
5236 for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
5237 orig = data = RREG32(mc_cg_registers[i]);
5238 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_MC_MGCG))
5239 data |= MC_CG_ENABLE;
5240 else
5241 data &= ~MC_CG_ENABLE;
5242 if (data != orig)
5243 WREG32(mc_cg_registers[i], data);
5244 }
5245}
5246
5247static void si_enable_dma_mgcg(struct radeon_device *rdev,
5248 bool enable)
5249{
5250 u32 orig, data, offset;
5251 int i;
5252
5253 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_SDMA_MGCG)) {
5254 for (i = 0; i < 2; i++) {
5255 if (i == 0)
5256 offset = DMA0_REGISTER_OFFSET;
5257 else
5258 offset = DMA1_REGISTER_OFFSET;
5259 orig = data = RREG32(DMA_POWER_CNTL + offset);
5260 data &= ~MEM_POWER_OVERRIDE;
5261 if (data != orig)
5262 WREG32(DMA_POWER_CNTL + offset, data);
5263 WREG32(DMA_CLK_CTRL + offset, 0x00000100);
5264 }
5265 } else {
5266 for (i = 0; i < 2; i++) {
5267 if (i == 0)
5268 offset = DMA0_REGISTER_OFFSET;
5269 else
5270 offset = DMA1_REGISTER_OFFSET;
5271 orig = data = RREG32(DMA_POWER_CNTL + offset);
5272 data |= MEM_POWER_OVERRIDE;
5273 if (data != orig)
5274 WREG32(DMA_POWER_CNTL + offset, data);
5275
5276 orig = data = RREG32(DMA_CLK_CTRL + offset);
5277 data = 0xff000000;
5278 if (data != orig)
5279 WREG32(DMA_CLK_CTRL + offset, data);
5280 }
5281 }
5282}
5283
5284static void si_enable_bif_mgls(struct radeon_device *rdev,
5285 bool enable)
5286{
5287 u32 orig, data;
5288
5289 orig = data = RREG32_PCIE(PCIE_CNTL2);
5290
5291 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_BIF_LS))
5292 data |= SLV_MEM_LS_EN | MST_MEM_LS_EN |
5293 REPLAY_MEM_LS_EN | SLV_MEM_AGGRESSIVE_LS_EN;
5294 else
5295 data &= ~(SLV_MEM_LS_EN | MST_MEM_LS_EN |
5296 REPLAY_MEM_LS_EN | SLV_MEM_AGGRESSIVE_LS_EN);
5297
5298 if (orig != data)
5299 WREG32_PCIE(PCIE_CNTL2, data);
5300}
5301
5302static void si_enable_hdp_mgcg(struct radeon_device *rdev,
5303 bool enable)
5304{
5305 u32 orig, data;
5306
5307 orig = data = RREG32(HDP_HOST_PATH_CNTL);
5308
5309 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_HDP_MGCG))
5310 data &= ~CLOCK_GATING_DIS;
5311 else
5312 data |= CLOCK_GATING_DIS;
5313
5314 if (orig != data)
5315 WREG32(HDP_HOST_PATH_CNTL, data);
5316}
5317
5318static void si_enable_hdp_ls(struct radeon_device *rdev,
5319 bool enable)
5320{
5321 u32 orig, data;
5322
5323 orig = data = RREG32(HDP_MEM_POWER_LS);
5324
5325 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_HDP_LS))
5326 data |= HDP_LS_ENABLE;
5327 else
5328 data &= ~HDP_LS_ENABLE;
5329
5330 if (orig != data)
5331 WREG32(HDP_MEM_POWER_LS, data);
5332}
5333
Alex Deucher68e3a092013-12-18 14:11:40 -05005334static void si_update_cg(struct radeon_device *rdev,
5335 u32 block, bool enable)
Alex Deuchere16866e2013-08-08 19:34:07 -04005336{
5337 if (block & RADEON_CG_BLOCK_GFX) {
Alex Deucher811e4d52013-09-03 13:31:33 -04005338 si_enable_gui_idle_interrupt(rdev, false);
Alex Deuchere16866e2013-08-08 19:34:07 -04005339 /* order matters! */
5340 if (enable) {
5341 si_enable_mgcg(rdev, true);
5342 si_enable_cgcg(rdev, true);
5343 } else {
5344 si_enable_cgcg(rdev, false);
5345 si_enable_mgcg(rdev, false);
5346 }
Alex Deucher811e4d52013-09-03 13:31:33 -04005347 si_enable_gui_idle_interrupt(rdev, true);
Alex Deuchere16866e2013-08-08 19:34:07 -04005348 }
5349
5350 if (block & RADEON_CG_BLOCK_MC) {
5351 si_enable_mc_mgcg(rdev, enable);
5352 si_enable_mc_ls(rdev, enable);
5353 }
5354
5355 if (block & RADEON_CG_BLOCK_SDMA) {
5356 si_enable_dma_mgcg(rdev, enable);
5357 }
5358
5359 if (block & RADEON_CG_BLOCK_BIF) {
5360 si_enable_bif_mgls(rdev, enable);
5361 }
5362
5363 if (block & RADEON_CG_BLOCK_UVD) {
5364 if (rdev->has_uvd) {
5365 si_enable_uvd_mgcg(rdev, enable);
5366 }
5367 }
5368
5369 if (block & RADEON_CG_BLOCK_HDP) {
5370 si_enable_hdp_mgcg(rdev, enable);
5371 si_enable_hdp_ls(rdev, enable);
5372 }
5373}
Alex Deucherf8f84ac2013-03-07 12:56:35 -05005374
5375static void si_init_cg(struct radeon_device *rdev)
5376{
Alex Deuchere16866e2013-08-08 19:34:07 -04005377 si_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
5378 RADEON_CG_BLOCK_MC |
5379 RADEON_CG_BLOCK_SDMA |
5380 RADEON_CG_BLOCK_BIF |
5381 RADEON_CG_BLOCK_HDP), true);
Alex Deucherb2d70912013-07-27 17:53:25 -04005382 if (rdev->has_uvd) {
Alex Deuchere16866e2013-08-08 19:34:07 -04005383 si_update_cg(rdev, RADEON_CG_BLOCK_UVD, true);
Alex Deucherf8f84ac2013-03-07 12:56:35 -05005384 si_init_uvd_internal_cg(rdev);
5385 }
5386}
5387
5388static void si_fini_cg(struct radeon_device *rdev)
5389{
Alex Deucher0116e1e2013-08-08 18:00:10 -04005390 if (rdev->has_uvd) {
Alex Deuchere16866e2013-08-08 19:34:07 -04005391 si_update_cg(rdev, RADEON_CG_BLOCK_UVD, false);
Alex Deucher0116e1e2013-08-08 18:00:10 -04005392 }
Alex Deuchere16866e2013-08-08 19:34:07 -04005393 si_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
5394 RADEON_CG_BLOCK_MC |
5395 RADEON_CG_BLOCK_SDMA |
5396 RADEON_CG_BLOCK_BIF |
5397 RADEON_CG_BLOCK_HDP), false);
5398}
Alex Deucherf8f84ac2013-03-07 12:56:35 -05005399
Alex Deucher59a82d02013-08-13 12:48:06 -04005400u32 si_get_csb_size(struct radeon_device *rdev)
5401{
5402 u32 count = 0;
5403 const struct cs_section_def *sect = NULL;
5404 const struct cs_extent_def *ext = NULL;
5405
5406 if (rdev->rlc.cs_data == NULL)
5407 return 0;
5408
5409 /* begin clear state */
5410 count += 2;
5411 /* context control state */
5412 count += 3;
5413
5414 for (sect = rdev->rlc.cs_data; sect->section != NULL; ++sect) {
5415 for (ext = sect->section; ext->extent != NULL; ++ext) {
5416 if (sect->id == SECT_CONTEXT)
5417 count += 2 + ext->reg_count;
5418 else
5419 return 0;
5420 }
5421 }
5422 /* pa_sc_raster_config */
5423 count += 3;
5424 /* end clear state */
5425 count += 2;
5426 /* clear state */
5427 count += 2;
5428
5429 return count;
5430}
5431
/* Fill @buffer with the little-endian PM4 clear-state stream.  The
 * layout must stay in lock-step with si_get_csb_size(); the caller
 * provides a buffer of at least that many dwords. */
void si_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer)
{
	u32 count = 0, i;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;

	if (rdev->rlc.cs_data == NULL)
		return;
	if (buffer == NULL)
		return;

	/* begin clear state */
	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	/* context control */
	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
	buffer[count++] = cpu_to_le32(0x80000000);
	buffer[count++] = cpu_to_le32(0x80000000);

	/* one SET_CONTEXT_REG run per extent; non-context sections abort */
	for (sect = rdev->rlc.cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT) {
				buffer[count++] =
					cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
				buffer[count++] = cpu_to_le32(ext->reg_index - 0xa000);
				for (i = 0; i < ext->reg_count; i++)
					buffer[count++] = cpu_to_le32(ext->extent[i]);
			} else {
				return;
			}
		}
	}

	/* per-family PA_SC_RASTER_CONFIG value */
	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	buffer[count++] = cpu_to_le32(PA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
	switch (rdev->family) {
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
		buffer[count++] = cpu_to_le32(0x2a00126a);
		break;
	case CHIP_VERDE:
		buffer[count++] = cpu_to_le32(0x0000124a);
		break;
	case CHIP_OLAND:
		buffer[count++] = cpu_to_le32(0x00000082);
		break;
	case CHIP_HAINAN:
		buffer[count++] = cpu_to_le32(0x00000000);
		break;
	default:
		buffer[count++] = cpu_to_le32(0x00000000);
		break;
	}

	/* end clear state */
	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);

	/* clear state */
	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
	buffer[count++] = cpu_to_le32(0);
}
5491
/**
 * si_init_pg - initialize powergating state
 * @rdev: radeon_device pointer
 *
 * If any powergating features are enabled (rdev->pg_flags), set up SDMA
 * powergating when flagged, program the always-on CU mask, and either
 * initialize GFX powergating or, when GFX PG is not supported, program the
 * RLC save/restore and clear-state buffer addresses directly.  When no pg
 * flags are set, only the RLC buffer addresses are programmed (the RLC
 * still needs them to function).
 */
static void si_init_pg(struct radeon_device *rdev)
{
	if (rdev->pg_flags) {
		if (rdev->pg_flags & RADEON_PG_SUPPORT_SDMA) {
			si_init_dma_pg(rdev);
		}
		si_init_ao_cu_mask(rdev);
		if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG) {
			si_init_gfx_cgpg(rdev);
		} else {
			/* GFX PG unsupported: program the RLC buffers by hand.
			 * Addresses are written in units of 256 bytes (>> 8). */
			WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
			WREG32(RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8);
		}
		si_enable_dma_pg(rdev, true);
		si_enable_gfx_cgpg(rdev, true);
	} else {
		WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
		WREG32(RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8);
	}
}
5512
/**
 * si_fini_pg - tear down powergating
 * @rdev: radeon_device pointer
 *
 * Disables SDMA and GFX powergating if any pg features were enabled.
 */
static void si_fini_pg(struct radeon_device *rdev)
{
	if (rdev->pg_flags) {
		si_enable_dma_pg(rdev, false);
		si_enable_gfx_cgpg(rdev, false);
	}
}
5520
5521/*
Alex Deucher347e7592012-03-20 17:18:21 -04005522 * RLC
5523 */
/**
 * si_rlc_reset - soft reset the RLC
 * @rdev: radeon_device pointer
 *
 * Pulses the SOFT_RESET_RLC bit in GRBM_SOFT_RESET: assert, wait 50us,
 * deassert, wait 50us for the block to come back out of reset.
 */
void si_rlc_reset(struct radeon_device *rdev)
{
	u32 tmp = RREG32(GRBM_SOFT_RESET);

	tmp |= SOFT_RESET_RLC;
	WREG32(GRBM_SOFT_RESET, tmp);
	udelay(50);
	tmp &= ~SOFT_RESET_RLC;
	WREG32(GRBM_SOFT_RESET, tmp);
	udelay(50);
}
5535
/**
 * si_rlc_stop - halt the RLC
 * @rdev: radeon_device pointer
 *
 * Disables the RLC, masks the GUI idle interrupt and waits for the
 * RLC serdes units to go idle before returning.
 */
static void si_rlc_stop(struct radeon_device *rdev)
{
	WREG32(RLC_CNTL, 0);

	si_enable_gui_idle_interrupt(rdev, false);

	si_wait_for_rlc_serdes(rdev);
}
5544
/**
 * si_rlc_start - start the RLC
 * @rdev: radeon_device pointer
 *
 * Enables the RLC, re-enables the GUI idle interrupt and gives the
 * block 50us to come up.
 */
static void si_rlc_start(struct radeon_device *rdev)
{
	WREG32(RLC_CNTL, RLC_ENABLE);

	si_enable_gui_idle_interrupt(rdev, true);

	udelay(50);
}
5553
5554static bool si_lbpw_supported(struct radeon_device *rdev)
5555{
5556 u32 tmp;
5557
5558 /* Enable LBPW only for DDR3 */
5559 tmp = RREG32(MC_SEQ_MISC0);
5560 if ((tmp & 0xF0000000) == 0xB0000000)
5561 return true;
5562 return false;
5563}
5564
5565static void si_enable_lbpw(struct radeon_device *rdev, bool enable)
5566{
5567 u32 tmp;
5568
5569 tmp = RREG32(RLC_LB_CNTL);
5570 if (enable)
5571 tmp |= LOAD_BALANCE_ENABLE;
5572 else
5573 tmp &= ~LOAD_BALANCE_ENABLE;
5574 WREG32(RLC_LB_CNTL, tmp);
5575
5576 if (!enable) {
5577 si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
5578 WREG32(SPI_LB_CU_MASK, 0x00ff);
5579 }
Alex Deucher347e7592012-03-20 17:18:21 -04005580}
5581
/**
 * si_rlc_resume - load the RLC ucode and start the RLC
 * @rdev: radeon_device pointer
 *
 * Stops and resets the RLC, initializes powergating and clockgating,
 * clears the RLC ring/load-balancing registers, uploads the RLC
 * microcode word by word, enables LBPW if the board supports it, and
 * starts the RLC.
 *
 * Returns 0 on success, -EINVAL if no RLC firmware has been loaded.
 */
static int si_rlc_resume(struct radeon_device *rdev)
{
	u32 i;
	const __be32 *fw_data;

	if (!rdev->rlc_fw)
		return -EINVAL;

	si_rlc_stop(rdev);

	si_rlc_reset(rdev);

	si_init_pg(rdev);

	si_init_cg(rdev);

	WREG32(RLC_RL_BASE, 0);
	WREG32(RLC_RL_SIZE, 0);
	WREG32(RLC_LB_CNTL, 0);
	WREG32(RLC_LB_CNTR_MAX, 0xffffffff);
	WREG32(RLC_LB_CNTR_INIT, 0);
	WREG32(RLC_LB_INIT_CU_MASK, 0xffffffff);

	WREG32(RLC_MC_CNTL, 0);
	WREG32(RLC_UCODE_CNTL, 0);

	/* firmware words are big-endian; convert as we write them */
	fw_data = (const __be32 *)rdev->rlc_fw->data;
	for (i = 0; i < SI_RLC_UCODE_SIZE; i++) {
		WREG32(RLC_UCODE_ADDR, i);
		WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
	}
	WREG32(RLC_UCODE_ADDR, 0);

	si_enable_lbpw(rdev, si_lbpw_supported(rdev));

	si_rlc_start(rdev);

	return 0;
}
5621
/**
 * si_enable_interrupts - enable the interrupt ring buffer
 * @rdev: radeon_device pointer
 *
 * Sets the interrupt-enable bits in IH_CNTL and IH_RB_CNTL and marks
 * the IH as enabled in the driver state.
 */
static void si_enable_interrupts(struct radeon_device *rdev)
{
	u32 ih_cntl = RREG32(IH_CNTL);
	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);

	ih_cntl |= ENABLE_INTR;
	ih_rb_cntl |= IH_RB_ENABLE;
	WREG32(IH_CNTL, ih_cntl);
	WREG32(IH_RB_CNTL, ih_rb_cntl);
	rdev->ih.enabled = true;
}
5633
/**
 * si_disable_interrupts - disable the interrupt ring buffer
 * @rdev: radeon_device pointer
 *
 * Clears the interrupt-enable bits in IH_RB_CNTL and IH_CNTL, resets
 * the ring read/write pointers to 0 and marks the IH as disabled in
 * the driver state.
 */
static void si_disable_interrupts(struct radeon_device *rdev)
{
	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
	u32 ih_cntl = RREG32(IH_CNTL);

	ih_rb_cntl &= ~IH_RB_ENABLE;
	ih_cntl &= ~ENABLE_INTR;
	WREG32(IH_RB_CNTL, ih_rb_cntl);
	WREG32(IH_CNTL, ih_cntl);
	/* set rptr, wptr to 0 */
	WREG32(IH_RB_RPTR, 0);
	WREG32(IH_RB_WPTR, 0);
	rdev->ih.enabled = false;
	rdev->ih.rptr = 0;
}
5649
/**
 * si_disable_interrupt_state - force all interrupt sources off
 * @rdev: radeon_device pointer
 *
 * Writes every interrupt source this driver manages to its disabled
 * state: the CP rings (preserving the CNTX busy/empty bits on ring 0),
 * both DMA engines' trap enables, GRBM, the per-crtc vblank and
 * pageflip masks (bounded by rdev->num_crtc) and, on ASICs with a
 * display block, the DAC autodetect and HPD pad interrupts (preserving
 * each pad's polarity bit).
 */
static void si_disable_interrupt_state(struct radeon_device *rdev)
{
	u32 tmp;

	tmp = RREG32(CP_INT_CNTL_RING0) &
		(CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
	WREG32(CP_INT_CNTL_RING0, tmp);
	WREG32(CP_INT_CNTL_RING1, 0);
	WREG32(CP_INT_CNTL_RING2, 0);
	tmp = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
	WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, tmp);
	tmp = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;
	WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, tmp);
	WREG32(GRBM_INT_CNTL, 0);
	if (rdev->num_crtc >= 2) {
		WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
		WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
	}
	if (rdev->num_crtc >= 4) {
		WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
		WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
	}
	if (rdev->num_crtc >= 6) {
		WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
		WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
	}

	if (rdev->num_crtc >= 2) {
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
	}
	if (rdev->num_crtc >= 4) {
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
	}
	if (rdev->num_crtc >= 6) {
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
	}

	if (!ASIC_IS_NODCE(rdev)) {
		WREG32(DAC_AUTODETECT_INT_CONTROL, 0);

		/* keep only the polarity bit; all enables cleared */
		tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD1_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD2_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD3_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD4_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD5_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD6_INT_CONTROL, tmp);
	}
}
5707
/**
 * si_irq_init - allocate and program the interrupt handler (IH) ring
 * @rdev: radeon_device pointer
 *
 * Allocates the IH ring buffer, brings up the RLC (si_rlc_resume),
 * programs the interrupt controller registers (ring base/size,
 * writeback address, IH_CNTL), forces all interrupt sources off and
 * finally enables the IH ring.
 *
 * Returns 0 for success, error code on failure (ring allocation or RLC
 * bring-up; the ring is freed again if the RLC fails).
 */
static int si_irq_init(struct radeon_device *rdev)
{
	int ret = 0;
	int rb_bufsz;
	u32 interrupt_cntl, ih_cntl, ih_rb_cntl;

	/* allocate ring */
	ret = r600_ih_ring_alloc(rdev);
	if (ret)
		return ret;

	/* disable irqs */
	si_disable_interrupts(rdev);

	/* init rlc */
	ret = si_rlc_resume(rdev);
	if (ret) {
		r600_ih_ring_fini(rdev);
		return ret;
	}

	/* setup interrupt control */
	/* set dummy read address to ring address */
	WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
	interrupt_cntl = RREG32(INTERRUPT_CNTL);
	/* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
	 * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
	 */
	interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
	/* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
	interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
	WREG32(INTERRUPT_CNTL, interrupt_cntl);

	WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
	rb_bufsz = order_base_2(rdev->ih.ring_size / 4);

	ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
		      IH_WPTR_OVERFLOW_CLEAR |
		      (rb_bufsz << 1));

	if (rdev->wb.enabled)
		ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;

	/* set the writeback address whether it's enabled or not */
	WREG32(IH_RB_WPTR_ADDR_LO, (rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFFFFFFFC);
	WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFF);

	WREG32(IH_RB_CNTL, ih_rb_cntl);

	/* set rptr, wptr to 0 */
	WREG32(IH_RB_RPTR, 0);
	WREG32(IH_RB_WPTR, 0);

	/* Default settings for IH_CNTL (disabled at first) */
	ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10) | MC_VMID(0);
	/* RPTR_REARM only works if msi's are enabled */
	if (rdev->msi_enabled)
		ih_cntl |= RPTR_REARM;
	WREG32(IH_CNTL, ih_cntl);

	/* force the active interrupt state to all disabled */
	si_disable_interrupt_state(rdev);

	pci_set_master(rdev->pdev);

	/* enable irqs */
	si_enable_interrupts(rdev);

	return ret;
}
5778
/**
 * si_irq_set - program the interrupt enables from driver state
 * @rdev: radeon_device pointer
 *
 * Builds the enable values for every interrupt source from the flags in
 * rdev->irq (CP rings, both DMA engines, vblank/pageflip per crtc, HPD
 * pads, dpm thermal) and writes them to the hardware in one pass.  All
 * values start from "disabled" (preserving reserved/polarity bits read
 * back from the hardware) and bits are OR'd in only for sources the
 * driver currently wants.
 *
 * Returns 0 for success, -EINVAL if no interrupt handler is installed.
 */
int si_irq_set(struct radeon_device *rdev)
{
	u32 cp_int_cntl;
	u32 cp_int_cntl1 = 0, cp_int_cntl2 = 0;
	u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
	u32 hpd1 = 0, hpd2 = 0, hpd3 = 0, hpd4 = 0, hpd5 = 0, hpd6 = 0;
	u32 grbm_int_cntl = 0;
	u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;
	u32 dma_cntl, dma_cntl1;
	u32 thermal_int = 0;

	if (!rdev->irq.installed) {
		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
		return -EINVAL;
	}
	/* don't enable anything if the ih is disabled */
	if (!rdev->ih.enabled) {
		si_disable_interrupts(rdev);
		/* force the active interrupt state to all disabled */
		si_disable_interrupt_state(rdev);
		return 0;
	}

	/* start from current hw state, keeping only the CNTX busy/empty bits */
	cp_int_cntl = RREG32(CP_INT_CNTL_RING0) &
		(CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);

	if (!ASIC_IS_NODCE(rdev)) {
		hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
	}

	dma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
	dma_cntl1 = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;

	thermal_int = RREG32(CG_THERMAL_INT) &
		~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);

	/* enable CP interrupts on all rings */
	if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
		DRM_DEBUG("si_irq_set: sw int gfx\n");
		cp_int_cntl |= TIME_STAMP_INT_ENABLE;
	}
	if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP1_INDEX])) {
		DRM_DEBUG("si_irq_set: sw int cp1\n");
		cp_int_cntl1 |= TIME_STAMP_INT_ENABLE;
	}
	if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP2_INDEX])) {
		DRM_DEBUG("si_irq_set: sw int cp2\n");
		cp_int_cntl2 |= TIME_STAMP_INT_ENABLE;
	}
	if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) {
		DRM_DEBUG("si_irq_set: sw int dma\n");
		dma_cntl |= TRAP_ENABLE;
	}

	if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_DMA1_INDEX])) {
		DRM_DEBUG("si_irq_set: sw int dma1\n");
		dma_cntl1 |= TRAP_ENABLE;
	}
	/* vblank interrupts are needed both for explicit vblank waits and
	 * for pending page flips on each crtc */
	if (rdev->irq.crtc_vblank_int[0] ||
	    atomic_read(&rdev->irq.pflip[0])) {
		DRM_DEBUG("si_irq_set: vblank 0\n");
		crtc1 |= VBLANK_INT_MASK;
	}
	if (rdev->irq.crtc_vblank_int[1] ||
	    atomic_read(&rdev->irq.pflip[1])) {
		DRM_DEBUG("si_irq_set: vblank 1\n");
		crtc2 |= VBLANK_INT_MASK;
	}
	if (rdev->irq.crtc_vblank_int[2] ||
	    atomic_read(&rdev->irq.pflip[2])) {
		DRM_DEBUG("si_irq_set: vblank 2\n");
		crtc3 |= VBLANK_INT_MASK;
	}
	if (rdev->irq.crtc_vblank_int[3] ||
	    atomic_read(&rdev->irq.pflip[3])) {
		DRM_DEBUG("si_irq_set: vblank 3\n");
		crtc4 |= VBLANK_INT_MASK;
	}
	if (rdev->irq.crtc_vblank_int[4] ||
	    atomic_read(&rdev->irq.pflip[4])) {
		DRM_DEBUG("si_irq_set: vblank 4\n");
		crtc5 |= VBLANK_INT_MASK;
	}
	if (rdev->irq.crtc_vblank_int[5] ||
	    atomic_read(&rdev->irq.pflip[5])) {
		DRM_DEBUG("si_irq_set: vblank 5\n");
		crtc6 |= VBLANK_INT_MASK;
	}
	if (rdev->irq.hpd[0]) {
		DRM_DEBUG("si_irq_set: hpd 1\n");
		hpd1 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[1]) {
		DRM_DEBUG("si_irq_set: hpd 2\n");
		hpd2 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[2]) {
		DRM_DEBUG("si_irq_set: hpd 3\n");
		hpd3 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[3]) {
		DRM_DEBUG("si_irq_set: hpd 4\n");
		hpd4 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[4]) {
		DRM_DEBUG("si_irq_set: hpd 5\n");
		hpd5 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[5]) {
		DRM_DEBUG("si_irq_set: hpd 6\n");
		hpd6 |= DC_HPDx_INT_EN;
	}

	/* write everything back to the hardware */
	WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
	WREG32(CP_INT_CNTL_RING1, cp_int_cntl1);
	WREG32(CP_INT_CNTL_RING2, cp_int_cntl2);

	WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, dma_cntl);
	WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, dma_cntl1);

	WREG32(GRBM_INT_CNTL, grbm_int_cntl);

	if (rdev->irq.dpm_thermal) {
		DRM_DEBUG("dpm thermal\n");
		thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
	}

	if (rdev->num_crtc >= 2) {
		WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
		WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2);
	}
	if (rdev->num_crtc >= 4) {
		WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3);
		WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4);
	}
	if (rdev->num_crtc >= 6) {
		WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5);
		WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
	}

	if (rdev->num_crtc >= 2) {
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1);
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2);
	}
	if (rdev->num_crtc >= 4) {
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3);
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4);
	}
	if (rdev->num_crtc >= 6) {
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, grph5);
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6);
	}

	if (!ASIC_IS_NODCE(rdev)) {
		WREG32(DC_HPD1_INT_CONTROL, hpd1);
		WREG32(DC_HPD2_INT_CONTROL, hpd2);
		WREG32(DC_HPD3_INT_CONTROL, hpd3);
		WREG32(DC_HPD4_INT_CONTROL, hpd4);
		WREG32(DC_HPD5_INT_CONTROL, hpd5);
		WREG32(DC_HPD6_INT_CONTROL, hpd6);
	}

	WREG32(CG_THERMAL_INT, thermal_int);

	return 0;
}
5950
5951static inline void si_irq_ack(struct radeon_device *rdev)
5952{
5953 u32 tmp;
5954
Alex Deucher51535502012-08-30 14:34:30 -04005955 if (ASIC_IS_NODCE(rdev))
5956 return;
5957
Alex Deucher25a857f2012-03-20 17:18:22 -04005958 rdev->irq.stat_regs.evergreen.disp_int = RREG32(DISP_INTERRUPT_STATUS);
5959 rdev->irq.stat_regs.evergreen.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
5960 rdev->irq.stat_regs.evergreen.disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2);
5961 rdev->irq.stat_regs.evergreen.disp_int_cont3 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE3);
5962 rdev->irq.stat_regs.evergreen.disp_int_cont4 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE4);
5963 rdev->irq.stat_regs.evergreen.disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5);
5964 rdev->irq.stat_regs.evergreen.d1grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET);
5965 rdev->irq.stat_regs.evergreen.d2grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET);
5966 if (rdev->num_crtc >= 4) {
5967 rdev->irq.stat_regs.evergreen.d3grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET);
5968 rdev->irq.stat_regs.evergreen.d4grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET);
5969 }
5970 if (rdev->num_crtc >= 6) {
5971 rdev->irq.stat_regs.evergreen.d5grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET);
5972 rdev->irq.stat_regs.evergreen.d6grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET);
5973 }
5974
5975 if (rdev->irq.stat_regs.evergreen.d1grph_int & GRPH_PFLIP_INT_OCCURRED)
5976 WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
5977 if (rdev->irq.stat_regs.evergreen.d2grph_int & GRPH_PFLIP_INT_OCCURRED)
5978 WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
5979 if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT)
5980 WREG32(VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK);
5981 if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT)
5982 WREG32(VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VLINE_ACK);
5983 if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT)
5984 WREG32(VBLANK_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VBLANK_ACK);
5985 if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT)
5986 WREG32(VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK);
5987
5988 if (rdev->num_crtc >= 4) {
5989 if (rdev->irq.stat_regs.evergreen.d3grph_int & GRPH_PFLIP_INT_OCCURRED)
5990 WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
5991 if (rdev->irq.stat_regs.evergreen.d4grph_int & GRPH_PFLIP_INT_OCCURRED)
5992 WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
5993 if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)
5994 WREG32(VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK);
5995 if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT)
5996 WREG32(VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VLINE_ACK);
5997 if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT)
5998 WREG32(VBLANK_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VBLANK_ACK);
5999 if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT)
6000 WREG32(VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VLINE_ACK);
6001 }
6002
6003 if (rdev->num_crtc >= 6) {
6004 if (rdev->irq.stat_regs.evergreen.d5grph_int & GRPH_PFLIP_INT_OCCURRED)
6005 WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
6006 if (rdev->irq.stat_regs.evergreen.d6grph_int & GRPH_PFLIP_INT_OCCURRED)
6007 WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
6008 if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)
6009 WREG32(VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK);
6010 if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT)
6011 WREG32(VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VLINE_ACK);
6012 if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT)
6013 WREG32(VBLANK_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VBLANK_ACK);
6014 if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT)
6015 WREG32(VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VLINE_ACK);
6016 }
6017
6018 if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
6019 tmp = RREG32(DC_HPD1_INT_CONTROL);
6020 tmp |= DC_HPDx_INT_ACK;
6021 WREG32(DC_HPD1_INT_CONTROL, tmp);
6022 }
6023 if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
6024 tmp = RREG32(DC_HPD2_INT_CONTROL);
6025 tmp |= DC_HPDx_INT_ACK;
6026 WREG32(DC_HPD2_INT_CONTROL, tmp);
6027 }
6028 if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
6029 tmp = RREG32(DC_HPD3_INT_CONTROL);
6030 tmp |= DC_HPDx_INT_ACK;
6031 WREG32(DC_HPD3_INT_CONTROL, tmp);
6032 }
6033 if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
6034 tmp = RREG32(DC_HPD4_INT_CONTROL);
6035 tmp |= DC_HPDx_INT_ACK;
6036 WREG32(DC_HPD4_INT_CONTROL, tmp);
6037 }
6038 if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
6039 tmp = RREG32(DC_HPD5_INT_CONTROL);
6040 tmp |= DC_HPDx_INT_ACK;
6041 WREG32(DC_HPD5_INT_CONTROL, tmp);
6042 }
6043 if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
6044 tmp = RREG32(DC_HPD5_INT_CONTROL);
6045 tmp |= DC_HPDx_INT_ACK;
6046 WREG32(DC_HPD6_INT_CONTROL, tmp);
6047 }
6048}
6049
/**
 * si_irq_disable - disable interrupts and quiesce pending sources
 * @rdev: radeon_device pointer
 *
 * Disables the IH ring, waits 1ms for in-flight interrupts, acks
 * anything still pending and forces all sources to the disabled state.
 */
static void si_irq_disable(struct radeon_device *rdev)
{
	si_disable_interrupts(rdev);
	/* Wait and acknowledge irq */
	mdelay(1);
	si_irq_ack(rdev);
	si_disable_interrupt_state(rdev);
}
6058
/* Suspend-time teardown: disable interrupts, then stop the RLC. */
static void si_irq_suspend(struct radeon_device *rdev)
{
	si_irq_disable(rdev);
	si_rlc_stop(rdev);
}
6064
/* Full teardown: suspend the irq machinery and free the IH ring. */
static void si_irq_fini(struct radeon_device *rdev)
{
	si_irq_suspend(rdev);
	r600_ih_ring_fini(rdev);
}
6070
/**
 * si_get_ih_wptr - fetch the current IH ring write pointer
 * @rdev: radeon_device pointer
 *
 * Reads the write pointer from the writeback page if enabled, otherwise
 * from the IH_RB_WPTR register.  On ring overflow, advances the driver's
 * read pointer past the last overwritten vector (wptr + 16) and clears
 * the overflow bit so processing can catch up.
 *
 * Returns the write pointer masked to the ring size.
 */
static inline u32 si_get_ih_wptr(struct radeon_device *rdev)
{
	u32 wptr, tmp;

	if (rdev->wb.enabled)
		wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
	else
		wptr = RREG32(IH_RB_WPTR);

	if (wptr & RB_OVERFLOW) {
		/* When a ring buffer overflow happen start parsing interrupt
		 * from the last not overwritten vector (wptr + 16). Hopefully
		 * this should allow us to catchup.
		 */
		dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
			 wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask);
		rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
		tmp = RREG32(IH_RB_CNTL);
		tmp |= IH_WPTR_OVERFLOW_CLEAR;
		WREG32(IH_RB_CNTL, tmp);
	}
	return (wptr & rdev->ih.ptr_mask);
}
6094
6095/* SI IV Ring
6096 * Each IV ring entry is 128 bits:
6097 * [7:0] - interrupt source id
6098 * [31:8] - reserved
6099 * [59:32] - interrupt source data
6100 * [63:60] - reserved
6101 * [71:64] - RINGID
6102 * [79:72] - VMID
6103 * [127:80] - reserved
6104 */
6105int si_irq_process(struct radeon_device *rdev)
6106{
6107 u32 wptr;
6108 u32 rptr;
6109 u32 src_id, src_data, ring_id;
6110 u32 ring_index;
Alex Deucher25a857f2012-03-20 17:18:22 -04006111 bool queue_hotplug = false;
Alex Deuchera9e61412013-06-25 17:56:16 -04006112 bool queue_thermal = false;
Alex Deucherfbf6dc72013-06-13 18:47:58 -04006113 u32 status, addr;
Alex Deucher25a857f2012-03-20 17:18:22 -04006114
6115 if (!rdev->ih.enabled || rdev->shutdown)
6116 return IRQ_NONE;
6117
6118 wptr = si_get_ih_wptr(rdev);
Christian Koenigc20dc362012-05-16 21:45:24 +02006119
6120restart_ih:
6121 /* is somebody else already processing irqs? */
6122 if (atomic_xchg(&rdev->ih.lock, 1))
6123 return IRQ_NONE;
6124
Alex Deucher25a857f2012-03-20 17:18:22 -04006125 rptr = rdev->ih.rptr;
6126 DRM_DEBUG("si_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
6127
Alex Deucher25a857f2012-03-20 17:18:22 -04006128 /* Order reading of wptr vs. reading of IH ring data */
6129 rmb();
6130
6131 /* display interrupts */
6132 si_irq_ack(rdev);
6133
Alex Deucher25a857f2012-03-20 17:18:22 -04006134 while (rptr != wptr) {
6135 /* wptr/rptr are in bytes! */
6136 ring_index = rptr / 4;
6137 src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
6138 src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
6139 ring_id = le32_to_cpu(rdev->ih.ring[ring_index + 2]) & 0xff;
6140
6141 switch (src_id) {
6142 case 1: /* D1 vblank/vline */
6143 switch (src_data) {
6144 case 0: /* D1 vblank */
6145 if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) {
6146 if (rdev->irq.crtc_vblank_int[0]) {
6147 drm_handle_vblank(rdev->ddev, 0);
6148 rdev->pm.vblank_sync = true;
6149 wake_up(&rdev->irq.vblank_queue);
6150 }
Christian Koenig736fc372012-05-17 19:52:00 +02006151 if (atomic_read(&rdev->irq.pflip[0]))
Alex Deucher25a857f2012-03-20 17:18:22 -04006152 radeon_crtc_handle_flip(rdev, 0);
6153 rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
6154 DRM_DEBUG("IH: D1 vblank\n");
6155 }
6156 break;
6157 case 1: /* D1 vline */
6158 if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT) {
6159 rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
6160 DRM_DEBUG("IH: D1 vline\n");
6161 }
6162 break;
6163 default:
6164 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
6165 break;
6166 }
6167 break;
6168 case 2: /* D2 vblank/vline */
6169 switch (src_data) {
6170 case 0: /* D2 vblank */
6171 if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
6172 if (rdev->irq.crtc_vblank_int[1]) {
6173 drm_handle_vblank(rdev->ddev, 1);
6174 rdev->pm.vblank_sync = true;
6175 wake_up(&rdev->irq.vblank_queue);
6176 }
Christian Koenig736fc372012-05-17 19:52:00 +02006177 if (atomic_read(&rdev->irq.pflip[1]))
Alex Deucher25a857f2012-03-20 17:18:22 -04006178 radeon_crtc_handle_flip(rdev, 1);
6179 rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
6180 DRM_DEBUG("IH: D2 vblank\n");
6181 }
6182 break;
6183 case 1: /* D2 vline */
6184 if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT) {
6185 rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
6186 DRM_DEBUG("IH: D2 vline\n");
6187 }
6188 break;
6189 default:
6190 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
6191 break;
6192 }
6193 break;
6194 case 3: /* D3 vblank/vline */
6195 switch (src_data) {
6196 case 0: /* D3 vblank */
6197 if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
6198 if (rdev->irq.crtc_vblank_int[2]) {
6199 drm_handle_vblank(rdev->ddev, 2);
6200 rdev->pm.vblank_sync = true;
6201 wake_up(&rdev->irq.vblank_queue);
6202 }
Christian Koenig736fc372012-05-17 19:52:00 +02006203 if (atomic_read(&rdev->irq.pflip[2]))
Alex Deucher25a857f2012-03-20 17:18:22 -04006204 radeon_crtc_handle_flip(rdev, 2);
6205 rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
6206 DRM_DEBUG("IH: D3 vblank\n");
6207 }
6208 break;
6209 case 1: /* D3 vline */
6210 if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
6211 rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
6212 DRM_DEBUG("IH: D3 vline\n");
6213 }
6214 break;
6215 default:
6216 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
6217 break;
6218 }
6219 break;
6220 case 4: /* D4 vblank/vline */
6221 switch (src_data) {
6222 case 0: /* D4 vblank */
6223 if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
6224 if (rdev->irq.crtc_vblank_int[3]) {
6225 drm_handle_vblank(rdev->ddev, 3);
6226 rdev->pm.vblank_sync = true;
6227 wake_up(&rdev->irq.vblank_queue);
6228 }
Christian Koenig736fc372012-05-17 19:52:00 +02006229 if (atomic_read(&rdev->irq.pflip[3]))
Alex Deucher25a857f2012-03-20 17:18:22 -04006230 radeon_crtc_handle_flip(rdev, 3);
6231 rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
6232 DRM_DEBUG("IH: D4 vblank\n");
6233 }
6234 break;
6235 case 1: /* D4 vline */
6236 if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
6237 rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
6238 DRM_DEBUG("IH: D4 vline\n");
6239 }
6240 break;
6241 default:
6242 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
6243 break;
6244 }
6245 break;
6246 case 5: /* D5 vblank/vline */
6247 switch (src_data) {
6248 case 0: /* D5 vblank */
6249 if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
6250 if (rdev->irq.crtc_vblank_int[4]) {
6251 drm_handle_vblank(rdev->ddev, 4);
6252 rdev->pm.vblank_sync = true;
6253 wake_up(&rdev->irq.vblank_queue);
6254 }
Christian Koenig736fc372012-05-17 19:52:00 +02006255 if (atomic_read(&rdev->irq.pflip[4]))
Alex Deucher25a857f2012-03-20 17:18:22 -04006256 radeon_crtc_handle_flip(rdev, 4);
6257 rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
6258 DRM_DEBUG("IH: D5 vblank\n");
6259 }
6260 break;
6261 case 1: /* D5 vline */
6262 if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
6263 rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
6264 DRM_DEBUG("IH: D5 vline\n");
6265 }
6266 break;
6267 default:
6268 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
6269 break;
6270 }
6271 break;
6272 case 6: /* D6 vblank/vline */
6273 switch (src_data) {
6274 case 0: /* D6 vblank */
6275 if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
6276 if (rdev->irq.crtc_vblank_int[5]) {
6277 drm_handle_vblank(rdev->ddev, 5);
6278 rdev->pm.vblank_sync = true;
6279 wake_up(&rdev->irq.vblank_queue);
6280 }
Christian Koenig736fc372012-05-17 19:52:00 +02006281 if (atomic_read(&rdev->irq.pflip[5]))
Alex Deucher25a857f2012-03-20 17:18:22 -04006282 radeon_crtc_handle_flip(rdev, 5);
6283 rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
6284 DRM_DEBUG("IH: D6 vblank\n");
6285 }
6286 break;
6287 case 1: /* D6 vline */
6288 if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
6289 rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
6290 DRM_DEBUG("IH: D6 vline\n");
6291 }
6292 break;
6293 default:
6294 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
6295 break;
6296 }
6297 break;
6298 case 42: /* HPD hotplug */
6299 switch (src_data) {
6300 case 0:
6301 if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
6302 rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
6303 queue_hotplug = true;
6304 DRM_DEBUG("IH: HPD1\n");
6305 }
6306 break;
6307 case 1:
6308 if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
6309 rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
6310 queue_hotplug = true;
6311 DRM_DEBUG("IH: HPD2\n");
6312 }
6313 break;
6314 case 2:
6315 if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
6316 rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
6317 queue_hotplug = true;
6318 DRM_DEBUG("IH: HPD3\n");
6319 }
6320 break;
6321 case 3:
6322 if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
6323 rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
6324 queue_hotplug = true;
6325 DRM_DEBUG("IH: HPD4\n");
6326 }
6327 break;
6328 case 4:
6329 if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
6330 rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
6331 queue_hotplug = true;
6332 DRM_DEBUG("IH: HPD5\n");
6333 }
6334 break;
6335 case 5:
6336 if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
6337 rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
6338 queue_hotplug = true;
6339 DRM_DEBUG("IH: HPD6\n");
6340 }
6341 break;
6342 default:
6343 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
6344 break;
6345 }
6346 break;
Christian Königb927e1c2014-01-30 19:01:16 +01006347 case 124: /* UVD */
6348 DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
6349 radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
6350 break;
Christian Königae133a12012-09-18 15:30:44 -04006351 case 146:
6352 case 147:
Alex Deucherfbf6dc72013-06-13 18:47:58 -04006353 addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR);
6354 status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS);
Christian Königae133a12012-09-18 15:30:44 -04006355 dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
6356 dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
Alex Deucherfbf6dc72013-06-13 18:47:58 -04006357 addr);
Christian Königae133a12012-09-18 15:30:44 -04006358 dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
Alex Deucherfbf6dc72013-06-13 18:47:58 -04006359 status);
6360 si_vm_decode_fault(rdev, status, addr);
Christian Königae133a12012-09-18 15:30:44 -04006361 /* reset addr and status */
6362 WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
6363 break;
Alex Deucher25a857f2012-03-20 17:18:22 -04006364 case 176: /* RINGID0 CP_INT */
6365 radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
6366 break;
6367 case 177: /* RINGID1 CP_INT */
6368 radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
6369 break;
6370 case 178: /* RINGID2 CP_INT */
6371 radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
6372 break;
6373 case 181: /* CP EOP event */
6374 DRM_DEBUG("IH: CP EOP\n");
6375 switch (ring_id) {
6376 case 0:
6377 radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
6378 break;
6379 case 1:
6380 radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
6381 break;
6382 case 2:
6383 radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
6384 break;
6385 }
6386 break;
Alex Deucher8c5fd7e2012-12-04 15:28:18 -05006387 case 224: /* DMA trap event */
6388 DRM_DEBUG("IH: DMA trap\n");
6389 radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
6390 break;
Alex Deuchera9e61412013-06-25 17:56:16 -04006391 case 230: /* thermal low to high */
6392 DRM_DEBUG("IH: thermal low to high\n");
6393 rdev->pm.dpm.thermal.high_to_low = false;
6394 queue_thermal = true;
6395 break;
6396 case 231: /* thermal high to low */
6397 DRM_DEBUG("IH: thermal high to low\n");
6398 rdev->pm.dpm.thermal.high_to_low = true;
6399 queue_thermal = true;
6400 break;
Alex Deucher25a857f2012-03-20 17:18:22 -04006401 case 233: /* GUI IDLE */
6402 DRM_DEBUG("IH: GUI idle\n");
Alex Deucher25a857f2012-03-20 17:18:22 -04006403 break;
Alex Deucher8c5fd7e2012-12-04 15:28:18 -05006404 case 244: /* DMA trap event */
6405 DRM_DEBUG("IH: DMA1 trap\n");
6406 radeon_fence_process(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
6407 break;
Alex Deucher25a857f2012-03-20 17:18:22 -04006408 default:
6409 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
6410 break;
6411 }
6412
6413 /* wptr/rptr are in bytes! */
6414 rptr += 16;
6415 rptr &= rdev->ih.ptr_mask;
6416 }
Alex Deucher25a857f2012-03-20 17:18:22 -04006417 if (queue_hotplug)
6418 schedule_work(&rdev->hotplug_work);
Alex Deuchera9e61412013-06-25 17:56:16 -04006419 if (queue_thermal && rdev->pm.dpm_enabled)
6420 schedule_work(&rdev->pm.dpm.thermal.work);
Alex Deucher25a857f2012-03-20 17:18:22 -04006421 rdev->ih.rptr = rptr;
6422 WREG32(IH_RB_RPTR, rdev->ih.rptr);
Christian Koenigc20dc362012-05-16 21:45:24 +02006423 atomic_set(&rdev->ih.lock, 0);
6424
6425 /* make sure wptr hasn't changed while processing */
6426 wptr = si_get_ih_wptr(rdev);
6427 if (wptr != rptr)
6428 goto restart_ih;
6429
Alex Deucher25a857f2012-03-20 17:18:22 -04006430 return IRQ_HANDLED;
6431}
6432
/*
 * startup/shutdown callbacks
 */
/* si_startup - bring the ASIC to an operational state.
 *
 * Programs the link/power interfaces, loads microcode, initializes the
 * memory controller, GART, RLC, writeback and fence infrastructure, then
 * starts the CP/DMA/UVD rings and the IB pool, VM manager and audio.
 * Ordering matters throughout: e.g. MC microcode must be loaded before
 * GART enable, and the IH/IRQ setup must precede ring initialization.
 * Called from si_init() and si_resume().
 * Returns 0 on success, negative error code on failure.
 */
static int si_startup(struct radeon_device *rdev)
{
	struct radeon_ring *ring;
	int r;

	/* enable pcie gen2/3 link */
	si_pcie_gen3_enable(rdev);
	/* enable aspm */
	si_program_aspm(rdev);

	/* scratch needs to be initialized before MC */
	r = r600_vram_scratch_init(rdev);
	if (r)
		return r;

	si_mc_program(rdev);

	/* when DPM is enabled, the MC ucode was already loaded by the
	 * power management code; avoid loading it twice */
	if (!rdev->pm.dpm_enabled) {
		r = si_mc_load_microcode(rdev);
		if (r) {
			DRM_ERROR("Failed to load MC firmware!\n");
			return r;
		}
	}

	r = si_pcie_gart_enable(rdev);
	if (r)
		return r;
	si_gpu_init(rdev);

	/* allocate rlc buffers */
	/* only Verde has a save/restore register list here; other SI
	 * parts use just the clear-state data */
	if (rdev->family == CHIP_VERDE) {
		rdev->rlc.reg_list = verde_rlc_save_restore_register_list;
		rdev->rlc.reg_list_size =
			(u32)ARRAY_SIZE(verde_rlc_save_restore_register_list);
	}
	rdev->rlc.cs_data = si_cs_data;
	r = sumo_rlc_init(rdev);
	if (r) {
		DRM_ERROR("Failed to init rlc BOs!\n");
		return r;
	}

	/* allocate wb buffer */
	r = radeon_wb_init(rdev);
	if (r)
		return r;

	/* start fence processing for each ring: three CP rings... */
	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	/* ...and two DMA rings */
	r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
		return r;
	}

	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
		return r;
	}

	/* UVD is optional: on failure the ring size is zeroed so the
	 * later ring-init pass skips it instead of failing startup */
	if (rdev->has_uvd) {
		r = uvd_v2_2_resume(rdev);
		if (!r) {
			r = radeon_fence_driver_start_ring(rdev,
							   R600_RING_TYPE_UVD_INDEX);
			if (r)
				dev_err(rdev->dev, "UVD fences init error (%d).\n", r);
		}
		if (r)
			rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
	}

	/* Enable IRQ */
	if (!rdev->irq.installed) {
		r = radeon_irq_kms_init(rdev);
		if (r)
			return r;
	}

	r = si_irq_init(rdev);
	if (r) {
		DRM_ERROR("radeon: IH init failed (%d).\n", r);
		radeon_irq_kms_fini(rdev);
		return r;
	}
	si_irq_set(rdev);

	/* initialize the five main rings; CP rings use CP_PACKET2 as the
	 * nop filler, DMA rings use a DMA NOP packet */
	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
			     RADEON_CP_PACKET2);
	if (r)
		return r;

	ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP1_RPTR_OFFSET,
			     RADEON_CP_PACKET2);
	if (r)
		return r;

	ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP2_RPTR_OFFSET,
			     RADEON_CP_PACKET2);
	if (r)
		return r;

	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
	r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
			     DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
	if (r)
		return r;

	ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
	r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
			     DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
	if (r)
		return r;

	r = si_cp_load_microcode(rdev);
	if (r)
		return r;
	r = si_cp_resume(rdev);
	if (r)
		return r;

	r = cayman_dma_resume(rdev);
	if (r)
		return r;

	/* UVD ring only started if the earlier resume/fence setup
	 * succeeded (ring_size left non-zero) */
	if (rdev->has_uvd) {
		ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
		if (ring->ring_size) {
			r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
					     RADEON_CP_PACKET2);
			if (!r)
				r = uvd_v1_0_init(rdev);
			if (r)
				DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
		}
	}

	r = radeon_ib_pool_init(rdev);
	if (r) {
		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
		return r;
	}

	r = radeon_vm_manager_init(rdev);
	if (r) {
		dev_err(rdev->dev, "vm manager initialization failed (%d).\n", r);
		return r;
	}

	r = dce6_audio_init(rdev);
	if (r)
		return r;

	return 0;
}
6612
6613int si_resume(struct radeon_device *rdev)
6614{
6615 int r;
6616
6617 /* Do not reset GPU before posting, on rv770 hw unlike on r500 hw,
6618 * posting will perform necessary task to bring back GPU into good
6619 * shape.
6620 */
6621 /* post card */
6622 atom_asic_init(rdev->mode_info.atom_context);
6623
Alex Deucher205996c2013-03-01 17:08:42 -05006624 /* init golden registers */
6625 si_init_golden_registers(rdev);
6626
Alex Deucherbc6a6292014-02-25 12:01:28 -05006627 if (rdev->pm.pm_method == PM_METHOD_DPM)
6628 radeon_pm_resume(rdev);
Alex Deucher6c7bcce2013-12-18 14:07:14 -05006629
Alex Deucher9b136d52012-03-20 17:18:23 -04006630 rdev->accel_working = true;
6631 r = si_startup(rdev);
6632 if (r) {
6633 DRM_ERROR("si startup failed on resume\n");
6634 rdev->accel_working = false;
6635 return r;
6636 }
6637
6638 return r;
6639
6640}
6641
/* si_suspend - quiesce the ASIC before suspend.
 *
 * Tears down activity in roughly the reverse order of si_startup():
 * power management and audio first, then the VM manager, the CP and
 * DMA engines, UVD, power/clock gating, interrupts, writeback and
 * finally the GART.  Always returns 0.
 */
int si_suspend(struct radeon_device *rdev)
{
	radeon_pm_suspend(rdev);
	dce6_audio_fini(rdev);
	radeon_vm_manager_fini(rdev);
	/* stop command submission engines */
	si_cp_enable(rdev, false);
	cayman_dma_stop(rdev);
	if (rdev->has_uvd) {
		uvd_v1_0_fini(rdev);
		radeon_uvd_suspend(rdev);
	}
	/* disable power/clock gating before cutting interrupts */
	si_fini_pg(rdev);
	si_fini_cg(rdev);
	si_irq_suspend(rdev);
	radeon_wb_disable(rdev);
	si_pcie_gart_disable(rdev);
	return 0;
}
6660
/* Plan is to move initialization in that function and use
 * helper function so that radeon_device_init pretty much
 * do nothing more than calling asic specific function. This
 * should also allow to remove a bunch of callback function
 * like vram_info.
 */
/* si_init - one-time driver initialization for SI ASICs.
 *
 * Reads and validates the (ATOM) BIOS, posts the card if needed, sets
 * up clocks, fences, the memory controller and buffer manager, loads
 * microcode if not already present, initializes power management and
 * all ring/IH structures, then calls si_startup().  A startup failure
 * disables acceleration but is not fatal here; a missing MC ucode is.
 * Returns 0 on success, negative error code on fatal failure.
 */
int si_init(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	int r;

	/* Read BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	/* Must be an ATOMBIOS */
	if (!rdev->is_atom_bios) {
		dev_err(rdev->dev, "Expecting atombios for cayman GPU\n");
		return -EINVAL;
	}
	r = radeon_atombios_init(rdev);
	if (r)
		return r;

	/* Post card if necessary */
	if (!radeon_card_posted(rdev)) {
		if (!rdev->bios) {
			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
			return -EINVAL;
		}
		DRM_INFO("GPU not posted. posting now...\n");
		atom_asic_init(rdev->mode_info.atom_context);
	}
	/* init golden registers */
	si_init_golden_registers(rdev);
	/* Initialize scratch registers */
	si_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);

	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;

	/* initialize memory controller */
	r = si_mc_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;

	/* load all required microcode images unless a previous init
	 * already fetched them */
	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
	    !rdev->rlc_fw || !rdev->mc_fw) {
		r = si_init_microcode(rdev);
		if (r) {
			DRM_ERROR("Failed to load firmware!\n");
			return r;
		}
	}

	/* Initialize power management */
	radeon_pm_init(rdev);

	/* set up ring buffers: 1MB for the three CP rings, 64KB for the
	 * two DMA rings; actual HW init happens in si_startup() */
	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	ring->ring_obj = NULL;
	r600_ring_init(rdev, ring, 1024 * 1024);

	ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
	ring->ring_obj = NULL;
	r600_ring_init(rdev, ring, 1024 * 1024);

	ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
	ring->ring_obj = NULL;
	r600_ring_init(rdev, ring, 1024 * 1024);

	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
	ring->ring_obj = NULL;
	r600_ring_init(rdev, ring, 64 * 1024);

	ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
	ring->ring_obj = NULL;
	r600_ring_init(rdev, ring, 64 * 1024);

	/* UVD init failure is non-fatal: the ring simply stays unused */
	if (rdev->has_uvd) {
		r = radeon_uvd_init(rdev);
		if (!r) {
			ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
			ring->ring_obj = NULL;
			r600_ring_init(rdev, ring, 4096);
		}
	}

	rdev->ih.ring_obj = NULL;
	r600_ih_ring_init(rdev, 64 * 1024);

	r = r600_pcie_gart_init(rdev);
	if (r)
		return r;

	rdev->accel_working = true;
	r = si_startup(rdev);
	if (r) {
		/* startup failure: tear down what was set up and run
		 * without acceleration rather than failing the probe */
		dev_err(rdev->dev, "disabling GPU acceleration\n");
		si_cp_fini(rdev);
		cayman_dma_fini(rdev);
		si_irq_fini(rdev);
		sumo_rlc_fini(rdev);
		radeon_wb_fini(rdev);
		radeon_ib_pool_fini(rdev);
		radeon_vm_manager_fini(rdev);
		radeon_irq_kms_fini(rdev);
		si_pcie_gart_fini(rdev);
		rdev->accel_working = false;
	}

	/* Don't start up if the MC ucode is missing.
	 * The default clocks and voltages before the MC ucode
	 * is loaded are not sufficient for advanced operations.
	 */
	if (!rdev->mc_fw) {
		DRM_ERROR("radeon: MC ucode required for NI+.\n");
		return -EINVAL;
	}

	return 0;
}
6793
/* si_fini - final driver teardown for SI ASICs.
 *
 * Undoes everything si_init()/si_startup() set up, in reverse
 * dependency order, and releases the cached BIOS image last.
 */
void si_fini(struct radeon_device *rdev)
{
	radeon_pm_fini(rdev);
	si_cp_fini(rdev);
	cayman_dma_fini(rdev);
	/* disable power/clock gating before dismantling interrupts */
	si_fini_pg(rdev);
	si_fini_cg(rdev);
	si_irq_fini(rdev);
	sumo_rlc_fini(rdev);
	radeon_wb_fini(rdev);
	radeon_vm_manager_fini(rdev);
	radeon_ib_pool_fini(rdev);
	radeon_irq_kms_fini(rdev);
	if (rdev->has_uvd) {
		uvd_v1_0_fini(rdev);
		radeon_uvd_fini(rdev);
	}
	si_pcie_gart_fini(rdev);
	r600_vram_scratch_fini(rdev);
	radeon_gem_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;
}
6820
Marek Olšák6759a0a2012-08-09 16:34:17 +02006821/**
Alex Deucherd0418892013-01-24 10:35:23 -05006822 * si_get_gpu_clock_counter - return GPU clock counter snapshot
Marek Olšák6759a0a2012-08-09 16:34:17 +02006823 *
6824 * @rdev: radeon_device pointer
6825 *
6826 * Fetches a GPU clock counter snapshot (SI).
6827 * Returns the 64 bit clock counter snapshot.
6828 */
Alex Deucherd0418892013-01-24 10:35:23 -05006829uint64_t si_get_gpu_clock_counter(struct radeon_device *rdev)
Marek Olšák6759a0a2012-08-09 16:34:17 +02006830{
6831 uint64_t clock;
6832
6833 mutex_lock(&rdev->gpu_clock_mutex);
6834 WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1);
6835 clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) |
6836 ((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
6837 mutex_unlock(&rdev->gpu_clock_mutex);
6838 return clock;
6839}
Christian König2539eb02013-04-08 12:41:34 +02006840
/* si_set_uvd_clocks - program the UPLL for the requested UVD clocks.
 *
 * @rdev: radeon_device pointer
 * @vclk: requested UVD video clock (0 puts the PLL to sleep)
 * @dclk: requested UVD decode clock (0 puts the PLL to sleep)
 *
 * Puts the UPLL in bypass, computes the feedback/post dividers, then
 * walks the documented reset/program/release sequence with the
 * required settle delays.  The statement ordering and mdelay()s are
 * part of the hardware programming sequence - do not reorder.
 * Returns 0 on success, negative error code on failure.
 */
int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
{
	unsigned fb_div = 0, vclk_div = 0, dclk_div = 0;
	int r;

	/* bypass vclk and dclk with bclk */
	WREG32_P(CG_UPLL_FUNC_CNTL_2,
		 VCLK_SRC_SEL(1) | DCLK_SRC_SEL(1),
		 ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));

	/* put PLL in bypass mode */
	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~UPLL_BYPASS_EN_MASK);

	if (!vclk || !dclk) {
		/* keep the Bypass mode, put PLL to sleep */
		WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
		return 0;
	}

	/* derive dividers for the requested clocks within the UPLL's
	 * VCO/ref limits */
	r = radeon_uvd_calc_upll_dividers(rdev, vclk, dclk, 125000, 250000,
					  16384, 0x03FFFFFF, 0, 128, 5,
					  &fb_div, &vclk_div, &dclk_div);
	if (r)
		return r;

	/* set RESET_ANTI_MUX to 0 */
	WREG32_P(CG_UPLL_FUNC_CNTL_5, 0, ~RESET_ANTI_MUX_MASK);

	/* set VCO_MODE to 1 */
	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_VCO_MODE_MASK, ~UPLL_VCO_MODE_MASK);

	/* toggle UPLL_SLEEP to 1 then back to 0 */
	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_SLEEP_MASK);

	/* deassert UPLL_RESET */
	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);

	mdelay(1);

	/* handshake with the SMC before reprogramming */
	r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
	if (r)
		return r;

	/* assert UPLL_RESET again */
	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_RESET_MASK, ~UPLL_RESET_MASK);

	/* disable spread spectrum. */
	WREG32_P(CG_UPLL_SPREAD_SPECTRUM, 0, ~SSEN_MASK);

	/* set feedback divider */
	WREG32_P(CG_UPLL_FUNC_CNTL_3, UPLL_FB_DIV(fb_div), ~UPLL_FB_DIV_MASK);

	/* set ref divider to 0 */
	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_REF_DIV_MASK);

	/* ISPARE9 selection depends on the feedback divider range -
	 * threshold per AMD programming sequence */
	if (fb_div < 307200)
		WREG32_P(CG_UPLL_FUNC_CNTL_4, 0, ~UPLL_SPARE_ISPARE9);
	else
		WREG32_P(CG_UPLL_FUNC_CNTL_4, UPLL_SPARE_ISPARE9, ~UPLL_SPARE_ISPARE9);

	/* set PDIV_A and PDIV_B */
	WREG32_P(CG_UPLL_FUNC_CNTL_2,
		 UPLL_PDIV_A(vclk_div) | UPLL_PDIV_B(dclk_div),
		 ~(UPLL_PDIV_A_MASK | UPLL_PDIV_B_MASK));

	/* give the PLL some time to settle */
	mdelay(15);

	/* deassert PLL_RESET */
	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);

	mdelay(15);

	/* switch from bypass mode to normal mode */
	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_BYPASS_EN_MASK);

	r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
	if (r)
		return r;

	/* switch VCLK and DCLK selection */
	WREG32_P(CG_UPLL_FUNC_CNTL_2,
		 VCLK_SRC_SEL(2) | DCLK_SRC_SEL(2),
		 ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));

	mdelay(100);

	return 0;
}
Alex Deucherb9d305d2013-02-14 17:16:51 -05006931
/* si_pcie_gen3_enable - train the PCIe link up to gen2/gen3 speed.
 *
 * Bails out early on IGPs, non-PCIe parts, when the user disabled it
 * via radeon.pcie_gen2=0, or when neither gen2 nor gen3 is supported
 * by the platform.  For gen3, first re-runs link equalization (with a
 * retrain loop mirroring the bridge and GPU LNKCTL/LNKCTL2 state),
 * then requests the software-initiated speed change and waits for the
 * hardware to acknowledge it.
 */
static void si_pcie_gen3_enable(struct radeon_device *rdev)
{
	struct pci_dev *root = rdev->pdev->bus->self;
	int bridge_pos, gpu_pos;
	u32 speed_cntl, mask, current_data_rate;
	int ret, i;
	u16 tmp16;

	/* module parameter: 0 disables gen2/gen3 switching entirely */
	if (radeon_pcie_gen2 == 0)
		return;

	if (rdev->flags & RADEON_IS_IGP)
		return;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return;

	ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
	if (ret != 0)
		return;

	if (!(mask & (DRM_PCIE_SPEED_50 | DRM_PCIE_SPEED_80)))
		return;

	/* nothing to do if the link already runs at the target rate
	 * (data rate 2 = gen3, 1 = gen2) */
	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
	current_data_rate = (speed_cntl & LC_CURRENT_DATA_RATE_MASK) >>
		LC_CURRENT_DATA_RATE_SHIFT;
	if (mask & DRM_PCIE_SPEED_80) {
		if (current_data_rate == 2) {
			DRM_INFO("PCIE gen 3 link speeds already enabled\n");
			return;
		}
		DRM_INFO("enabling PCIE gen 3 link speeds, disable with radeon.pcie_gen2=0\n");
	} else if (mask & DRM_PCIE_SPEED_50) {
		if (current_data_rate == 1) {
			DRM_INFO("PCIE gen 2 link speeds already enabled\n");
			return;
		}
		DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
	}

	/* need the PCIe capability offset of both link partners for the
	 * raw config-space accesses below */
	bridge_pos = pci_pcie_cap(root);
	if (!bridge_pos)
		return;

	gpu_pos = pci_pcie_cap(rdev->pdev);
	if (!gpu_pos)
		return;

	if (mask & DRM_PCIE_SPEED_80) {
		/* re-try equalization if gen3 is not already enabled */
		if (current_data_rate != 2) {
			u16 bridge_cfg, gpu_cfg;
			u16 bridge_cfg2, gpu_cfg2;
			u32 max_lw, current_lw, tmp;

			/* save LNKCTL on both sides and force HW autonomous
			 * width disable while retraining */
			pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
			pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);

			tmp16 = bridge_cfg | PCI_EXP_LNKCTL_HAWD;
			pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);

			tmp16 = gpu_cfg | PCI_EXP_LNKCTL_HAWD;
			pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);

			tmp = RREG32_PCIE(PCIE_LC_STATUS1);
			max_lw = (tmp & LC_DETECTED_LINK_WIDTH_MASK) >> LC_DETECTED_LINK_WIDTH_SHIFT;
			current_lw = (tmp & LC_OPERATING_LINK_WIDTH_MASK) >> LC_OPERATING_LINK_WIDTH_SHIFT;

			/* widen the link back to the maximum detected width
			 * if it negotiated down */
			if (current_lw < max_lw) {
				tmp = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
				if (tmp & LC_RENEGOTIATION_SUPPORT) {
					tmp &= ~(LC_LINK_WIDTH_MASK | LC_UPCONFIGURE_DIS);
					tmp |= (max_lw << LC_LINK_WIDTH_SHIFT);
					tmp |= LC_UPCONFIGURE_SUPPORT | LC_RENEGOTIATE_EN | LC_RECONFIG_NOW;
					WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, tmp);
				}
			}

			/* retry equalization up to 10 times, or until the
			 * GPU reports a transaction pending */
			for (i = 0; i < 10; i++) {
				/* check status */
				pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_DEVSTA, &tmp16);
				if (tmp16 & PCI_EXP_DEVSTA_TRPND)
					break;

				pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
				pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);

				pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &bridge_cfg2);
				pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &gpu_cfg2);

				/* quiesce the link and redo equalization */
				tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
				tmp |= LC_SET_QUIESCE;
				WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);

				tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
				tmp |= LC_REDO_EQ;
				WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);

				mdelay(100);

				/* linkctl */
				/* restore the saved HAWD bits on both sides */
				pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &tmp16);
				tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
				tmp16 |= (bridge_cfg & PCI_EXP_LNKCTL_HAWD);
				pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);

				pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &tmp16);
				tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
				tmp16 |= (gpu_cfg & PCI_EXP_LNKCTL_HAWD);
				pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);

				/* linkctl2 */
				/* restore Enter Compliance (bit 4) and
				 * Transmit Margin (bits 9-11) */
				pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &tmp16);
				tmp16 &= ~((1 << 4) | (7 << 9));
				tmp16 |= (bridge_cfg2 & ((1 << 4) | (7 << 9)));
				pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, tmp16);

				pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
				tmp16 &= ~((1 << 4) | (7 << 9));
				tmp16 |= (gpu_cfg2 & ((1 << 4) | (7 << 9)));
				pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);

				tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
				tmp &= ~LC_SET_QUIESCE;
				WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
			}
		}
	}

	/* set the link speed */
	speed_cntl |= LC_FORCE_EN_SW_SPEED_CHANGE | LC_FORCE_DIS_HW_SPEED_CHANGE;
	speed_cntl &= ~LC_FORCE_DIS_SW_SPEED_CHANGE;
	WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);

	/* program the Target Link Speed field (low nibble of LNKCTL2) */
	pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
	tmp16 &= ~0xf;
	if (mask & DRM_PCIE_SPEED_80)
		tmp16 |= 3; /* gen3 */
	else if (mask & DRM_PCIE_SPEED_50)
		tmp16 |= 2; /* gen2 */
	else
		tmp16 |= 1; /* gen1 */
	pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);

	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
	speed_cntl |= LC_INITIATE_LINK_SPEED_CHANGE;
	WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);

	/* wait for the hardware to clear the initiate bit, signalling
	 * the speed change completed */
	for (i = 0; i < rdev->usec_timeout; i++) {
		speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
		if ((speed_cntl & LC_INITIATE_LINK_SPEED_CHANGE) == 0)
			break;
		udelay(1);
	}
}
7088
Alex Deuchere0bcf1652013-02-15 11:56:59 -05007089static void si_program_aspm(struct radeon_device *rdev)
7090{
7091 u32 data, orig;
7092 bool disable_l0s = false, disable_l1 = false, disable_plloff_in_l1 = false;
7093 bool disable_clkreq = false;
7094
Alex Deucher1294d4a2013-07-16 15:58:50 -04007095 if (radeon_aspm == 0)
7096 return;
7097
Alex Deuchere0bcf1652013-02-15 11:56:59 -05007098 if (!(rdev->flags & RADEON_IS_PCIE))
7099 return;
7100
7101 orig = data = RREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL);
7102 data &= ~LC_XMIT_N_FTS_MASK;
7103 data |= LC_XMIT_N_FTS(0x24) | LC_XMIT_N_FTS_OVERRIDE_EN;
7104 if (orig != data)
7105 WREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL, data);
7106
7107 orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL3);
7108 data |= LC_GO_TO_RECOVERY;
7109 if (orig != data)
7110 WREG32_PCIE_PORT(PCIE_LC_CNTL3, data);
7111
7112 orig = data = RREG32_PCIE(PCIE_P_CNTL);
7113 data |= P_IGNORE_EDB_ERR;
7114 if (orig != data)
7115 WREG32_PCIE(PCIE_P_CNTL, data);
7116
7117 orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL);
7118 data &= ~(LC_L0S_INACTIVITY_MASK | LC_L1_INACTIVITY_MASK);
7119 data |= LC_PMI_TO_L1_DIS;
7120 if (!disable_l0s)
7121 data |= LC_L0S_INACTIVITY(7);
7122
7123 if (!disable_l1) {
7124 data |= LC_L1_INACTIVITY(7);
7125 data &= ~LC_PMI_TO_L1_DIS;
7126 if (orig != data)
7127 WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
7128
7129 if (!disable_plloff_in_l1) {
7130 bool clk_req_support;
7131
7132 orig = data = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_0);
7133 data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
7134 data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
7135 if (orig != data)
7136 WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_0, data);
7137
7138 orig = data = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_1);
7139 data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
7140 data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
7141 if (orig != data)
7142 WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_1, data);
7143
7144 orig = data = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_0);
7145 data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
7146 data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
7147 if (orig != data)
7148 WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_0, data);
7149
7150 orig = data = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_1);
7151 data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
7152 data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
7153 if (orig != data)
7154 WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_1, data);
7155
7156 if ((rdev->family != CHIP_OLAND) && (rdev->family != CHIP_HAINAN)) {
7157 orig = data = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_0);
7158 data &= ~PLL_RAMP_UP_TIME_0_MASK;
7159 if (orig != data)
7160 WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_0, data);
7161
7162 orig = data = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_1);
7163 data &= ~PLL_RAMP_UP_TIME_1_MASK;
7164 if (orig != data)
7165 WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_1, data);
7166
7167 orig = data = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_2);
7168 data &= ~PLL_RAMP_UP_TIME_2_MASK;
7169 if (orig != data)
7170 WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_2, data);
7171
7172 orig = data = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_3);
7173 data &= ~PLL_RAMP_UP_TIME_3_MASK;
7174 if (orig != data)
7175 WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_3, data);
7176
7177 orig = data = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_0);
7178 data &= ~PLL_RAMP_UP_TIME_0_MASK;
7179 if (orig != data)
7180 WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_0, data);
7181
7182 orig = data = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_1);
7183 data &= ~PLL_RAMP_UP_TIME_1_MASK;
7184 if (orig != data)
7185 WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_1, data);
7186
7187 orig = data = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_2);
7188 data &= ~PLL_RAMP_UP_TIME_2_MASK;
7189 if (orig != data)
7190 WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_2, data);
7191
7192 orig = data = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_3);
7193 data &= ~PLL_RAMP_UP_TIME_3_MASK;
7194 if (orig != data)
7195 WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_3, data);
7196 }
7197 orig = data = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
7198 data &= ~LC_DYN_LANES_PWR_STATE_MASK;
7199 data |= LC_DYN_LANES_PWR_STATE(3);
7200 if (orig != data)
7201 WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, data);
7202
7203 orig = data = RREG32_PIF_PHY0(PB0_PIF_CNTL);
7204 data &= ~LS2_EXIT_TIME_MASK;
7205 if ((rdev->family == CHIP_OLAND) || (rdev->family == CHIP_HAINAN))
7206 data |= LS2_EXIT_TIME(5);
7207 if (orig != data)
7208 WREG32_PIF_PHY0(PB0_PIF_CNTL, data);
7209
7210 orig = data = RREG32_PIF_PHY1(PB1_PIF_CNTL);
7211 data &= ~LS2_EXIT_TIME_MASK;
7212 if ((rdev->family == CHIP_OLAND) || (rdev->family == CHIP_HAINAN))
7213 data |= LS2_EXIT_TIME(5);
7214 if (orig != data)
7215 WREG32_PIF_PHY1(PB1_PIF_CNTL, data);
7216
7217 if (!disable_clkreq) {
7218 struct pci_dev *root = rdev->pdev->bus->self;
7219 u32 lnkcap;
7220
7221 clk_req_support = false;
7222 pcie_capability_read_dword(root, PCI_EXP_LNKCAP, &lnkcap);
7223 if (lnkcap & PCI_EXP_LNKCAP_CLKPM)
7224 clk_req_support = true;
7225 } else {
7226 clk_req_support = false;
7227 }
7228
7229 if (clk_req_support) {
7230 orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL2);
7231 data |= LC_ALLOW_PDWN_IN_L1 | LC_ALLOW_PDWN_IN_L23;
7232 if (orig != data)
7233 WREG32_PCIE_PORT(PCIE_LC_CNTL2, data);
7234
7235 orig = data = RREG32(THM_CLK_CNTL);
7236 data &= ~(CMON_CLK_SEL_MASK | TMON_CLK_SEL_MASK);
7237 data |= CMON_CLK_SEL(1) | TMON_CLK_SEL(1);
7238 if (orig != data)
7239 WREG32(THM_CLK_CNTL, data);
7240
7241 orig = data = RREG32(MISC_CLK_CNTL);
7242 data &= ~(DEEP_SLEEP_CLK_SEL_MASK | ZCLK_SEL_MASK);
7243 data |= DEEP_SLEEP_CLK_SEL(1) | ZCLK_SEL(1);
7244 if (orig != data)
7245 WREG32(MISC_CLK_CNTL, data);
7246
7247 orig = data = RREG32(CG_CLKPIN_CNTL);
7248 data &= ~BCLK_AS_XCLK;
7249 if (orig != data)
7250 WREG32(CG_CLKPIN_CNTL, data);
7251
7252 orig = data = RREG32(CG_CLKPIN_CNTL_2);
7253 data &= ~FORCE_BIF_REFCLK_EN;
7254 if (orig != data)
7255 WREG32(CG_CLKPIN_CNTL_2, data);
7256
7257 orig = data = RREG32(MPLL_BYPASSCLK_SEL);
7258 data &= ~MPLL_CLKOUT_SEL_MASK;
7259 data |= MPLL_CLKOUT_SEL(4);
7260 if (orig != data)
7261 WREG32(MPLL_BYPASSCLK_SEL, data);
7262
7263 orig = data = RREG32(SPLL_CNTL_MODE);
7264 data &= ~SPLL_REFCLK_SEL_MASK;
7265 if (orig != data)
7266 WREG32(SPLL_CNTL_MODE, data);
7267 }
7268 }
7269 } else {
7270 if (orig != data)
7271 WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
7272 }
7273
7274 orig = data = RREG32_PCIE(PCIE_CNTL2);
7275 data |= SLV_MEM_LS_EN | MST_MEM_LS_EN | REPLAY_MEM_LS_EN;
7276 if (orig != data)
7277 WREG32_PCIE(PCIE_CNTL2, data);
7278
7279 if (!disable_l0s) {
7280 data = RREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL);
7281 if((data & LC_N_FTS_MASK) == LC_N_FTS_MASK) {
7282 data = RREG32_PCIE(PCIE_LC_STATUS1);
7283 if ((data & LC_REVERSE_XMIT) && (data & LC_REVERSE_RCVR)) {
7284 orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL);
7285 data &= ~LC_L0S_INACTIVITY_MASK;
7286 if (orig != data)
7287 WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
7288 }
7289 }
7290 }
7291}