blob: b75d809c292e3b6418f06d567febc48175c6bdfa [file] [log] [blame]
/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
Alex Deucher0f0de062012-03-20 17:18:17 -040024#include <linux/firmware.h>
Alex Deucher0f0de062012-03-20 17:18:17 -040025#include <linux/slab.h>
26#include <linux/module.h>
David Howells760285e2012-10-02 18:01:07 +010027#include <drm/drmP.h>
Alex Deucher43b3cd92012-03-20 17:18:00 -040028#include "radeon.h"
29#include "radeon_asic.h"
Slava Grigorevbfc1f972014-12-22 17:26:51 -050030#include "radeon_audio.h"
David Howells760285e2012-10-02 18:01:07 +010031#include <drm/radeon_drm.h>
Alex Deucher43b3cd92012-03-20 17:18:00 -040032#include "sid.h"
33#include "atom.h"
Alex Deucher48c0c902012-03-20 17:18:19 -040034#include "si_blit_shaders.h"
Alex Deucherbd8cd532013-04-12 16:48:21 -040035#include "clearstate_si.h"
Alex Deuchera0ceada2013-03-27 15:18:04 -040036#include "radeon_ucode.h"
Alex Deucher43b3cd92012-03-20 17:18:00 -040037
Alex Deucher0f0de062012-03-20 17:18:17 -040038
39MODULE_FIRMWARE("radeon/TAHITI_pfp.bin");
40MODULE_FIRMWARE("radeon/TAHITI_me.bin");
41MODULE_FIRMWARE("radeon/TAHITI_ce.bin");
42MODULE_FIRMWARE("radeon/TAHITI_mc.bin");
Alex Deucher1ebe9282014-04-11 11:21:49 -040043MODULE_FIRMWARE("radeon/TAHITI_mc2.bin");
Alex Deucher0f0de062012-03-20 17:18:17 -040044MODULE_FIRMWARE("radeon/TAHITI_rlc.bin");
Alex Deuchera9e61412013-06-25 17:56:16 -040045MODULE_FIRMWARE("radeon/TAHITI_smc.bin");
Alex Deucher629bd332014-06-25 18:41:34 -040046
47MODULE_FIRMWARE("radeon/tahiti_pfp.bin");
48MODULE_FIRMWARE("radeon/tahiti_me.bin");
49MODULE_FIRMWARE("radeon/tahiti_ce.bin");
50MODULE_FIRMWARE("radeon/tahiti_mc.bin");
51MODULE_FIRMWARE("radeon/tahiti_rlc.bin");
52MODULE_FIRMWARE("radeon/tahiti_smc.bin");
53
Alex Deucher0f0de062012-03-20 17:18:17 -040054MODULE_FIRMWARE("radeon/PITCAIRN_pfp.bin");
55MODULE_FIRMWARE("radeon/PITCAIRN_me.bin");
56MODULE_FIRMWARE("radeon/PITCAIRN_ce.bin");
57MODULE_FIRMWARE("radeon/PITCAIRN_mc.bin");
Alex Deucher1ebe9282014-04-11 11:21:49 -040058MODULE_FIRMWARE("radeon/PITCAIRN_mc2.bin");
Alex Deucher0f0de062012-03-20 17:18:17 -040059MODULE_FIRMWARE("radeon/PITCAIRN_rlc.bin");
Alex Deuchera9e61412013-06-25 17:56:16 -040060MODULE_FIRMWARE("radeon/PITCAIRN_smc.bin");
Alex Deucher629bd332014-06-25 18:41:34 -040061
62MODULE_FIRMWARE("radeon/pitcairn_pfp.bin");
63MODULE_FIRMWARE("radeon/pitcairn_me.bin");
64MODULE_FIRMWARE("radeon/pitcairn_ce.bin");
65MODULE_FIRMWARE("radeon/pitcairn_mc.bin");
66MODULE_FIRMWARE("radeon/pitcairn_rlc.bin");
67MODULE_FIRMWARE("radeon/pitcairn_smc.bin");
Alex Deucher861c7fd2015-12-09 23:48:11 -050068MODULE_FIRMWARE("radeon/pitcairn_k_smc.bin");
Alex Deucher629bd332014-06-25 18:41:34 -040069
Alex Deucher0f0de062012-03-20 17:18:17 -040070MODULE_FIRMWARE("radeon/VERDE_pfp.bin");
71MODULE_FIRMWARE("radeon/VERDE_me.bin");
72MODULE_FIRMWARE("radeon/VERDE_ce.bin");
73MODULE_FIRMWARE("radeon/VERDE_mc.bin");
Alex Deucher1ebe9282014-04-11 11:21:49 -040074MODULE_FIRMWARE("radeon/VERDE_mc2.bin");
Alex Deucher0f0de062012-03-20 17:18:17 -040075MODULE_FIRMWARE("radeon/VERDE_rlc.bin");
Alex Deuchera9e61412013-06-25 17:56:16 -040076MODULE_FIRMWARE("radeon/VERDE_smc.bin");
Alex Deucher629bd332014-06-25 18:41:34 -040077
78MODULE_FIRMWARE("radeon/verde_pfp.bin");
79MODULE_FIRMWARE("radeon/verde_me.bin");
80MODULE_FIRMWARE("radeon/verde_ce.bin");
81MODULE_FIRMWARE("radeon/verde_mc.bin");
82MODULE_FIRMWARE("radeon/verde_rlc.bin");
83MODULE_FIRMWARE("radeon/verde_smc.bin");
Alex Deucher861c7fd2015-12-09 23:48:11 -050084MODULE_FIRMWARE("radeon/verde_k_smc.bin");
Alex Deucher629bd332014-06-25 18:41:34 -040085
Alex Deucherbcc7f5d2012-07-26 18:36:28 -040086MODULE_FIRMWARE("radeon/OLAND_pfp.bin");
87MODULE_FIRMWARE("radeon/OLAND_me.bin");
88MODULE_FIRMWARE("radeon/OLAND_ce.bin");
89MODULE_FIRMWARE("radeon/OLAND_mc.bin");
Alex Deucher1ebe9282014-04-11 11:21:49 -040090MODULE_FIRMWARE("radeon/OLAND_mc2.bin");
Alex Deucherbcc7f5d2012-07-26 18:36:28 -040091MODULE_FIRMWARE("radeon/OLAND_rlc.bin");
Alex Deuchera9e61412013-06-25 17:56:16 -040092MODULE_FIRMWARE("radeon/OLAND_smc.bin");
Alex Deucher629bd332014-06-25 18:41:34 -040093
94MODULE_FIRMWARE("radeon/oland_pfp.bin");
95MODULE_FIRMWARE("radeon/oland_me.bin");
96MODULE_FIRMWARE("radeon/oland_ce.bin");
97MODULE_FIRMWARE("radeon/oland_mc.bin");
98MODULE_FIRMWARE("radeon/oland_rlc.bin");
99MODULE_FIRMWARE("radeon/oland_smc.bin");
Alex Deucher861c7fd2015-12-09 23:48:11 -0500100MODULE_FIRMWARE("radeon/oland_k_smc.bin");
Alex Deucher629bd332014-06-25 18:41:34 -0400101
Alex Deucherc04c00b2012-07-31 12:57:45 -0400102MODULE_FIRMWARE("radeon/HAINAN_pfp.bin");
103MODULE_FIRMWARE("radeon/HAINAN_me.bin");
104MODULE_FIRMWARE("radeon/HAINAN_ce.bin");
105MODULE_FIRMWARE("radeon/HAINAN_mc.bin");
Alex Deucher1ebe9282014-04-11 11:21:49 -0400106MODULE_FIRMWARE("radeon/HAINAN_mc2.bin");
Alex Deucherc04c00b2012-07-31 12:57:45 -0400107MODULE_FIRMWARE("radeon/HAINAN_rlc.bin");
Alex Deuchera9e61412013-06-25 17:56:16 -0400108MODULE_FIRMWARE("radeon/HAINAN_smc.bin");
Alex Deucher0f0de062012-03-20 17:18:17 -0400109
Alex Deucher629bd332014-06-25 18:41:34 -0400110MODULE_FIRMWARE("radeon/hainan_pfp.bin");
111MODULE_FIRMWARE("radeon/hainan_me.bin");
112MODULE_FIRMWARE("radeon/hainan_ce.bin");
113MODULE_FIRMWARE("radeon/hainan_mc.bin");
114MODULE_FIRMWARE("radeon/hainan_rlc.bin");
115MODULE_FIRMWARE("radeon/hainan_smc.bin");
Alex Deucher861c7fd2015-12-09 23:48:11 -0500116MODULE_FIRMWARE("radeon/hainan_k_smc.bin");
Alex Deucher629bd332014-06-25 18:41:34 -0400117
Alex Deucher77e82092016-12-20 16:35:50 -0500118MODULE_FIRMWARE("radeon/si58_mc.bin");
119
Alex Deucher65fcf662014-06-02 16:13:21 -0400120static u32 si_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh);
Alex Deucherb9d305d2013-02-14 17:16:51 -0500121static void si_pcie_gen3_enable(struct radeon_device *rdev);
Alex Deuchere0bcf162013-02-15 11:56:59 -0500122static void si_program_aspm(struct radeon_device *rdev);
Alex Deucher1fd11772013-04-17 17:53:50 -0400123extern void sumo_rlc_fini(struct radeon_device *rdev);
124extern int sumo_rlc_init(struct radeon_device *rdev);
Alex Deucher25a857f2012-03-20 17:18:22 -0400125extern int r600_ih_ring_alloc(struct radeon_device *rdev);
126extern void r600_ih_ring_fini(struct radeon_device *rdev);
Alex Deucher0a96d722012-03-20 17:18:11 -0400127extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev);
Alex Deucherc476dde2012-03-20 17:18:12 -0400128extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save);
129extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save);
Alex Deucherca7db222012-03-20 17:18:30 -0400130extern u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev);
Alex Deucher1c534672013-01-18 15:08:38 -0500131extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev);
Alex Deucher014bb202013-01-18 19:36:20 -0500132extern bool evergreen_is_display_hung(struct radeon_device *rdev);
Alex Deucher811e4d52013-09-03 13:31:33 -0400133static void si_enable_gui_idle_interrupt(struct radeon_device *rdev,
134 bool enable);
Alex Deucher4a5c8ea2013-11-15 16:35:55 -0500135static void si_init_pg(struct radeon_device *rdev);
136static void si_init_cg(struct radeon_device *rdev);
Alex Deuchera6f4ae82013-10-02 14:50:57 -0400137static void si_fini_pg(struct radeon_device *rdev);
138static void si_fini_cg(struct radeon_device *rdev);
139static void si_rlc_stop(struct radeon_device *rdev);
Alex Deucher0a96d722012-03-20 17:18:11 -0400140
Alex Deucher6d8cf002013-03-06 18:48:05 -0500141static const u32 verde_rlc_save_restore_register_list[] =
142{
143 (0x8000 << 16) | (0x98f4 >> 2),
144 0x00000000,
145 (0x8040 << 16) | (0x98f4 >> 2),
146 0x00000000,
147 (0x8000 << 16) | (0xe80 >> 2),
148 0x00000000,
149 (0x8040 << 16) | (0xe80 >> 2),
150 0x00000000,
151 (0x8000 << 16) | (0x89bc >> 2),
152 0x00000000,
153 (0x8040 << 16) | (0x89bc >> 2),
154 0x00000000,
155 (0x8000 << 16) | (0x8c1c >> 2),
156 0x00000000,
157 (0x8040 << 16) | (0x8c1c >> 2),
158 0x00000000,
159 (0x9c00 << 16) | (0x98f0 >> 2),
160 0x00000000,
161 (0x9c00 << 16) | (0xe7c >> 2),
162 0x00000000,
163 (0x8000 << 16) | (0x9148 >> 2),
164 0x00000000,
165 (0x8040 << 16) | (0x9148 >> 2),
166 0x00000000,
167 (0x9c00 << 16) | (0x9150 >> 2),
168 0x00000000,
169 (0x9c00 << 16) | (0x897c >> 2),
170 0x00000000,
171 (0x9c00 << 16) | (0x8d8c >> 2),
172 0x00000000,
173 (0x9c00 << 16) | (0xac54 >> 2),
174 0X00000000,
175 0x3,
176 (0x9c00 << 16) | (0x98f8 >> 2),
177 0x00000000,
178 (0x9c00 << 16) | (0x9910 >> 2),
179 0x00000000,
180 (0x9c00 << 16) | (0x9914 >> 2),
181 0x00000000,
182 (0x9c00 << 16) | (0x9918 >> 2),
183 0x00000000,
184 (0x9c00 << 16) | (0x991c >> 2),
185 0x00000000,
186 (0x9c00 << 16) | (0x9920 >> 2),
187 0x00000000,
188 (0x9c00 << 16) | (0x9924 >> 2),
189 0x00000000,
190 (0x9c00 << 16) | (0x9928 >> 2),
191 0x00000000,
192 (0x9c00 << 16) | (0x992c >> 2),
193 0x00000000,
194 (0x9c00 << 16) | (0x9930 >> 2),
195 0x00000000,
196 (0x9c00 << 16) | (0x9934 >> 2),
197 0x00000000,
198 (0x9c00 << 16) | (0x9938 >> 2),
199 0x00000000,
200 (0x9c00 << 16) | (0x993c >> 2),
201 0x00000000,
202 (0x9c00 << 16) | (0x9940 >> 2),
203 0x00000000,
204 (0x9c00 << 16) | (0x9944 >> 2),
205 0x00000000,
206 (0x9c00 << 16) | (0x9948 >> 2),
207 0x00000000,
208 (0x9c00 << 16) | (0x994c >> 2),
209 0x00000000,
210 (0x9c00 << 16) | (0x9950 >> 2),
211 0x00000000,
212 (0x9c00 << 16) | (0x9954 >> 2),
213 0x00000000,
214 (0x9c00 << 16) | (0x9958 >> 2),
215 0x00000000,
216 (0x9c00 << 16) | (0x995c >> 2),
217 0x00000000,
218 (0x9c00 << 16) | (0x9960 >> 2),
219 0x00000000,
220 (0x9c00 << 16) | (0x9964 >> 2),
221 0x00000000,
222 (0x9c00 << 16) | (0x9968 >> 2),
223 0x00000000,
224 (0x9c00 << 16) | (0x996c >> 2),
225 0x00000000,
226 (0x9c00 << 16) | (0x9970 >> 2),
227 0x00000000,
228 (0x9c00 << 16) | (0x9974 >> 2),
229 0x00000000,
230 (0x9c00 << 16) | (0x9978 >> 2),
231 0x00000000,
232 (0x9c00 << 16) | (0x997c >> 2),
233 0x00000000,
234 (0x9c00 << 16) | (0x9980 >> 2),
235 0x00000000,
236 (0x9c00 << 16) | (0x9984 >> 2),
237 0x00000000,
238 (0x9c00 << 16) | (0x9988 >> 2),
239 0x00000000,
240 (0x9c00 << 16) | (0x998c >> 2),
241 0x00000000,
242 (0x9c00 << 16) | (0x8c00 >> 2),
243 0x00000000,
244 (0x9c00 << 16) | (0x8c14 >> 2),
245 0x00000000,
246 (0x9c00 << 16) | (0x8c04 >> 2),
247 0x00000000,
248 (0x9c00 << 16) | (0x8c08 >> 2),
249 0x00000000,
250 (0x8000 << 16) | (0x9b7c >> 2),
251 0x00000000,
252 (0x8040 << 16) | (0x9b7c >> 2),
253 0x00000000,
254 (0x8000 << 16) | (0xe84 >> 2),
255 0x00000000,
256 (0x8040 << 16) | (0xe84 >> 2),
257 0x00000000,
258 (0x8000 << 16) | (0x89c0 >> 2),
259 0x00000000,
260 (0x8040 << 16) | (0x89c0 >> 2),
261 0x00000000,
262 (0x8000 << 16) | (0x914c >> 2),
263 0x00000000,
264 (0x8040 << 16) | (0x914c >> 2),
265 0x00000000,
266 (0x8000 << 16) | (0x8c20 >> 2),
267 0x00000000,
268 (0x8040 << 16) | (0x8c20 >> 2),
269 0x00000000,
270 (0x8000 << 16) | (0x9354 >> 2),
271 0x00000000,
272 (0x8040 << 16) | (0x9354 >> 2),
273 0x00000000,
274 (0x9c00 << 16) | (0x9060 >> 2),
275 0x00000000,
276 (0x9c00 << 16) | (0x9364 >> 2),
277 0x00000000,
278 (0x9c00 << 16) | (0x9100 >> 2),
279 0x00000000,
280 (0x9c00 << 16) | (0x913c >> 2),
281 0x00000000,
282 (0x8000 << 16) | (0x90e0 >> 2),
283 0x00000000,
284 (0x8000 << 16) | (0x90e4 >> 2),
285 0x00000000,
286 (0x8000 << 16) | (0x90e8 >> 2),
287 0x00000000,
288 (0x8040 << 16) | (0x90e0 >> 2),
289 0x00000000,
290 (0x8040 << 16) | (0x90e4 >> 2),
291 0x00000000,
292 (0x8040 << 16) | (0x90e8 >> 2),
293 0x00000000,
294 (0x9c00 << 16) | (0x8bcc >> 2),
295 0x00000000,
296 (0x9c00 << 16) | (0x8b24 >> 2),
297 0x00000000,
298 (0x9c00 << 16) | (0x88c4 >> 2),
299 0x00000000,
300 (0x9c00 << 16) | (0x8e50 >> 2),
301 0x00000000,
302 (0x9c00 << 16) | (0x8c0c >> 2),
303 0x00000000,
304 (0x9c00 << 16) | (0x8e58 >> 2),
305 0x00000000,
306 (0x9c00 << 16) | (0x8e5c >> 2),
307 0x00000000,
308 (0x9c00 << 16) | (0x9508 >> 2),
309 0x00000000,
310 (0x9c00 << 16) | (0x950c >> 2),
311 0x00000000,
312 (0x9c00 << 16) | (0x9494 >> 2),
313 0x00000000,
314 (0x9c00 << 16) | (0xac0c >> 2),
315 0x00000000,
316 (0x9c00 << 16) | (0xac10 >> 2),
317 0x00000000,
318 (0x9c00 << 16) | (0xac14 >> 2),
319 0x00000000,
320 (0x9c00 << 16) | (0xae00 >> 2),
321 0x00000000,
322 (0x9c00 << 16) | (0xac08 >> 2),
323 0x00000000,
324 (0x9c00 << 16) | (0x88d4 >> 2),
325 0x00000000,
326 (0x9c00 << 16) | (0x88c8 >> 2),
327 0x00000000,
328 (0x9c00 << 16) | (0x88cc >> 2),
329 0x00000000,
330 (0x9c00 << 16) | (0x89b0 >> 2),
331 0x00000000,
332 (0x9c00 << 16) | (0x8b10 >> 2),
333 0x00000000,
334 (0x9c00 << 16) | (0x8a14 >> 2),
335 0x00000000,
336 (0x9c00 << 16) | (0x9830 >> 2),
337 0x00000000,
338 (0x9c00 << 16) | (0x9834 >> 2),
339 0x00000000,
340 (0x9c00 << 16) | (0x9838 >> 2),
341 0x00000000,
342 (0x9c00 << 16) | (0x9a10 >> 2),
343 0x00000000,
344 (0x8000 << 16) | (0x9870 >> 2),
345 0x00000000,
346 (0x8000 << 16) | (0x9874 >> 2),
347 0x00000000,
348 (0x8001 << 16) | (0x9870 >> 2),
349 0x00000000,
350 (0x8001 << 16) | (0x9874 >> 2),
351 0x00000000,
352 (0x8040 << 16) | (0x9870 >> 2),
353 0x00000000,
354 (0x8040 << 16) | (0x9874 >> 2),
355 0x00000000,
356 (0x8041 << 16) | (0x9870 >> 2),
357 0x00000000,
358 (0x8041 << 16) | (0x9874 >> 2),
359 0x00000000,
360 0x00000000
361};
362
Alex Deucher205996c2013-03-01 17:08:42 -0500363static const u32 tahiti_golden_rlc_registers[] =
364{
365 0xc424, 0xffffffff, 0x00601005,
366 0xc47c, 0xffffffff, 0x10104040,
367 0xc488, 0xffffffff, 0x0100000a,
368 0xc314, 0xffffffff, 0x00000800,
369 0xc30c, 0xffffffff, 0x800000f4,
370 0xf4a8, 0xffffffff, 0x00000000
371};
372
373static const u32 tahiti_golden_registers[] =
374{
375 0x9a10, 0x00010000, 0x00018208,
376 0x9830, 0xffffffff, 0x00000000,
377 0x9834, 0xf00fffff, 0x00000400,
378 0x9838, 0x0002021c, 0x00020200,
379 0xc78, 0x00000080, 0x00000000,
380 0xd030, 0x000300c0, 0x00800040,
381 0xd830, 0x000300c0, 0x00800040,
382 0x5bb0, 0x000000f0, 0x00000070,
383 0x5bc0, 0x00200000, 0x50100000,
384 0x7030, 0x31000311, 0x00000011,
385 0x277c, 0x00000003, 0x000007ff,
386 0x240c, 0x000007ff, 0x00000000,
387 0x8a14, 0xf000001f, 0x00000007,
388 0x8b24, 0xffffffff, 0x00ffffff,
389 0x8b10, 0x0000ff0f, 0x00000000,
390 0x28a4c, 0x07ffffff, 0x4e000000,
391 0x28350, 0x3f3f3fff, 0x2a00126a,
392 0x30, 0x000000ff, 0x0040,
393 0x34, 0x00000040, 0x00004040,
394 0x9100, 0x07ffffff, 0x03000000,
395 0x8e88, 0x01ff1f3f, 0x00000000,
396 0x8e84, 0x01ff1f3f, 0x00000000,
397 0x9060, 0x0000007f, 0x00000020,
398 0x9508, 0x00010000, 0x00010000,
399 0xac14, 0x00000200, 0x000002fb,
400 0xac10, 0xffffffff, 0x0000543b,
401 0xac0c, 0xffffffff, 0xa9210876,
402 0x88d0, 0xffffffff, 0x000fff40,
403 0x88d4, 0x0000001f, 0x00000010,
404 0x1410, 0x20000000, 0x20fffed8,
405 0x15c0, 0x000c0fc0, 0x000c0400
406};
407
408static const u32 tahiti_golden_registers2[] =
409{
410 0xc64, 0x00000001, 0x00000001
411};
412
413static const u32 pitcairn_golden_rlc_registers[] =
414{
415 0xc424, 0xffffffff, 0x00601004,
416 0xc47c, 0xffffffff, 0x10102020,
417 0xc488, 0xffffffff, 0x01000020,
418 0xc314, 0xffffffff, 0x00000800,
419 0xc30c, 0xffffffff, 0x800000a4
420};
421
422static const u32 pitcairn_golden_registers[] =
423{
424 0x9a10, 0x00010000, 0x00018208,
425 0x9830, 0xffffffff, 0x00000000,
426 0x9834, 0xf00fffff, 0x00000400,
427 0x9838, 0x0002021c, 0x00020200,
428 0xc78, 0x00000080, 0x00000000,
429 0xd030, 0x000300c0, 0x00800040,
430 0xd830, 0x000300c0, 0x00800040,
431 0x5bb0, 0x000000f0, 0x00000070,
432 0x5bc0, 0x00200000, 0x50100000,
433 0x7030, 0x31000311, 0x00000011,
434 0x2ae4, 0x00073ffe, 0x000022a2,
435 0x240c, 0x000007ff, 0x00000000,
436 0x8a14, 0xf000001f, 0x00000007,
437 0x8b24, 0xffffffff, 0x00ffffff,
438 0x8b10, 0x0000ff0f, 0x00000000,
439 0x28a4c, 0x07ffffff, 0x4e000000,
440 0x28350, 0x3f3f3fff, 0x2a00126a,
441 0x30, 0x000000ff, 0x0040,
442 0x34, 0x00000040, 0x00004040,
443 0x9100, 0x07ffffff, 0x03000000,
444 0x9060, 0x0000007f, 0x00000020,
445 0x9508, 0x00010000, 0x00010000,
446 0xac14, 0x000003ff, 0x000000f7,
447 0xac10, 0xffffffff, 0x00000000,
448 0xac0c, 0xffffffff, 0x32761054,
449 0x88d4, 0x0000001f, 0x00000010,
450 0x15c0, 0x000c0fc0, 0x000c0400
451};
452
453static const u32 verde_golden_rlc_registers[] =
454{
455 0xc424, 0xffffffff, 0x033f1005,
456 0xc47c, 0xffffffff, 0x10808020,
457 0xc488, 0xffffffff, 0x00800008,
458 0xc314, 0xffffffff, 0x00001000,
459 0xc30c, 0xffffffff, 0x80010014
460};
461
462static const u32 verde_golden_registers[] =
463{
464 0x9a10, 0x00010000, 0x00018208,
465 0x9830, 0xffffffff, 0x00000000,
466 0x9834, 0xf00fffff, 0x00000400,
467 0x9838, 0x0002021c, 0x00020200,
468 0xc78, 0x00000080, 0x00000000,
469 0xd030, 0x000300c0, 0x00800040,
470 0xd030, 0x000300c0, 0x00800040,
471 0xd830, 0x000300c0, 0x00800040,
472 0xd830, 0x000300c0, 0x00800040,
473 0x5bb0, 0x000000f0, 0x00000070,
474 0x5bc0, 0x00200000, 0x50100000,
475 0x7030, 0x31000311, 0x00000011,
476 0x2ae4, 0x00073ffe, 0x000022a2,
477 0x2ae4, 0x00073ffe, 0x000022a2,
478 0x2ae4, 0x00073ffe, 0x000022a2,
479 0x240c, 0x000007ff, 0x00000000,
480 0x240c, 0x000007ff, 0x00000000,
481 0x240c, 0x000007ff, 0x00000000,
482 0x8a14, 0xf000001f, 0x00000007,
483 0x8a14, 0xf000001f, 0x00000007,
484 0x8a14, 0xf000001f, 0x00000007,
485 0x8b24, 0xffffffff, 0x00ffffff,
486 0x8b10, 0x0000ff0f, 0x00000000,
487 0x28a4c, 0x07ffffff, 0x4e000000,
488 0x28350, 0x3f3f3fff, 0x0000124a,
489 0x28350, 0x3f3f3fff, 0x0000124a,
490 0x28350, 0x3f3f3fff, 0x0000124a,
491 0x30, 0x000000ff, 0x0040,
492 0x34, 0x00000040, 0x00004040,
493 0x9100, 0x07ffffff, 0x03000000,
494 0x9100, 0x07ffffff, 0x03000000,
495 0x8e88, 0x01ff1f3f, 0x00000000,
496 0x8e88, 0x01ff1f3f, 0x00000000,
497 0x8e88, 0x01ff1f3f, 0x00000000,
498 0x8e84, 0x01ff1f3f, 0x00000000,
499 0x8e84, 0x01ff1f3f, 0x00000000,
500 0x8e84, 0x01ff1f3f, 0x00000000,
501 0x9060, 0x0000007f, 0x00000020,
502 0x9508, 0x00010000, 0x00010000,
503 0xac14, 0x000003ff, 0x00000003,
504 0xac14, 0x000003ff, 0x00000003,
505 0xac14, 0x000003ff, 0x00000003,
506 0xac10, 0xffffffff, 0x00000000,
507 0xac10, 0xffffffff, 0x00000000,
508 0xac10, 0xffffffff, 0x00000000,
509 0xac0c, 0xffffffff, 0x00001032,
510 0xac0c, 0xffffffff, 0x00001032,
511 0xac0c, 0xffffffff, 0x00001032,
512 0x88d4, 0x0000001f, 0x00000010,
513 0x88d4, 0x0000001f, 0x00000010,
514 0x88d4, 0x0000001f, 0x00000010,
515 0x15c0, 0x000c0fc0, 0x000c0400
516};
517
518static const u32 oland_golden_rlc_registers[] =
519{
520 0xc424, 0xffffffff, 0x00601005,
521 0xc47c, 0xffffffff, 0x10104040,
522 0xc488, 0xffffffff, 0x0100000a,
523 0xc314, 0xffffffff, 0x00000800,
524 0xc30c, 0xffffffff, 0x800000f4
525};
526
527static const u32 oland_golden_registers[] =
528{
529 0x9a10, 0x00010000, 0x00018208,
530 0x9830, 0xffffffff, 0x00000000,
531 0x9834, 0xf00fffff, 0x00000400,
532 0x9838, 0x0002021c, 0x00020200,
533 0xc78, 0x00000080, 0x00000000,
534 0xd030, 0x000300c0, 0x00800040,
535 0xd830, 0x000300c0, 0x00800040,
536 0x5bb0, 0x000000f0, 0x00000070,
537 0x5bc0, 0x00200000, 0x50100000,
538 0x7030, 0x31000311, 0x00000011,
539 0x2ae4, 0x00073ffe, 0x000022a2,
540 0x240c, 0x000007ff, 0x00000000,
541 0x8a14, 0xf000001f, 0x00000007,
542 0x8b24, 0xffffffff, 0x00ffffff,
543 0x8b10, 0x0000ff0f, 0x00000000,
544 0x28a4c, 0x07ffffff, 0x4e000000,
545 0x28350, 0x3f3f3fff, 0x00000082,
546 0x30, 0x000000ff, 0x0040,
547 0x34, 0x00000040, 0x00004040,
548 0x9100, 0x07ffffff, 0x03000000,
549 0x9060, 0x0000007f, 0x00000020,
550 0x9508, 0x00010000, 0x00010000,
551 0xac14, 0x000003ff, 0x000000f3,
552 0xac10, 0xffffffff, 0x00000000,
553 0xac0c, 0xffffffff, 0x00003210,
554 0x88d4, 0x0000001f, 0x00000010,
555 0x15c0, 0x000c0fc0, 0x000c0400
556};
557
Alex Deucherfffbdda2013-05-13 13:36:23 -0400558static const u32 hainan_golden_registers[] =
559{
560 0x9a10, 0x00010000, 0x00018208,
561 0x9830, 0xffffffff, 0x00000000,
562 0x9834, 0xf00fffff, 0x00000400,
563 0x9838, 0x0002021c, 0x00020200,
564 0xd0c0, 0xff000fff, 0x00000100,
565 0xd030, 0x000300c0, 0x00800040,
566 0xd8c0, 0xff000fff, 0x00000100,
567 0xd830, 0x000300c0, 0x00800040,
568 0x2ae4, 0x00073ffe, 0x000022a2,
569 0x240c, 0x000007ff, 0x00000000,
570 0x8a14, 0xf000001f, 0x00000007,
571 0x8b24, 0xffffffff, 0x00ffffff,
572 0x8b10, 0x0000ff0f, 0x00000000,
573 0x28a4c, 0x07ffffff, 0x4e000000,
574 0x28350, 0x3f3f3fff, 0x00000000,
575 0x30, 0x000000ff, 0x0040,
576 0x34, 0x00000040, 0x00004040,
577 0x9100, 0x03e00000, 0x03600000,
578 0x9060, 0x0000007f, 0x00000020,
579 0x9508, 0x00010000, 0x00010000,
580 0xac14, 0x000003ff, 0x000000f1,
581 0xac10, 0xffffffff, 0x00000000,
582 0xac0c, 0xffffffff, 0x00003210,
583 0x88d4, 0x0000001f, 0x00000010,
584 0x15c0, 0x000c0fc0, 0x000c0400
585};
586
587static const u32 hainan_golden_registers2[] =
588{
589 0x98f8, 0xffffffff, 0x02010001
590};
591
Alex Deucher205996c2013-03-01 17:08:42 -0500592static const u32 tahiti_mgcg_cgcg_init[] =
593{
594 0xc400, 0xffffffff, 0xfffffffc,
595 0x802c, 0xffffffff, 0xe0000000,
596 0x9a60, 0xffffffff, 0x00000100,
597 0x92a4, 0xffffffff, 0x00000100,
598 0xc164, 0xffffffff, 0x00000100,
599 0x9774, 0xffffffff, 0x00000100,
600 0x8984, 0xffffffff, 0x06000100,
601 0x8a18, 0xffffffff, 0x00000100,
602 0x92a0, 0xffffffff, 0x00000100,
603 0xc380, 0xffffffff, 0x00000100,
604 0x8b28, 0xffffffff, 0x00000100,
605 0x9144, 0xffffffff, 0x00000100,
606 0x8d88, 0xffffffff, 0x00000100,
607 0x8d8c, 0xffffffff, 0x00000100,
608 0x9030, 0xffffffff, 0x00000100,
609 0x9034, 0xffffffff, 0x00000100,
610 0x9038, 0xffffffff, 0x00000100,
611 0x903c, 0xffffffff, 0x00000100,
612 0xad80, 0xffffffff, 0x00000100,
613 0xac54, 0xffffffff, 0x00000100,
614 0x897c, 0xffffffff, 0x06000100,
615 0x9868, 0xffffffff, 0x00000100,
616 0x9510, 0xffffffff, 0x00000100,
617 0xaf04, 0xffffffff, 0x00000100,
618 0xae04, 0xffffffff, 0x00000100,
619 0x949c, 0xffffffff, 0x00000100,
620 0x802c, 0xffffffff, 0xe0000000,
621 0x9160, 0xffffffff, 0x00010000,
622 0x9164, 0xffffffff, 0x00030002,
623 0x9168, 0xffffffff, 0x00040007,
624 0x916c, 0xffffffff, 0x00060005,
625 0x9170, 0xffffffff, 0x00090008,
626 0x9174, 0xffffffff, 0x00020001,
627 0x9178, 0xffffffff, 0x00040003,
628 0x917c, 0xffffffff, 0x00000007,
629 0x9180, 0xffffffff, 0x00060005,
630 0x9184, 0xffffffff, 0x00090008,
631 0x9188, 0xffffffff, 0x00030002,
632 0x918c, 0xffffffff, 0x00050004,
633 0x9190, 0xffffffff, 0x00000008,
634 0x9194, 0xffffffff, 0x00070006,
635 0x9198, 0xffffffff, 0x000a0009,
636 0x919c, 0xffffffff, 0x00040003,
637 0x91a0, 0xffffffff, 0x00060005,
638 0x91a4, 0xffffffff, 0x00000009,
639 0x91a8, 0xffffffff, 0x00080007,
640 0x91ac, 0xffffffff, 0x000b000a,
641 0x91b0, 0xffffffff, 0x00050004,
642 0x91b4, 0xffffffff, 0x00070006,
643 0x91b8, 0xffffffff, 0x0008000b,
644 0x91bc, 0xffffffff, 0x000a0009,
645 0x91c0, 0xffffffff, 0x000d000c,
646 0x91c4, 0xffffffff, 0x00060005,
647 0x91c8, 0xffffffff, 0x00080007,
648 0x91cc, 0xffffffff, 0x0000000b,
649 0x91d0, 0xffffffff, 0x000a0009,
650 0x91d4, 0xffffffff, 0x000d000c,
651 0x91d8, 0xffffffff, 0x00070006,
652 0x91dc, 0xffffffff, 0x00090008,
653 0x91e0, 0xffffffff, 0x0000000c,
654 0x91e4, 0xffffffff, 0x000b000a,
655 0x91e8, 0xffffffff, 0x000e000d,
656 0x91ec, 0xffffffff, 0x00080007,
657 0x91f0, 0xffffffff, 0x000a0009,
658 0x91f4, 0xffffffff, 0x0000000d,
659 0x91f8, 0xffffffff, 0x000c000b,
660 0x91fc, 0xffffffff, 0x000f000e,
661 0x9200, 0xffffffff, 0x00090008,
662 0x9204, 0xffffffff, 0x000b000a,
663 0x9208, 0xffffffff, 0x000c000f,
664 0x920c, 0xffffffff, 0x000e000d,
665 0x9210, 0xffffffff, 0x00110010,
666 0x9214, 0xffffffff, 0x000a0009,
667 0x9218, 0xffffffff, 0x000c000b,
668 0x921c, 0xffffffff, 0x0000000f,
669 0x9220, 0xffffffff, 0x000e000d,
670 0x9224, 0xffffffff, 0x00110010,
671 0x9228, 0xffffffff, 0x000b000a,
672 0x922c, 0xffffffff, 0x000d000c,
673 0x9230, 0xffffffff, 0x00000010,
674 0x9234, 0xffffffff, 0x000f000e,
675 0x9238, 0xffffffff, 0x00120011,
676 0x923c, 0xffffffff, 0x000c000b,
677 0x9240, 0xffffffff, 0x000e000d,
678 0x9244, 0xffffffff, 0x00000011,
679 0x9248, 0xffffffff, 0x0010000f,
680 0x924c, 0xffffffff, 0x00130012,
681 0x9250, 0xffffffff, 0x000d000c,
682 0x9254, 0xffffffff, 0x000f000e,
683 0x9258, 0xffffffff, 0x00100013,
684 0x925c, 0xffffffff, 0x00120011,
685 0x9260, 0xffffffff, 0x00150014,
686 0x9264, 0xffffffff, 0x000e000d,
687 0x9268, 0xffffffff, 0x0010000f,
688 0x926c, 0xffffffff, 0x00000013,
689 0x9270, 0xffffffff, 0x00120011,
690 0x9274, 0xffffffff, 0x00150014,
691 0x9278, 0xffffffff, 0x000f000e,
692 0x927c, 0xffffffff, 0x00110010,
693 0x9280, 0xffffffff, 0x00000014,
694 0x9284, 0xffffffff, 0x00130012,
695 0x9288, 0xffffffff, 0x00160015,
696 0x928c, 0xffffffff, 0x0010000f,
697 0x9290, 0xffffffff, 0x00120011,
698 0x9294, 0xffffffff, 0x00000015,
699 0x9298, 0xffffffff, 0x00140013,
700 0x929c, 0xffffffff, 0x00170016,
701 0x9150, 0xffffffff, 0x96940200,
702 0x8708, 0xffffffff, 0x00900100,
703 0xc478, 0xffffffff, 0x00000080,
704 0xc404, 0xffffffff, 0x0020003f,
705 0x30, 0xffffffff, 0x0000001c,
706 0x34, 0x000f0000, 0x000f0000,
707 0x160c, 0xffffffff, 0x00000100,
708 0x1024, 0xffffffff, 0x00000100,
709 0x102c, 0x00000101, 0x00000000,
710 0x20a8, 0xffffffff, 0x00000104,
711 0x264c, 0x000c0000, 0x000c0000,
712 0x2648, 0x000c0000, 0x000c0000,
713 0x55e4, 0xff000fff, 0x00000100,
714 0x55e8, 0x00000001, 0x00000001,
715 0x2f50, 0x00000001, 0x00000001,
716 0x30cc, 0xc0000fff, 0x00000104,
717 0xc1e4, 0x00000001, 0x00000001,
718 0xd0c0, 0xfffffff0, 0x00000100,
719 0xd8c0, 0xfffffff0, 0x00000100
720};
721
722static const u32 pitcairn_mgcg_cgcg_init[] =
723{
724 0xc400, 0xffffffff, 0xfffffffc,
725 0x802c, 0xffffffff, 0xe0000000,
726 0x9a60, 0xffffffff, 0x00000100,
727 0x92a4, 0xffffffff, 0x00000100,
728 0xc164, 0xffffffff, 0x00000100,
729 0x9774, 0xffffffff, 0x00000100,
730 0x8984, 0xffffffff, 0x06000100,
731 0x8a18, 0xffffffff, 0x00000100,
732 0x92a0, 0xffffffff, 0x00000100,
733 0xc380, 0xffffffff, 0x00000100,
734 0x8b28, 0xffffffff, 0x00000100,
735 0x9144, 0xffffffff, 0x00000100,
736 0x8d88, 0xffffffff, 0x00000100,
737 0x8d8c, 0xffffffff, 0x00000100,
738 0x9030, 0xffffffff, 0x00000100,
739 0x9034, 0xffffffff, 0x00000100,
740 0x9038, 0xffffffff, 0x00000100,
741 0x903c, 0xffffffff, 0x00000100,
742 0xad80, 0xffffffff, 0x00000100,
743 0xac54, 0xffffffff, 0x00000100,
744 0x897c, 0xffffffff, 0x06000100,
745 0x9868, 0xffffffff, 0x00000100,
746 0x9510, 0xffffffff, 0x00000100,
747 0xaf04, 0xffffffff, 0x00000100,
748 0xae04, 0xffffffff, 0x00000100,
749 0x949c, 0xffffffff, 0x00000100,
750 0x802c, 0xffffffff, 0xe0000000,
751 0x9160, 0xffffffff, 0x00010000,
752 0x9164, 0xffffffff, 0x00030002,
753 0x9168, 0xffffffff, 0x00040007,
754 0x916c, 0xffffffff, 0x00060005,
755 0x9170, 0xffffffff, 0x00090008,
756 0x9174, 0xffffffff, 0x00020001,
757 0x9178, 0xffffffff, 0x00040003,
758 0x917c, 0xffffffff, 0x00000007,
759 0x9180, 0xffffffff, 0x00060005,
760 0x9184, 0xffffffff, 0x00090008,
761 0x9188, 0xffffffff, 0x00030002,
762 0x918c, 0xffffffff, 0x00050004,
763 0x9190, 0xffffffff, 0x00000008,
764 0x9194, 0xffffffff, 0x00070006,
765 0x9198, 0xffffffff, 0x000a0009,
766 0x919c, 0xffffffff, 0x00040003,
767 0x91a0, 0xffffffff, 0x00060005,
768 0x91a4, 0xffffffff, 0x00000009,
769 0x91a8, 0xffffffff, 0x00080007,
770 0x91ac, 0xffffffff, 0x000b000a,
771 0x91b0, 0xffffffff, 0x00050004,
772 0x91b4, 0xffffffff, 0x00070006,
773 0x91b8, 0xffffffff, 0x0008000b,
774 0x91bc, 0xffffffff, 0x000a0009,
775 0x91c0, 0xffffffff, 0x000d000c,
776 0x9200, 0xffffffff, 0x00090008,
777 0x9204, 0xffffffff, 0x000b000a,
778 0x9208, 0xffffffff, 0x000c000f,
779 0x920c, 0xffffffff, 0x000e000d,
780 0x9210, 0xffffffff, 0x00110010,
781 0x9214, 0xffffffff, 0x000a0009,
782 0x9218, 0xffffffff, 0x000c000b,
783 0x921c, 0xffffffff, 0x0000000f,
784 0x9220, 0xffffffff, 0x000e000d,
785 0x9224, 0xffffffff, 0x00110010,
786 0x9228, 0xffffffff, 0x000b000a,
787 0x922c, 0xffffffff, 0x000d000c,
788 0x9230, 0xffffffff, 0x00000010,
789 0x9234, 0xffffffff, 0x000f000e,
790 0x9238, 0xffffffff, 0x00120011,
791 0x923c, 0xffffffff, 0x000c000b,
792 0x9240, 0xffffffff, 0x000e000d,
793 0x9244, 0xffffffff, 0x00000011,
794 0x9248, 0xffffffff, 0x0010000f,
795 0x924c, 0xffffffff, 0x00130012,
796 0x9250, 0xffffffff, 0x000d000c,
797 0x9254, 0xffffffff, 0x000f000e,
798 0x9258, 0xffffffff, 0x00100013,
799 0x925c, 0xffffffff, 0x00120011,
800 0x9260, 0xffffffff, 0x00150014,
801 0x9150, 0xffffffff, 0x96940200,
802 0x8708, 0xffffffff, 0x00900100,
803 0xc478, 0xffffffff, 0x00000080,
804 0xc404, 0xffffffff, 0x0020003f,
805 0x30, 0xffffffff, 0x0000001c,
806 0x34, 0x000f0000, 0x000f0000,
807 0x160c, 0xffffffff, 0x00000100,
808 0x1024, 0xffffffff, 0x00000100,
809 0x102c, 0x00000101, 0x00000000,
810 0x20a8, 0xffffffff, 0x00000104,
811 0x55e4, 0xff000fff, 0x00000100,
812 0x55e8, 0x00000001, 0x00000001,
813 0x2f50, 0x00000001, 0x00000001,
814 0x30cc, 0xc0000fff, 0x00000104,
815 0xc1e4, 0x00000001, 0x00000001,
816 0xd0c0, 0xfffffff0, 0x00000100,
817 0xd8c0, 0xfffffff0, 0x00000100
818};
819
/*
 * Clock-gating init sequence for Verde (per the table name, MGCG/CGCG).
 * Entries are {register offset, AND mask, OR value} triples, programmed
 * by radeon_program_register_sequence() from si_init_golden_registers().
 * NOTE(review): triple layout inferred from the call sites in this file;
 * confirm against radeon_program_register_sequence(). Do not reorder or
 * edit values — they are hardware-specific golden settings.
 */
static const u32 verde_mgcg_cgcg_init[] =
{
	0xc400, 0xffffffff, 0xfffffffc,
	0x802c, 0xffffffff, 0xe0000000,
	0x9a60, 0xffffffff, 0x00000100,
	0x92a4, 0xffffffff, 0x00000100,
	0xc164, 0xffffffff, 0x00000100,
	0x9774, 0xffffffff, 0x00000100,
	0x8984, 0xffffffff, 0x06000100,
	0x8a18, 0xffffffff, 0x00000100,
	0x92a0, 0xffffffff, 0x00000100,
	0xc380, 0xffffffff, 0x00000100,
	0x8b28, 0xffffffff, 0x00000100,
	0x9144, 0xffffffff, 0x00000100,
	0x8d88, 0xffffffff, 0x00000100,
	0x8d8c, 0xffffffff, 0x00000100,
	0x9030, 0xffffffff, 0x00000100,
	0x9034, 0xffffffff, 0x00000100,
	0x9038, 0xffffffff, 0x00000100,
	0x903c, 0xffffffff, 0x00000100,
	0xad80, 0xffffffff, 0x00000100,
	0xac54, 0xffffffff, 0x00000100,
	0x897c, 0xffffffff, 0x06000100,
	0x9868, 0xffffffff, 0x00000100,
	0x9510, 0xffffffff, 0x00000100,
	0xaf04, 0xffffffff, 0x00000100,
	0xae04, 0xffffffff, 0x00000100,
	0x949c, 0xffffffff, 0x00000100,
	0x802c, 0xffffffff, 0xe0000000,
	0x9160, 0xffffffff, 0x00010000,
	0x9164, 0xffffffff, 0x00030002,
	0x9168, 0xffffffff, 0x00040007,
	0x916c, 0xffffffff, 0x00060005,
	0x9170, 0xffffffff, 0x00090008,
	0x9174, 0xffffffff, 0x00020001,
	0x9178, 0xffffffff, 0x00040003,
	0x917c, 0xffffffff, 0x00000007,
	0x9180, 0xffffffff, 0x00060005,
	0x9184, 0xffffffff, 0x00090008,
	0x9188, 0xffffffff, 0x00030002,
	0x918c, 0xffffffff, 0x00050004,
	0x9190, 0xffffffff, 0x00000008,
	0x9194, 0xffffffff, 0x00070006,
	0x9198, 0xffffffff, 0x000a0009,
	0x919c, 0xffffffff, 0x00040003,
	0x91a0, 0xffffffff, 0x00060005,
	0x91a4, 0xffffffff, 0x00000009,
	0x91a8, 0xffffffff, 0x00080007,
	0x91ac, 0xffffffff, 0x000b000a,
	0x91b0, 0xffffffff, 0x00050004,
	0x91b4, 0xffffffff, 0x00070006,
	0x91b8, 0xffffffff, 0x0008000b,
	0x91bc, 0xffffffff, 0x000a0009,
	0x91c0, 0xffffffff, 0x000d000c,
	0x9200, 0xffffffff, 0x00090008,
	0x9204, 0xffffffff, 0x000b000a,
	0x9208, 0xffffffff, 0x000c000f,
	0x920c, 0xffffffff, 0x000e000d,
	0x9210, 0xffffffff, 0x00110010,
	0x9214, 0xffffffff, 0x000a0009,
	0x9218, 0xffffffff, 0x000c000b,
	0x921c, 0xffffffff, 0x0000000f,
	0x9220, 0xffffffff, 0x000e000d,
	0x9224, 0xffffffff, 0x00110010,
	0x9228, 0xffffffff, 0x000b000a,
	0x922c, 0xffffffff, 0x000d000c,
	0x9230, 0xffffffff, 0x00000010,
	0x9234, 0xffffffff, 0x000f000e,
	0x9238, 0xffffffff, 0x00120011,
	0x923c, 0xffffffff, 0x000c000b,
	0x9240, 0xffffffff, 0x000e000d,
	0x9244, 0xffffffff, 0x00000011,
	0x9248, 0xffffffff, 0x0010000f,
	0x924c, 0xffffffff, 0x00130012,
	0x9250, 0xffffffff, 0x000d000c,
	0x9254, 0xffffffff, 0x000f000e,
	0x9258, 0xffffffff, 0x00100013,
	0x925c, 0xffffffff, 0x00120011,
	0x9260, 0xffffffff, 0x00150014,
	0x9150, 0xffffffff, 0x96940200,
	0x8708, 0xffffffff, 0x00900100,
	0xc478, 0xffffffff, 0x00000080,
	0xc404, 0xffffffff, 0x0020003f,
	0x30, 0xffffffff, 0x0000001c,
	0x34, 0x000f0000, 0x000f0000,
	0x160c, 0xffffffff, 0x00000100,
	0x1024, 0xffffffff, 0x00000100,
	0x102c, 0x00000101, 0x00000000,
	0x20a8, 0xffffffff, 0x00000104,
	0x264c, 0x000c0000, 0x000c0000,
	0x2648, 0x000c0000, 0x000c0000,
	0x55e4, 0xff000fff, 0x00000100,
	0x55e8, 0x00000001, 0x00000001,
	0x2f50, 0x00000001, 0x00000001,
	0x30cc, 0xc0000fff, 0x00000104,
	0xc1e4, 0x00000001, 0x00000001,
	0xd0c0, 0xfffffff0, 0x00000100,
	0xd8c0, 0xfffffff0, 0x00000100
};
919
/*
 * Clock-gating init sequence for Oland; same {offset, AND mask, OR value}
 * triple format as verde_mgcg_cgcg_init, applied from
 * si_init_golden_registers(). Do not reorder or edit values.
 */
static const u32 oland_mgcg_cgcg_init[] =
{
	0xc400, 0xffffffff, 0xfffffffc,
	0x802c, 0xffffffff, 0xe0000000,
	0x9a60, 0xffffffff, 0x00000100,
	0x92a4, 0xffffffff, 0x00000100,
	0xc164, 0xffffffff, 0x00000100,
	0x9774, 0xffffffff, 0x00000100,
	0x8984, 0xffffffff, 0x06000100,
	0x8a18, 0xffffffff, 0x00000100,
	0x92a0, 0xffffffff, 0x00000100,
	0xc380, 0xffffffff, 0x00000100,
	0x8b28, 0xffffffff, 0x00000100,
	0x9144, 0xffffffff, 0x00000100,
	0x8d88, 0xffffffff, 0x00000100,
	0x8d8c, 0xffffffff, 0x00000100,
	0x9030, 0xffffffff, 0x00000100,
	0x9034, 0xffffffff, 0x00000100,
	0x9038, 0xffffffff, 0x00000100,
	0x903c, 0xffffffff, 0x00000100,
	0xad80, 0xffffffff, 0x00000100,
	0xac54, 0xffffffff, 0x00000100,
	0x897c, 0xffffffff, 0x06000100,
	0x9868, 0xffffffff, 0x00000100,
	0x9510, 0xffffffff, 0x00000100,
	0xaf04, 0xffffffff, 0x00000100,
	0xae04, 0xffffffff, 0x00000100,
	0x949c, 0xffffffff, 0x00000100,
	0x802c, 0xffffffff, 0xe0000000,
	0x9160, 0xffffffff, 0x00010000,
	0x9164, 0xffffffff, 0x00030002,
	0x9168, 0xffffffff, 0x00040007,
	0x916c, 0xffffffff, 0x00060005,
	0x9170, 0xffffffff, 0x00090008,
	0x9174, 0xffffffff, 0x00020001,
	0x9178, 0xffffffff, 0x00040003,
	0x917c, 0xffffffff, 0x00000007,
	0x9180, 0xffffffff, 0x00060005,
	0x9184, 0xffffffff, 0x00090008,
	0x9188, 0xffffffff, 0x00030002,
	0x918c, 0xffffffff, 0x00050004,
	0x9190, 0xffffffff, 0x00000008,
	0x9194, 0xffffffff, 0x00070006,
	0x9198, 0xffffffff, 0x000a0009,
	0x919c, 0xffffffff, 0x00040003,
	0x91a0, 0xffffffff, 0x00060005,
	0x91a4, 0xffffffff, 0x00000009,
	0x91a8, 0xffffffff, 0x00080007,
	0x91ac, 0xffffffff, 0x000b000a,
	0x91b0, 0xffffffff, 0x00050004,
	0x91b4, 0xffffffff, 0x00070006,
	0x91b8, 0xffffffff, 0x0008000b,
	0x91bc, 0xffffffff, 0x000a0009,
	0x91c0, 0xffffffff, 0x000d000c,
	0x91c4, 0xffffffff, 0x00060005,
	0x91c8, 0xffffffff, 0x00080007,
	0x91cc, 0xffffffff, 0x0000000b,
	0x91d0, 0xffffffff, 0x000a0009,
	0x91d4, 0xffffffff, 0x000d000c,
	0x9150, 0xffffffff, 0x96940200,
	0x8708, 0xffffffff, 0x00900100,
	0xc478, 0xffffffff, 0x00000080,
	0xc404, 0xffffffff, 0x0020003f,
	0x30, 0xffffffff, 0x0000001c,
	0x34, 0x000f0000, 0x000f0000,
	0x160c, 0xffffffff, 0x00000100,
	0x1024, 0xffffffff, 0x00000100,
	0x102c, 0x00000101, 0x00000000,
	0x20a8, 0xffffffff, 0x00000104,
	0x264c, 0x000c0000, 0x000c0000,
	0x2648, 0x000c0000, 0x000c0000,
	0x55e4, 0xff000fff, 0x00000100,
	0x55e8, 0x00000001, 0x00000001,
	0x2f50, 0x00000001, 0x00000001,
	0x30cc, 0xc0000fff, 0x00000104,
	0xc1e4, 0x00000001, 0x00000001,
	0xd0c0, 0xfffffff0, 0x00000100,
	0xd8c0, 0xfffffff0, 0x00000100
};
999
/*
 * Clock-gating init sequence for Hainan; same {offset, AND mask, OR value}
 * triple format as the other *_mgcg_cgcg_init tables, applied from
 * si_init_golden_registers(). Nearly identical to the Oland table but
 * without the 0x102c/0x55e4/0x55e8 entries. Do not reorder or edit values.
 */
static const u32 hainan_mgcg_cgcg_init[] =
{
	0xc400, 0xffffffff, 0xfffffffc,
	0x802c, 0xffffffff, 0xe0000000,
	0x9a60, 0xffffffff, 0x00000100,
	0x92a4, 0xffffffff, 0x00000100,
	0xc164, 0xffffffff, 0x00000100,
	0x9774, 0xffffffff, 0x00000100,
	0x8984, 0xffffffff, 0x06000100,
	0x8a18, 0xffffffff, 0x00000100,
	0x92a0, 0xffffffff, 0x00000100,
	0xc380, 0xffffffff, 0x00000100,
	0x8b28, 0xffffffff, 0x00000100,
	0x9144, 0xffffffff, 0x00000100,
	0x8d88, 0xffffffff, 0x00000100,
	0x8d8c, 0xffffffff, 0x00000100,
	0x9030, 0xffffffff, 0x00000100,
	0x9034, 0xffffffff, 0x00000100,
	0x9038, 0xffffffff, 0x00000100,
	0x903c, 0xffffffff, 0x00000100,
	0xad80, 0xffffffff, 0x00000100,
	0xac54, 0xffffffff, 0x00000100,
	0x897c, 0xffffffff, 0x06000100,
	0x9868, 0xffffffff, 0x00000100,
	0x9510, 0xffffffff, 0x00000100,
	0xaf04, 0xffffffff, 0x00000100,
	0xae04, 0xffffffff, 0x00000100,
	0x949c, 0xffffffff, 0x00000100,
	0x802c, 0xffffffff, 0xe0000000,
	0x9160, 0xffffffff, 0x00010000,
	0x9164, 0xffffffff, 0x00030002,
	0x9168, 0xffffffff, 0x00040007,
	0x916c, 0xffffffff, 0x00060005,
	0x9170, 0xffffffff, 0x00090008,
	0x9174, 0xffffffff, 0x00020001,
	0x9178, 0xffffffff, 0x00040003,
	0x917c, 0xffffffff, 0x00000007,
	0x9180, 0xffffffff, 0x00060005,
	0x9184, 0xffffffff, 0x00090008,
	0x9188, 0xffffffff, 0x00030002,
	0x918c, 0xffffffff, 0x00050004,
	0x9190, 0xffffffff, 0x00000008,
	0x9194, 0xffffffff, 0x00070006,
	0x9198, 0xffffffff, 0x000a0009,
	0x919c, 0xffffffff, 0x00040003,
	0x91a0, 0xffffffff, 0x00060005,
	0x91a4, 0xffffffff, 0x00000009,
	0x91a8, 0xffffffff, 0x00080007,
	0x91ac, 0xffffffff, 0x000b000a,
	0x91b0, 0xffffffff, 0x00050004,
	0x91b4, 0xffffffff, 0x00070006,
	0x91b8, 0xffffffff, 0x0008000b,
	0x91bc, 0xffffffff, 0x000a0009,
	0x91c0, 0xffffffff, 0x000d000c,
	0x91c4, 0xffffffff, 0x00060005,
	0x91c8, 0xffffffff, 0x00080007,
	0x91cc, 0xffffffff, 0x0000000b,
	0x91d0, 0xffffffff, 0x000a0009,
	0x91d4, 0xffffffff, 0x000d000c,
	0x9150, 0xffffffff, 0x96940200,
	0x8708, 0xffffffff, 0x00900100,
	0xc478, 0xffffffff, 0x00000080,
	0xc404, 0xffffffff, 0x0020003f,
	0x30, 0xffffffff, 0x0000001c,
	0x34, 0x000f0000, 0x000f0000,
	0x160c, 0xffffffff, 0x00000100,
	0x1024, 0xffffffff, 0x00000100,
	0x20a8, 0xffffffff, 0x00000104,
	0x264c, 0x000c0000, 0x000c0000,
	0x2648, 0x000c0000, 0x000c0000,
	0x2f50, 0x00000001, 0x00000001,
	0x30cc, 0xc0000fff, 0x00000104,
	0xc1e4, 0x00000001, 0x00000001,
	0xd0c0, 0xfffffff0, 0x00000100,
	0xd8c0, 0xfffffff0, 0x00000100
};
1076
Alex Deucher205996c2013-03-01 17:08:42 -05001077static u32 verde_pg_init[] =
1078{
1079 0x353c, 0xffffffff, 0x40000,
1080 0x3538, 0xffffffff, 0x200010ff,
1081 0x353c, 0xffffffff, 0x0,
1082 0x353c, 0xffffffff, 0x0,
1083 0x353c, 0xffffffff, 0x0,
1084 0x353c, 0xffffffff, 0x0,
1085 0x353c, 0xffffffff, 0x0,
1086 0x353c, 0xffffffff, 0x7007,
1087 0x3538, 0xffffffff, 0x300010ff,
1088 0x353c, 0xffffffff, 0x0,
1089 0x353c, 0xffffffff, 0x0,
1090 0x353c, 0xffffffff, 0x0,
1091 0x353c, 0xffffffff, 0x0,
1092 0x353c, 0xffffffff, 0x0,
1093 0x353c, 0xffffffff, 0x400000,
1094 0x3538, 0xffffffff, 0x100010ff,
1095 0x353c, 0xffffffff, 0x0,
1096 0x353c, 0xffffffff, 0x0,
1097 0x353c, 0xffffffff, 0x0,
1098 0x353c, 0xffffffff, 0x0,
1099 0x353c, 0xffffffff, 0x0,
1100 0x353c, 0xffffffff, 0x120200,
1101 0x3538, 0xffffffff, 0x500010ff,
1102 0x353c, 0xffffffff, 0x0,
1103 0x353c, 0xffffffff, 0x0,
1104 0x353c, 0xffffffff, 0x0,
1105 0x353c, 0xffffffff, 0x0,
1106 0x353c, 0xffffffff, 0x0,
1107 0x353c, 0xffffffff, 0x1e1e16,
1108 0x3538, 0xffffffff, 0x600010ff,
1109 0x353c, 0xffffffff, 0x0,
1110 0x353c, 0xffffffff, 0x0,
1111 0x353c, 0xffffffff, 0x0,
1112 0x353c, 0xffffffff, 0x0,
1113 0x353c, 0xffffffff, 0x0,
1114 0x353c, 0xffffffff, 0x171f1e,
1115 0x3538, 0xffffffff, 0x700010ff,
1116 0x353c, 0xffffffff, 0x0,
1117 0x353c, 0xffffffff, 0x0,
1118 0x353c, 0xffffffff, 0x0,
1119 0x353c, 0xffffffff, 0x0,
1120 0x353c, 0xffffffff, 0x0,
1121 0x353c, 0xffffffff, 0x0,
1122 0x3538, 0xffffffff, 0x9ff,
1123 0x3500, 0xffffffff, 0x0,
1124 0x3504, 0xffffffff, 0x10000800,
1125 0x3504, 0xffffffff, 0xf,
1126 0x3504, 0xffffffff, 0xf,
1127 0x3500, 0xffffffff, 0x4,
1128 0x3504, 0xffffffff, 0x1000051e,
1129 0x3504, 0xffffffff, 0xffff,
1130 0x3504, 0xffffffff, 0xffff,
1131 0x3500, 0xffffffff, 0x8,
1132 0x3504, 0xffffffff, 0x80500,
1133 0x3500, 0xffffffff, 0x12,
1134 0x3504, 0xffffffff, 0x9050c,
1135 0x3500, 0xffffffff, 0x1d,
1136 0x3504, 0xffffffff, 0xb052c,
1137 0x3500, 0xffffffff, 0x2a,
1138 0x3504, 0xffffffff, 0x1053e,
1139 0x3500, 0xffffffff, 0x2d,
1140 0x3504, 0xffffffff, 0x10546,
1141 0x3500, 0xffffffff, 0x30,
1142 0x3504, 0xffffffff, 0xa054e,
1143 0x3500, 0xffffffff, 0x3c,
1144 0x3504, 0xffffffff, 0x1055f,
1145 0x3500, 0xffffffff, 0x3f,
1146 0x3504, 0xffffffff, 0x10567,
1147 0x3500, 0xffffffff, 0x42,
1148 0x3504, 0xffffffff, 0x1056f,
1149 0x3500, 0xffffffff, 0x45,
1150 0x3504, 0xffffffff, 0x10572,
1151 0x3500, 0xffffffff, 0x48,
1152 0x3504, 0xffffffff, 0x20575,
1153 0x3500, 0xffffffff, 0x4c,
1154 0x3504, 0xffffffff, 0x190801,
1155 0x3500, 0xffffffff, 0x67,
1156 0x3504, 0xffffffff, 0x1082a,
1157 0x3500, 0xffffffff, 0x6a,
1158 0x3504, 0xffffffff, 0x1b082d,
1159 0x3500, 0xffffffff, 0x87,
1160 0x3504, 0xffffffff, 0x310851,
1161 0x3500, 0xffffffff, 0xba,
1162 0x3504, 0xffffffff, 0x891,
1163 0x3500, 0xffffffff, 0xbc,
1164 0x3504, 0xffffffff, 0x893,
1165 0x3500, 0xffffffff, 0xbe,
1166 0x3504, 0xffffffff, 0x20895,
1167 0x3500, 0xffffffff, 0xc2,
1168 0x3504, 0xffffffff, 0x20899,
1169 0x3500, 0xffffffff, 0xc6,
1170 0x3504, 0xffffffff, 0x2089d,
1171 0x3500, 0xffffffff, 0xca,
1172 0x3504, 0xffffffff, 0x8a1,
1173 0x3500, 0xffffffff, 0xcc,
1174 0x3504, 0xffffffff, 0x8a3,
1175 0x3500, 0xffffffff, 0xce,
1176 0x3504, 0xffffffff, 0x308a5,
1177 0x3500, 0xffffffff, 0xd3,
1178 0x3504, 0xffffffff, 0x6d08cd,
1179 0x3500, 0xffffffff, 0x142,
1180 0x3504, 0xffffffff, 0x2000095a,
1181 0x3504, 0xffffffff, 0x1,
1182 0x3500, 0xffffffff, 0x144,
1183 0x3504, 0xffffffff, 0x301f095b,
1184 0x3500, 0xffffffff, 0x165,
1185 0x3504, 0xffffffff, 0xc094d,
1186 0x3500, 0xffffffff, 0x173,
1187 0x3504, 0xffffffff, 0xf096d,
1188 0x3500, 0xffffffff, 0x184,
1189 0x3504, 0xffffffff, 0x15097f,
1190 0x3500, 0xffffffff, 0x19b,
1191 0x3504, 0xffffffff, 0xc0998,
1192 0x3500, 0xffffffff, 0x1a9,
1193 0x3504, 0xffffffff, 0x409a7,
1194 0x3500, 0xffffffff, 0x1af,
1195 0x3504, 0xffffffff, 0xcdc,
1196 0x3500, 0xffffffff, 0x1b1,
1197 0x3504, 0xffffffff, 0x800,
1198 0x3508, 0xffffffff, 0x6c9b2000,
1199 0x3510, 0xfc00, 0x2000,
1200 0x3544, 0xffffffff, 0xfc0,
1201 0x28d4, 0x00000100, 0x100
1202};
1203
1204static void si_init_golden_registers(struct radeon_device *rdev)
1205{
1206 switch (rdev->family) {
1207 case CHIP_TAHITI:
1208 radeon_program_register_sequence(rdev,
1209 tahiti_golden_registers,
1210 (const u32)ARRAY_SIZE(tahiti_golden_registers));
1211 radeon_program_register_sequence(rdev,
1212 tahiti_golden_rlc_registers,
1213 (const u32)ARRAY_SIZE(tahiti_golden_rlc_registers));
1214 radeon_program_register_sequence(rdev,
1215 tahiti_mgcg_cgcg_init,
1216 (const u32)ARRAY_SIZE(tahiti_mgcg_cgcg_init));
1217 radeon_program_register_sequence(rdev,
1218 tahiti_golden_registers2,
1219 (const u32)ARRAY_SIZE(tahiti_golden_registers2));
1220 break;
1221 case CHIP_PITCAIRN:
1222 radeon_program_register_sequence(rdev,
1223 pitcairn_golden_registers,
1224 (const u32)ARRAY_SIZE(pitcairn_golden_registers));
1225 radeon_program_register_sequence(rdev,
1226 pitcairn_golden_rlc_registers,
1227 (const u32)ARRAY_SIZE(pitcairn_golden_rlc_registers));
1228 radeon_program_register_sequence(rdev,
1229 pitcairn_mgcg_cgcg_init,
1230 (const u32)ARRAY_SIZE(pitcairn_mgcg_cgcg_init));
1231 break;
1232 case CHIP_VERDE:
1233 radeon_program_register_sequence(rdev,
1234 verde_golden_registers,
1235 (const u32)ARRAY_SIZE(verde_golden_registers));
1236 radeon_program_register_sequence(rdev,
1237 verde_golden_rlc_registers,
1238 (const u32)ARRAY_SIZE(verde_golden_rlc_registers));
1239 radeon_program_register_sequence(rdev,
1240 verde_mgcg_cgcg_init,
1241 (const u32)ARRAY_SIZE(verde_mgcg_cgcg_init));
1242 radeon_program_register_sequence(rdev,
1243 verde_pg_init,
1244 (const u32)ARRAY_SIZE(verde_pg_init));
1245 break;
1246 case CHIP_OLAND:
1247 radeon_program_register_sequence(rdev,
1248 oland_golden_registers,
1249 (const u32)ARRAY_SIZE(oland_golden_registers));
1250 radeon_program_register_sequence(rdev,
1251 oland_golden_rlc_registers,
1252 (const u32)ARRAY_SIZE(oland_golden_rlc_registers));
1253 radeon_program_register_sequence(rdev,
1254 oland_mgcg_cgcg_init,
1255 (const u32)ARRAY_SIZE(oland_mgcg_cgcg_init));
1256 break;
Alex Deucherfffbdda2013-05-13 13:36:23 -04001257 case CHIP_HAINAN:
1258 radeon_program_register_sequence(rdev,
1259 hainan_golden_registers,
1260 (const u32)ARRAY_SIZE(hainan_golden_registers));
1261 radeon_program_register_sequence(rdev,
1262 hainan_golden_registers2,
1263 (const u32)ARRAY_SIZE(hainan_golden_registers2));
1264 radeon_program_register_sequence(rdev,
1265 hainan_mgcg_cgcg_init,
1266 (const u32)ARRAY_SIZE(hainan_mgcg_cgcg_init));
1267 break;
Alex Deucher205996c2013-03-01 17:08:42 -05001268 default:
1269 break;
1270 }
1271}
1272
Alex Deucher4af692f2014-10-01 10:03:31 -04001273/**
1274 * si_get_allowed_info_register - fetch the register for the info ioctl
1275 *
1276 * @rdev: radeon_device pointer
1277 * @reg: register offset in bytes
1278 * @val: register value
1279 *
1280 * Returns 0 for success or -EINVAL for an invalid register
1281 *
1282 */
1283int si_get_allowed_info_register(struct radeon_device *rdev,
1284 u32 reg, u32 *val)
1285{
1286 switch (reg) {
1287 case GRBM_STATUS:
1288 case GRBM_STATUS2:
1289 case GRBM_STATUS_SE0:
1290 case GRBM_STATUS_SE1:
1291 case SRBM_STATUS:
1292 case SRBM_STATUS2:
1293 case (DMA_STATUS_REG + DMA0_REGISTER_OFFSET):
1294 case (DMA_STATUS_REG + DMA1_REGISTER_OFFSET):
1295 case UVD_STATUS:
1296 *val = RREG32(reg);
1297 return 0;
1298 default:
1299 return -EINVAL;
1300 }
1301}
1302
Alex Deucher454d2e22013-02-14 10:04:02 -05001303#define PCIE_BUS_CLK 10000
1304#define TCLK (PCIE_BUS_CLK / 10)
1305
1306/**
1307 * si_get_xclk - get the xclk
1308 *
1309 * @rdev: radeon_device pointer
1310 *
1311 * Returns the reference clock used by the gfx engine
1312 * (SI).
1313 */
1314u32 si_get_xclk(struct radeon_device *rdev)
1315{
Jérome Glisse3cf8bb12016-03-16 12:56:45 +01001316 u32 reference_clock = rdev->clock.spll.reference_freq;
Alex Deucher454d2e22013-02-14 10:04:02 -05001317 u32 tmp;
1318
1319 tmp = RREG32(CG_CLKPIN_CNTL_2);
1320 if (tmp & MUX_TCLK_TO_XCLK)
1321 return TCLK;
1322
1323 tmp = RREG32(CG_CLKPIN_CNTL);
1324 if (tmp & XTALIN_DIVIDE)
1325 return reference_clock / 4;
1326
1327 return reference_clock;
1328}
1329
Alex Deucher1bd47d22012-03-20 17:18:10 -04001330/* get temperature in millidegrees */
1331int si_get_temp(struct radeon_device *rdev)
1332{
1333 u32 temp;
1334 int actual_temp = 0;
1335
1336 temp = (RREG32(CG_MULT_THERMAL_STATUS) & CTF_TEMP_MASK) >>
1337 CTF_TEMP_SHIFT;
1338
1339 if (temp & 0x200)
1340 actual_temp = 255;
1341 else
1342 actual_temp = temp & 0x1ff;
1343
1344 actual_temp = (actual_temp * 1000);
1345
1346 return actual_temp;
1347}
1348
/* number of {index, data} pairs in each *_io_mc_regs table below */
#define TAHITI_IO_MC_REGS_SIZE 36

/*
 * MC IO debug settings for Tahiti: {MC_SEQ_IO_DEBUG_INDEX,
 * MC_SEQ_IO_DEBUG_DATA} pairs written by si_mc_load_microcode()
 * before uploading the legacy-format MC ucode.
 */
static const u32 tahiti_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
	{0x0000006f, 0x03044000},
	{0x00000070, 0x0480c018},
	{0x00000071, 0x00000040},
	{0x00000072, 0x01000000},
	{0x00000074, 0x000000ff},
	{0x00000075, 0x00143400},
	{0x00000076, 0x08ec0800},
	{0x00000077, 0x040000cc},
	{0x00000079, 0x00000000},
	{0x0000007a, 0x21000409},
	{0x0000007c, 0x00000000},
	{0x0000007d, 0xe8000000},
	{0x0000007e, 0x044408a8},
	{0x0000007f, 0x00000003},
	{0x00000080, 0x00000000},
	{0x00000081, 0x01000000},
	{0x00000082, 0x02000000},
	{0x00000083, 0x00000000},
	{0x00000084, 0xe3f3e4f4},
	{0x00000085, 0x00052024},
	{0x00000087, 0x00000000},
	{0x00000088, 0x66036603},
	{0x00000089, 0x01000000},
	{0x0000008b, 0x1c0a0000},
	{0x0000008c, 0xff010000},
	{0x0000008e, 0xffffefff},
	{0x0000008f, 0xfff3efff},
	{0x00000090, 0xfff3efbf},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00a77400}
};
1389
/*
 * MC IO debug settings for Pitcairn; same format and use as
 * tahiti_io_mc_regs (differs only in the last entry's value).
 */
static const u32 pitcairn_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
	{0x0000006f, 0x03044000},
	{0x00000070, 0x0480c018},
	{0x00000071, 0x00000040},
	{0x00000072, 0x01000000},
	{0x00000074, 0x000000ff},
	{0x00000075, 0x00143400},
	{0x00000076, 0x08ec0800},
	{0x00000077, 0x040000cc},
	{0x00000079, 0x00000000},
	{0x0000007a, 0x21000409},
	{0x0000007c, 0x00000000},
	{0x0000007d, 0xe8000000},
	{0x0000007e, 0x044408a8},
	{0x0000007f, 0x00000003},
	{0x00000080, 0x00000000},
	{0x00000081, 0x01000000},
	{0x00000082, 0x02000000},
	{0x00000083, 0x00000000},
	{0x00000084, 0xe3f3e4f4},
	{0x00000085, 0x00052024},
	{0x00000087, 0x00000000},
	{0x00000088, 0x66036603},
	{0x00000089, 0x01000000},
	{0x0000008b, 0x1c0a0000},
	{0x0000008c, 0xff010000},
	{0x0000008e, 0xffffefff},
	{0x0000008f, 0xfff3efff},
	{0x00000090, 0xfff3efbf},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00a47400}
};
1428
/*
 * MC IO debug settings for Verde; same format and use as
 * tahiti_io_mc_regs (differs only in the last entry's value).
 */
static const u32 verde_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
	{0x0000006f, 0x03044000},
	{0x00000070, 0x0480c018},
	{0x00000071, 0x00000040},
	{0x00000072, 0x01000000},
	{0x00000074, 0x000000ff},
	{0x00000075, 0x00143400},
	{0x00000076, 0x08ec0800},
	{0x00000077, 0x040000cc},
	{0x00000079, 0x00000000},
	{0x0000007a, 0x21000409},
	{0x0000007c, 0x00000000},
	{0x0000007d, 0xe8000000},
	{0x0000007e, 0x044408a8},
	{0x0000007f, 0x00000003},
	{0x00000080, 0x00000000},
	{0x00000081, 0x01000000},
	{0x00000082, 0x02000000},
	{0x00000083, 0x00000000},
	{0x00000084, 0xe3f3e4f4},
	{0x00000085, 0x00052024},
	{0x00000087, 0x00000000},
	{0x00000088, 0x66036603},
	{0x00000089, 0x01000000},
	{0x0000008b, 0x1c0a0000},
	{0x0000008c, 0xff010000},
	{0x0000008e, 0xffffefff},
	{0x0000008f, 0xfff3efff},
	{0x00000090, 0xfff3efbf},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00a37400}
};
1467
/*
 * MC IO debug settings for Oland; same format and use as
 * tahiti_io_mc_regs (differs only in the last entry's value).
 */
static const u32 oland_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
	{0x0000006f, 0x03044000},
	{0x00000070, 0x0480c018},
	{0x00000071, 0x00000040},
	{0x00000072, 0x01000000},
	{0x00000074, 0x000000ff},
	{0x00000075, 0x00143400},
	{0x00000076, 0x08ec0800},
	{0x00000077, 0x040000cc},
	{0x00000079, 0x00000000},
	{0x0000007a, 0x21000409},
	{0x0000007c, 0x00000000},
	{0x0000007d, 0xe8000000},
	{0x0000007e, 0x044408a8},
	{0x0000007f, 0x00000003},
	{0x00000080, 0x00000000},
	{0x00000081, 0x01000000},
	{0x00000082, 0x02000000},
	{0x00000083, 0x00000000},
	{0x00000084, 0xe3f3e4f4},
	{0x00000085, 0x00052024},
	{0x00000087, 0x00000000},
	{0x00000088, 0x66036603},
	{0x00000089, 0x01000000},
	{0x0000008b, 0x1c0a0000},
	{0x0000008c, 0xff010000},
	{0x0000008e, 0xffffefff},
	{0x0000008f, 0xfff3efff},
	{0x00000090, 0xfff3efbf},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00a17730}
};
1506
/*
 * MC IO debug settings for Hainan; same format and use as
 * tahiti_io_mc_regs (differs only in the last entry's value).
 */
static const u32 hainan_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
	{0x0000006f, 0x03044000},
	{0x00000070, 0x0480c018},
	{0x00000071, 0x00000040},
	{0x00000072, 0x01000000},
	{0x00000074, 0x000000ff},
	{0x00000075, 0x00143400},
	{0x00000076, 0x08ec0800},
	{0x00000077, 0x040000cc},
	{0x00000079, 0x00000000},
	{0x0000007a, 0x21000409},
	{0x0000007c, 0x00000000},
	{0x0000007d, 0xe8000000},
	{0x0000007e, 0x044408a8},
	{0x0000007f, 0x00000003},
	{0x00000080, 0x00000000},
	{0x00000081, 0x01000000},
	{0x00000082, 0x02000000},
	{0x00000083, 0x00000000},
	{0x00000084, 0xe3f3e4f4},
	{0x00000085, 0x00052024},
	{0x00000087, 0x00000000},
	{0x00000088, 0x66036603},
	{0x00000089, 0x01000000},
	{0x0000008b, 0x1c0a0000},
	{0x0000008c, 0xff010000},
	{0x0000008e, 0xffffefff},
	{0x0000008f, 0xfff3efff},
	{0x00000090, 0xfff3efbf},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00a07730}
};
1545
/**
 * si_mc_load_microcode - load the MC (memory controller) microcode
 *
 * @rdev: radeon_device pointer
 *
 * Programs the MC IO debug {index, data} pairs and streams the MC ucode
 * into the MC_SEQ_SUP engine, then waits (bounded by rdev->usec_timeout)
 * for both memory-training done bits. If the engine is already running
 * the upload is skipped entirely.
 *
 * Returns 0 on success (including the already-running case), or
 * -EINVAL when no MC firmware has been loaded into rdev->mc_fw.
 */
int si_mc_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data = NULL;		/* legacy blob: big-endian words */
	const __le32 *new_fw_data = NULL;	/* new layout: little-endian words */
	u32 running;
	u32 *io_mc_regs = NULL;			/* legacy: built-in per-asic table */
	const __le32 *new_io_mc_regs = NULL;	/* new layout: pairs from the blob */
	int i, regs_size, ucode_size;

	if (!rdev->mc_fw)
		return -EINVAL;

	if (rdev->new_fw) {
		/* New firmware layout: sizes and offsets come from the header. */
		const struct mc_firmware_header_v1_0 *hdr =
			(const struct mc_firmware_header_v1_0 *)rdev->mc_fw->data;

		radeon_ucode_print_mc_hdr(&hdr->header);
		/* io debug section is {index, data} pairs of 4-byte words */
		regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
		new_io_mc_regs = (const __le32 *)
			(rdev->mc_fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
		ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
		new_fw_data = (const __le32 *)
			(rdev->mc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
	} else {
		/* Legacy layout: the whole blob is ucode; the io debug
		 * pairs come from the per-asic tables defined above. */
		ucode_size = rdev->mc_fw->size / 4;

		switch (rdev->family) {
		case CHIP_TAHITI:
			io_mc_regs = (u32 *)&tahiti_io_mc_regs;
			regs_size = TAHITI_IO_MC_REGS_SIZE;
			break;
		case CHIP_PITCAIRN:
			io_mc_regs = (u32 *)&pitcairn_io_mc_regs;
			regs_size = TAHITI_IO_MC_REGS_SIZE;
			break;
		case CHIP_VERDE:
		default:
			io_mc_regs = (u32 *)&verde_io_mc_regs;
			regs_size = TAHITI_IO_MC_REGS_SIZE;
			break;
		case CHIP_OLAND:
			io_mc_regs = (u32 *)&oland_io_mc_regs;
			regs_size = TAHITI_IO_MC_REGS_SIZE;
			break;
		case CHIP_HAINAN:
			io_mc_regs = (u32 *)&hainan_io_mc_regs;
			regs_size = TAHITI_IO_MC_REGS_SIZE;
			break;
		}
		fw_data = (const __be32 *)rdev->mc_fw->data;
	}

	running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;

	if (running == 0) {
		/* reset the engine and set to writable */
		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(MC_SEQ_SUP_CNTL, 0x00000010);

		/* load mc io regs */
		for (i = 0; i < regs_size; i++) {
			if (rdev->new_fw) {
				WREG32(MC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(new_io_mc_regs++));
				WREG32(MC_SEQ_IO_DEBUG_DATA, le32_to_cpup(new_io_mc_regs++));
			} else {
				WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]);
				WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
			}
		}
		/* load the MC ucode */
		for (i = 0; i < ucode_size; i++) {
			if (rdev->new_fw)
				WREG32(MC_SEQ_SUP_PGM, le32_to_cpup(new_fw_data++));
			else
				WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++));
		}

		/* put the engine back into the active state */
		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(MC_SEQ_SUP_CNTL, 0x00000004);
		WREG32(MC_SEQ_SUP_CNTL, 0x00000001);

		/* wait for training to complete (channel D0, then D1);
		 * a timeout here is not treated as an error */
		for (i = 0; i < rdev->usec_timeout; i++) {
			if (RREG32(MC_SEQ_TRAIN_WAKEUP_CNTL) & TRAIN_DONE_D0)
				break;
			udelay(1);
		}
		for (i = 0; i < rdev->usec_timeout; i++) {
			if (RREG32(MC_SEQ_TRAIN_WAKEUP_CNTL) & TRAIN_DONE_D1)
				break;
			udelay(1);
		}
	}

	return 0;
}
1644
Alex Deucher0f0de062012-03-20 17:18:17 -04001645static int si_init_microcode(struct radeon_device *rdev)
1646{
Alex Deucher0f0de062012-03-20 17:18:17 -04001647 const char *chip_name;
Alex Deucher629bd332014-06-25 18:41:34 -04001648 const char *new_chip_name;
Alex Deucher0f0de062012-03-20 17:18:17 -04001649 size_t pfp_req_size, me_req_size, ce_req_size, rlc_req_size, mc_req_size;
Alex Deucher1ebe9282014-04-11 11:21:49 -04001650 size_t smc_req_size, mc2_req_size;
Alex Deucher0f0de062012-03-20 17:18:17 -04001651 char fw_name[30];
1652 int err;
Alex Deucher629bd332014-06-25 18:41:34 -04001653 int new_fw = 0;
Alex Deucher861c7fd2015-12-09 23:48:11 -05001654 bool new_smc = false;
Alex Deucher77e82092016-12-20 16:35:50 -05001655 bool si58_fw = false;
Alex Deucher0f0de062012-03-20 17:18:17 -04001656
1657 DRM_DEBUG("\n");
1658
Alex Deucher0f0de062012-03-20 17:18:17 -04001659 switch (rdev->family) {
1660 case CHIP_TAHITI:
1661 chip_name = "TAHITI";
Alex Deucher629bd332014-06-25 18:41:34 -04001662 new_chip_name = "tahiti";
Alex Deucher0f0de062012-03-20 17:18:17 -04001663 pfp_req_size = SI_PFP_UCODE_SIZE * 4;
1664 me_req_size = SI_PM4_UCODE_SIZE * 4;
1665 ce_req_size = SI_CE_UCODE_SIZE * 4;
1666 rlc_req_size = SI_RLC_UCODE_SIZE * 4;
1667 mc_req_size = SI_MC_UCODE_SIZE * 4;
Alex Deucher1ebe9282014-04-11 11:21:49 -04001668 mc2_req_size = TAHITI_MC_UCODE_SIZE * 4;
Alex Deuchera9e61412013-06-25 17:56:16 -04001669 smc_req_size = ALIGN(TAHITI_SMC_UCODE_SIZE, 4);
Alex Deucher0f0de062012-03-20 17:18:17 -04001670 break;
1671 case CHIP_PITCAIRN:
1672 chip_name = "PITCAIRN";
Alex Deucher7cd7aea2017-01-05 12:15:52 -05001673 if ((rdev->pdev->revision == 0x81) &&
1674 ((rdev->pdev->device == 0x6810) ||
1675 (rdev->pdev->device == 0x6811)))
Alex Deucher861c7fd2015-12-09 23:48:11 -05001676 new_smc = true;
Alex Deucher629bd332014-06-25 18:41:34 -04001677 new_chip_name = "pitcairn";
Alex Deucher0f0de062012-03-20 17:18:17 -04001678 pfp_req_size = SI_PFP_UCODE_SIZE * 4;
1679 me_req_size = SI_PM4_UCODE_SIZE * 4;
1680 ce_req_size = SI_CE_UCODE_SIZE * 4;
1681 rlc_req_size = SI_RLC_UCODE_SIZE * 4;
1682 mc_req_size = SI_MC_UCODE_SIZE * 4;
Alex Deucher1ebe9282014-04-11 11:21:49 -04001683 mc2_req_size = PITCAIRN_MC_UCODE_SIZE * 4;
Alex Deuchera9e61412013-06-25 17:56:16 -04001684 smc_req_size = ALIGN(PITCAIRN_SMC_UCODE_SIZE, 4);
Alex Deucher0f0de062012-03-20 17:18:17 -04001685 break;
1686 case CHIP_VERDE:
1687 chip_name = "VERDE";
Alex Deucher7cd7aea2017-01-05 12:15:52 -05001688 if (((rdev->pdev->device == 0x6820) &&
1689 ((rdev->pdev->revision == 0x81) ||
1690 (rdev->pdev->revision == 0x83))) ||
1691 ((rdev->pdev->device == 0x6821) &&
1692 ((rdev->pdev->revision == 0x83) ||
1693 (rdev->pdev->revision == 0x87))) ||
1694 ((rdev->pdev->revision == 0x87) &&
1695 ((rdev->pdev->device == 0x6823) ||
1696 (rdev->pdev->device == 0x682b))))
Alex Deucher861c7fd2015-12-09 23:48:11 -05001697 new_smc = true;
Alex Deucher629bd332014-06-25 18:41:34 -04001698 new_chip_name = "verde";
Alex Deucher0f0de062012-03-20 17:18:17 -04001699 pfp_req_size = SI_PFP_UCODE_SIZE * 4;
1700 me_req_size = SI_PM4_UCODE_SIZE * 4;
1701 ce_req_size = SI_CE_UCODE_SIZE * 4;
1702 rlc_req_size = SI_RLC_UCODE_SIZE * 4;
1703 mc_req_size = SI_MC_UCODE_SIZE * 4;
Alex Deucher1ebe9282014-04-11 11:21:49 -04001704 mc2_req_size = VERDE_MC_UCODE_SIZE * 4;
Alex Deuchera9e61412013-06-25 17:56:16 -04001705 smc_req_size = ALIGN(VERDE_SMC_UCODE_SIZE, 4);
Alex Deucher0f0de062012-03-20 17:18:17 -04001706 break;
Alex Deucherbcc7f5d2012-07-26 18:36:28 -04001707 case CHIP_OLAND:
1708 chip_name = "OLAND";
Alex Deucher7cd7aea2017-01-05 12:15:52 -05001709 if (((rdev->pdev->revision == 0x81) &&
1710 ((rdev->pdev->device == 0x6600) ||
1711 (rdev->pdev->device == 0x6604) ||
1712 (rdev->pdev->device == 0x6605) ||
1713 (rdev->pdev->device == 0x6610))) ||
1714 ((rdev->pdev->revision == 0x83) &&
1715 (rdev->pdev->device == 0x6610)))
Alex Deucher861c7fd2015-12-09 23:48:11 -05001716 new_smc = true;
Alex Deucher629bd332014-06-25 18:41:34 -04001717 new_chip_name = "oland";
Alex Deucherbcc7f5d2012-07-26 18:36:28 -04001718 pfp_req_size = SI_PFP_UCODE_SIZE * 4;
1719 me_req_size = SI_PM4_UCODE_SIZE * 4;
1720 ce_req_size = SI_CE_UCODE_SIZE * 4;
1721 rlc_req_size = SI_RLC_UCODE_SIZE * 4;
Alex Deucher1ebe9282014-04-11 11:21:49 -04001722 mc_req_size = mc2_req_size = OLAND_MC_UCODE_SIZE * 4;
Alex Deuchera9e61412013-06-25 17:56:16 -04001723 smc_req_size = ALIGN(OLAND_SMC_UCODE_SIZE, 4);
Alex Deucherbcc7f5d2012-07-26 18:36:28 -04001724 break;
Alex Deucherc04c00b2012-07-31 12:57:45 -04001725 case CHIP_HAINAN:
1726 chip_name = "HAINAN";
Alex Deucher7cd7aea2017-01-05 12:15:52 -05001727 if (((rdev->pdev->revision == 0x81) &&
1728 (rdev->pdev->device == 0x6660)) ||
1729 ((rdev->pdev->revision == 0x83) &&
1730 ((rdev->pdev->device == 0x6660) ||
1731 (rdev->pdev->device == 0x6663) ||
1732 (rdev->pdev->device == 0x6665) ||
1733 (rdev->pdev->device == 0x6667))) ||
1734 ((rdev->pdev->revision == 0xc3) &&
1735 (rdev->pdev->device == 0x6665)))
Alex Deucher861c7fd2015-12-09 23:48:11 -05001736 new_smc = true;
Alex Deucher629bd332014-06-25 18:41:34 -04001737 new_chip_name = "hainan";
Alex Deucherc04c00b2012-07-31 12:57:45 -04001738 pfp_req_size = SI_PFP_UCODE_SIZE * 4;
1739 me_req_size = SI_PM4_UCODE_SIZE * 4;
1740 ce_req_size = SI_CE_UCODE_SIZE * 4;
1741 rlc_req_size = SI_RLC_UCODE_SIZE * 4;
Alex Deucher1ebe9282014-04-11 11:21:49 -04001742 mc_req_size = mc2_req_size = OLAND_MC_UCODE_SIZE * 4;
Alex Deuchera9e61412013-06-25 17:56:16 -04001743 smc_req_size = ALIGN(HAINAN_SMC_UCODE_SIZE, 4);
Alex Deucherc04c00b2012-07-31 12:57:45 -04001744 break;
Alex Deucher0f0de062012-03-20 17:18:17 -04001745 default: BUG();
1746 }
1747
Alex Deucher77e82092016-12-20 16:35:50 -05001748 /* this memory configuration requires special firmware */
1749 if (((RREG32(MC_SEQ_MISC0) & 0xff000000) >> 24) == 0x58)
1750 si58_fw = true;
1751
Alex Deucher629bd332014-06-25 18:41:34 -04001752 DRM_INFO("Loading %s Microcode\n", new_chip_name);
Alex Deucher0f0de062012-03-20 17:18:17 -04001753
Alex Deucher629bd332014-06-25 18:41:34 -04001754 snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", new_chip_name);
Jerome Glisse0a168932013-07-11 15:53:01 -04001755 err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
Alex Deucher1ebe9282014-04-11 11:21:49 -04001756 if (err) {
Alex Deucher629bd332014-06-25 18:41:34 -04001757 snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
1758 err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
Alex Deucher1ebe9282014-04-11 11:21:49 -04001759 if (err)
1760 goto out;
Alex Deucher629bd332014-06-25 18:41:34 -04001761 if (rdev->pfp_fw->size != pfp_req_size) {
1762 printk(KERN_ERR
1763 "si_cp: Bogus length %zu in firmware \"%s\"\n",
1764 rdev->pfp_fw->size, fw_name);
1765 err = -EINVAL;
1766 goto out;
1767 }
1768 } else {
1769 err = radeon_ucode_validate(rdev->pfp_fw);
1770 if (err) {
1771 printk(KERN_ERR
1772 "si_cp: validation failed for firmware \"%s\"\n",
1773 fw_name);
1774 goto out;
1775 } else {
1776 new_fw++;
1777 }
Alex Deucher1ebe9282014-04-11 11:21:49 -04001778 }
Alex Deucher0f0de062012-03-20 17:18:17 -04001779
Alex Deucher629bd332014-06-25 18:41:34 -04001780 snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", new_chip_name);
1781 err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
1782 if (err) {
1783 snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
1784 err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
1785 if (err)
1786 goto out;
1787 if (rdev->me_fw->size != me_req_size) {
1788 printk(KERN_ERR
1789 "si_cp: Bogus length %zu in firmware \"%s\"\n",
1790 rdev->me_fw->size, fw_name);
1791 err = -EINVAL;
1792 }
1793 } else {
1794 err = radeon_ucode_validate(rdev->me_fw);
1795 if (err) {
1796 printk(KERN_ERR
1797 "si_cp: validation failed for firmware \"%s\"\n",
1798 fw_name);
1799 goto out;
1800 } else {
1801 new_fw++;
1802 }
1803 }
1804
1805 snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", new_chip_name);
1806 err = request_firmware(&rdev->ce_fw, fw_name, rdev->dev);
1807 if (err) {
1808 snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name);
1809 err = request_firmware(&rdev->ce_fw, fw_name, rdev->dev);
1810 if (err)
1811 goto out;
1812 if (rdev->ce_fw->size != ce_req_size) {
1813 printk(KERN_ERR
1814 "si_cp: Bogus length %zu in firmware \"%s\"\n",
1815 rdev->ce_fw->size, fw_name);
1816 err = -EINVAL;
1817 }
1818 } else {
1819 err = radeon_ucode_validate(rdev->ce_fw);
1820 if (err) {
1821 printk(KERN_ERR
1822 "si_cp: validation failed for firmware \"%s\"\n",
1823 fw_name);
1824 goto out;
1825 } else {
1826 new_fw++;
1827 }
1828 }
1829
1830 snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", new_chip_name);
1831 err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
1832 if (err) {
1833 snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", chip_name);
1834 err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
1835 if (err)
1836 goto out;
1837 if (rdev->rlc_fw->size != rlc_req_size) {
1838 printk(KERN_ERR
1839 "si_rlc: Bogus length %zu in firmware \"%s\"\n",
1840 rdev->rlc_fw->size, fw_name);
1841 err = -EINVAL;
1842 }
1843 } else {
1844 err = radeon_ucode_validate(rdev->rlc_fw);
1845 if (err) {
1846 printk(KERN_ERR
1847 "si_cp: validation failed for firmware \"%s\"\n",
1848 fw_name);
1849 goto out;
1850 } else {
1851 new_fw++;
1852 }
1853 }
1854
Alex Deucher77e82092016-12-20 16:35:50 -05001855 if (si58_fw)
1856 snprintf(fw_name, sizeof(fw_name), "radeon/si58_mc.bin");
1857 else
1858 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", new_chip_name);
Alex Deucher629bd332014-06-25 18:41:34 -04001859 err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
1860 if (err) {
1861 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc2.bin", chip_name);
1862 err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
1863 if (err) {
1864 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
1865 err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
1866 if (err)
1867 goto out;
1868 }
1869 if ((rdev->mc_fw->size != mc_req_size) &&
1870 (rdev->mc_fw->size != mc2_req_size)) {
1871 printk(KERN_ERR
1872 "si_mc: Bogus length %zu in firmware \"%s\"\n",
1873 rdev->mc_fw->size, fw_name);
1874 err = -EINVAL;
1875 }
1876 DRM_INFO("%s: %zu bytes\n", fw_name, rdev->mc_fw->size);
1877 } else {
1878 err = radeon_ucode_validate(rdev->mc_fw);
1879 if (err) {
1880 printk(KERN_ERR
1881 "si_cp: validation failed for firmware \"%s\"\n",
1882 fw_name);
1883 goto out;
1884 } else {
1885 new_fw++;
1886 }
1887 }
1888
Alex Deucher861c7fd2015-12-09 23:48:11 -05001889 if (new_smc)
1890 snprintf(fw_name, sizeof(fw_name), "radeon/%s_k_smc.bin", new_chip_name);
1891 else
1892 snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", new_chip_name);
Jerome Glisse0a168932013-07-11 15:53:01 -04001893 err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
Alex Deucher8a53fa22013-08-07 16:09:08 -04001894 if (err) {
Alex Deucher629bd332014-06-25 18:41:34 -04001895 snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
1896 err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
1897 if (err) {
1898 printk(KERN_ERR
1899 "smc: error loading firmware \"%s\"\n",
1900 fw_name);
1901 release_firmware(rdev->smc_fw);
1902 rdev->smc_fw = NULL;
1903 err = 0;
1904 } else if (rdev->smc_fw->size != smc_req_size) {
1905 printk(KERN_ERR
1906 "si_smc: Bogus length %zu in firmware \"%s\"\n",
1907 rdev->smc_fw->size, fw_name);
1908 err = -EINVAL;
1909 }
1910 } else {
1911 err = radeon_ucode_validate(rdev->smc_fw);
1912 if (err) {
1913 printk(KERN_ERR
1914 "si_cp: validation failed for firmware \"%s\"\n",
1915 fw_name);
1916 goto out;
1917 } else {
1918 new_fw++;
1919 }
Alex Deuchera9e61412013-06-25 17:56:16 -04001920 }
1921
Alex Deucher629bd332014-06-25 18:41:34 -04001922 if (new_fw == 0) {
1923 rdev->new_fw = false;
1924 } else if (new_fw < 6) {
1925 printk(KERN_ERR "si_fw: mixing new and old firmware!\n");
1926 err = -EINVAL;
1927 } else {
1928 rdev->new_fw = true;
1929 }
Alex Deucher0f0de062012-03-20 17:18:17 -04001930out:
Alex Deucher0f0de062012-03-20 17:18:17 -04001931 if (err) {
1932 if (err != -EINVAL)
1933 printk(KERN_ERR
1934 "si_cp: Failed to load firmware \"%s\"\n",
1935 fw_name);
1936 release_firmware(rdev->pfp_fw);
1937 rdev->pfp_fw = NULL;
1938 release_firmware(rdev->me_fw);
1939 rdev->me_fw = NULL;
1940 release_firmware(rdev->ce_fw);
1941 rdev->ce_fw = NULL;
1942 release_firmware(rdev->rlc_fw);
1943 rdev->rlc_fw = NULL;
1944 release_firmware(rdev->mc_fw);
1945 rdev->mc_fw = NULL;
Alex Deuchera9e61412013-06-25 17:56:16 -04001946 release_firmware(rdev->smc_fw);
1947 rdev->smc_fw = NULL;
Alex Deucher0f0de062012-03-20 17:18:17 -04001948 }
1949 return err;
1950}
1951
Alex Deucher43b3cd92012-03-20 17:18:00 -04001952/* watermark setup */
1953static u32 dce6_line_buffer_adjust(struct radeon_device *rdev,
1954 struct radeon_crtc *radeon_crtc,
1955 struct drm_display_mode *mode,
1956 struct drm_display_mode *other_mode)
1957{
Alex Deucher290d2452013-08-19 11:15:43 -04001958 u32 tmp, buffer_alloc, i;
1959 u32 pipe_offset = radeon_crtc->crtc_id * 0x20;
Alex Deucher43b3cd92012-03-20 17:18:00 -04001960 /*
1961 * Line Buffer Setup
1962 * There are 3 line buffers, each one shared by 2 display controllers.
1963 * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between
1964 * the display controllers. The paritioning is done via one of four
1965 * preset allocations specified in bits 21:20:
1966 * 0 - half lb
1967 * 2 - whole lb, other crtc must be disabled
1968 */
1969 /* this can get tricky if we have two large displays on a paired group
1970 * of crtcs. Ideally for multiple large displays we'd assign them to
1971 * non-linked crtcs for maximum line buffer allocation.
1972 */
1973 if (radeon_crtc->base.enabled && mode) {
Alex Deucher290d2452013-08-19 11:15:43 -04001974 if (other_mode) {
Alex Deucher43b3cd92012-03-20 17:18:00 -04001975 tmp = 0; /* 1/2 */
Alex Deucher290d2452013-08-19 11:15:43 -04001976 buffer_alloc = 1;
1977 } else {
Alex Deucher43b3cd92012-03-20 17:18:00 -04001978 tmp = 2; /* whole */
Alex Deucher290d2452013-08-19 11:15:43 -04001979 buffer_alloc = 2;
1980 }
1981 } else {
Alex Deucher43b3cd92012-03-20 17:18:00 -04001982 tmp = 0;
Alex Deucher290d2452013-08-19 11:15:43 -04001983 buffer_alloc = 0;
1984 }
Alex Deucher43b3cd92012-03-20 17:18:00 -04001985
1986 WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset,
1987 DC_LB_MEMORY_CONFIG(tmp));
1988
Alex Deucher290d2452013-08-19 11:15:43 -04001989 WREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
1990 DMIF_BUFFERS_ALLOCATED(buffer_alloc));
1991 for (i = 0; i < rdev->usec_timeout; i++) {
1992 if (RREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
1993 DMIF_BUFFERS_ALLOCATED_COMPLETED)
1994 break;
1995 udelay(1);
1996 }
1997
Alex Deucher43b3cd92012-03-20 17:18:00 -04001998 if (radeon_crtc->base.enabled && mode) {
1999 switch (tmp) {
2000 case 0:
2001 default:
2002 return 4096 * 2;
2003 case 2:
2004 return 8192 * 2;
2005 }
2006 }
2007
2008 /* controller not enabled, so no lb used */
2009 return 0;
2010}
2011
Alex Deucherca7db222012-03-20 17:18:30 -04002012static u32 si_get_number_of_dram_channels(struct radeon_device *rdev)
Alex Deucher43b3cd92012-03-20 17:18:00 -04002013{
2014 u32 tmp = RREG32(MC_SHARED_CHMAP);
2015
2016 switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
2017 case 0:
2018 default:
2019 return 1;
2020 case 1:
2021 return 2;
2022 case 2:
2023 return 4;
2024 case 3:
2025 return 8;
2026 case 4:
2027 return 3;
2028 case 5:
2029 return 6;
2030 case 6:
2031 return 10;
2032 case 7:
2033 return 12;
2034 case 8:
2035 return 16;
2036 }
2037}
2038
/* Input parameters for the DCE6 display watermark calculations below. */
struct dce6_wm_params {
	u32 dram_channels; /* number of dram channels */
	u32 yclk; /* bandwidth per dram data pin in kHz */
	u32 sclk; /* engine clock in kHz */
	u32 disp_clk; /* display clock in kHz */
	u32 src_width; /* viewport width */
	u32 active_time; /* active display time in ns */
	u32 blank_time; /* blank time in ns */
	bool interlaced; /* mode is interlaced */
	fixed20_12 vsc; /* vertical scale ratio */
	u32 num_heads; /* number of active crtcs */
	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
	u32 lb_size; /* line buffer allocated to pipe */
	u32 vtaps; /* vertical scaler taps */
};
2054
2055static u32 dce6_dram_bandwidth(struct dce6_wm_params *wm)
2056{
2057 /* Calculate raw DRAM Bandwidth */
2058 fixed20_12 dram_efficiency; /* 0.7 */
2059 fixed20_12 yclk, dram_channels, bandwidth;
2060 fixed20_12 a;
2061
2062 a.full = dfixed_const(1000);
2063 yclk.full = dfixed_const(wm->yclk);
2064 yclk.full = dfixed_div(yclk, a);
2065 dram_channels.full = dfixed_const(wm->dram_channels * 4);
2066 a.full = dfixed_const(10);
2067 dram_efficiency.full = dfixed_const(7);
2068 dram_efficiency.full = dfixed_div(dram_efficiency, a);
2069 bandwidth.full = dfixed_mul(dram_channels, yclk);
2070 bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);
2071
2072 return dfixed_trunc(bandwidth);
2073}
2074
2075static u32 dce6_dram_bandwidth_for_display(struct dce6_wm_params *wm)
2076{
2077 /* Calculate DRAM Bandwidth and the part allocated to display. */
2078 fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
2079 fixed20_12 yclk, dram_channels, bandwidth;
2080 fixed20_12 a;
2081
2082 a.full = dfixed_const(1000);
2083 yclk.full = dfixed_const(wm->yclk);
2084 yclk.full = dfixed_div(yclk, a);
2085 dram_channels.full = dfixed_const(wm->dram_channels * 4);
2086 a.full = dfixed_const(10);
2087 disp_dram_allocation.full = dfixed_const(3); /* XXX worse case value 0.3 */
2088 disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
2089 bandwidth.full = dfixed_mul(dram_channels, yclk);
2090 bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);
2091
2092 return dfixed_trunc(bandwidth);
2093}
2094
2095static u32 dce6_data_return_bandwidth(struct dce6_wm_params *wm)
2096{
2097 /* Calculate the display Data return Bandwidth */
2098 fixed20_12 return_efficiency; /* 0.8 */
2099 fixed20_12 sclk, bandwidth;
2100 fixed20_12 a;
2101
2102 a.full = dfixed_const(1000);
2103 sclk.full = dfixed_const(wm->sclk);
2104 sclk.full = dfixed_div(sclk, a);
2105 a.full = dfixed_const(10);
2106 return_efficiency.full = dfixed_const(8);
2107 return_efficiency.full = dfixed_div(return_efficiency, a);
2108 a.full = dfixed_const(32);
2109 bandwidth.full = dfixed_mul(a, sclk);
2110 bandwidth.full = dfixed_mul(bandwidth, return_efficiency);
2111
2112 return dfixed_trunc(bandwidth);
2113}
2114
2115static u32 dce6_get_dmif_bytes_per_request(struct dce6_wm_params *wm)
2116{
2117 return 32;
2118}
2119
2120static u32 dce6_dmif_request_bandwidth(struct dce6_wm_params *wm)
2121{
2122 /* Calculate the DMIF Request Bandwidth */
2123 fixed20_12 disp_clk_request_efficiency; /* 0.8 */
2124 fixed20_12 disp_clk, sclk, bandwidth;
2125 fixed20_12 a, b1, b2;
2126 u32 min_bandwidth;
2127
2128 a.full = dfixed_const(1000);
2129 disp_clk.full = dfixed_const(wm->disp_clk);
2130 disp_clk.full = dfixed_div(disp_clk, a);
2131 a.full = dfixed_const(dce6_get_dmif_bytes_per_request(wm) / 2);
2132 b1.full = dfixed_mul(a, disp_clk);
2133
2134 a.full = dfixed_const(1000);
2135 sclk.full = dfixed_const(wm->sclk);
2136 sclk.full = dfixed_div(sclk, a);
2137 a.full = dfixed_const(dce6_get_dmif_bytes_per_request(wm));
2138 b2.full = dfixed_mul(a, sclk);
2139
2140 a.full = dfixed_const(10);
2141 disp_clk_request_efficiency.full = dfixed_const(8);
2142 disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
2143
2144 min_bandwidth = min(dfixed_trunc(b1), dfixed_trunc(b2));
2145
2146 a.full = dfixed_const(min_bandwidth);
2147 bandwidth.full = dfixed_mul(a, disp_clk_request_efficiency);
2148
2149 return dfixed_trunc(bandwidth);
2150}
2151
2152static u32 dce6_available_bandwidth(struct dce6_wm_params *wm)
2153{
2154 /* Calculate the Available bandwidth. Display can use this temporarily but not in average. */
2155 u32 dram_bandwidth = dce6_dram_bandwidth(wm);
2156 u32 data_return_bandwidth = dce6_data_return_bandwidth(wm);
2157 u32 dmif_req_bandwidth = dce6_dmif_request_bandwidth(wm);
2158
2159 return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
2160}
2161
2162static u32 dce6_average_bandwidth(struct dce6_wm_params *wm)
2163{
2164 /* Calculate the display mode Average Bandwidth
2165 * DisplayMode should contain the source and destination dimensions,
2166 * timing, etc.
2167 */
2168 fixed20_12 bpp;
2169 fixed20_12 line_time;
2170 fixed20_12 src_width;
2171 fixed20_12 bandwidth;
2172 fixed20_12 a;
2173
2174 a.full = dfixed_const(1000);
2175 line_time.full = dfixed_const(wm->active_time + wm->blank_time);
2176 line_time.full = dfixed_div(line_time, a);
2177 bpp.full = dfixed_const(wm->bytes_per_pixel);
2178 src_width.full = dfixed_const(wm->src_width);
2179 bandwidth.full = dfixed_mul(src_width, bpp);
2180 bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
2181 bandwidth.full = dfixed_div(bandwidth, line_time);
2182
2183 return dfixed_trunc(bandwidth);
2184}
2185
static u32 dce6_latency_watermark(struct dce6_wm_params *wm)
{
	/* First calculate the latency in ns */
	u32 mc_latency = 2000; /* 2000 ns. */
	u32 available_bandwidth = dce6_available_bandwidth(wm);
	/* round-trip time for a 512B*8 chunk at the available bandwidth, ns */
	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
	/* round-trip time for a 128B*4 cursor line pair, ns */
	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
	/* extra delay caused by the other active heads' outstanding requests */
	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
		(wm->num_heads * cursor_line_pair_return_time);
	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
	u32 tmp, dmif_size = 12288;
	fixed20_12 a, b, c;

	/* no active heads -> nothing to hide, watermark of 0 */
	if (wm->num_heads == 0)
		return 0;

	/* heavy downscaling, many scaler taps or interlacing means more
	 * source lines must be fetched per destination line */
	a.full = dfixed_const(2);
	b.full = dfixed_const(1);
	if ((wm->vsc.full > a.full) ||
	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
	    (wm->vtaps >= 5) ||
	    ((wm->vsc.full >= a.full) && wm->interlaced))
		max_src_lines_per_dst_line = 4;
	else
		max_src_lines_per_dst_line = 2;

	/* a = this head's per-head share of the available bandwidth */
	a.full = dfixed_const(available_bandwidth);
	b.full = dfixed_const(wm->num_heads);
	a.full = dfixed_div(a, b);

	/* b = dmif_size / ((mc_latency + 512) / disp_clk) */
	b.full = dfixed_const(mc_latency + 512);
	c.full = dfixed_const(wm->disp_clk);
	b.full = dfixed_div(b, c);

	c.full = dfixed_const(dmif_size);
	b.full = dfixed_div(c, b);

	tmp = min(dfixed_trunc(a), dfixed_trunc(b));

	/* line buffer fill rate is further bounded by disp_clk * bytes/pixel */
	b.full = dfixed_const(1000);
	c.full = dfixed_const(wm->disp_clk);
	b.full = dfixed_div(c, b);
	c.full = dfixed_const(wm->bytes_per_pixel);
	b.full = dfixed_mul(b, c);

	lb_fill_bw = min(tmp, dfixed_trunc(b));

	/* time to fill the worst-case number of source lines at lb_fill_bw */
	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
	b.full = dfixed_const(1000);
	c.full = dfixed_const(lb_fill_bw);
	b.full = dfixed_div(c, b);
	a.full = dfixed_div(a, b);
	line_fill_time = dfixed_trunc(a);

	/* if the line cannot be filled within the active period, the
	 * watermark must also absorb the excess fill time */
	if (line_fill_time < wm->active_time)
		return latency;
	else
		return latency + (line_fill_time - wm->active_time);

}
2248
2249static bool dce6_average_bandwidth_vs_dram_bandwidth_for_display(struct dce6_wm_params *wm)
2250{
2251 if (dce6_average_bandwidth(wm) <=
2252 (dce6_dram_bandwidth_for_display(wm) / wm->num_heads))
2253 return true;
2254 else
2255 return false;
2256};
2257
2258static bool dce6_average_bandwidth_vs_available_bandwidth(struct dce6_wm_params *wm)
2259{
2260 if (dce6_average_bandwidth(wm) <=
2261 (dce6_available_bandwidth(wm) / wm->num_heads))
2262 return true;
2263 else
2264 return false;
2265};
2266
2267static bool dce6_check_latency_hiding(struct dce6_wm_params *wm)
2268{
2269 u32 lb_partitions = wm->lb_size / wm->src_width;
2270 u32 line_time = wm->active_time + wm->blank_time;
2271 u32 latency_tolerant_lines;
2272 u32 latency_hiding;
2273 fixed20_12 a;
2274
2275 a.full = dfixed_const(1);
2276 if (wm->vsc.full > a.full)
2277 latency_tolerant_lines = 1;
2278 else {
2279 if (lb_partitions <= (wm->vtaps + 1))
2280 latency_tolerant_lines = 1;
2281 else
2282 latency_tolerant_lines = 2;
2283 }
2284
2285 latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);
2286
2287 if (dce6_latency_watermark(wm) <= latency_hiding)
2288 return true;
2289 else
2290 return false;
2291}
2292
/* Compute and program the latency watermarks and display priority marks
 * for one crtc.  Two watermark sets are programmed: set A for high
 * clocks and set B for low clocks (used by DPM).
 */
static void dce6_program_watermarks(struct radeon_device *rdev,
					 struct radeon_crtc *radeon_crtc,
					 u32 lb_size, u32 num_heads)
{
	struct drm_display_mode *mode = &radeon_crtc->base.mode;
	struct dce6_wm_params wm_low, wm_high;
	u32 dram_channels;
	u32 pixel_period;
	u32 line_time = 0;
	u32 latency_watermark_a = 0, latency_watermark_b = 0;
	u32 priority_a_mark = 0, priority_b_mark = 0;
	u32 priority_a_cnt = PRIORITY_OFF;
	u32 priority_b_cnt = PRIORITY_OFF;
	u32 tmp, arb_control3;
	fixed20_12 a, b, c;

	if (radeon_crtc->base.enabled && num_heads && mode) {
		/* pixel period in ns; line time capped to the 16-bit
		 * register field */
		pixel_period = 1000000 / (u32)mode->clock;
		line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
		priority_a_cnt = 0;
		priority_b_cnt = 0;

		/* ARUBA uses the evergreen channel decode, everything
		 * else the SI one */
		if (rdev->family == CHIP_ARUBA)
			dram_channels = evergreen_get_number_of_dram_channels(rdev);
		else
			dram_channels = si_get_number_of_dram_channels(rdev);

		/* watermark for high clocks */
		if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
			wm_high.yclk =
				radeon_dpm_get_mclk(rdev, false) * 10;
			wm_high.sclk =
				radeon_dpm_get_sclk(rdev, false) * 10;
		} else {
			wm_high.yclk = rdev->pm.current_mclk * 10;
			wm_high.sclk = rdev->pm.current_sclk * 10;
		}

		wm_high.disp_clk = mode->clock;
		wm_high.src_width = mode->crtc_hdisplay;
		wm_high.active_time = mode->crtc_hdisplay * pixel_period;
		wm_high.blank_time = line_time - wm_high.active_time;
		wm_high.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm_high.interlaced = true;
		wm_high.vsc = radeon_crtc->vsc;
		wm_high.vtaps = 1;
		if (radeon_crtc->rmx_type != RMX_OFF)
			wm_high.vtaps = 2;
		wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
		wm_high.lb_size = lb_size;
		wm_high.dram_channels = dram_channels;
		wm_high.num_heads = num_heads;

		/* watermark for low clocks */
		if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
			wm_low.yclk =
				radeon_dpm_get_mclk(rdev, true) * 10;
			wm_low.sclk =
				radeon_dpm_get_sclk(rdev, true) * 10;
		} else {
			wm_low.yclk = rdev->pm.current_mclk * 10;
			wm_low.sclk = rdev->pm.current_sclk * 10;
		}

		wm_low.disp_clk = mode->clock;
		wm_low.src_width = mode->crtc_hdisplay;
		wm_low.active_time = mode->crtc_hdisplay * pixel_period;
		wm_low.blank_time = line_time - wm_low.active_time;
		wm_low.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm_low.interlaced = true;
		wm_low.vsc = radeon_crtc->vsc;
		wm_low.vtaps = 1;
		if (radeon_crtc->rmx_type != RMX_OFF)
			wm_low.vtaps = 2;
		wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
		wm_low.lb_size = lb_size;
		wm_low.dram_channels = dram_channels;
		wm_low.num_heads = num_heads;

		/* set for high clocks */
		latency_watermark_a = min(dce6_latency_watermark(&wm_high), (u32)65535);
		/* set for low clocks */
		latency_watermark_b = min(dce6_latency_watermark(&wm_low), (u32)65535);

		/* possibly force display priority to high */
		/* should really do this at mode validation time... */
		if (!dce6_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
		    !dce6_average_bandwidth_vs_available_bandwidth(&wm_high) ||
		    !dce6_check_latency_hiding(&wm_high) ||
		    (rdev->disp_priority == 2)) {
			DRM_DEBUG_KMS("force priority to high\n");
			priority_a_cnt |= PRIORITY_ALWAYS_ON;
			priority_b_cnt |= PRIORITY_ALWAYS_ON;
		}
		if (!dce6_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
		    !dce6_average_bandwidth_vs_available_bandwidth(&wm_low) ||
		    !dce6_check_latency_hiding(&wm_low) ||
		    (rdev->disp_priority == 2)) {
			DRM_DEBUG_KMS("force priority to high\n");
			priority_a_cnt |= PRIORITY_ALWAYS_ON;
			priority_b_cnt |= PRIORITY_ALWAYS_ON;
		}

		/* priority mark A: watermark_a * clock / hsc / 16 */
		a.full = dfixed_const(1000);
		b.full = dfixed_const(mode->clock);
		b.full = dfixed_div(b, a);
		c.full = dfixed_const(latency_watermark_a);
		c.full = dfixed_mul(c, b);
		c.full = dfixed_mul(c, radeon_crtc->hsc);
		c.full = dfixed_div(c, a);
		a.full = dfixed_const(16);
		c.full = dfixed_div(c, a);
		priority_a_mark = dfixed_trunc(c);
		priority_a_cnt |= priority_a_mark & PRIORITY_MARK_MASK;

		/* priority mark B: same computation using watermark_b */
		a.full = dfixed_const(1000);
		b.full = dfixed_const(mode->clock);
		b.full = dfixed_div(b, a);
		c.full = dfixed_const(latency_watermark_b);
		c.full = dfixed_mul(c, b);
		c.full = dfixed_mul(c, radeon_crtc->hsc);
		c.full = dfixed_div(c, a);
		a.full = dfixed_const(16);
		c.full = dfixed_div(c, a);
		priority_b_mark = dfixed_trunc(c);
		priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;

		/* Save number of lines the linebuffer leads before the scanout */
		radeon_crtc->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
	}

	/* select wm A */
	arb_control3 = RREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset);
	tmp = arb_control3;
	tmp &= ~LATENCY_WATERMARK_MASK(3);
	tmp |= LATENCY_WATERMARK_MASK(1);
	WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset, tmp);
	WREG32(DPG_PIPE_LATENCY_CONTROL + radeon_crtc->crtc_offset,
	       (LATENCY_LOW_WATERMARK(latency_watermark_a) |
		LATENCY_HIGH_WATERMARK(line_time)));
	/* select wm B */
	tmp = RREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset);
	tmp &= ~LATENCY_WATERMARK_MASK(3);
	tmp |= LATENCY_WATERMARK_MASK(2);
	WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset, tmp);
	WREG32(DPG_PIPE_LATENCY_CONTROL + radeon_crtc->crtc_offset,
	       (LATENCY_LOW_WATERMARK(latency_watermark_b) |
		LATENCY_HIGH_WATERMARK(line_time)));
	/* restore original selection */
	WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset, arb_control3);

	/* write the priority marks */
	WREG32(PRIORITY_A_CNT + radeon_crtc->crtc_offset, priority_a_cnt);
	WREG32(PRIORITY_B_CNT + radeon_crtc->crtc_offset, priority_b_cnt);

	/* save values for DPM */
	radeon_crtc->line_time = line_time;
	radeon_crtc->wm_high = latency_watermark_a;
	radeon_crtc->wm_low = latency_watermark_b;
}
2455
2456void dce6_bandwidth_update(struct radeon_device *rdev)
2457{
2458 struct drm_display_mode *mode0 = NULL;
2459 struct drm_display_mode *mode1 = NULL;
2460 u32 num_heads = 0, lb_size;
2461 int i;
2462
Alex Deucher8efe82c2014-11-03 09:57:46 -05002463 if (!rdev->mode_info.mode_config_initialized)
2464 return;
2465
Alex Deucher43b3cd92012-03-20 17:18:00 -04002466 radeon_update_display_priority(rdev);
2467
2468 for (i = 0; i < rdev->num_crtc; i++) {
2469 if (rdev->mode_info.crtcs[i]->base.enabled)
2470 num_heads++;
2471 }
2472 for (i = 0; i < rdev->num_crtc; i += 2) {
2473 mode0 = &rdev->mode_info.crtcs[i]->base.mode;
2474 mode1 = &rdev->mode_info.crtcs[i+1]->base.mode;
2475 lb_size = dce6_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i], mode0, mode1);
2476 dce6_program_watermarks(rdev, rdev->mode_info.crtcs[i], lb_size, num_heads);
2477 lb_size = dce6_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i+1], mode1, mode0);
2478 dce6_program_watermarks(rdev, rdev->mode_info.crtcs[i+1], lb_size, num_heads);
2479 }
2480}
2481
Alex Deucher0a96d722012-03-20 17:18:11 -04002482/*
2483 * Core functions
2484 */
Alex Deucher0a96d722012-03-20 17:18:11 -04002485static void si_tiling_mode_table_init(struct radeon_device *rdev)
2486{
Josh Poimboeuf102534b2016-03-11 08:18:24 -06002487 u32 *tile = rdev->config.si.tile_mode_array;
2488 const u32 num_tile_mode_states =
2489 ARRAY_SIZE(rdev->config.si.tile_mode_array);
2490 u32 reg_offset, split_equal_to_row_size;
Alex Deucher0a96d722012-03-20 17:18:11 -04002491
2492 switch (rdev->config.si.mem_row_size_in_kb) {
2493 case 1:
2494 split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_1KB;
2495 break;
2496 case 2:
2497 default:
2498 split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_2KB;
2499 break;
2500 case 4:
2501 split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_4KB;
2502 break;
2503 }
2504
Josh Poimboeuf102534b2016-03-11 08:18:24 -06002505 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
2506 tile[reg_offset] = 0;
2507
2508 switch(rdev->family) {
2509 case CHIP_TAHITI:
2510 case CHIP_PITCAIRN:
2511 /* non-AA compressed depth or any compressed stencil */
2512 tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2513 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2514 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2515 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2516 NUM_BANKS(ADDR_SURF_16_BANK) |
2517 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2518 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2519 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2520 /* 2xAA/4xAA compressed depth only */
2521 tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2522 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2523 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2524 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
2525 NUM_BANKS(ADDR_SURF_16_BANK) |
2526 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2527 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2528 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2529 /* 8xAA compressed depth only */
2530 tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2531 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2532 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2533 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2534 NUM_BANKS(ADDR_SURF_16_BANK) |
2535 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2536 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2537 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2538 /* 2xAA/4xAA compressed depth with stencil (for depth buffer) */
2539 tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2540 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2541 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2542 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
2543 NUM_BANKS(ADDR_SURF_16_BANK) |
2544 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2545 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2546 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2547 /* Maps w/ a dimension less than the 2D macro-tile dimensions (for mipmapped depth textures) */
2548 tile[4] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2549 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2550 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2551 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2552 NUM_BANKS(ADDR_SURF_16_BANK) |
2553 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2554 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2555 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2556 /* Uncompressed 16bpp depth - and stencil buffer allocated with it */
2557 tile[5] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2558 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2559 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2560 TILE_SPLIT(split_equal_to_row_size) |
2561 NUM_BANKS(ADDR_SURF_16_BANK) |
2562 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2563 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2564 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2565 /* Uncompressed 32bpp depth - and stencil buffer allocated with it */
2566 tile[6] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2567 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2568 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2569 TILE_SPLIT(split_equal_to_row_size) |
2570 NUM_BANKS(ADDR_SURF_16_BANK) |
2571 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2572 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2573 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
2574 /* Uncompressed 8bpp stencil without depth (drivers typically do not use) */
2575 tile[7] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2576 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2577 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2578 TILE_SPLIT(split_equal_to_row_size) |
2579 NUM_BANKS(ADDR_SURF_16_BANK) |
2580 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2581 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2582 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2583 /* 1D and 1D Array Surfaces */
2584 tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
2585 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
2586 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2587 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2588 NUM_BANKS(ADDR_SURF_16_BANK) |
2589 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2590 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2591 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2592 /* Displayable maps. */
2593 tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2594 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
2595 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2596 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2597 NUM_BANKS(ADDR_SURF_16_BANK) |
2598 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2599 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2600 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2601 /* Display 8bpp. */
2602 tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2603 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
2604 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2605 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2606 NUM_BANKS(ADDR_SURF_16_BANK) |
2607 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2608 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2609 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2610 /* Display 16bpp. */
2611 tile[11] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2612 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
2613 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2614 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2615 NUM_BANKS(ADDR_SURF_16_BANK) |
2616 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2617 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2618 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2619 /* Display 32bpp. */
2620 tile[12] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2621 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
2622 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2623 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
2624 NUM_BANKS(ADDR_SURF_16_BANK) |
2625 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2626 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2627 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
2628 /* Thin. */
2629 tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2630 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2631 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2632 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2633 NUM_BANKS(ADDR_SURF_16_BANK) |
2634 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2635 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2636 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2637 /* Thin 8 bpp. */
2638 tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2639 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2640 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2641 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2642 NUM_BANKS(ADDR_SURF_16_BANK) |
2643 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2644 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2645 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
2646 /* Thin 16 bpp. */
2647 tile[15] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2648 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2649 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2650 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2651 NUM_BANKS(ADDR_SURF_16_BANK) |
2652 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2653 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2654 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
2655 /* Thin 32 bpp. */
2656 tile[16] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2657 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2658 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2659 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
2660 NUM_BANKS(ADDR_SURF_16_BANK) |
2661 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2662 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2663 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
2664 /* Thin 64 bpp. */
2665 tile[17] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2666 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2667 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2668 TILE_SPLIT(split_equal_to_row_size) |
2669 NUM_BANKS(ADDR_SURF_16_BANK) |
2670 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2671 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2672 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
2673 /* 8 bpp PRT. */
2674 tile[21] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2675 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2676 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2677 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2678 NUM_BANKS(ADDR_SURF_16_BANK) |
2679 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
2680 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2681 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2682 /* 16 bpp PRT */
2683 tile[22] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2684 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2685 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2686 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2687 NUM_BANKS(ADDR_SURF_16_BANK) |
2688 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2689 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2690 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
2691 /* 32 bpp PRT */
2692 tile[23] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2693 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2694 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2695 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2696 NUM_BANKS(ADDR_SURF_16_BANK) |
2697 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2698 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2699 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2700 /* 64 bpp PRT */
2701 tile[24] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2702 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2703 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2704 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
2705 NUM_BANKS(ADDR_SURF_16_BANK) |
2706 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2707 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2708 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2709 /* 128 bpp PRT */
2710 tile[25] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2711 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2712 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2713 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
2714 NUM_BANKS(ADDR_SURF_8_BANK) |
2715 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2716 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2717 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
2718
2719 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
2720 WREG32(GB_TILE_MODE0 + (reg_offset * 4), tile[reg_offset]);
2721 break;
2722
2723 case CHIP_VERDE:
2724 case CHIP_OLAND:
2725 case CHIP_HAINAN:
2726 /* non-AA compressed depth or any compressed stencil */
2727 tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2728 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2729 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2730 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2731 NUM_BANKS(ADDR_SURF_16_BANK) |
2732 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2733 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2734 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
2735 /* 2xAA/4xAA compressed depth only */
2736 tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2737 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2738 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2739 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
2740 NUM_BANKS(ADDR_SURF_16_BANK) |
2741 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2742 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2743 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
2744 /* 8xAA compressed depth only */
2745 tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2746 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2747 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2748 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2749 NUM_BANKS(ADDR_SURF_16_BANK) |
2750 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2751 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2752 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
2753 /* 2xAA/4xAA compressed depth with stencil (for depth buffer) */
2754 tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2755 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2756 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2757 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
2758 NUM_BANKS(ADDR_SURF_16_BANK) |
2759 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2760 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2761 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
2762 /* Maps w/ a dimension less than the 2D macro-tile dimensions (for mipmapped depth textures) */
2763 tile[4] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2764 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2765 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2766 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2767 NUM_BANKS(ADDR_SURF_16_BANK) |
2768 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2769 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2770 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2771 /* Uncompressed 16bpp depth - and stencil buffer allocated with it */
2772 tile[5] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2773 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2774 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2775 TILE_SPLIT(split_equal_to_row_size) |
2776 NUM_BANKS(ADDR_SURF_16_BANK) |
2777 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2778 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2779 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2780 /* Uncompressed 32bpp depth - and stencil buffer allocated with it */
2781 tile[6] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2782 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2783 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2784 TILE_SPLIT(split_equal_to_row_size) |
2785 NUM_BANKS(ADDR_SURF_16_BANK) |
2786 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2787 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2788 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2789 /* Uncompressed 8bpp stencil without depth (drivers typically do not use) */
2790 tile[7] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2791 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2792 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2793 TILE_SPLIT(split_equal_to_row_size) |
2794 NUM_BANKS(ADDR_SURF_16_BANK) |
2795 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2796 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2797 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
2798 /* 1D and 1D Array Surfaces */
2799 tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
2800 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
2801 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2802 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2803 NUM_BANKS(ADDR_SURF_16_BANK) |
2804 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2805 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2806 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2807 /* Displayable maps. */
2808 tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2809 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
2810 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2811 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2812 NUM_BANKS(ADDR_SURF_16_BANK) |
2813 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2814 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2815 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2816 /* Display 8bpp. */
2817 tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2818 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
2819 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2820 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2821 NUM_BANKS(ADDR_SURF_16_BANK) |
2822 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2823 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2824 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
2825 /* Display 16bpp. */
2826 tile[11] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2827 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
2828 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2829 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2830 NUM_BANKS(ADDR_SURF_16_BANK) |
2831 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2832 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2833 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2834 /* Display 32bpp. */
2835 tile[12] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2836 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
2837 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2838 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
2839 NUM_BANKS(ADDR_SURF_16_BANK) |
2840 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2841 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2842 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2843 /* Thin. */
2844 tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2845 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2846 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2847 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2848 NUM_BANKS(ADDR_SURF_16_BANK) |
2849 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2850 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2851 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2852 /* Thin 8 bpp. */
2853 tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2854 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2855 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2856 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2857 NUM_BANKS(ADDR_SURF_16_BANK) |
2858 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2859 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2860 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2861 /* Thin 16 bpp. */
2862 tile[15] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2863 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2864 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2865 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2866 NUM_BANKS(ADDR_SURF_16_BANK) |
2867 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2868 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2869 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2870 /* Thin 32 bpp. */
2871 tile[16] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2872 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2873 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2874 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
2875 NUM_BANKS(ADDR_SURF_16_BANK) |
2876 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2877 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2878 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2879 /* Thin 64 bpp. */
2880 tile[17] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2881 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2882 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2883 TILE_SPLIT(split_equal_to_row_size) |
2884 NUM_BANKS(ADDR_SURF_16_BANK) |
2885 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2886 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2887 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2888 /* 8 bpp PRT. */
2889 tile[21] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2890 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2891 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2892 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2893 NUM_BANKS(ADDR_SURF_16_BANK) |
2894 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
2895 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2896 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2897 /* 16 bpp PRT */
2898 tile[22] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2899 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2900 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2901 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2902 NUM_BANKS(ADDR_SURF_16_BANK) |
2903 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2904 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2905 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
2906 /* 32 bpp PRT */
2907 tile[23] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2908 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2909 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2910 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2911 NUM_BANKS(ADDR_SURF_16_BANK) |
2912 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2913 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2914 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2915 /* 64 bpp PRT */
2916 tile[24] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2917 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2918 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2919 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
2920 NUM_BANKS(ADDR_SURF_16_BANK) |
2921 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2922 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2923 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2924 /* 128 bpp PRT */
2925 tile[25] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2926 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2927 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2928 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
2929 NUM_BANKS(ADDR_SURF_8_BANK) |
2930 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2931 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2932 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
2933
2934 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
2935 WREG32(GB_TILE_MODE0 + (reg_offset * 4), tile[reg_offset]);
2936 break;
2937
2938 default:
Alex Deucher0a96d722012-03-20 17:18:11 -04002939 DRM_ERROR("unknown asic: 0x%x\n", rdev->family);
Josh Poimboeuf102534b2016-03-11 08:18:24 -06002940 }
Alex Deucher0a96d722012-03-20 17:18:11 -04002941}
2942
Alex Deucher1a8ca752012-06-01 18:58:22 -04002943static void si_select_se_sh(struct radeon_device *rdev,
2944 u32 se_num, u32 sh_num)
2945{
2946 u32 data = INSTANCE_BROADCAST_WRITES;
2947
2948 if ((se_num == 0xffffffff) && (sh_num == 0xffffffff))
Alex Deucher79b52d62013-04-18 16:26:36 -04002949 data |= SH_BROADCAST_WRITES | SE_BROADCAST_WRITES;
Alex Deucher1a8ca752012-06-01 18:58:22 -04002950 else if (se_num == 0xffffffff)
2951 data |= SE_BROADCAST_WRITES | SH_INDEX(sh_num);
2952 else if (sh_num == 0xffffffff)
2953 data |= SH_BROADCAST_WRITES | SE_INDEX(se_num);
2954 else
2955 data |= SH_INDEX(sh_num) | SE_INDEX(se_num);
2956 WREG32(GRBM_GFX_INDEX, data);
2957}
2958
2959static u32 si_create_bitmask(u32 bit_width)
2960{
2961 u32 i, mask = 0;
2962
2963 for (i = 0; i < bit_width; i++) {
2964 mask <<= 1;
2965 mask |= 1;
2966 }
2967 return mask;
2968}
2969
/*
 * si_get_cu_enabled - bitmap of enabled compute units for the current SE/SH
 * @rdev: radeon device
 * @cu_per_sh: maximum number of CUs per shader array (bounds the result)
 *
 * Combines the hardware (CC) and driver/user (GC_USER) inactive-CU fields,
 * inverts them, and masks to @cu_per_sh bits so only meaningful CU slots
 * remain.  Reads the per-SE/SH instance selected by si_select_se_sh().
 */
static u32 si_get_cu_enabled(struct radeon_device *rdev, u32 cu_per_sh)
{
	u32 data, mask;

	data = RREG32(CC_GC_SHADER_ARRAY_CONFIG);
	/* bit 0 appears to gate validity of the fuse field; otherwise
	 * treat the hardware register as "no CUs fused off" */
	if (data & 1)
		data &= INACTIVE_CUS_MASK;
	else
		data = 0;
	/* merge in CUs disabled by software/user config */
	data |= RREG32(GC_USER_SHADER_ARRAY_CONFIG);

	data >>= INACTIVE_CUS_SHIFT;

	mask = si_create_bitmask(cu_per_sh);

	/* invert "inactive" to get the active-CU bitmap, clipped to range */
	return ~data & mask;
}
2987
2988static void si_setup_spi(struct radeon_device *rdev,
2989 u32 se_num, u32 sh_per_se,
2990 u32 cu_per_sh)
2991{
2992 int i, j, k;
2993 u32 data, mask, active_cu;
2994
2995 for (i = 0; i < se_num; i++) {
2996 for (j = 0; j < sh_per_se; j++) {
2997 si_select_se_sh(rdev, i, j);
2998 data = RREG32(SPI_STATIC_THREAD_MGMT_3);
2999 active_cu = si_get_cu_enabled(rdev, cu_per_sh);
3000
3001 mask = 1;
3002 for (k = 0; k < 16; k++) {
3003 mask <<= k;
3004 if (active_cu & mask) {
3005 data &= ~mask;
3006 WREG32(SPI_STATIC_THREAD_MGMT_3, data);
3007 break;
3008 }
3009 }
3010 }
3011 }
3012 si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
3013}
3014
/*
 * si_get_rb_disabled - bitmap of disabled render backends for this SE/SH
 * @rdev: radeon device
 * @max_rb_num_per_se: maximum render backends per shader engine
 * @sh_per_se: shader arrays per shader engine
 *
 * Combines hardware (CC) and driver/user (GC_USER) backend-disable fields
 * and masks the result to the number of RBs owned by one shader array
 * (max_rb_num_per_se / sh_per_se).  Reads the per-SE/SH instance selected
 * by si_select_se_sh().
 */
static u32 si_get_rb_disabled(struct radeon_device *rdev,
			      u32 max_rb_num_per_se,
			      u32 sh_per_se)
{
	u32 data, mask;

	data = RREG32(CC_RB_BACKEND_DISABLE);
	/* bit 0 appears to gate validity of the fuse field; otherwise
	 * treat the hardware register as "no RBs fused off" */
	if (data & 1)
		data &= BACKEND_DISABLE_MASK;
	else
		data = 0;
	/* merge in RBs disabled by software/user config */
	data |= RREG32(GC_USER_RB_BACKEND_DISABLE);

	data >>= BACKEND_DISABLE_SHIFT;

	mask = si_create_bitmask(max_rb_num_per_se / sh_per_se);

	return data & mask;
}
3034
/*
 * si_setup_rb - discover enabled render backends and program raster config
 * @rdev: radeon device
 * @se_num: number of shader engines
 * @sh_per_se: shader arrays per shader engine
 * @max_rb_num_per_se: maximum render backends per shader engine
 *
 * Builds a global disabled-RB bitmap by querying each SE/SH pair, derives
 * the enabled-RB mask (cached in rdev->config.si.backend_enable_mask for
 * userspace queries), and then programs PA_SC_RASTER_CONFIG per SE with an
 * RB mapping chosen from which of each pair of RBs survived harvesting.
 */
static void si_setup_rb(struct radeon_device *rdev,
			u32 se_num, u32 sh_per_se,
			u32 max_rb_num_per_se)
{
	int i, j;
	u32 data, mask;
	u32 disabled_rbs = 0;
	u32 enabled_rbs = 0;

	/* collect the disabled-RB bits of every SE/SH into one bitmap,
	 * TAHITI_RB_BITMAP_WIDTH_PER_SH bits per shader array */
	for (i = 0; i < se_num; i++) {
		for (j = 0; j < sh_per_se; j++) {
			si_select_se_sh(rdev, i, j);
			data = si_get_rb_disabled(rdev, max_rb_num_per_se, sh_per_se);
			disabled_rbs |= data << ((i * sh_per_se + j) * TAHITI_RB_BITMAP_WIDTH_PER_SH);
		}
	}
	si_select_se_sh(rdev, 0xffffffff, 0xffffffff);

	/* invert: every RB slot not marked disabled is enabled */
	mask = 1;
	for (i = 0; i < max_rb_num_per_se * se_num; i++) {
		if (!(disabled_rbs & mask))
			enabled_rbs |= mask;
		mask <<= 1;
	}

	/* cached for the RADEON_INFO ioctl / userspace drivers */
	rdev->config.si.backend_enable_mask = enabled_rbs;

	for (i = 0; i < se_num; i++) {
		/* program the raster config for all SHs of this SE */
		si_select_se_sh(rdev, i, 0xffffffff);
		data = 0;
		for (j = 0; j < sh_per_se; j++) {
			/* look at this SH's pair of RB enable bits and pick
			 * an RB mapping that routes to a live backend */
			switch (enabled_rbs & 3) {
			case 1:
				data |= (RASTER_CONFIG_RB_MAP_0 << (i * sh_per_se + j) * 2);
				break;
			case 2:
				data |= (RASTER_CONFIG_RB_MAP_3 << (i * sh_per_se + j) * 2);
				break;
			case 3:
			default:
				data |= (RASTER_CONFIG_RB_MAP_2 << (i * sh_per_se + j) * 2);
				break;
			}
			enabled_rbs >>= 2;
		}
		WREG32(PA_SC_RASTER_CONFIG, data);
	}
	si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
}
3084
/*
 * si_gpu_init - one-time graphics engine initialization for SI parts
 * @rdev: radeon device
 *
 * Fills in the per-ASIC configuration limits (shader engines, CUs, tile
 * pipes, FIFO sizes, GB_ADDR_CONFIG golden value), programs the address
 * config and tiling tables, discovers the enabled RB/CU topology, and
 * writes the HW defaults for the 3D engine.  Called once during hardware
 * startup, before the rings are brought up.
 */
static void si_gpu_init(struct radeon_device *rdev)
{
	u32 gb_addr_config = 0;
	u32 mc_shared_chmap, mc_arb_ramcfg;
	u32 sx_debug_1;
	u32 hdp_host_path_cntl;
	u32 tmp;
	int i, j;

	/* per-ASIC shader/tiling limits and the golden GB_ADDR_CONFIG */
	switch (rdev->family) {
	case CHIP_TAHITI:
		rdev->config.si.max_shader_engines = 2;
		rdev->config.si.max_tile_pipes = 12;
		rdev->config.si.max_cu_per_sh = 8;
		rdev->config.si.max_sh_per_se = 2;
		rdev->config.si.max_backends_per_se = 4;
		rdev->config.si.max_texture_channel_caches = 12;
		rdev->config.si.max_gprs = 256;
		rdev->config.si.max_gs_threads = 32;
		rdev->config.si.max_hw_contexts = 8;

		rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
		rdev->config.si.sc_prim_fifo_size_backend = 0x100;
		rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = TAHITI_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_PITCAIRN:
		rdev->config.si.max_shader_engines = 2;
		rdev->config.si.max_tile_pipes = 8;
		rdev->config.si.max_cu_per_sh = 5;
		rdev->config.si.max_sh_per_se = 2;
		rdev->config.si.max_backends_per_se = 4;
		rdev->config.si.max_texture_channel_caches = 8;
		rdev->config.si.max_gprs = 256;
		rdev->config.si.max_gs_threads = 32;
		rdev->config.si.max_hw_contexts = 8;

		rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
		rdev->config.si.sc_prim_fifo_size_backend = 0x100;
		rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = TAHITI_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_VERDE:
	default:
		rdev->config.si.max_shader_engines = 1;
		rdev->config.si.max_tile_pipes = 4;
		rdev->config.si.max_cu_per_sh = 5;
		rdev->config.si.max_sh_per_se = 2;
		rdev->config.si.max_backends_per_se = 4;
		rdev->config.si.max_texture_channel_caches = 4;
		rdev->config.si.max_gprs = 256;
		rdev->config.si.max_gs_threads = 32;
		rdev->config.si.max_hw_contexts = 8;

		rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
		rdev->config.si.sc_prim_fifo_size_backend = 0x40;
		rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = VERDE_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_OLAND:
		rdev->config.si.max_shader_engines = 1;
		rdev->config.si.max_tile_pipes = 4;
		rdev->config.si.max_cu_per_sh = 6;
		rdev->config.si.max_sh_per_se = 1;
		rdev->config.si.max_backends_per_se = 2;
		rdev->config.si.max_texture_channel_caches = 4;
		rdev->config.si.max_gprs = 256;
		rdev->config.si.max_gs_threads = 16;
		rdev->config.si.max_hw_contexts = 8;

		rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
		rdev->config.si.sc_prim_fifo_size_backend = 0x40;
		rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = VERDE_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_HAINAN:
		rdev->config.si.max_shader_engines = 1;
		rdev->config.si.max_tile_pipes = 4;
		rdev->config.si.max_cu_per_sh = 5;
		rdev->config.si.max_sh_per_se = 1;
		rdev->config.si.max_backends_per_se = 1;
		rdev->config.si.max_texture_channel_caches = 2;
		rdev->config.si.max_gprs = 256;
		rdev->config.si.max_gs_threads = 16;
		rdev->config.si.max_hw_contexts = 8;

		rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
		rdev->config.si.sc_prim_fifo_size_backend = 0x40;
		rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = HAINAN_GB_ADDR_CONFIG_GOLDEN;
		break;
	}

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}

	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
	WREG32(SRBM_INT_CNTL, 1);
	WREG32(SRBM_INT_ACK, 1);

	evergreen_fix_pci_max_read_req_size(rdev);

	WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN);

	mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
	mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);

	/* derive memory geometry from the MC arbiter config */
	rdev->config.si.num_tile_pipes = rdev->config.si.max_tile_pipes;
	rdev->config.si.mem_max_burst_length_bytes = 256;
	tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT;
	rdev->config.si.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
	if (rdev->config.si.mem_row_size_in_kb > 4)
		rdev->config.si.mem_row_size_in_kb = 4;
	/* XXX use MC settings? */
	rdev->config.si.shader_engine_tile_size = 32;
	rdev->config.si.num_gpus = 1;
	rdev->config.si.multi_gpu_tile_size = 64;

	/* fix up row size */
	gb_addr_config &= ~ROW_SIZE_MASK;
	switch (rdev->config.si.mem_row_size_in_kb) {
	case 1:
	default:
		gb_addr_config |= ROW_SIZE(0);
		break;
	case 2:
		gb_addr_config |= ROW_SIZE(1);
		break;
	case 4:
		gb_addr_config |= ROW_SIZE(2);
		break;
	}

	/* setup tiling info dword.  gb_addr_config is not adequate since it does
	 * not have bank info, so create a custom tiling dword.
	 * bits 3:0   num_pipes
	 * bits 7:4   num_banks
	 * bits 11:8  group_size
	 * bits 15:12 row_size
	 */
	rdev->config.si.tile_config = 0;
	switch (rdev->config.si.num_tile_pipes) {
	case 1:
		rdev->config.si.tile_config |= (0 << 0);
		break;
	case 2:
		rdev->config.si.tile_config |= (1 << 0);
		break;
	case 4:
		rdev->config.si.tile_config |= (2 << 0);
		break;
	case 8:
	default:
		/* XXX what about 12? */
		rdev->config.si.tile_config |= (3 << 0);
		break;
	}
	switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) {
	case 0: /* four banks */
		rdev->config.si.tile_config |= 0 << 4;
		break;
	case 1: /* eight banks */
		rdev->config.si.tile_config |= 1 << 4;
		break;
	case 2: /* sixteen banks */
	default:
		rdev->config.si.tile_config |= 2 << 4;
		break;
	}
	rdev->config.si.tile_config |=
		((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
	rdev->config.si.tile_config |=
		((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12;

	/* mirror the address config into every block that decodes addresses */
	WREG32(GB_ADDR_CONFIG, gb_addr_config);
	WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
	WREG32(DMIF_ADDR_CALC, gb_addr_config);
	WREG32(HDP_ADDR_CONFIG, gb_addr_config);
	WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config);
	WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config);
	if (rdev->has_uvd) {
		WREG32(UVD_UDEC_ADDR_CONFIG, gb_addr_config);
		WREG32(UVD_UDEC_DB_ADDR_CONFIG, gb_addr_config);
		WREG32(UVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config);
	}

	si_tiling_mode_table_init(rdev);

	si_setup_rb(rdev, rdev->config.si.max_shader_engines,
		    rdev->config.si.max_sh_per_se,
		    rdev->config.si.max_backends_per_se);

	si_setup_spi(rdev, rdev->config.si.max_shader_engines,
		     rdev->config.si.max_sh_per_se,
		     rdev->config.si.max_cu_per_sh);

	/* total number of active CUs across all SEs/SHs (for info queries) */
	rdev->config.si.active_cus = 0;
	for (i = 0; i < rdev->config.si.max_shader_engines; i++) {
		for (j = 0; j < rdev->config.si.max_sh_per_se; j++) {
			rdev->config.si.active_cus +=
				hweight32(si_get_cu_active_bitmap(rdev, i, j));
		}
	}

	/* set HW defaults for 3D engine */
	WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
				     ROQ_IB2_START(0x2b)));
	WREG32(CP_MEQ_THRESHOLDS, MEQ1_START(0x30) | MEQ2_START(0x60));

	/* read-modify-write with no changed bits; presumably just re-latches
	 * the register — TODO confirm against the register spec */
	sx_debug_1 = RREG32(SX_DEBUG_1);
	WREG32(SX_DEBUG_1, sx_debug_1);

	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));

	/* scan-converter FIFO sizes from the per-ASIC config above */
	WREG32(PA_SC_FIFO_SIZE, (SC_FRONTEND_PRIM_FIFO_SIZE(rdev->config.si.sc_prim_fifo_size_frontend) |
				 SC_BACKEND_PRIM_FIFO_SIZE(rdev->config.si.sc_prim_fifo_size_backend) |
				 SC_HIZ_TILE_FIFO_SIZE(rdev->config.si.sc_hiz_tile_fifo_size) |
				 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.si.sc_earlyz_tile_fifo_size)));

	WREG32(VGT_NUM_INSTANCES, 1);

	WREG32(CP_PERFMON_CNTL, 0);

	WREG32(SQ_CONFIG, 0);

	WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
					  FORCE_EOV_MAX_REZ_CNT(255)));

	WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC) |
	       AUTO_INVLD_EN(ES_AND_GS_AUTO));

	WREG32(VGT_GS_VERTEX_REUSE, 16);
	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);

	/* zero all CB performance counter selects */
	WREG32(CB_PERFCOUNTER0_SELECT0, 0);
	WREG32(CB_PERFCOUNTER0_SELECT1, 0);
	WREG32(CB_PERFCOUNTER1_SELECT0, 0);
	WREG32(CB_PERFCOUNTER1_SELECT1, 0);
	WREG32(CB_PERFCOUNTER2_SELECT0, 0);
	WREG32(CB_PERFCOUNTER2_SELECT1, 0);
	WREG32(CB_PERFCOUNTER3_SELECT0, 0);
	WREG32(CB_PERFCOUNTER3_SELECT1, 0);

	tmp = RREG32(HDP_MISC_CNTL);
	tmp |= HDP_FLUSH_INVALIDATE_CACHE;
	WREG32(HDP_MISC_CNTL, tmp);

	hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
	WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);

	WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));

	/* let the register writes settle before continuing init */
	udelay(50);
}
Alex Deucherc476dde2012-03-20 17:18:12 -04003350
Alex Deucher48c0c902012-03-20 17:18:19 -04003351/*
 * GPU scratch register helper functions.
3353 */
3354static void si_scratch_init(struct radeon_device *rdev)
3355{
3356 int i;
3357
3358 rdev->scratch.num_reg = 7;
3359 rdev->scratch.reg_base = SCRATCH_REG0;
3360 for (i = 0; i < rdev->scratch.num_reg; i++) {
3361 rdev->scratch.free[i] = true;
3362 rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
3363 }
3364}
3365
/*
 * si_fence_ring_emit - emit a fence on the gfx ring
 * @rdev: radeon device
 * @fence: fence to emit
 *
 * Emits a SURFACE_SYNC to flush the shader caches over GART, then an
 * EVENT_WRITE_EOP that writes @fence->seq to the fence driver's GPU
 * address and raises an interrupt.  The packet stream layout (count and
 * order of dwords) must match what the CP expects for each packet type.
 */
void si_fence_ring_emit(struct radeon_device *rdev,
			struct radeon_fence *fence)
{
	struct radeon_ring *ring = &rdev->ring[fence->ring];
	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;

	/* flush read cache over gart */
	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
	/* invalidate TC L1/L2 and the shader K$/I$ */
	radeon_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
			  PACKET3_TC_ACTION_ENA |
			  PACKET3_SH_KCACHE_ACTION_ENA |
			  PACKET3_SH_ICACHE_ACTION_ENA);
	radeon_ring_write(ring, 0xFFFFFFFF); /* CP_COHER_SIZE: full range */
	radeon_ring_write(ring, 0);          /* CP_COHER_BASE */
	radeon_ring_write(ring, 10); /* poll interval */
	/* EVENT_WRITE_EOP - flush caches, send int */
	radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
	radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5));
	radeon_ring_write(ring, lower_32_bits(addr));
	/* DATA_SEL(1): write 32-bit fence value; INT_SEL(2): interrupt on write */
	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
	radeon_ring_write(ring, fence->seq);
	radeon_ring_write(ring, 0);
}
3392
3393/*
3394 * IB stuff
3395 */
/**
 * si_ring_ib_execute - emit an indirect buffer on the ring
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to schedule
 *
 * Emits an INDIRECT_BUFFER (or INDIRECT_BUFFER_CONST for const IBs)
 * packet pointing at the IB, preceded by bookkeeping packets, and
 * followed by a GART cache flush for non-const IBs.
 */
void si_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->ring];
	unsigned vm_id = ib->vm ? ib->vm->ids[ib->ring].id : 0;
	u32 header;

	if (ib->is_const_ib) {
		/* set switch buffer packet before const IB */
		radeon_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
		radeon_ring_write(ring, 0);

		header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
	} else {
		u32 next_rptr;
		if (ring->rptr_save_reg) {
			/* 3 words for this packet + 4 for the IB packet + 8 for the flush */
			next_rptr = ring->wptr + 3 + 4 + 8;
			radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
			radeon_ring_write(ring, ((ring->rptr_save_reg -
						  PACKET3_SET_CONFIG_REG_START) >> 2));
			radeon_ring_write(ring, next_rptr);
		} else if (rdev->wb.enabled) {
			/* 5 words for WRITE_DATA + 4 for the IB packet + 8 for the flush */
			next_rptr = ring->wptr + 5 + 4 + 8;
			radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
			radeon_ring_write(ring, (1 << 8));
			radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
			radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr));
			radeon_ring_write(ring, next_rptr);
		}

		header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
	}

	radeon_ring_write(ring, header);
	radeon_ring_write(ring,
#ifdef __BIG_ENDIAN
			  (2 << 0) |
#endif
			  (ib->gpu_addr & 0xFFFFFFFC));
	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
	radeon_ring_write(ring, ib->length_dw | (vm_id << 24));

	if (!ib->is_const_ib) {
		/* flush read cache over gart for this vmid */
		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
		radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
		radeon_ring_write(ring, vm_id);
		radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
		radeon_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
				  PACKET3_TC_ACTION_ENA |
				  PACKET3_SH_KCACHE_ACTION_ENA |
				  PACKET3_SH_ICACHE_ACTION_ENA);
		radeon_ring_write(ring, 0xFFFFFFFF);
		radeon_ring_write(ring, 0);
		radeon_ring_write(ring, 10); /* poll interval */
	}
}
3452
3453/*
Alex Deucher48c0c902012-03-20 17:18:19 -04003454 * CP.
3455 */
3456static void si_cp_enable(struct radeon_device *rdev, bool enable)
3457{
3458 if (enable)
3459 WREG32(CP_ME_CNTL, 0);
3460 else {
Alex Deucher50efa512014-01-27 11:26:33 -05003461 if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
3462 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
Alex Deucher48c0c902012-03-20 17:18:19 -04003463 WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT));
3464 WREG32(SCRATCH_UMSK, 0);
Alex Deucher8c5fd7e2012-12-04 15:28:18 -05003465 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
3466 rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
3467 rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
Alex Deucher48c0c902012-03-20 17:18:19 -04003468 }
3469 udelay(50);
3470}
3471
3472static int si_cp_load_microcode(struct radeon_device *rdev)
3473{
Alex Deucher48c0c902012-03-20 17:18:19 -04003474 int i;
3475
Alex Deucher629bd332014-06-25 18:41:34 -04003476 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw)
Alex Deucher48c0c902012-03-20 17:18:19 -04003477 return -EINVAL;
3478
3479 si_cp_enable(rdev, false);
3480
Alex Deucher629bd332014-06-25 18:41:34 -04003481 if (rdev->new_fw) {
3482 const struct gfx_firmware_header_v1_0 *pfp_hdr =
3483 (const struct gfx_firmware_header_v1_0 *)rdev->pfp_fw->data;
3484 const struct gfx_firmware_header_v1_0 *ce_hdr =
3485 (const struct gfx_firmware_header_v1_0 *)rdev->ce_fw->data;
3486 const struct gfx_firmware_header_v1_0 *me_hdr =
3487 (const struct gfx_firmware_header_v1_0 *)rdev->me_fw->data;
3488 const __le32 *fw_data;
3489 u32 fw_size;
Alex Deucher48c0c902012-03-20 17:18:19 -04003490
Alex Deucher629bd332014-06-25 18:41:34 -04003491 radeon_ucode_print_gfx_hdr(&pfp_hdr->header);
3492 radeon_ucode_print_gfx_hdr(&ce_hdr->header);
3493 radeon_ucode_print_gfx_hdr(&me_hdr->header);
Alex Deucher48c0c902012-03-20 17:18:19 -04003494
Alex Deucher629bd332014-06-25 18:41:34 -04003495 /* PFP */
3496 fw_data = (const __le32 *)
3497 (rdev->pfp_fw->data + le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
3498 fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4;
3499 WREG32(CP_PFP_UCODE_ADDR, 0);
3500 for (i = 0; i < fw_size; i++)
3501 WREG32(CP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
3502 WREG32(CP_PFP_UCODE_ADDR, 0);
3503
3504 /* CE */
3505 fw_data = (const __le32 *)
3506 (rdev->ce_fw->data + le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
3507 fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4;
3508 WREG32(CP_CE_UCODE_ADDR, 0);
3509 for (i = 0; i < fw_size; i++)
3510 WREG32(CP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
3511 WREG32(CP_CE_UCODE_ADDR, 0);
3512
3513 /* ME */
3514 fw_data = (const __be32 *)
3515 (rdev->me_fw->data + le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
3516 fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4;
3517 WREG32(CP_ME_RAM_WADDR, 0);
3518 for (i = 0; i < fw_size; i++)
3519 WREG32(CP_ME_RAM_DATA, le32_to_cpup(fw_data++));
3520 WREG32(CP_ME_RAM_WADDR, 0);
3521 } else {
3522 const __be32 *fw_data;
3523
3524 /* PFP */
3525 fw_data = (const __be32 *)rdev->pfp_fw->data;
3526 WREG32(CP_PFP_UCODE_ADDR, 0);
3527 for (i = 0; i < SI_PFP_UCODE_SIZE; i++)
3528 WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
3529 WREG32(CP_PFP_UCODE_ADDR, 0);
3530
3531 /* CE */
3532 fw_data = (const __be32 *)rdev->ce_fw->data;
3533 WREG32(CP_CE_UCODE_ADDR, 0);
3534 for (i = 0; i < SI_CE_UCODE_SIZE; i++)
3535 WREG32(CP_CE_UCODE_DATA, be32_to_cpup(fw_data++));
3536 WREG32(CP_CE_UCODE_ADDR, 0);
3537
3538 /* ME */
3539 fw_data = (const __be32 *)rdev->me_fw->data;
3540 WREG32(CP_ME_RAM_WADDR, 0);
3541 for (i = 0; i < SI_PM4_UCODE_SIZE; i++)
3542 WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
3543 WREG32(CP_ME_RAM_WADDR, 0);
3544 }
Alex Deucher48c0c902012-03-20 17:18:19 -04003545
3546 WREG32(CP_PFP_UCODE_ADDR, 0);
3547 WREG32(CP_CE_UCODE_ADDR, 0);
3548 WREG32(CP_ME_RAM_WADDR, 0);
3549 WREG32(CP_ME_RAM_RADDR, 0);
3550 return 0;
3551}
3552
3553static int si_cp_start(struct radeon_device *rdev)
3554{
3555 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
3556 int r, i;
3557
3558 r = radeon_ring_lock(rdev, ring, 7 + 4);
3559 if (r) {
3560 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
3561 return r;
3562 }
3563 /* init the CP */
3564 radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
3565 radeon_ring_write(ring, 0x1);
3566 radeon_ring_write(ring, 0x0);
3567 radeon_ring_write(ring, rdev->config.si.max_hw_contexts - 1);
3568 radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
3569 radeon_ring_write(ring, 0);
3570 radeon_ring_write(ring, 0);
3571
3572 /* init the CE partitions */
3573 radeon_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
3574 radeon_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
3575 radeon_ring_write(ring, 0xc000);
3576 radeon_ring_write(ring, 0xe000);
Michel Dänzer1538a9e2014-08-18 17:34:55 +09003577 radeon_ring_unlock_commit(rdev, ring, false);
Alex Deucher48c0c902012-03-20 17:18:19 -04003578
3579 si_cp_enable(rdev, true);
3580
3581 r = radeon_ring_lock(rdev, ring, si_default_size + 10);
3582 if (r) {
3583 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
3584 return r;
3585 }
3586
3587 /* setup clear context state */
3588 radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
3589 radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
3590
3591 for (i = 0; i < si_default_size; i++)
3592 radeon_ring_write(ring, si_default_state[i]);
3593
3594 radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
3595 radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
3596
3597 /* set clear context state */
3598 radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
3599 radeon_ring_write(ring, 0);
3600
3601 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
3602 radeon_ring_write(ring, 0x00000316);
3603 radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
3604 radeon_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */
3605
Michel Dänzer1538a9e2014-08-18 17:34:55 +09003606 radeon_ring_unlock_commit(rdev, ring, false);
Alex Deucher48c0c902012-03-20 17:18:19 -04003607
3608 for (i = RADEON_RING_TYPE_GFX_INDEX; i <= CAYMAN_RING_TYPE_CP2_INDEX; ++i) {
3609 ring = &rdev->ring[i];
3610 r = radeon_ring_lock(rdev, ring, 2);
3611
3612 /* clear the compute context state */
3613 radeon_ring_write(ring, PACKET3_COMPUTE(PACKET3_CLEAR_STATE, 0));
3614 radeon_ring_write(ring, 0);
3615
Michel Dänzer1538a9e2014-08-18 17:34:55 +09003616 radeon_ring_unlock_commit(rdev, ring, false);
Alex Deucher48c0c902012-03-20 17:18:19 -04003617 }
3618
3619 return 0;
3620}
3621
3622static void si_cp_fini(struct radeon_device *rdev)
3623{
Christian König45df6802012-07-06 16:22:55 +02003624 struct radeon_ring *ring;
Alex Deucher48c0c902012-03-20 17:18:19 -04003625 si_cp_enable(rdev, false);
Christian König45df6802012-07-06 16:22:55 +02003626
3627 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
3628 radeon_ring_fini(rdev, ring);
3629 radeon_scratch_free(rdev, ring->rptr_save_reg);
3630
3631 ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
3632 radeon_ring_fini(rdev, ring);
3633 radeon_scratch_free(rdev, ring->rptr_save_reg);
3634
3635 ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
3636 radeon_ring_fini(rdev, ring);
3637 radeon_scratch_free(rdev, ring->rptr_save_reg);
Alex Deucher48c0c902012-03-20 17:18:19 -04003638}
3639
/**
 * si_cp_resume - program and start the three CP ring buffers
 *
 * @rdev: radeon_device pointer
 *
 * Programs ring 0 (gfx + compute) and the two compute-only rings
 * (CP_RB1/CP_RB2): ring size, read/write pointers, rptr writeback
 * address, then starts the CP via si_cp_start() and ring-tests all
 * three rings.  Returns 0 on success; a gfx ring test failure is
 * fatal, compute ring failures only leave those rings not-ready.
 */
static int si_cp_resume(struct radeon_device *rdev)
{
	struct radeon_ring *ring;
	u32 tmp;
	u32 rb_bufsz;
	int r;

	si_enable_gui_idle_interrupt(rdev, false);

	WREG32(CP_SEM_WAIT_TIMER, 0x0);
	WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);

	/* Set the write pointer delay */
	WREG32(CP_RB_WPTR_DELAY, 0);

	WREG32(CP_DEBUG, 0);
	WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);

	/* ring 0 - compute and gfx */
	/* Set ring buffer size */
	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	rb_bufsz = order_base_2(ring->ring_size / 8);
	tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
	tmp |= BUF_SWAP_32BIT;
#endif
	WREG32(CP_RB0_CNTL, tmp);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA);
	ring->wptr = 0;
	WREG32(CP_RB0_WPTR, ring->wptr);

	/* set the wb address whether it's enabled or not */
	WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
	WREG32(CP_RB0_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);

	if (rdev->wb.enabled)
		WREG32(SCRATCH_UMSK, 0xff);
	else {
		/* no writeback: CP must not update rptr in memory */
		tmp |= RB_NO_UPDATE;
		WREG32(SCRATCH_UMSK, 0);
	}

	mdelay(1);
	WREG32(CP_RB0_CNTL, tmp);

	WREG32(CP_RB0_BASE, ring->gpu_addr >> 8);

	/* ring1 - compute only */
	/* Set ring buffer size */
	ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
	rb_bufsz = order_base_2(ring->ring_size / 8);
	tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
	tmp |= BUF_SWAP_32BIT;
#endif
	WREG32(CP_RB1_CNTL, tmp);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(CP_RB1_CNTL, tmp | RB_RPTR_WR_ENA);
	ring->wptr = 0;
	WREG32(CP_RB1_WPTR, ring->wptr);

	/* set the wb address whether it's enabled or not */
	WREG32(CP_RB1_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFFFFFFFC);
	WREG32(CP_RB1_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFF);

	mdelay(1);
	WREG32(CP_RB1_CNTL, tmp);

	WREG32(CP_RB1_BASE, ring->gpu_addr >> 8);

	/* ring2 - compute only */
	/* Set ring buffer size */
	ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
	rb_bufsz = order_base_2(ring->ring_size / 8);
	tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
	tmp |= BUF_SWAP_32BIT;
#endif
	WREG32(CP_RB2_CNTL, tmp);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(CP_RB2_CNTL, tmp | RB_RPTR_WR_ENA);
	ring->wptr = 0;
	WREG32(CP_RB2_WPTR, ring->wptr);

	/* set the wb address whether it's enabled or not */
	WREG32(CP_RB2_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFFFFFFFC);
	WREG32(CP_RB2_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFF);

	mdelay(1);
	WREG32(CP_RB2_CNTL, tmp);

	WREG32(CP_RB2_BASE, ring->gpu_addr >> 8);

	/* start the rings */
	si_cp_start(rdev);
	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = true;
	rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = true;
	rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = true;
	r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
	if (r) {
		rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
		rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
		rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
		return r;
	}
	r = radeon_ring_test(rdev, CAYMAN_RING_TYPE_CP1_INDEX, &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]);
	if (r) {
		rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
	}
	r = radeon_ring_test(rdev, CAYMAN_RING_TYPE_CP2_INDEX, &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]);
	if (r) {
		rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
	}

	si_enable_gui_idle_interrupt(rdev, true);

	/* gfx ring is the copy ring again: expose full VRAM to TTM */
	if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
		radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);

	return 0;
}
3765
Christian König2483b4e2013-08-13 11:56:54 +02003766u32 si_gpu_check_soft_reset(struct radeon_device *rdev)
Alex Deucher014bb202013-01-18 19:36:20 -05003767{
3768 u32 reset_mask = 0;
3769 u32 tmp;
3770
3771 /* GRBM_STATUS */
3772 tmp = RREG32(GRBM_STATUS);
3773 if (tmp & (PA_BUSY | SC_BUSY |
3774 BCI_BUSY | SX_BUSY |
3775 TA_BUSY | VGT_BUSY |
3776 DB_BUSY | CB_BUSY |
3777 GDS_BUSY | SPI_BUSY |
3778 IA_BUSY | IA_BUSY_NO_DMA))
3779 reset_mask |= RADEON_RESET_GFX;
3780
3781 if (tmp & (CF_RQ_PENDING | PF_RQ_PENDING |
3782 CP_BUSY | CP_COHERENCY_BUSY))
3783 reset_mask |= RADEON_RESET_CP;
3784
3785 if (tmp & GRBM_EE_BUSY)
3786 reset_mask |= RADEON_RESET_GRBM | RADEON_RESET_GFX | RADEON_RESET_CP;
3787
3788 /* GRBM_STATUS2 */
3789 tmp = RREG32(GRBM_STATUS2);
3790 if (tmp & (RLC_RQ_PENDING | RLC_BUSY))
3791 reset_mask |= RADEON_RESET_RLC;
3792
3793 /* DMA_STATUS_REG 0 */
3794 tmp = RREG32(DMA_STATUS_REG + DMA0_REGISTER_OFFSET);
3795 if (!(tmp & DMA_IDLE))
3796 reset_mask |= RADEON_RESET_DMA;
3797
3798 /* DMA_STATUS_REG 1 */
3799 tmp = RREG32(DMA_STATUS_REG + DMA1_REGISTER_OFFSET);
3800 if (!(tmp & DMA_IDLE))
3801 reset_mask |= RADEON_RESET_DMA1;
3802
3803 /* SRBM_STATUS2 */
3804 tmp = RREG32(SRBM_STATUS2);
3805 if (tmp & DMA_BUSY)
3806 reset_mask |= RADEON_RESET_DMA;
3807
3808 if (tmp & DMA1_BUSY)
3809 reset_mask |= RADEON_RESET_DMA1;
3810
3811 /* SRBM_STATUS */
3812 tmp = RREG32(SRBM_STATUS);
3813
3814 if (tmp & IH_BUSY)
3815 reset_mask |= RADEON_RESET_IH;
3816
3817 if (tmp & SEM_BUSY)
3818 reset_mask |= RADEON_RESET_SEM;
3819
3820 if (tmp & GRBM_RQ_PENDING)
3821 reset_mask |= RADEON_RESET_GRBM;
3822
3823 if (tmp & VMC_BUSY)
3824 reset_mask |= RADEON_RESET_VMC;
3825
3826 if (tmp & (MCB_BUSY | MCB_NON_DISPLAY_BUSY |
3827 MCC_BUSY | MCD_BUSY))
3828 reset_mask |= RADEON_RESET_MC;
3829
3830 if (evergreen_is_display_hung(rdev))
3831 reset_mask |= RADEON_RESET_DISPLAY;
3832
3833 /* VM_L2_STATUS */
3834 tmp = RREG32(VM_L2_STATUS);
3835 if (tmp & L2_BUSY)
3836 reset_mask |= RADEON_RESET_VMC;
3837
Alex Deucherd808fc82013-02-28 10:03:08 -05003838 /* Skip MC reset as it's mostly likely not hung, just busy */
3839 if (reset_mask & RADEON_RESET_MC) {
3840 DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
3841 reset_mask &= ~RADEON_RESET_MC;
3842 }
3843
Alex Deucher014bb202013-01-18 19:36:20 -05003844 return reset_mask;
3845}
3846
/**
 * si_gpu_soft_reset - soft-reset the requested GPU blocks
 *
 * @rdev: radeon_device pointer
 * @reset_mask: RADEON_RESET_* mask of blocks to reset
 *
 * Stops CG/PG, the RLC, the CP and the DMA engines, stops MC access,
 * then pulses the corresponding bits in GRBM_SOFT_RESET and
 * SRBM_SOFT_RESET before resuming the MC.  Ordering of the steps is
 * hardware-mandated; do not reorder.
 */
static void si_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
{
	struct evergreen_mc_save save;
	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
	u32 tmp;

	if (reset_mask == 0)
		return;

	dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);

	evergreen_print_gpu_status_regs(rdev);
	dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
		 RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR));
	dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
		 RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));

	/* disable PG/CG */
	si_fini_pg(rdev);
	si_fini_cg(rdev);

	/* stop the rlc */
	si_rlc_stop(rdev);

	/* Disable CP parsing/prefetching */
	WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT);

	if (reset_mask & RADEON_RESET_DMA) {
		/* dma0 */
		tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
		tmp &= ~DMA_RB_ENABLE;
		WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp);
	}
	if (reset_mask & RADEON_RESET_DMA1) {
		/* dma1 */
		tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
		tmp &= ~DMA_RB_ENABLE;
		WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp);
	}

	udelay(50);

	/* stop memory access before pulsing the reset bits */
	evergreen_mc_stop(rdev, &save);
	if (evergreen_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
	}

	if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE | RADEON_RESET_CP)) {
		grbm_soft_reset = SOFT_RESET_CB |
			SOFT_RESET_DB |
			SOFT_RESET_GDS |
			SOFT_RESET_PA |
			SOFT_RESET_SC |
			SOFT_RESET_BCI |
			SOFT_RESET_SPI |
			SOFT_RESET_SX |
			SOFT_RESET_TC |
			SOFT_RESET_TA |
			SOFT_RESET_VGT |
			SOFT_RESET_IA;
	}

	if (reset_mask & RADEON_RESET_CP) {
		grbm_soft_reset |= SOFT_RESET_CP | SOFT_RESET_VGT;

		srbm_soft_reset |= SOFT_RESET_GRBM;
	}

	if (reset_mask & RADEON_RESET_DMA)
		srbm_soft_reset |= SOFT_RESET_DMA;

	if (reset_mask & RADEON_RESET_DMA1)
		srbm_soft_reset |= SOFT_RESET_DMA1;

	if (reset_mask & RADEON_RESET_DISPLAY)
		srbm_soft_reset |= SOFT_RESET_DC;

	if (reset_mask & RADEON_RESET_RLC)
		grbm_soft_reset |= SOFT_RESET_RLC;

	if (reset_mask & RADEON_RESET_SEM)
		srbm_soft_reset |= SOFT_RESET_SEM;

	if (reset_mask & RADEON_RESET_IH)
		srbm_soft_reset |= SOFT_RESET_IH;

	if (reset_mask & RADEON_RESET_GRBM)
		srbm_soft_reset |= SOFT_RESET_GRBM;

	if (reset_mask & RADEON_RESET_VMC)
		srbm_soft_reset |= SOFT_RESET_VMC;

	if (reset_mask & RADEON_RESET_MC)
		srbm_soft_reset |= SOFT_RESET_MC;

	if (grbm_soft_reset) {
		/* pulse the GRBM reset bits: set, settle, clear */
		tmp = RREG32(GRBM_SOFT_RESET);
		tmp |= grbm_soft_reset;
		dev_info(rdev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(GRBM_SOFT_RESET, tmp);
		tmp = RREG32(GRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~grbm_soft_reset;
		WREG32(GRBM_SOFT_RESET, tmp);
		tmp = RREG32(GRBM_SOFT_RESET);
	}

	if (srbm_soft_reset) {
		/* pulse the SRBM reset bits: set, settle, clear */
		tmp = RREG32(SRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(SRBM_SOFT_RESET, tmp);
		tmp = RREG32(SRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(SRBM_SOFT_RESET, tmp);
		tmp = RREG32(SRBM_SOFT_RESET);
	}

	/* Wait a little for things to settle down */
	udelay(50);

	evergreen_mc_resume(rdev, &save);
	udelay(50);

	evergreen_print_gpu_status_regs(rdev);
}
3978
Alex Deucher4a5c8ea2013-11-15 16:35:55 -05003979static void si_set_clk_bypass_mode(struct radeon_device *rdev)
3980{
3981 u32 tmp, i;
3982
3983 tmp = RREG32(CG_SPLL_FUNC_CNTL);
3984 tmp |= SPLL_BYPASS_EN;
3985 WREG32(CG_SPLL_FUNC_CNTL, tmp);
3986
3987 tmp = RREG32(CG_SPLL_FUNC_CNTL_2);
3988 tmp |= SPLL_CTLREQ_CHG;
3989 WREG32(CG_SPLL_FUNC_CNTL_2, tmp);
3990
3991 for (i = 0; i < rdev->usec_timeout; i++) {
3992 if (RREG32(SPLL_STATUS) & SPLL_CHG_STATUS)
3993 break;
3994 udelay(1);
3995 }
3996
3997 tmp = RREG32(CG_SPLL_FUNC_CNTL_2);
3998 tmp &= ~(SPLL_CTLREQ_CHG | SCLK_MUX_UPDATE);
3999 WREG32(CG_SPLL_FUNC_CNTL_2, tmp);
4000
4001 tmp = RREG32(MPLL_CNTL_MODE);
4002 tmp &= ~MPLL_MCLK_SEL;
4003 WREG32(MPLL_CNTL_MODE, tmp);
4004}
4005
/**
 * si_spll_powerdown - power down the SPLL
 *
 * @rdev: radeon_device pointer
 *
 * Takes software control of the SPLL, asserts reset and sleep, then
 * returns control to hardware.  Called after switching clocks to
 * bypass, in preparation for a PCI config reset.
 */
static void si_spll_powerdown(struct radeon_device *rdev)
{
	u32 tmp;

	tmp = RREG32(SPLL_CNTL_MODE);
	tmp |= SPLL_SW_DIR_CONTROL;
	WREG32(SPLL_CNTL_MODE, tmp);

	tmp = RREG32(CG_SPLL_FUNC_CNTL);
	tmp |= SPLL_RESET;
	WREG32(CG_SPLL_FUNC_CNTL, tmp);

	tmp = RREG32(CG_SPLL_FUNC_CNTL);
	tmp |= SPLL_SLEEP;
	WREG32(CG_SPLL_FUNC_CNTL, tmp);

	tmp = RREG32(SPLL_CNTL_MODE);
	tmp &= ~SPLL_SW_DIR_CONTROL;
	WREG32(SPLL_CNTL_MODE, tmp);
}
4026
/**
 * si_gpu_pci_config_reset - reset the asic via PCI config space
 *
 * @rdev: radeon_device pointer
 *
 * Quiesces the GPU (CG/PG off, CP halted, DMA rings disabled, RLC
 * stopped, MC access stopped), switches clocks to bypass and powers
 * down the SPLL, disables bus mastering and triggers a PCI config
 * reset, then polls CONFIG_MEMSIZE until the asic responds again.
 */
static void si_gpu_pci_config_reset(struct radeon_device *rdev)
{
	struct evergreen_mc_save save;
	u32 tmp, i;

	dev_info(rdev->dev, "GPU pci config reset\n");

	/* disable dpm? */

	/* disable cg/pg */
	si_fini_pg(rdev);
	si_fini_cg(rdev);

	/* Disable CP parsing/prefetching */
	WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT);
	/* dma0 */
	tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
	tmp &= ~DMA_RB_ENABLE;
	WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp);
	/* dma1 */
	tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
	tmp &= ~DMA_RB_ENABLE;
	WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp);
	/* XXX other engines? */

	/* halt the rlc, disable cp internal ints */
	si_rlc_stop(rdev);

	udelay(50);

	/* disable mem access */
	evergreen_mc_stop(rdev, &save);
	if (evergreen_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out !\n");
	}

	/* set mclk/sclk to bypass */
	si_set_clk_bypass_mode(rdev);
	/* powerdown spll */
	si_spll_powerdown(rdev);
	/* disable BM */
	pci_clear_master(rdev->pdev);
	/* reset */
	radeon_pci_config_reset(rdev);
	/* wait for asic to come out of reset */
	for (i = 0; i < rdev->usec_timeout; i++) {
		/* CONFIG_MEMSIZE reads all-ones while the asic is in reset */
		if (RREG32(CONFIG_MEMSIZE) != 0xffffffff)
			break;
		udelay(1);
	}
}
4078
Jérome Glisse71fe2892016-03-18 16:58:38 +01004079int si_asic_reset(struct radeon_device *rdev, bool hard)
Alex Deucherc476dde2012-03-20 17:18:12 -04004080{
Alex Deucher014bb202013-01-18 19:36:20 -05004081 u32 reset_mask;
4082
Jérome Glisse71fe2892016-03-18 16:58:38 +01004083 if (hard) {
4084 si_gpu_pci_config_reset(rdev);
4085 return 0;
4086 }
4087
Alex Deucher014bb202013-01-18 19:36:20 -05004088 reset_mask = si_gpu_check_soft_reset(rdev);
4089
4090 if (reset_mask)
4091 r600_set_bios_scratch_engine_hung(rdev, true);
4092
Alex Deucher4a5c8ea2013-11-15 16:35:55 -05004093 /* try soft reset */
Alex Deucher014bb202013-01-18 19:36:20 -05004094 si_gpu_soft_reset(rdev, reset_mask);
4095
4096 reset_mask = si_gpu_check_soft_reset(rdev);
4097
Alex Deucher4a5c8ea2013-11-15 16:35:55 -05004098 /* try pci config reset */
4099 if (reset_mask && radeon_hard_reset)
4100 si_gpu_pci_config_reset(rdev);
4101
4102 reset_mask = si_gpu_check_soft_reset(rdev);
4103
Alex Deucher014bb202013-01-18 19:36:20 -05004104 if (!reset_mask)
4105 r600_set_bios_scratch_engine_hung(rdev, false);
4106
4107 return 0;
Alex Deucherc476dde2012-03-20 17:18:12 -04004108}
4109
Alex Deucher123bc182013-01-24 11:37:19 -05004110/**
4111 * si_gfx_is_lockup - Check if the GFX engine is locked up
4112 *
4113 * @rdev: radeon_device pointer
4114 * @ring: radeon_ring structure holding ring information
4115 *
4116 * Check if the GFX engine is locked up.
4117 * Returns true if the engine appears to be locked up, false if not.
4118 */
4119bool si_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
4120{
4121 u32 reset_mask = si_gpu_check_soft_reset(rdev);
4122
4123 if (!(reset_mask & (RADEON_RESET_GFX |
4124 RADEON_RESET_COMPUTE |
4125 RADEON_RESET_CP))) {
Christian Königff212f22014-02-18 14:52:33 +01004126 radeon_ring_lockup_update(rdev, ring);
Alex Deucher123bc182013-01-24 11:37:19 -05004127 return false;
4128 }
Alex Deucher123bc182013-01-24 11:37:19 -05004129 return radeon_ring_test_lockup(rdev, ring);
4130}
4131
/* MC */
/**
 * si_mc_program - program the memory controller apertures
 *
 * @rdev: radeon_device pointer
 *
 * Clears the HDP register pairs, stops MC access, programs the system
 * and FB apertures (VRAM location, default page, AGP disabled), then
 * resumes the MC.  On chips with display (non-NODCE) it also locks out
 * VGA aperture access and disables the VGA renderer.
 */
static void si_mc_program(struct radeon_device *rdev)
{
	struct evergreen_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}
	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);

	evergreen_mc_stop(rdev, &save);
	if (radeon_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
	}
	if (!ASIC_IS_NODCE(rdev))
		/* Lockout access through VGA aperture*/
		WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
	/* Update configuration */
	WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
	       rdev->mc.vram_start >> 12);
	WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
	       rdev->mc.vram_end >> 12);
	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
	       rdev->vram_scratch.gpu_addr >> 12);
	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(MC_VM_FB_LOCATION, tmp);
	/* XXX double check these! */
	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
	WREG32(HDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	WREG32(MC_VM_AGP_BASE, 0);
	WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
	WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
	if (radeon_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
	}
	evergreen_mc_resume(rdev, &save);
	if (!ASIC_IS_NODCE(rdev)) {
		/* we need to own VRAM, so turn off the VGA renderer here
		 * to stop it overwriting our objects */
		rv515_vga_render_disable(rdev);
	}
}
4183
Alex Deucher1c491652013-04-09 12:45:26 -04004184void si_vram_gtt_location(struct radeon_device *rdev,
4185 struct radeon_mc *mc)
Alex Deucherd2800ee2012-03-20 17:18:13 -04004186{
4187 if (mc->mc_vram_size > 0xFFC0000000ULL) {
4188 /* leave room for at least 1024M GTT */
4189 dev_warn(rdev->dev, "limiting VRAM\n");
4190 mc->real_vram_size = 0xFFC0000000ULL;
4191 mc->mc_vram_size = 0xFFC0000000ULL;
4192 }
Alex Deucher9ed8b1f2013-04-08 11:13:01 -04004193 radeon_vram_location(rdev, &rdev->mc, 0);
Alex Deucherd2800ee2012-03-20 17:18:13 -04004194 rdev->mc.gtt_base_align = 0;
Alex Deucher9ed8b1f2013-04-08 11:13:01 -04004195 radeon_gtt_location(rdev, mc);
Alex Deucherd2800ee2012-03-20 17:18:13 -04004196}
4197
/**
 * si_mc_init - set up memory controller info for SI
 *
 * @rdev: radeon_device pointer
 *
 * Read the memory channel size and count from the MC registers to
 * compute the VRAM bus width, read the VRAM size, record the PCI
 * aperture, and place VRAM/GTT in the GPU address space.
 * Always returns 0.
 */
static int si_mc_init(struct radeon_device *rdev)
{
	u32 tmp;
	int chansize, numchan;

	/* Get VRAM informations */
	rdev->mc.vram_is_ddr = true;
	tmp = RREG32(MC_ARB_RAMCFG);
	if (tmp & CHANSIZE_OVERRIDE) {
		chansize = 16;
	} else if (tmp & CHANSIZE_MASK) {
		chansize = 64;
	} else {
		chansize = 32;
	}
	/* decode the number of populated memory channels */
	tmp = RREG32(MC_SHARED_CHMAP);
	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
	case 0:
	default:
		numchan = 1;
		break;
	case 1:
		numchan = 2;
		break;
	case 2:
		numchan = 4;
		break;
	case 3:
		numchan = 8;
		break;
	case 4:
		numchan = 3;
		break;
	case 5:
		numchan = 6;
		break;
	case 6:
		numchan = 10;
		break;
	case 7:
		numchan = 12;
		break;
	case 8:
		numchan = 16;
		break;
	}
	rdev->mc.vram_width = numchan * chansize;
	/* Could aper size report 0 ? */
	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
	/* size in MB on si */
	tmp = RREG32(CONFIG_MEMSIZE);
	/* some boards may have garbage in the upper 16 bits */
	if (tmp & 0xffff0000) {
		DRM_INFO("Probable bad vram size: 0x%08x\n", tmp);
		/* keep the low half only if it is non-zero, otherwise
		 * trust the full (suspicious) value rather than zero */
		if (tmp & 0xffff)
			tmp &= 0xffff;
	}
	rdev->mc.mc_vram_size = tmp * 1024ULL * 1024ULL;
	rdev->mc.real_vram_size = rdev->mc.mc_vram_size;
	rdev->mc.visible_vram_size = rdev->mc.aper_size;
	si_vram_gtt_location(rdev, &rdev->mc);
	radeon_update_bandwidth_info(rdev);

	return 0;
}
4264
4265/*
4266 * GART
4267 */
/**
 * si_pcie_gart_tlb_flush - flush the GART TLBs via MMIO
 *
 * @rdev: radeon_device pointer
 *
 * Flush the HDP cache, then request invalidation of the VM context
 * 0-15 TLBs so subsequent GPU accesses observe updated page tables.
 */
void si_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	/* flush hdp cache */
	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);

	/* bits 0-15 are the VM contexts0-15 */
	WREG32(VM_INVALIDATE_REQUEST, 1);
}
4276
/**
 * si_pcie_gart_enable - program and enable the PCIE GART
 *
 * @rdev: radeon_device pointer
 *
 * Pin the GART table in VRAM, program the L1 TLB and L2 cache,
 * set up VM context 0 to cover the GTT aperture, restore the saved
 * page-table bases for contexts 1-15, enable fault reporting, and
 * flush the TLBs. The register writes are order-sensitive.
 * Returns 0 on success, negative error code on failure.
 */
static int si_pcie_gart_enable(struct radeon_device *rdev)
{
	int r, i;

	if (rdev->gart.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	/* Setup TLB control */
	WREG32(MC_VM_MX_L1_TLB_CNTL,
	       (0xA << 7) |
	       ENABLE_L1_TLB |
	       ENABLE_L1_FRAGMENT_PROCESSING |
	       SYSTEM_ACCESS_MODE_NOT_IN_SYS |
	       ENABLE_ADVANCED_DRIVER_MODEL |
	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
	       ENABLE_L2_FRAGMENT_PROCESSING |
	       ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
	       EFFECTIVE_L2_QUEUE_SIZE(7) |
	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
	WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE);
	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
	       BANK_SELECT(4) |
	       L2_CACHE_BIGK_FRAGMENT_SIZE(4));
	/* setup context0: identity maps the GTT range, faults fall back
	 * to the dummy page */
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
			(u32)(rdev->dummy_page.addr >> 12));
	WREG32(VM_CONTEXT0_CNTL2, 0);
	WREG32(VM_CONTEXT0_CNTL, (ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
				  RANGE_PROTECTION_FAULT_ENABLE_DEFAULT));

	/* NOTE(review): undocumented registers, zeroed on init — presumably
	 * part of the VM setup sequence; no symbolic names available */
	WREG32(0x15D4, 0);
	WREG32(0x15D8, 0);
	WREG32(0x15DC, 0);

	/* empty context1-15 */
	/* set vm size, must be a multiple of 4 */
	WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
	WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn - 1);
	/* Assign the pt base to something valid for now; the pts used for
	 * the VMs are determined by the application and setup and assigned
	 * on the fly in the vm part of radeon_gart.c
	 */
	for (i = 1; i < 16; i++) {
		if (i < 8)
			WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
			       rdev->vm_manager.saved_table_addr[i]);
		else
			WREG32(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((i - 8) << 2),
			       rdev->vm_manager.saved_table_addr[i]);
	}

	/* enable context1-15 */
	WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(rdev->dummy_page.addr >> 12));
	WREG32(VM_CONTEXT1_CNTL2, 4);
	WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
				PAGE_TABLE_BLOCK_SIZE(radeon_vm_block_size - 9) |
				RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
				DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
				DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT |
				PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT |
				PDE0_PROTECTION_FAULT_ENABLE_DEFAULT |
				VALID_PROTECTION_FAULT_ENABLE_INTERRUPT |
				VALID_PROTECTION_FAULT_ENABLE_DEFAULT |
				READ_PROTECTION_FAULT_ENABLE_INTERRUPT |
				READ_PROTECTION_FAULT_ENABLE_DEFAULT |
				WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT |
				WRITE_PROTECTION_FAULT_ENABLE_DEFAULT);

	si_pcie_gart_tlb_flush(rdev);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20),
		 (unsigned long long)rdev->gart.table_addr);
	rdev->gart.ready = true;
	return 0;
}
4364
/**
 * si_pcie_gart_disable - disable the PCIE GART
 *
 * @rdev: radeon_device pointer
 *
 * Save the page-table base addresses of VM contexts 1-15 (restored
 * by si_pcie_gart_enable() on resume), disable all VM contexts,
 * put the TLB/L2 into pass-through mode and unpin the GART table.
 */
static void si_pcie_gart_disable(struct radeon_device *rdev)
{
	unsigned i;

	/* preserve per-context page table bases across suspend/reset */
	for (i = 1; i < 16; ++i) {
		uint32_t reg;
		if (i < 8)
			reg = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2);
		else
			reg = VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((i - 8) << 2);
		rdev->vm_manager.saved_table_addr[i] = RREG32(reg);
	}

	/* Disable all tables */
	WREG32(VM_CONTEXT0_CNTL, 0);
	WREG32(VM_CONTEXT1_CNTL, 0);
	/* Setup TLB control */
	WREG32(MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE_NOT_IN_SYS |
	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
	       EFFECTIVE_L2_QUEUE_SIZE(7) |
	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
	       L2_CACHE_BIGK_FRAGMENT_SIZE(0));
	radeon_gart_table_vram_unpin(rdev);
}
4394
/**
 * si_pcie_gart_fini - tear down the PCIE GART
 *
 * @rdev: radeon_device pointer
 *
 * Disable the GART hardware, then free the table BO and the
 * GART bookkeeping (teardown order matters).
 */
static void si_pcie_gart_fini(struct radeon_device *rdev)
{
	si_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
	radeon_gart_fini(rdev);
}
4401
Alex Deucher498dd8b2012-03-20 17:18:15 -04004402/* vm parser */
/**
 * si_vm_reg_valid - check whether a register may be written from a VM IB
 *
 * @reg: byte offset of the register
 *
 * Userspace command streams running under a VM may only touch context
 * registers, shader registers, and an explicit whitelist of config
 * registers. Returns true if the register is allowed, false (with an
 * error message) otherwise.
 */
static bool si_vm_reg_valid(u32 reg)
{
	/* context regs are fine */
	if (reg >= 0x28000)
		return true;

	/* shader regs are also fine */
	if (reg >= 0xB000 && reg < 0xC000)
		return true;

	/* check config regs against the whitelist */
	switch (reg) {
	case GRBM_GFX_INDEX:
	case CP_STRMOUT_CNTL:
	case VGT_VTX_VECT_EJECT_REG:
	case VGT_CACHE_INVALIDATION:
	case VGT_ESGS_RING_SIZE:
	case VGT_GSVS_RING_SIZE:
	case VGT_GS_VERTEX_REUSE:
	case VGT_PRIMITIVE_TYPE:
	case VGT_INDEX_TYPE:
	case VGT_NUM_INDICES:
	case VGT_NUM_INSTANCES:
	case VGT_TF_RING_SIZE:
	case VGT_HS_OFFCHIP_PARAM:
	case VGT_TF_MEMORY_BASE:
	case PA_CL_ENHANCE:
	case PA_SU_LINE_STIPPLE_VALUE:
	case PA_SC_LINE_STIPPLE_STATE:
	case PA_SC_ENHANCE:
	case SQC_CACHES:
	case SPI_STATIC_THREAD_MGMT_1:
	case SPI_STATIC_THREAD_MGMT_2:
	case SPI_STATIC_THREAD_MGMT_3:
	case SPI_PS_MAX_WAVE_ID:
	case SPI_CONFIG_CNTL:
	case SPI_CONFIG_CNTL_1:
	case TA_CNTL_AUX:
	case TA_CS_BC_BASE_ADDR:
		return true;
	default:
		DRM_ERROR("Invalid register 0x%x in CS\n", reg);
		return false;
	}
}
4448
4449static int si_vm_packet3_ce_check(struct radeon_device *rdev,
4450 u32 *ib, struct radeon_cs_packet *pkt)
4451{
4452 switch (pkt->opcode) {
4453 case PACKET3_NOP:
4454 case PACKET3_SET_BASE:
4455 case PACKET3_SET_CE_DE_COUNTERS:
4456 case PACKET3_LOAD_CONST_RAM:
4457 case PACKET3_WRITE_CONST_RAM:
4458 case PACKET3_WRITE_CONST_RAM_OFFSET:
4459 case PACKET3_DUMP_CONST_RAM:
4460 case PACKET3_INCREMENT_CE_COUNTER:
4461 case PACKET3_WAIT_ON_DE_COUNTER:
4462 case PACKET3_CE_WRITE:
4463 break;
4464 default:
4465 DRM_ERROR("Invalid CE packet3: 0x%x\n", pkt->opcode);
4466 return -EINVAL;
4467 }
4468 return 0;
4469}
4470
Tom Stellarde5b9e752013-08-16 17:47:39 -04004471static int si_vm_packet3_cp_dma_check(u32 *ib, u32 idx)
4472{
4473 u32 start_reg, reg, i;
4474 u32 command = ib[idx + 4];
4475 u32 info = ib[idx + 1];
4476 u32 idx_value = ib[idx];
4477 if (command & PACKET3_CP_DMA_CMD_SAS) {
4478 /* src address space is register */
4479 if (((info & 0x60000000) >> 29) == 0) {
4480 start_reg = idx_value << 2;
4481 if (command & PACKET3_CP_DMA_CMD_SAIC) {
4482 reg = start_reg;
4483 if (!si_vm_reg_valid(reg)) {
4484 DRM_ERROR("CP DMA Bad SRC register\n");
4485 return -EINVAL;
4486 }
4487 } else {
4488 for (i = 0; i < (command & 0x1fffff); i++) {
4489 reg = start_reg + (4 * i);
4490 if (!si_vm_reg_valid(reg)) {
4491 DRM_ERROR("CP DMA Bad SRC register\n");
4492 return -EINVAL;
4493 }
4494 }
4495 }
4496 }
4497 }
4498 if (command & PACKET3_CP_DMA_CMD_DAS) {
4499 /* dst address space is register */
4500 if (((info & 0x00300000) >> 20) == 0) {
4501 start_reg = ib[idx + 2];
4502 if (command & PACKET3_CP_DMA_CMD_DAIC) {
4503 reg = start_reg;
4504 if (!si_vm_reg_valid(reg)) {
4505 DRM_ERROR("CP DMA Bad DST register\n");
4506 return -EINVAL;
4507 }
4508 } else {
4509 for (i = 0; i < (command & 0x1fffff); i++) {
4510 reg = start_reg + (4 * i);
4511 if (!si_vm_reg_valid(reg)) {
4512 DRM_ERROR("CP DMA Bad DST register\n");
4513 return -EINVAL;
4514 }
4515 }
4516 }
4517 }
4518 }
4519 return 0;
4520}
4521
/**
 * si_vm_packet3_gfx_check - validate a PACKET3 for the GFX ring
 *
 * @rdev: radeon_device pointer
 * @ib: IB dword buffer
 * @pkt: decoded packet header
 *
 * Whitelist of opcodes allowed on the graphics ring under a VM.
 * Opcodes that can write registers additionally have their register
 * operands checked against si_vm_reg_valid().
 * Returns 0 if the packet is allowed, -EINVAL otherwise.
 */
static int si_vm_packet3_gfx_check(struct radeon_device *rdev,
				   u32 *ib, struct radeon_cs_packet *pkt)
{
	int r;
	u32 idx = pkt->idx + 1;
	u32 idx_value = ib[idx];
	u32 start_reg, end_reg, reg, i;

	switch (pkt->opcode) {
	/* opcodes with no register operands — always allowed */
	case PACKET3_NOP:
	case PACKET3_SET_BASE:
	case PACKET3_CLEAR_STATE:
	case PACKET3_INDEX_BUFFER_SIZE:
	case PACKET3_DISPATCH_DIRECT:
	case PACKET3_DISPATCH_INDIRECT:
	case PACKET3_ALLOC_GDS:
	case PACKET3_WRITE_GDS_RAM:
	case PACKET3_ATOMIC_GDS:
	case PACKET3_ATOMIC:
	case PACKET3_OCCLUSION_QUERY:
	case PACKET3_SET_PREDICATION:
	case PACKET3_COND_EXEC:
	case PACKET3_PRED_EXEC:
	case PACKET3_DRAW_INDIRECT:
	case PACKET3_DRAW_INDEX_INDIRECT:
	case PACKET3_INDEX_BASE:
	case PACKET3_DRAW_INDEX_2:
	case PACKET3_CONTEXT_CONTROL:
	case PACKET3_INDEX_TYPE:
	case PACKET3_DRAW_INDIRECT_MULTI:
	case PACKET3_DRAW_INDEX_AUTO:
	case PACKET3_DRAW_INDEX_IMMD:
	case PACKET3_NUM_INSTANCES:
	case PACKET3_DRAW_INDEX_MULTI_AUTO:
	case PACKET3_STRMOUT_BUFFER_UPDATE:
	case PACKET3_DRAW_INDEX_OFFSET_2:
	case PACKET3_DRAW_INDEX_MULTI_ELEMENT:
	case PACKET3_DRAW_INDEX_INDIRECT_MULTI:
	case PACKET3_MPEG_INDEX:
	case PACKET3_WAIT_REG_MEM:
	case PACKET3_MEM_WRITE:
	case PACKET3_PFP_SYNC_ME:
	case PACKET3_SURFACE_SYNC:
	case PACKET3_EVENT_WRITE:
	case PACKET3_EVENT_WRITE_EOP:
	case PACKET3_EVENT_WRITE_EOS:
	case PACKET3_SET_CONTEXT_REG:
	case PACKET3_SET_CONTEXT_REG_INDIRECT:
	case PACKET3_SET_SH_REG:
	case PACKET3_SET_SH_REG_OFFSET:
	case PACKET3_INCREMENT_DE_COUNTER:
	case PACKET3_WAIT_ON_CE_COUNTER:
	case PACKET3_WAIT_ON_AVAIL_BUFFER:
	case PACKET3_ME_WRITE:
		break;
	case PACKET3_COPY_DATA:
		/* dest select 0 means a register destination */
		if ((idx_value & 0xf00) == 0) {
			reg = ib[idx + 3] * 4;
			if (!si_vm_reg_valid(reg))
				return -EINVAL;
		}
		break;
	case PACKET3_WRITE_DATA:
		/* dst select 0 means register writes */
		if ((idx_value & 0xf00) == 0) {
			start_reg = ib[idx + 1] * 4;
			/* one-register-write flag: single reg, else a range */
			if (idx_value & 0x10000) {
				if (!si_vm_reg_valid(start_reg))
					return -EINVAL;
			} else {
				for (i = 0; i < (pkt->count - 2); i++) {
					reg = start_reg + (4 * i);
					if (!si_vm_reg_valid(reg))
						return -EINVAL;
				}
			}
		}
		break;
	case PACKET3_COND_WRITE:
		/* write-to-register mode */
		if (idx_value & 0x100) {
			reg = ib[idx + 5] * 4;
			if (!si_vm_reg_valid(reg))
				return -EINVAL;
		}
		break;
	case PACKET3_COPY_DW:
		/* register destination */
		if (idx_value & 0x2) {
			reg = ib[idx + 3] * 4;
			if (!si_vm_reg_valid(reg))
				return -EINVAL;
		}
		break;
	case PACKET3_SET_CONFIG_REG:
		start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CONFIG_REG_START) ||
		    (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
		    (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
			DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
			return -EINVAL;
		}
		for (i = 0; i < pkt->count; i++) {
			reg = start_reg + (4 * i);
			if (!si_vm_reg_valid(reg))
				return -EINVAL;
		}
		break;
	case PACKET3_CP_DMA:
		r = si_vm_packet3_cp_dma_check(ib, idx);
		if (r)
			return r;
		break;
	default:
		DRM_ERROR("Invalid GFX packet3: 0x%x\n", pkt->opcode);
		return -EINVAL;
	}
	return 0;
}
4639
/**
 * si_vm_packet3_compute_check - validate a PACKET3 for a compute ring
 *
 * @rdev: radeon_device pointer
 * @ib: IB dword buffer
 * @pkt: decoded packet header
 *
 * Same structure as si_vm_packet3_gfx_check() but with the smaller
 * opcode whitelist that is legal on the compute (CP1/CP2) rings.
 * Returns 0 if the packet is allowed, -EINVAL otherwise.
 */
static int si_vm_packet3_compute_check(struct radeon_device *rdev,
				       u32 *ib, struct radeon_cs_packet *pkt)
{
	int r;
	u32 idx = pkt->idx + 1;
	u32 idx_value = ib[idx];
	u32 start_reg, reg, i;

	switch (pkt->opcode) {
	/* opcodes with no register operands — always allowed */
	case PACKET3_NOP:
	case PACKET3_SET_BASE:
	case PACKET3_CLEAR_STATE:
	case PACKET3_DISPATCH_DIRECT:
	case PACKET3_DISPATCH_INDIRECT:
	case PACKET3_ALLOC_GDS:
	case PACKET3_WRITE_GDS_RAM:
	case PACKET3_ATOMIC_GDS:
	case PACKET3_ATOMIC:
	case PACKET3_OCCLUSION_QUERY:
	case PACKET3_SET_PREDICATION:
	case PACKET3_COND_EXEC:
	case PACKET3_PRED_EXEC:
	case PACKET3_CONTEXT_CONTROL:
	case PACKET3_STRMOUT_BUFFER_UPDATE:
	case PACKET3_WAIT_REG_MEM:
	case PACKET3_MEM_WRITE:
	case PACKET3_PFP_SYNC_ME:
	case PACKET3_SURFACE_SYNC:
	case PACKET3_EVENT_WRITE:
	case PACKET3_EVENT_WRITE_EOP:
	case PACKET3_EVENT_WRITE_EOS:
	case PACKET3_SET_CONTEXT_REG:
	case PACKET3_SET_CONTEXT_REG_INDIRECT:
	case PACKET3_SET_SH_REG:
	case PACKET3_SET_SH_REG_OFFSET:
	case PACKET3_INCREMENT_DE_COUNTER:
	case PACKET3_WAIT_ON_CE_COUNTER:
	case PACKET3_WAIT_ON_AVAIL_BUFFER:
	case PACKET3_ME_WRITE:
		break;
	case PACKET3_COPY_DATA:
		/* dest select 0 means a register destination */
		if ((idx_value & 0xf00) == 0) {
			reg = ib[idx + 3] * 4;
			if (!si_vm_reg_valid(reg))
				return -EINVAL;
		}
		break;
	case PACKET3_WRITE_DATA:
		/* dst select 0 means register writes */
		if ((idx_value & 0xf00) == 0) {
			start_reg = ib[idx + 1] * 4;
			/* one-register-write flag: single reg, else a range */
			if (idx_value & 0x10000) {
				if (!si_vm_reg_valid(start_reg))
					return -EINVAL;
			} else {
				for (i = 0; i < (pkt->count - 2); i++) {
					reg = start_reg + (4 * i);
					if (!si_vm_reg_valid(reg))
						return -EINVAL;
				}
			}
		}
		break;
	case PACKET3_COND_WRITE:
		/* write-to-register mode */
		if (idx_value & 0x100) {
			reg = ib[idx + 5] * 4;
			if (!si_vm_reg_valid(reg))
				return -EINVAL;
		}
		break;
	case PACKET3_COPY_DW:
		/* register destination */
		if (idx_value & 0x2) {
			reg = ib[idx + 3] * 4;
			if (!si_vm_reg_valid(reg))
				return -EINVAL;
		}
		break;
	case PACKET3_CP_DMA:
		r = si_vm_packet3_cp_dma_check(ib, idx);
		if (r)
			return r;
		break;
	default:
		DRM_ERROR("Invalid Compute packet3: 0x%x\n", pkt->opcode);
		return -EINVAL;
	}
	return 0;
}
4727
/**
 * si_ib_parse - validate an indirect buffer for VM execution
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to validate
 *
 * Walk the IB packet by packet and dispatch each PACKET3 to the
 * CE/GFX/compute checker matching the target ring. Type-0 packets
 * and unknown packet types are rejected. On the first invalid
 * packet, the whole IB is dumped with the offending dword marked.
 * Returns 0 if the IB is valid, negative error code otherwise.
 */
int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
{
	int ret = 0;
	u32 idx = 0, i;
	struct radeon_cs_packet pkt;

	do {
		pkt.idx = idx;
		pkt.type = RADEON_CP_PACKET_GET_TYPE(ib->ptr[idx]);
		pkt.count = RADEON_CP_PACKET_GET_COUNT(ib->ptr[idx]);
		pkt.one_reg_wr = 0;
		switch (pkt.type) {
		case RADEON_PACKET_TYPE0:
			dev_err(rdev->dev, "Packet0 not allowed!\n");
			ret = -EINVAL;
			break;
		case RADEON_PACKET_TYPE2:
			/* type-2 is a one-dword filler packet */
			idx += 1;
			break;
		case RADEON_PACKET_TYPE3:
			pkt.opcode = RADEON_CP_PACKET3_GET_OPCODE(ib->ptr[idx]);
			if (ib->is_const_ib)
				ret = si_vm_packet3_ce_check(rdev, ib->ptr, &pkt);
			else {
				switch (ib->ring) {
				case RADEON_RING_TYPE_GFX_INDEX:
					ret = si_vm_packet3_gfx_check(rdev, ib->ptr, &pkt);
					break;
				case CAYMAN_RING_TYPE_CP1_INDEX:
				case CAYMAN_RING_TYPE_CP2_INDEX:
					ret = si_vm_packet3_compute_check(rdev, ib->ptr, &pkt);
					break;
				default:
					dev_err(rdev->dev, "Non-PM4 ring %d !\n", ib->ring);
					ret = -EINVAL;
					break;
				}
			}
			/* header dword + count+1 payload dwords */
			idx += pkt.count + 2;
			break;
		default:
			dev_err(rdev->dev, "Unknown packet type %d !\n", pkt.type);
			ret = -EINVAL;
			break;
		}
		if (ret) {
			/* dump the IB, marking the packet that failed */
			for (i = 0; i < ib->length_dw; i++) {
				if (i == idx)
					printk("\t0x%08x <---\n", ib->ptr[i]);
				else
					printk("\t0x%08x\n", ib->ptr[i]);
			}
			break;
		}
	} while (idx < ib->length_dw);

	return ret;
}
4786
Alex Deucherd2800ee2012-03-20 17:18:13 -04004787/*
4788 * vm
4789 */
/**
 * si_vm_init - initialize VM manager parameters for SI
 *
 * @rdev: radeon_device pointer
 *
 * SI supports 16 VM contexts and addresses VRAM from offset 0.
 * Always returns 0.
 */
int si_vm_init(struct radeon_device *rdev)
{
	/* number of VMs */
	rdev->vm_manager.nvm = 16;
	/* base offset of vram pages */
	rdev->vm_manager.vram_base_offset = 0;

	return 0;
}
4799
/**
 * si_vm_fini - VM manager teardown (nothing to do on SI)
 *
 * @rdev: radeon_device pointer
 */
void si_vm_fini(struct radeon_device *rdev)
{
}
4803
/**
 * si_vm_decode_fault - print human readable fault info
 *
 * @rdev: radeon_device pointer
 * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
 * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
 *
 * Decode the faulting memory client ID into a hardware block name
 * (Tahiti uses a different client-ID map than the other SI parts)
 * and print the fault details (SI).
 */
static void si_vm_decode_fault(struct radeon_device *rdev,
			       u32 status, u32 addr)
{
	u32 mc_id = (status & MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT;
	u32 vmid = (status & FAULT_VMID_MASK) >> FAULT_VMID_SHIFT;
	u32 protections = (status & PROTECTIONS_MASK) >> PROTECTIONS_SHIFT;
	char *block;

	if (rdev->family == CHIP_TAHITI) {
		/* Tahiti memory-client-ID -> block name map */
		switch (mc_id) {
		case 160:
		case 144:
		case 96:
		case 80:
		case 224:
		case 208:
		case 32:
		case 16:
			block = "CB";
			break;
		case 161:
		case 145:
		case 97:
		case 81:
		case 225:
		case 209:
		case 33:
		case 17:
			block = "CB_FMASK";
			break;
		case 162:
		case 146:
		case 98:
		case 82:
		case 226:
		case 210:
		case 34:
		case 18:
			block = "CB_CMASK";
			break;
		case 163:
		case 147:
		case 99:
		case 83:
		case 227:
		case 211:
		case 35:
		case 19:
			block = "CB_IMMED";
			break;
		case 164:
		case 148:
		case 100:
		case 84:
		case 228:
		case 212:
		case 36:
		case 20:
			block = "DB";
			break;
		case 165:
		case 149:
		case 101:
		case 85:
		case 229:
		case 213:
		case 37:
		case 21:
			block = "DB_HTILE";
			break;
		case 167:
		case 151:
		case 103:
		case 87:
		case 231:
		case 215:
		case 39:
		case 23:
			block = "DB_STEN";
			break;
		case 72:
		case 68:
		case 64:
		case 8:
		case 4:
		case 0:
		case 136:
		case 132:
		case 128:
		case 200:
		case 196:
		case 192:
			block = "TC";
			break;
		case 112:
		case 48:
			block = "CP";
			break;
		case 49:
		case 177:
		case 50:
		case 178:
			block = "SH";
			break;
		case 53:
		case 190:
			block = "VGT";
			break;
		case 117:
			block = "IH";
			break;
		case 51:
		case 115:
			block = "RLC";
			break;
		case 119:
		case 183:
			block = "DMA0";
			break;
		case 61:
			block = "DMA1";
			break;
		case 248:
		case 120:
			block = "HDP";
			break;
		default:
			block = "unknown";
			break;
		}
	} else {
		/* client-ID map shared by the remaining SI parts */
		switch (mc_id) {
		case 32:
		case 16:
		case 96:
		case 80:
		case 160:
		case 144:
		case 224:
		case 208:
			block = "CB";
			break;
		case 33:
		case 17:
		case 97:
		case 81:
		case 161:
		case 145:
		case 225:
		case 209:
			block = "CB_FMASK";
			break;
		case 34:
		case 18:
		case 98:
		case 82:
		case 162:
		case 146:
		case 226:
		case 210:
			block = "CB_CMASK";
			break;
		case 35:
		case 19:
		case 99:
		case 83:
		case 163:
		case 147:
		case 227:
		case 211:
			block = "CB_IMMED";
			break;
		case 36:
		case 20:
		case 100:
		case 84:
		case 164:
		case 148:
		case 228:
		case 212:
			block = "DB";
			break;
		case 37:
		case 21:
		case 101:
		case 85:
		case 165:
		case 149:
		case 229:
		case 213:
			block = "DB_HTILE";
			break;
		case 39:
		case 23:
		case 103:
		case 87:
		case 167:
		case 151:
		case 231:
		case 215:
			block = "DB_STEN";
			break;
		case 72:
		case 68:
		case 8:
		case 4:
		case 136:
		case 132:
		case 200:
		case 196:
			block = "TC";
			break;
		case 112:
		case 48:
			block = "CP";
			break;
		case 49:
		case 177:
		case 50:
		case 178:
			block = "SH";
			break;
		case 53:
			block = "VGT";
			break;
		case 117:
			block = "IH";
			break;
		case 51:
		case 115:
			block = "RLC";
			break;
		case 119:
		case 183:
			block = "DMA0";
			break;
		case 61:
			block = "DMA1";
			break;
		case 248:
		case 120:
			block = "HDP";
			break;
		default:
			block = "unknown";
			break;
		}
	}

	printk("VM fault (0x%02x, vmid %d) at page %u, %s from %s (%d)\n",
	       protections, vmid, addr,
	       (status & MEMORY_CLIENT_RW_MASK) ? "write" : "read",
	       block, mc_id);
}
5067
/**
 * si_vm_flush - flush the TLB for a VM via the CP ring
 *
 * @rdev: radeon_device pointer
 * @ring: ring to emit the flush on
 * @vm_id: VM context to flush (0-15)
 * @pd_addr: page directory base address
 *
 * Emit ring packets that update the VM's page directory base,
 * flush the HDP cache, invalidate the VM's TLB, wait for the
 * invalidate to finish, and finally sync the PFP with the ME.
 */
void si_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
		 unsigned vm_id, uint64_t pd_addr)
{
	/* write new base address */
	radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
				 WRITE_DATA_DST_SEL(0)));

	/* contexts 0-7 and 8-15 live in two separate register banks */
	if (vm_id < 8) {
		radeon_ring_write(ring,
				  (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2)) >> 2);
	} else {
		radeon_ring_write(ring,
				  (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm_id - 8) << 2)) >> 2);
	}
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, pd_addr >> 12);

	/* flush hdp cache */
	radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
				 WRITE_DATA_DST_SEL(0)));
	radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 0x1);

	/* bits 0-15 are the VM contexts0-15 */
	radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
				 WRITE_DATA_DST_SEL(0)));
	radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 1 << vm_id);

	/* wait for the invalidate to complete */
	radeon_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
	radeon_ring_write(ring, (WAIT_REG_MEM_FUNCTION(0) |  /* always */
				 WAIT_REG_MEM_ENGINE(0))); /* me */
	radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 0); /* ref */
	radeon_ring_write(ring, 0); /* mask */
	radeon_ring_write(ring, 0x20); /* poll interval */

	/* sync PFP to ME, otherwise we might get invalid PFP reads */
	radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
	radeon_ring_write(ring, 0x0);
}
5116
Alex Deucher347e7592012-03-20 17:18:21 -04005117/*
Alex Deucherf8f84ac2013-03-07 12:56:35 -05005118 * Power and clock gating
5119 */
5120static void si_wait_for_rlc_serdes(struct radeon_device *rdev)
5121{
5122 int i;
5123
5124 for (i = 0; i < rdev->usec_timeout; i++) {
5125 if (RREG32(RLC_SERDES_MASTER_BUSY_0) == 0)
5126 break;
5127 udelay(1);
5128 }
5129
5130 for (i = 0; i < rdev->usec_timeout; i++) {
5131 if (RREG32(RLC_SERDES_MASTER_BUSY_1) == 0)
5132 break;
5133 udelay(1);
5134 }
5135}
5136
/**
 * si_enable_gui_idle_interrupt - toggle the CP context busy/empty interrupts
 *
 * @rdev: radeon_device pointer
 * @enable: enable or disable the interrupts
 *
 * When disabling, also wait for the GFX block to settle into the
 * clocked+powered state before returning.
 */
static void si_enable_gui_idle_interrupt(struct radeon_device *rdev,
					 bool enable)
{
	u32 tmp = RREG32(CP_INT_CNTL_RING0);
	u32 mask;
	int i;

	if (enable)
		tmp |= (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
	else
		tmp &= ~(CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
	WREG32(CP_INT_CNTL_RING0, tmp);

	if (!enable) {
		/* read a gfx register */
		tmp = RREG32(DB_DEPTH_INFO);

		/* wait until only the clock+power status bits remain set */
		mask = RLC_BUSY_STATUS | GFX_POWER_STATUS | GFX_CLOCK_STATUS | GFX_LS_STATUS;
		for (i = 0; i < rdev->usec_timeout; i++) {
			if ((RREG32(RLC_STAT) & mask) == (GFX_CLOCK_STATUS | GFX_POWER_STATUS))
				break;
			udelay(1);
		}
	}
}
5162
/**
 * si_set_uvd_dcm - configure UVD dynamic clock mode
 *
 * @rdev: radeon_device pointer
 * @sw_mode: true for software-controlled, false for hardware-controlled
 *
 * Program UVD_CGC_CTRL/UVD_CGC_CTRL2 for either software or hardware
 * dynamic clocking of the UVD block.
 */
static void si_set_uvd_dcm(struct radeon_device *rdev,
			   bool sw_mode)
{
	u32 tmp, tmp2;

	tmp = RREG32(UVD_CGC_CTRL);
	tmp &= ~(CLK_OD_MASK | CG_DT_MASK);
	tmp |= DCM | CG_DT(1) | CLK_OD(4);

	if (sw_mode) {
		/* software mode: clear the per-block gating bits and
		 * enable the dynamic override path */
		tmp &= ~0x7ffff800;
		tmp2 = DYN_OR_EN | DYN_RR_EN | G_DIV_ID(7);
	} else {
		/* hardware mode: set all per-block gating bits */
		tmp |= 0x7ffff800;
		tmp2 = 0;
	}

	WREG32(UVD_CGC_CTRL, tmp);
	WREG32_UVD_CTX(UVD_CGC_CTRL2, tmp2);
}
5183
Alex Deucher22c775c2013-07-23 09:41:05 -04005184void si_init_uvd_internal_cg(struct radeon_device *rdev)
Alex Deucherf8f84ac2013-03-07 12:56:35 -05005185{
5186 bool hw_mode = true;
5187
5188 if (hw_mode) {
5189 si_set_uvd_dcm(rdev, false);
5190 } else {
5191 u32 tmp = RREG32(UVD_CGC_CTRL);
5192 tmp &= ~DCM;
5193 WREG32(UVD_CGC_CTRL, tmp);
5194 }
5195}
5196
/**
 * si_halt_rlc - stop the RLC if it is running
 *
 * @rdev: radeon_device pointer
 *
 * Disable the RLC and wait for its serdes masters to idle.
 * Returns the previous RLC_CNTL value so si_update_rlc() can
 * restore it afterwards.
 */
static u32 si_halt_rlc(struct radeon_device *rdev)
{
	u32 data, orig;

	orig = data = RREG32(RLC_CNTL);

	if (data & RLC_ENABLE) {
		data &= ~RLC_ENABLE;
		WREG32(RLC_CNTL, data);

		si_wait_for_rlc_serdes(rdev);
	}

	return orig;
}
5212
5213static void si_update_rlc(struct radeon_device *rdev, u32 rlc)
5214{
5215 u32 tmp;
5216
5217 tmp = RREG32(RLC_CNTL);
5218 if (tmp != rlc)
5219 WREG32(RLC_CNTL, rlc);
5220}
5221
5222static void si_enable_dma_pg(struct radeon_device *rdev, bool enable)
5223{
5224 u32 data, orig;
5225
5226 orig = data = RREG32(DMA_PG);
Alex Deuchere16866e2013-08-08 19:34:07 -04005227 if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_SDMA))
Alex Deucherf8f84ac2013-03-07 12:56:35 -05005228 data |= PG_CNTL_ENABLE;
5229 else
5230 data &= ~PG_CNTL_ENABLE;
5231 if (orig != data)
5232 WREG32(DMA_PG, data);
5233}
5234
5235static void si_init_dma_pg(struct radeon_device *rdev)
5236{
5237 u32 tmp;
5238
5239 WREG32(DMA_PGFSM_WRITE, 0x00002000);
5240 WREG32(DMA_PGFSM_CONFIG, 0x100010ff);
5241
5242 for (tmp = 0; tmp < 5; tmp++)
5243 WREG32(DMA_PGFSM_WRITE, 0);
5244}
5245
/**
 * si_enable_gfx_cgpg - toggle GFX clock/power gating via the RLC
 *
 * @rdev: radeon_device pointer
 * @enable: enable or disable gating
 *
 * Enabling requires GFX powergating support in rdev->pg_flags;
 * it programs the RLC thresholds and turns on automatic powergating.
 * Disabling turns automatic powergating off.
 */
static void si_enable_gfx_cgpg(struct radeon_device *rdev,
			       bool enable)
{
	u32 tmp;

	if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG)) {
		tmp = RLC_PUD(0x10) | RLC_PDD(0x10) | RLC_TTPD(0x10) | RLC_MSD(0x10);
		WREG32(RLC_TTOP_D, tmp);

		tmp = RREG32(RLC_PG_CNTL);
		tmp |= GFX_PG_ENABLE;
		WREG32(RLC_PG_CNTL, tmp);

		tmp = RREG32(RLC_AUTO_PG_CTRL);
		tmp |= AUTO_PG_EN;
		WREG32(RLC_AUTO_PG_CTRL, tmp);
	} else {
		tmp = RREG32(RLC_AUTO_PG_CTRL);
		tmp &= ~AUTO_PG_EN;
		WREG32(RLC_AUTO_PG_CTRL, tmp);

		/* NOTE(review): result unused — presumably a posting read
		 * to wake the GFX block; confirm before removing */
		tmp = RREG32(DB_RENDER_CONTROL);
	}
}
5270
/**
 * si_init_gfx_cgpg - initialize GFX powergating state in the RLC
 *
 * @rdev: radeon_device pointer
 *
 * Point the RLC at the save/restore and clear-state buffers and
 * program the auto-powergating idle threshold.
 */
static void si_init_gfx_cgpg(struct radeon_device *rdev)
{
	u32 tmp;

	WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);

	tmp = RREG32(RLC_PG_CNTL);
	tmp |= GFX_PG_SRC;
	WREG32(RLC_PG_CNTL, tmp);

	WREG32(RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8);

	tmp = RREG32(RLC_AUTO_PG_CTRL);

	/* idle threshold before the RLC powers the GFX block down */
	tmp &= ~GRBM_REG_SGIT_MASK;
	tmp |= GRBM_REG_SGIT(0x700);
	tmp &= ~PG_AFTER_GRBM_REG_ST_MASK;
	WREG32(RLC_AUTO_PG_CTRL, tmp);
}
5290
/**
 * si_get_cu_active_bitmap - read the active-CU bitmap for one SE/SH
 *
 * @rdev: radeon_device pointer
 * @se: shader engine index
 * @sh: shader array index
 *
 * Select the SE/SH, read the fixed-function and user shader array
 * configs, restore broadcast selection, and combine them into a
 * bitmap with one set bit per active compute unit.
 */
static u32 si_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh)
{
	u32 mask = 0, tmp, tmp1;
	int i;

	si_select_se_sh(rdev, se, sh);
	tmp = RREG32(CC_GC_SHADER_ARRAY_CONFIG);
	tmp1 = RREG32(GC_USER_SHADER_ARRAY_CONFIG);
	si_select_se_sh(rdev, 0xffffffff, 0xffffffff);

	/* merge the inactive-CU bits (upper 16 bits) of both registers */
	tmp &= 0xffff0000;

	tmp |= tmp1;
	tmp >>= 16;

	/* build a max_cu_per_sh-bit mask one bit at a time
	 * (avoids undefined behavior of 1 << 32) */
	for (i = 0; i < rdev->config.si.max_cu_per_sh; i ++) {
		mask <<= 1;
		mask |= 1;
	}

	/* the registers report inactive CUs, so invert for active */
	return (~tmp) & mask;
}
5313
5314static void si_init_ao_cu_mask(struct radeon_device *rdev)
5315{
5316 u32 i, j, k, active_cu_number = 0;
5317 u32 mask, counter, cu_bitmap;
5318 u32 tmp = 0;
5319
5320 for (i = 0; i < rdev->config.si.max_shader_engines; i++) {
5321 for (j = 0; j < rdev->config.si.max_sh_per_se; j++) {
5322 mask = 1;
5323 cu_bitmap = 0;
5324 counter = 0;
5325 for (k = 0; k < rdev->config.si.max_cu_per_sh; k++) {
Alex Deucherba190312013-04-17 16:27:40 -04005326 if (si_get_cu_active_bitmap(rdev, i, j) & mask) {
Alex Deucherf8f84ac2013-03-07 12:56:35 -05005327 if (counter < 2)
5328 cu_bitmap |= mask;
5329 counter++;
5330 }
5331 mask <<= 1;
5332 }
5333
5334 active_cu_number += counter;
5335 tmp |= (cu_bitmap << (i * 16 + j * 8));
5336 }
5337 }
5338
5339 WREG32(RLC_PG_AO_CU_MASK, tmp);
5340
5341 tmp = RREG32(RLC_MAX_PG_CU);
5342 tmp &= ~MAX_PU_CU_MASK;
5343 tmp |= MAX_PU_CU(active_cu_number);
5344 WREG32(RLC_MAX_PG_CU, tmp);
5345}
5346
/*
 * si_enable_cgcg - toggle coarse-grain clock gating (CGCG/CGLS) for GFX
 * @rdev: radeon device
 * @enable: requested state; only honored if RADEON_CG_SUPPORT_GFX_CGCG
 *          is set in cg_flags
 *
 * The enable path must run the RLC serdes handshake (halt RLC, broadcast
 * the write masks, wait for serdes idle, resume) before flipping the
 * CGCG/CGLS enable bits, so the statement order here is significant.
 */
static void si_enable_cgcg(struct radeon_device *rdev,
			   bool enable)
{
	u32 data, orig, tmp;

	orig = data = RREG32(RLC_CGCG_CGLS_CTRL);

	if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CGCG)) {
		/* RLC needs the GUI idle interrupt while CGCG is active */
		si_enable_gui_idle_interrupt(rdev, true);

		WREG32(RLC_GCPM_GENERAL_3, 0x00000080);

		/* serdes programming must happen with the RLC halted */
		tmp = si_halt_rlc(rdev);

		WREG32(RLC_SERDES_WR_MASTER_MASK_0, 0xffffffff);
		WREG32(RLC_SERDES_WR_MASTER_MASK_1, 0xffffffff);
		WREG32(RLC_SERDES_WR_CTRL, 0x00b000ff);

		si_wait_for_rlc_serdes(rdev);

		/* restore the RLC state saved by si_halt_rlc() */
		si_update_rlc(rdev, tmp);

		WREG32(RLC_SERDES_WR_CTRL, 0x007000ff);

		data |= CGCG_EN | CGLS_EN;
	} else {
		si_enable_gui_idle_interrupt(rdev, false);

		/* dummy reads to flush pending CB clock-gating state */
		RREG32(CB_CGTT_SCLK_CTRL);
		RREG32(CB_CGTT_SCLK_CTRL);
		RREG32(CB_CGTT_SCLK_CTRL);
		RREG32(CB_CGTT_SCLK_CTRL);

		data &= ~(CGCG_EN | CGLS_EN);
	}

	/* avoid redundant MMIO writes */
	if (orig != data)
		WREG32(RLC_CGCG_CGLS_CTRL, data);
}
5386
/*
 * si_enable_mgcg - toggle medium-grain clock gating for the GFX block
 * @rdev: radeon device
 * @enable: requested state; gated by RADEON_CG_SUPPORT_GFX_MGCG
 *
 * Also controls CP memory light sleep (RADEON_CG_SUPPORT_GFX_CP_LS) and
 * clears/sets the MGCG override bits.  Both paths finish with an RLC
 * halt + serdes broadcast + resume sequence; ordering is significant.
 */
static void si_enable_mgcg(struct radeon_device *rdev,
			   bool enable)
{
	u32 data, orig, tmp = 0;

	if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_MGCG)) {
		/* magic gating setup value; the read is only for the
		 * redundant-write check below
		 */
		orig = data = RREG32(CGTS_SM_CTRL_REG);
		data = 0x96940200;
		if (orig != data)
			WREG32(CGTS_SM_CTRL_REG, data);

		/* optionally let CP memories enter light sleep */
		if (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CP_LS) {
			orig = data = RREG32(CP_MEM_SLP_CNTL);
			data |= CP_MEM_LS_EN;
			if (orig != data)
				WREG32(CP_MEM_SLP_CNTL, data);
		}

		/* clear the low override bits so MGCG can engage */
		orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
		data &= 0xffffffc0;
		if (orig != data)
			WREG32(RLC_CGTT_MGCG_OVERRIDE, data);

		/* serdes programming must happen with the RLC halted */
		tmp = si_halt_rlc(rdev);

		WREG32(RLC_SERDES_WR_MASTER_MASK_0, 0xffffffff);
		WREG32(RLC_SERDES_WR_MASTER_MASK_1, 0xffffffff);
		WREG32(RLC_SERDES_WR_CTRL, 0x00d000ff);

		si_update_rlc(rdev, tmp);
	} else {
		/* force the override bits on to disable MGCG */
		orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
		data |= 0x00000003;
		if (orig != data)
			WREG32(RLC_CGTT_MGCG_OVERRIDE, data);

		/* take CP memories out of light sleep if they were in it */
		data = RREG32(CP_MEM_SLP_CNTL);
		if (data & CP_MEM_LS_EN) {
			data &= ~CP_MEM_LS_EN;
			WREG32(CP_MEM_SLP_CNTL, data);
		}
		orig = data = RREG32(CGTS_SM_CTRL_REG);
		data |= LS_OVERRIDE | OVERRIDE;
		if (orig != data)
			WREG32(CGTS_SM_CTRL_REG, data);

		tmp = si_halt_rlc(rdev);

		WREG32(RLC_SERDES_WR_MASTER_MASK_0, 0xffffffff);
		WREG32(RLC_SERDES_WR_MASTER_MASK_1, 0xffffffff);
		WREG32(RLC_SERDES_WR_CTRL, 0x00e000ff);

		si_update_rlc(rdev, tmp);
	}
}
5442
/*
 * si_enable_uvd_mgcg - toggle medium-grain clock gating for the UVD block
 * @rdev: radeon device
 * @enable: requested state; gated by RADEON_CG_SUPPORT_UVD_MGCG
 *
 * Programs the UVD context registers (memory gating bits and the DCM
 * enable) plus the SMC-side CGTT local registers: all-zero lets the
 * clocks gate, all-ones forces them on.
 */
static void si_enable_uvd_mgcg(struct radeon_device *rdev,
			       bool enable)
{
	u32 orig, data, tmp;

	if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_UVD_MGCG)) {
		/* enable gating of the UVD memory clocks */
		tmp = RREG32_UVD_CTX(UVD_CGC_MEM_CTRL);
		tmp |= 0x3fff;
		WREG32_UVD_CTX(UVD_CGC_MEM_CTRL, tmp);

		orig = data = RREG32(UVD_CGC_CTRL);
		data |= DCM;
		if (orig != data)
			WREG32(UVD_CGC_CTRL, data);

		/* no overrides: allow gating */
		WREG32_SMC(SMC_CG_IND_START + CG_CGTT_LOCAL_0, 0);
		WREG32_SMC(SMC_CG_IND_START + CG_CGTT_LOCAL_1, 0);
	} else {
		tmp = RREG32_UVD_CTX(UVD_CGC_MEM_CTRL);
		tmp &= ~0x3fff;
		WREG32_UVD_CTX(UVD_CGC_MEM_CTRL, tmp);

		orig = data = RREG32(UVD_CGC_CTRL);
		data &= ~DCM;
		if (orig != data)
			WREG32(UVD_CGC_CTRL, data);

		/* full override: force all UVD clocks on */
		WREG32_SMC(SMC_CG_IND_START + CG_CGTT_LOCAL_0, 0xffffffff);
		WREG32_SMC(SMC_CG_IND_START + CG_CGTT_LOCAL_1, 0xffffffff);
	}
}
5474
/*
 * Memory-controller clock-gating control registers; each carries both the
 * MC_LS_ENABLE and MC_CG_ENABLE bits and is walked by si_enable_mc_ls()
 * and si_enable_mc_mgcg() below.
 */
static const u32 mc_cg_registers[] =
{
	MC_HUB_MISC_HUB_CG,
	MC_HUB_MISC_SIP_CG,
	MC_HUB_MISC_VM_CG,
	MC_XPB_CLK_GAT,
	ATC_MISC_CG,
	MC_CITF_MISC_WR_CG,
	MC_CITF_MISC_RD_CG,
	MC_CITF_MISC_VM_CG,
	VM_L2_CG,
};
5487
5488static void si_enable_mc_ls(struct radeon_device *rdev,
5489 bool enable)
5490{
5491 int i;
5492 u32 orig, data;
5493
5494 for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
5495 orig = data = RREG32(mc_cg_registers[i]);
Alex Deuchere16866e2013-08-08 19:34:07 -04005496 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_MC_LS))
Alex Deucherf8f84ac2013-03-07 12:56:35 -05005497 data |= MC_LS_ENABLE;
5498 else
5499 data &= ~MC_LS_ENABLE;
5500 if (data != orig)
5501 WREG32(mc_cg_registers[i], data);
5502 }
5503}
5504
Alex Deuchere16866e2013-08-08 19:34:07 -04005505static void si_enable_mc_mgcg(struct radeon_device *rdev,
5506 bool enable)
5507{
5508 int i;
5509 u32 orig, data;
5510
5511 for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
5512 orig = data = RREG32(mc_cg_registers[i]);
5513 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_MC_MGCG))
5514 data |= MC_CG_ENABLE;
5515 else
5516 data &= ~MC_CG_ENABLE;
5517 if (data != orig)
5518 WREG32(mc_cg_registers[i], data);
5519 }
5520}
5521
5522static void si_enable_dma_mgcg(struct radeon_device *rdev,
5523 bool enable)
5524{
5525 u32 orig, data, offset;
5526 int i;
5527
5528 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_SDMA_MGCG)) {
5529 for (i = 0; i < 2; i++) {
5530 if (i == 0)
5531 offset = DMA0_REGISTER_OFFSET;
5532 else
5533 offset = DMA1_REGISTER_OFFSET;
5534 orig = data = RREG32(DMA_POWER_CNTL + offset);
5535 data &= ~MEM_POWER_OVERRIDE;
5536 if (data != orig)
5537 WREG32(DMA_POWER_CNTL + offset, data);
5538 WREG32(DMA_CLK_CTRL + offset, 0x00000100);
5539 }
5540 } else {
5541 for (i = 0; i < 2; i++) {
5542 if (i == 0)
5543 offset = DMA0_REGISTER_OFFSET;
5544 else
5545 offset = DMA1_REGISTER_OFFSET;
5546 orig = data = RREG32(DMA_POWER_CNTL + offset);
5547 data |= MEM_POWER_OVERRIDE;
5548 if (data != orig)
5549 WREG32(DMA_POWER_CNTL + offset, data);
5550
5551 orig = data = RREG32(DMA_CLK_CTRL + offset);
5552 data = 0xff000000;
5553 if (data != orig)
5554 WREG32(DMA_CLK_CTRL + offset, data);
5555 }
5556 }
5557}
5558
5559static void si_enable_bif_mgls(struct radeon_device *rdev,
5560 bool enable)
5561{
5562 u32 orig, data;
5563
5564 orig = data = RREG32_PCIE(PCIE_CNTL2);
5565
5566 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_BIF_LS))
5567 data |= SLV_MEM_LS_EN | MST_MEM_LS_EN |
5568 REPLAY_MEM_LS_EN | SLV_MEM_AGGRESSIVE_LS_EN;
5569 else
5570 data &= ~(SLV_MEM_LS_EN | MST_MEM_LS_EN |
5571 REPLAY_MEM_LS_EN | SLV_MEM_AGGRESSIVE_LS_EN);
5572
5573 if (orig != data)
5574 WREG32_PCIE(PCIE_CNTL2, data);
5575}
5576
5577static void si_enable_hdp_mgcg(struct radeon_device *rdev,
5578 bool enable)
5579{
5580 u32 orig, data;
5581
5582 orig = data = RREG32(HDP_HOST_PATH_CNTL);
5583
5584 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_HDP_MGCG))
5585 data &= ~CLOCK_GATING_DIS;
5586 else
5587 data |= CLOCK_GATING_DIS;
5588
5589 if (orig != data)
5590 WREG32(HDP_HOST_PATH_CNTL, data);
5591}
5592
5593static void si_enable_hdp_ls(struct radeon_device *rdev,
5594 bool enable)
5595{
5596 u32 orig, data;
5597
5598 orig = data = RREG32(HDP_MEM_POWER_LS);
5599
5600 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_HDP_LS))
5601 data |= HDP_LS_ENABLE;
5602 else
5603 data &= ~HDP_LS_ENABLE;
5604
5605 if (orig != data)
5606 WREG32(HDP_MEM_POWER_LS, data);
5607}
5608
/*
 * si_update_cg - apply clock-gating state to a set of IP blocks
 * @rdev: radeon device
 * @block: bitmask of RADEON_CG_BLOCK_* blocks to update
 * @enable: enable or disable gating for the selected blocks
 *
 * The per-block enable helpers additionally check the corresponding
 * rdev->cg_flags support bits, so this may be called unconditionally.
 */
static void si_update_cg(struct radeon_device *rdev,
			 u32 block, bool enable)
{
	if (block & RADEON_CG_BLOCK_GFX) {
		/* mask the GUI idle interrupt while reprogramming GFX CG */
		si_enable_gui_idle_interrupt(rdev, false);
		/* order matters! */
		if (enable) {
			/* MGCG must be on before CGCG */
			si_enable_mgcg(rdev, true);
			si_enable_cgcg(rdev, true);
		} else {
			/* and CGCG must come off before MGCG */
			si_enable_cgcg(rdev, false);
			si_enable_mgcg(rdev, false);
		}
		si_enable_gui_idle_interrupt(rdev, true);
	}

	if (block & RADEON_CG_BLOCK_MC) {
		si_enable_mc_mgcg(rdev, enable);
		si_enable_mc_ls(rdev, enable);
	}

	if (block & RADEON_CG_BLOCK_SDMA) {
		si_enable_dma_mgcg(rdev, enable);
	}

	if (block & RADEON_CG_BLOCK_BIF) {
		si_enable_bif_mgls(rdev, enable);
	}

	if (block & RADEON_CG_BLOCK_UVD) {
		if (rdev->has_uvd) {
			si_enable_uvd_mgcg(rdev, enable);
		}
	}

	if (block & RADEON_CG_BLOCK_HDP) {
		si_enable_hdp_mgcg(rdev, enable);
		si_enable_hdp_ls(rdev, enable);
	}
}
Alex Deucherf8f84ac2013-03-07 12:56:35 -05005649
/*
 * si_init_cg - enable clock gating on all supported blocks at init time
 *
 * UVD gating is handled separately so the UVD internal CG setup can run
 * right after it, and only on parts that actually have a UVD block.
 */
static void si_init_cg(struct radeon_device *rdev)
{
	si_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
			    RADEON_CG_BLOCK_MC |
			    RADEON_CG_BLOCK_SDMA |
			    RADEON_CG_BLOCK_BIF |
			    RADEON_CG_BLOCK_HDP), true);
	if (rdev->has_uvd) {
		si_update_cg(rdev, RADEON_CG_BLOCK_UVD, true);
		si_init_uvd_internal_cg(rdev);
	}
}
5662
/*
 * si_fini_cg - disable clock gating on teardown
 *
 * Mirror of si_init_cg(): UVD first, then the remaining blocks.
 */
static void si_fini_cg(struct radeon_device *rdev)
{
	if (rdev->has_uvd) {
		si_update_cg(rdev, RADEON_CG_BLOCK_UVD, false);
	}
	si_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
			    RADEON_CG_BLOCK_MC |
			    RADEON_CG_BLOCK_SDMA |
			    RADEON_CG_BLOCK_BIF |
			    RADEON_CG_BLOCK_HDP), false);
}
Alex Deucherf8f84ac2013-03-07 12:56:35 -05005674
Alex Deucher59a82d02013-08-13 12:48:06 -04005675u32 si_get_csb_size(struct radeon_device *rdev)
5676{
5677 u32 count = 0;
5678 const struct cs_section_def *sect = NULL;
5679 const struct cs_extent_def *ext = NULL;
5680
5681 if (rdev->rlc.cs_data == NULL)
5682 return 0;
5683
5684 /* begin clear state */
5685 count += 2;
5686 /* context control state */
5687 count += 3;
5688
5689 for (sect = rdev->rlc.cs_data; sect->section != NULL; ++sect) {
5690 for (ext = sect->section; ext->extent != NULL; ++ext) {
5691 if (sect->id == SECT_CONTEXT)
5692 count += 2 + ext->reg_count;
5693 else
5694 return 0;
5695 }
5696 }
5697 /* pa_sc_raster_config */
5698 count += 3;
5699 /* end clear state */
5700 count += 2;
5701 /* clear state */
5702 count += 2;
5703
5704 return count;
5705}
5706
/*
 * si_get_csb_buffer - emit the clear-state indirect buffer
 * @rdev: radeon device
 * @buffer: destination (GPU-visible), sized per si_get_csb_size()
 *
 * Writes the PM4 packet stream the RLC replays to restore a known
 * context state.  The packet sequence and dword counts must match
 * si_get_csb_size() exactly; all dwords are stored little-endian.
 */
void si_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer)
{
	u32 count = 0, i;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;

	if (rdev->rlc.cs_data == NULL)
		return;
	if (buffer == NULL)
		return;

	/* begin clear state */
	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	/* context control */
	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
	buffer[count++] = cpu_to_le32(0x80000000);
	buffer[count++] = cpu_to_le32(0x80000000);

	/* one SET_CONTEXT_REG packet per extent of clear-state data */
	for (sect = rdev->rlc.cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT) {
				buffer[count++] =
					cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
				buffer[count++] = cpu_to_le32(ext->reg_index - 0xa000);
				for (i = 0; i < ext->reg_count; i++)
					buffer[count++] = cpu_to_le32(ext->extent[i]);
			} else {
				/* unsupported section: bail, matching
				 * si_get_csb_size() returning 0
				 */
				return;
			}
		}
	}

	/* pa_sc_raster_config: per-family backend/raster setup */
	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	buffer[count++] = cpu_to_le32(PA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
	switch (rdev->family) {
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
		buffer[count++] = cpu_to_le32(0x2a00126a);
		break;
	case CHIP_VERDE:
		buffer[count++] = cpu_to_le32(0x0000124a);
		break;
	case CHIP_OLAND:
		buffer[count++] = cpu_to_le32(0x00000082);
		break;
	case CHIP_HAINAN:
		buffer[count++] = cpu_to_le32(0x00000000);
		break;
	default:
		buffer[count++] = cpu_to_le32(0x00000000);
		break;
	}

	/* end clear state */
	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);

	/* clear state */
	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
	buffer[count++] = cpu_to_le32(0);
}
5766
/*
 * si_init_pg - initialize power gating
 *
 * When power gating is supported, sets up DMA PG and the always-on CU
 * mask, then either runs the full GFX CG/PG setup or just points the
 * RLC at the save/restore and clear-state buffers.  When PG is not
 * supported at all, the RLC base registers are still programmed since
 * the RLC needs them regardless.
 */
static void si_init_pg(struct radeon_device *rdev)
{
	if (rdev->pg_flags) {
		if (rdev->pg_flags & RADEON_PG_SUPPORT_SDMA) {
			si_init_dma_pg(rdev);
		}
		si_init_ao_cu_mask(rdev);
		if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG) {
			/* programs the RLC base registers itself */
			si_init_gfx_cgpg(rdev);
		} else {
			WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
			WREG32(RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8);
		}
		si_enable_dma_pg(rdev, true);
		si_enable_gfx_cgpg(rdev, true);
	} else {
		WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
		WREG32(RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8);
	}
}
5787
5788static void si_fini_pg(struct radeon_device *rdev)
5789{
Alex Deucher0116e1e2013-08-08 18:00:10 -04005790 if (rdev->pg_flags) {
Alex Deucherf8f84ac2013-03-07 12:56:35 -05005791 si_enable_dma_pg(rdev, false);
5792 si_enable_gfx_cgpg(rdev, false);
5793 }
5794}
5795
5796/*
Alex Deucher347e7592012-03-20 17:18:21 -04005797 * RLC
5798 */
Alex Deucher866d83d2013-04-15 17:13:29 -04005799void si_rlc_reset(struct radeon_device *rdev)
Alex Deucherd719cef2013-02-15 16:49:59 -05005800{
Alex Deucherf8f84ac2013-03-07 12:56:35 -05005801 u32 tmp = RREG32(GRBM_SOFT_RESET);
Alex Deucherd719cef2013-02-15 16:49:59 -05005802
Alex Deucherf8f84ac2013-03-07 12:56:35 -05005803 tmp |= SOFT_RESET_RLC;
5804 WREG32(GRBM_SOFT_RESET, tmp);
5805 udelay(50);
5806 tmp &= ~SOFT_RESET_RLC;
5807 WREG32(GRBM_SOFT_RESET, tmp);
5808 udelay(50);
Alex Deucherd719cef2013-02-15 16:49:59 -05005809}
5810
/*
 * si_rlc_stop - halt the RLC
 *
 * Disables the RLC, masks its GUI idle interrupt and waits until the
 * serdes links report idle before returning.
 */
static void si_rlc_stop(struct radeon_device *rdev)
{
	WREG32(RLC_CNTL, 0);

	si_enable_gui_idle_interrupt(rdev, false);

	si_wait_for_rlc_serdes(rdev);
}
5819
/*
 * si_rlc_start - start the RLC
 *
 * Enables the RLC and its GUI idle interrupt, then waits briefly for
 * the engine to come up.
 */
static void si_rlc_start(struct radeon_device *rdev)
{
	WREG32(RLC_CNTL, RLC_ENABLE);

	si_enable_gui_idle_interrupt(rdev, true);

	udelay(50);
}
5828
5829static bool si_lbpw_supported(struct radeon_device *rdev)
5830{
5831 u32 tmp;
5832
5833 /* Enable LBPW only for DDR3 */
5834 tmp = RREG32(MC_SEQ_MISC0);
5835 if ((tmp & 0xF0000000) == 0xB0000000)
5836 return true;
5837 return false;
5838}
5839
5840static void si_enable_lbpw(struct radeon_device *rdev, bool enable)
5841{
5842 u32 tmp;
5843
5844 tmp = RREG32(RLC_LB_CNTL);
5845 if (enable)
5846 tmp |= LOAD_BALANCE_ENABLE;
5847 else
5848 tmp &= ~LOAD_BALANCE_ENABLE;
5849 WREG32(RLC_LB_CNTL, tmp);
5850
5851 if (!enable) {
5852 si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
5853 WREG32(SPI_LB_CU_MASK, 0x00ff);
5854 }
Alex Deucher347e7592012-03-20 17:18:21 -04005855}
5856
/*
 * si_rlc_resume - reset the RLC and load its microcode
 *
 * Stops and soft-resets the RLC, (re)initializes power and clock gating,
 * clears the RLC scratch registers, uploads the RLC ucode word by word
 * (new-style header-described firmware is little-endian, legacy firmware
 * is big-endian), then restarts the RLC with LBPW set per memory type.
 *
 * Returns 0 on success, -EINVAL if no RLC firmware has been loaded.
 */
static int si_rlc_resume(struct radeon_device *rdev)
{
	u32 i;

	if (!rdev->rlc_fw)
		return -EINVAL;

	si_rlc_stop(rdev);

	si_rlc_reset(rdev);

	si_init_pg(rdev);

	si_init_cg(rdev);

	/* clear list/load-balance/counter state before ucode load */
	WREG32(RLC_RL_BASE, 0);
	WREG32(RLC_RL_SIZE, 0);
	WREG32(RLC_LB_CNTL, 0);
	WREG32(RLC_LB_CNTR_MAX, 0xffffffff);
	WREG32(RLC_LB_CNTR_INIT, 0);
	WREG32(RLC_LB_INIT_CU_MASK, 0xffffffff);

	WREG32(RLC_MC_CNTL, 0);
	WREG32(RLC_UCODE_CNTL, 0);

	if (rdev->new_fw) {
		/* header-described firmware: size and offset come from the
		 * header, payload words are little-endian
		 */
		const struct rlc_firmware_header_v1_0 *hdr =
			(const struct rlc_firmware_header_v1_0 *)rdev->rlc_fw->data;
		u32 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
		const __le32 *fw_data = (const __le32 *)
			(rdev->rlc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));

		radeon_ucode_print_rlc_hdr(&hdr->header);

		for (i = 0; i < fw_size; i++) {
			WREG32(RLC_UCODE_ADDR, i);
			WREG32(RLC_UCODE_DATA, le32_to_cpup(fw_data++));
		}
	} else {
		/* legacy firmware: fixed size, big-endian payload */
		const __be32 *fw_data =
			(const __be32 *)rdev->rlc_fw->data;
		for (i = 0; i < SI_RLC_UCODE_SIZE; i++) {
			WREG32(RLC_UCODE_ADDR, i);
			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
		}
	}
	WREG32(RLC_UCODE_ADDR, 0);

	si_enable_lbpw(rdev, si_lbpw_supported(rdev));

	si_rlc_start(rdev);

	return 0;
}
5911
Alex Deucher25a857f2012-03-20 17:18:22 -04005912static void si_enable_interrupts(struct radeon_device *rdev)
5913{
5914 u32 ih_cntl = RREG32(IH_CNTL);
5915 u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
5916
5917 ih_cntl |= ENABLE_INTR;
5918 ih_rb_cntl |= IH_RB_ENABLE;
5919 WREG32(IH_CNTL, ih_cntl);
5920 WREG32(IH_RB_CNTL, ih_rb_cntl);
5921 rdev->ih.enabled = true;
5922}
5923
5924static void si_disable_interrupts(struct radeon_device *rdev)
5925{
5926 u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
5927 u32 ih_cntl = RREG32(IH_CNTL);
5928
5929 ih_rb_cntl &= ~IH_RB_ENABLE;
5930 ih_cntl &= ~ENABLE_INTR;
5931 WREG32(IH_RB_CNTL, ih_rb_cntl);
5932 WREG32(IH_CNTL, ih_cntl);
5933 /* set rptr, wptr to 0 */
5934 WREG32(IH_RB_RPTR, 0);
5935 WREG32(IH_RB_WPTR, 0);
5936 rdev->ih.enabled = false;
Alex Deucher25a857f2012-03-20 17:18:22 -04005937 rdev->ih.rptr = 0;
5938}
5939
/*
 * si_disable_interrupt_state - mask every interrupt source
 *
 * Clears the interrupt enables for the CP rings, both DMA engines, GRBM,
 * SRBM, every present CRTC (vblank and pageflip) and — on parts with a
 * display controller — the DACs and all six HPD pins.  HPD polarity bits
 * are preserved while the enable bits are cleared.
 */
static void si_disable_interrupt_state(struct radeon_device *rdev)
{
	u32 tmp;

	/* keep only the context busy/empty enables on ring 0 */
	tmp = RREG32(CP_INT_CNTL_RING0) &
		(CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
	WREG32(CP_INT_CNTL_RING0, tmp);
	WREG32(CP_INT_CNTL_RING1, 0);
	WREG32(CP_INT_CNTL_RING2, 0);
	/* mask the DMA trap interrupts on both engines */
	tmp = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
	WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, tmp);
	tmp = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;
	WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, tmp);
	WREG32(GRBM_INT_CNTL, 0);
	WREG32(SRBM_INT_CNTL, 0);
	/* vblank interrupts for every present CRTC */
	if (rdev->num_crtc >= 2) {
		WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
		WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
	}
	if (rdev->num_crtc >= 4) {
		WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
		WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
	}
	if (rdev->num_crtc >= 6) {
		WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
		WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
	}

	/* pageflip interrupts for every present CRTC */
	if (rdev->num_crtc >= 2) {
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
	}
	if (rdev->num_crtc >= 4) {
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
	}
	if (rdev->num_crtc >= 6) {
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
	}

	/* skip display interrupt sources on headless parts */
	if (!ASIC_IS_NODCE(rdev)) {
		WREG32(DAC_AUTODETECT_INT_CONTROL, 0);

		/* clear HPD enables but keep the programmed polarity */
		tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD1_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD2_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD3_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD4_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD5_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD6_INT_CONTROL, tmp);
	}
}
Alex Deucher25a857f2012-03-20 17:18:22 -04005997}
5998
5999static int si_irq_init(struct radeon_device *rdev)
6000{
6001 int ret = 0;
6002 int rb_bufsz;
6003 u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
6004
6005 /* allocate ring */
6006 ret = r600_ih_ring_alloc(rdev);
6007 if (ret)
6008 return ret;
6009
6010 /* disable irqs */
6011 si_disable_interrupts(rdev);
6012
6013 /* init rlc */
6014 ret = si_rlc_resume(rdev);
6015 if (ret) {
6016 r600_ih_ring_fini(rdev);
6017 return ret;
6018 }
6019
6020 /* setup interrupt control */
6021 /* set dummy read address to ring address */
6022 WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
6023 interrupt_cntl = RREG32(INTERRUPT_CNTL);
6024 /* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
6025 * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
6026 */
6027 interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
6028 /* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
6029 interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
6030 WREG32(INTERRUPT_CNTL, interrupt_cntl);
6031
6032 WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
Daniel Vetterb72a8922013-07-10 14:11:59 +02006033 rb_bufsz = order_base_2(rdev->ih.ring_size / 4);
Alex Deucher25a857f2012-03-20 17:18:22 -04006034
6035 ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
6036 IH_WPTR_OVERFLOW_CLEAR |
6037 (rb_bufsz << 1));
6038
6039 if (rdev->wb.enabled)
6040 ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;
6041
6042 /* set the writeback address whether it's enabled or not */
6043 WREG32(IH_RB_WPTR_ADDR_LO, (rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFFFFFFFC);
6044 WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFF);
6045
6046 WREG32(IH_RB_CNTL, ih_rb_cntl);
6047
6048 /* set rptr, wptr to 0 */
6049 WREG32(IH_RB_RPTR, 0);
6050 WREG32(IH_RB_WPTR, 0);
6051
6052 /* Default settings for IH_CNTL (disabled at first) */
6053 ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10) | MC_VMID(0);
6054 /* RPTR_REARM only works if msi's are enabled */
6055 if (rdev->msi_enabled)
6056 ih_cntl |= RPTR_REARM;
6057 WREG32(IH_CNTL, ih_cntl);
6058
6059 /* force the active interrupt state to all disabled */
6060 si_disable_interrupt_state(rdev);
6061
Dave Airlie20998102012-04-03 11:53:05 +01006062 pci_set_master(rdev->pdev);
6063
Alex Deucher25a857f2012-03-20 17:18:22 -04006064 /* enable irqs */
6065 si_enable_interrupts(rdev);
6066
6067 return ret;
6068}
6069
6070int si_irq_set(struct radeon_device *rdev)
6071{
Alex Deucher811e4d52013-09-03 13:31:33 -04006072 u32 cp_int_cntl;
Alex Deucher25a857f2012-03-20 17:18:22 -04006073 u32 cp_int_cntl1 = 0, cp_int_cntl2 = 0;
6074 u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
Alex Deucher51535502012-08-30 14:34:30 -04006075 u32 hpd1 = 0, hpd2 = 0, hpd3 = 0, hpd4 = 0, hpd5 = 0, hpd6 = 0;
Alex Deucher25a857f2012-03-20 17:18:22 -04006076 u32 grbm_int_cntl = 0;
Alex Deucher8c5fd7e2012-12-04 15:28:18 -05006077 u32 dma_cntl, dma_cntl1;
Alex Deuchera9e61412013-06-25 17:56:16 -04006078 u32 thermal_int = 0;
Alex Deucher25a857f2012-03-20 17:18:22 -04006079
6080 if (!rdev->irq.installed) {
6081 WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
6082 return -EINVAL;
6083 }
6084 /* don't enable anything if the ih is disabled */
6085 if (!rdev->ih.enabled) {
6086 si_disable_interrupts(rdev);
6087 /* force the active interrupt state to all disabled */
6088 si_disable_interrupt_state(rdev);
6089 return 0;
6090 }
6091
Alex Deucher811e4d52013-09-03 13:31:33 -04006092 cp_int_cntl = RREG32(CP_INT_CNTL_RING0) &
6093 (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
6094
Alex Deucher51535502012-08-30 14:34:30 -04006095 if (!ASIC_IS_NODCE(rdev)) {
Dave Airlie47f24672015-02-24 09:23:58 +10006096 hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
6097 hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
6098 hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
6099 hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
6100 hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
6101 hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
Alex Deucher51535502012-08-30 14:34:30 -04006102 }
Alex Deucher25a857f2012-03-20 17:18:22 -04006103
Alex Deucher8c5fd7e2012-12-04 15:28:18 -05006104 dma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
6105 dma_cntl1 = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;
6106
Alex Deuchera9e61412013-06-25 17:56:16 -04006107 thermal_int = RREG32(CG_THERMAL_INT) &
6108 ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
6109
Alex Deucher25a857f2012-03-20 17:18:22 -04006110 /* enable CP interrupts on all rings */
Christian Koenig736fc372012-05-17 19:52:00 +02006111 if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
Alex Deucher25a857f2012-03-20 17:18:22 -04006112 DRM_DEBUG("si_irq_set: sw int gfx\n");
6113 cp_int_cntl |= TIME_STAMP_INT_ENABLE;
6114 }
Christian Koenig736fc372012-05-17 19:52:00 +02006115 if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP1_INDEX])) {
Alex Deucher25a857f2012-03-20 17:18:22 -04006116 DRM_DEBUG("si_irq_set: sw int cp1\n");
6117 cp_int_cntl1 |= TIME_STAMP_INT_ENABLE;
6118 }
Christian Koenig736fc372012-05-17 19:52:00 +02006119 if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP2_INDEX])) {
Alex Deucher25a857f2012-03-20 17:18:22 -04006120 DRM_DEBUG("si_irq_set: sw int cp2\n");
6121 cp_int_cntl2 |= TIME_STAMP_INT_ENABLE;
6122 }
Alex Deucher8c5fd7e2012-12-04 15:28:18 -05006123 if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) {
6124 DRM_DEBUG("si_irq_set: sw int dma\n");
6125 dma_cntl |= TRAP_ENABLE;
6126 }
6127
6128 if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_DMA1_INDEX])) {
6129 DRM_DEBUG("si_irq_set: sw int dma1\n");
6130 dma_cntl1 |= TRAP_ENABLE;
6131 }
Alex Deucher25a857f2012-03-20 17:18:22 -04006132 if (rdev->irq.crtc_vblank_int[0] ||
Christian Koenig736fc372012-05-17 19:52:00 +02006133 atomic_read(&rdev->irq.pflip[0])) {
Alex Deucher25a857f2012-03-20 17:18:22 -04006134 DRM_DEBUG("si_irq_set: vblank 0\n");
6135 crtc1 |= VBLANK_INT_MASK;
6136 }
6137 if (rdev->irq.crtc_vblank_int[1] ||
Christian Koenig736fc372012-05-17 19:52:00 +02006138 atomic_read(&rdev->irq.pflip[1])) {
Alex Deucher25a857f2012-03-20 17:18:22 -04006139 DRM_DEBUG("si_irq_set: vblank 1\n");
6140 crtc2 |= VBLANK_INT_MASK;
6141 }
6142 if (rdev->irq.crtc_vblank_int[2] ||
Christian Koenig736fc372012-05-17 19:52:00 +02006143 atomic_read(&rdev->irq.pflip[2])) {
Alex Deucher25a857f2012-03-20 17:18:22 -04006144 DRM_DEBUG("si_irq_set: vblank 2\n");
6145 crtc3 |= VBLANK_INT_MASK;
6146 }
6147 if (rdev->irq.crtc_vblank_int[3] ||
Christian Koenig736fc372012-05-17 19:52:00 +02006148 atomic_read(&rdev->irq.pflip[3])) {
Alex Deucher25a857f2012-03-20 17:18:22 -04006149 DRM_DEBUG("si_irq_set: vblank 3\n");
6150 crtc4 |= VBLANK_INT_MASK;
6151 }
6152 if (rdev->irq.crtc_vblank_int[4] ||
Christian Koenig736fc372012-05-17 19:52:00 +02006153 atomic_read(&rdev->irq.pflip[4])) {
Alex Deucher25a857f2012-03-20 17:18:22 -04006154 DRM_DEBUG("si_irq_set: vblank 4\n");
6155 crtc5 |= VBLANK_INT_MASK;
6156 }
6157 if (rdev->irq.crtc_vblank_int[5] ||
Christian Koenig736fc372012-05-17 19:52:00 +02006158 atomic_read(&rdev->irq.pflip[5])) {
Alex Deucher25a857f2012-03-20 17:18:22 -04006159 DRM_DEBUG("si_irq_set: vblank 5\n");
6160 crtc6 |= VBLANK_INT_MASK;
6161 }
6162 if (rdev->irq.hpd[0]) {
6163 DRM_DEBUG("si_irq_set: hpd 1\n");
Dave Airlie47f24672015-02-24 09:23:58 +10006164 hpd1 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
Alex Deucher25a857f2012-03-20 17:18:22 -04006165 }
6166 if (rdev->irq.hpd[1]) {
6167 DRM_DEBUG("si_irq_set: hpd 2\n");
Dave Airlie47f24672015-02-24 09:23:58 +10006168 hpd2 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
Alex Deucher25a857f2012-03-20 17:18:22 -04006169 }
6170 if (rdev->irq.hpd[2]) {
6171 DRM_DEBUG("si_irq_set: hpd 3\n");
Dave Airlie47f24672015-02-24 09:23:58 +10006172 hpd3 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
Alex Deucher25a857f2012-03-20 17:18:22 -04006173 }
6174 if (rdev->irq.hpd[3]) {
6175 DRM_DEBUG("si_irq_set: hpd 4\n");
Dave Airlie47f24672015-02-24 09:23:58 +10006176 hpd4 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
Alex Deucher25a857f2012-03-20 17:18:22 -04006177 }
6178 if (rdev->irq.hpd[4]) {
6179 DRM_DEBUG("si_irq_set: hpd 5\n");
Dave Airlie47f24672015-02-24 09:23:58 +10006180 hpd5 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
Alex Deucher25a857f2012-03-20 17:18:22 -04006181 }
6182 if (rdev->irq.hpd[5]) {
6183 DRM_DEBUG("si_irq_set: hpd 6\n");
Dave Airlie47f24672015-02-24 09:23:58 +10006184 hpd6 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
Alex Deucher25a857f2012-03-20 17:18:22 -04006185 }
Alex Deucher25a857f2012-03-20 17:18:22 -04006186
6187 WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
6188 WREG32(CP_INT_CNTL_RING1, cp_int_cntl1);
6189 WREG32(CP_INT_CNTL_RING2, cp_int_cntl2);
6190
Alex Deucher8c5fd7e2012-12-04 15:28:18 -05006191 WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, dma_cntl);
6192 WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, dma_cntl1);
6193
Alex Deucher25a857f2012-03-20 17:18:22 -04006194 WREG32(GRBM_INT_CNTL, grbm_int_cntl);
6195
Alex Deuchera9e61412013-06-25 17:56:16 -04006196 if (rdev->irq.dpm_thermal) {
6197 DRM_DEBUG("dpm thermal\n");
6198 thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
6199 }
6200
Alex Deucher51535502012-08-30 14:34:30 -04006201 if (rdev->num_crtc >= 2) {
6202 WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
6203 WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2);
6204 }
Alex Deucher25a857f2012-03-20 17:18:22 -04006205 if (rdev->num_crtc >= 4) {
6206 WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3);
6207 WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4);
6208 }
6209 if (rdev->num_crtc >= 6) {
6210 WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5);
6211 WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
6212 }
6213
Alex Deucher51535502012-08-30 14:34:30 -04006214 if (rdev->num_crtc >= 2) {
Christian Königf5d636d2014-04-23 20:46:06 +02006215 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET,
6216 GRPH_PFLIP_INT_MASK);
6217 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET,
6218 GRPH_PFLIP_INT_MASK);
Alex Deucher51535502012-08-30 14:34:30 -04006219 }
Alex Deucher25a857f2012-03-20 17:18:22 -04006220 if (rdev->num_crtc >= 4) {
Christian Königf5d636d2014-04-23 20:46:06 +02006221 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET,
6222 GRPH_PFLIP_INT_MASK);
6223 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET,
6224 GRPH_PFLIP_INT_MASK);
Alex Deucher25a857f2012-03-20 17:18:22 -04006225 }
6226 if (rdev->num_crtc >= 6) {
Christian Königf5d636d2014-04-23 20:46:06 +02006227 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET,
6228 GRPH_PFLIP_INT_MASK);
6229 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET,
6230 GRPH_PFLIP_INT_MASK);
Alex Deucher25a857f2012-03-20 17:18:22 -04006231 }
6232
Alex Deucher51535502012-08-30 14:34:30 -04006233 if (!ASIC_IS_NODCE(rdev)) {
6234 WREG32(DC_HPD1_INT_CONTROL, hpd1);
6235 WREG32(DC_HPD2_INT_CONTROL, hpd2);
6236 WREG32(DC_HPD3_INT_CONTROL, hpd3);
6237 WREG32(DC_HPD4_INT_CONTROL, hpd4);
6238 WREG32(DC_HPD5_INT_CONTROL, hpd5);
6239 WREG32(DC_HPD6_INT_CONTROL, hpd6);
6240 }
Alex Deucher25a857f2012-03-20 17:18:22 -04006241
Alex Deuchera9e61412013-06-25 17:56:16 -04006242 WREG32(CG_THERMAL_INT, thermal_int);
6243
Alex Deucher05869152015-03-02 20:43:53 -05006244 /* posting read */
6245 RREG32(SRBM_STATUS);
6246
Alex Deucher25a857f2012-03-20 17:18:22 -04006247 return 0;
6248}
6249
/**
 * si_irq_ack - ack interrupt sources (SI)
 * @rdev: radeon_device pointer
 *
 * Latches the display interrupt status registers into
 * rdev->irq.stat_regs.evergreen, then writes back the acknowledge bit for
 * every source found asserted: page-flip completion, vblank/vline per CRTC,
 * HPD (hotplug) and HPD RX interrupts.  Called from si_irq_process() before
 * the IH ring is parsed, and from si_irq_disable() to quiesce the hardware.
 * No-op on ASICs without a display controller.
 */
static inline void si_irq_ack(struct radeon_device *rdev)
{
	u32 tmp;

	if (ASIC_IS_NODCE(rdev))
		return;

	/* snapshot all display interrupt status registers; the cached copies
	 * are consumed (and bits cleared) by si_irq_process()
	 */
	rdev->irq.stat_regs.evergreen.disp_int = RREG32(DISP_INTERRUPT_STATUS);
	rdev->irq.stat_regs.evergreen.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
	rdev->irq.stat_regs.evergreen.disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2);
	rdev->irq.stat_regs.evergreen.disp_int_cont3 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE3);
	rdev->irq.stat_regs.evergreen.disp_int_cont4 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE4);
	rdev->irq.stat_regs.evergreen.disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5);
	rdev->irq.stat_regs.evergreen.d1grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET);
	rdev->irq.stat_regs.evergreen.d2grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET);
	if (rdev->num_crtc >= 4) {
		rdev->irq.stat_regs.evergreen.d3grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET);
		rdev->irq.stat_regs.evergreen.d4grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET);
	}
	if (rdev->num_crtc >= 6) {
		rdev->irq.stat_regs.evergreen.d5grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET);
		rdev->irq.stat_regs.evergreen.d6grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET);
	}

	/* ack page-flip and vblank/vline interrupts for CRTC0/1 */
	if (rdev->irq.stat_regs.evergreen.d1grph_int & GRPH_PFLIP_INT_OCCURRED)
		WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
	if (rdev->irq.stat_regs.evergreen.d2grph_int & GRPH_PFLIP_INT_OCCURRED)
		WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
	if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT)
		WREG32(VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK);
	if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT)
		WREG32(VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VLINE_ACK);
	if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT)
		WREG32(VBLANK_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VBLANK_ACK);
	if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT)
		WREG32(VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK);

	/* ack CRTC2/3 sources where present */
	if (rdev->num_crtc >= 4) {
		if (rdev->irq.stat_regs.evergreen.d3grph_int & GRPH_PFLIP_INT_OCCURRED)
			WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
		if (rdev->irq.stat_regs.evergreen.d4grph_int & GRPH_PFLIP_INT_OCCURRED)
			WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
		if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)
			WREG32(VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK);
		if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT)
			WREG32(VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VLINE_ACK);
		if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT)
			WREG32(VBLANK_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VBLANK_ACK);
		if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT)
			WREG32(VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VLINE_ACK);
	}

	/* ack CRTC4/5 sources where present */
	if (rdev->num_crtc >= 6) {
		if (rdev->irq.stat_regs.evergreen.d5grph_int & GRPH_PFLIP_INT_OCCURRED)
			WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
		if (rdev->irq.stat_regs.evergreen.d6grph_int & GRPH_PFLIP_INT_OCCURRED)
			WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
		if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)
			WREG32(VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK);
		if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT)
			WREG32(VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VLINE_ACK);
		if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT)
			WREG32(VBLANK_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VBLANK_ACK);
		if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT)
			WREG32(VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VLINE_ACK);
	}

	/* ack HPD (hotplug detect) interrupts; read-modify-write so the
	 * enable bits set by si_irq_set() are preserved
	 */
	if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
		tmp = RREG32(DC_HPD1_INT_CONTROL);
		tmp |= DC_HPDx_INT_ACK;
		WREG32(DC_HPD1_INT_CONTROL, tmp);
	}
	if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
		tmp = RREG32(DC_HPD2_INT_CONTROL);
		tmp |= DC_HPDx_INT_ACK;
		WREG32(DC_HPD2_INT_CONTROL, tmp);
	}
	if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
		tmp = RREG32(DC_HPD3_INT_CONTROL);
		tmp |= DC_HPDx_INT_ACK;
		WREG32(DC_HPD3_INT_CONTROL, tmp);
	}
	if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
		tmp = RREG32(DC_HPD4_INT_CONTROL);
		tmp |= DC_HPDx_INT_ACK;
		WREG32(DC_HPD4_INT_CONTROL, tmp);
	}
	if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
		tmp = RREG32(DC_HPD5_INT_CONTROL);
		tmp |= DC_HPDx_INT_ACK;
		WREG32(DC_HPD5_INT_CONTROL, tmp);
	}
	if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
		tmp = RREG32(DC_HPD6_INT_CONTROL);
		tmp |= DC_HPDx_INT_ACK;
		WREG32(DC_HPD6_INT_CONTROL, tmp);
	}

	/* ack HPD RX interrupts (routed to dp_work by si_irq_process()) */
	if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT) {
		tmp = RREG32(DC_HPD1_INT_CONTROL);
		tmp |= DC_HPDx_RX_INT_ACK;
		WREG32(DC_HPD1_INT_CONTROL, tmp);
	}
	if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT) {
		tmp = RREG32(DC_HPD2_INT_CONTROL);
		tmp |= DC_HPDx_RX_INT_ACK;
		WREG32(DC_HPD2_INT_CONTROL, tmp);
	}
	if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT) {
		tmp = RREG32(DC_HPD3_INT_CONTROL);
		tmp |= DC_HPDx_RX_INT_ACK;
		WREG32(DC_HPD3_INT_CONTROL, tmp);
	}
	if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT) {
		tmp = RREG32(DC_HPD4_INT_CONTROL);
		tmp |= DC_HPDx_RX_INT_ACK;
		WREG32(DC_HPD4_INT_CONTROL, tmp);
	}
	if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT) {
		tmp = RREG32(DC_HPD5_INT_CONTROL);
		tmp |= DC_HPDx_RX_INT_ACK;
		WREG32(DC_HPD5_INT_CONTROL, tmp);
	}
	if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
		tmp = RREG32(DC_HPD6_INT_CONTROL);
		tmp |= DC_HPDx_RX_INT_ACK;
		WREG32(DC_HPD6_INT_CONTROL, tmp);
	}
}
6379
/**
 * si_irq_disable - disable interrupts (SI)
 * @rdev: radeon_device pointer
 *
 * Masks interrupt delivery, waits briefly for in-flight interrupts to land,
 * acks anything still pending, then clears the per-source enable state.
 */
static void si_irq_disable(struct radeon_device *rdev)
{
	si_disable_interrupts(rdev);
	/* Wait and acknowledge irq */
	mdelay(1);
	si_irq_ack(rdev);
	si_disable_interrupt_state(rdev);
}
6388
/**
 * si_irq_suspend - disable interrupts for suspend (SI)
 * @rdev: radeon_device pointer
 *
 * Disables interrupt delivery and stops the RLC ahead of a suspend cycle.
 */
static void si_irq_suspend(struct radeon_device *rdev)
{
	si_irq_disable(rdev);
	si_rlc_stop(rdev);
}
6394
/**
 * si_irq_fini - tear down interrupt handling (SI)
 * @rdev: radeon_device pointer
 *
 * Quiesces the interrupt hardware (via the suspend path) and frees the
 * IH ring buffer.  Called at driver unload / init failure.
 */
static void si_irq_fini(struct radeon_device *rdev)
{
	si_irq_suspend(rdev);
	r600_ih_ring_fini(rdev);
}
6400
/**
 * si_get_ih_wptr - fetch the current IH ring write pointer (SI)
 * @rdev: radeon_device pointer
 *
 * Reads the write pointer from the writeback page when writeback is
 * enabled (cheaper than an MMIO read), otherwise from IH_RB_WPTR.
 * If the ring has overflowed, resynchronizes rdev->ih.rptr to the oldest
 * vector that was not overwritten and clears the overflow flag in hardware.
 * Returns the write pointer masked to the ring size.
 */
static inline u32 si_get_ih_wptr(struct radeon_device *rdev)
{
	u32 wptr, tmp;

	if (rdev->wb.enabled)
		wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
	else
		wptr = RREG32(IH_RB_WPTR);

	if (wptr & RB_OVERFLOW) {
		wptr &= ~RB_OVERFLOW;
		/* When a ring buffer overflow happen start parsing interrupt
		 * from the last not overwritten vector (wptr + 16). Hopefully
		 * this should allow us to catchup.
		 */
		dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
			 wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
		rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
		tmp = RREG32(IH_RB_CNTL);
		tmp |= IH_WPTR_OVERFLOW_CLEAR;
		WREG32(IH_RB_CNTL, tmp);
	}
	return (wptr & rdev->ih.ptr_mask);
}
6425
6426/* SI IV Ring
6427 * Each IV ring entry is 128 bits:
6428 * [7:0] - interrupt source id
6429 * [31:8] - reserved
6430 * [59:32] - interrupt source data
6431 * [63:60] - reserved
6432 * [71:64] - RINGID
6433 * [79:72] - VMID
6434 * [127:80] - reserved
6435 */
/**
 * si_irq_process - interrupt handler (SI)
 * @rdev: radeon_device pointer
 *
 * Drains the IH (interrupt handler) ring and dispatches each 128-bit
 * vector (format documented above) by source id: CRTC vblank/vline,
 * page-flip completion, HPD hotplug and HPD RX, SRBM read errors,
 * UVD, VM protection faults, CP/DMA fence interrupts and thermal events.
 * Deferred work (DP, hotplug, thermal) is scheduled once the ring has
 * been drained.  Re-parses if new vectors arrived while processing.
 * Returns IRQ_HANDLED if vectors were processed, IRQ_NONE if the IH is
 * disabled, the device is shutting down, or another thread holds the lock.
 */
int si_irq_process(struct radeon_device *rdev)
{
	u32 wptr;
	u32 rptr;
	u32 src_id, src_data, ring_id;
	u32 ring_index;
	bool queue_hotplug = false;
	bool queue_dp = false;
	bool queue_thermal = false;
	u32 status, addr;

	if (!rdev->ih.enabled || rdev->shutdown)
		return IRQ_NONE;

	wptr = si_get_ih_wptr(rdev);

restart_ih:
	/* is somebody else already processing irqs? */
	if (atomic_xchg(&rdev->ih.lock, 1))
		return IRQ_NONE;

	rptr = rdev->ih.rptr;
	DRM_DEBUG("si_irq_process start: rptr %d, wptr %d\n", rptr, wptr);

	/* Order reading of wptr vs. reading of IH ring data */
	rmb();

	/* display interrupts */
	si_irq_ack(rdev);

	while (rptr != wptr) {
		/* wptr/rptr are in bytes! */
		ring_index = rptr / 4;
		src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
		src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
		ring_id = le32_to_cpu(rdev->ih.ring[ring_index + 2]) & 0xff;

		switch (src_id) {
		case 1: /* D1 vblank/vline */
			switch (src_data) {
			case 0: /* D1 vblank */
				if (!(rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT))
					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");

				if (rdev->irq.crtc_vblank_int[0]) {
					drm_handle_vblank(rdev->ddev, 0);
					rdev->pm.vblank_sync = true;
					wake_up(&rdev->irq.vblank_queue);
				}
				if (atomic_read(&rdev->irq.pflip[0]))
					radeon_crtc_handle_vblank(rdev, 0);
				rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
				DRM_DEBUG("IH: D1 vblank\n");

				break;
			case 1: /* D1 vline */
				if (!(rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT))
					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");

				rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
				DRM_DEBUG("IH: D1 vline\n");

				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 2: /* D2 vblank/vline */
			switch (src_data) {
			case 0: /* D2 vblank */
				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT))
					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");

				if (rdev->irq.crtc_vblank_int[1]) {
					drm_handle_vblank(rdev->ddev, 1);
					rdev->pm.vblank_sync = true;
					wake_up(&rdev->irq.vblank_queue);
				}
				if (atomic_read(&rdev->irq.pflip[1]))
					radeon_crtc_handle_vblank(rdev, 1);
				rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
				DRM_DEBUG("IH: D2 vblank\n");

				break;
			case 1: /* D2 vline */
				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT))
					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");

				rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
				DRM_DEBUG("IH: D2 vline\n");

				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 3: /* D3 vblank/vline */
			switch (src_data) {
			case 0: /* D3 vblank */
				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT))
					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");

				if (rdev->irq.crtc_vblank_int[2]) {
					drm_handle_vblank(rdev->ddev, 2);
					rdev->pm.vblank_sync = true;
					wake_up(&rdev->irq.vblank_queue);
				}
				if (atomic_read(&rdev->irq.pflip[2]))
					radeon_crtc_handle_vblank(rdev, 2);
				rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
				DRM_DEBUG("IH: D3 vblank\n");

				break;
			case 1: /* D3 vline */
				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT))
					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");

				rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
				DRM_DEBUG("IH: D3 vline\n");

				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 4: /* D4 vblank/vline */
			switch (src_data) {
			case 0: /* D4 vblank */
				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT))
					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");

				if (rdev->irq.crtc_vblank_int[3]) {
					drm_handle_vblank(rdev->ddev, 3);
					rdev->pm.vblank_sync = true;
					wake_up(&rdev->irq.vblank_queue);
				}
				if (atomic_read(&rdev->irq.pflip[3]))
					radeon_crtc_handle_vblank(rdev, 3);
				rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
				DRM_DEBUG("IH: D4 vblank\n");

				break;
			case 1: /* D4 vline */
				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT))
					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");

				rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
				DRM_DEBUG("IH: D4 vline\n");

				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 5: /* D5 vblank/vline */
			switch (src_data) {
			case 0: /* D5 vblank */
				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT))
					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");

				if (rdev->irq.crtc_vblank_int[4]) {
					drm_handle_vblank(rdev->ddev, 4);
					rdev->pm.vblank_sync = true;
					wake_up(&rdev->irq.vblank_queue);
				}
				if (atomic_read(&rdev->irq.pflip[4]))
					radeon_crtc_handle_vblank(rdev, 4);
				rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
				DRM_DEBUG("IH: D5 vblank\n");

				break;
			case 1: /* D5 vline */
				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT))
					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");

				rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
				DRM_DEBUG("IH: D5 vline\n");

				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 6: /* D6 vblank/vline */
			switch (src_data) {
			case 0: /* D6 vblank */
				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT))
					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");

				if (rdev->irq.crtc_vblank_int[5]) {
					drm_handle_vblank(rdev->ddev, 5);
					rdev->pm.vblank_sync = true;
					wake_up(&rdev->irq.vblank_queue);
				}
				if (atomic_read(&rdev->irq.pflip[5]))
					radeon_crtc_handle_vblank(rdev, 5);
				rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
				DRM_DEBUG("IH: D6 vblank\n");

				break;
			case 1: /* D6 vline */
				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT))
					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");

				rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
				DRM_DEBUG("IH: D6 vline\n");

				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 8: /* D1 page flip */
		case 10: /* D2 page flip */
		case 12: /* D3 page flip */
		case 14: /* D4 page flip */
		case 16: /* D5 page flip */
		case 18: /* D6 page flip */
			/* flip src_ids are even, 8..18 -> crtc 0..5 */
			DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1);
			if (radeon_use_pflipirq > 0)
				radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1);
			break;
		case 42: /* HPD hotplug */
			switch (src_data) {
			case 0:
				if (!(rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT))
					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");

				rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
				queue_hotplug = true;
				DRM_DEBUG("IH: HPD1\n");

				break;
			case 1:
				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT))
					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");

				rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
				queue_hotplug = true;
				DRM_DEBUG("IH: HPD2\n");

				break;
			case 2:
				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT))
					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");

				rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
				queue_hotplug = true;
				DRM_DEBUG("IH: HPD3\n");

				break;
			case 3:
				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT))
					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");

				rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
				queue_hotplug = true;
				DRM_DEBUG("IH: HPD4\n");

				break;
			case 4:
				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT))
					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");

				rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
				queue_hotplug = true;
				DRM_DEBUG("IH: HPD5\n");

				break;
			case 5:
				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT))
					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");

				rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
				queue_hotplug = true;
				DRM_DEBUG("IH: HPD6\n");

				break;
			/* src_data 6..11 are the corresponding HPD RX interrupts,
			 * serviced by the DP worker rather than the hotplug worker
			 */
			case 6:
				if (!(rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT))
					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");

				rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_RX_INTERRUPT;
				queue_dp = true;
				DRM_DEBUG("IH: HPD_RX 1\n");

				break;
			case 7:
				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT))
					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");

				rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;
				queue_dp = true;
				DRM_DEBUG("IH: HPD_RX 2\n");

				break;
			case 8:
				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT))
					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");

				rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;
				queue_dp = true;
				DRM_DEBUG("IH: HPD_RX 3\n");

				break;
			case 9:
				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT))
					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");

				rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;
				queue_dp = true;
				DRM_DEBUG("IH: HPD_RX 4\n");

				break;
			case 10:
				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT))
					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");

				rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;
				queue_dp = true;
				DRM_DEBUG("IH: HPD_RX 5\n");

				break;
			case 11:
				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT))
					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");

				rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;
				queue_dp = true;
				DRM_DEBUG("IH: HPD_RX 6\n");

				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 96: /* SRBM read error */
			DRM_ERROR("SRBM_READ_ERROR: 0x%x\n", RREG32(SRBM_READ_ERROR));
			WREG32(SRBM_INT_ACK, 0x1);
			break;
		case 124: /* UVD */
			DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
			radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
			break;
		case 146: /* VM protection fault */
		case 147:
			addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR);
			status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS);
			/* reset addr and status */
			WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
			if (addr == 0x0 && status == 0x0)
				break;
			dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
			dev_err(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
				addr);
			dev_err(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
				status);
			si_vm_decode_fault(rdev, status, addr);
			break;
		case 176: /* RINGID0 CP_INT */
			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
			break;
		case 177: /* RINGID1 CP_INT */
			radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
			break;
		case 178: /* RINGID2 CP_INT */
			radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
			break;
		case 181: /* CP EOP event */
			DRM_DEBUG("IH: CP EOP\n");
			switch (ring_id) {
			case 0:
				radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
				break;
			case 1:
				radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
				break;
			case 2:
				radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
				break;
			}
			break;
		case 224: /* DMA trap event */
			DRM_DEBUG("IH: DMA trap\n");
			radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
			break;
		case 230: /* thermal low to high */
			DRM_DEBUG("IH: thermal low to high\n");
			rdev->pm.dpm.thermal.high_to_low = false;
			queue_thermal = true;
			break;
		case 231: /* thermal high to low */
			DRM_DEBUG("IH: thermal high to low\n");
			rdev->pm.dpm.thermal.high_to_low = true;
			queue_thermal = true;
			break;
		case 233: /* GUI IDLE */
			DRM_DEBUG("IH: GUI idle\n");
			break;
		case 244: /* DMA1 trap event */
			DRM_DEBUG("IH: DMA1 trap\n");
			radeon_fence_process(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
			break;
		default:
			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
			break;
		}

		/* wptr/rptr are in bytes! */
		rptr += 16;
		rptr &= rdev->ih.ptr_mask;
		WREG32(IH_RB_RPTR, rptr);
	}
	/* schedule deferred work only after the ring has been drained */
	if (queue_dp)
		schedule_work(&rdev->dp_work);
	if (queue_hotplug)
		schedule_delayed_work(&rdev->hotplug_work, 0);
	if (queue_thermal && rdev->pm.dpm_enabled)
		schedule_work(&rdev->pm.dpm.thermal.work);
	rdev->ih.rptr = rptr;
	atomic_set(&rdev->ih.lock, 0);

	/* make sure wptr hasn't changed while processing */
	wptr = si_get_ih_wptr(rdev);
	if (wptr != rptr)
		goto restart_ih;

	return IRQ_HANDLED;
}
6872
Alex Deucher9b136d52012-03-20 17:18:23 -04006873/*
6874 * startup/shutdown callbacks
6875 */
Jérome Glissefa25c222016-03-18 16:58:30 +01006876static void si_uvd_init(struct radeon_device *rdev)
6877{
6878 int r;
6879
6880 if (!rdev->has_uvd)
6881 return;
6882
6883 r = radeon_uvd_init(rdev);
6884 if (r) {
6885 dev_err(rdev->dev, "failed UVD (%d) init.\n", r);
6886 /*
6887 * At this point rdev->uvd.vcpu_bo is NULL which trickles down
6888 * to early fails uvd_v2_2_resume() and thus nothing happens
6889 * there. So it is pointless to try to go through that code
6890 * hence why we disable uvd here.
6891 */
6892 rdev->has_uvd = 0;
6893 return;
6894 }
6895 rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
6896 r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX], 4096);
6897}
6898
6899static void si_uvd_start(struct radeon_device *rdev)
6900{
6901 int r;
6902
6903 if (!rdev->has_uvd)
6904 return;
6905
6906 r = uvd_v2_2_resume(rdev);
6907 if (r) {
6908 dev_err(rdev->dev, "failed UVD resume (%d).\n", r);
6909 goto error;
6910 }
6911 r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX);
6912 if (r) {
6913 dev_err(rdev->dev, "failed initializing UVD fences (%d).\n", r);
6914 goto error;
6915 }
6916 return;
6917
6918error:
6919 rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
6920}
6921
6922static void si_uvd_resume(struct radeon_device *rdev)
6923{
6924 struct radeon_ring *ring;
6925 int r;
6926
6927 if (!rdev->has_uvd || !rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size)
6928 return;
6929
6930 ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
Alex Deucher70a033d2016-08-23 10:07:28 -04006931 r = radeon_ring_init(rdev, ring, ring->ring_size, 0, PACKET0(UVD_NO_OP, 0));
Jérome Glissefa25c222016-03-18 16:58:30 +01006932 if (r) {
6933 dev_err(rdev->dev, "failed initializing UVD ring (%d).\n", r);
6934 return;
6935 }
6936 r = uvd_v1_0_init(rdev);
6937 if (r) {
6938 dev_err(rdev->dev, "failed initializing UVD (%d).\n", r);
6939 return;
6940 }
6941}
6942
Jérome Glissed18dd752016-03-18 16:58:34 +01006943static void si_vce_init(struct radeon_device *rdev)
6944{
6945 int r;
6946
6947 if (!rdev->has_vce)
6948 return;
6949
6950 r = radeon_vce_init(rdev);
6951 if (r) {
6952 dev_err(rdev->dev, "failed VCE (%d) init.\n", r);
6953 /*
6954 * At this point rdev->vce.vcpu_bo is NULL which trickles down
6955 * to early fails si_vce_start() and thus nothing happens
6956 * there. So it is pointless to try to go through that code
6957 * hence why we disable vce here.
6958 */
6959 rdev->has_vce = 0;
6960 return;
6961 }
6962 rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_obj = NULL;
6963 r600_ring_init(rdev, &rdev->ring[TN_RING_TYPE_VCE1_INDEX], 4096);
6964 rdev->ring[TN_RING_TYPE_VCE2_INDEX].ring_obj = NULL;
6965 r600_ring_init(rdev, &rdev->ring[TN_RING_TYPE_VCE2_INDEX], 4096);
6966}
6967
6968static void si_vce_start(struct radeon_device *rdev)
6969{
6970 int r;
6971
6972 if (!rdev->has_vce)
6973 return;
6974
6975 r = radeon_vce_resume(rdev);
6976 if (r) {
6977 dev_err(rdev->dev, "failed VCE resume (%d).\n", r);
6978 goto error;
6979 }
6980 r = vce_v1_0_resume(rdev);
6981 if (r) {
6982 dev_err(rdev->dev, "failed VCE resume (%d).\n", r);
6983 goto error;
6984 }
6985 r = radeon_fence_driver_start_ring(rdev, TN_RING_TYPE_VCE1_INDEX);
6986 if (r) {
6987 dev_err(rdev->dev, "failed initializing VCE1 fences (%d).\n", r);
6988 goto error;
6989 }
6990 r = radeon_fence_driver_start_ring(rdev, TN_RING_TYPE_VCE2_INDEX);
6991 if (r) {
6992 dev_err(rdev->dev, "failed initializing VCE2 fences (%d).\n", r);
6993 goto error;
6994 }
6995 return;
6996
6997error:
6998 rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_size = 0;
6999 rdev->ring[TN_RING_TYPE_VCE2_INDEX].ring_size = 0;
7000}
7001
7002static void si_vce_resume(struct radeon_device *rdev)
7003{
7004 struct radeon_ring *ring;
7005 int r;
7006
7007 if (!rdev->has_vce || !rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_size)
7008 return;
7009
7010 ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];
7011 r = radeon_ring_init(rdev, ring, ring->ring_size, 0, VCE_CMD_NO_OP);
7012 if (r) {
7013 dev_err(rdev->dev, "failed initializing VCE1 ring (%d).\n", r);
7014 return;
7015 }
7016 ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];
7017 r = radeon_ring_init(rdev, ring, ring->ring_size, 0, VCE_CMD_NO_OP);
7018 if (r) {
7019 dev_err(rdev->dev, "failed initializing VCE1 ring (%d).\n", r);
7020 return;
7021 }
7022 r = vce_v1_0_init(rdev);
7023 if (r) {
7024 dev_err(rdev->dev, "failed initializing VCE (%d).\n", r);
7025 return;
7026 }
7027}
7028
/**
 * si_startup - program the hardware into a working state
 *
 * @rdev: radeon_device pointer
 *
 * Shared bring-up path used by both si_init() and si_resume().  The
 * order is significant: PCIe link/ASPM, VRAM scratch, MC, GART and
 * RLC/writeback buffers come first, then the fence driver for every
 * ring, then interrupts, and only after that the CP/DMA rings, the
 * UVD/VCE rings, the IB pool, the VM manager and audio.
 * Returns 0 on success or a negative error code on the first
 * failure; nothing is unwound here, the callers handle teardown.
 */
static int si_startup(struct radeon_device *rdev)
{
	struct radeon_ring *ring;
	int r;

	/* enable pcie gen2/3 link */
	si_pcie_gen3_enable(rdev);
	/* enable aspm */
	si_program_aspm(rdev);

	/* scratch needs to be initialized before MC */
	r = r600_vram_scratch_init(rdev);
	if (r)
		return r;

	si_mc_program(rdev);

	/* NOTE(review): MC firmware is only loaded here when dpm is off —
	 * presumably the dpm path loads it itself; confirm before relying
	 * on this. */
	if (!rdev->pm.dpm_enabled) {
		r = si_mc_load_microcode(rdev);
		if (r) {
			DRM_ERROR("Failed to load MC firmware!\n");
			return r;
		}
	}

	r = si_pcie_gart_enable(rdev);
	if (r)
		return r;
	si_gpu_init(rdev);

	/* allocate rlc buffers */
	if (rdev->family == CHIP_VERDE) {
		rdev->rlc.reg_list = verde_rlc_save_restore_register_list;
		rdev->rlc.reg_list_size =
			(u32)ARRAY_SIZE(verde_rlc_save_restore_register_list);
	}
	rdev->rlc.cs_data = si_cs_data;
	r = sumo_rlc_init(rdev);
	if (r) {
		DRM_ERROR("Failed to init rlc BOs!\n");
		return r;
	}

	/* allocate wb buffer */
	r = radeon_wb_init(rdev);
	if (r)
		return r;

	/* start the fence driver on every ring we intend to use */
	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
		return r;
	}

	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
		return r;
	}

	/* UVD/VCE start failures are not fatal: the helpers log the error
	 * and clear the ring size so the resume step below is skipped */
	si_uvd_start(rdev);
	si_vce_start(rdev);

	/* Enable IRQ */
	if (!rdev->irq.installed) {
		r = radeon_irq_kms_init(rdev);
		if (r)
			return r;
	}

	r = si_irq_init(rdev);
	if (r) {
		DRM_ERROR("radeon: IH init failed (%d).\n", r);
		radeon_irq_kms_fini(rdev);
		return r;
	}
	si_irq_set(rdev);

	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
			     RADEON_CP_PACKET2);
	if (r)
		return r;

	ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP1_RPTR_OFFSET,
			     RADEON_CP_PACKET2);
	if (r)
		return r;

	ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP2_RPTR_OFFSET,
			     RADEON_CP_PACKET2);
	if (r)
		return r;

	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
	r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
			     DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
	if (r)
		return r;

	ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
	r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
			     DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
	if (r)
		return r;

	r = si_cp_load_microcode(rdev);
	if (r)
		return r;
	r = si_cp_resume(rdev);
	if (r)
		return r;

	r = cayman_dma_resume(rdev);
	if (r)
		return r;

	si_uvd_resume(rdev);
	si_vce_resume(rdev);

	r = radeon_ib_pool_init(rdev);
	if (r) {
		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
		return r;
	}

	r = radeon_vm_manager_init(rdev);
	if (r) {
		dev_err(rdev->dev, "vm manager initialization failed (%d).\n", r);
		return r;
	}

	r = radeon_audio_init(rdev);
	if (r)
		return r;

	return 0;
}
7187
7188int si_resume(struct radeon_device *rdev)
7189{
7190 int r;
7191
7192 /* Do not reset GPU before posting, on rv770 hw unlike on r500 hw,
7193 * posting will perform necessary task to bring back GPU into good
7194 * shape.
7195 */
7196 /* post card */
7197 atom_asic_init(rdev->mode_info.atom_context);
7198
Alex Deucher205996c2013-03-01 17:08:42 -05007199 /* init golden registers */
7200 si_init_golden_registers(rdev);
7201
Alex Deucherbc6a6292014-02-25 12:01:28 -05007202 if (rdev->pm.pm_method == PM_METHOD_DPM)
7203 radeon_pm_resume(rdev);
Alex Deucher6c7bcce2013-12-18 14:07:14 -05007204
Alex Deucher9b136d52012-03-20 17:18:23 -04007205 rdev->accel_working = true;
7206 r = si_startup(rdev);
7207 if (r) {
7208 DRM_ERROR("si startup failed on resume\n");
7209 rdev->accel_working = false;
7210 return r;
7211 }
7212
7213 return r;
7214
7215}
7216
/**
 * si_suspend - quiesce the hardware for suspend
 *
 * @rdev: radeon_device pointer
 *
 * Stops the engines in dependency order: pm and audio first, then the
 * VM manager, the command processor and DMA engines, the UVD/VCE
 * blocks (when present), power/clock gating, interrupts, writeback
 * and finally the GART.  Always returns 0.
 */
int si_suspend(struct radeon_device *rdev)
{
	radeon_pm_suspend(rdev);
	radeon_audio_fini(rdev);
	radeon_vm_manager_fini(rdev);
	/* halt the command processor and both DMA engines */
	si_cp_enable(rdev, false);
	cayman_dma_stop(rdev);
	if (rdev->has_uvd) {
		uvd_v1_0_fini(rdev);
		radeon_uvd_suspend(rdev);
	}
	if (rdev->has_vce)
		radeon_vce_suspend(rdev);
	/* tear down powergating before clockgating, then interrupts */
	si_fini_pg(rdev);
	si_fini_cg(rdev);
	si_irq_suspend(rdev);
	radeon_wb_disable(rdev);
	si_pcie_gart_disable(rdev);
	return 0;
}
7237
7238/* Plan is to move initialization in that function and use
7239 * helper function so that radeon_device_init pretty much
7240 * do nothing more than calling asic specific function. This
7241 * should also allow to remove a bunch of callback function
7242 * like vram_info.
7243 */
/**
 * si_init - one-time asic init at driver load
 *
 * @rdev: radeon_device pointer
 *
 * Reads and validates the (ATOM) BIOS, POSTs the card if needed,
 * initializes clocks, fences, the memory controller and the buffer
 * manager, loads microcode, sets up all ring descriptors and the IH
 * ring, then runs the common si_startup() bring-up.  A si_startup()
 * failure only disables acceleration (teardown is done here); a
 * missing MC ucode is fatal.  Returns 0 on success or a negative
 * error code.
 */
int si_init(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	int r;

	/* Read BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	/* Must be an ATOMBIOS */
	if (!rdev->is_atom_bios) {
		dev_err(rdev->dev, "Expecting atombios for cayman GPU\n");
		return -EINVAL;
	}
	r = radeon_atombios_init(rdev);
	if (r)
		return r;

	/* Post card if necessary */
	if (!radeon_card_posted(rdev)) {
		if (!rdev->bios) {
			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
			return -EINVAL;
		}
		DRM_INFO("GPU not posted. posting now...\n");
		atom_asic_init(rdev->mode_info.atom_context);
	}
	/* init golden registers */
	si_init_golden_registers(rdev);
	/* Initialize scratch registers */
	si_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);

	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;

	/* initialize memory controller */
	r = si_mc_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;

	/* load microcode only if some of it is not already present */
	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
	    !rdev->rlc_fw || !rdev->mc_fw) {
		r = si_init_microcode(rdev);
		if (r) {
			DRM_ERROR("Failed to load firmware!\n");
			return r;
		}
	}

	/* Initialize power management */
	radeon_pm_init(rdev);

	/* describe the GFX, two compute and two DMA rings; the ring
	 * buffers themselves are created later in si_startup() */
	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	ring->ring_obj = NULL;
	r600_ring_init(rdev, ring, 1024 * 1024);

	ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
	ring->ring_obj = NULL;
	r600_ring_init(rdev, ring, 1024 * 1024);

	ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
	ring->ring_obj = NULL;
	r600_ring_init(rdev, ring, 1024 * 1024);

	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
	ring->ring_obj = NULL;
	r600_ring_init(rdev, ring, 64 * 1024);

	ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
	ring->ring_obj = NULL;
	r600_ring_init(rdev, ring, 64 * 1024);

	si_uvd_init(rdev);
	si_vce_init(rdev);

	rdev->ih.ring_obj = NULL;
	r600_ih_ring_init(rdev, 64 * 1024);

	r = r600_pcie_gart_init(rdev);
	if (r)
		return r;

	rdev->accel_working = true;
	r = si_startup(rdev);
	if (r) {
		dev_err(rdev->dev, "disabling GPU acceleration\n");
		si_cp_fini(rdev);
		cayman_dma_fini(rdev);
		si_irq_fini(rdev);
		sumo_rlc_fini(rdev);
		radeon_wb_fini(rdev);
		radeon_ib_pool_fini(rdev);
		radeon_vm_manager_fini(rdev);
		radeon_irq_kms_fini(rdev);
		si_pcie_gart_fini(rdev);
		rdev->accel_working = false;
	}

	/* Don't start up if the MC ucode is missing.
	 * The default clocks and voltages before the MC ucode
	 * is loaded are not sufficient for advanced operations.
	 */
	if (!rdev->mc_fw) {
		DRM_ERROR("radeon: MC ucode required for NI+.\n");
		return -EINVAL;
	}

	return 0;
}
7364
/**
 * si_fini - final asic teardown at driver unload
 *
 * @rdev: radeon_device pointer
 *
 * Reverses si_init()/si_startup(): stops pm, the CP and DMA engines,
 * power/clock gating, interrupts and the RLC, releases writeback, VM
 * manager, IB pool, UVD/VCE (when present), GART, scratch, GEM, fence
 * and buffer-manager state, and frees the cached BIOS image.
 */
void si_fini(struct radeon_device *rdev)
{
	radeon_pm_fini(rdev);
	si_cp_fini(rdev);
	cayman_dma_fini(rdev);
	si_fini_pg(rdev);
	si_fini_cg(rdev);
	si_irq_fini(rdev);
	sumo_rlc_fini(rdev);
	radeon_wb_fini(rdev);
	radeon_vm_manager_fini(rdev);
	radeon_ib_pool_fini(rdev);
	radeon_irq_kms_fini(rdev);
	if (rdev->has_uvd) {
		uvd_v1_0_fini(rdev);
		radeon_uvd_fini(rdev);
	}
	if (rdev->has_vce)
		radeon_vce_fini(rdev);
	si_pcie_gart_fini(rdev);
	r600_vram_scratch_fini(rdev);
	radeon_gem_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;
}
7393
Marek Olšák6759a0a2012-08-09 16:34:17 +02007394/**
Alex Deucherd0418892013-01-24 10:35:23 -05007395 * si_get_gpu_clock_counter - return GPU clock counter snapshot
Marek Olšák6759a0a2012-08-09 16:34:17 +02007396 *
7397 * @rdev: radeon_device pointer
7398 *
7399 * Fetches a GPU clock counter snapshot (SI).
7400 * Returns the 64 bit clock counter snapshot.
7401 */
Alex Deucherd0418892013-01-24 10:35:23 -05007402uint64_t si_get_gpu_clock_counter(struct radeon_device *rdev)
Marek Olšák6759a0a2012-08-09 16:34:17 +02007403{
7404 uint64_t clock;
7405
7406 mutex_lock(&rdev->gpu_clock_mutex);
7407 WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1);
7408 clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) |
Jérome Glisse3cf8bb12016-03-16 12:56:45 +01007409 ((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
Marek Olšák6759a0a2012-08-09 16:34:17 +02007410 mutex_unlock(&rdev->gpu_clock_mutex);
7411 return clock;
7412}
Christian König2539eb02013-04-08 12:41:34 +02007413
/**
 * si_set_uvd_clocks - program the UVD PLL (UPLL) for the requested clocks
 *
 * @rdev: radeon_device pointer
 * @vclk: requested UVD video clock (NOTE(review): presumably in the
 *        same 10 kHz units the shared divider helper expects — confirm)
 * @dclk: requested UVD decode clock
 *
 * First routes vclk/dclk to the bypass clock and puts the UPLL in
 * bypass mode; a request of 0/0 stops there, leaving the PLL
 * bypassed.  Otherwise the dividers are computed by
 * radeon_uvd_calc_upll_dividers() and the reset/program/settle/
 * un-reset sequence below is executed in order, ending with VCLK and
 * DCLK switched back to the PLL outputs.
 * Returns 0 on success or a negative error code from the divider
 * calculation or the PLL control requests.
 */
int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
{
	unsigned fb_div = 0, vclk_div = 0, dclk_div = 0;
	int r;

	/* bypass vclk and dclk with bclk */
	WREG32_P(CG_UPLL_FUNC_CNTL_2,
		 VCLK_SRC_SEL(1) | DCLK_SRC_SEL(1),
		 ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));

	/* put PLL in bypass mode */
	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~UPLL_BYPASS_EN_MASK);

	if (!vclk || !dclk) {
		/* keep the Bypass mode */
		return 0;
	}

	r = radeon_uvd_calc_upll_dividers(rdev, vclk, dclk, 125000, 250000,
					  16384, 0x03FFFFFF, 0, 128, 5,
					  &fb_div, &vclk_div, &dclk_div);
	if (r)
		return r;

	/* set RESET_ANTI_MUX to 0 */
	WREG32_P(CG_UPLL_FUNC_CNTL_5, 0, ~RESET_ANTI_MUX_MASK);

	/* set VCO_MODE to 1 */
	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_VCO_MODE_MASK, ~UPLL_VCO_MODE_MASK);

	/* disable sleep mode */
	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_SLEEP_MASK);

	/* deassert UPLL_RESET */
	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);

	mdelay(1);

	r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
	if (r)
		return r;

	/* assert UPLL_RESET again */
	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_RESET_MASK, ~UPLL_RESET_MASK);

	/* disable spread spectrum. */
	WREG32_P(CG_UPLL_SPREAD_SPECTRUM, 0, ~SSEN_MASK);

	/* set feedback divider */
	WREG32_P(CG_UPLL_FUNC_CNTL_3, UPLL_FB_DIV(fb_div), ~UPLL_FB_DIV_MASK);

	/* set ref divider to 0 */
	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_REF_DIV_MASK);

	/* NOTE(review): threshold selects a spare UPLL control bit for low
	 * feedback dividers — magic value from AMD, intent not documented
	 * here */
	if (fb_div < 307200)
		WREG32_P(CG_UPLL_FUNC_CNTL_4, 0, ~UPLL_SPARE_ISPARE9);
	else
		WREG32_P(CG_UPLL_FUNC_CNTL_4, UPLL_SPARE_ISPARE9, ~UPLL_SPARE_ISPARE9);

	/* set PDIV_A and PDIV_B */
	WREG32_P(CG_UPLL_FUNC_CNTL_2,
		 UPLL_PDIV_A(vclk_div) | UPLL_PDIV_B(dclk_div),
		 ~(UPLL_PDIV_A_MASK | UPLL_PDIV_B_MASK));

	/* give the PLL some time to settle */
	mdelay(15);

	/* deassert PLL_RESET */
	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);

	mdelay(15);

	/* switch from bypass mode to normal mode */
	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_BYPASS_EN_MASK);

	r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
	if (r)
		return r;

	/* switch VCLK and DCLK selection */
	WREG32_P(CG_UPLL_FUNC_CNTL_2,
		 VCLK_SRC_SEL(2) | DCLK_SRC_SEL(2),
		 ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));

	mdelay(100);

	return 0;
}
Alex Deucherb9d305d2013-02-14 17:16:51 -05007502
/**
 * si_pcie_gen3_enable - try to raise the PCIe link to gen2/gen3 speed
 *
 * @rdev: radeon_device pointer
 *
 * No-op for devices on the root bus, IGPs, non-PCIe parts, when the
 * radeon.pcie_gen2=0 module parameter is set, or when neither gen2
 * nor gen3 is supported.  For gen3-capable links not already running
 * at gen3 the function re-runs link equalization (up to 10 attempts)
 * before programming the target link speed into the GPU's Link
 * Control 2 register and triggering a software speed change.
 */
static void si_pcie_gen3_enable(struct radeon_device *rdev)
{
	struct pci_dev *root = rdev->pdev->bus->self;
	int bridge_pos, gpu_pos;
	u32 speed_cntl, mask, current_data_rate;
	int ret, i;
	u16 tmp16;

	if (pci_is_root_bus(rdev->pdev->bus))
		return;

	if (radeon_pcie_gen2 == 0)
		return;

	if (rdev->flags & RADEON_IS_IGP)
		return;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return;

	ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
	if (ret != 0)
		return;

	if (!(mask & (DRM_PCIE_SPEED_50 | DRM_PCIE_SPEED_80)))
		return;

	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
	current_data_rate = (speed_cntl & LC_CURRENT_DATA_RATE_MASK) >>
		LC_CURRENT_DATA_RATE_SHIFT;
	if (mask & DRM_PCIE_SPEED_80) {
		if (current_data_rate == 2) {
			DRM_INFO("PCIE gen 3 link speeds already enabled\n");
			return;
		}
		DRM_INFO("enabling PCIE gen 3 link speeds, disable with radeon.pcie_gen2=0\n");
	} else if (mask & DRM_PCIE_SPEED_50) {
		if (current_data_rate == 1) {
			DRM_INFO("PCIE gen 2 link speeds already enabled\n");
			return;
		}
		DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
	}

	/* need the PCIe capability on both the bridge and the GPU */
	bridge_pos = pci_pcie_cap(root);
	if (!bridge_pos)
		return;

	gpu_pos = pci_pcie_cap(rdev->pdev);
	if (!gpu_pos)
		return;

	if (mask & DRM_PCIE_SPEED_80) {
		/* re-try equalization if gen3 is not already enabled */
		if (current_data_rate != 2) {
			u16 bridge_cfg, gpu_cfg;
			u16 bridge_cfg2, gpu_cfg2;
			u32 max_lw, current_lw, tmp;

			/* save the HAWD (hw autonomous width disable) state
			 * on both ends so it can be restored after each
			 * equalization attempt */
			pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
			pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);

			tmp16 = bridge_cfg | PCI_EXP_LNKCTL_HAWD;
			pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);

			tmp16 = gpu_cfg | PCI_EXP_LNKCTL_HAWD;
			pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);

			tmp = RREG32_PCIE(PCIE_LC_STATUS1);
			max_lw = (tmp & LC_DETECTED_LINK_WIDTH_MASK) >> LC_DETECTED_LINK_WIDTH_SHIFT;
			current_lw = (tmp & LC_OPERATING_LINK_WIDTH_MASK) >> LC_OPERATING_LINK_WIDTH_SHIFT;

			/* renegotiate back up to the maximum detected link
			 * width if the link came up narrower */
			if (current_lw < max_lw) {
				tmp = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
				if (tmp & LC_RENEGOTIATION_SUPPORT) {
					tmp &= ~(LC_LINK_WIDTH_MASK | LC_UPCONFIGURE_DIS);
					tmp |= (max_lw << LC_LINK_WIDTH_SHIFT);
					tmp |= LC_UPCONFIGURE_SUPPORT | LC_RENEGOTIATE_EN | LC_RECONFIG_NOW;
					WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, tmp);
				}
			}

			/* up to 10 equalization attempts, stopping early
			 * once the device reports no pending transactions */
			for (i = 0; i < 10; i++) {
				/* check status */
				pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_DEVSTA, &tmp16);
				if (tmp16 & PCI_EXP_DEVSTA_TRPND)
					break;

				pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
				pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);

				pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &bridge_cfg2);
				pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &gpu_cfg2);

				tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
				tmp |= LC_SET_QUIESCE;
				WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);

				tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
				tmp |= LC_REDO_EQ;
				WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);

				mdelay(100);

				/* linkctl */
				pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &tmp16);
				tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
				tmp16 |= (bridge_cfg & PCI_EXP_LNKCTL_HAWD);
				pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);

				pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &tmp16);
				tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
				tmp16 |= (gpu_cfg & PCI_EXP_LNKCTL_HAWD);
				pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);

				/* linkctl2 — restore the saved enter-compliance
				 * (bit 4) and transmit-margin (bits 9-11)
				 * fields; NOTE(review): magic bit positions,
				 * confirm against the PCIe LNKCTL2 layout */
				pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &tmp16);
				tmp16 &= ~((1 << 4) | (7 << 9));
				tmp16 |= (bridge_cfg2 & ((1 << 4) | (7 << 9)));
				pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, tmp16);

				pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
				tmp16 &= ~((1 << 4) | (7 << 9));
				tmp16 |= (gpu_cfg2 & ((1 << 4) | (7 << 9)));
				pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);

				tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
				tmp &= ~LC_SET_QUIESCE;
				WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
			}
		}
	}

	/* set the link speed */
	speed_cntl |= LC_FORCE_EN_SW_SPEED_CHANGE | LC_FORCE_DIS_HW_SPEED_CHANGE;
	speed_cntl &= ~LC_FORCE_DIS_SW_SPEED_CHANGE;
	WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);

	/* program the target link speed into LNKCTL2[3:0] */
	pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
	tmp16 &= ~0xf;
	if (mask & DRM_PCIE_SPEED_80)
		tmp16 |= 3; /* gen3 */
	else if (mask & DRM_PCIE_SPEED_50)
		tmp16 |= 2; /* gen2 */
	else
		tmp16 |= 1; /* gen1 */
	pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);

	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
	speed_cntl |= LC_INITIATE_LINK_SPEED_CHANGE;
	WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);

	/* wait for the hardware to clear the initiate bit, i.e. for the
	 * speed change to complete (bounded by usec_timeout) */
	for (i = 0; i < rdev->usec_timeout; i++) {
		speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
		if ((speed_cntl & LC_INITIATE_LINK_SPEED_CHANGE) == 0)
			break;
		udelay(1);
	}
}
7662
/**
 * si_program_aspm - configure PCIe ASPM (active state power management)
 *
 * @rdev: radeon_device pointer
 *
 * Tunes the link controller for L0s/L1 entry and, when the upstream
 * bridge advertises clock power management (PCI_EXP_LNKCAP_CLKPM),
 * allows PLL powerdown in L1/L23 and reroutes several bypass clocks.
 * Every register update is write-on-change (compare against the value
 * read back first).  Skipped entirely when the radeon.aspm=0 module
 * parameter is set or on non-PCIe parts.  The local disable_* flags
 * are compile-time policy knobs, all currently off.
 */
static void si_program_aspm(struct radeon_device *rdev)
{
	u32 data, orig;
	bool disable_l0s = false, disable_l1 = false, disable_plloff_in_l1 = false;
	bool disable_clkreq = false;

	if (radeon_aspm == 0)
		return;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return;

	orig = data = RREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL);
	data &= ~LC_XMIT_N_FTS_MASK;
	data |= LC_XMIT_N_FTS(0x24) | LC_XMIT_N_FTS_OVERRIDE_EN;
	if (orig != data)
		WREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL, data);

	orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL3);
	data |= LC_GO_TO_RECOVERY;
	if (orig != data)
		WREG32_PCIE_PORT(PCIE_LC_CNTL3, data);

	orig = data = RREG32_PCIE(PCIE_P_CNTL);
	data |= P_IGNORE_EDB_ERR;
	if (orig != data)
		WREG32_PCIE(PCIE_P_CNTL, data);

	orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL);
	data &= ~(LC_L0S_INACTIVITY_MASK | LC_L1_INACTIVITY_MASK);
	data |= LC_PMI_TO_L1_DIS;
	if (!disable_l0s)
		data |= LC_L0S_INACTIVITY(7);

	if (!disable_l1) {
		data |= LC_L1_INACTIVITY(7);
		data &= ~LC_PMI_TO_L1_DIS;
		if (orig != data)
			WREG32_PCIE_PORT(PCIE_LC_CNTL, data);

		if (!disable_plloff_in_l1) {
			bool clk_req_support;

			/* allow both PHY PLLs to power down in the OFF and
			 * TXS2 states */
			orig = data = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_0);
			data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
			data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
			if (orig != data)
				WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_0, data);

			orig = data = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_1);
			data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
			data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
			if (orig != data)
				WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_1, data);

			orig = data = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_0);
			data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
			data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
			if (orig != data)
				WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_0, data);

			orig = data = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_1);
			data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
			data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
			if (orig != data)
				WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_1, data);

			/* clear the PLL ramp-up times on everything except
			 * Oland and Hainan */
			if ((rdev->family != CHIP_OLAND) && (rdev->family != CHIP_HAINAN)) {
				orig = data = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_0);
				data &= ~PLL_RAMP_UP_TIME_0_MASK;
				if (orig != data)
					WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_0, data);

				orig = data = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_1);
				data &= ~PLL_RAMP_UP_TIME_1_MASK;
				if (orig != data)
					WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_1, data);

				orig = data = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_2);
				data &= ~PLL_RAMP_UP_TIME_2_MASK;
				if (orig != data)
					WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_2, data);

				orig = data = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_3);
				data &= ~PLL_RAMP_UP_TIME_3_MASK;
				if (orig != data)
					WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_3, data);

				orig = data = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_0);
				data &= ~PLL_RAMP_UP_TIME_0_MASK;
				if (orig != data)
					WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_0, data);

				orig = data = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_1);
				data &= ~PLL_RAMP_UP_TIME_1_MASK;
				if (orig != data)
					WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_1, data);

				orig = data = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_2);
				data &= ~PLL_RAMP_UP_TIME_2_MASK;
				if (orig != data)
					WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_2, data);

				orig = data = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_3);
				data &= ~PLL_RAMP_UP_TIME_3_MASK;
				if (orig != data)
					WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_3, data);
			}
			orig = data = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
			data &= ~LC_DYN_LANES_PWR_STATE_MASK;
			data |= LC_DYN_LANES_PWR_STATE(3);
			if (orig != data)
				WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, data);

			/* LS2 exit time: only Oland/Hainan get a non-zero
			 * value here */
			orig = data = RREG32_PIF_PHY0(PB0_PIF_CNTL);
			data &= ~LS2_EXIT_TIME_MASK;
			if ((rdev->family == CHIP_OLAND) || (rdev->family == CHIP_HAINAN))
				data |= LS2_EXIT_TIME(5);
			if (orig != data)
				WREG32_PIF_PHY0(PB0_PIF_CNTL, data);

			orig = data = RREG32_PIF_PHY1(PB1_PIF_CNTL);
			data &= ~LS2_EXIT_TIME_MASK;
			if ((rdev->family == CHIP_OLAND) || (rdev->family == CHIP_HAINAN))
				data |= LS2_EXIT_TIME(5);
			if (orig != data)
				WREG32_PIF_PHY1(PB1_PIF_CNTL, data);

			/* CLKREQ# is only usable when the upstream bridge
			 * advertises clock power management */
			if (!disable_clkreq &&
			    !pci_is_root_bus(rdev->pdev->bus)) {
				struct pci_dev *root = rdev->pdev->bus->self;
				u32 lnkcap;

				clk_req_support = false;
				pcie_capability_read_dword(root, PCI_EXP_LNKCAP, &lnkcap);
				if (lnkcap & PCI_EXP_LNKCAP_CLKPM)
					clk_req_support = true;
			} else {
				clk_req_support = false;
			}

			if (clk_req_support) {
				orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL2);
				data |= LC_ALLOW_PDWN_IN_L1 | LC_ALLOW_PDWN_IN_L23;
				if (orig != data)
					WREG32_PCIE_PORT(PCIE_LC_CNTL2, data);

				orig = data = RREG32(THM_CLK_CNTL);
				data &= ~(CMON_CLK_SEL_MASK | TMON_CLK_SEL_MASK);
				data |= CMON_CLK_SEL(1) | TMON_CLK_SEL(1);
				if (orig != data)
					WREG32(THM_CLK_CNTL, data);

				orig = data = RREG32(MISC_CLK_CNTL);
				data &= ~(DEEP_SLEEP_CLK_SEL_MASK | ZCLK_SEL_MASK);
				data |= DEEP_SLEEP_CLK_SEL(1) | ZCLK_SEL(1);
				if (orig != data)
					WREG32(MISC_CLK_CNTL, data);

				orig = data = RREG32(CG_CLKPIN_CNTL);
				data &= ~BCLK_AS_XCLK;
				if (orig != data)
					WREG32(CG_CLKPIN_CNTL, data);

				orig = data = RREG32(CG_CLKPIN_CNTL_2);
				data &= ~FORCE_BIF_REFCLK_EN;
				if (orig != data)
					WREG32(CG_CLKPIN_CNTL_2, data);

				orig = data = RREG32(MPLL_BYPASSCLK_SEL);
				data &= ~MPLL_CLKOUT_SEL_MASK;
				data |= MPLL_CLKOUT_SEL(4);
				if (orig != data)
					WREG32(MPLL_BYPASSCLK_SEL, data);

				orig = data = RREG32(SPLL_CNTL_MODE);
				data &= ~SPLL_REFCLK_SEL_MASK;
				if (orig != data)
					WREG32(SPLL_CNTL_MODE, data);
			}
		}
	} else {
		if (orig != data)
			WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
	}

	orig = data = RREG32_PCIE(PCIE_CNTL2);
	data |= SLV_MEM_LS_EN | MST_MEM_LS_EN | REPLAY_MEM_LS_EN;
	if (orig != data)
		WREG32_PCIE(PCIE_CNTL2, data);

	/* drop the L0s inactivity timeout again if the link trained with
	 * the maximum N_FTS value in both directions */
	if (!disable_l0s) {
		data = RREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL);
		if((data & LC_N_FTS_MASK) == LC_N_FTS_MASK) {
			data = RREG32_PCIE(PCIE_LC_STATUS1);
			if ((data & LC_REVERSE_XMIT) && (data & LC_REVERSE_RCVR)) {
				orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL);
				data &= ~LC_L0S_INACTIVITY_MASK;
				if (orig != data)
					WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
			}
		}
	}
}
Christian Königb7af6302015-05-11 22:01:49 +02007867
7868int si_vce_send_vcepll_ctlreq(struct radeon_device *rdev)
7869{
Jérome Glisse3cf8bb12016-03-16 12:56:45 +01007870 unsigned i;
Christian Königb7af6302015-05-11 22:01:49 +02007871
Jérome Glisse3cf8bb12016-03-16 12:56:45 +01007872 /* make sure VCEPLL_CTLREQ is deasserted */
7873 WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~UPLL_CTLREQ_MASK);
Christian Königb7af6302015-05-11 22:01:49 +02007874
Jérome Glisse3cf8bb12016-03-16 12:56:45 +01007875 mdelay(10);
Christian Königb7af6302015-05-11 22:01:49 +02007876
Jérome Glisse3cf8bb12016-03-16 12:56:45 +01007877 /* assert UPLL_CTLREQ */
7878 WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, UPLL_CTLREQ_MASK, ~UPLL_CTLREQ_MASK);
Christian Königb7af6302015-05-11 22:01:49 +02007879
Jérome Glisse3cf8bb12016-03-16 12:56:45 +01007880 /* wait for CTLACK and CTLACK2 to get asserted */
7881 for (i = 0; i < 100; ++i) {
7882 uint32_t mask = UPLL_CTLACK_MASK | UPLL_CTLACK2_MASK;
7883 if ((RREG32_SMC(CG_VCEPLL_FUNC_CNTL) & mask) == mask)
7884 break;
7885 mdelay(10);
7886 }
Christian Königb7af6302015-05-11 22:01:49 +02007887
Jérome Glisse3cf8bb12016-03-16 12:56:45 +01007888 /* deassert UPLL_CTLREQ */
7889 WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~UPLL_CTLREQ_MASK);
Christian Königb7af6302015-05-11 22:01:49 +02007890
Jérome Glisse3cf8bb12016-03-16 12:56:45 +01007891 if (i == 100) {
7892 DRM_ERROR("Timeout setting UVD clocks!\n");
7893 return -ETIMEDOUT;
7894 }
Christian Königb7af6302015-05-11 22:01:49 +02007895
Jérome Glisse3cf8bb12016-03-16 12:56:45 +01007896 return 0;
Christian Königb7af6302015-05-11 22:01:49 +02007897}
7898
/**
 * si_set_vce_clocks - program the VCE PLL for the requested clocks
 *
 * @rdev: radeon_device pointer
 * @evclk: requested VCE encode clock (0 powers the PLL down)
 * @ecclk: requested VCE core clock (0 powers the PLL down)
 *
 * Runs the full VCE PLL bring-up sequence: source both clocks from bclk,
 * put the PLL into bypass, compute dividers, toggle sleep/reset around
 * divider programming with settle delays, handshake each step with the
 * controller via si_vce_send_vcepll_ctlreq(), and finally switch the
 * clock muxes onto the PLL output.  If either requested clock is zero
 * the PLL is left in bypass and put to sleep instead.
 *
 * NOTE(review): the exact units of @evclk/@ecclk and the meaning of the
 * constant arguments to radeon_uvd_calc_upll_dividers() (125000/250000
 * look like VCO frequency limits) are not visible here - confirm against
 * that helper and the callers.
 *
 * Returns 0 on success, or a negative error code from divider
 * calculation or the PLL control-request handshake.
 */
int si_set_vce_clocks(struct radeon_device *rdev, u32 evclk, u32 ecclk)
{
	unsigned fb_div = 0, evclk_div = 0, ecclk_div = 0;
	int r;

	/* bypass evclk and ecclk with bclk */
	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL_2,
		     EVCLK_SRC_SEL(1) | ECCLK_SRC_SEL(1),
		     ~(EVCLK_SRC_SEL_MASK | ECCLK_SRC_SEL_MASK));

	/* put PLL in bypass mode */
	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, VCEPLL_BYPASS_EN_MASK,
		     ~VCEPLL_BYPASS_EN_MASK);

	if (!evclk || !ecclk) {
		/* keep the Bypass mode, put PLL to sleep */
		WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, VCEPLL_SLEEP_MASK,
			     ~VCEPLL_SLEEP_MASK);
		return 0;
	}

	/* compute feedback and post dividers for the requested clocks */
	r = radeon_uvd_calc_upll_dividers(rdev, evclk, ecclk, 125000, 250000,
					  16384, 0x03FFFFFF, 0, 128, 5,
					  &fb_div, &evclk_div, &ecclk_div);
	if (r)
		return r;

	/* set RESET_ANTI_MUX to 0 */
	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL_5, 0, ~RESET_ANTI_MUX_MASK);

	/* set VCO_MODE to 1 */
	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, VCEPLL_VCO_MODE_MASK,
		     ~VCEPLL_VCO_MODE_MASK);

	/* toggle VCEPLL_SLEEP to 1 then back to 0 */
	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, VCEPLL_SLEEP_MASK,
		     ~VCEPLL_SLEEP_MASK);
	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~VCEPLL_SLEEP_MASK);

	/* deassert VCEPLL_RESET */
	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~VCEPLL_RESET_MASK);

	mdelay(1);

	/* handshake: let the controller latch the pre-divider state */
	r = si_vce_send_vcepll_ctlreq(rdev);
	if (r)
		return r;

	/* assert VCEPLL_RESET again */
	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, VCEPLL_RESET_MASK, ~VCEPLL_RESET_MASK);

	/* disable spread spectrum. */
	WREG32_SMC_P(CG_VCEPLL_SPREAD_SPECTRUM, 0, ~SSEN_MASK);

	/* set feedback divider */
	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL_3, VCEPLL_FB_DIV(fb_div), ~VCEPLL_FB_DIV_MASK);

	/* set ref divider to 0 */
	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~VCEPLL_REF_DIV_MASK);

	/* set PDIV_A and PDIV_B */
	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL_2,
		     VCEPLL_PDIV_A(evclk_div) | VCEPLL_PDIV_B(ecclk_div),
		     ~(VCEPLL_PDIV_A_MASK | VCEPLL_PDIV_B_MASK));

	/* give the PLL some time to settle */
	mdelay(15);

	/* deassert PLL_RESET */
	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~VCEPLL_RESET_MASK);

	mdelay(15);

	/* switch from bypass mode to normal mode */
	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~VCEPLL_BYPASS_EN_MASK);

	/* handshake again now that the PLL is configured and running */
	r = si_vce_send_vcepll_ctlreq(rdev);
	if (r)
		return r;

	/* switch VCLK and DCLK selection */
	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL_2,
		     EVCLK_SRC_SEL(16) | ECCLK_SRC_SEL(16),
		     ~(EVCLK_SRC_SEL_MASK | ECCLK_SRC_SEL_MASK));

	/* let the new clock source stabilize before callers use VCE */
	mdelay(100);

	return 0;
}