/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/firmware.h>
#include <linux/platform_device.h>
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_mode.h"
#include "r600d.h"
#include "atom.h"
#include "avivod.h"

#define PFP_UCODE_SIZE 576
#define PM4_UCODE_SIZE 1792
#define RLC_UCODE_SIZE 768
#define R700_PFP_UCODE_SIZE 848
#define R700_PM4_UCODE_SIZE 1360
#define R700_RLC_UCODE_SIZE 1024
#define EVERGREEN_PFP_UCODE_SIZE 1120
#define EVERGREEN_PM4_UCODE_SIZE 1376
#define EVERGREEN_RLC_UCODE_SIZE 768

/* Firmware Names */
MODULE_FIRMWARE("radeon/R600_pfp.bin");
MODULE_FIRMWARE("radeon/R600_me.bin");
MODULE_FIRMWARE("radeon/RV610_pfp.bin");
MODULE_FIRMWARE("radeon/RV610_me.bin");
MODULE_FIRMWARE("radeon/RV630_pfp.bin");
MODULE_FIRMWARE("radeon/RV630_me.bin");
MODULE_FIRMWARE("radeon/RV620_pfp.bin");
MODULE_FIRMWARE("radeon/RV620_me.bin");
MODULE_FIRMWARE("radeon/RV635_pfp.bin");
MODULE_FIRMWARE("radeon/RV635_me.bin");
MODULE_FIRMWARE("radeon/RV670_pfp.bin");
MODULE_FIRMWARE("radeon/RV670_me.bin");
MODULE_FIRMWARE("radeon/RS780_pfp.bin");
MODULE_FIRMWARE("radeon/RS780_me.bin");
MODULE_FIRMWARE("radeon/RV770_pfp.bin");
MODULE_FIRMWARE("radeon/RV770_me.bin");
MODULE_FIRMWARE("radeon/RV730_pfp.bin");
MODULE_FIRMWARE("radeon/RV730_me.bin");
MODULE_FIRMWARE("radeon/RV710_pfp.bin");
MODULE_FIRMWARE("radeon/RV710_me.bin");
MODULE_FIRMWARE("radeon/R600_rlc.bin");
MODULE_FIRMWARE("radeon/R700_rlc.bin");
MODULE_FIRMWARE("radeon/CEDAR_pfp.bin");
MODULE_FIRMWARE("radeon/CEDAR_me.bin");
MODULE_FIRMWARE("radeon/CEDAR_rlc.bin");
MODULE_FIRMWARE("radeon/REDWOOD_pfp.bin");
MODULE_FIRMWARE("radeon/REDWOOD_me.bin");
MODULE_FIRMWARE("radeon/REDWOOD_rlc.bin");
MODULE_FIRMWARE("radeon/JUNIPER_pfp.bin");
MODULE_FIRMWARE("radeon/JUNIPER_me.bin");
MODULE_FIRMWARE("radeon/JUNIPER_rlc.bin");
MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin");
MODULE_FIRMWARE("radeon/CYPRESS_me.bin");
MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin");

int r600_debugfs_mc_info_init(struct radeon_device *rdev);

/* r600,rv610,rv630,rv620,rv635,rv670 */
int r600_mc_wait_for_idle(struct radeon_device *rdev);
void r600_gpu_init(struct radeon_device *rdev);
void r600_fini(struct radeon_device *rdev);
void r600_irq_disable(struct radeon_device *rdev);

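/* Select the power state and clock mode to request for a power management
 * action.  The power state array is sorted low to high with the default
 * state first; IGP/R600 parts step between whole power states, other parts
 * keep one power state and step between its clock modes.  The selection
 * lands in rdev->pm.requested_power_state_index and
 * rdev->pm.requested_clock_mode_index.
 */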
void r600_get_power_state(struct radeon_device *rdev,
			  enum radeon_pm_action action)
{
	int i;

	rdev->pm.can_upclock = true;
	rdev->pm.can_downclock = true;

	/* power state array is low to high, default is first */
	if ((rdev->flags & RADEON_IS_IGP) || (rdev->family == CHIP_R600)) {
		int min_power_state_index = 0;

		if (rdev->pm.num_power_states > 2)
			min_power_state_index = 1;

		switch (action) {
		case PM_ACTION_MINIMUM:
			rdev->pm.requested_power_state_index = min_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.can_downclock = false;
			break;
		case PM_ACTION_DOWNCLOCK:
			if (rdev->pm.current_power_state_index == min_power_state_index) {
				rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
				rdev->pm.can_downclock = false;
			} else {
				if (rdev->pm.active_crtc_count > 1) {
					for (i = 0; i < rdev->pm.num_power_states; i++) {
						if (rdev->pm.power_state[i].flags & RADEON_PM_SINGLE_DISPLAY_ONLY)
							continue;
						else if (i >= rdev->pm.current_power_state_index) {
							rdev->pm.requested_power_state_index =
								rdev->pm.current_power_state_index;
							break;
						} else {
							rdev->pm.requested_power_state_index = i;
							break;
						}
					}
				} else
					rdev->pm.requested_power_state_index =
						rdev->pm.current_power_state_index - 1;
			}
			rdev->pm.requested_clock_mode_index = 0;
			break;
		case PM_ACTION_UPCLOCK:
			if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
				rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
				rdev->pm.can_upclock = false;
			} else {
				if (rdev->pm.active_crtc_count > 1) {
					for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
						if (rdev->pm.power_state[i].flags & RADEON_PM_SINGLE_DISPLAY_ONLY)
							continue;
						else if (i <= rdev->pm.current_power_state_index) {
							rdev->pm.requested_power_state_index =
								rdev->pm.current_power_state_index;
							break;
						} else {
							rdev->pm.requested_power_state_index = i;
							break;
						}
					}
				} else
					rdev->pm.requested_power_state_index =
						rdev->pm.current_power_state_index + 1;
			}
			rdev->pm.requested_clock_mode_index = 0;
			break;
		case PM_ACTION_DEFAULT:
			rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.can_upclock = false;
			break;
		case PM_ACTION_NONE:
		default:
			DRM_ERROR("Requested mode for undefined action\n");
			return;
		}
	} else {
		/* XXX select a power state based on AC/DC, single/dual-head, etc. */
		/* for now just select the first power state and switch between clock modes */
		/* power state array is low to high, default is first (0) */
		if (rdev->pm.active_crtc_count > 1) {
			rdev->pm.requested_power_state_index = -1;
			/* start at 1 as we don't want the default mode */
			for (i = 1; i < rdev->pm.num_power_states; i++) {
				if (rdev->pm.power_state[i].flags & RADEON_PM_SINGLE_DISPLAY_ONLY)
					continue;
				else if ((rdev->pm.power_state[i].type == POWER_STATE_TYPE_PERFORMANCE) ||
					 (rdev->pm.power_state[i].type == POWER_STATE_TYPE_BATTERY)) {
					rdev->pm.requested_power_state_index = i;
					break;
				}
			}
			/* if nothing selected, grab the default state. */
			if (rdev->pm.requested_power_state_index == -1)
				rdev->pm.requested_power_state_index = 0;
		} else
			rdev->pm.requested_power_state_index = 1;

		switch (action) {
		case PM_ACTION_MINIMUM:
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.can_downclock = false;
			break;
		case PM_ACTION_DOWNCLOCK:
			if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
				if (rdev->pm.current_clock_mode_index == 0) {
					rdev->pm.requested_clock_mode_index = 0;
					rdev->pm.can_downclock = false;
				} else
					rdev->pm.requested_clock_mode_index =
						rdev->pm.current_clock_mode_index - 1;
			} else {
				rdev->pm.requested_clock_mode_index = 0;
				rdev->pm.can_downclock = false;
			}
			break;
		case PM_ACTION_UPCLOCK:
			if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
				if (rdev->pm.current_clock_mode_index ==
				    (rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1)) {
					rdev->pm.requested_clock_mode_index = rdev->pm.current_clock_mode_index;
					rdev->pm.can_upclock = false;
				} else
					rdev->pm.requested_clock_mode_index =
						rdev->pm.current_clock_mode_index + 1;
			} else {
				rdev->pm.requested_clock_mode_index =
					rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1;
				rdev->pm.can_upclock = false;
			}
			break;
		case PM_ACTION_DEFAULT:
			rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.can_upclock = false;
			break;
		case PM_ACTION_NONE:
		default:
			DRM_ERROR("Requested mode for undefined action\n");
			return;
		}
	}

	DRM_INFO("Requested: e: %d m: %d p: %d\n",
		 rdev->pm.power_state[rdev->pm.requested_power_state_index].
		 clock_info[rdev->pm.requested_clock_mode_index].sclk,
		 rdev->pm.power_state[rdev->pm.requested_power_state_index].
		 clock_info[rdev->pm.requested_clock_mode_index].mclk,
		 rdev->pm.power_state[rdev->pm.requested_power_state_index].
		 pcie_lanes);
}

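/* Apply the clocks chosen by r600_get_power_state().  With static_switch
 * the reclock is bracketed by radeon_pm_prepare()/radeon_pm_finish();
 * otherwise the engine clock change is synchronized with vblank.  Memory
 * reclocking is currently compiled out below.
 */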
void r600_set_power_state(struct radeon_device *rdev, bool static_switch)
{
	u32 sclk, mclk;

	if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
	    (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
		return;

	if (radeon_gui_idle(rdev)) {

		sclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
			clock_info[rdev->pm.requested_clock_mode_index].sclk;
		if (sclk > rdev->clock.default_sclk)
			sclk = rdev->clock.default_sclk;

		mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
			clock_info[rdev->pm.requested_clock_mode_index].mclk;
		if (mclk > rdev->clock.default_mclk)
			mclk = rdev->clock.default_mclk;

		/* voltage, pcie lanes, etc. */
		radeon_pm_misc(rdev);

		if (static_switch) {
			radeon_pm_prepare(rdev);
			/* set engine clock */
			if (sclk != rdev->pm.current_sclk) {
				radeon_set_engine_clock(rdev, sclk);
				rdev->pm.current_sclk = sclk;
				DRM_INFO("Setting: e: %d\n", sclk);
			}
#if 0
			/* set memory clock */
			if (rdev->asic->set_memory_clock && (mclk != rdev->pm.current_mclk)) {
				radeon_set_memory_clock(rdev, mclk);
				rdev->pm.current_mclk = mclk;
				DRM_INFO("Setting: m: %d\n", mclk);
			}
#endif
			radeon_pm_finish(rdev);
		} else {
			/* set engine clock */
			if (sclk != rdev->pm.current_sclk) {
				radeon_sync_with_vblank(rdev);
				radeon_pm_debug_check_in_vbl(rdev, false);
				radeon_set_engine_clock(rdev, sclk);
				radeon_pm_debug_check_in_vbl(rdev, true);
				rdev->pm.current_sclk = sclk;
				DRM_INFO("Setting: e: %d\n", sclk);
			}

#if 0
			/* set memory clock */
			if (rdev->asic->set_memory_clock && (mclk != rdev->pm.current_mclk)) {
				radeon_sync_with_vblank(rdev);
				radeon_pm_debug_check_in_vbl(rdev, false);
				radeon_pm_prepare(rdev);
				radeon_set_memory_clock(rdev, mclk);
				radeon_pm_finish(rdev);
				radeon_pm_debug_check_in_vbl(rdev, true);
				rdev->pm.current_mclk = mclk;
				DRM_INFO("Setting: m: %d\n", mclk);
			}
#endif
		}

		rdev->pm.current_power_state_index = rdev->pm.requested_power_state_index;
		rdev->pm.current_clock_mode_index = rdev->pm.requested_clock_mode_index;
	} else
		DRM_INFO("GUI not idle!!!\n");
}

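/* Placeholder: r600 voltage/PCIe lane adjustments are not implemented yet. */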
void r600_pm_misc(struct radeon_device *rdev)
{

}

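/* Report whether the graphics pipeline is idle (GRBM GUI_ACTIVE clear). */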
bool r600_gui_idle(struct radeon_device *rdev)
{
	if (RREG32(GRBM_STATUS) & GUI_ACTIVE)
		return false;
	else
		return true;
}

/* hpd for digital panel detect/disconnect */
bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
	bool connected = false;

	if (ASIC_IS_DCE3(rdev)) {
		switch (hpd) {
		case RADEON_HPD_1:
			if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_2:
			if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_3:
			if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_4:
			if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
			/* DCE 3.2 */
		case RADEON_HPD_5:
			if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_6:
			if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		default:
			break;
		}
	} else {
		switch (hpd) {
		case RADEON_HPD_1:
			if (RREG32(DC_HOT_PLUG_DETECT1_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_2:
			if (RREG32(DC_HOT_PLUG_DETECT2_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_3:
			if (RREG32(DC_HOT_PLUG_DETECT3_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		default:
			break;
		}
	}
	return connected;
}

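/* Flip the HPD interrupt polarity to match the current sense state so the
 * next interrupt fires on a connect/disconnect transition.
 */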
void r600_hpd_set_polarity(struct radeon_device *rdev,
			   enum radeon_hpd_id hpd)
{
	u32 tmp;
	bool connected = r600_hpd_sense(rdev, hpd);

	if (ASIC_IS_DCE3(rdev)) {
		switch (hpd) {
		case RADEON_HPD_1:
			tmp = RREG32(DC_HPD1_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD1_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_2:
			tmp = RREG32(DC_HPD2_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD2_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_3:
			tmp = RREG32(DC_HPD3_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD3_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_4:
			tmp = RREG32(DC_HPD4_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD4_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_5:
			tmp = RREG32(DC_HPD5_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD5_INT_CONTROL, tmp);
			break;
			/* DCE 3.2 */
		case RADEON_HPD_6:
			tmp = RREG32(DC_HPD6_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD6_INT_CONTROL, tmp);
			break;
		default:
			break;
		}
	} else {
		switch (hpd) {
		case RADEON_HPD_1:
			tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_2:
			tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_3:
			tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
			break;
		default:
			break;
		}
	}
}

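/* Enable the HPD pins used by the connectors and flag their interrupt
 * sources; DCE3 parts also get the connection/RX interrupt timers
 * programmed.
 */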
void r600_hpd_init(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;

	if (ASIC_IS_DCE3(rdev)) {
		u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa);
		if (ASIC_IS_DCE32(rdev))
			tmp |= DC_HPDx_EN;

		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HPD1_CONTROL, tmp);
				rdev->irq.hpd[0] = true;
				break;
			case RADEON_HPD_2:
				WREG32(DC_HPD2_CONTROL, tmp);
				rdev->irq.hpd[1] = true;
				break;
			case RADEON_HPD_3:
				WREG32(DC_HPD3_CONTROL, tmp);
				rdev->irq.hpd[2] = true;
				break;
			case RADEON_HPD_4:
				WREG32(DC_HPD4_CONTROL, tmp);
				rdev->irq.hpd[3] = true;
				break;
				/* DCE 3.2 */
			case RADEON_HPD_5:
				WREG32(DC_HPD5_CONTROL, tmp);
				rdev->irq.hpd[4] = true;
				break;
			case RADEON_HPD_6:
				WREG32(DC_HPD6_CONTROL, tmp);
				rdev->irq.hpd[5] = true;
				break;
			default:
				break;
			}
		}
	} else {
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				rdev->irq.hpd[0] = true;
				break;
			case RADEON_HPD_2:
				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				rdev->irq.hpd[1] = true;
				break;
			case RADEON_HPD_3:
				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				rdev->irq.hpd[2] = true;
				break;
			default:
				break;
			}
		}
	}
	if (rdev->irq.installed)
		r600_irq_set(rdev);
}

void r600_hpd_fini(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;

	if (ASIC_IS_DCE3(rdev)) {
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HPD1_CONTROL, 0);
				rdev->irq.hpd[0] = false;
				break;
			case RADEON_HPD_2:
				WREG32(DC_HPD2_CONTROL, 0);
				rdev->irq.hpd[1] = false;
				break;
			case RADEON_HPD_3:
				WREG32(DC_HPD3_CONTROL, 0);
				rdev->irq.hpd[2] = false;
				break;
			case RADEON_HPD_4:
				WREG32(DC_HPD4_CONTROL, 0);
				rdev->irq.hpd[3] = false;
				break;
				/* DCE 3.2 */
			case RADEON_HPD_5:
				WREG32(DC_HPD5_CONTROL, 0);
				rdev->irq.hpd[4] = false;
				break;
			case RADEON_HPD_6:
				WREG32(DC_HPD6_CONTROL, 0);
				rdev->irq.hpd[5] = false;
				break;
			default:
				break;
			}
		}
	} else {
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, 0);
				rdev->irq.hpd[0] = false;
				break;
			case RADEON_HPD_2:
				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, 0);
				rdev->irq.hpd[1] = false;
				break;
			case RADEON_HPD_3:
				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, 0);
				rdev->irq.hpd[2] = false;
				break;
			default:
				break;
			}
		}
	}
}

/*
 * R600 PCIE GART
 */
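/* Invalidate the page-table TLB for GART context 0 and poll
 * VM_CONTEXT0_REQUEST_RESPONSE until the request completes (a response
 * of 2 means the flush failed).
 */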
void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	/* flush hdp cache so updates hit vram */
	WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);

	WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (rdev->mc.gtt_end - 1) >> 12);
	WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
		tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
		if (tmp == 2) {
			printk(KERN_WARNING "[drm] r600 flush TLB failed\n");
			return;
		}
		if (tmp) {
			return;
		}
		udelay(1);
	}
}

int r600_pcie_gart_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.table.vram.robj) {
		WARN(1, "R600 PCIE GART already initialized.\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	if (r)
		return r;
	rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
	return radeon_gart_table_vram_alloc(rdev);
}

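/* Point VM context 0 at the GART page table and enable it: pin the table
 * in VRAM, program the L2/L1 TLB controls, set the page table range and
 * fault address, then flush the TLB.
 */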
int r600_pcie_gart_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int r, i;

	if (rdev->gart.table.vram.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	radeon_gart_restore(rdev);

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
			(u32)(rdev->dummy_page.addr >> 12));
	for (i = 1; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	r600_pcie_gart_tlb_flush(rdev);
	rdev->gart.ready = true;
	return 0;
}

void r600_pcie_gart_disable(struct radeon_device *rdev)
{
	u32 tmp;
	int i, r;

	/* Disable all tables */
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	/* Disable L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
	       EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup L1 TLB control */
	tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	if (rdev->gart.table.vram.robj) {
		r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(rdev->gart.table.vram.robj);
			radeon_bo_unpin(rdev->gart.table.vram.robj);
			radeon_bo_unreserve(rdev->gart.table.vram.robj);
		}
	}
}

void r600_pcie_gart_fini(struct radeon_device *rdev)
{
	radeon_gart_fini(rdev);
	r600_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
}

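/* AGP setup: program the same L2/TLB controls as the GART path but leave
 * all VM contexts disabled.
 */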
void r600_agp_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int i;

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
}

int r600_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(R_000E50_SRBM_STATUS) & 0x3F00;
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -1;
}

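/* Program the memory controller's view of VRAM/AGP: stop the MC while
 * display is quiesced, set the system and FB aperture registers, then
 * restore display and disable the VGA renderer.
 */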
static void r600_mc_program(struct radeon_device *rdev)
{
	struct rv515_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}
	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);

	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	/* Lockout access through VGA aperture (doesn't exist before R600) */
	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
	/* Update configuration */
	if (rdev->flags & RADEON_IS_AGP) {
		if (rdev->mc.vram_start < rdev->mc.gtt_start) {
			/* VRAM before AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.vram_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.gtt_end >> 12);
		} else {
			/* VRAM after AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.gtt_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.vram_end >> 12);
		}
	} else {
		WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12);
		WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12);
	}
	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(MC_VM_FB_LOCATION, tmp);
	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
	WREG32(HDP_NONSURFACE_INFO, (2 << 7));
	WREG32(HDP_NONSURFACE_SIZE, rdev->mc.mc_vram_size | 0x3FF);
	if (rdev->flags & RADEON_IS_AGP) {
		WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22);
		WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22);
		WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
	} else {
		WREG32(MC_VM_AGP_BASE, 0);
		WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
		WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
	}
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	rv515_mc_resume(rdev, &save);
	/* we need to own VRAM, so turn off the VGA renderer here
	 * to stop it from overwriting our objects */
	rv515_vga_render_disable(rdev);
}

/**
 * r600_vram_gtt_location - try to find VRAM & GTT location
 * @rdev: radeon device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * This function tries to place VRAM at the same address in the GPU
 * address space as in the CPU (PCI) address space, as some GPUs seem to
 * have issues when it is reprogrammed to a different address space.
 *
 * If there is not enough space to fit the invisible VRAM after the
 * aperture, the VRAM size is limited to the aperture.
 *
 * If AGP is used, VRAM is placed adjacent to the AGP aperture, as the GPU
 * needs to see both as one contiguous region so that it can be programmed
 * to catch accesses outside them (weird GPU policy, see ??).
 *
 * This function never fails; in the worst case VRAM or GTT is limited.
 *
 * Note: GTT start, end and size should be initialized before calling this
 * function on AGP platforms.
 */
void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
	u64 size_bf, size_af;

	if (mc->mc_vram_size > 0xE0000000) {
		/* leave room for at least 512M GTT */
		dev_warn(rdev->dev, "limiting VRAM\n");
		mc->real_vram_size = 0xE0000000;
		mc->mc_vram_size = 0xE0000000;
	}
	if (rdev->flags & RADEON_IS_AGP) {
		size_bf = mc->gtt_start;
		size_af = 0xFFFFFFFF - mc->gtt_end + 1;
		if (size_bf > size_af) {
			if (mc->mc_vram_size > size_bf) {
				dev_warn(rdev->dev, "limiting VRAM\n");
				mc->real_vram_size = size_bf;
				mc->mc_vram_size = size_bf;
			}
			mc->vram_start = mc->gtt_start - mc->mc_vram_size;
		} else {
			if (mc->mc_vram_size > size_af) {
				dev_warn(rdev->dev, "limiting VRAM\n");
				mc->real_vram_size = size_af;
				mc->mc_vram_size = size_af;
			}
			mc->vram_start = mc->gtt_end;
		}
		mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
		dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
				mc->mc_vram_size >> 20, mc->vram_start,
				mc->vram_end, mc->real_vram_size >> 20);
	} else {
		u64 base = 0;
		if (rdev->flags & RADEON_IS_IGP)
			base = (RREG32(MC_VM_FB_LOCATION) & 0xFFFF) << 24;
		radeon_vram_location(rdev, &rdev->mc, base);
		radeon_gtt_location(rdev, mc);
	}
}

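/* Read back the VRAM configuration (DDR width from RAMCFG/CHMAP, size
 * from CONFIG_MEMSIZE) and lay out the VRAM/GTT address ranges.
 */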
int r600_mc_init(struct radeon_device *rdev)
{
	u32 tmp;
	int chansize, numchan;

	/* Get VRAM information */
	rdev->mc.vram_is_ddr = true;
	tmp = RREG32(RAMCFG);
	if (tmp & CHANSIZE_OVERRIDE) {
		chansize = 16;
	} else if (tmp & CHANSIZE_MASK) {
		chansize = 64;
	} else {
		chansize = 32;
	}
	tmp = RREG32(CHMAP);
	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
	case 0:
	default:
		numchan = 1;
		break;
	case 1:
		numchan = 2;
		break;
	case 2:
		numchan = 4;
		break;
	case 3:
		numchan = 8;
		break;
	}
	rdev->mc.vram_width = numchan * chansize;
	/* Could the aperture size report 0? */
	rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
	rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
	/* Setup GPU memory space */
	rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
	rdev->mc.visible_vram_size = rdev->mc.aper_size;
	r600_vram_gtt_location(rdev, &rdev->mc);

	if (rdev->flags & RADEON_IS_IGP)
		rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
	radeon_update_bandwidth_info(rdev);
	return 0;
}

/* We don't check whether the GPU really needs a reset; we simply do the
 * reset.  It's up to the caller to determine if the GPU needs one.  We
 * might add a helper function to check that.
 */
int r600_gpu_soft_reset(struct radeon_device *rdev)
{
	struct rv515_mc_save save;
	u32 grbm_busy_mask = S_008010_VC_BUSY(1) | S_008010_VGT_BUSY_NO_DMA(1) |
				S_008010_VGT_BUSY(1) | S_008010_TA03_BUSY(1) |
				S_008010_TC_BUSY(1) | S_008010_SX_BUSY(1) |
				S_008010_SH_BUSY(1) | S_008010_SPI03_BUSY(1) |
				S_008010_SMX_BUSY(1) | S_008010_SC_BUSY(1) |
				S_008010_PA_BUSY(1) | S_008010_DB03_BUSY(1) |
				S_008010_CR_BUSY(1) | S_008010_CB03_BUSY(1) |
				S_008010_GUI_ACTIVE(1);
	u32 grbm2_busy_mask = S_008014_SPI0_BUSY(1) | S_008014_SPI1_BUSY(1) |
			S_008014_SPI2_BUSY(1) | S_008014_SPI3_BUSY(1) |
			S_008014_TA0_BUSY(1) | S_008014_TA1_BUSY(1) |
			S_008014_TA2_BUSY(1) | S_008014_TA3_BUSY(1) |
			S_008014_DB0_BUSY(1) | S_008014_DB1_BUSY(1) |
			S_008014_DB2_BUSY(1) | S_008014_DB3_BUSY(1) |
			S_008014_CB0_BUSY(1) | S_008014_CB1_BUSY(1) |
			S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1);
	u32 tmp;

	dev_info(rdev->dev, "GPU softreset\n");
	dev_info(rdev->dev, "  R_008010_GRBM_STATUS=0x%08X\n",
		RREG32(R_008010_GRBM_STATUS));
	dev_info(rdev->dev, "  R_008014_GRBM_STATUS2=0x%08X\n",
		RREG32(R_008014_GRBM_STATUS2));
	dev_info(rdev->dev, "  R_000E50_SRBM_STATUS=0x%08X\n",
		RREG32(R_000E50_SRBM_STATUS));
	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	/* Disable CP parsing/prefetching */
	WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
	/* Check if any of the rendering block is busy and reset it */
	if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) ||
	    (RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) {
		tmp = S_008020_SOFT_RESET_CR(1) |
			S_008020_SOFT_RESET_DB(1) |
			S_008020_SOFT_RESET_CB(1) |
			S_008020_SOFT_RESET_PA(1) |
			S_008020_SOFT_RESET_SC(1) |
			S_008020_SOFT_RESET_SMX(1) |
			S_008020_SOFT_RESET_SPI(1) |
			S_008020_SOFT_RESET_SX(1) |
			S_008020_SOFT_RESET_SH(1) |
			S_008020_SOFT_RESET_TC(1) |
			S_008020_SOFT_RESET_TA(1) |
			S_008020_SOFT_RESET_VC(1) |
			S_008020_SOFT_RESET_VGT(1);
		dev_info(rdev->dev, "  R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(R_008020_GRBM_SOFT_RESET, tmp);
		RREG32(R_008020_GRBM_SOFT_RESET);
		mdelay(15);
		WREG32(R_008020_GRBM_SOFT_RESET, 0);
	}
	/* Reset CP (we always reset CP) */
	tmp = S_008020_SOFT_RESET_CP(1);
	dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
	WREG32(R_008020_GRBM_SOFT_RESET, tmp);
	RREG32(R_008020_GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(R_008020_GRBM_SOFT_RESET, 0);
	/* Wait a little for things to settle down */
	mdelay(1);
	dev_info(rdev->dev, "  R_008010_GRBM_STATUS=0x%08X\n",
		RREG32(R_008010_GRBM_STATUS));
	dev_info(rdev->dev, "  R_008014_GRBM_STATUS2=0x%08X\n",
		RREG32(R_008014_GRBM_STATUS2));
	dev_info(rdev->dev, "  R_000E50_SRBM_STATUS=0x%08X\n",
		RREG32(R_000E50_SRBM_STATUS));
	rv515_mc_resume(rdev, &save);
	return 0;
}

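/* Lockup check: if the GUI is busy, push two NOPs on the ring and test
 * whether the CP read pointer still advances.
 */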
bool r600_gpu_is_lockup(struct radeon_device *rdev)
{
	u32 srbm_status;
	u32 grbm_status;
	u32 grbm_status2;
	int r;

	srbm_status = RREG32(R_000E50_SRBM_STATUS);
	grbm_status = RREG32(R_008010_GRBM_STATUS);
	grbm_status2 = RREG32(R_008014_GRBM_STATUS2);
	if (!G_008010_GUI_ACTIVE(grbm_status)) {
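		/* rdev->config is a union, so the r300 lockup tracking
		 * state doubles as the r600 one here */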
		r100_gpu_lockup_update(&rdev->config.r300.lockup, &rdev->cp);
		return false;
	}
	/* force CP activities */
	r = radeon_ring_lock(rdev, 2);
	if (!r) {
		/* PACKET2 NOP */
		radeon_ring_write(rdev, 0x80000000);
		radeon_ring_write(rdev, 0x80000000);
		radeon_ring_unlock_commit(rdev);
	}
	rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
	return r100_gpu_cp_is_lockup(rdev, &rdev->config.r300.lockup, &rdev->cp);
}

int r600_asic_reset(struct radeon_device *rdev)
{
	return r600_gpu_soft_reset(rdev);
}

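/* Build the 2-bit-per-pipe backend map: swizzle the tile pipes and assign
 * each one the next enabled render backend in round-robin order.
 */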
static u32 r600_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
					     u32 num_backends,
					     u32 backend_disable_mask)
{
	u32 backend_map = 0;
	u32 enabled_backends_mask;
	u32 enabled_backends_count;
	u32 cur_pipe;
	u32 swizzle_pipe[R6XX_MAX_PIPES];
	u32 cur_backend;
	u32 i;

	if (num_tile_pipes > R6XX_MAX_PIPES)
		num_tile_pipes = R6XX_MAX_PIPES;
	if (num_tile_pipes < 1)
		num_tile_pipes = 1;
	if (num_backends > R6XX_MAX_BACKENDS)
		num_backends = R6XX_MAX_BACKENDS;
	if (num_backends < 1)
		num_backends = 1;

	enabled_backends_mask = 0;
	enabled_backends_count = 0;
	for (i = 0; i < R6XX_MAX_BACKENDS; ++i) {
		if (((backend_disable_mask >> i) & 1) == 0) {
			enabled_backends_mask |= (1 << i);
			++enabled_backends_count;
		}
		if (enabled_backends_count == num_backends)
			break;
	}

	if (enabled_backends_count == 0) {
		enabled_backends_mask = 1;
		enabled_backends_count = 1;
	}

	if (enabled_backends_count != num_backends)
		num_backends = enabled_backends_count;

	memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R6XX_MAX_PIPES);
	switch (num_tile_pipes) {
	case 1:
		swizzle_pipe[0] = 0;
		break;
	case 2:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		break;
	case 3:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		swizzle_pipe[2] = 2;
		break;
	case 4:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		swizzle_pipe[2] = 2;
		swizzle_pipe[3] = 3;
		break;
	case 5:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		swizzle_pipe[2] = 2;
		swizzle_pipe[3] = 3;
		swizzle_pipe[4] = 4;
		break;
	case 6:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 2;
		swizzle_pipe[2] = 4;
		swizzle_pipe[3] = 5;
		swizzle_pipe[4] = 1;
		swizzle_pipe[5] = 3;
		break;
	case 7:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 2;
		swizzle_pipe[2] = 4;
		swizzle_pipe[3] = 6;
		swizzle_pipe[4] = 1;
		swizzle_pipe[5] = 3;
		swizzle_pipe[6] = 5;
		break;
	case 8:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 2;
		swizzle_pipe[2] = 4;
		swizzle_pipe[3] = 6;
		swizzle_pipe[4] = 1;
		swizzle_pipe[5] = 3;
		swizzle_pipe[6] = 5;
		swizzle_pipe[7] = 7;
		break;
	}

	cur_backend = 0;
	for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
		while (((1 << cur_backend) & enabled_backends_mask) == 0)
			cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;

		backend_map |= (u32)(((cur_backend & 3) << (swizzle_pipe[cur_pipe] * 2)));

		cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;
	}

	return backend_map;
}

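/* Population count: number of bits set in val. */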
int r600_count_pipe_bits(uint32_t val)
{
	int i, ret = 0;

	for (i = 0; i < 32; i++) {
		ret += val & 1;
		val >>= 1;
	}
	return ret;
}

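/* One-time setup of the 3D engine: per-family limits, tiling and backend
 * configuration, SQ/GPR/thread resource splits, and the various default
 * render state registers.
 */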
void r600_gpu_init(struct radeon_device *rdev)
{
	u32 tiling_config;
	u32 ramcfg;
	u32 backend_map;
	u32 cc_rb_backend_disable;
	u32 cc_gc_shader_pipe_config;
	u32 tmp;
	int i, j;
	u32 sq_config;
	u32 sq_gpr_resource_mgmt_1 = 0;
	u32 sq_gpr_resource_mgmt_2 = 0;
	u32 sq_thread_resource_mgmt = 0;
	u32 sq_stack_resource_mgmt_1 = 0;
	u32 sq_stack_resource_mgmt_2 = 0;

	/* FIXME: implement */
	switch (rdev->family) {
	case CHIP_R600:
		rdev->config.r600.max_pipes = 4;
		rdev->config.r600.max_tile_pipes = 8;
		rdev->config.r600.max_simds = 4;
		rdev->config.r600.max_backends = 4;
		rdev->config.r600.max_gprs = 256;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 256;
		rdev->config.r600.max_hw_contexts = 8;
		rdev->config.r600.max_gs_threads = 16;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 2;
		break;
	case CHIP_RV630:
	case CHIP_RV635:
		rdev->config.r600.max_pipes = 2;
		rdev->config.r600.max_tile_pipes = 2;
		rdev->config.r600.max_simds = 3;
		rdev->config.r600.max_backends = 1;
		rdev->config.r600.max_gprs = 128;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 128;
		rdev->config.r600.max_hw_contexts = 8;
		rdev->config.r600.max_gs_threads = 4;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 2;
		break;
	case CHIP_RV610:
	case CHIP_RV620:
	case CHIP_RS780:
	case CHIP_RS880:
		rdev->config.r600.max_pipes = 1;
		rdev->config.r600.max_tile_pipes = 1;
		rdev->config.r600.max_simds = 2;
		rdev->config.r600.max_backends = 1;
		rdev->config.r600.max_gprs = 128;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 128;
		rdev->config.r600.max_hw_contexts = 4;
		rdev->config.r600.max_gs_threads = 4;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 1;
		break;
	case CHIP_RV670:
		rdev->config.r600.max_pipes = 4;
		rdev->config.r600.max_tile_pipes = 4;
		rdev->config.r600.max_simds = 4;
		rdev->config.r600.max_backends = 4;
		rdev->config.r600.max_gprs = 192;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 256;
		rdev->config.r600.max_hw_contexts = 8;
		rdev->config.r600.max_gs_threads = 16;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 2;
		break;
	default:
		break;
	}

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}

	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));

	/* Setup tiling */
	tiling_config = 0;
	ramcfg = RREG32(RAMCFG);
	switch (rdev->config.r600.max_tile_pipes) {
	case 1:
		tiling_config |= PIPE_TILING(0);
		break;
	case 2:
		tiling_config |= PIPE_TILING(1);
		break;
	case 4:
		tiling_config |= PIPE_TILING(2);
		break;
	case 8:
		tiling_config |= PIPE_TILING(3);
		break;
	default:
		break;
	}
	rdev->config.r600.tiling_npipes = rdev->config.r600.max_tile_pipes;
	rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
	tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
	tiling_config |= GROUP_SIZE(0);
	rdev->config.r600.tiling_group_size = 256;
	tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
	if (tmp > 3) {
		tiling_config |= ROW_TILING(3);
		tiling_config |= SAMPLE_SPLIT(3);
	} else {
		tiling_config |= ROW_TILING(tmp);
		tiling_config |= SAMPLE_SPLIT(tmp);
	}
	tiling_config |= BANK_SWAPS(1);

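	/* disable the render backends and shader pipes beyond what this
	 * family provides, then derive the backend map from what is left */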
	cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
	cc_rb_backend_disable |=
		BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << rdev->config.r600.max_backends) & R6XX_MAX_BACKENDS_MASK);

	cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
	cc_gc_shader_pipe_config |=
		INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << rdev->config.r600.max_pipes) & R6XX_MAX_PIPES_MASK);
	cc_gc_shader_pipe_config |=
		INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << rdev->config.r600.max_simds) & R6XX_MAX_SIMDS_MASK);

	backend_map = r600_get_tile_pipe_to_backend_map(rdev->config.r600.max_tile_pipes,
							(R6XX_MAX_BACKENDS -
							 r600_count_pipe_bits((cc_rb_backend_disable &
									       R6XX_MAX_BACKENDS_MASK) >> 16)),
							(cc_rb_backend_disable >> 16));

	tiling_config |= BACKEND_MAP(backend_map);
	WREG32(GB_TILING_CONFIG, tiling_config);
	WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
	WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);

	/* Setup pipes */
	WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
	WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
	WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);

	tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
	WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
	WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK);

	/* Setup some CP states */
	WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | ROQ_IB2_START(0x2b)));
	WREG32(CP_MEQ_THRESHOLDS, (MEQ_END(0x40) | ROQ_END(0x40)));

	WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO | SYNC_GRADIENT |
			     SYNC_WALKER | SYNC_ALIGNER));
	/* Setup various GPU states */
	if (rdev->family == CHIP_RV670)
		WREG32(ARB_GDEC_RD_CNTL, 0x00000021);

	tmp = RREG32(SX_DEBUG_1);
	tmp |= SMX_EVENT_RELEASE;
	if ((rdev->family > CHIP_R600))
		tmp |= ENABLE_NEW_SMX_ADDRESS;
	WREG32(SX_DEBUG_1, tmp);

	if (((rdev->family) == CHIP_R600) ||
	    ((rdev->family) == CHIP_RV630) ||
	    ((rdev->family) == CHIP_RV610) ||
	    ((rdev->family) == CHIP_RV620) ||
	    ((rdev->family) == CHIP_RS780) ||
	    ((rdev->family) == CHIP_RS880)) {
		WREG32(DB_DEBUG, PREZ_MUST_WAIT_FOR_POSTZ_DONE);
	} else {
		WREG32(DB_DEBUG, 0);
	}
	WREG32(DB_WATERMARKS, (DEPTH_FREE(4) | DEPTH_CACHELINE_FREE(16) |
			       DEPTH_FLUSH(16) | DEPTH_PENDING_FREE(4)));

	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
	WREG32(VGT_NUM_INSTANCES, 0);

	WREG32(SPI_CONFIG_CNTL, GPR_WRITE_PRIORITY(0));
	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(0));

	tmp = RREG32(SQ_MS_FIFO_SIZES);
	if (((rdev->family) == CHIP_RV610) ||
	    ((rdev->family) == CHIP_RV620) ||
	    ((rdev->family) == CHIP_RS780) ||
	    ((rdev->family) == CHIP_RS880)) {
		tmp = (CACHE_FIFO_SIZE(0xa) |
		       FETCH_FIFO_HIWATER(0xa) |
		       DONE_FIFO_HIWATER(0xe0) |
		       ALU_UPDATE_FIFO_HIWATER(0x8));
	} else if (((rdev->family) == CHIP_R600) ||
		   ((rdev->family) == CHIP_RV630)) {
		tmp &= ~DONE_FIFO_HIWATER(0xff);
		tmp |= DONE_FIFO_HIWATER(0x4);
	}
	WREG32(SQ_MS_FIFO_SIZES, tmp);

	/* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT
	 * should be adjusted as needed by the 2D/3D drivers.  This just sets default values
	 */
	sq_config = RREG32(SQ_CONFIG);
	sq_config &= ~(PS_PRIO(3) |
		       VS_PRIO(3) |
		       GS_PRIO(3) |
		       ES_PRIO(3));
	sq_config |= (DX9_CONSTS |
		      VC_ENABLE |
		      PS_PRIO(0) |
		      VS_PRIO(1) |
		      GS_PRIO(2) |
		      ES_PRIO(3));

	if ((rdev->family) == CHIP_R600) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(124) |
					  NUM_VS_GPRS(124) |
					  NUM_CLAUSE_TEMP_GPRS(4));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(0) |
					  NUM_ES_GPRS(0));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(136) |
					   NUM_VS_THREADS(48) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(4));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(128) |
					    NUM_VS_STACK_ENTRIES(128));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(0) |
					    NUM_ES_STACK_ENTRIES(0));
	} else if (((rdev->family) == CHIP_RV610) ||
		   ((rdev->family) == CHIP_RV620) ||
		   ((rdev->family) == CHIP_RS780) ||
		   ((rdev->family) == CHIP_RS880)) {
		/* no vertex cache */
		sq_config &= ~VC_ENABLE;

		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_VS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
					  NUM_ES_GPRS(17));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
					    NUM_VS_STACK_ENTRIES(40));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
					    NUM_ES_STACK_ENTRIES(16));
	} else if (((rdev->family) == CHIP_RV630) ||
		   ((rdev->family) == CHIP_RV635)) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_VS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(18) |
					  NUM_ES_GPRS(18));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
					    NUM_VS_STACK_ENTRIES(40));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
					    NUM_ES_STACK_ENTRIES(16));
	} else if ((rdev->family) == CHIP_RV670) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_VS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
					  NUM_ES_GPRS(17));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(64) |
					    NUM_VS_STACK_ENTRIES(64));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(64) |
					    NUM_ES_STACK_ENTRIES(64));
	}

	WREG32(SQ_CONFIG, sq_config);
	WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1);
	WREG32(SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2);
	WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
	WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
	WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);

	if (((rdev->family) == CHIP_RV610) ||
	    ((rdev->family) == CHIP_RV620) ||
	    ((rdev->family) == CHIP_RS780) ||
	    ((rdev->family) == CHIP_RS880)) {
		WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(TC_ONLY));
	} else {
		WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC));
	}

	/* More default values. 2D/3D driver should adjust as needed */
	WREG32(PA_SC_AA_SAMPLE_LOCS_2S, (S0_X(0xc) | S0_Y(0x4) |
					 S1_X(0x4) | S1_Y(0xc)));
	WREG32(PA_SC_AA_SAMPLE_LOCS_4S, (S0_X(0xe) | S0_Y(0xe) |
					 S1_X(0x2) | S1_Y(0x2) |
					 S2_X(0xa) | S2_Y(0x6) |
					 S3_X(0x6) | S3_Y(0xa)));
	WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD0, (S0_X(0xe) | S0_Y(0xb) |
					     S1_X(0x4) | S1_Y(0xc) |
					     S2_X(0x1) | S2_Y(0x6) |
					     S3_X(0xa) | S3_Y(0xe)));
	WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD1, (S4_X(0x6) | S4_Y(0x1) |
					     S5_X(0x0) | S5_Y(0x0) |
					     S6_X(0xb) | S6_Y(0x4) |
					     S7_X(0x7) | S7_Y(0x8)));

	WREG32(VGT_STRMOUT_EN, 0);
	tmp = rdev->config.r600.max_pipes * 16;
	switch (rdev->family) {
	case CHIP_RV610:
	case CHIP_RV620:
	case CHIP_RS780:
	case CHIP_RS880:
		tmp += 32;
		break;
	case CHIP_RV670:
		tmp += 128;
		break;
	default:
		break;
	}
	if (tmp > 256) {
		tmp = 256;
	}
	WREG32(VGT_ES_PER_GS, 128);
	WREG32(VGT_GS_PER_ES, tmp);
	WREG32(VGT_GS_PER_VS, 2);
	WREG32(VGT_GS_VERTEX_REUSE, 16);

	/* more default values. 2D/3D driver should adjust as needed */
	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
	WREG32(VGT_STRMOUT_EN, 0);
	WREG32(SX_MISC, 0);
	WREG32(PA_SC_MODE_CNTL, 0);
	WREG32(PA_SC_AA_CONFIG, 0);
	WREG32(PA_SC_LINE_STIPPLE, 0);
	WREG32(SPI_INPUT_Z, 0);
1566 WREG32(SPI_PS_IN_CONTROL_0, NUM_INTERP(2));
1567 WREG32(CB_COLOR7_FRAG, 0);
1568
1569 /* Clear render buffer base addresses */
1570 WREG32(CB_COLOR0_BASE, 0);
1571 WREG32(CB_COLOR1_BASE, 0);
1572 WREG32(CB_COLOR2_BASE, 0);
1573 WREG32(CB_COLOR3_BASE, 0);
1574 WREG32(CB_COLOR4_BASE, 0);
1575 WREG32(CB_COLOR5_BASE, 0);
1576 WREG32(CB_COLOR6_BASE, 0);
1577 WREG32(CB_COLOR7_BASE, 0);
1578 WREG32(CB_COLOR7_FRAG, 0);
1579
1580 switch (rdev->family) {
1581 case CHIP_RV610:
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001582 case CHIP_RV620:
Alex Deucheree59f2b2009-11-05 13:11:46 -05001583 case CHIP_RS780:
1584 case CHIP_RS880:
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001585 tmp = TC_L2_SIZE(8);
1586 break;
1587 case CHIP_RV630:
1588 case CHIP_RV635:
1589 tmp = TC_L2_SIZE(4);
1590 break;
1591 case CHIP_R600:
1592 tmp = TC_L2_SIZE(0) | L2_DISABLE_LATE_HIT;
1593 break;
1594 default:
1595 tmp = TC_L2_SIZE(0);
1596 break;
1597 }
1598 WREG32(TC_CNTL, tmp);
1599
1600 tmp = RREG32(HDP_HOST_PATH_CNTL);
1601 WREG32(HDP_HOST_PATH_CNTL, tmp);
1602
1603 tmp = RREG32(ARB_POP);
1604 tmp |= ENABLE_TC128;
1605 WREG32(ARB_POP, tmp);
1606
1607 WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
1608 WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
1609 NUM_CLIP_SEQ(3)));
1610 WREG32(PA_SC_ENHANCE, FORCE_EOV_MAX_CLK_CNT(4095));
1611}


/*
 * Indirect registers accessor
 */
u32 r600_pciep_rreg(struct radeon_device *rdev, u32 reg)
{
	u32 r;

	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
	(void)RREG32(PCIE_PORT_INDEX);
	r = RREG32(PCIE_PORT_DATA);
	return r;
}

void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
	(void)RREG32(PCIE_PORT_INDEX);
	WREG32(PCIE_PORT_DATA, (v));
	(void)RREG32(PCIE_PORT_DATA);
}
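
/* Usage sketch (illustrative only, not part of the driver): a
 * read-modify-write through the indirect PCIE port accessors above.
 * The helper name is hypothetical. */
static inline void r600_pciep_rmw_example(struct radeon_device *rdev,
					  u32 reg, u32 clr, u32 set)
{
	u32 v = r600_pciep_rreg(rdev, reg);

	v &= ~clr;	/* clear the requested bits */
	v |= set;	/* then set the new ones */
	r600_pciep_wreg(rdev, reg, v);
}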

/*
 * CP & Ring
 */
void r600_cp_stop(struct radeon_device *rdev)
{
	WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
}

int r600_init_microcode(struct radeon_device *rdev)
{
	struct platform_device *pdev;
	const char *chip_name;
	const char *rlc_chip_name;
	size_t pfp_req_size, me_req_size, rlc_req_size;
	char fw_name[30];
	int err;

	DRM_DEBUG("\n");

	pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
	err = IS_ERR(pdev);
	if (err) {
		printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
		return -EINVAL;
	}

	switch (rdev->family) {
	case CHIP_R600:
		chip_name = "R600";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV610:
		chip_name = "RV610";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV630:
		chip_name = "RV630";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV620:
		chip_name = "RV620";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV635:
		chip_name = "RV635";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV670:
		chip_name = "RV670";
		rlc_chip_name = "R600";
		break;
	case CHIP_RS780:
	case CHIP_RS880:
		chip_name = "RS780";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV770:
		chip_name = "RV770";
		rlc_chip_name = "R700";
		break;
	case CHIP_RV730:
	case CHIP_RV740:
		chip_name = "RV730";
		rlc_chip_name = "R700";
		break;
	case CHIP_RV710:
		chip_name = "RV710";
		rlc_chip_name = "R700";
		break;
	case CHIP_CEDAR:
		chip_name = "CEDAR";
		rlc_chip_name = "CEDAR";
		break;
	case CHIP_REDWOOD:
		chip_name = "REDWOOD";
		rlc_chip_name = "REDWOOD";
		break;
	case CHIP_JUNIPER:
		chip_name = "JUNIPER";
		rlc_chip_name = "JUNIPER";
		break;
	case CHIP_CYPRESS:
	case CHIP_HEMLOCK:
		chip_name = "CYPRESS";
		rlc_chip_name = "CYPRESS";
		break;
	default: BUG();
	}

	if (rdev->family >= CHIP_CEDAR) {
		pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
	} else if (rdev->family >= CHIP_RV770) {
		pfp_req_size = R700_PFP_UCODE_SIZE * 4;
		me_req_size = R700_PM4_UCODE_SIZE * 4;
		rlc_req_size = R700_RLC_UCODE_SIZE * 4;
	} else {
		pfp_req_size = PFP_UCODE_SIZE * 4;
		me_req_size = PM4_UCODE_SIZE * 12;
		rlc_req_size = RLC_UCODE_SIZE * 4;
	}

	DRM_INFO("Loading %s Microcode\n", chip_name);

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
	err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
	if (err)
		goto out;
	if (rdev->pfp_fw->size != pfp_req_size) {
		printk(KERN_ERR
		       "r600_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->pfp_fw->size, fw_name);
		err = -EINVAL;
		goto out;
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
	err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
	if (err)
		goto out;
	if (rdev->me_fw->size != me_req_size) {
		printk(KERN_ERR
		       "r600_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->me_fw->size, fw_name);
		err = -EINVAL;
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
	err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev);
	if (err)
		goto out;
	if (rdev->rlc_fw->size != rlc_req_size) {
		printk(KERN_ERR
		       "r600_rlc: Bogus length %zu in firmware \"%s\"\n",
		       rdev->rlc_fw->size, fw_name);
		err = -EINVAL;
	}

out:
	platform_device_unregister(pdev);

	if (err) {
		if (err != -EINVAL)
			printk(KERN_ERR
			       "r600_cp: Failed to load firmware \"%s\"\n",
			       fw_name);
		release_firmware(rdev->pfp_fw);
		rdev->pfp_fw = NULL;
		release_firmware(rdev->me_fw);
		rdev->me_fw = NULL;
		release_firmware(rdev->rlc_fw);
		rdev->rlc_fw = NULL;
	}
	return err;
}

static int r600_cp_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	int i;

	if (!rdev->me_fw || !rdev->pfp_fw)
		return -EINVAL;

	r600_cp_stop(rdev);

	WREG32(CP_RB_CNTL, RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));

	/* Reset cp */
	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
	RREG32(GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(GRBM_SOFT_RESET, 0);

	WREG32(CP_ME_RAM_WADDR, 0);

	fw_data = (const __be32 *)rdev->me_fw->data;
	WREG32(CP_ME_RAM_WADDR, 0);
	for (i = 0; i < PM4_UCODE_SIZE * 3; i++)
		WREG32(CP_ME_RAM_DATA,
		       be32_to_cpup(fw_data++));

	fw_data = (const __be32 *)rdev->pfp_fw->data;
	WREG32(CP_PFP_UCODE_ADDR, 0);
	for (i = 0; i < PFP_UCODE_SIZE; i++)
		WREG32(CP_PFP_UCODE_DATA,
		       be32_to_cpup(fw_data++));

	WREG32(CP_PFP_UCODE_ADDR, 0);
	WREG32(CP_ME_RAM_WADDR, 0);
	WREG32(CP_ME_RAM_RADDR, 0);
	return 0;
}

int r600_cp_start(struct radeon_device *rdev)
{
	int r;
	uint32_t cp_me;

	r = radeon_ring_lock(rdev, 7);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}
	radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
	radeon_ring_write(rdev, 0x1);
	if (rdev->family >= CHIP_CEDAR) {
		radeon_ring_write(rdev, 0x0);
		radeon_ring_write(rdev, rdev->config.evergreen.max_hw_contexts - 1);
	} else if (rdev->family >= CHIP_RV770) {
		radeon_ring_write(rdev, 0x0);
		radeon_ring_write(rdev, rdev->config.rv770.max_hw_contexts - 1);
	} else {
		radeon_ring_write(rdev, 0x3);
		radeon_ring_write(rdev, rdev->config.r600.max_hw_contexts - 1);
	}
	radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, 0);
	radeon_ring_unlock_commit(rdev);

	cp_me = 0xff;
	WREG32(R_0086D8_CP_ME_CNTL, cp_me);
	return 0;
}

int r600_cp_resume(struct radeon_device *rdev)
{
	u32 tmp;
	u32 rb_bufsz;
	int r;

	/* Reset cp */
	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
	RREG32(GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(GRBM_SOFT_RESET, 0);

	/* Set ring buffer size */
	rb_bufsz = drm_order(rdev->cp.ring_size / 8);
	tmp = RB_NO_UPDATE | (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
	tmp |= BUF_SWAP_32BIT;
#endif
	WREG32(CP_RB_CNTL, tmp);
	WREG32(CP_SEM_WAIT_TIMER, 0x4);

	/* Set the write pointer delay */
	WREG32(CP_RB_WPTR_DELAY, 0);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
	WREG32(CP_RB_RPTR_WR, 0);
	WREG32(CP_RB_WPTR, 0);
	WREG32(CP_RB_RPTR_ADDR, rdev->cp.gpu_addr & 0xFFFFFFFF);
	WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->cp.gpu_addr));
	mdelay(1);
	WREG32(CP_RB_CNTL, tmp);

	WREG32(CP_RB_BASE, rdev->cp.gpu_addr >> 8);
	WREG32(CP_DEBUG, (1 << 27) | (1 << 28));

	rdev->cp.rptr = RREG32(CP_RB_RPTR);
	rdev->cp.wptr = RREG32(CP_RB_WPTR);

	r600_cp_start(rdev);
	rdev->cp.ready = true;
	r = radeon_ring_test(rdev);
	if (r) {
		rdev->cp.ready = false;
		return r;
	}
	return 0;
}

void r600_cp_commit(struct radeon_device *rdev)
{
	WREG32(CP_RB_WPTR, rdev->cp.wptr);
	(void)RREG32(CP_RB_WPTR);
}

void r600_ring_init(struct radeon_device *rdev, unsigned ring_size)
{
	u32 rb_bufsz;

	/* Align ring size */
	rb_bufsz = drm_order(ring_size / 8);
	ring_size = (1 << (rb_bufsz + 1)) * 4;
	rdev->cp.ring_size = ring_size;
	rdev->cp.align_mask = 16 - 1;
}
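
/* Worked example for the alignment above (a sketch, assuming drm_order()
 * returns the base-2 logarithm rounded up): for ring_size = 1 MiB,
 * rb_bufsz = drm_order(1048576 / 8) = 17, so the aligned size is
 * (1 << 18) * 4 = 1 MiB again; a request of 1 MiB + 1 byte would round
 * up to 2 MiB. */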

void r600_cp_fini(struct radeon_device *rdev)
{
	r600_cp_stop(rdev);
	radeon_ring_fini(rdev);
}


/*
 * GPU scratch register helper functions.
 */
void r600_scratch_init(struct radeon_device *rdev)
{
	int i;

	rdev->scratch.num_reg = 7;
	for (i = 0; i < rdev->scratch.num_reg; i++) {
		rdev->scratch.free[i] = true;
		rdev->scratch.reg[i] = SCRATCH_REG0 + (i * 4);
	}
}

int r600_ring_test(struct radeon_device *rdev)
{
	uint32_t scratch;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	r = radeon_scratch_get(rdev, &scratch);
	if (r) {
		DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
		return r;
	}
	WREG32(scratch, 0xCAFEDEAD);
	r = radeon_ring_lock(rdev, 3);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		radeon_scratch_free(rdev, scratch);
		return r;
	}
	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(rdev, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
	radeon_ring_write(rdev, 0xDEADBEEF);
	radeon_ring_unlock_commit(rdev);
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}
	if (i < rdev->usec_timeout) {
		DRM_INFO("ring test succeeded in %d usecs\n", i);
	} else {
		DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n",
			  scratch, tmp);
		r = -EINVAL;
	}
	radeon_scratch_free(rdev, scratch);
	return r;
}

void r600_wb_disable(struct radeon_device *rdev)
{
	int r;

	WREG32(SCRATCH_UMSK, 0);
	if (rdev->wb.wb_obj) {
		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
		if (unlikely(r != 0))
			return;
		radeon_bo_kunmap(rdev->wb.wb_obj);
		radeon_bo_unpin(rdev->wb.wb_obj);
		radeon_bo_unreserve(rdev->wb.wb_obj);
	}
}

void r600_wb_fini(struct radeon_device *rdev)
{
	r600_wb_disable(rdev);
	if (rdev->wb.wb_obj) {
		radeon_bo_unref(&rdev->wb.wb_obj);
		rdev->wb.wb = NULL;
		rdev->wb.wb_obj = NULL;
	}
}

int r600_wb_enable(struct radeon_device *rdev)
{
	int r;

	if (rdev->wb.wb_obj == NULL) {
		r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj);
		if (r) {
			dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}
		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
		if (unlikely(r != 0)) {
			r600_wb_fini(rdev);
			return r;
		}
		r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
				  &rdev->wb.gpu_addr);
		if (r) {
			radeon_bo_unreserve(rdev->wb.wb_obj);
			dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
			r600_wb_fini(rdev);
			return r;
		}
		r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
		radeon_bo_unreserve(rdev->wb.wb_obj);
		if (r) {
			dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
			r600_wb_fini(rdev);
			return r;
		}
	}
	WREG32(SCRATCH_ADDR, (rdev->wb.gpu_addr >> 8) & 0xFFFFFFFF);
	WREG32(CP_RB_RPTR_ADDR, (rdev->wb.gpu_addr + 1024) & 0xFFFFFFFC);
	WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + 1024) & 0xFF);
	WREG32(SCRATCH_UMSK, 0xff);
	return 0;
}
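
/* Illustrative sketch (an assumption, not driver code): with write-back
 * enabled above, the CP read pointer lands in the WB page at byte offset
 * 1024 (see the CP_RB_RPTR_ADDR programming), so the CPU could poll it
 * without an MMIO read.  The helper name is hypothetical. */
static inline u32 r600_wb_rptr_example(struct radeon_device *rdev)
{
	/* wb.wb is the CPU mapping of the write-back page, in 32-bit words */
	return rdev->wb.wb ? rdev->wb.wb[1024 / 4] : 0;
}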

void r600_fence_ring_emit(struct radeon_device *rdev,
			  struct radeon_fence *fence)
{
	/* Also consider EVENT_WRITE_EOP.  It handles the interrupts + timestamps + events */

	radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0));
	radeon_ring_write(rdev, CACHE_FLUSH_AND_INV_EVENT);
	/* wait for 3D idle clean */
	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(rdev, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
	/* Emit fence sequence & fire IRQ */
	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
	radeon_ring_write(rdev, fence->seq);
	/* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
	radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0));
	radeon_ring_write(rdev, RB_INT_STAT);
}

int r600_copy_blit(struct radeon_device *rdev,
		   uint64_t src_offset, uint64_t dst_offset,
		   unsigned num_pages, struct radeon_fence *fence)
{
	int r;

	mutex_lock(&rdev->r600_blit.mutex);
	rdev->r600_blit.vb_ib = NULL;
	r = r600_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE);
	if (r) {
		if (rdev->r600_blit.vb_ib)
			radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
		mutex_unlock(&rdev->r600_blit.mutex);
		return r;
	}
	r600_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE);
	r600_blit_done_copy(rdev, fence);
	mutex_unlock(&rdev->r600_blit.mutex);
	return 0;
}

int r600_set_surface_reg(struct radeon_device *rdev, int reg,
			 uint32_t tiling_flags, uint32_t pitch,
			 uint32_t offset, uint32_t obj_size)
{
	/* FIXME: implement */
	return 0;
}

void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
{
	/* FIXME: implement */
}


bool r600_card_posted(struct radeon_device *rdev)
{
	uint32_t reg;

	/* first check CRTCs */
	reg = RREG32(D1CRTC_CONTROL) |
		RREG32(D2CRTC_CONTROL);
	if (reg & CRTC_EN)
		return true;

	/* then check MEM_SIZE, in case the crtcs are off */
	if (RREG32(CONFIG_MEMSIZE))
		return true;

	return false;
}

int r600_startup(struct radeon_device *rdev)
{
	int r;

	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
		r = r600_init_microcode(rdev);
		if (r) {
			DRM_ERROR("Failed to load firmware!\n");
			return r;
		}
	}

	r600_mc_program(rdev);
	if (rdev->flags & RADEON_IS_AGP) {
		r600_agp_enable(rdev);
	} else {
		r = r600_pcie_gart_enable(rdev);
		if (r)
			return r;
	}
	r600_gpu_init(rdev);
	r = r600_blit_init(rdev);
	if (r) {
		r600_blit_fini(rdev);
		rdev->asic->copy = NULL;
		dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
	}
	/* pin copy shader into vram */
	if (rdev->r600_blit.shader_obj) {
		r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
		if (unlikely(r != 0))
			return r;
		r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
				  &rdev->r600_blit.shader_gpu_addr);
		radeon_bo_unreserve(rdev->r600_blit.shader_obj);
		if (r) {
			dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
			return r;
		}
	}
	/* Enable IRQ */
	r = r600_irq_init(rdev);
	if (r) {
		DRM_ERROR("radeon: IH init failed (%d).\n", r);
		radeon_irq_kms_fini(rdev);
		return r;
	}
	r600_irq_set(rdev);

	r = radeon_ring_init(rdev, rdev->cp.ring_size);
	if (r)
		return r;
	r = r600_cp_load_microcode(rdev);
	if (r)
		return r;
	r = r600_cp_resume(rdev);
	if (r)
		return r;
	/* the write-back buffer is not vital, so don't worry about failure */
	r600_wb_enable(rdev);
	return 0;
}

void r600_vga_set_state(struct radeon_device *rdev, bool state)
{
	uint32_t temp;

	temp = RREG32(CONFIG_CNTL);
	if (state == false) {
		temp &= ~(1<<0);
		temp |= (1<<1);
	} else {
		temp &= ~(1<<1);
	}
	WREG32(CONFIG_CNTL, temp);
}

int r600_resume(struct radeon_device *rdev)
{
	int r;

	/* Do not reset the GPU before posting; on r600 hw, unlike on r500 hw,
	 * posting will perform the tasks needed to bring the GPU back into
	 * good shape.
	 */
	/* post card */
	atom_asic_init(rdev->mode_info.atom_context);
	/* Initialize clocks */
	r = radeon_clocks_init(rdev);
	if (r) {
		return r;
	}

	r = r600_startup(rdev);
	if (r) {
		DRM_ERROR("r600 startup failed on resume\n");
		return r;
	}

	r = r600_ib_test(rdev);
	if (r) {
		DRM_ERROR("radeon: failed testing IB (%d).\n", r);
		return r;
	}

	r = r600_audio_init(rdev);
	if (r) {
		DRM_ERROR("radeon: audio resume failed\n");
		return r;
	}

	return r;
}

int r600_suspend(struct radeon_device *rdev)
{
	int r;

	r600_audio_fini(rdev);
	/* FIXME: we should wait for ring to be empty */
	r600_cp_stop(rdev);
	rdev->cp.ready = false;
	r600_irq_suspend(rdev);
	r600_wb_disable(rdev);
	r600_pcie_gart_disable(rdev);
	/* unpin shaders bo */
	if (rdev->r600_blit.shader_obj) {
		r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
		if (!r) {
			radeon_bo_unpin(rdev->r600_blit.shader_obj);
			radeon_bo_unreserve(rdev->r600_blit.shader_obj);
		}
	}
	return 0;
}

/* The plan is to move initialization into this function and use helper
 * functions so that radeon_device_init does little more than call
 * ASIC-specific functions.  This should also allow us to remove a bunch
 * of callbacks such as vram_info.
 */
int r600_init(struct radeon_device *rdev)
{
	int r;

	r = radeon_dummy_page_init(rdev);
	if (r)
		return r;
	if (r600_debugfs_mc_info_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for mc!\n");
	}
	/* This doesn't do much */
	r = radeon_gem_init(rdev);
	if (r)
		return r;
	/* Read BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	/* Must be an ATOMBIOS */
	if (!rdev->is_atom_bios) {
		dev_err(rdev->dev, "Expecting atombios for R600 GPU\n");
		return -EINVAL;
	}
	r = radeon_atombios_init(rdev);
	if (r)
		return r;
	/* Post card if necessary */
	if (!r600_card_posted(rdev)) {
		if (!rdev->bios) {
			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
			return -EINVAL;
		}
		DRM_INFO("GPU not posted. posting now...\n");
		atom_asic_init(rdev->mode_info.atom_context);
	}
	/* Initialize scratch registers */
	r600_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	r = radeon_clocks_init(rdev);
	if (r)
		return r;
	/* Initialize power management */
	radeon_pm_init(rdev);
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	if (rdev->flags & RADEON_IS_AGP) {
		r = radeon_agp_init(rdev);
		if (r)
			radeon_agp_disable(rdev);
	}
	r = r600_mc_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;

	r = radeon_irq_kms_init(rdev);
	if (r)
		return r;

	rdev->cp.ring_obj = NULL;
	r600_ring_init(rdev, 1024 * 1024);

	rdev->ih.ring_obj = NULL;
	r600_ih_ring_init(rdev, 64 * 1024);

	r = r600_pcie_gart_init(rdev);
	if (r)
		return r;

	rdev->accel_working = true;
	r = r600_startup(rdev);
	if (r) {
		dev_err(rdev->dev, "disabling GPU acceleration\n");
		r600_cp_fini(rdev);
		r600_wb_fini(rdev);
		r600_irq_fini(rdev);
		radeon_irq_kms_fini(rdev);
		r600_pcie_gart_fini(rdev);
		rdev->accel_working = false;
	}
	if (rdev->accel_working) {
		r = radeon_ib_pool_init(rdev);
		if (r) {
			dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
			rdev->accel_working = false;
		} else {
			r = r600_ib_test(rdev);
			if (r) {
				dev_err(rdev->dev, "IB test failed (%d).\n", r);
				rdev->accel_working = false;
			}
		}
	}

	r = r600_audio_init(rdev);
	if (r)
		return r; /* TODO error handling */
	return 0;
}

void r600_fini(struct radeon_device *rdev)
{
	radeon_pm_fini(rdev);
	r600_audio_fini(rdev);
	r600_blit_fini(rdev);
	r600_cp_fini(rdev);
	r600_wb_fini(rdev);
	r600_irq_fini(rdev);
	radeon_irq_kms_fini(rdev);
	r600_pcie_gart_fini(rdev);
	radeon_agp_fini(rdev);
	radeon_gem_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_clocks_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;
	radeon_dummy_page_fini(rdev);
}


/*
 * CS stuff
 */
void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
	/* FIXME: implement */
	radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
	radeon_ring_write(rdev, ib->gpu_addr & 0xFFFFFFFC);
	radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF);
	radeon_ring_write(rdev, ib->length_dw);
}

int r600_ib_test(struct radeon_device *rdev)
{
	struct radeon_ib *ib;
	uint32_t scratch;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	r = radeon_scratch_get(rdev, &scratch);
	if (r) {
		DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
		return r;
	}
	WREG32(scratch, 0xCAFEDEAD);
	r = radeon_ib_get(rdev, &ib);
	if (r) {
		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
		return r;
	}
	ib->ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
	ib->ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
	ib->ptr[2] = 0xDEADBEEF;
	ib->ptr[3] = PACKET2(0);
	ib->ptr[4] = PACKET2(0);
	ib->ptr[5] = PACKET2(0);
	ib->ptr[6] = PACKET2(0);
	ib->ptr[7] = PACKET2(0);
	ib->ptr[8] = PACKET2(0);
	ib->ptr[9] = PACKET2(0);
	ib->ptr[10] = PACKET2(0);
	ib->ptr[11] = PACKET2(0);
	ib->ptr[12] = PACKET2(0);
	ib->ptr[13] = PACKET2(0);
	ib->ptr[14] = PACKET2(0);
	ib->ptr[15] = PACKET2(0);
	ib->length_dw = 16;
	r = radeon_ib_schedule(rdev, ib);
	if (r) {
		radeon_scratch_free(rdev, scratch);
		radeon_ib_free(rdev, &ib);
		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
		return r;
	}
	r = radeon_fence_wait(ib->fence, false);
	if (r) {
		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
		return r;
	}
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}
	if (i < rdev->usec_timeout) {
		DRM_INFO("ib test succeeded in %u usecs\n", i);
	} else {
		DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
			  scratch, tmp);
		r = -EINVAL;
	}
	radeon_scratch_free(rdev, scratch);
	radeon_ib_free(rdev, &ib);
	return r;
}

/*
 * Interrupts
 *
 * Interrupts use a ring buffer on r6xx/r7xx hardware.  It works pretty much
 * the same as the CP ring buffer, but in reverse.  Rather than the CPU
 * writing to the ring and the GPU consuming, the GPU writes to the ring
 * and the host consumes.  As the host irq handler processes interrupts, it
 * increments the rptr.  When the rptr catches up with the wptr, all the
 * current interrupts have been processed.
 */

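/* Illustrative sketch (an assumption, not driver code): the host side of
 * the IH ring described above.  rptr and wptr are byte offsets into a
 * power-of-two sized ring, so wrapping is a simple mask with ptr_mask;
 * work remains pending until rptr catches up with wptr. */
static inline bool r600_ih_pending_example(struct radeon_device *rdev, u32 wptr)
{
	return (rdev->ih.rptr & rdev->ih.ptr_mask) != (wptr & rdev->ih.ptr_mask);
}
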
void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size)
{
	u32 rb_bufsz;

	/* Align ring size */
	rb_bufsz = drm_order(ring_size / 4);
	ring_size = (1 << rb_bufsz) * 4;
	rdev->ih.ring_size = ring_size;
	rdev->ih.ptr_mask = rdev->ih.ring_size - 1;
	rdev->ih.rptr = 0;
}

static int r600_ih_ring_alloc(struct radeon_device *rdev)
{
	int r;

	/* Allocate ring buffer */
	if (rdev->ih.ring_obj == NULL) {
		r = radeon_bo_create(rdev, NULL, rdev->ih.ring_size,
				     true,
				     RADEON_GEM_DOMAIN_GTT,
				     &rdev->ih.ring_obj);
		if (r) {
			DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r);
			return r;
		}
		r = radeon_bo_reserve(rdev->ih.ring_obj, false);
		if (unlikely(r != 0))
			return r;
		r = radeon_bo_pin(rdev->ih.ring_obj,
				  RADEON_GEM_DOMAIN_GTT,
				  &rdev->ih.gpu_addr);
		if (r) {
			radeon_bo_unreserve(rdev->ih.ring_obj);
			DRM_ERROR("radeon: failed to pin ih ring buffer (%d).\n", r);
			return r;
		}
		r = radeon_bo_kmap(rdev->ih.ring_obj,
				   (void **)&rdev->ih.ring);
		radeon_bo_unreserve(rdev->ih.ring_obj);
		if (r) {
			DRM_ERROR("radeon: failed to map ih ring buffer (%d).\n", r);
			return r;
		}
	}
	return 0;
}

static void r600_ih_ring_fini(struct radeon_device *rdev)
{
	int r;

	if (rdev->ih.ring_obj) {
		r = radeon_bo_reserve(rdev->ih.ring_obj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(rdev->ih.ring_obj);
			radeon_bo_unpin(rdev->ih.ring_obj);
			radeon_bo_unreserve(rdev->ih.ring_obj);
		}
		radeon_bo_unref(&rdev->ih.ring_obj);
		rdev->ih.ring = NULL;
		rdev->ih.ring_obj = NULL;
	}
}

void r600_rlc_stop(struct radeon_device *rdev)
{
	if ((rdev->family >= CHIP_RV770) &&
	    (rdev->family <= CHIP_RV740)) {
		/* r7xx asics need to soft reset RLC before halting */
		WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC);
		RREG32(SRBM_SOFT_RESET);
		udelay(15000);
		WREG32(SRBM_SOFT_RESET, 0);
		RREG32(SRBM_SOFT_RESET);
	}

	WREG32(RLC_CNTL, 0);
}

static void r600_rlc_start(struct radeon_device *rdev)
{
	WREG32(RLC_CNTL, RLC_ENABLE);
}

static int r600_rlc_init(struct radeon_device *rdev)
{
	u32 i;
	const __be32 *fw_data;

	if (!rdev->rlc_fw)
		return -EINVAL;

	r600_rlc_stop(rdev);

	WREG32(RLC_HB_BASE, 0);
	WREG32(RLC_HB_CNTL, 0);
	WREG32(RLC_HB_RPTR, 0);
	WREG32(RLC_HB_WPTR, 0);
	WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
	WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
	WREG32(RLC_MC_CNTL, 0);
	WREG32(RLC_UCODE_CNTL, 0);

	fw_data = (const __be32 *)rdev->rlc_fw->data;
	if (rdev->family >= CHIP_CEDAR) {
		for (i = 0; i < EVERGREEN_RLC_UCODE_SIZE; i++) {
			WREG32(RLC_UCODE_ADDR, i);
			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
		}
	} else if (rdev->family >= CHIP_RV770) {
		for (i = 0; i < R700_RLC_UCODE_SIZE; i++) {
			WREG32(RLC_UCODE_ADDR, i);
			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
		}
	} else {
		for (i = 0; i < RLC_UCODE_SIZE; i++) {
			WREG32(RLC_UCODE_ADDR, i);
			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
		}
	}
	WREG32(RLC_UCODE_ADDR, 0);

	r600_rlc_start(rdev);

	return 0;
}

static void r600_enable_interrupts(struct radeon_device *rdev)
{
	u32 ih_cntl = RREG32(IH_CNTL);
	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);

	ih_cntl |= ENABLE_INTR;
	ih_rb_cntl |= IH_RB_ENABLE;
	WREG32(IH_CNTL, ih_cntl);
	WREG32(IH_RB_CNTL, ih_rb_cntl);
	rdev->ih.enabled = true;
}

void r600_disable_interrupts(struct radeon_device *rdev)
{
	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
	u32 ih_cntl = RREG32(IH_CNTL);

	ih_rb_cntl &= ~IH_RB_ENABLE;
	ih_cntl &= ~ENABLE_INTR;
	WREG32(IH_RB_CNTL, ih_rb_cntl);
	WREG32(IH_CNTL, ih_cntl);
	/* set rptr, wptr to 0 */
	WREG32(IH_RB_RPTR, 0);
	WREG32(IH_RB_WPTR, 0);
	rdev->ih.enabled = false;
	rdev->ih.wptr = 0;
	rdev->ih.rptr = 0;
}

static void r600_disable_interrupt_state(struct radeon_device *rdev)
{
	u32 tmp;

	WREG32(CP_INT_CNTL, 0);
	WREG32(GRBM_INT_CNTL, 0);
	WREG32(DxMODE_INT_MASK, 0);
	if (ASIC_IS_DCE3(rdev)) {
		WREG32(DCE3_DACA_AUTODETECT_INT_CONTROL, 0);
		WREG32(DCE3_DACB_AUTODETECT_INT_CONTROL, 0);
		tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD1_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD2_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD3_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD4_INT_CONTROL, tmp);
		if (ASIC_IS_DCE32(rdev)) {
			tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD5_INT_CONTROL, tmp);
			tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD6_INT_CONTROL, tmp);
		}
	} else {
		WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
		WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
		tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
		WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
		tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
		WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
		tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
		WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
	}
}

int r600_irq_init(struct radeon_device *rdev)
{
	int ret = 0;
	int rb_bufsz;
	u32 interrupt_cntl, ih_cntl, ih_rb_cntl;

	/* allocate ring */
	ret = r600_ih_ring_alloc(rdev);
	if (ret)
		return ret;

	/* disable irqs */
	r600_disable_interrupts(rdev);

	/* init rlc */
	ret = r600_rlc_init(rdev);
	if (ret) {
		r600_ih_ring_fini(rdev);
		return ret;
	}

	/* setup interrupt control */
	/* set dummy read address to ring address */
	WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
	interrupt_cntl = RREG32(INTERRUPT_CNTL);
	/* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
	 * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
	 */
	interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
	/* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
	interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
	WREG32(INTERRUPT_CNTL, interrupt_cntl);

	WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
	rb_bufsz = drm_order(rdev->ih.ring_size / 4);

	ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
		      IH_WPTR_OVERFLOW_CLEAR |
		      (rb_bufsz << 1));
	/* WPTR writeback, not yet */
	/*ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;*/
	WREG32(IH_RB_WPTR_ADDR_LO, 0);
	WREG32(IH_RB_WPTR_ADDR_HI, 0);

	WREG32(IH_RB_CNTL, ih_rb_cntl);

	/* set rptr, wptr to 0 */
	WREG32(IH_RB_RPTR, 0);
	WREG32(IH_RB_WPTR, 0);

	/* Default settings for IH_CNTL (disabled at first) */
	ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10);
	/* RPTR_REARM only works if MSIs are enabled */
	if (rdev->msi_enabled)
		ih_cntl |= RPTR_REARM;

#ifdef __BIG_ENDIAN
	ih_cntl |= IH_MC_SWAP(IH_MC_SWAP_32BIT);
#endif
	WREG32(IH_CNTL, ih_cntl);

	/* force the active interrupt state to all disabled */
	if (rdev->family >= CHIP_CEDAR)
		evergreen_disable_interrupt_state(rdev);
	else
		r600_disable_interrupt_state(rdev);

	/* enable irqs */
	r600_enable_interrupts(rdev);

	return ret;
}

void r600_irq_suspend(struct radeon_device *rdev)
{
	r600_irq_disable(rdev);
	r600_rlc_stop(rdev);
}

void r600_irq_fini(struct radeon_device *rdev)
{
	r600_irq_suspend(rdev);
	r600_ih_ring_fini(rdev);
}

int r600_irq_set(struct radeon_device *rdev)
{
	u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
	u32 mode_int = 0;
	u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
	u32 grbm_int_cntl = 0;
	u32 hdmi1, hdmi2;

	if (!rdev->irq.installed) {
		WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
		return -EINVAL;
	}
	/* don't enable anything if the ih is disabled */
	if (!rdev->ih.enabled) {
		r600_disable_interrupts(rdev);
		/* force the active interrupt state to all disabled */
		r600_disable_interrupt_state(rdev);
		return 0;
	}

	hdmi1 = RREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
	if (ASIC_IS_DCE3(rdev)) {
		hdmi2 = RREG32(R600_HDMI_BLOCK3 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
		hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
		if (ASIC_IS_DCE32(rdev)) {
			hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
			hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
		}
	} else {
		hdmi2 = RREG32(R600_HDMI_BLOCK2 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
		hpd1 = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN;
	}

	if (rdev->irq.sw_int) {
		DRM_DEBUG("r600_irq_set: sw int\n");
		cp_int_cntl |= RB_INT_ENABLE;
	}
	if (rdev->irq.crtc_vblank_int[0]) {
		DRM_DEBUG("r600_irq_set: vblank 0\n");
		mode_int |= D1MODE_VBLANK_INT_MASK;
	}
	if (rdev->irq.crtc_vblank_int[1]) {
		DRM_DEBUG("r600_irq_set: vblank 1\n");
		mode_int |= D2MODE_VBLANK_INT_MASK;
	}
	if (rdev->irq.hpd[0]) {
		DRM_DEBUG("r600_irq_set: hpd 1\n");
		hpd1 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[1]) {
		DRM_DEBUG("r600_irq_set: hpd 2\n");
		hpd2 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[2]) {
		DRM_DEBUG("r600_irq_set: hpd 3\n");
		hpd3 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[3]) {
		DRM_DEBUG("r600_irq_set: hpd 4\n");
		hpd4 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[4]) {
		DRM_DEBUG("r600_irq_set: hpd 5\n");
		hpd5 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[5]) {
		DRM_DEBUG("r600_irq_set: hpd 6\n");
		hpd6 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hdmi[0]) {
		DRM_DEBUG("r600_irq_set: hdmi 1\n");
		hdmi1 |= R600_HDMI_INT_EN;
	}
	if (rdev->irq.hdmi[1]) {
		DRM_DEBUG("r600_irq_set: hdmi 2\n");
		hdmi2 |= R600_HDMI_INT_EN;
	}
	if (rdev->irq.gui_idle) {
		DRM_DEBUG("gui idle\n");
		grbm_int_cntl |= GUI_IDLE_INT_ENABLE;
	}

	WREG32(CP_INT_CNTL, cp_int_cntl);
	WREG32(DxMODE_INT_MASK, mode_int);
	WREG32(GRBM_INT_CNTL, grbm_int_cntl);
	WREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, hdmi1);
	if (ASIC_IS_DCE3(rdev)) {
		WREG32(R600_HDMI_BLOCK3 + R600_HDMI_CNTL, hdmi2);
		WREG32(DC_HPD1_INT_CONTROL, hpd1);
		WREG32(DC_HPD2_INT_CONTROL, hpd2);
		WREG32(DC_HPD3_INT_CONTROL, hpd3);
		WREG32(DC_HPD4_INT_CONTROL, hpd4);
		if (ASIC_IS_DCE32(rdev)) {
			WREG32(DC_HPD5_INT_CONTROL, hpd5);
			WREG32(DC_HPD6_INT_CONTROL, hpd6);
		}
	} else {
		WREG32(R600_HDMI_BLOCK2 + R600_HDMI_CNTL, hdmi2);
		WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
		WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
		WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3);
	}

	return 0;
}

static inline void r600_irq_ack(struct radeon_device *rdev,
				u32 *disp_int,
				u32 *disp_int_cont,
				u32 *disp_int_cont2)
{
	u32 tmp;

	if (ASIC_IS_DCE3(rdev)) {
		*disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS);
		*disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE);
		*disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2);
	} else {
		*disp_int = RREG32(DISP_INTERRUPT_STATUS);
		*disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
		*disp_int_cont2 = 0;
	}

	if (*disp_int & LB_D1_VBLANK_INTERRUPT)
		WREG32(D1MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
	if (*disp_int & LB_D1_VLINE_INTERRUPT)
		WREG32(D1MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
	if (*disp_int & LB_D2_VBLANK_INTERRUPT)
		WREG32(D2MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
	if (*disp_int & LB_D2_VLINE_INTERRUPT)
		WREG32(D2MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
	if (*disp_int & DC_HPD1_INTERRUPT) {
		if (ASIC_IS_DCE3(rdev)) {
			tmp = RREG32(DC_HPD1_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD1_INT_CONTROL, tmp);
		} else {
			tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
		}
	}
	if (*disp_int & DC_HPD2_INTERRUPT) {
		if (ASIC_IS_DCE3(rdev)) {
			tmp = RREG32(DC_HPD2_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD2_INT_CONTROL, tmp);
		} else {
			tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
		}
	}
	if (*disp_int_cont & DC_HPD3_INTERRUPT) {
		if (ASIC_IS_DCE3(rdev)) {
			tmp = RREG32(DC_HPD3_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD3_INT_CONTROL, tmp);
		} else {
			tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
		}
	}
	if (*disp_int_cont & DC_HPD4_INTERRUPT) {
		tmp = RREG32(DC_HPD4_INT_CONTROL);
		tmp |= DC_HPDx_INT_ACK;
		WREG32(DC_HPD4_INT_CONTROL, tmp);
	}
	if (ASIC_IS_DCE32(rdev)) {
		if (*disp_int_cont2 & DC_HPD5_INTERRUPT) {
			tmp = RREG32(DC_HPD5_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD5_INT_CONTROL, tmp);
		}
		if (*disp_int_cont2 & DC_HPD6_INTERRUPT) {
			tmp = RREG32(DC_HPD6_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD6_INT_CONTROL, tmp);
		}
	}
	if (RREG32(R600_HDMI_BLOCK1 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
		WREG32_P(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
	}
	if (ASIC_IS_DCE3(rdev)) {
		if (RREG32(R600_HDMI_BLOCK3 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
			WREG32_P(R600_HDMI_BLOCK3 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
		}
	} else {
		if (RREG32(R600_HDMI_BLOCK2 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
			WREG32_P(R600_HDMI_BLOCK2 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
		}
	}
}

void r600_irq_disable(struct radeon_device *rdev)
{
	u32 disp_int, disp_int_cont, disp_int_cont2;

	r600_disable_interrupts(rdev);
	/* Wait and acknowledge irq */
	mdelay(1);
	r600_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2);
	r600_disable_interrupt_state(rdev);
}
2975
2976static inline u32 r600_get_ih_wptr(struct radeon_device *rdev)
2977{
2978 u32 wptr, tmp;
2979
2980 /* XXX use writeback */
2981 wptr = RREG32(IH_RB_WPTR);
2982
2983 if (wptr & RB_OVERFLOW) {
Jerome Glisse7924e5e2010-01-15 14:44:39 +01002984 /* When a ring buffer overflow happen start parsing interrupt
2985 * from the last not overwritten vector (wptr + 16). Hopefully
2986 * this should allow us to catchup.
2987 */
2988 dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
2989 wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask);
2990 rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002991 tmp = RREG32(IH_RB_CNTL);
2992 tmp |= IH_WPTR_OVERFLOW_CLEAR;
2993 WREG32(IH_RB_CNTL, tmp);
2994 }
Jerome Glisse0c452492010-01-15 14:44:37 +01002995 return (wptr & rdev->ih.ptr_mask);
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002996}
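/*
 * Worked example (added for clarity, not in the original file): with a
 * 64 KB IH ring, ptr_mask is 0xffff.  If RB_OVERFLOW is set and wptr
 * reads back as 0x0010, parsing resumes at
 *
 *	rptr = (0x0010 + 16) & 0xffff = 0x0020
 *
 * one 16-byte IV entry past the write pointer, i.e. the oldest vector
 * that the wrapping hardware has not yet overwritten.
 */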
2997
2998/* r600 IV Ring
2999 * Each IV ring entry is 128 bits:
3000 * [7:0] - interrupt source id
3001 * [31:8] - reserved
3002 * [59:32] - interrupt source data
3003 * [127:60] - reserved
3004 *
3005 * The basic interrupt vector entries
3006 * are decoded as follows:
3007 * src_id src_data description
3008 * 1 0 D1 Vblank
3009 * 1 1 D1 Vline
3010 * 5 0 D2 Vblank
3011 * 5 1 D2 Vline
3012 * 19 0 FP Hot plug detection A
3013 * 19 1 FP Hot plug detection B
3014 * 19 2 DAC A auto-detection
3015 * 19 3 DAC B auto-detection
Christian Koenigf2594932010-04-10 03:13:16 +02003016 * 21 4 HDMI block A
3017 * 21 5 HDMI block B
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003018 * 176 - CP_INT RB
3019 * 177 - CP_INT IB1
3020 * 178 - CP_INT IB2
3021 * 181 - EOP Interrupt
3022 * 233 - GUI Idle
3023 *
3024 * Note: these are based on r600 and may need to be
3025 * adjusted or extended on newer ASICs.
3026 */
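/*
 * Illustrative sketch (not part of the original file): decoding one
 * 128-bit IV ring entry at byte offset rptr into the fields documented
 * above.  r600_decode_iv_entry() is a hypothetical name;
 * r600_irq_process() below open-codes the same extraction.
 */
#if 0
static void r600_decode_iv_entry(struct radeon_device *rdev, u32 rptr,
				 u32 *src_id, u32 *src_data)
{
	u32 ring_index = rptr / 4;	/* ring[] is u32-indexed, rptr counts bytes */

	*src_id = rdev->ih.ring[ring_index] & 0xff;		/* bits [7:0] */
	*src_data = rdev->ih.ring[ring_index + 1] & 0xfffffff;	/* bits [59:32] */
}
#endif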
3027
3028int r600_irq_process(struct radeon_device *rdev)
3029{
3030 u32 wptr = r600_get_ih_wptr(rdev);
3031 u32 rptr = rdev->ih.rptr;
3032 u32 src_id, src_data;
Alex Deuchere0df1ac2009-12-04 15:12:21 -05003033 u32 ring_index, disp_int, disp_int_cont, disp_int_cont2;
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003034 unsigned long flags;
Alex Deucherd4877cf2009-12-04 16:56:37 -05003035 bool queue_hotplug = false;
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003036
3037 DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
Jerome Glisse79c2bbc2010-01-15 14:44:38 +01003038 if (!rdev->ih.enabled)
3039 return IRQ_NONE;
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003040
3041 spin_lock_irqsave(&rdev->ih.lock, flags);
3042
3043 if (rptr == wptr) {
3044 spin_unlock_irqrestore(&rdev->ih.lock, flags);
3045 return IRQ_NONE;
3046 }
3047 if (rdev->shutdown) {
3048 spin_unlock_irqrestore(&rdev->ih.lock, flags);
3049 return IRQ_NONE;
3050 }
3051
3052restart_ih:
3053 /* display interrupts */
Alex Deuchere0df1ac2009-12-04 15:12:21 -05003054 r600_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2);
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003055
3056 rdev->ih.wptr = wptr;
3057 while (rptr != wptr) {
3058 /* wptr/rptr are in bytes! */
3059 ring_index = rptr / 4;
3060 src_id = rdev->ih.ring[ring_index] & 0xff;
3061 src_data = rdev->ih.ring[ring_index + 1] & 0xfffffff;
3062
3063 switch (src_id) {
3064 case 1: /* D1 vblank/vline */
3065 switch (src_data) {
3066 case 0: /* D1 vblank */
3067 if (disp_int & LB_D1_VBLANK_INTERRUPT) {
3068 drm_handle_vblank(rdev->ddev, 0);
Rafał Miłecki839461d2010-03-02 22:06:51 +01003069 rdev->pm.vblank_sync = true;
Rafał Miłecki73a6d3f2010-01-08 00:22:47 +01003070 wake_up(&rdev->irq.vblank_queue);
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003071 disp_int &= ~LB_D1_VBLANK_INTERRUPT;
3072 DRM_DEBUG("IH: D1 vblank\n");
3073 }
3074 break;
3075 case 1: /* D1 vline */
3076 if (disp_int & LB_D1_VLINE_INTERRUPT) {
3077 disp_int &= ~LB_D1_VLINE_INTERRUPT;
3078 DRM_DEBUG("IH: D1 vline\n");
3079 }
3080 break;
3081 default:
Alex Deucherb0425892010-01-11 19:47:38 -05003082 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003083 break;
3084 }
3085 break;
3086 case 5: /* D2 vblank/vline */
3087 switch (src_data) {
3088 case 0: /* D2 vblank */
3089 if (disp_int & LB_D2_VBLANK_INTERRUPT) {
3090 drm_handle_vblank(rdev->ddev, 1);
Rafał Miłecki839461d2010-03-02 22:06:51 +01003091 rdev->pm.vblank_sync = true;
Rafał Miłecki73a6d3f2010-01-08 00:22:47 +01003092 wake_up(&rdev->irq.vblank_queue);
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003093 disp_int &= ~LB_D2_VBLANK_INTERRUPT;
3094 DRM_DEBUG("IH: D2 vblank\n");
3095 }
3096 break;
3097 			case 1: /* D2 vline */
3098 if (disp_int & LB_D2_VLINE_INTERRUPT) {
3099 disp_int &= ~LB_D2_VLINE_INTERRUPT;
3100 DRM_DEBUG("IH: D2 vline\n");
3101 }
3102 break;
3103 default:
Alex Deucherb0425892010-01-11 19:47:38 -05003104 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003105 break;
3106 }
3107 break;
Alex Deuchere0df1ac2009-12-04 15:12:21 -05003108 case 19: /* HPD/DAC hotplug */
3109 switch (src_data) {
3110 case 0:
3111 if (disp_int & DC_HPD1_INTERRUPT) {
3112 disp_int &= ~DC_HPD1_INTERRUPT;
Alex Deucherd4877cf2009-12-04 16:56:37 -05003113 queue_hotplug = true;
3114 DRM_DEBUG("IH: HPD1\n");
Alex Deuchere0df1ac2009-12-04 15:12:21 -05003115 }
3116 break;
3117 case 1:
3118 if (disp_int & DC_HPD2_INTERRUPT) {
3119 disp_int &= ~DC_HPD2_INTERRUPT;
Alex Deucherd4877cf2009-12-04 16:56:37 -05003120 queue_hotplug = true;
3121 DRM_DEBUG("IH: HPD2\n");
Alex Deuchere0df1ac2009-12-04 15:12:21 -05003122 }
3123 break;
3124 case 4:
3125 if (disp_int_cont & DC_HPD3_INTERRUPT) {
3126 disp_int_cont &= ~DC_HPD3_INTERRUPT;
Alex Deucherd4877cf2009-12-04 16:56:37 -05003127 queue_hotplug = true;
3128 DRM_DEBUG("IH: HPD3\n");
Alex Deuchere0df1ac2009-12-04 15:12:21 -05003129 }
3130 break;
3131 case 5:
3132 if (disp_int_cont & DC_HPD4_INTERRUPT) {
3133 disp_int_cont &= ~DC_HPD4_INTERRUPT;
Alex Deucherd4877cf2009-12-04 16:56:37 -05003134 queue_hotplug = true;
3135 DRM_DEBUG("IH: HPD4\n");
Alex Deuchere0df1ac2009-12-04 15:12:21 -05003136 }
3137 break;
3138 case 10:
3139 if (disp_int_cont2 & DC_HPD5_INTERRUPT) {
Alex Deucher5898b1f2010-03-24 13:57:29 -04003140 disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
Alex Deucherd4877cf2009-12-04 16:56:37 -05003141 queue_hotplug = true;
3142 DRM_DEBUG("IH: HPD5\n");
Alex Deuchere0df1ac2009-12-04 15:12:21 -05003143 }
3144 break;
3145 case 12:
3146 if (disp_int_cont2 & DC_HPD6_INTERRUPT) {
Alex Deucher5898b1f2010-03-24 13:57:29 -04003147 disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
Alex Deucherd4877cf2009-12-04 16:56:37 -05003148 queue_hotplug = true;
3149 DRM_DEBUG("IH: HPD6\n");
Alex Deuchere0df1ac2009-12-04 15:12:21 -05003150 }
3151 break;
3152 default:
Alex Deucherb0425892010-01-11 19:47:38 -05003153 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
Alex Deuchere0df1ac2009-12-04 15:12:21 -05003154 break;
3155 }
3156 break;
Christian Koenigf2594932010-04-10 03:13:16 +02003157 case 21: /* HDMI */
3158 DRM_DEBUG("IH: HDMI: 0x%x\n", src_data);
3159 r600_audio_schedule_polling(rdev);
3160 break;
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003161 case 176: /* CP_INT in ring buffer */
3162 case 177: /* CP_INT in IB1 */
3163 case 178: /* CP_INT in IB2 */
3164 DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
3165 radeon_fence_process(rdev);
3166 break;
3167 case 181: /* CP EOP event */
3168 DRM_DEBUG("IH: CP EOP\n");
3169 break;
Alex Deucher2031f772010-04-22 12:52:11 -04003170 case 233: /* GUI IDLE */
3171 DRM_DEBUG("IH: CP EOP\n");
3172 rdev->pm.gui_idle = true;
3173 wake_up(&rdev->irq.idle_queue);
3174 break;
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003175 default:
Alex Deucherb0425892010-01-11 19:47:38 -05003176 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003177 break;
3178 }
3179
3180 /* wptr/rptr are in bytes! */
Jerome Glisse0c452492010-01-15 14:44:37 +01003181 rptr += 16;
3182 rptr &= rdev->ih.ptr_mask;
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003183 }
3184 /* make sure wptr hasn't changed while processing */
3185 wptr = r600_get_ih_wptr(rdev);
3186 if (wptr != rdev->ih.wptr)
3187 goto restart_ih;
Alex Deucherd4877cf2009-12-04 16:56:37 -05003188 if (queue_hotplug)
3189 queue_work(rdev->wq, &rdev->hotplug_work);
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003190 rdev->ih.rptr = rptr;
3191 WREG32(IH_RB_RPTR, rdev->ih.rptr);
3192 spin_unlock_irqrestore(&rdev->ih.lock, flags);
3193 return IRQ_HANDLED;
3194}
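/*
 * Illustrative skeleton (not part of the original file) of the control
 * flow above: entries are consumed until rptr catches wptr, then wptr
 * is sampled again before unlocking so vectors that arrived during
 * processing are not left in the ring.  process_one_entry() is a
 * hypothetical helper standing in for the switch statement.
 */
#if 0
	do {
		while (rptr != wptr) {
			process_one_entry(rdev, rptr);
			rptr = (rptr + 16) & rdev->ih.ptr_mask;
		}
		wptr = r600_get_ih_wptr(rdev);
	} while (rptr != wptr);
#endif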
Jerome Glisse3ce0a232009-09-08 10:10:24 +10003195
3196/*
3197 * Debugfs info
3198 */
3199#if defined(CONFIG_DEBUG_FS)
3200
3201static int r600_debugfs_cp_ring_info(struct seq_file *m, void *data)
3202{
3203 struct drm_info_node *node = (struct drm_info_node *) m->private;
3204 struct drm_device *dev = node->minor->dev;
3205 struct radeon_device *rdev = dev->dev_private;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10003206 unsigned count, i, j;
3207
3208 radeon_ring_free_size(rdev);
Rafał Miłeckid6840762009-11-10 22:26:21 +01003209 count = (rdev->cp.ring_size / 4) - rdev->cp.ring_free_dw;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10003210 seq_printf(m, "CP_STAT 0x%08x\n", RREG32(CP_STAT));
Rafał Miłeckid6840762009-11-10 22:26:21 +01003211 seq_printf(m, "CP_RB_WPTR 0x%08x\n", RREG32(CP_RB_WPTR));
3212 seq_printf(m, "CP_RB_RPTR 0x%08x\n", RREG32(CP_RB_RPTR));
3213 seq_printf(m, "driver's copy of the CP_RB_WPTR 0x%08x\n", rdev->cp.wptr);
3214 seq_printf(m, "driver's copy of the CP_RB_RPTR 0x%08x\n", rdev->cp.rptr);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10003215 seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw);
3216 seq_printf(m, "%u dwords in ring\n", count);
Rafał Miłeckid6840762009-11-10 22:26:21 +01003217 i = rdev->cp.rptr;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10003218 for (j = 0; j <= count; j++) {
Jerome Glisse3ce0a232009-09-08 10:10:24 +10003219 seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]);
Rafał Miłeckid6840762009-11-10 22:26:21 +01003220 i = (i + 1) & rdev->cp.ptr_mask;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10003221 }
3222 return 0;
3223}
3224
3225static int r600_debugfs_mc_info(struct seq_file *m, void *data)
3226{
3227 struct drm_info_node *node = (struct drm_info_node *) m->private;
3228 struct drm_device *dev = node->minor->dev;
3229 struct radeon_device *rdev = dev->dev_private;
3230
3231 DREG32_SYS(m, rdev, R_000E50_SRBM_STATUS);
3232 DREG32_SYS(m, rdev, VM_L2_STATUS);
3233 return 0;
3234}
3235
3236static struct drm_info_list r600_mc_info_list[] = {
3237 {"r600_mc_info", r600_debugfs_mc_info, 0, NULL},
3238 {"r600_ring_info", r600_debugfs_cp_ring_info, 0, NULL},
3239};
3240#endif
3241
3242int r600_debugfs_mc_info_init(struct radeon_device *rdev)
3243{
3244#if defined(CONFIG_DEBUG_FS)
3245 return radeon_debugfs_add_files(rdev, r600_mc_info_list, ARRAY_SIZE(r600_mc_info_list));
3246#else
3247 return 0;
3248#endif
Jerome Glisse771fe6b2009-06-05 14:42:42 +02003249}
Jerome Glisse062b3892010-02-04 20:36:39 +01003250
3251/**
3252 * r600_ioctl_wait_idle - flush host path cache on wait idle ioctl
3253 * @rdev: radeon device structure
3254 * @bo: buffer object struct which userspace is waiting for idle
3255 *
3256 * Some R6XX/R7XX ASICs don't seem to take into account an HDP flush
3257 * performed through the ring buffer, which leads to rendering
3258 * corruption (see http://bugzilla.kernel.org/show_bug.cgi?id=15186).
3259 * To avoid this, perform the HDP flush directly by writing the
3260 * register through MMIO.
3260 */
3261void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
3262{
3263 WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
3264}
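/*
 * Usage sketch (added for illustration, not part of the original file):
 * the generic GEM wait-idle ioctl is expected to reach this function
 * through the ASIC function table, roughly as below; the exact call
 * site lives in the shared radeon GEM code, so this shape is an
 * assumption, not a copy of it.
 */
#if 0
	if (robj->rdev->asic->ioctl_wait_idle)
		robj->rdev->asic->ioctl_wait_idle(robj->rdev, robj);
#endif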