/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/firmware.h>
#include <linux/platform_device.h>
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_mode.h"
#include "r600d.h"
#include "atom.h"
#include "avivod.h"

#define PFP_UCODE_SIZE 576
#define PM4_UCODE_SIZE 1792
#define RLC_UCODE_SIZE 768
#define R700_PFP_UCODE_SIZE 848
#define R700_PM4_UCODE_SIZE 1360
#define R700_RLC_UCODE_SIZE 1024
#define EVERGREEN_PFP_UCODE_SIZE 1120
#define EVERGREEN_PM4_UCODE_SIZE 1376
#define EVERGREEN_RLC_UCODE_SIZE 768

/* Firmware Names */
MODULE_FIRMWARE("radeon/R600_pfp.bin");
MODULE_FIRMWARE("radeon/R600_me.bin");
MODULE_FIRMWARE("radeon/RV610_pfp.bin");
MODULE_FIRMWARE("radeon/RV610_me.bin");
MODULE_FIRMWARE("radeon/RV630_pfp.bin");
MODULE_FIRMWARE("radeon/RV630_me.bin");
MODULE_FIRMWARE("radeon/RV620_pfp.bin");
MODULE_FIRMWARE("radeon/RV620_me.bin");
MODULE_FIRMWARE("radeon/RV635_pfp.bin");
MODULE_FIRMWARE("radeon/RV635_me.bin");
MODULE_FIRMWARE("radeon/RV670_pfp.bin");
MODULE_FIRMWARE("radeon/RV670_me.bin");
MODULE_FIRMWARE("radeon/RS780_pfp.bin");
MODULE_FIRMWARE("radeon/RS780_me.bin");
MODULE_FIRMWARE("radeon/RV770_pfp.bin");
MODULE_FIRMWARE("radeon/RV770_me.bin");
MODULE_FIRMWARE("radeon/RV730_pfp.bin");
MODULE_FIRMWARE("radeon/RV730_me.bin");
MODULE_FIRMWARE("radeon/RV710_pfp.bin");
MODULE_FIRMWARE("radeon/RV710_me.bin");
MODULE_FIRMWARE("radeon/R600_rlc.bin");
MODULE_FIRMWARE("radeon/R700_rlc.bin");
MODULE_FIRMWARE("radeon/CEDAR_pfp.bin");
MODULE_FIRMWARE("radeon/CEDAR_me.bin");
MODULE_FIRMWARE("radeon/CEDAR_rlc.bin");
MODULE_FIRMWARE("radeon/REDWOOD_pfp.bin");
MODULE_FIRMWARE("radeon/REDWOOD_me.bin");
MODULE_FIRMWARE("radeon/REDWOOD_rlc.bin");
MODULE_FIRMWARE("radeon/JUNIPER_pfp.bin");
MODULE_FIRMWARE("radeon/JUNIPER_me.bin");
MODULE_FIRMWARE("radeon/JUNIPER_rlc.bin");
MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin");
MODULE_FIRMWARE("radeon/CYPRESS_me.bin");
MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin");

int r600_debugfs_mc_info_init(struct radeon_device *rdev);

/* r600,rv610,rv630,rv620,rv635,rv670 */
int r600_mc_wait_for_idle(struct radeon_device *rdev);
void r600_gpu_init(struct radeon_device *rdev);
void r600_fini(struct radeon_device *rdev);
void r600_irq_disable(struct radeon_device *rdev);

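/* Select the power state and clock mode to request for the given PM
 * action. The power state array is ordered low to high; selection is
 * clamped at either end (reported via can_upclock/can_downclock), and
 * states flagged single-display-only are skipped when more than one
 * CRTC is active.
 */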
void r600_get_power_state(struct radeon_device *rdev,
			  enum radeon_pm_action action)
{
	int i;

	rdev->pm.can_upclock = true;
	rdev->pm.can_downclock = true;

	/* power state array is low to high, default is first */
	if ((rdev->flags & RADEON_IS_IGP) || (rdev->family == CHIP_R600)) {
		int min_power_state_index = 0;

		if (rdev->pm.num_power_states > 2)
			min_power_state_index = 1;

		switch (action) {
		case PM_ACTION_MINIMUM:
			rdev->pm.requested_power_state_index = min_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.can_downclock = false;
			break;
		case PM_ACTION_DOWNCLOCK:
			if (rdev->pm.current_power_state_index == min_power_state_index) {
				rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
				rdev->pm.can_downclock = false;
			} else {
				if (rdev->pm.active_crtc_count > 1) {
					for (i = 0; i < rdev->pm.num_power_states; i++) {
						if (rdev->pm.power_state[i].flags & RADEON_PM_SINGLE_DISPLAY_ONLY)
							continue;
						else if (i >= rdev->pm.current_power_state_index) {
							rdev->pm.requested_power_state_index =
								rdev->pm.current_power_state_index;
							break;
						} else {
							rdev->pm.requested_power_state_index = i;
							break;
						}
					}
				} else
					rdev->pm.requested_power_state_index =
						rdev->pm.current_power_state_index - 1;
			}
			rdev->pm.requested_clock_mode_index = 0;
			break;
		case PM_ACTION_UPCLOCK:
			if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
				rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
				rdev->pm.can_upclock = false;
			} else {
				if (rdev->pm.active_crtc_count > 1) {
					for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
						if (rdev->pm.power_state[i].flags & RADEON_PM_SINGLE_DISPLAY_ONLY)
							continue;
						else if (i <= rdev->pm.current_power_state_index) {
							rdev->pm.requested_power_state_index =
								rdev->pm.current_power_state_index;
							break;
						} else {
							rdev->pm.requested_power_state_index = i;
							break;
						}
					}
				} else
					rdev->pm.requested_power_state_index =
						rdev->pm.current_power_state_index + 1;
			}
			rdev->pm.requested_clock_mode_index = 0;
			break;
		case PM_ACTION_DEFAULT:
			rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.can_upclock = false;
			break;
		case PM_ACTION_NONE:
		default:
			DRM_ERROR("Requested mode for undefined action\n");
			return;
		}
	} else {
		/* XXX select a power state based on AC/DC, single/dualhead, etc. */
		/* for now just select the first power state and switch between clock modes */
		/* power state array is low to high, default is first (0) */
		if (rdev->pm.active_crtc_count > 1) {
			rdev->pm.requested_power_state_index = -1;
			/* start at 1 as we don't want the default mode */
			for (i = 1; i < rdev->pm.num_power_states; i++) {
				if (rdev->pm.power_state[i].flags & RADEON_PM_SINGLE_DISPLAY_ONLY)
					continue;
				else if ((rdev->pm.power_state[i].type == POWER_STATE_TYPE_PERFORMANCE) ||
					 (rdev->pm.power_state[i].type == POWER_STATE_TYPE_BATTERY)) {
					rdev->pm.requested_power_state_index = i;
					break;
				}
			}
			/* if nothing selected, grab the default state. */
			if (rdev->pm.requested_power_state_index == -1)
				rdev->pm.requested_power_state_index = 0;
		} else
			rdev->pm.requested_power_state_index = 1;

		switch (action) {
		case PM_ACTION_MINIMUM:
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.can_downclock = false;
			break;
		case PM_ACTION_DOWNCLOCK:
			if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
				if (rdev->pm.current_clock_mode_index == 0) {
					rdev->pm.requested_clock_mode_index = 0;
					rdev->pm.can_downclock = false;
				} else
					rdev->pm.requested_clock_mode_index =
						rdev->pm.current_clock_mode_index - 1;
			} else {
				rdev->pm.requested_clock_mode_index = 0;
				rdev->pm.can_downclock = false;
			}
			break;
		case PM_ACTION_UPCLOCK:
			if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
				if (rdev->pm.current_clock_mode_index ==
				    (rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1)) {
					rdev->pm.requested_clock_mode_index = rdev->pm.current_clock_mode_index;
					rdev->pm.can_upclock = false;
				} else
					rdev->pm.requested_clock_mode_index =
						rdev->pm.current_clock_mode_index + 1;
			} else {
				rdev->pm.requested_clock_mode_index =
					rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1;
				rdev->pm.can_upclock = false;
			}
			break;
		case PM_ACTION_DEFAULT:
			rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.can_upclock = false;
			break;
		case PM_ACTION_NONE:
		default:
			DRM_ERROR("Requested mode for undefined action\n");
			return;
		}
	}

	DRM_INFO("Requested: e: %d m: %d p: %d\n",
		 rdev->pm.power_state[rdev->pm.requested_power_state_index].
		 clock_info[rdev->pm.requested_clock_mode_index].sclk,
		 rdev->pm.power_state[rdev->pm.requested_power_state_index].
		 clock_info[rdev->pm.requested_clock_mode_index].mclk,
		 rdev->pm.power_state[rdev->pm.requested_power_state_index].
		 pcie_lanes);
}

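/* Switch to the power state chosen by r600_get_power_state(). Clocks are
 * only reprogrammed while the GUI is idle; a static switch reclocks
 * immediately, a dynamic one is synchronized to the vertical blank.
 */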
void r600_set_power_state(struct radeon_device *rdev, bool static_switch)
{
	u32 sclk, mclk;

	if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
	    (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
		return;

	if (radeon_gui_idle(rdev)) {
		sclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
			clock_info[rdev->pm.requested_clock_mode_index].sclk;
		if (sclk > rdev->clock.default_sclk)
			sclk = rdev->clock.default_sclk;

		mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
			clock_info[rdev->pm.requested_clock_mode_index].mclk;
		if (mclk > rdev->clock.default_mclk)
			mclk = rdev->clock.default_mclk;

		/* voltage, pcie lanes, etc. */
		radeon_pm_misc(rdev);

		if (static_switch) {
			radeon_pm_prepare(rdev);
			/* set engine clock */
			if (sclk != rdev->pm.current_sclk) {
				radeon_set_engine_clock(rdev, sclk);
				rdev->pm.current_sclk = sclk;
				DRM_INFO("Setting: e: %d\n", sclk);
			}
			/* set memory clock */
			if (rdev->asic->set_memory_clock && (mclk != rdev->pm.current_mclk)) {
				radeon_set_memory_clock(rdev, mclk);
				rdev->pm.current_mclk = mclk;
				DRM_INFO("Setting: m: %d\n", mclk);
			}
			radeon_pm_finish(rdev);
		} else {
			radeon_sync_with_vblank(rdev);

			if (!radeon_pm_in_vbl(rdev))
				return;

			radeon_pm_prepare(rdev);
			if (sclk != rdev->pm.current_sclk) {
				radeon_pm_debug_check_in_vbl(rdev, false);
				radeon_set_engine_clock(rdev, sclk);
				radeon_pm_debug_check_in_vbl(rdev, true);
				rdev->pm.current_sclk = sclk;
				DRM_INFO("Setting: e: %d\n", sclk);
			}

			/* set memory clock */
			if (rdev->asic->set_memory_clock && (mclk != rdev->pm.current_mclk)) {
				radeon_pm_debug_check_in_vbl(rdev, false);
				radeon_set_memory_clock(rdev, mclk);
				radeon_pm_debug_check_in_vbl(rdev, true);
				rdev->pm.current_mclk = mclk;
				DRM_INFO("Setting: m: %d\n", mclk);
			}
			radeon_pm_finish(rdev);
		}

		rdev->pm.current_power_state_index = rdev->pm.requested_power_state_index;
		rdev->pm.current_clock_mode_index = rdev->pm.requested_clock_mode_index;
	} else
		DRM_INFO("GUI not idle!!!\n");
}

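/* Per-ASIC PM tweaks (voltage, PCIe lanes, etc.); nothing to do yet on
 * r6xx, hence the empty body.
 */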
void r600_pm_misc(struct radeon_device *rdev)
{

}

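/* Report whether the graphics engine is idle according to GRBM_STATUS. */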
bool r600_gui_idle(struct radeon_device *rdev)
{
	if (RREG32(GRBM_STATUS) & GUI_ACTIVE)
		return false;
	else
		return true;
}

/* hpd for digital panel detect/disconnect */
bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
	bool connected = false;

	if (ASIC_IS_DCE3(rdev)) {
		switch (hpd) {
		case RADEON_HPD_1:
			if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_2:
			if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_3:
			if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_4:
			if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		/* DCE 3.2 */
		case RADEON_HPD_5:
			if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_6:
			if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		default:
			break;
		}
	} else {
		switch (hpd) {
		case RADEON_HPD_1:
			if (RREG32(DC_HOT_PLUG_DETECT1_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_2:
			if (RREG32(DC_HOT_PLUG_DETECT2_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_3:
			if (RREG32(DC_HOT_PLUG_DETECT3_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		default:
			break;
		}
	}
	return connected;
}

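/* Program the HPD interrupt polarity so that the next interrupt fires on
 * the opposite transition of the pin's currently sensed state.
 */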
void r600_hpd_set_polarity(struct radeon_device *rdev,
			   enum radeon_hpd_id hpd)
{
	u32 tmp;
	bool connected = r600_hpd_sense(rdev, hpd);

	if (ASIC_IS_DCE3(rdev)) {
		switch (hpd) {
		case RADEON_HPD_1:
			tmp = RREG32(DC_HPD1_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD1_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_2:
			tmp = RREG32(DC_HPD2_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD2_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_3:
			tmp = RREG32(DC_HPD3_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD3_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_4:
			tmp = RREG32(DC_HPD4_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD4_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_5:
			tmp = RREG32(DC_HPD5_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD5_INT_CONTROL, tmp);
			break;
		/* DCE 3.2 */
		case RADEON_HPD_6:
			tmp = RREG32(DC_HPD6_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD6_INT_CONTROL, tmp);
			break;
		default:
			break;
		}
	} else {
		switch (hpd) {
		case RADEON_HPD_1:
			tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_2:
			tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_3:
			tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
			break;
		default:
			break;
		}
	}
}

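/* Enable the HPD pins used by the attached connectors and flag them for
 * the interrupt handler; DCE 3 parts also get their connection and RX
 * interrupt timers programmed.
 */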
void r600_hpd_init(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;

	if (ASIC_IS_DCE3(rdev)) {
		u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa);
		if (ASIC_IS_DCE32(rdev))
			tmp |= DC_HPDx_EN;

		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HPD1_CONTROL, tmp);
				rdev->irq.hpd[0] = true;
				break;
			case RADEON_HPD_2:
				WREG32(DC_HPD2_CONTROL, tmp);
				rdev->irq.hpd[1] = true;
				break;
			case RADEON_HPD_3:
				WREG32(DC_HPD3_CONTROL, tmp);
				rdev->irq.hpd[2] = true;
				break;
			case RADEON_HPD_4:
				WREG32(DC_HPD4_CONTROL, tmp);
				rdev->irq.hpd[3] = true;
				break;
			/* DCE 3.2 */
			case RADEON_HPD_5:
				WREG32(DC_HPD5_CONTROL, tmp);
				rdev->irq.hpd[4] = true;
				break;
			case RADEON_HPD_6:
				WREG32(DC_HPD6_CONTROL, tmp);
				rdev->irq.hpd[5] = true;
				break;
			default:
				break;
			}
		}
	} else {
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				rdev->irq.hpd[0] = true;
				break;
			case RADEON_HPD_2:
				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				rdev->irq.hpd[1] = true;
				break;
			case RADEON_HPD_3:
				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				rdev->irq.hpd[2] = true;
				break;
			default:
				break;
			}
		}
	}
	if (rdev->irq.installed)
		r600_irq_set(rdev);
}

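/* Disable the HPD pins again and clear the corresponding interrupt flags. */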
void r600_hpd_fini(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;

	if (ASIC_IS_DCE3(rdev)) {
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HPD1_CONTROL, 0);
				rdev->irq.hpd[0] = false;
				break;
			case RADEON_HPD_2:
				WREG32(DC_HPD2_CONTROL, 0);
				rdev->irq.hpd[1] = false;
				break;
			case RADEON_HPD_3:
				WREG32(DC_HPD3_CONTROL, 0);
				rdev->irq.hpd[2] = false;
				break;
			case RADEON_HPD_4:
				WREG32(DC_HPD4_CONTROL, 0);
				rdev->irq.hpd[3] = false;
				break;
			/* DCE 3.2 */
			case RADEON_HPD_5:
				WREG32(DC_HPD5_CONTROL, 0);
				rdev->irq.hpd[4] = false;
				break;
			case RADEON_HPD_6:
				WREG32(DC_HPD6_CONTROL, 0);
				rdev->irq.hpd[5] = false;
				break;
			default:
				break;
			}
		}
	} else {
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, 0);
				rdev->irq.hpd[0] = false;
				break;
			case RADEON_HPD_2:
				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, 0);
				rdev->irq.hpd[1] = false;
				break;
			case RADEON_HPD_3:
				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, 0);
				rdev->irq.hpd[2] = false;
				break;
			default:
				break;
			}
		}
	}
}

/*
 * R600 PCIE GART
 */
void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	/* flush hdp cache so updates hit vram */
	WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);

	WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (rdev->mc.gtt_end - 1) >> 12);
	WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
		tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
		if (tmp == 2) {
			printk(KERN_WARNING "[drm] r600 flush TLB failed\n");
			return;
		}
		if (tmp) {
			return;
		}
		udelay(1);
	}
}

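/* One-time GART setup: initialize the common GART structure and allocate
 * the page table in VRAM.
 */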
int r600_pcie_gart_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.table.vram.robj) {
		WARN(1, "R600 PCIE GART already initialized.\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	if (r)
		return r;
	rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
	return radeon_gart_table_vram_alloc(rdev);
}

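/* Pin the GART page table, program the L2 cache and TLB controls and the
 * VM context 0 page table range, then flush the TLB.
 */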
int r600_pcie_gart_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int r, i;

	if (rdev->gart.table.vram.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	radeon_gart_restore(rdev);

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
			(u32)(rdev->dummy_page.addr >> 12));
	for (i = 1; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	r600_pcie_gart_tlb_flush(rdev);
	rdev->gart.ready = true;
	return 0;
}

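/* Disable all VM contexts and the L2 cache, then unpin the GART page
 * table if it exists.
 */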
void r600_pcie_gart_disable(struct radeon_device *rdev)
{
	u32 tmp;
	int i, r;

	/* Disable all tables */
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	/* Disable L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup L1 TLB control */
	tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	if (rdev->gart.table.vram.robj) {
		r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(rdev->gart.table.vram.robj);
			radeon_bo_unpin(rdev->gart.table.vram.robj);
			radeon_bo_unreserve(rdev->gart.table.vram.robj);
		}
	}
}

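/* Tear down the GART: disable it and free the page table. */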
void r600_pcie_gart_fini(struct radeon_device *rdev)
{
	radeon_gart_fini(rdev);
	r600_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
}

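/* AGP setup: program the L2 cache and TLB controls as for GART, but
 * leave every VM context disabled.
 */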
void r600_agp_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int i;

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
}

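/* Poll SRBM_STATUS until the memory controller reports idle; returns 0
 * once idle, -1 on timeout.
 */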
int r600_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(R_000E50_SRBM_STATUS) & 0x3F00;
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -1;
}

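/* Program the memory controller apertures (VRAM and, if present, AGP)
 * with the MC stopped, then disable the VGA renderer so it cannot
 * overwrite our objects.
 */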
static void r600_mc_program(struct radeon_device *rdev)
{
	struct rv515_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}
	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);

	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	/* Lockout access through VGA aperture (doesn't exist before R600) */
	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
	/* Update configuration */
	if (rdev->flags & RADEON_IS_AGP) {
		if (rdev->mc.vram_start < rdev->mc.gtt_start) {
			/* VRAM before AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.vram_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.gtt_end >> 12);
		} else {
			/* VRAM after AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.gtt_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.vram_end >> 12);
		}
	} else {
		WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12);
		WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12);
	}
	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(MC_VM_FB_LOCATION, tmp);
	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
	WREG32(HDP_NONSURFACE_INFO, (2 << 7));
	WREG32(HDP_NONSURFACE_SIZE, rdev->mc.mc_vram_size | 0x3FF);
	if (rdev->flags & RADEON_IS_AGP) {
		WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22);
		WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22);
		WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
	} else {
		WREG32(MC_VM_AGP_BASE, 0);
		WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
		WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
	}
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	rv515_mc_resume(rdev, &save);
	/* we need to own VRAM, so turn off the VGA renderer here
	 * to stop it overwriting our objects */
	rv515_vga_render_disable(rdev);
}

/**
 * r600_vram_gtt_location - try to find VRAM & GTT location
 * @rdev: radeon device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * This function tries to place VRAM at the same address in the GPU
 * address space as it occupies in the CPU (PCI) address space, as some
 * GPUs seem to have issues when we reprogram it to a different address
 * space.
 *
 * If there is not enough space to fit the invisible VRAM after the
 * aperture, then we limit the VRAM size to the aperture.
 *
 * If we are using AGP, then place VRAM adjacent to the AGP aperture, as
 * we need them to be contiguous from the GPU's point of view so that we
 * can program the GPU to catch accesses outside of them (weird GPU
 * policy, see ??).
 *
 * This function never fails; the worst case is limiting VRAM or GTT.
 *
 * Note: GTT start, end, and size should be initialized before calling
 * this function on AGP platforms.
 */
void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
	u64 size_bf, size_af;

	if (mc->mc_vram_size > 0xE0000000) {
		/* leave room for at least 512M GTT */
		dev_warn(rdev->dev, "limiting VRAM\n");
		mc->real_vram_size = 0xE0000000;
		mc->mc_vram_size = 0xE0000000;
	}
	if (rdev->flags & RADEON_IS_AGP) {
		size_bf = mc->gtt_start;
		size_af = 0xFFFFFFFF - mc->gtt_end + 1;
		if (size_bf > size_af) {
			if (mc->mc_vram_size > size_bf) {
				dev_warn(rdev->dev, "limiting VRAM\n");
				mc->real_vram_size = size_bf;
				mc->mc_vram_size = size_bf;
			}
			mc->vram_start = mc->gtt_start - mc->mc_vram_size;
		} else {
			if (mc->mc_vram_size > size_af) {
				dev_warn(rdev->dev, "limiting VRAM\n");
				mc->real_vram_size = size_af;
				mc->mc_vram_size = size_af;
			}
			mc->vram_start = mc->gtt_end;
		}
		mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
		dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
				mc->mc_vram_size >> 20, mc->vram_start,
				mc->vram_end, mc->real_vram_size >> 20);
	} else {
		u64 base = 0;
		if (rdev->flags & RADEON_IS_IGP)
			base = (RREG32(MC_VM_FB_LOCATION) & 0xFFFF) << 24;
		radeon_vram_location(rdev, &rdev->mc, base);
		radeon_gtt_location(rdev, mc);
	}
}

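/* Read the VRAM width and size from the hardware and lay out the GPU
 * address space accordingly.
 */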
int r600_mc_init(struct radeon_device *rdev)
{
	u32 tmp;
	int chansize, numchan;

	/* Get VRAM information */
	rdev->mc.vram_is_ddr = true;
	tmp = RREG32(RAMCFG);
	if (tmp & CHANSIZE_OVERRIDE) {
		chansize = 16;
	} else if (tmp & CHANSIZE_MASK) {
		chansize = 64;
	} else {
		chansize = 32;
	}
	tmp = RREG32(CHMAP);
	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
	case 0:
	default:
		numchan = 1;
		break;
	case 1:
		numchan = 2;
		break;
	case 2:
		numchan = 4;
		break;
	case 3:
		numchan = 8;
		break;
	}
	rdev->mc.vram_width = numchan * chansize;
	/* Could aper size report 0? */
	rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
	rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
	/* Setup GPU memory space */
	rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
	rdev->mc.visible_vram_size = rdev->mc.aper_size;
	r600_vram_gtt_location(rdev, &rdev->mc);

	if (rdev->flags & RADEON_IS_IGP)
		rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
	radeon_update_bandwidth_info(rdev);
	return 0;
}

/* We don't check that the GPU really needs a reset; we simply do the
 * reset. It's up to the caller to determine if the GPU needs one. We
 * might add a helper function to check that.
 */
int r600_gpu_soft_reset(struct radeon_device *rdev)
{
	struct rv515_mc_save save;
	u32 grbm_busy_mask = S_008010_VC_BUSY(1) | S_008010_VGT_BUSY_NO_DMA(1) |
				S_008010_VGT_BUSY(1) | S_008010_TA03_BUSY(1) |
				S_008010_TC_BUSY(1) | S_008010_SX_BUSY(1) |
				S_008010_SH_BUSY(1) | S_008010_SPI03_BUSY(1) |
				S_008010_SMX_BUSY(1) | S_008010_SC_BUSY(1) |
				S_008010_PA_BUSY(1) | S_008010_DB03_BUSY(1) |
				S_008010_CR_BUSY(1) | S_008010_CB03_BUSY(1) |
				S_008010_GUI_ACTIVE(1);
	u32 grbm2_busy_mask = S_008014_SPI0_BUSY(1) | S_008014_SPI1_BUSY(1) |
			S_008014_SPI2_BUSY(1) | S_008014_SPI3_BUSY(1) |
			S_008014_TA0_BUSY(1) | S_008014_TA1_BUSY(1) |
			S_008014_TA2_BUSY(1) | S_008014_TA3_BUSY(1) |
			S_008014_DB0_BUSY(1) | S_008014_DB1_BUSY(1) |
			S_008014_DB2_BUSY(1) | S_008014_DB3_BUSY(1) |
			S_008014_CB0_BUSY(1) | S_008014_CB1_BUSY(1) |
			S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1);
	u32 tmp;

	dev_info(rdev->dev, "GPU softreset\n");
	dev_info(rdev->dev, "  R_008010_GRBM_STATUS=0x%08X\n",
		RREG32(R_008010_GRBM_STATUS));
	dev_info(rdev->dev, "  R_008014_GRBM_STATUS2=0x%08X\n",
		RREG32(R_008014_GRBM_STATUS2));
	dev_info(rdev->dev, "  R_000E50_SRBM_STATUS=0x%08X\n",
		RREG32(R_000E50_SRBM_STATUS));
	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	/* Disable CP parsing/prefetching */
	WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
	/* Check if any of the rendering block is busy and reset it */
	if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) ||
	    (RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) {
		tmp = S_008020_SOFT_RESET_CR(1) |
			S_008020_SOFT_RESET_DB(1) |
			S_008020_SOFT_RESET_CB(1) |
			S_008020_SOFT_RESET_PA(1) |
			S_008020_SOFT_RESET_SC(1) |
			S_008020_SOFT_RESET_SMX(1) |
			S_008020_SOFT_RESET_SPI(1) |
			S_008020_SOFT_RESET_SX(1) |
			S_008020_SOFT_RESET_SH(1) |
			S_008020_SOFT_RESET_TC(1) |
			S_008020_SOFT_RESET_TA(1) |
			S_008020_SOFT_RESET_VC(1) |
			S_008020_SOFT_RESET_VGT(1);
		dev_info(rdev->dev, "  R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(R_008020_GRBM_SOFT_RESET, tmp);
		RREG32(R_008020_GRBM_SOFT_RESET);
		mdelay(15);
		WREG32(R_008020_GRBM_SOFT_RESET, 0);
	}
	/* Reset CP (we always reset CP) */
	tmp = S_008020_SOFT_RESET_CP(1);
	dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
	WREG32(R_008020_GRBM_SOFT_RESET, tmp);
	RREG32(R_008020_GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(R_008020_GRBM_SOFT_RESET, 0);
	/* Wait a little for things to settle down */
	mdelay(1);
	dev_info(rdev->dev, "  R_008010_GRBM_STATUS=0x%08X\n",
		RREG32(R_008010_GRBM_STATUS));
	dev_info(rdev->dev, "  R_008014_GRBM_STATUS2=0x%08X\n",
		RREG32(R_008014_GRBM_STATUS2));
	dev_info(rdev->dev, "  R_000E50_SRBM_STATUS=0x%08X\n",
		RREG32(R_000E50_SRBM_STATUS));
	rv515_mc_resume(rdev, &save);
	return 0;
}

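/* Heuristic lockup check: if the GUI is busy, push two PACKET2 NOPs
 * through the ring and see whether the CP read pointer still advances.
 */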
bool r600_gpu_is_lockup(struct radeon_device *rdev)
{
	u32 srbm_status;
	u32 grbm_status;
	u32 grbm_status2;
	int r;

	srbm_status = RREG32(R_000E50_SRBM_STATUS);
	grbm_status = RREG32(R_008010_GRBM_STATUS);
	grbm_status2 = RREG32(R_008014_GRBM_STATUS2);
	if (!G_008010_GUI_ACTIVE(grbm_status)) {
		r100_gpu_lockup_update(&rdev->config.r300.lockup, &rdev->cp);
		return false;
	}
	/* force CP activities */
	r = radeon_ring_lock(rdev, 2);
	if (!r) {
		/* PACKET2 NOP */
		radeon_ring_write(rdev, 0x80000000);
		radeon_ring_write(rdev, 0x80000000);
		radeon_ring_unlock_commit(rdev);
	}
	rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
	return r100_gpu_cp_is_lockup(rdev, &rdev->config.r300.lockup, &rdev->cp);
}

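/* ASIC reset entry point; currently just performs a soft reset. */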
int r600_asic_reset(struct radeon_device *rdev)
{
	return r600_gpu_soft_reset(rdev);
}

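/* Build the pipe-to-backend swizzle map: distribute the enabled render
 * backends across the tile pipes, two bits per pipe.
 */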
static u32 r600_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
					     u32 num_backends,
					     u32 backend_disable_mask)
{
	u32 backend_map = 0;
	u32 enabled_backends_mask;
	u32 enabled_backends_count;
	u32 cur_pipe;
	u32 swizzle_pipe[R6XX_MAX_PIPES];
	u32 cur_backend;
	u32 i;

	if (num_tile_pipes > R6XX_MAX_PIPES)
		num_tile_pipes = R6XX_MAX_PIPES;
	if (num_tile_pipes < 1)
		num_tile_pipes = 1;
	if (num_backends > R6XX_MAX_BACKENDS)
		num_backends = R6XX_MAX_BACKENDS;
	if (num_backends < 1)
		num_backends = 1;

	enabled_backends_mask = 0;
	enabled_backends_count = 0;
	for (i = 0; i < R6XX_MAX_BACKENDS; ++i) {
		if (((backend_disable_mask >> i) & 1) == 0) {
			enabled_backends_mask |= (1 << i);
			++enabled_backends_count;
		}
		if (enabled_backends_count == num_backends)
			break;
	}

	if (enabled_backends_count == 0) {
		enabled_backends_mask = 1;
		enabled_backends_count = 1;
	}

	if (enabled_backends_count != num_backends)
		num_backends = enabled_backends_count;

	memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R6XX_MAX_PIPES);
	switch (num_tile_pipes) {
	case 1:
		swizzle_pipe[0] = 0;
		break;
	case 2:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		break;
	case 3:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		swizzle_pipe[2] = 2;
		break;
	case 4:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		swizzle_pipe[2] = 2;
		swizzle_pipe[3] = 3;
		break;
	case 5:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		swizzle_pipe[2] = 2;
		swizzle_pipe[3] = 3;
		swizzle_pipe[4] = 4;
		break;
	case 6:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 2;
		swizzle_pipe[2] = 4;
		swizzle_pipe[3] = 5;
		swizzle_pipe[4] = 1;
		swizzle_pipe[5] = 3;
		break;
	case 7:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 2;
		swizzle_pipe[2] = 4;
		swizzle_pipe[3] = 6;
		swizzle_pipe[4] = 1;
		swizzle_pipe[5] = 3;
		swizzle_pipe[6] = 5;
		break;
	case 8:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 2;
		swizzle_pipe[2] = 4;
		swizzle_pipe[3] = 6;
		swizzle_pipe[4] = 1;
		swizzle_pipe[5] = 3;
		swizzle_pipe[6] = 5;
		swizzle_pipe[7] = 7;
		break;
	}

	cur_backend = 0;
	for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
		while (((1 << cur_backend) & enabled_backends_mask) == 0)
			cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;

		backend_map |= (u32)(((cur_backend & 3) << (swizzle_pipe[cur_pipe] * 2)));

		cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;
	}

	return backend_map;
}

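/* Count the number of set bits in a 32-bit mask. */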
int r600_count_pipe_bits(uint32_t val)
{
	int i, ret = 0;

	for (i = 0; i < 32; i++) {
		ret += val & 1;
		val >>= 1;
	}
	return ret;
}

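/* Program the per-family GPU configuration: tiling, pipe and backend
 * setup, and the default SQ resource split between shader types.
 */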
void r600_gpu_init(struct radeon_device *rdev)
{
	u32 tiling_config;
	u32 ramcfg;
	u32 backend_map;
	u32 cc_rb_backend_disable;
	u32 cc_gc_shader_pipe_config;
	u32 tmp;
	int i, j;
	u32 sq_config;
	u32 sq_gpr_resource_mgmt_1 = 0;
	u32 sq_gpr_resource_mgmt_2 = 0;
	u32 sq_thread_resource_mgmt = 0;
	u32 sq_stack_resource_mgmt_1 = 0;
	u32 sq_stack_resource_mgmt_2 = 0;

	/* FIXME: implement */
	switch (rdev->family) {
	case CHIP_R600:
		rdev->config.r600.max_pipes = 4;
		rdev->config.r600.max_tile_pipes = 8;
		rdev->config.r600.max_simds = 4;
		rdev->config.r600.max_backends = 4;
		rdev->config.r600.max_gprs = 256;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 256;
		rdev->config.r600.max_hw_contexts = 8;
		rdev->config.r600.max_gs_threads = 16;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 2;
		break;
	case CHIP_RV630:
	case CHIP_RV635:
		rdev->config.r600.max_pipes = 2;
		rdev->config.r600.max_tile_pipes = 2;
		rdev->config.r600.max_simds = 3;
		rdev->config.r600.max_backends = 1;
		rdev->config.r600.max_gprs = 128;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 128;
		rdev->config.r600.max_hw_contexts = 8;
		rdev->config.r600.max_gs_threads = 4;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 2;
		break;
	case CHIP_RV610:
	case CHIP_RV620:
	case CHIP_RS780:
	case CHIP_RS880:
		rdev->config.r600.max_pipes = 1;
		rdev->config.r600.max_tile_pipes = 1;
		rdev->config.r600.max_simds = 2;
		rdev->config.r600.max_backends = 1;
		rdev->config.r600.max_gprs = 128;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 128;
		rdev->config.r600.max_hw_contexts = 4;
		rdev->config.r600.max_gs_threads = 4;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 1;
		break;
	case CHIP_RV670:
		rdev->config.r600.max_pipes = 4;
		rdev->config.r600.max_tile_pipes = 4;
		rdev->config.r600.max_simds = 4;
		rdev->config.r600.max_backends = 4;
		rdev->config.r600.max_gprs = 192;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 256;
		rdev->config.r600.max_hw_contexts = 8;
		rdev->config.r600.max_gs_threads = 16;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 2;
		break;
	default:
		break;
	}

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}

	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));

	/* Setup tiling */
	tiling_config = 0;
	ramcfg = RREG32(RAMCFG);
	switch (rdev->config.r600.max_tile_pipes) {
	case 1:
		tiling_config |= PIPE_TILING(0);
		break;
	case 2:
		tiling_config |= PIPE_TILING(1);
		break;
	case 4:
		tiling_config |= PIPE_TILING(2);
		break;
	case 8:
		tiling_config |= PIPE_TILING(3);
		break;
	default:
		break;
	}
	rdev->config.r600.tiling_npipes = rdev->config.r600.max_tile_pipes;
	rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
	tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
	tiling_config |= GROUP_SIZE(0);
	rdev->config.r600.tiling_group_size = 256;
	tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
	if (tmp > 3) {
		tiling_config |= ROW_TILING(3);
		tiling_config |= SAMPLE_SPLIT(3);
	} else {
		tiling_config |= ROW_TILING(tmp);
		tiling_config |= SAMPLE_SPLIT(tmp);
	}
	tiling_config |= BANK_SWAPS(1);

	cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
	cc_rb_backend_disable |=
		BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << rdev->config.r600.max_backends) & R6XX_MAX_BACKENDS_MASK);

	cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
	cc_gc_shader_pipe_config |=
		INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << rdev->config.r600.max_pipes) & R6XX_MAX_PIPES_MASK);
	cc_gc_shader_pipe_config |=
		INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << rdev->config.r600.max_simds) & R6XX_MAX_SIMDS_MASK);

	backend_map = r600_get_tile_pipe_to_backend_map(rdev->config.r600.max_tile_pipes,
							(R6XX_MAX_BACKENDS -
							 r600_count_pipe_bits((cc_rb_backend_disable &
									       R6XX_MAX_BACKENDS_MASK) >> 16)),
							(cc_rb_backend_disable >> 16));

	tiling_config |= BACKEND_MAP(backend_map);
	WREG32(GB_TILING_CONFIG, tiling_config);
	WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
	WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);

	/* Setup pipes */
	WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
	WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
	WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);

	tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
	WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
	WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK);

	/* Setup some CP states */
	WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | ROQ_IB2_START(0x2b)));
	WREG32(CP_MEQ_THRESHOLDS, (MEQ_END(0x40) | ROQ_END(0x40)));

	WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO | SYNC_GRADIENT |
			     SYNC_WALKER | SYNC_ALIGNER));
	/* Setup various GPU states */
	if (rdev->family == CHIP_RV670)
		WREG32(ARB_GDEC_RD_CNTL, 0x00000021);

	tmp = RREG32(SX_DEBUG_1);
	tmp |= SMX_EVENT_RELEASE;
	if ((rdev->family > CHIP_R600))
		tmp |= ENABLE_NEW_SMX_ADDRESS;
	WREG32(SX_DEBUG_1, tmp);

	if (((rdev->family) == CHIP_R600) ||
	    ((rdev->family) == CHIP_RV630) ||
	    ((rdev->family) == CHIP_RV610) ||
	    ((rdev->family) == CHIP_RV620) ||
	    ((rdev->family) == CHIP_RS780) ||
	    ((rdev->family) == CHIP_RS880)) {
		WREG32(DB_DEBUG, PREZ_MUST_WAIT_FOR_POSTZ_DONE);
	} else {
		WREG32(DB_DEBUG, 0);
	}
	WREG32(DB_WATERMARKS, (DEPTH_FREE(4) | DEPTH_CACHELINE_FREE(16) |
			       DEPTH_FLUSH(16) | DEPTH_PENDING_FREE(4)));

	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
	WREG32(VGT_NUM_INSTANCES, 0);

	WREG32(SPI_CONFIG_CNTL, GPR_WRITE_PRIORITY(0));
	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(0));

	tmp = RREG32(SQ_MS_FIFO_SIZES);
	if (((rdev->family) == CHIP_RV610) ||
	    ((rdev->family) == CHIP_RV620) ||
	    ((rdev->family) == CHIP_RS780) ||
	    ((rdev->family) == CHIP_RS880)) {
		tmp = (CACHE_FIFO_SIZE(0xa) |
		       FETCH_FIFO_HIWATER(0xa) |
		       DONE_FIFO_HIWATER(0xe0) |
		       ALU_UPDATE_FIFO_HIWATER(0x8));
	} else if (((rdev->family) == CHIP_R600) ||
		   ((rdev->family) == CHIP_RV630)) {
		tmp &= ~DONE_FIFO_HIWATER(0xff);
		tmp |= DONE_FIFO_HIWATER(0x4);
	}
	WREG32(SQ_MS_FIFO_SIZES, tmp);

	/* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT
	 * should be adjusted as needed by the 2D/3D drivers. This just sets default values
	 */
	sq_config = RREG32(SQ_CONFIG);
	sq_config &= ~(PS_PRIO(3) |
		       VS_PRIO(3) |
		       GS_PRIO(3) |
		       ES_PRIO(3));
	sq_config |= (DX9_CONSTS |
		      VC_ENABLE |
		      PS_PRIO(0) |
		      VS_PRIO(1) |
		      GS_PRIO(2) |
		      ES_PRIO(3));

	if ((rdev->family) == CHIP_R600) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(124) |
					  NUM_VS_GPRS(124) |
					  NUM_CLAUSE_TEMP_GPRS(4));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(0) |
					  NUM_ES_GPRS(0));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(136) |
					   NUM_VS_THREADS(48) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(4));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(128) |
					    NUM_VS_STACK_ENTRIES(128));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(0) |
					    NUM_ES_STACK_ENTRIES(0));
	} else if (((rdev->family) == CHIP_RV610) ||
		   ((rdev->family) == CHIP_RV620) ||
		   ((rdev->family) == CHIP_RS780) ||
		   ((rdev->family) == CHIP_RS880)) {
		/* no vertex cache */
		sq_config &= ~VC_ENABLE;

		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_VS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
					  NUM_ES_GPRS(17));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
					    NUM_VS_STACK_ENTRIES(40));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
					    NUM_ES_STACK_ENTRIES(16));
	} else if (((rdev->family) == CHIP_RV630) ||
		   ((rdev->family) == CHIP_RV635)) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_VS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(18) |
					  NUM_ES_GPRS(18));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
					    NUM_VS_STACK_ENTRIES(40));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
					    NUM_ES_STACK_ENTRIES(16));
	} else if ((rdev->family) == CHIP_RV670) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_VS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
					  NUM_ES_GPRS(17));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(64) |
					    NUM_VS_STACK_ENTRIES(64));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(64) |
					    NUM_ES_STACK_ENTRIES(64));
	}

	WREG32(SQ_CONFIG, sq_config);
	WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1);
	WREG32(SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2);
	WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
	WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
	WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);

	if (((rdev->family) == CHIP_RV610) ||
	    ((rdev->family) == CHIP_RV620) ||
	    ((rdev->family) == CHIP_RS780) ||
	    ((rdev->family) == CHIP_RS880)) {
		WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(TC_ONLY));
	} else {
		WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC));
	}

	/* More default values. 2D/3D driver should adjust as needed */
	WREG32(PA_SC_AA_SAMPLE_LOCS_2S, (S0_X(0xc) | S0_Y(0x4) |
					 S1_X(0x4) | S1_Y(0xc)));
	WREG32(PA_SC_AA_SAMPLE_LOCS_4S, (S0_X(0xe) | S0_Y(0xe) |
					 S1_X(0x2) | S1_Y(0x2) |
					 S2_X(0xa) | S2_Y(0x6) |
					 S3_X(0x6) | S3_Y(0xa)));
	WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD0, (S0_X(0xe) | S0_Y(0xb) |
					     S1_X(0x4) | S1_Y(0xc) |
					     S2_X(0x1) | S2_Y(0x6) |
					     S3_X(0xa) | S3_Y(0xe)));
	WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD1, (S4_X(0x6) | S4_Y(0x1) |
					     S5_X(0x0) | S5_Y(0x0) |
					     S6_X(0xb) | S6_Y(0x4) |
					     S7_X(0x7) | S7_Y(0x8)));

	WREG32(VGT_STRMOUT_EN, 0);
	tmp = rdev->config.r600.max_pipes * 16;
	switch (rdev->family) {
	case CHIP_RV610:
	case CHIP_RV620:
	case CHIP_RS780:
	case CHIP_RS880:
		tmp += 32;
		break;
	case CHIP_RV670:
		tmp += 128;
		break;
1544 default:
1545 break;
1546 }
1547 if (tmp > 256) {
1548 tmp = 256;
1549 }
1550 WREG32(VGT_ES_PER_GS, 128);
1551 WREG32(VGT_GS_PER_ES, tmp);
1552 WREG32(VGT_GS_PER_VS, 2);
1553 WREG32(VGT_GS_VERTEX_REUSE, 16);
1554
1555 /* more default values. 2D/3D driver should adjust as needed */
1556 WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
1557 WREG32(VGT_STRMOUT_EN, 0);
1558 WREG32(SX_MISC, 0);
1559 WREG32(PA_SC_MODE_CNTL, 0);
1560 WREG32(PA_SC_AA_CONFIG, 0);
1561 WREG32(PA_SC_LINE_STIPPLE, 0);
1562 WREG32(SPI_INPUT_Z, 0);
1563 WREG32(SPI_PS_IN_CONTROL_0, NUM_INTERP(2));
1564 WREG32(CB_COLOR7_FRAG, 0);
1565
1566 /* Clear render buffer base addresses */
1567 WREG32(CB_COLOR0_BASE, 0);
1568 WREG32(CB_COLOR1_BASE, 0);
1569 WREG32(CB_COLOR2_BASE, 0);
1570 WREG32(CB_COLOR3_BASE, 0);
1571 WREG32(CB_COLOR4_BASE, 0);
1572 WREG32(CB_COLOR5_BASE, 0);
1573 WREG32(CB_COLOR6_BASE, 0);
1574 WREG32(CB_COLOR7_BASE, 0);
1575 WREG32(CB_COLOR7_FRAG, 0);
1576
1577 switch (rdev->family) {
1578 case CHIP_RV610:
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001579 case CHIP_RV620:
Alex Deucheree59f2b2009-11-05 13:11:46 -05001580 case CHIP_RS780:
1581 case CHIP_RS880:
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001582 tmp = TC_L2_SIZE(8);
1583 break;
1584 case CHIP_RV630:
1585 case CHIP_RV635:
1586 tmp = TC_L2_SIZE(4);
1587 break;
1588 case CHIP_R600:
1589 tmp = TC_L2_SIZE(0) | L2_DISABLE_LATE_HIT;
1590 break;
1591 default:
1592 tmp = TC_L2_SIZE(0);
1593 break;
1594 }
1595 WREG32(TC_CNTL, tmp);
1596
1597 tmp = RREG32(HDP_HOST_PATH_CNTL);
1598 WREG32(HDP_HOST_PATH_CNTL, tmp);
1599
1600 tmp = RREG32(ARB_POP);
1601 tmp |= ENABLE_TC128;
1602 WREG32(ARB_POP, tmp);
1603
1604 WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
1605 WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
1606 NUM_CLIP_SEQ(3)));
1607 WREG32(PA_SC_ENHANCE, FORCE_EOV_MAX_CLK_CNT(4095));
1608}
1609
1610
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001611/*
1612 * Indirect registers accessor
1613 */
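/*
 * The PCIE port registers live behind an index/data pair: the register
 * number is written to PCIE_PORT_INDEX and the payload moves through
 * PCIE_PORT_DATA.  The throwaway RREG32() after each write reads the
 * register back, which orders the access (a common way of flushing a
 * posted write before the next one).
 */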
u32 r600_pciep_rreg(struct radeon_device *rdev, u32 reg)
{
	u32 r;

	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
	(void)RREG32(PCIE_PORT_INDEX);
	r = RREG32(PCIE_PORT_DATA);
	return r;
}

void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
	(void)RREG32(PCIE_PORT_INDEX);
	WREG32(PCIE_PORT_DATA, (v));
	(void)RREG32(PCIE_PORT_DATA);
}

/*
 * CP & Ring
 */
void r600_cp_stop(struct radeon_device *rdev)
{
	WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
}
int r600_init_microcode(struct radeon_device *rdev)
{
	struct platform_device *pdev;
	const char *chip_name;
	const char *rlc_chip_name;
	size_t pfp_req_size, me_req_size, rlc_req_size;
	char fw_name[30];
	int err;

	DRM_DEBUG("\n");

	pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
	err = IS_ERR(pdev);
	if (err) {
		printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
		return -EINVAL;
	}

	switch (rdev->family) {
	case CHIP_R600:
		chip_name = "R600";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV610:
		chip_name = "RV610";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV630:
		chip_name = "RV630";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV620:
		chip_name = "RV620";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV635:
		chip_name = "RV635";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV670:
		chip_name = "RV670";
		rlc_chip_name = "R600";
		break;
	case CHIP_RS780:
	case CHIP_RS880:
		chip_name = "RS780";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV770:
		chip_name = "RV770";
		rlc_chip_name = "R700";
		break;
	case CHIP_RV730:
	case CHIP_RV740:
		chip_name = "RV730";
		rlc_chip_name = "R700";
		break;
	case CHIP_RV710:
		chip_name = "RV710";
		rlc_chip_name = "R700";
		break;
	case CHIP_CEDAR:
		chip_name = "CEDAR";
		rlc_chip_name = "CEDAR";
		break;
	case CHIP_REDWOOD:
		chip_name = "REDWOOD";
		rlc_chip_name = "REDWOOD";
		break;
	case CHIP_JUNIPER:
		chip_name = "JUNIPER";
		rlc_chip_name = "JUNIPER";
		break;
	case CHIP_CYPRESS:
	case CHIP_HEMLOCK:
		chip_name = "CYPRESS";
		rlc_chip_name = "CYPRESS";
		break;
	default: BUG();
	}

	if (rdev->family >= CHIP_CEDAR) {
		pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
	} else if (rdev->family >= CHIP_RV770) {
		pfp_req_size = R700_PFP_UCODE_SIZE * 4;
		me_req_size = R700_PM4_UCODE_SIZE * 4;
		rlc_req_size = R700_RLC_UCODE_SIZE * 4;
	} else {
		pfp_req_size = PFP_UCODE_SIZE * 4;
		me_req_size = PM4_UCODE_SIZE * 12;
		rlc_req_size = RLC_UCODE_SIZE * 4;
	}
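	/*
	 * Note: the pre-RV770 ME image is PM4_UCODE_SIZE * 12 bytes,
	 * presumably three dwords per ME instruction;
	 * r600_cp_load_microcode() below writes PM4_UCODE_SIZE * 3
	 * dwords to match.
	 */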

	DRM_INFO("Loading %s Microcode\n", chip_name);

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
	err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
	if (err)
		goto out;
	if (rdev->pfp_fw->size != pfp_req_size) {
		printk(KERN_ERR
		       "r600_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->pfp_fw->size, fw_name);
		err = -EINVAL;
		goto out;
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
	err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
	if (err)
		goto out;
	if (rdev->me_fw->size != me_req_size) {
		printk(KERN_ERR
		       "r600_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->me_fw->size, fw_name);
		err = -EINVAL;
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
	err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev);
	if (err)
		goto out;
	if (rdev->rlc_fw->size != rlc_req_size) {
		printk(KERN_ERR
		       "r600_rlc: Bogus length %zu in firmware \"%s\"\n",
		       rdev->rlc_fw->size, fw_name);
		err = -EINVAL;
	}

out:
	platform_device_unregister(pdev);

	if (err) {
		if (err != -EINVAL)
			printk(KERN_ERR
			       "r600_cp: Failed to load firmware \"%s\"\n",
			       fw_name);
		release_firmware(rdev->pfp_fw);
		rdev->pfp_fw = NULL;
		release_firmware(rdev->me_fw);
		rdev->me_fw = NULL;
		release_firmware(rdev->rlc_fw);
		rdev->rlc_fw = NULL;
	}
	return err;
}

static int r600_cp_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	int i;

	if (!rdev->me_fw || !rdev->pfp_fw)
		return -EINVAL;

	r600_cp_stop(rdev);

	WREG32(CP_RB_CNTL, RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));

	/* Reset cp */
	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
	RREG32(GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(GRBM_SOFT_RESET, 0);

	WREG32(CP_ME_RAM_WADDR, 0);

	fw_data = (const __be32 *)rdev->me_fw->data;
	WREG32(CP_ME_RAM_WADDR, 0);
	for (i = 0; i < PM4_UCODE_SIZE * 3; i++)
		WREG32(CP_ME_RAM_DATA,
		       be32_to_cpup(fw_data++));

	fw_data = (const __be32 *)rdev->pfp_fw->data;
	WREG32(CP_PFP_UCODE_ADDR, 0);
	for (i = 0; i < PFP_UCODE_SIZE; i++)
		WREG32(CP_PFP_UCODE_DATA,
		       be32_to_cpup(fw_data++));

	WREG32(CP_PFP_UCODE_ADDR, 0);
	WREG32(CP_ME_RAM_WADDR, 0);
	WREG32(CP_ME_RAM_RADDR, 0);
	return 0;
}

int r600_cp_start(struct radeon_device *rdev)
{
	int r;
	uint32_t cp_me;

	r = radeon_ring_lock(rdev, 7);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}
	radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
	radeon_ring_write(rdev, 0x1);
	if (rdev->family >= CHIP_CEDAR) {
		radeon_ring_write(rdev, 0x0);
		radeon_ring_write(rdev, rdev->config.evergreen.max_hw_contexts - 1);
	} else if (rdev->family >= CHIP_RV770) {
		radeon_ring_write(rdev, 0x0);
		radeon_ring_write(rdev, rdev->config.rv770.max_hw_contexts - 1);
	} else {
		radeon_ring_write(rdev, 0x3);
		radeon_ring_write(rdev, rdev->config.r600.max_hw_contexts - 1);
	}
	radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, 0);
	radeon_ring_unlock_commit(rdev);

	cp_me = 0xff;
	WREG32(R_0086D8_CP_ME_CNTL, cp_me);
	return 0;
}

int r600_cp_resume(struct radeon_device *rdev)
{
	u32 tmp;
	u32 rb_bufsz;
	int r;

	/* Reset cp */
	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
	RREG32(GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(GRBM_SOFT_RESET, 0);

	/* Set ring buffer size */
	rb_bufsz = drm_order(rdev->cp.ring_size / 8);
	tmp = RB_NO_UPDATE | (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
	tmp |= BUF_SWAP_32BIT;
#endif
	WREG32(CP_RB_CNTL, tmp);
	WREG32(CP_SEM_WAIT_TIMER, 0x4);

	/* Set the write pointer delay */
	WREG32(CP_RB_WPTR_DELAY, 0);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
	WREG32(CP_RB_RPTR_WR, 0);
	WREG32(CP_RB_WPTR, 0);
	WREG32(CP_RB_RPTR_ADDR, rdev->cp.gpu_addr & 0xFFFFFFFF);
	WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->cp.gpu_addr));
	mdelay(1);
	WREG32(CP_RB_CNTL, tmp);

	WREG32(CP_RB_BASE, rdev->cp.gpu_addr >> 8);
	WREG32(CP_DEBUG, (1 << 27) | (1 << 28));

	rdev->cp.rptr = RREG32(CP_RB_RPTR);
	rdev->cp.wptr = RREG32(CP_RB_WPTR);

	r600_cp_start(rdev);
	rdev->cp.ready = true;
	r = radeon_ring_test(rdev);
	if (r) {
		rdev->cp.ready = false;
		return r;
	}
	return 0;
}

void r600_cp_commit(struct radeon_device *rdev)
{
	WREG32(CP_RB_WPTR, rdev->cp.wptr);
	(void)RREG32(CP_RB_WPTR);
}

void r600_ring_init(struct radeon_device *rdev, unsigned ring_size)
{
	u32 rb_bufsz;

	/* Align ring size */
	rb_bufsz = drm_order(ring_size / 8);
	ring_size = (1 << (rb_bufsz + 1)) * 4;
	rdev->cp.ring_size = ring_size;
	rdev->cp.align_mask = 16 - 1;
}

void r600_cp_fini(struct radeon_device *rdev)
{
	r600_cp_stop(rdev);
	radeon_ring_fini(rdev);
}


/*
 * GPU scratch register helper functions.
 */
void r600_scratch_init(struct radeon_device *rdev)
{
	int i;

	rdev->scratch.num_reg = 7;
	for (i = 0; i < rdev->scratch.num_reg; i++) {
		rdev->scratch.free[i] = true;
		rdev->scratch.reg[i] = SCRATCH_REG0 + (i * 4);
	}
}

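/*
 * The ring test writes a marker (0xCAFEDEAD) into a scratch register
 * from the CPU, then asks the CP to overwrite it with 0xDEADBEEF via a
 * SET_CONFIG_REG packet, and polls until the new value shows up or
 * rdev->usec_timeout expires.
 */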
int r600_ring_test(struct radeon_device *rdev)
{
	uint32_t scratch;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	r = radeon_scratch_get(rdev, &scratch);
	if (r) {
		DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
		return r;
	}
	WREG32(scratch, 0xCAFEDEAD);
	r = radeon_ring_lock(rdev, 3);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		radeon_scratch_free(rdev, scratch);
		return r;
	}
	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(rdev, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
	radeon_ring_write(rdev, 0xDEADBEEF);
	radeon_ring_unlock_commit(rdev);
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}
	if (i < rdev->usec_timeout) {
		DRM_INFO("ring test succeeded in %d usecs\n", i);
	} else {
		DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n",
			  scratch, tmp);
		r = -EINVAL;
	}
	radeon_scratch_free(rdev, scratch);
	return r;
}

void r600_wb_disable(struct radeon_device *rdev)
{
	int r;

	WREG32(SCRATCH_UMSK, 0);
	if (rdev->wb.wb_obj) {
		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
		if (unlikely(r != 0))
			return;
		radeon_bo_kunmap(rdev->wb.wb_obj);
		radeon_bo_unpin(rdev->wb.wb_obj);
		radeon_bo_unreserve(rdev->wb.wb_obj);
	}
}

void r600_wb_fini(struct radeon_device *rdev)
{
	r600_wb_disable(rdev);
	if (rdev->wb.wb_obj) {
		radeon_bo_unref(&rdev->wb.wb_obj);
		rdev->wb.wb = NULL;
		rdev->wb.wb_obj = NULL;
	}
}

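/*
 * The write-back (WB) buffer is a single GTT page that the GPU copies
 * scratch register contents and the CP read pointer into, so the host
 * can poll memory instead of MMIO.  In the layout programmed below the
 * scratch write-back area starts at the 256-byte-aligned base
 * (SCRATCH_ADDR takes gpu_addr >> 8) and the RPTR copy sits at offset
 * 1024.
 */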
int r600_wb_enable(struct radeon_device *rdev)
{
	int r;

	if (rdev->wb.wb_obj == NULL) {
		r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj);
		if (r) {
			dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}
		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
		if (unlikely(r != 0)) {
			r600_wb_fini(rdev);
			return r;
		}
		r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
				  &rdev->wb.gpu_addr);
		if (r) {
			radeon_bo_unreserve(rdev->wb.wb_obj);
			dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
			r600_wb_fini(rdev);
			return r;
		}
		r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
		radeon_bo_unreserve(rdev->wb.wb_obj);
		if (r) {
			dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
			r600_wb_fini(rdev);
			return r;
		}
	}
	WREG32(SCRATCH_ADDR, (rdev->wb.gpu_addr >> 8) & 0xFFFFFFFF);
	WREG32(CP_RB_RPTR_ADDR, (rdev->wb.gpu_addr + 1024) & 0xFFFFFFFC);
	WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + 1024) & 0xFF);
	WREG32(SCRATCH_UMSK, 0xff);
	return 0;
}

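/*
 * Fence emission: flush/invalidate the caches, wait for the 3D engine
 * to go idle and clean, write the fence sequence number into the fence
 * scratch register, then poke CP_INT_STATUS so the RB interrupt fires
 * and the host notices that the fence has passed.
 */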
void r600_fence_ring_emit(struct radeon_device *rdev,
			  struct radeon_fence *fence)
{
	/* Also consider EVENT_WRITE_EOP; it handles the interrupts + timestamps + events */

	radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0));
	radeon_ring_write(rdev, CACHE_FLUSH_AND_INV_EVENT);
	/* wait for 3D idle clean */
	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(rdev, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
	/* Emit fence sequence & fire IRQ */
	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
	radeon_ring_write(rdev, fence->seq);
	/* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
	radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0));
	radeon_ring_write(rdev, RB_INT_STAT);
}

int r600_copy_blit(struct radeon_device *rdev,
		   uint64_t src_offset, uint64_t dst_offset,
		   unsigned num_pages, struct radeon_fence *fence)
{
	int r;

	mutex_lock(&rdev->r600_blit.mutex);
	rdev->r600_blit.vb_ib = NULL;
	r = r600_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE);
	if (r) {
		if (rdev->r600_blit.vb_ib)
			radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
		mutex_unlock(&rdev->r600_blit.mutex);
		return r;
	}
	r600_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE);
	r600_blit_done_copy(rdev, fence);
	mutex_unlock(&rdev->r600_blit.mutex);
	return 0;
}

int r600_set_surface_reg(struct radeon_device *rdev, int reg,
			 uint32_t tiling_flags, uint32_t pitch,
			 uint32_t offset, uint32_t obj_size)
{
	/* FIXME: implement */
	return 0;
}

void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
{
	/* FIXME: implement */
}


bool r600_card_posted(struct radeon_device *rdev)
{
	uint32_t reg;

	/* first check CRTCs */
	reg = RREG32(D1CRTC_CONTROL) |
		RREG32(D2CRTC_CONTROL);
	if (reg & CRTC_EN)
		return true;

	/* then check MEM_SIZE, in case the crtcs are off */
	if (RREG32(CONFIG_MEMSIZE))
		return true;

	return false;
}

int r600_startup(struct radeon_device *rdev)
{
	int r;

	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
		r = r600_init_microcode(rdev);
		if (r) {
			DRM_ERROR("Failed to load firmware!\n");
			return r;
		}
	}

	r600_mc_program(rdev);
	if (rdev->flags & RADEON_IS_AGP) {
		r600_agp_enable(rdev);
	} else {
		r = r600_pcie_gart_enable(rdev);
		if (r)
			return r;
	}
	r600_gpu_init(rdev);
	r = r600_blit_init(rdev);
	if (r) {
		r600_blit_fini(rdev);
		rdev->asic->copy = NULL;
		dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
	}
	/* pin copy shader into vram */
	if (rdev->r600_blit.shader_obj) {
		r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
		if (unlikely(r != 0))
			return r;
		r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
				  &rdev->r600_blit.shader_gpu_addr);
		radeon_bo_unreserve(rdev->r600_blit.shader_obj);
		if (r) {
			dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
			return r;
		}
	}
	/* Enable IRQ */
	r = r600_irq_init(rdev);
	if (r) {
		DRM_ERROR("radeon: IH init failed (%d).\n", r);
		radeon_irq_kms_fini(rdev);
		return r;
	}
	r600_irq_set(rdev);

	r = radeon_ring_init(rdev, rdev->cp.ring_size);
	if (r)
		return r;
	r = r600_cp_load_microcode(rdev);
	if (r)
		return r;
	r = r600_cp_resume(rdev);
	if (r)
		return r;
	/* the write-back buffer is not vital, so don't worry about failure */
	r600_wb_enable(rdev);
	return 0;
}

void r600_vga_set_state(struct radeon_device *rdev, bool state)
{
	uint32_t temp;

	temp = RREG32(CONFIG_CNTL);
	if (state == false) {
		temp &= ~(1<<0);
		temp |= (1<<1);
	} else {
		temp &= ~(1<<1);
	}
	WREG32(CONFIG_CNTL, temp);
}

int r600_resume(struct radeon_device *rdev)
{
	int r;

	/* Do not reset the GPU before posting; on r600 hw, unlike r500 hw,
	 * posting performs the tasks needed to bring the GPU back into
	 * good shape.
	 */
	/* post card */
	atom_asic_init(rdev->mode_info.atom_context);
	/* Initialize clocks */
	r = radeon_clocks_init(rdev);
	if (r) {
		return r;
	}

	r = r600_startup(rdev);
	if (r) {
		DRM_ERROR("r600 startup failed on resume\n");
		return r;
	}

	r = r600_ib_test(rdev);
	if (r) {
		DRM_ERROR("radeon: failed testing IB (%d).\n", r);
		return r;
	}

	r = r600_audio_init(rdev);
	if (r) {
		DRM_ERROR("radeon: audio resume failed\n");
		return r;
	}

	return r;
}

int r600_suspend(struct radeon_device *rdev)
{
	int r;

	r600_audio_fini(rdev);
	/* FIXME: we should wait for the ring to be empty */
	r600_cp_stop(rdev);
	rdev->cp.ready = false;
	r600_irq_suspend(rdev);
	r600_wb_disable(rdev);
	r600_pcie_gart_disable(rdev);
	/* unpin the shader bo */
	if (rdev->r600_blit.shader_obj) {
		r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
		if (!r) {
			radeon_bo_unpin(rdev->r600_blit.shader_obj);
			radeon_bo_unreserve(rdev->r600_blit.shader_obj);
		}
	}
	return 0;
}

/* The plan is to move initialization into this function and use
 * helper functions so that radeon_device_init does pretty much
 * nothing more than call asic-specific functions. This should
 * also allow us to remove a bunch of callbacks, like vram_info.
 */
int r600_init(struct radeon_device *rdev)
{
	int r;

	r = radeon_dummy_page_init(rdev);
	if (r)
		return r;
	if (r600_debugfs_mc_info_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for mc !\n");
	}
	/* This doesn't do much */
	r = radeon_gem_init(rdev);
	if (r)
		return r;
	/* Read BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	/* Must be an ATOMBIOS */
	if (!rdev->is_atom_bios) {
		dev_err(rdev->dev, "Expecting atombios for R600 GPU\n");
		return -EINVAL;
	}
	r = radeon_atombios_init(rdev);
	if (r)
		return r;
	/* Post card if necessary */
	if (!r600_card_posted(rdev)) {
		if (!rdev->bios) {
			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
			return -EINVAL;
		}
		DRM_INFO("GPU not posted. posting now...\n");
		atom_asic_init(rdev->mode_info.atom_context);
	}
	/* Initialize scratch registers */
	r600_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	r = radeon_clocks_init(rdev);
	if (r)
		return r;
	/* Initialize power management */
	radeon_pm_init(rdev);
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	if (rdev->flags & RADEON_IS_AGP) {
		r = radeon_agp_init(rdev);
		if (r)
			radeon_agp_disable(rdev);
	}
	r = r600_mc_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;

	r = radeon_irq_kms_init(rdev);
	if (r)
		return r;

	rdev->cp.ring_obj = NULL;
	r600_ring_init(rdev, 1024 * 1024);

	rdev->ih.ring_obj = NULL;
	r600_ih_ring_init(rdev, 64 * 1024);

	r = r600_pcie_gart_init(rdev);
	if (r)
		return r;

	rdev->accel_working = true;
	r = r600_startup(rdev);
	if (r) {
		dev_err(rdev->dev, "disabling GPU acceleration\n");
		r600_cp_fini(rdev);
		r600_wb_fini(rdev);
		r600_irq_fini(rdev);
		radeon_irq_kms_fini(rdev);
		r600_pcie_gart_fini(rdev);
		rdev->accel_working = false;
	}
	if (rdev->accel_working) {
		r = radeon_ib_pool_init(rdev);
		if (r) {
			dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
			rdev->accel_working = false;
		} else {
			r = r600_ib_test(rdev);
			if (r) {
				dev_err(rdev->dev, "IB test failed (%d).\n", r);
				rdev->accel_working = false;
			}
		}
	}

	r = r600_audio_init(rdev);
	if (r)
		return r; /* TODO error handling */
	return 0;
}

void r600_fini(struct radeon_device *rdev)
{
	radeon_pm_fini(rdev);
	r600_audio_fini(rdev);
	r600_blit_fini(rdev);
	r600_cp_fini(rdev);
	r600_wb_fini(rdev);
	r600_irq_fini(rdev);
	radeon_irq_kms_fini(rdev);
	r600_pcie_gart_fini(rdev);
	radeon_agp_fini(rdev);
	radeon_gem_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_clocks_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;
	radeon_dummy_page_fini(rdev);
}


/*
 * CS stuff
 */
void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
	/* FIXME: implement */
	radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
	radeon_ring_write(rdev, ib->gpu_addr & 0xFFFFFFFC);
	radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF);
	radeon_ring_write(rdev, ib->length_dw);
}

int r600_ib_test(struct radeon_device *rdev)
{
	struct radeon_ib *ib;
	uint32_t scratch;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	r = radeon_scratch_get(rdev, &scratch);
	if (r) {
		DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
		return r;
	}
	WREG32(scratch, 0xCAFEDEAD);
	r = radeon_ib_get(rdev, &ib);
	if (r) {
		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
		return r;
	}
	ib->ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
	ib->ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
	ib->ptr[2] = 0xDEADBEEF;
	ib->ptr[3] = PACKET2(0);
	ib->ptr[4] = PACKET2(0);
	ib->ptr[5] = PACKET2(0);
	ib->ptr[6] = PACKET2(0);
	ib->ptr[7] = PACKET2(0);
	ib->ptr[8] = PACKET2(0);
	ib->ptr[9] = PACKET2(0);
	ib->ptr[10] = PACKET2(0);
	ib->ptr[11] = PACKET2(0);
	ib->ptr[12] = PACKET2(0);
	ib->ptr[13] = PACKET2(0);
	ib->ptr[14] = PACKET2(0);
	ib->ptr[15] = PACKET2(0);
	ib->length_dw = 16;
	r = radeon_ib_schedule(rdev, ib);
	if (r) {
		radeon_scratch_free(rdev, scratch);
		radeon_ib_free(rdev, &ib);
		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
		return r;
	}
	r = radeon_fence_wait(ib->fence, false);
	if (r) {
		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
		return r;
	}
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}
	if (i < rdev->usec_timeout) {
		DRM_INFO("ib test succeeded in %u usecs\n", i);
	} else {
		DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
			  scratch, tmp);
		r = -EINVAL;
	}
	radeon_scratch_free(rdev, scratch);
	radeon_ib_free(rdev, &ib);
	return r;
}

/*
 * Interrupts
 *
 * Interrupts use a ring buffer on r6xx/r7xx hardware.  It works much
 * the same as the CP ring buffer, but in reverse.  Rather than the CPU
 * writing to the ring and the GPU consuming, the GPU writes to the ring
 * and the host consumes.  As the host irq handler processes interrupts,
 * it increments the rptr.  When the rptr catches up with the wptr, all
 * the current interrupts have been processed.
 */
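/*
 * The consumer side lives in r600_get_ih_wptr() and r600_irq_process()
 * below: each ring entry is 16 bytes, rptr/wptr are byte offsets, and
 * because the ring size is a power of two the pointers wrap with a
 * simple mask (rdev->ih.ptr_mask).
 */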

void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size)
{
	u32 rb_bufsz;

	/* Align ring size */
	rb_bufsz = drm_order(ring_size / 4);
	ring_size = (1 << rb_bufsz) * 4;
	rdev->ih.ring_size = ring_size;
	rdev->ih.ptr_mask = rdev->ih.ring_size - 1;
	rdev->ih.rptr = 0;
}

static int r600_ih_ring_alloc(struct radeon_device *rdev)
{
	int r;

	/* Allocate ring buffer */
	if (rdev->ih.ring_obj == NULL) {
		r = radeon_bo_create(rdev, NULL, rdev->ih.ring_size,
				     true,
				     RADEON_GEM_DOMAIN_GTT,
				     &rdev->ih.ring_obj);
		if (r) {
			DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r);
			return r;
		}
		r = radeon_bo_reserve(rdev->ih.ring_obj, false);
		if (unlikely(r != 0))
			return r;
		r = radeon_bo_pin(rdev->ih.ring_obj,
				  RADEON_GEM_DOMAIN_GTT,
				  &rdev->ih.gpu_addr);
		if (r) {
			radeon_bo_unreserve(rdev->ih.ring_obj);
			DRM_ERROR("radeon: failed to pin ih ring buffer (%d).\n", r);
			return r;
		}
		r = radeon_bo_kmap(rdev->ih.ring_obj,
				   (void **)&rdev->ih.ring);
		radeon_bo_unreserve(rdev->ih.ring_obj);
		if (r) {
			DRM_ERROR("radeon: failed to map ih ring buffer (%d).\n", r);
			return r;
		}
	}
	return 0;
}

static void r600_ih_ring_fini(struct radeon_device *rdev)
{
	int r;
	if (rdev->ih.ring_obj) {
		r = radeon_bo_reserve(rdev->ih.ring_obj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(rdev->ih.ring_obj);
			radeon_bo_unpin(rdev->ih.ring_obj);
			radeon_bo_unreserve(rdev->ih.ring_obj);
		}
		radeon_bo_unref(&rdev->ih.ring_obj);
		rdev->ih.ring = NULL;
		rdev->ih.ring_obj = NULL;
	}
}

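/*
 * RLC helpers.  The RLC runs its own microcode (the *_rlc.bin images
 * requested in r600_init_microcode()) and is brought up as part of
 * interrupt initialization in r600_irq_init() below; it must be halted
 * (with a soft reset first on r7xx) before new ucode is loaded.
 */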
void r600_rlc_stop(struct radeon_device *rdev)
{
	if ((rdev->family >= CHIP_RV770) &&
	    (rdev->family <= CHIP_RV740)) {
		/* r7xx asics need to soft reset RLC before halting */
		WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC);
		RREG32(SRBM_SOFT_RESET);
		udelay(15000);
		WREG32(SRBM_SOFT_RESET, 0);
		RREG32(SRBM_SOFT_RESET);
	}

	WREG32(RLC_CNTL, 0);
}

static void r600_rlc_start(struct radeon_device *rdev)
{
	WREG32(RLC_CNTL, RLC_ENABLE);
}

static int r600_rlc_init(struct radeon_device *rdev)
{
	u32 i;
	const __be32 *fw_data;

	if (!rdev->rlc_fw)
		return -EINVAL;

	r600_rlc_stop(rdev);

	WREG32(RLC_HB_BASE, 0);
	WREG32(RLC_HB_CNTL, 0);
	WREG32(RLC_HB_RPTR, 0);
	WREG32(RLC_HB_WPTR, 0);
	WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
	WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
	WREG32(RLC_MC_CNTL, 0);
	WREG32(RLC_UCODE_CNTL, 0);

	fw_data = (const __be32 *)rdev->rlc_fw->data;
	if (rdev->family >= CHIP_CEDAR) {
		for (i = 0; i < EVERGREEN_RLC_UCODE_SIZE; i++) {
			WREG32(RLC_UCODE_ADDR, i);
			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
		}
	} else if (rdev->family >= CHIP_RV770) {
		for (i = 0; i < R700_RLC_UCODE_SIZE; i++) {
			WREG32(RLC_UCODE_ADDR, i);
			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
		}
	} else {
		for (i = 0; i < RLC_UCODE_SIZE; i++) {
			WREG32(RLC_UCODE_ADDR, i);
			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
		}
	}
	WREG32(RLC_UCODE_ADDR, 0);

	r600_rlc_start(rdev);

	return 0;
}

static void r600_enable_interrupts(struct radeon_device *rdev)
{
	u32 ih_cntl = RREG32(IH_CNTL);
	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);

	ih_cntl |= ENABLE_INTR;
	ih_rb_cntl |= IH_RB_ENABLE;
	WREG32(IH_CNTL, ih_cntl);
	WREG32(IH_RB_CNTL, ih_rb_cntl);
	rdev->ih.enabled = true;
}

void r600_disable_interrupts(struct radeon_device *rdev)
{
	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
	u32 ih_cntl = RREG32(IH_CNTL);

	ih_rb_cntl &= ~IH_RB_ENABLE;
	ih_cntl &= ~ENABLE_INTR;
	WREG32(IH_RB_CNTL, ih_rb_cntl);
	WREG32(IH_CNTL, ih_cntl);
	/* set rptr, wptr to 0 */
	WREG32(IH_RB_RPTR, 0);
	WREG32(IH_RB_WPTR, 0);
	rdev->ih.enabled = false;
	rdev->ih.wptr = 0;
	rdev->ih.rptr = 0;
}

static void r600_disable_interrupt_state(struct radeon_device *rdev)
{
	u32 tmp;

	WREG32(CP_INT_CNTL, 0);
	WREG32(GRBM_INT_CNTL, 0);
	WREG32(DxMODE_INT_MASK, 0);
	if (ASIC_IS_DCE3(rdev)) {
		WREG32(DCE3_DACA_AUTODETECT_INT_CONTROL, 0);
		WREG32(DCE3_DACB_AUTODETECT_INT_CONTROL, 0);
		tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD1_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD2_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD3_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD4_INT_CONTROL, tmp);
		if (ASIC_IS_DCE32(rdev)) {
			tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD5_INT_CONTROL, tmp);
			tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD6_INT_CONTROL, tmp);
		}
	} else {
		WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
		WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
		tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
		WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
		tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
		WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
		tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
		WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
	}
}

int r600_irq_init(struct radeon_device *rdev)
{
	int ret = 0;
	int rb_bufsz;
	u32 interrupt_cntl, ih_cntl, ih_rb_cntl;

	/* allocate ring */
	ret = r600_ih_ring_alloc(rdev);
	if (ret)
		return ret;

	/* disable irqs */
	r600_disable_interrupts(rdev);

	/* init rlc */
	ret = r600_rlc_init(rdev);
	if (ret) {
		r600_ih_ring_fini(rdev);
		return ret;
	}

	/* setup interrupt control */
	/* set dummy read address to ring address */
	WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
	interrupt_cntl = RREG32(INTERRUPT_CNTL);
	/* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
	 * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
	 */
	interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
	/* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
	interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
	WREG32(INTERRUPT_CNTL, interrupt_cntl);

	WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
	rb_bufsz = drm_order(rdev->ih.ring_size / 4);

	ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
		      IH_WPTR_OVERFLOW_CLEAR |
		      (rb_bufsz << 1));
	/* WPTR writeback, not yet */
	/*ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;*/
	WREG32(IH_RB_WPTR_ADDR_LO, 0);
	WREG32(IH_RB_WPTR_ADDR_HI, 0);

	WREG32(IH_RB_CNTL, ih_rb_cntl);

	/* set rptr, wptr to 0 */
	WREG32(IH_RB_RPTR, 0);
	WREG32(IH_RB_WPTR, 0);

	/* Default settings for IH_CNTL (disabled at first) */
	ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10);
	/* RPTR_REARM only works if msi's are enabled */
	if (rdev->msi_enabled)
		ih_cntl |= RPTR_REARM;

#ifdef __BIG_ENDIAN
	ih_cntl |= IH_MC_SWAP(IH_MC_SWAP_32BIT);
#endif
	WREG32(IH_CNTL, ih_cntl);

	/* force the active interrupt state to all disabled */
	if (rdev->family >= CHIP_CEDAR)
		evergreen_disable_interrupt_state(rdev);
	else
		r600_disable_interrupt_state(rdev);

	/* enable irqs */
	r600_enable_interrupts(rdev);

	return ret;
}

void r600_irq_suspend(struct radeon_device *rdev)
{
	r600_irq_disable(rdev);
	r600_rlc_stop(rdev);
}

void r600_irq_fini(struct radeon_device *rdev)
{
	r600_irq_suspend(rdev);
	r600_ih_ring_fini(rdev);
}

int r600_irq_set(struct radeon_device *rdev)
{
	u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
	u32 mode_int = 0;
	u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
	u32 grbm_int_cntl = 0;
	u32 hdmi1, hdmi2;

	if (!rdev->irq.installed) {
		WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
		return -EINVAL;
	}
	/* don't enable anything if the ih is disabled */
	if (!rdev->ih.enabled) {
		r600_disable_interrupts(rdev);
		/* force the active interrupt state to all disabled */
		r600_disable_interrupt_state(rdev);
		return 0;
	}

	hdmi1 = RREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
	if (ASIC_IS_DCE3(rdev)) {
		hdmi2 = RREG32(R600_HDMI_BLOCK3 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
		hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
		if (ASIC_IS_DCE32(rdev)) {
			hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
			hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
		}
	} else {
		hdmi2 = RREG32(R600_HDMI_BLOCK2 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
		hpd1 = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN;
	}

	if (rdev->irq.sw_int) {
		DRM_DEBUG("r600_irq_set: sw int\n");
		cp_int_cntl |= RB_INT_ENABLE;
	}
	if (rdev->irq.crtc_vblank_int[0]) {
		DRM_DEBUG("r600_irq_set: vblank 0\n");
		mode_int |= D1MODE_VBLANK_INT_MASK;
	}
	if (rdev->irq.crtc_vblank_int[1]) {
		DRM_DEBUG("r600_irq_set: vblank 1\n");
		mode_int |= D2MODE_VBLANK_INT_MASK;
	}
	if (rdev->irq.hpd[0]) {
		DRM_DEBUG("r600_irq_set: hpd 1\n");
		hpd1 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[1]) {
		DRM_DEBUG("r600_irq_set: hpd 2\n");
		hpd2 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[2]) {
		DRM_DEBUG("r600_irq_set: hpd 3\n");
		hpd3 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[3]) {
		DRM_DEBUG("r600_irq_set: hpd 4\n");
		hpd4 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[4]) {
		DRM_DEBUG("r600_irq_set: hpd 5\n");
		hpd5 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[5]) {
		DRM_DEBUG("r600_irq_set: hpd 6\n");
		hpd6 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hdmi[0]) {
		DRM_DEBUG("r600_irq_set: hdmi 1\n");
		hdmi1 |= R600_HDMI_INT_EN;
	}
	if (rdev->irq.hdmi[1]) {
		DRM_DEBUG("r600_irq_set: hdmi 2\n");
		hdmi2 |= R600_HDMI_INT_EN;
	}
	if (rdev->irq.gui_idle) {
		DRM_DEBUG("gui idle\n");
		grbm_int_cntl |= GUI_IDLE_INT_ENABLE;
	}

	WREG32(CP_INT_CNTL, cp_int_cntl);
	WREG32(DxMODE_INT_MASK, mode_int);
	WREG32(GRBM_INT_CNTL, grbm_int_cntl);
	WREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, hdmi1);
	if (ASIC_IS_DCE3(rdev)) {
		WREG32(R600_HDMI_BLOCK3 + R600_HDMI_CNTL, hdmi2);
		WREG32(DC_HPD1_INT_CONTROL, hpd1);
		WREG32(DC_HPD2_INT_CONTROL, hpd2);
		WREG32(DC_HPD3_INT_CONTROL, hpd3);
		WREG32(DC_HPD4_INT_CONTROL, hpd4);
		if (ASIC_IS_DCE32(rdev)) {
			WREG32(DC_HPD5_INT_CONTROL, hpd5);
			WREG32(DC_HPD6_INT_CONTROL, hpd6);
		}
	} else {
		WREG32(R600_HDMI_BLOCK2 + R600_HDMI_CNTL, hdmi2);
		WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
		WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
		WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3);
	}

	return 0;
}

static inline void r600_irq_ack(struct radeon_device *rdev,
				u32 *disp_int,
				u32 *disp_int_cont,
				u32 *disp_int_cont2)
{
	u32 tmp;

	if (ASIC_IS_DCE3(rdev)) {
		*disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS);
		*disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE);
		*disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2);
	} else {
		*disp_int = RREG32(DISP_INTERRUPT_STATUS);
		*disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
		*disp_int_cont2 = 0;
	}

	if (*disp_int & LB_D1_VBLANK_INTERRUPT)
		WREG32(D1MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
	if (*disp_int & LB_D1_VLINE_INTERRUPT)
		WREG32(D1MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
	if (*disp_int & LB_D2_VBLANK_INTERRUPT)
		WREG32(D2MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
	if (*disp_int & LB_D2_VLINE_INTERRUPT)
		WREG32(D2MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
	if (*disp_int & DC_HPD1_INTERRUPT) {
		if (ASIC_IS_DCE3(rdev)) {
			tmp = RREG32(DC_HPD1_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD1_INT_CONTROL, tmp);
		} else {
			tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
		}
	}
	if (*disp_int & DC_HPD2_INTERRUPT) {
		if (ASIC_IS_DCE3(rdev)) {
			tmp = RREG32(DC_HPD2_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD2_INT_CONTROL, tmp);
		} else {
			tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
		}
	}
	if (*disp_int_cont & DC_HPD3_INTERRUPT) {
		if (ASIC_IS_DCE3(rdev)) {
			tmp = RREG32(DC_HPD3_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD3_INT_CONTROL, tmp);
		} else {
			tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
		}
	}
	if (*disp_int_cont & DC_HPD4_INTERRUPT) {
		tmp = RREG32(DC_HPD4_INT_CONTROL);
		tmp |= DC_HPDx_INT_ACK;
		WREG32(DC_HPD4_INT_CONTROL, tmp);
	}
	if (ASIC_IS_DCE32(rdev)) {
		if (*disp_int_cont2 & DC_HPD5_INTERRUPT) {
			tmp = RREG32(DC_HPD5_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD5_INT_CONTROL, tmp);
		}
		if (*disp_int_cont2 & DC_HPD6_INTERRUPT) {
			tmp = RREG32(DC_HPD6_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD6_INT_CONTROL, tmp);
		}
	}
	if (RREG32(R600_HDMI_BLOCK1 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
		WREG32_P(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
	}
	if (ASIC_IS_DCE3(rdev)) {
		if (RREG32(R600_HDMI_BLOCK3 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
			WREG32_P(R600_HDMI_BLOCK3 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
		}
	} else {
		if (RREG32(R600_HDMI_BLOCK2 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
			WREG32_P(R600_HDMI_BLOCK2 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
		}
	}
}

void r600_irq_disable(struct radeon_device *rdev)
{
	u32 disp_int, disp_int_cont, disp_int_cont2;

	r600_disable_interrupts(rdev);
	/* Wait and acknowledge irq */
	mdelay(1);
	r600_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2);
	r600_disable_interrupt_state(rdev);
}
2972
2973static inline u32 r600_get_ih_wptr(struct radeon_device *rdev)
2974{
2975 u32 wptr, tmp;
2976
2977 /* XXX use writeback */
2978 wptr = RREG32(IH_RB_WPTR);
2979
2980 if (wptr & RB_OVERFLOW) {
		/* When a ring buffer overflow happens, start parsing
		 * interrupts from the last not-overwritten vector (wptr + 16).
		 * Hopefully this allows us to catch up.
		 */
		dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
			wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
		rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
		tmp = RREG32(IH_RB_CNTL);
		tmp |= IH_WPTR_OVERFLOW_CLEAR;
		WREG32(IH_RB_CNTL, tmp);
	}
	return (wptr & rdev->ih.ptr_mask);
}

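/*
 * Illustrative sketch, not part of the driver: the IH ring size is a
 * power of two, so ptr_mask is (ring size in bytes - 1) and wrapping a
 * byte offset back into the ring is a single AND, as done above and in
 * r600_irq_process() below. The helper name is hypothetical.
 */
static inline u32 r600_ih_wrap_example(u32 byte_offset, u32 ptr_mask)
{
	/* e.g. with a 64KB ring, ptr_mask = 0xffff and 0x10010 wraps to 0x10 */
	return byte_offset & ptr_mask;
}
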
/* r600 IV Ring
 * Each IV ring entry is 128 bits:
 * [7:0]    - interrupt source id
 * [31:8]   - reserved
 * [59:32]  - interrupt source data
 * [127:60] - reserved
 *
 * The basic interrupt vector entries
 * are decoded as follows:
 * src_id  src_data  description
 *      1         0  D1 Vblank
 *      1         1  D1 Vline
 *      5         0  D2 Vblank
 *      5         1  D2 Vline
 *     19         0  FP Hot plug detection A
 *     19         1  FP Hot plug detection B
 *     19         2  DAC A auto-detection
 *     19         3  DAC B auto-detection
 *     21         4  HDMI block A
 *     21         5  HDMI block B
 *    176         -  CP_INT RB
 *    177         -  CP_INT IB1
 *    178         -  CP_INT IB2
 *    181         -  EOP Interrupt
 *    233         -  GUI Idle
 *
 * Note: these are based on r600 and may need to be
 * adjusted or extended on newer ASICs.
 */

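/*
 * Illustrative sketch, not part of the driver: decoding one IV ring
 * entry into src_id/src_data with the field layout above, mirroring
 * the masking done in r600_irq_process() below. The helper name and
 * out-parameters are hypothetical.
 */
static inline void r600_iv_decode_example(const u32 *ring, u32 rptr,
					  u32 *src_id, u32 *src_data)
{
	u32 ring_index = rptr / 4;	/* rptr is in bytes, ring[] is u32 */

	*src_id = ring[ring_index] & 0xff;		/* bits [7:0] */
	*src_data = ring[ring_index + 1] & 0xfffffff;	/* bits [59:32] */
}
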
int r600_irq_process(struct radeon_device *rdev)
{
	u32 wptr = r600_get_ih_wptr(rdev);
	u32 rptr = rdev->ih.rptr;
	u32 src_id, src_data;
	u32 ring_index, disp_int, disp_int_cont, disp_int_cont2;
	unsigned long flags;
	bool queue_hotplug = false;

	DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
	if (!rdev->ih.enabled)
		return IRQ_NONE;

	spin_lock_irqsave(&rdev->ih.lock, flags);

	if (rptr == wptr) {
		spin_unlock_irqrestore(&rdev->ih.lock, flags);
		return IRQ_NONE;
	}
	if (rdev->shutdown) {
		spin_unlock_irqrestore(&rdev->ih.lock, flags);
		return IRQ_NONE;
	}

restart_ih:
	/* display interrupts */
	r600_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2);

	rdev->ih.wptr = wptr;
	while (rptr != wptr) {
		/* wptr/rptr are in bytes! */
		ring_index = rptr / 4;
		src_id = rdev->ih.ring[ring_index] & 0xff;
		src_data = rdev->ih.ring[ring_index + 1] & 0xfffffff;

		switch (src_id) {
		case 1: /* D1 vblank/vline */
			switch (src_data) {
			case 0: /* D1 vblank */
				if (disp_int & LB_D1_VBLANK_INTERRUPT) {
					drm_handle_vblank(rdev->ddev, 0);
					rdev->pm.vblank_sync = true;
					wake_up(&rdev->irq.vblank_queue);
					disp_int &= ~LB_D1_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D1 vblank\n");
				}
				break;
			case 1: /* D1 vline */
				if (disp_int & LB_D1_VLINE_INTERRUPT) {
					disp_int &= ~LB_D1_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D1 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 5: /* D2 vblank/vline */
			switch (src_data) {
			case 0: /* D2 vblank */
				if (disp_int & LB_D2_VBLANK_INTERRUPT) {
					drm_handle_vblank(rdev->ddev, 1);
					rdev->pm.vblank_sync = true;
					wake_up(&rdev->irq.vblank_queue);
					disp_int &= ~LB_D2_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D2 vblank\n");
				}
				break;
			case 1: /* D2 vline */
				if (disp_int & LB_D2_VLINE_INTERRUPT) {
					disp_int &= ~LB_D2_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D2 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 19: /* HPD/DAC hotplug */
			switch (src_data) {
			case 0:
				if (disp_int & DC_HPD1_INTERRUPT) {
					disp_int &= ~DC_HPD1_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD1\n");
				}
				break;
			case 1:
				if (disp_int & DC_HPD2_INTERRUPT) {
					disp_int &= ~DC_HPD2_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD2\n");
				}
				break;
			case 4:
				if (disp_int_cont & DC_HPD3_INTERRUPT) {
					disp_int_cont &= ~DC_HPD3_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD3\n");
				}
				break;
			case 5:
				if (disp_int_cont & DC_HPD4_INTERRUPT) {
					disp_int_cont &= ~DC_HPD4_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD4\n");
				}
				break;
			case 10:
				if (disp_int_cont2 & DC_HPD5_INTERRUPT) {
					disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD5\n");
				}
				break;
			case 12:
				if (disp_int_cont2 & DC_HPD6_INTERRUPT) {
					disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD6\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 21: /* HDMI */
			DRM_DEBUG("IH: HDMI: 0x%x\n", src_data);
			r600_audio_schedule_polling(rdev);
			break;
		case 176: /* CP_INT in ring buffer */
		case 177: /* CP_INT in IB1 */
		case 178: /* CP_INT in IB2 */
			DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
			radeon_fence_process(rdev);
			break;
		case 181: /* CP EOP event */
			DRM_DEBUG("IH: CP EOP\n");
			break;
		case 233: /* GUI IDLE */
3168 DRM_DEBUG("IH: CP EOP\n");
			rdev->pm.gui_idle = true;
			wake_up(&rdev->irq.idle_queue);
			break;
		default:
			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
			break;
		}

		/* wptr/rptr are in bytes! */
		rptr += 16;
		rptr &= rdev->ih.ptr_mask;
	}
	/* make sure wptr hasn't changed while processing */
	wptr = r600_get_ih_wptr(rdev);
	if (wptr != rdev->ih.wptr)
		goto restart_ih;
	if (queue_hotplug)
		queue_work(rdev->wq, &rdev->hotplug_work);
	rdev->ih.rptr = rptr;
	WREG32(IH_RB_RPTR, rdev->ih.rptr);
	spin_unlock_irqrestore(&rdev->ih.lock, flags);
	return IRQ_HANDLED;
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int r600_debugfs_cp_ring_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	unsigned count, i, j;

	radeon_ring_free_size(rdev);
	count = (rdev->cp.ring_size / 4) - rdev->cp.ring_free_dw;
	seq_printf(m, "CP_STAT 0x%08x\n", RREG32(CP_STAT));
	seq_printf(m, "CP_RB_WPTR 0x%08x\n", RREG32(CP_RB_WPTR));
	seq_printf(m, "CP_RB_RPTR 0x%08x\n", RREG32(CP_RB_RPTR));
	seq_printf(m, "driver's copy of the CP_RB_WPTR 0x%08x\n", rdev->cp.wptr);
	seq_printf(m, "driver's copy of the CP_RB_RPTR 0x%08x\n", rdev->cp.rptr);
	seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw);
	seq_printf(m, "%u dwords in ring\n", count);
	i = rdev->cp.rptr;
	for (j = 0; j <= count; j++) {
		seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]);
		i = (i + 1) & rdev->cp.ptr_mask;
	}
	return 0;
}

static int r600_debugfs_mc_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;

	DREG32_SYS(m, rdev, R_000E50_SRBM_STATUS);
	DREG32_SYS(m, rdev, VM_L2_STATUS);
	return 0;
}

static struct drm_info_list r600_mc_info_list[] = {
	{"r600_mc_info", r600_debugfs_mc_info, 0, NULL},
	{"r600_ring_info", r600_debugfs_cp_ring_info, 0, NULL},
};
#endif

int r600_debugfs_mc_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, r600_mc_info_list, ARRAY_SIZE(r600_mc_info_list));
#else
	return 0;
#endif
}

/**
 * r600_ioctl_wait_idle - flush host path cache on wait idle ioctl
 * @rdev: radeon device structure
 * @bo: buffer object struct which userspace is waiting for idle
 *
 * Some R6XX/R7XX ASICs don't seem to take into account the HDP flush
 * performed through the ring buffer; this leads to corruption in
 * rendering, see http://bugzilla.kernel.org/show_bug.cgi?id=15186.
 * To avoid this we perform the HDP flush directly by writing the
 * register through MMIO.
 */
void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
{
	WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
}