/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/firmware.h>
#include <linux/platform_device.h>
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_mode.h"
#include "r600d.h"
#include "atom.h"
#include "avivod.h"

#define PFP_UCODE_SIZE 576
#define PM4_UCODE_SIZE 1792
#define RLC_UCODE_SIZE 768
#define R700_PFP_UCODE_SIZE 848
#define R700_PM4_UCODE_SIZE 1360
#define R700_RLC_UCODE_SIZE 1024
#define EVERGREEN_PFP_UCODE_SIZE 1120
#define EVERGREEN_PM4_UCODE_SIZE 1376
#define EVERGREEN_RLC_UCODE_SIZE 768
#define CAYMAN_RLC_UCODE_SIZE 1024

/* Firmware Names */
MODULE_FIRMWARE("radeon/R600_pfp.bin");
MODULE_FIRMWARE("radeon/R600_me.bin");
MODULE_FIRMWARE("radeon/RV610_pfp.bin");
MODULE_FIRMWARE("radeon/RV610_me.bin");
MODULE_FIRMWARE("radeon/RV630_pfp.bin");
MODULE_FIRMWARE("radeon/RV630_me.bin");
MODULE_FIRMWARE("radeon/RV620_pfp.bin");
MODULE_FIRMWARE("radeon/RV620_me.bin");
MODULE_FIRMWARE("radeon/RV635_pfp.bin");
MODULE_FIRMWARE("radeon/RV635_me.bin");
MODULE_FIRMWARE("radeon/RV670_pfp.bin");
MODULE_FIRMWARE("radeon/RV670_me.bin");
MODULE_FIRMWARE("radeon/RS780_pfp.bin");
MODULE_FIRMWARE("radeon/RS780_me.bin");
MODULE_FIRMWARE("radeon/RV770_pfp.bin");
MODULE_FIRMWARE("radeon/RV770_me.bin");
MODULE_FIRMWARE("radeon/RV730_pfp.bin");
MODULE_FIRMWARE("radeon/RV730_me.bin");
MODULE_FIRMWARE("radeon/RV710_pfp.bin");
MODULE_FIRMWARE("radeon/RV710_me.bin");
MODULE_FIRMWARE("radeon/R600_rlc.bin");
MODULE_FIRMWARE("radeon/R700_rlc.bin");
MODULE_FIRMWARE("radeon/CEDAR_pfp.bin");
MODULE_FIRMWARE("radeon/CEDAR_me.bin");
MODULE_FIRMWARE("radeon/CEDAR_rlc.bin");
MODULE_FIRMWARE("radeon/REDWOOD_pfp.bin");
MODULE_FIRMWARE("radeon/REDWOOD_me.bin");
MODULE_FIRMWARE("radeon/REDWOOD_rlc.bin");
MODULE_FIRMWARE("radeon/JUNIPER_pfp.bin");
MODULE_FIRMWARE("radeon/JUNIPER_me.bin");
MODULE_FIRMWARE("radeon/JUNIPER_rlc.bin");
MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin");
MODULE_FIRMWARE("radeon/CYPRESS_me.bin");
MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin");
MODULE_FIRMWARE("radeon/PALM_pfp.bin");
MODULE_FIRMWARE("radeon/PALM_me.bin");
MODULE_FIRMWARE("radeon/SUMO_rlc.bin");
MODULE_FIRMWARE("radeon/SUMO_pfp.bin");
MODULE_FIRMWARE("radeon/SUMO_me.bin");
MODULE_FIRMWARE("radeon/SUMO2_pfp.bin");
MODULE_FIRMWARE("radeon/SUMO2_me.bin");

int r600_debugfs_mc_info_init(struct radeon_device *rdev);

/* r600,rv610,rv630,rv620,rv635,rv670 */
int r600_mc_wait_for_idle(struct radeon_device *rdev);
void r600_gpu_init(struct radeon_device *rdev);
void r600_fini(struct radeon_device *rdev);
void r600_irq_disable(struct radeon_device *rdev);
static void r600_pcie_gen2_enable(struct radeon_device *rdev);

/* get temperature in millidegrees */
int rv6xx_get_temp(struct radeon_device *rdev)
{
	u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >>
		ASIC_T_SHIFT;
	int actual_temp = temp & 0xff;

	if (temp & 0x100)
		actual_temp -= 256;

	return actual_temp * 1000;
}

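/*
 * Pick the power state and clock mode to request for the planned dynpm
 * action (minimum, downclock, upclock, default), honoring the
 * single-display-only and no-display state flags when CRTCs are active.
 */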
void r600_pm_get_dynpm_state(struct radeon_device *rdev)
{
	int i;

	rdev->pm.dynpm_can_upclock = true;
	rdev->pm.dynpm_can_downclock = true;

	/* power state array is low to high, default is first */
	if ((rdev->flags & RADEON_IS_IGP) || (rdev->family == CHIP_R600)) {
		int min_power_state_index = 0;

		if (rdev->pm.num_power_states > 2)
			min_power_state_index = 1;

		switch (rdev->pm.dynpm_planned_action) {
		case DYNPM_ACTION_MINIMUM:
			rdev->pm.requested_power_state_index = min_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_downclock = false;
			break;
		case DYNPM_ACTION_DOWNCLOCK:
			if (rdev->pm.current_power_state_index == min_power_state_index) {
				rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
				rdev->pm.dynpm_can_downclock = false;
			} else {
				if (rdev->pm.active_crtc_count > 1) {
					for (i = 0; i < rdev->pm.num_power_states; i++) {
						if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
							continue;
						else if (i >= rdev->pm.current_power_state_index) {
							rdev->pm.requested_power_state_index =
								rdev->pm.current_power_state_index;
							break;
						} else {
							rdev->pm.requested_power_state_index = i;
							break;
						}
					}
				} else {
					if (rdev->pm.current_power_state_index == 0)
						rdev->pm.requested_power_state_index =
							rdev->pm.num_power_states - 1;
					else
						rdev->pm.requested_power_state_index =
							rdev->pm.current_power_state_index - 1;
				}
			}
			rdev->pm.requested_clock_mode_index = 0;
			/* don't use the power state if crtcs are active and no display flag is set */
			if ((rdev->pm.active_crtc_count > 0) &&
			    (rdev->pm.power_state[rdev->pm.requested_power_state_index].
			     clock_info[rdev->pm.requested_clock_mode_index].flags &
			     RADEON_PM_MODE_NO_DISPLAY)) {
				rdev->pm.requested_power_state_index++;
			}
			break;
		case DYNPM_ACTION_UPCLOCK:
			if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
				rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
				rdev->pm.dynpm_can_upclock = false;
			} else {
				if (rdev->pm.active_crtc_count > 1) {
					for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
						if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
							continue;
						else if (i <= rdev->pm.current_power_state_index) {
							rdev->pm.requested_power_state_index =
								rdev->pm.current_power_state_index;
							break;
						} else {
							rdev->pm.requested_power_state_index = i;
							break;
						}
					}
				} else
					rdev->pm.requested_power_state_index =
						rdev->pm.current_power_state_index + 1;
			}
			rdev->pm.requested_clock_mode_index = 0;
			break;
		case DYNPM_ACTION_DEFAULT:
			rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_upclock = false;
			break;
		case DYNPM_ACTION_NONE:
		default:
			DRM_ERROR("Requested mode for not defined action\n");
			return;
		}
	} else {
		/* XXX select a power state based on AC/DC, single/dualhead, etc. */
		/* for now just select the first power state and switch between clock modes */
		/* power state array is low to high, default is first (0) */
		if (rdev->pm.active_crtc_count > 1) {
			rdev->pm.requested_power_state_index = -1;
			/* start at 1 as we don't want the default mode */
			for (i = 1; i < rdev->pm.num_power_states; i++) {
				if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
					continue;
				else if ((rdev->pm.power_state[i].type == POWER_STATE_TYPE_PERFORMANCE) ||
					 (rdev->pm.power_state[i].type == POWER_STATE_TYPE_BATTERY)) {
					rdev->pm.requested_power_state_index = i;
					break;
				}
			}
			/* if nothing selected, grab the default state. */
			if (rdev->pm.requested_power_state_index == -1)
				rdev->pm.requested_power_state_index = 0;
		} else
			rdev->pm.requested_power_state_index = 1;

		switch (rdev->pm.dynpm_planned_action) {
		case DYNPM_ACTION_MINIMUM:
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_downclock = false;
			break;
		case DYNPM_ACTION_DOWNCLOCK:
			if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
				if (rdev->pm.current_clock_mode_index == 0) {
					rdev->pm.requested_clock_mode_index = 0;
					rdev->pm.dynpm_can_downclock = false;
				} else
					rdev->pm.requested_clock_mode_index =
						rdev->pm.current_clock_mode_index - 1;
			} else {
				rdev->pm.requested_clock_mode_index = 0;
				rdev->pm.dynpm_can_downclock = false;
			}
			/* don't use the power state if crtcs are active and no display flag is set */
			if ((rdev->pm.active_crtc_count > 0) &&
			    (rdev->pm.power_state[rdev->pm.requested_power_state_index].
			     clock_info[rdev->pm.requested_clock_mode_index].flags &
			     RADEON_PM_MODE_NO_DISPLAY)) {
				rdev->pm.requested_clock_mode_index++;
			}
			break;
		case DYNPM_ACTION_UPCLOCK:
			if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
				if (rdev->pm.current_clock_mode_index ==
				    (rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1)) {
					rdev->pm.requested_clock_mode_index = rdev->pm.current_clock_mode_index;
					rdev->pm.dynpm_can_upclock = false;
				} else
					rdev->pm.requested_clock_mode_index =
						rdev->pm.current_clock_mode_index + 1;
			} else {
				rdev->pm.requested_clock_mode_index =
					rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1;
				rdev->pm.dynpm_can_upclock = false;
			}
			break;
		case DYNPM_ACTION_DEFAULT:
			rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_upclock = false;
			break;
		case DYNPM_ACTION_NONE:
		default:
			DRM_ERROR("Requested mode for not defined action\n");
			return;
		}
	}

	DRM_DEBUG_DRIVER("Requested: e: %d m: %d p: %d\n",
			 rdev->pm.power_state[rdev->pm.requested_power_state_index].
			 clock_info[rdev->pm.requested_clock_mode_index].sclk,
			 rdev->pm.power_state[rdev->pm.requested_power_state_index].
			 clock_info[rdev->pm.requested_clock_mode_index].mclk,
			 rdev->pm.power_state[rdev->pm.requested_power_state_index].
			 pcie_lanes);
}

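/*
 * Return the index of the instance'th power state of the given type,
 * or the default power state index if no match is found.
 */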
static int r600_pm_get_type_index(struct radeon_device *rdev,
				  enum radeon_pm_state_type ps_type,
				  int instance)
{
	int i;
	int found_instance = -1;

	for (i = 0; i < rdev->pm.num_power_states; i++) {
		if (rdev->pm.power_state[i].type == ps_type) {
			found_instance++;
			if (found_instance == instance)
				return i;
		}
	}
	/* return default if no match */
	return rdev->pm.default_power_state_index;
}

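/*
 * Build the PM profile table for RS780-class IGPs; the state indices
 * used depend on how many power states the BIOS exposes (2, 3, or more).
 */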
void rs780_pm_init_profile(struct radeon_device *rdev)
{
	if (rdev->pm.num_power_states == 2) {
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	} else if (rdev->pm.num_power_states == 3) {
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	} else {
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 3;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 3;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	}
}

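/*
 * Build the PM profile table for R600 and newer discrete chips.  R600
 * itself only ever uses the default state; other chips map each profile
 * to a battery or performance state via r600_pm_get_type_index().
 */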
void r600_pm_init_profile(struct radeon_device *rdev)
{
	if (rdev->family == CHIP_R600) {
		/* XXX */
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	} else {
		if (rdev->pm.num_power_states < 4) {
			/* default */
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
			/* low sh */
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
			/* mid sh */
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
			/* high sh */
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
			/* low mh */
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
			/* mid mh */
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
			/* high mh */
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
		} else {
			/* default */
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
			/* low sh */
			if (rdev->flags & RADEON_IS_MOBILITY) {
				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
			} else {
				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
			}
			/* mid sh */
			if (rdev->flags & RADEON_IS_MOBILITY) {
				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
			} else {
				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
			}
			/* high sh */
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx =
				r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx =
				r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
			/* low mh */
			if (rdev->flags & RADEON_IS_MOBILITY) {
				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
			} else {
				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
			}
			/* mid mh */
			if (rdev->flags & RADEON_IS_MOBILITY) {
				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
			} else {
				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
			}
			/* high mh */
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx =
				r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx =
				r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
		}
	}
}

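/*
 * Apply the non-clock settings of the requested power state; currently
 * this only programs VDDC when software voltage control is in use.
 */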
void r600_pm_misc(struct radeon_device *rdev)
{
	int req_ps_idx = rdev->pm.requested_power_state_index;
	int req_cm_idx = rdev->pm.requested_clock_mode_index;
	struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
	struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;

	if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
		/* 0xff01 is a flag rather than an actual voltage */
		if (voltage->voltage == 0xff01)
			return;
		if (voltage->voltage != rdev->pm.current_vddc) {
			radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
			rdev->pm.current_vddc = voltage->voltage;
			DRM_DEBUG_DRIVER("Setting: v: %d\n", voltage->voltage);
		}
	}
}

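/* Report whether the graphics engine is idle based on GRBM_STATUS. */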
bool r600_gui_idle(struct radeon_device *rdev)
{
	if (RREG32(GRBM_STATUS) & GUI_ACTIVE)
		return false;
	else
		return true;
}

/* hpd for digital panel detect/disconnect */
bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
	bool connected = false;

	if (ASIC_IS_DCE3(rdev)) {
		switch (hpd) {
		case RADEON_HPD_1:
			if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_2:
			if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_3:
			if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_4:
			if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
			/* DCE 3.2 */
		case RADEON_HPD_5:
			if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_6:
			if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		default:
			break;
		}
	} else {
		switch (hpd) {
		case RADEON_HPD_1:
			if (RREG32(DC_HOT_PLUG_DETECT1_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_2:
			if (RREG32(DC_HOT_PLUG_DETECT2_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_3:
			if (RREG32(DC_HOT_PLUG_DETECT3_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		default:
			break;
		}
	}
	return connected;
}

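/*
 * Program the HPD interrupt polarity so the next interrupt fires on the
 * opposite transition (plug vs. unplug) of the current sense state.
 */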
void r600_hpd_set_polarity(struct radeon_device *rdev,
			   enum radeon_hpd_id hpd)
{
	u32 tmp;
	bool connected = r600_hpd_sense(rdev, hpd);

	if (ASIC_IS_DCE3(rdev)) {
		switch (hpd) {
		case RADEON_HPD_1:
			tmp = RREG32(DC_HPD1_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD1_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_2:
			tmp = RREG32(DC_HPD2_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD2_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_3:
			tmp = RREG32(DC_HPD3_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD3_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_4:
			tmp = RREG32(DC_HPD4_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD4_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_5:
			tmp = RREG32(DC_HPD5_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD5_INT_CONTROL, tmp);
			break;
			/* DCE 3.2 */
		case RADEON_HPD_6:
			tmp = RREG32(DC_HPD6_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD6_INT_CONTROL, tmp);
			break;
		default:
			break;
		}
	} else {
		switch (hpd) {
		case RADEON_HPD_1:
			tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_2:
			tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_3:
			tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
			break;
		default:
			break;
		}
	}
}

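/*
 * Enable the HPD pin of each connector on this board and flag the
 * matching HPD interrupt source for r600_irq_set().
 */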
void r600_hpd_init(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;

	if (ASIC_IS_DCE3(rdev)) {
		u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa);
		if (ASIC_IS_DCE32(rdev))
			tmp |= DC_HPDx_EN;

		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HPD1_CONTROL, tmp);
				rdev->irq.hpd[0] = true;
				break;
			case RADEON_HPD_2:
				WREG32(DC_HPD2_CONTROL, tmp);
				rdev->irq.hpd[1] = true;
				break;
			case RADEON_HPD_3:
				WREG32(DC_HPD3_CONTROL, tmp);
				rdev->irq.hpd[2] = true;
				break;
			case RADEON_HPD_4:
				WREG32(DC_HPD4_CONTROL, tmp);
				rdev->irq.hpd[3] = true;
				break;
				/* DCE 3.2 */
			case RADEON_HPD_5:
				WREG32(DC_HPD5_CONTROL, tmp);
				rdev->irq.hpd[4] = true;
				break;
			case RADEON_HPD_6:
				WREG32(DC_HPD6_CONTROL, tmp);
				rdev->irq.hpd[5] = true;
				break;
			default:
				break;
			}
		}
	} else {
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				rdev->irq.hpd[0] = true;
				break;
			case RADEON_HPD_2:
				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				rdev->irq.hpd[1] = true;
				break;
			case RADEON_HPD_3:
				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				rdev->irq.hpd[2] = true;
				break;
			default:
				break;
			}
		}
	}
	if (rdev->irq.installed)
		r600_irq_set(rdev);
}

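/* Disable the HPD pins and clear the matching interrupt source flags. */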
void r600_hpd_fini(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;

	if (ASIC_IS_DCE3(rdev)) {
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HPD1_CONTROL, 0);
				rdev->irq.hpd[0] = false;
				break;
			case RADEON_HPD_2:
				WREG32(DC_HPD2_CONTROL, 0);
				rdev->irq.hpd[1] = false;
				break;
			case RADEON_HPD_3:
				WREG32(DC_HPD3_CONTROL, 0);
				rdev->irq.hpd[2] = false;
				break;
			case RADEON_HPD_4:
				WREG32(DC_HPD4_CONTROL, 0);
				rdev->irq.hpd[3] = false;
				break;
				/* DCE 3.2 */
			case RADEON_HPD_5:
				WREG32(DC_HPD5_CONTROL, 0);
				rdev->irq.hpd[4] = false;
				break;
			case RADEON_HPD_6:
				WREG32(DC_HPD6_CONTROL, 0);
				rdev->irq.hpd[5] = false;
				break;
			default:
				break;
			}
		}
	} else {
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, 0);
				rdev->irq.hpd[0] = false;
				break;
			case RADEON_HPD_2:
				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, 0);
				rdev->irq.hpd[1] = false;
				break;
			case RADEON_HPD_3:
				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, 0);
				rdev->irq.hpd[2] = false;
				break;
			default:
				break;
			}
		}
	}
}

/*
 * R600 PCIE GART
 */
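/*
 * Flush the GART TLB for VM context 0, making sure pending HDP writes
 * have reached VRAM first.
 */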
void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	/* flush hdp cache so updates hit vram */
	if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
	    !(rdev->flags & RADEON_IS_AGP)) {
		void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
		u32 tmp;

		/* r7xx hw bug.  write to HDP_DEBUG1 followed by fb read
		 * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL.
		 * This seems to cause problems on some AGP cards. Just use the old
		 * method for them.
		 */
		WREG32(HDP_DEBUG1, 0);
		tmp = readl((void __iomem *)ptr);
	} else
		WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);

	WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (rdev->mc.gtt_end - 1) >> 12);
	WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
		tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
		if (tmp == 2) {
			printk(KERN_WARNING "[drm] r600 flush TLB failed\n");
			return;
		}
		if (tmp) {
			return;
		}
		udelay(1);
	}
}

int r600_pcie_gart_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.table.vram.robj) {
		WARN(1, "R600 PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	if (r)
		return r;
	rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
	return radeon_gart_table_vram_alloc(rdev);
}

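/*
 * Program the L2 cache, L1 TLBs and VM context 0, point the context at
 * the page table in VRAM, then flush the TLB and mark the GART ready.
 */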
int r600_pcie_gart_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int r, i;

	if (rdev->gart.table.vram.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	radeon_gart_restore(rdev);

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
			(u32)(rdev->dummy_page.addr >> 12));
	for (i = 1; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	r600_pcie_gart_tlb_flush(rdev);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20),
		 (unsigned long long)rdev->gart.table_addr);
	rdev->gart.ready = true;
	return 0;
}

void r600_pcie_gart_disable(struct radeon_device *rdev)
{
	u32 tmp;
	int i, r;

	/* Disable all tables */
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	/* Disable L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup L1 TLB control */
	tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	if (rdev->gart.table.vram.robj) {
		r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(rdev->gart.table.vram.robj);
			radeon_bo_unpin(rdev->gart.table.vram.robj);
			radeon_bo_unreserve(rdev->gart.table.vram.robj);
		}
	}
}

void r600_pcie_gart_fini(struct radeon_device *rdev)
{
	radeon_gart_fini(rdev);
	r600_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
}

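/*
 * Set up the L2 cache and TLBs for the AGP path; all VM contexts are
 * left disabled.
 */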
void r600_agp_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int i;

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
}

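/*
 * Poll SRBM_STATUS until the memory controller reports idle, or give up
 * after rdev->usec_timeout microseconds.
 */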
int r600_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(R_000E50_SRBM_STATUS) & 0x3F00;
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -1;
}

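/*
 * Reprogram the memory controller apertures with the MC idle and
 * display access stopped, then restore the display state.
 */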
static void r600_mc_program(struct radeon_device *rdev)
{
	struct rv515_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}
	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);

	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	/* Lockout access through VGA aperture (doesn't exist before R600) */
	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
	/* Update configuration */
	if (rdev->flags & RADEON_IS_AGP) {
		if (rdev->mc.vram_start < rdev->mc.gtt_start) {
			/* VRAM before AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.vram_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.gtt_end >> 12);
		} else {
			/* VRAM after AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.gtt_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.vram_end >> 12);
		}
	} else {
		WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12);
		WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12);
	}
	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12);
	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(MC_VM_FB_LOCATION, tmp);
	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
	WREG32(HDP_NONSURFACE_INFO, (2 << 7));
	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	if (rdev->flags & RADEON_IS_AGP) {
		WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22);
		WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22);
		WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
	} else {
		WREG32(MC_VM_AGP_BASE, 0);
		WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
		WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
	}
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	rv515_mc_resume(rdev, &save);
	/* we need to own VRAM, so turn off the VGA renderer here
	 * to stop it overwriting our objects */
	rv515_vga_render_disable(rdev);
}

Jerome Glissed594e462010-02-17 21:54:29 +00001165/**
1166 * r600_vram_gtt_location - try to find VRAM & GTT location
1167 * @rdev: radeon device structure holding all necessary informations
1168 * @mc: memory controller structure holding memory informations
1169 *
1170 * Function will place try to place VRAM at same place as in CPU (PCI)
1171 * address space as some GPU seems to have issue when we reprogram at
1172 * different address space.
1173 *
1174 * If there is not enough space to fit the unvisible VRAM after the
1175 * aperture then we limit the VRAM size to the aperture.
1176 *
1177 * If we are using AGP then place VRAM adjacent to AGP aperture are we need
1178 * them to be in one from GPU point of view so that we can program GPU to
1179 * catch access outside them (weird GPU policy see ??).
1180 *
1181 * This function will never fails, worst case are limiting VRAM or GTT.
1182 *
1183 * Note: GTT start, end, size should be initialized before calling this
1184 * function on AGP platform.
1185 */
Alex Deucher0ef0c1f2010-11-22 17:56:26 -05001186static void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
Jerome Glissed594e462010-02-17 21:54:29 +00001187{
1188 u64 size_bf, size_af;
1189
1190 if (mc->mc_vram_size > 0xE0000000) {
1191 /* leave room for at least 512M GTT */
1192 dev_warn(rdev->dev, "limiting VRAM\n");
1193 mc->real_vram_size = 0xE0000000;
1194 mc->mc_vram_size = 0xE0000000;
1195 }
1196 if (rdev->flags & RADEON_IS_AGP) {
1197 size_bf = mc->gtt_start;
1198 size_af = 0xFFFFFFFF - mc->gtt_end + 1;
1199 if (size_bf > size_af) {
1200 if (mc->mc_vram_size > size_bf) {
1201 dev_warn(rdev->dev, "limiting VRAM\n");
1202 mc->real_vram_size = size_bf;
1203 mc->mc_vram_size = size_bf;
1204 }
1205 mc->vram_start = mc->gtt_start - mc->mc_vram_size;
1206 } else {
1207 if (mc->mc_vram_size > size_af) {
1208 dev_warn(rdev->dev, "limiting VRAM\n");
1209 mc->real_vram_size = size_af;
1210 mc->mc_vram_size = size_af;
1211 }
1212 mc->vram_start = mc->gtt_end;
1213 }
1214 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
1215 dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
1216 mc->mc_vram_size >> 20, mc->vram_start,
1217 mc->vram_end, mc->real_vram_size >> 20);
1218 } else {
1219 u64 base = 0;
Alex Deucher8961d522010-12-03 14:37:22 -05001220 if (rdev->flags & RADEON_IS_IGP) {
1221 base = RREG32(MC_VM_FB_LOCATION) & 0xFFFF;
1222 base <<= 24;
1223 }
Jerome Glissed594e462010-02-17 21:54:29 +00001224 radeon_vram_location(rdev, &rdev->mc, base);
Alex Deucher8d369bb2010-07-15 10:51:10 -04001225 rdev->mc.gtt_base_align = 0;
Jerome Glissed594e462010-02-17 21:54:29 +00001226 radeon_gtt_location(rdev, mc);
1227 }
1228}
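/* Worked example for the AGP branch above (hypothetical numbers): with an
 * AGP aperture at gtt_start = 0x80000000 and gtt_end = 0x9FFFFFFF,
 * size_bf = 2GB and size_af = 1.5GB, so 1GB of VRAM is placed just below
 * the aperture at vram_start = 0x40000000, keeping VRAM and GTT adjacent
 * in the GPU address space. */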
1229
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001230int r600_mc_init(struct radeon_device *rdev)
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001231{
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001232 u32 tmp;
Alex Deucher5885b7a2009-10-19 17:23:33 -04001233 int chansize, numchan;
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001234
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001235	/* Get VRAM information */
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001236 rdev->mc.vram_is_ddr = true;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001237 tmp = RREG32(RAMCFG);
1238 if (tmp & CHANSIZE_OVERRIDE) {
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001239 chansize = 16;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001240 } else if (tmp & CHANSIZE_MASK) {
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001241 chansize = 64;
1242 } else {
1243 chansize = 32;
1244 }
Alex Deucher5885b7a2009-10-19 17:23:33 -04001245 tmp = RREG32(CHMAP);
1246 switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
1247 case 0:
1248 default:
1249 numchan = 1;
1250 break;
1251 case 1:
1252 numchan = 2;
1253 break;
1254 case 2:
1255 numchan = 4;
1256 break;
1257 case 3:
1258 numchan = 8;
1259 break;
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001260 }
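	/* Effective bus width is channel count times channel size; e.g.
	 * (illustrative) numchan = 4 with chansize = 64 yields a 256-bit
	 * memory interface. */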
Alex Deucher5885b7a2009-10-19 17:23:33 -04001261 rdev->mc.vram_width = numchan * chansize;
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001262	/* Could the aperture size report 0? */
Jordan Crouse01d73a62010-05-27 13:40:24 -06001263 rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
1264 rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001265 /* Setup GPU memory space */
1266 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
1267 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
Jerome Glisse51e5fcd2010-02-19 14:33:54 +00001268 rdev->mc.visible_vram_size = rdev->mc.aper_size;
Jerome Glissed594e462010-02-17 21:54:29 +00001269 r600_vram_gtt_location(rdev, &rdev->mc);
Alex Deucherf47299c2010-03-16 20:54:38 -04001270
Alex Deucherf8920342010-06-30 12:02:03 -04001271 if (rdev->flags & RADEON_IS_IGP) {
1272 rs690_pm_info(rdev);
Alex Deucher06b64762010-01-05 11:27:29 -05001273 rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
Alex Deucherf8920342010-06-30 12:02:03 -04001274 }
Alex Deucherf47299c2010-03-16 20:54:38 -04001275 radeon_update_bandwidth_info(rdev);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001276 return 0;
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001277}
1278
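/* Allocate and pin one page of VRAM used as the MC default aperture
 * target (see the MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR write in
 * r600_mc_program), so stray accesses land in a harmless scratch page. */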
Alex Deucher16cdf042011-10-28 10:30:02 -04001279int r600_vram_scratch_init(struct radeon_device *rdev)
1280{
1281 int r;
1282
1283 if (rdev->vram_scratch.robj == NULL) {
1284 r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE,
1285 PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
1286 &rdev->vram_scratch.robj);
1287 if (r) {
1288 return r;
1289 }
1290 }
1291
1292 r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
1293 if (unlikely(r != 0))
1294 return r;
1295 r = radeon_bo_pin(rdev->vram_scratch.robj,
1296 RADEON_GEM_DOMAIN_VRAM, &rdev->vram_scratch.gpu_addr);
1297 if (r) {
1298 radeon_bo_unreserve(rdev->vram_scratch.robj);
1299 return r;
1300 }
1301 r = radeon_bo_kmap(rdev->vram_scratch.robj,
1302 (void **)&rdev->vram_scratch.ptr);
1303 if (r)
1304 radeon_bo_unpin(rdev->vram_scratch.robj);
1305 radeon_bo_unreserve(rdev->vram_scratch.robj);
1306
1307 return r;
1308}
1309
1310void r600_vram_scratch_fini(struct radeon_device *rdev)
1311{
1312 int r;
1313
1314 if (rdev->vram_scratch.robj == NULL) {
1315 return;
1316 }
1317 r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
1318 if (likely(r == 0)) {
1319 radeon_bo_kunmap(rdev->vram_scratch.robj);
1320 radeon_bo_unpin(rdev->vram_scratch.robj);
1321 radeon_bo_unreserve(rdev->vram_scratch.robj);
1322 }
1323 radeon_bo_unref(&rdev->vram_scratch.robj);
1324}
1325
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001326/* We don't check whether the GPU really needs a reset; we simply do the
1327 * reset. It's up to the caller to determine if the GPU needs one. We
1328 * might add a helper function to check that.
1329 */
1330int r600_gpu_soft_reset(struct radeon_device *rdev)
1331{
Jerome Glissea3c19452009-10-01 18:02:13 +02001332 struct rv515_mc_save save;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001333 u32 grbm_busy_mask = S_008010_VC_BUSY(1) | S_008010_VGT_BUSY_NO_DMA(1) |
1334 S_008010_VGT_BUSY(1) | S_008010_TA03_BUSY(1) |
1335 S_008010_TC_BUSY(1) | S_008010_SX_BUSY(1) |
1336 S_008010_SH_BUSY(1) | S_008010_SPI03_BUSY(1) |
1337 S_008010_SMX_BUSY(1) | S_008010_SC_BUSY(1) |
1338 S_008010_PA_BUSY(1) | S_008010_DB03_BUSY(1) |
1339 S_008010_CR_BUSY(1) | S_008010_CB03_BUSY(1) |
1340 S_008010_GUI_ACTIVE(1);
1341 u32 grbm2_busy_mask = S_008014_SPI0_BUSY(1) | S_008014_SPI1_BUSY(1) |
1342 S_008014_SPI2_BUSY(1) | S_008014_SPI3_BUSY(1) |
1343 S_008014_TA0_BUSY(1) | S_008014_TA1_BUSY(1) |
1344 S_008014_TA2_BUSY(1) | S_008014_TA3_BUSY(1) |
1345 S_008014_DB0_BUSY(1) | S_008014_DB1_BUSY(1) |
1346 S_008014_DB2_BUSY(1) | S_008014_DB3_BUSY(1) |
1347 S_008014_CB0_BUSY(1) | S_008014_CB1_BUSY(1) |
1348 S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1);
Jerome Glissea3c19452009-10-01 18:02:13 +02001349 u32 tmp;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001350
Alex Deucher8d96fe92011-01-21 15:38:22 +00001351 if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
1352 return 0;
1353
Jerome Glisse1a029b72009-10-06 19:04:30 +02001354	dev_info(rdev->dev, "GPU softreset\n");
1355 dev_info(rdev->dev, " R_008010_GRBM_STATUS=0x%08X\n",
1356 RREG32(R_008010_GRBM_STATUS));
1357 dev_info(rdev->dev, " R_008014_GRBM_STATUS2=0x%08X\n",
Jerome Glissea3c19452009-10-01 18:02:13 +02001358 RREG32(R_008014_GRBM_STATUS2));
Jerome Glisse1a029b72009-10-06 19:04:30 +02001359 dev_info(rdev->dev, " R_000E50_SRBM_STATUS=0x%08X\n",
1360 RREG32(R_000E50_SRBM_STATUS));
Jerome Glissea3c19452009-10-01 18:02:13 +02001361 rv515_mc_stop(rdev, &save);
1362 if (r600_mc_wait_for_idle(rdev)) {
1363		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
1364 }
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001365 /* Disable CP parsing/prefetching */
Jerome Glisse90aca4d2010-03-09 14:45:12 +00001366 WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001367	/* Check if any of the rendering blocks are busy and reset them */
1368 if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) ||
1369 (RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) {
Jerome Glissea3c19452009-10-01 18:02:13 +02001370 tmp = S_008020_SOFT_RESET_CR(1) |
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001371 S_008020_SOFT_RESET_DB(1) |
1372 S_008020_SOFT_RESET_CB(1) |
1373 S_008020_SOFT_RESET_PA(1) |
1374 S_008020_SOFT_RESET_SC(1) |
1375 S_008020_SOFT_RESET_SMX(1) |
1376 S_008020_SOFT_RESET_SPI(1) |
1377 S_008020_SOFT_RESET_SX(1) |
1378 S_008020_SOFT_RESET_SH(1) |
1379 S_008020_SOFT_RESET_TC(1) |
1380 S_008020_SOFT_RESET_TA(1) |
1381 S_008020_SOFT_RESET_VC(1) |
Jerome Glissea3c19452009-10-01 18:02:13 +02001382 S_008020_SOFT_RESET_VGT(1);
Jerome Glisse1a029b72009-10-06 19:04:30 +02001383 dev_info(rdev->dev, " R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
Jerome Glissea3c19452009-10-01 18:02:13 +02001384 WREG32(R_008020_GRBM_SOFT_RESET, tmp);
Jerome Glisse90aca4d2010-03-09 14:45:12 +00001385 RREG32(R_008020_GRBM_SOFT_RESET);
1386 mdelay(15);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001387 WREG32(R_008020_GRBM_SOFT_RESET, 0);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001388 }
1389 /* Reset CP (we always reset CP) */
Jerome Glissea3c19452009-10-01 18:02:13 +02001390 tmp = S_008020_SOFT_RESET_CP(1);
1391 dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
1392 WREG32(R_008020_GRBM_SOFT_RESET, tmp);
Jerome Glisse90aca4d2010-03-09 14:45:12 +00001393 RREG32(R_008020_GRBM_SOFT_RESET);
1394 mdelay(15);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001395 WREG32(R_008020_GRBM_SOFT_RESET, 0);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001396 /* Wait a little for things to settle down */
Jerome Glisse225758d2010-03-09 14:45:10 +00001397 mdelay(1);
Jerome Glisse1a029b72009-10-06 19:04:30 +02001398 dev_info(rdev->dev, " R_008010_GRBM_STATUS=0x%08X\n",
1399 RREG32(R_008010_GRBM_STATUS));
1400 dev_info(rdev->dev, " R_008014_GRBM_STATUS2=0x%08X\n",
1401 RREG32(R_008014_GRBM_STATUS2));
1402 dev_info(rdev->dev, " R_000E50_SRBM_STATUS=0x%08X\n",
1403 RREG32(R_000E50_SRBM_STATUS));
Jerome Glissea3c19452009-10-01 18:02:13 +02001404 rv515_mc_resume(rdev, &save);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001405 return 0;
1406}
1407
Jerome Glisse225758d2010-03-09 14:45:10 +00001408bool r600_gpu_is_lockup(struct radeon_device *rdev)
1409{
1410 u32 srbm_status;
1411 u32 grbm_status;
1412 u32 grbm_status2;
Alex Deuchere29ff722010-12-21 16:05:38 -05001413 struct r100_gpu_lockup *lockup;
Jerome Glisse225758d2010-03-09 14:45:10 +00001414 int r;
1415
Alex Deuchere29ff722010-12-21 16:05:38 -05001416 if (rdev->family >= CHIP_RV770)
1417 lockup = &rdev->config.rv770.lockup;
1418 else
1419 lockup = &rdev->config.r600.lockup;
1420
Jerome Glisse225758d2010-03-09 14:45:10 +00001421 srbm_status = RREG32(R_000E50_SRBM_STATUS);
1422 grbm_status = RREG32(R_008010_GRBM_STATUS);
1423 grbm_status2 = RREG32(R_008014_GRBM_STATUS2);
1424 if (!G_008010_GUI_ACTIVE(grbm_status)) {
Alex Deuchere29ff722010-12-21 16:05:38 -05001425 r100_gpu_lockup_update(lockup, &rdev->cp);
Jerome Glisse225758d2010-03-09 14:45:10 +00001426 return false;
1427 }
1428 /* force CP activities */
1429 r = radeon_ring_lock(rdev, 2);
1430 if (!r) {
1431 /* PACKET2 NOP */
1432 radeon_ring_write(rdev, 0x80000000);
1433 radeon_ring_write(rdev, 0x80000000);
1434 radeon_ring_unlock_commit(rdev);
1435 }
1436 rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
Alex Deuchere29ff722010-12-21 16:05:38 -05001437 return r100_gpu_cp_is_lockup(rdev, lockup, &rdev->cp);
Jerome Glisse225758d2010-03-09 14:45:10 +00001438}
1439
Jerome Glissea2d07b72010-03-09 14:45:11 +00001440int r600_asic_reset(struct radeon_device *rdev)
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001441{
1442 return r600_gpu_soft_reset(rdev);
1443}
1444
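/* Map each tile pipe to a render backend, packing one 2-bit backend id
 * per pipe into the returned word. Illustrative example: with 2 pipes,
 * 2 backends and no disable mask, pipe 0 gets backend 0 and pipe 1 gets
 * backend 1, giving a backend_map of 0x4. */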
1445static u32 r600_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
1446 u32 num_backends,
1447 u32 backend_disable_mask)
1448{
1449 u32 backend_map = 0;
1450 u32 enabled_backends_mask;
1451 u32 enabled_backends_count;
1452 u32 cur_pipe;
1453 u32 swizzle_pipe[R6XX_MAX_PIPES];
1454 u32 cur_backend;
1455 u32 i;
1456
1457 if (num_tile_pipes > R6XX_MAX_PIPES)
1458 num_tile_pipes = R6XX_MAX_PIPES;
1459 if (num_tile_pipes < 1)
1460 num_tile_pipes = 1;
1461 if (num_backends > R6XX_MAX_BACKENDS)
1462 num_backends = R6XX_MAX_BACKENDS;
1463 if (num_backends < 1)
1464 num_backends = 1;
1465
1466 enabled_backends_mask = 0;
1467 enabled_backends_count = 0;
1468 for (i = 0; i < R6XX_MAX_BACKENDS; ++i) {
1469 if (((backend_disable_mask >> i) & 1) == 0) {
1470 enabled_backends_mask |= (1 << i);
1471 ++enabled_backends_count;
1472 }
1473 if (enabled_backends_count == num_backends)
1474 break;
1475 }
1476
1477 if (enabled_backends_count == 0) {
1478 enabled_backends_mask = 1;
1479 enabled_backends_count = 1;
1480 }
1481
1482 if (enabled_backends_count != num_backends)
1483 num_backends = enabled_backends_count;
1484
1485 memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R6XX_MAX_PIPES);
1486 switch (num_tile_pipes) {
1487 case 1:
1488 swizzle_pipe[0] = 0;
1489 break;
1490 case 2:
1491 swizzle_pipe[0] = 0;
1492 swizzle_pipe[1] = 1;
1493 break;
1494 case 3:
1495 swizzle_pipe[0] = 0;
1496 swizzle_pipe[1] = 1;
1497 swizzle_pipe[2] = 2;
1498 break;
1499 case 4:
1500 swizzle_pipe[0] = 0;
1501 swizzle_pipe[1] = 1;
1502 swizzle_pipe[2] = 2;
1503 swizzle_pipe[3] = 3;
1504 break;
1505 case 5:
1506 swizzle_pipe[0] = 0;
1507 swizzle_pipe[1] = 1;
1508 swizzle_pipe[2] = 2;
1509 swizzle_pipe[3] = 3;
1510 swizzle_pipe[4] = 4;
1511 break;
1512 case 6:
1513 swizzle_pipe[0] = 0;
1514 swizzle_pipe[1] = 2;
1515 swizzle_pipe[2] = 4;
1516 swizzle_pipe[3] = 5;
1517 swizzle_pipe[4] = 1;
1518 swizzle_pipe[5] = 3;
1519 break;
1520 case 7:
1521 swizzle_pipe[0] = 0;
1522 swizzle_pipe[1] = 2;
1523 swizzle_pipe[2] = 4;
1524 swizzle_pipe[3] = 6;
1525 swizzle_pipe[4] = 1;
1526 swizzle_pipe[5] = 3;
1527 swizzle_pipe[6] = 5;
1528 break;
1529 case 8:
1530 swizzle_pipe[0] = 0;
1531 swizzle_pipe[1] = 2;
1532 swizzle_pipe[2] = 4;
1533 swizzle_pipe[3] = 6;
1534 swizzle_pipe[4] = 1;
1535 swizzle_pipe[5] = 3;
1536 swizzle_pipe[6] = 5;
1537 swizzle_pipe[7] = 7;
1538 break;
1539 }
1540
1541 cur_backend = 0;
1542 for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
1543 while (((1 << cur_backend) & enabled_backends_mask) == 0)
1544 cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;
1545
1546 backend_map |= (u32)(((cur_backend & 3) << (swizzle_pipe[cur_pipe] * 2)));
1547
1548 cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;
1549 }
1550
1551 return backend_map;
1552}
1553
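/* Popcount: counts the bits set in val (functionally equivalent to the
 * kernel's hweight32()). */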
1554int r600_count_pipe_bits(uint32_t val)
1555{
1556 int i, ret = 0;
1557
1558 for (i = 0; i < 32; i++) {
1559 ret += val & 1;
1560 val >>= 1;
1561 }
1562 return ret;
1563}
1564
1565void r600_gpu_init(struct radeon_device *rdev)
1566{
1567 u32 tiling_config;
1568 u32 ramcfg;
Alex Deucherd03f5d52010-02-19 16:22:31 -05001569 u32 backend_map;
1570 u32 cc_rb_backend_disable;
1571 u32 cc_gc_shader_pipe_config;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001572 u32 tmp;
1573 int i, j;
1574 u32 sq_config;
1575 u32 sq_gpr_resource_mgmt_1 = 0;
1576 u32 sq_gpr_resource_mgmt_2 = 0;
1577 u32 sq_thread_resource_mgmt = 0;
1578 u32 sq_stack_resource_mgmt_1 = 0;
1579 u32 sq_stack_resource_mgmt_2 = 0;
1580
1581 /* FIXME: implement */
1582 switch (rdev->family) {
1583 case CHIP_R600:
1584 rdev->config.r600.max_pipes = 4;
1585 rdev->config.r600.max_tile_pipes = 8;
1586 rdev->config.r600.max_simds = 4;
1587 rdev->config.r600.max_backends = 4;
1588 rdev->config.r600.max_gprs = 256;
1589 rdev->config.r600.max_threads = 192;
1590 rdev->config.r600.max_stack_entries = 256;
1591 rdev->config.r600.max_hw_contexts = 8;
1592 rdev->config.r600.max_gs_threads = 16;
1593 rdev->config.r600.sx_max_export_size = 128;
1594 rdev->config.r600.sx_max_export_pos_size = 16;
1595 rdev->config.r600.sx_max_export_smx_size = 128;
1596 rdev->config.r600.sq_num_cf_insts = 2;
1597 break;
1598 case CHIP_RV630:
1599 case CHIP_RV635:
1600 rdev->config.r600.max_pipes = 2;
1601 rdev->config.r600.max_tile_pipes = 2;
1602 rdev->config.r600.max_simds = 3;
1603 rdev->config.r600.max_backends = 1;
1604 rdev->config.r600.max_gprs = 128;
1605 rdev->config.r600.max_threads = 192;
1606 rdev->config.r600.max_stack_entries = 128;
1607 rdev->config.r600.max_hw_contexts = 8;
1608 rdev->config.r600.max_gs_threads = 4;
1609 rdev->config.r600.sx_max_export_size = 128;
1610 rdev->config.r600.sx_max_export_pos_size = 16;
1611 rdev->config.r600.sx_max_export_smx_size = 128;
1612 rdev->config.r600.sq_num_cf_insts = 2;
1613 break;
1614 case CHIP_RV610:
1615 case CHIP_RV620:
1616 case CHIP_RS780:
1617 case CHIP_RS880:
1618 rdev->config.r600.max_pipes = 1;
1619 rdev->config.r600.max_tile_pipes = 1;
1620 rdev->config.r600.max_simds = 2;
1621 rdev->config.r600.max_backends = 1;
1622 rdev->config.r600.max_gprs = 128;
1623 rdev->config.r600.max_threads = 192;
1624 rdev->config.r600.max_stack_entries = 128;
1625 rdev->config.r600.max_hw_contexts = 4;
1626 rdev->config.r600.max_gs_threads = 4;
1627 rdev->config.r600.sx_max_export_size = 128;
1628 rdev->config.r600.sx_max_export_pos_size = 16;
1629 rdev->config.r600.sx_max_export_smx_size = 128;
1630 rdev->config.r600.sq_num_cf_insts = 1;
1631 break;
1632 case CHIP_RV670:
1633 rdev->config.r600.max_pipes = 4;
1634 rdev->config.r600.max_tile_pipes = 4;
1635 rdev->config.r600.max_simds = 4;
1636 rdev->config.r600.max_backends = 4;
1637 rdev->config.r600.max_gprs = 192;
1638 rdev->config.r600.max_threads = 192;
1639 rdev->config.r600.max_stack_entries = 256;
1640 rdev->config.r600.max_hw_contexts = 8;
1641 rdev->config.r600.max_gs_threads = 16;
1642 rdev->config.r600.sx_max_export_size = 128;
1643 rdev->config.r600.sx_max_export_pos_size = 16;
1644 rdev->config.r600.sx_max_export_smx_size = 128;
1645 rdev->config.r600.sq_num_cf_insts = 2;
1646 break;
1647 default:
1648 break;
1649 }
1650
1651 /* Initialize HDP */
1652 for (i = 0, j = 0; i < 32; i++, j += 0x18) {
1653 WREG32((0x2c14 + j), 0x00000000);
1654 WREG32((0x2c18 + j), 0x00000000);
1655 WREG32((0x2c1c + j), 0x00000000);
1656 WREG32((0x2c20 + j), 0x00000000);
1657 WREG32((0x2c24 + j), 0x00000000);
1658 }
1659
1660 WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
1661
1662 /* Setup tiling */
1663 tiling_config = 0;
1664 ramcfg = RREG32(RAMCFG);
1665 switch (rdev->config.r600.max_tile_pipes) {
1666 case 1:
1667 tiling_config |= PIPE_TILING(0);
1668 break;
1669 case 2:
1670 tiling_config |= PIPE_TILING(1);
1671 break;
1672 case 4:
1673 tiling_config |= PIPE_TILING(2);
1674 break;
1675 case 8:
1676 tiling_config |= PIPE_TILING(3);
1677 break;
1678 default:
1679 break;
1680 }
Alex Deucherd03f5d52010-02-19 16:22:31 -05001681 rdev->config.r600.tiling_npipes = rdev->config.r600.max_tile_pipes;
Jerome Glisse961fb592010-02-10 22:30:05 +00001682 rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001683 tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
Alex Deucher881fe6c2010-10-18 23:54:56 -04001684 tiling_config |= GROUP_SIZE((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
1685 if ((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT)
1686 rdev->config.r600.tiling_group_size = 512;
1687 else
1688 rdev->config.r600.tiling_group_size = 256;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001689 tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
1690 if (tmp > 3) {
1691 tiling_config |= ROW_TILING(3);
1692 tiling_config |= SAMPLE_SPLIT(3);
1693 } else {
1694 tiling_config |= ROW_TILING(tmp);
1695 tiling_config |= SAMPLE_SPLIT(tmp);
1696 }
1697 tiling_config |= BANK_SWAPS(1);
Alex Deucherd03f5d52010-02-19 16:22:31 -05001698
1699 cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
1700 cc_rb_backend_disable |=
1701 BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << rdev->config.r600.max_backends) & R6XX_MAX_BACKENDS_MASK);
1702
1703 cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
1704 cc_gc_shader_pipe_config |=
1705 INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << rdev->config.r600.max_pipes) & R6XX_MAX_PIPES_MASK);
1706 cc_gc_shader_pipe_config |=
1707 INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << rdev->config.r600.max_simds) & R6XX_MAX_SIMDS_MASK);
1708
1709 backend_map = r600_get_tile_pipe_to_backend_map(rdev->config.r600.max_tile_pipes,
1710 (R6XX_MAX_BACKENDS -
1711 r600_count_pipe_bits((cc_rb_backend_disable &
1712 R6XX_MAX_BACKENDS_MASK) >> 16)),
1713 (cc_rb_backend_disable >> 16));
Alex Deuchere7aeeba2010-06-04 13:10:12 -04001714 rdev->config.r600.tile_config = tiling_config;
Alex Deuchere55b9422011-07-15 19:53:52 +00001715 rdev->config.r600.backend_map = backend_map;
Alex Deucherd03f5d52010-02-19 16:22:31 -05001716 tiling_config |= BACKEND_MAP(backend_map);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001717 WREG32(GB_TILING_CONFIG, tiling_config);
1718 WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
1719 WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);
1720
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001721 /* Setup pipes */
Alex Deucherd03f5d52010-02-19 16:22:31 -05001722 WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
1723 WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
Alex Deucherf867c60d2010-03-05 14:50:37 -05001724 WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001725
Alex Deucherd03f5d52010-02-19 16:22:31 -05001726 tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001727 WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
1728 WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK);
1729
1730 /* Setup some CP states */
1731 WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | ROQ_IB2_START(0x2b)));
1732 WREG32(CP_MEQ_THRESHOLDS, (MEQ_END(0x40) | ROQ_END(0x40)));
1733
1734 WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO | SYNC_GRADIENT |
1735 SYNC_WALKER | SYNC_ALIGNER));
1736 /* Setup various GPU states */
1737 if (rdev->family == CHIP_RV670)
1738 WREG32(ARB_GDEC_RD_CNTL, 0x00000021);
1739
1740 tmp = RREG32(SX_DEBUG_1);
1741 tmp |= SMX_EVENT_RELEASE;
1742 if ((rdev->family > CHIP_R600))
1743 tmp |= ENABLE_NEW_SMX_ADDRESS;
1744 WREG32(SX_DEBUG_1, tmp);
1745
1746 if (((rdev->family) == CHIP_R600) ||
1747 ((rdev->family) == CHIP_RV630) ||
1748 ((rdev->family) == CHIP_RV610) ||
1749 ((rdev->family) == CHIP_RV620) ||
Alex Deucheree59f2b2009-11-05 13:11:46 -05001750 ((rdev->family) == CHIP_RS780) ||
1751 ((rdev->family) == CHIP_RS880)) {
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001752 WREG32(DB_DEBUG, PREZ_MUST_WAIT_FOR_POSTZ_DONE);
1753 } else {
1754 WREG32(DB_DEBUG, 0);
1755 }
1756 WREG32(DB_WATERMARKS, (DEPTH_FREE(4) | DEPTH_CACHELINE_FREE(16) |
1757 DEPTH_FLUSH(16) | DEPTH_PENDING_FREE(4)));
1758
1759 WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
1760 WREG32(VGT_NUM_INSTANCES, 0);
1761
1762 WREG32(SPI_CONFIG_CNTL, GPR_WRITE_PRIORITY(0));
1763 WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(0));
1764
1765 tmp = RREG32(SQ_MS_FIFO_SIZES);
1766 if (((rdev->family) == CHIP_RV610) ||
1767 ((rdev->family) == CHIP_RV620) ||
Alex Deucheree59f2b2009-11-05 13:11:46 -05001768 ((rdev->family) == CHIP_RS780) ||
1769 ((rdev->family) == CHIP_RS880)) {
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001770 tmp = (CACHE_FIFO_SIZE(0xa) |
1771 FETCH_FIFO_HIWATER(0xa) |
1772 DONE_FIFO_HIWATER(0xe0) |
1773 ALU_UPDATE_FIFO_HIWATER(0x8));
1774 } else if (((rdev->family) == CHIP_R600) ||
1775 ((rdev->family) == CHIP_RV630)) {
1776 tmp &= ~DONE_FIFO_HIWATER(0xff);
1777 tmp |= DONE_FIFO_HIWATER(0x4);
1778 }
1779 WREG32(SQ_MS_FIFO_SIZES, tmp);
1780
1781 /* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT
1782 * should be adjusted as needed by the 2D/3D drivers. This just sets default values
1783 */
1784 sq_config = RREG32(SQ_CONFIG);
1785 sq_config &= ~(PS_PRIO(3) |
1786 VS_PRIO(3) |
1787 GS_PRIO(3) |
1788 ES_PRIO(3));
1789 sq_config |= (DX9_CONSTS |
1790 VC_ENABLE |
1791 PS_PRIO(0) |
1792 VS_PRIO(1) |
1793 GS_PRIO(2) |
1794 ES_PRIO(3));
1795
1796 if ((rdev->family) == CHIP_R600) {
1797 sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(124) |
1798 NUM_VS_GPRS(124) |
1799 NUM_CLAUSE_TEMP_GPRS(4));
1800 sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(0) |
1801 NUM_ES_GPRS(0));
1802 sq_thread_resource_mgmt = (NUM_PS_THREADS(136) |
1803 NUM_VS_THREADS(48) |
1804 NUM_GS_THREADS(4) |
1805 NUM_ES_THREADS(4));
1806 sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(128) |
1807 NUM_VS_STACK_ENTRIES(128));
1808 sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(0) |
1809 NUM_ES_STACK_ENTRIES(0));
1810 } else if (((rdev->family) == CHIP_RV610) ||
1811 ((rdev->family) == CHIP_RV620) ||
Alex Deucheree59f2b2009-11-05 13:11:46 -05001812 ((rdev->family) == CHIP_RS780) ||
1813 ((rdev->family) == CHIP_RS880)) {
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001814 /* no vertex cache */
1815 sq_config &= ~VC_ENABLE;
1816
1817 sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
1818 NUM_VS_GPRS(44) |
1819 NUM_CLAUSE_TEMP_GPRS(2));
1820 sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
1821 NUM_ES_GPRS(17));
1822 sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
1823 NUM_VS_THREADS(78) |
1824 NUM_GS_THREADS(4) |
1825 NUM_ES_THREADS(31));
1826 sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
1827 NUM_VS_STACK_ENTRIES(40));
1828 sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
1829 NUM_ES_STACK_ENTRIES(16));
1830 } else if (((rdev->family) == CHIP_RV630) ||
1831 ((rdev->family) == CHIP_RV635)) {
1832 sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
1833 NUM_VS_GPRS(44) |
1834 NUM_CLAUSE_TEMP_GPRS(2));
1835 sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(18) |
1836 NUM_ES_GPRS(18));
1837 sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
1838 NUM_VS_THREADS(78) |
1839 NUM_GS_THREADS(4) |
1840 NUM_ES_THREADS(31));
1841 sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
1842 NUM_VS_STACK_ENTRIES(40));
1843 sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
1844 NUM_ES_STACK_ENTRIES(16));
1845 } else if ((rdev->family) == CHIP_RV670) {
1846 sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
1847 NUM_VS_GPRS(44) |
1848 NUM_CLAUSE_TEMP_GPRS(2));
1849 sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
1850 NUM_ES_GPRS(17));
1851 sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
1852 NUM_VS_THREADS(78) |
1853 NUM_GS_THREADS(4) |
1854 NUM_ES_THREADS(31));
1855 sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(64) |
1856 NUM_VS_STACK_ENTRIES(64));
1857 sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(64) |
1858 NUM_ES_STACK_ENTRIES(64));
1859 }
1860
1861 WREG32(SQ_CONFIG, sq_config);
1862 WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1);
1863 WREG32(SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2);
1864 WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
1865 WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
1866 WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);
1867
1868 if (((rdev->family) == CHIP_RV610) ||
1869 ((rdev->family) == CHIP_RV620) ||
Alex Deucheree59f2b2009-11-05 13:11:46 -05001870 ((rdev->family) == CHIP_RS780) ||
1871 ((rdev->family) == CHIP_RS880)) {
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001872 WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(TC_ONLY));
1873 } else {
1874 WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC));
1875 }
1876
1877 /* More default values. 2D/3D driver should adjust as needed */
1878 WREG32(PA_SC_AA_SAMPLE_LOCS_2S, (S0_X(0xc) | S0_Y(0x4) |
1879 S1_X(0x4) | S1_Y(0xc)));
1880 WREG32(PA_SC_AA_SAMPLE_LOCS_4S, (S0_X(0xe) | S0_Y(0xe) |
1881 S1_X(0x2) | S1_Y(0x2) |
1882 S2_X(0xa) | S2_Y(0x6) |
1883 S3_X(0x6) | S3_Y(0xa)));
1884 WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD0, (S0_X(0xe) | S0_Y(0xb) |
1885 S1_X(0x4) | S1_Y(0xc) |
1886 S2_X(0x1) | S2_Y(0x6) |
1887 S3_X(0xa) | S3_Y(0xe)));
1888 WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD1, (S4_X(0x6) | S4_Y(0x1) |
1889 S5_X(0x0) | S5_Y(0x0) |
1890 S6_X(0xb) | S6_Y(0x4) |
1891 S7_X(0x7) | S7_Y(0x8)));
1892
1893 WREG32(VGT_STRMOUT_EN, 0);
1894 tmp = rdev->config.r600.max_pipes * 16;
1895 switch (rdev->family) {
1896 case CHIP_RV610:
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001897 case CHIP_RV620:
Alex Deucheree59f2b2009-11-05 13:11:46 -05001898 case CHIP_RS780:
1899 case CHIP_RS880:
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001900 tmp += 32;
1901 break;
1902 case CHIP_RV670:
1903 tmp += 128;
1904 break;
1905 default:
1906 break;
1907 }
1908 if (tmp > 256) {
1909 tmp = 256;
1910 }
1911 WREG32(VGT_ES_PER_GS, 128);
1912 WREG32(VGT_GS_PER_ES, tmp);
1913 WREG32(VGT_GS_PER_VS, 2);
1914 WREG32(VGT_GS_VERTEX_REUSE, 16);
1915
1916 /* more default values. 2D/3D driver should adjust as needed */
1917 WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
1918 WREG32(VGT_STRMOUT_EN, 0);
1919 WREG32(SX_MISC, 0);
1920 WREG32(PA_SC_MODE_CNTL, 0);
1921 WREG32(PA_SC_AA_CONFIG, 0);
1922 WREG32(PA_SC_LINE_STIPPLE, 0);
1923 WREG32(SPI_INPUT_Z, 0);
1924 WREG32(SPI_PS_IN_CONTROL_0, NUM_INTERP(2));
1925 WREG32(CB_COLOR7_FRAG, 0);
1926
1927 /* Clear render buffer base addresses */
1928 WREG32(CB_COLOR0_BASE, 0);
1929 WREG32(CB_COLOR1_BASE, 0);
1930 WREG32(CB_COLOR2_BASE, 0);
1931 WREG32(CB_COLOR3_BASE, 0);
1932 WREG32(CB_COLOR4_BASE, 0);
1933 WREG32(CB_COLOR5_BASE, 0);
1934 WREG32(CB_COLOR6_BASE, 0);
1935 WREG32(CB_COLOR7_BASE, 0);
1936 WREG32(CB_COLOR7_FRAG, 0);
1937
1938 switch (rdev->family) {
1939 case CHIP_RV610:
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001940 case CHIP_RV620:
Alex Deucheree59f2b2009-11-05 13:11:46 -05001941 case CHIP_RS780:
1942 case CHIP_RS880:
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001943 tmp = TC_L2_SIZE(8);
1944 break;
1945 case CHIP_RV630:
1946 case CHIP_RV635:
1947 tmp = TC_L2_SIZE(4);
1948 break;
1949 case CHIP_R600:
1950 tmp = TC_L2_SIZE(0) | L2_DISABLE_LATE_HIT;
1951 break;
1952 default:
1953 tmp = TC_L2_SIZE(0);
1954 break;
1955 }
1956 WREG32(TC_CNTL, tmp);
1957
1958 tmp = RREG32(HDP_HOST_PATH_CNTL);
1959 WREG32(HDP_HOST_PATH_CNTL, tmp);
1960
1961 tmp = RREG32(ARB_POP);
1962 tmp |= ENABLE_TC128;
1963 WREG32(ARB_POP, tmp);
1964
1965 WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
1966 WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
1967 NUM_CLIP_SEQ(3)));
1968 WREG32(PA_SC_ENHANCE, FORCE_EOV_MAX_CLK_CNT(4095));
1969}
1970
1971
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001972/*
1973 * Indirect register accessors
1974 */
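/* These use a classic index/data register pair: write the register offset
 * to PCIE_PORT_INDEX, then access PCIE_PORT_DATA. The (void) reads act as
 * posting reads to order the index write before the data access. */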
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001975u32 r600_pciep_rreg(struct radeon_device *rdev, u32 reg)
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001976{
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001977 u32 r;
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001978
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001979 WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
1980 (void)RREG32(PCIE_PORT_INDEX);
1981 r = RREG32(PCIE_PORT_DATA);
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001982 return r;
1983}
1984
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001985void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001986{
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001987 WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
1988 (void)RREG32(PCIE_PORT_INDEX);
1989 WREG32(PCIE_PORT_DATA, (v));
1990 (void)RREG32(PCIE_PORT_DATA);
1991}
1992
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001993/*
1994 * CP & Ring
1995 */
1996void r600_cp_stop(struct radeon_device *rdev)
1997{
Dave Airlie53595332011-03-14 09:47:24 +10001998 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001999 WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
Alex Deucher724c80e2010-08-27 18:25:25 -04002000 WREG32(SCRATCH_UMSK, 0);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002001}
2002
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002003int r600_init_microcode(struct radeon_device *rdev)
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002004{
2005 struct platform_device *pdev;
2006 const char *chip_name;
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002007 const char *rlc_chip_name;
2008 size_t pfp_req_size, me_req_size, rlc_req_size;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002009 char fw_name[30];
2010 int err;
2011
2012 DRM_DEBUG("\n");
2013
2014 pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
2015 err = IS_ERR(pdev);
2016 if (err) {
2017 printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
2018 return -EINVAL;
2019 }
2020
2021 switch (rdev->family) {
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002022 case CHIP_R600:
2023 chip_name = "R600";
2024 rlc_chip_name = "R600";
2025 break;
2026 case CHIP_RV610:
2027 chip_name = "RV610";
2028 rlc_chip_name = "R600";
2029 break;
2030 case CHIP_RV630:
2031 chip_name = "RV630";
2032 rlc_chip_name = "R600";
2033 break;
2034 case CHIP_RV620:
2035 chip_name = "RV620";
2036 rlc_chip_name = "R600";
2037 break;
2038 case CHIP_RV635:
2039 chip_name = "RV635";
2040 rlc_chip_name = "R600";
2041 break;
2042 case CHIP_RV670:
2043 chip_name = "RV670";
2044 rlc_chip_name = "R600";
2045 break;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002046 case CHIP_RS780:
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002047 case CHIP_RS880:
2048 chip_name = "RS780";
2049 rlc_chip_name = "R600";
2050 break;
2051 case CHIP_RV770:
2052 chip_name = "RV770";
2053 rlc_chip_name = "R700";
2054 break;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002055 case CHIP_RV730:
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002056 case CHIP_RV740:
2057 chip_name = "RV730";
2058 rlc_chip_name = "R700";
2059 break;
2060 case CHIP_RV710:
2061 chip_name = "RV710";
2062 rlc_chip_name = "R700";
2063 break;
Alex Deucherfe251e22010-03-24 13:36:43 -04002064 case CHIP_CEDAR:
2065 chip_name = "CEDAR";
Alex Deucher45f9a392010-03-24 13:55:51 -04002066 rlc_chip_name = "CEDAR";
Alex Deucherfe251e22010-03-24 13:36:43 -04002067 break;
2068 case CHIP_REDWOOD:
2069 chip_name = "REDWOOD";
Alex Deucher45f9a392010-03-24 13:55:51 -04002070 rlc_chip_name = "REDWOOD";
Alex Deucherfe251e22010-03-24 13:36:43 -04002071 break;
2072 case CHIP_JUNIPER:
2073 chip_name = "JUNIPER";
Alex Deucher45f9a392010-03-24 13:55:51 -04002074 rlc_chip_name = "JUNIPER";
Alex Deucherfe251e22010-03-24 13:36:43 -04002075 break;
2076 case CHIP_CYPRESS:
2077 case CHIP_HEMLOCK:
2078 chip_name = "CYPRESS";
Alex Deucher45f9a392010-03-24 13:55:51 -04002079 rlc_chip_name = "CYPRESS";
Alex Deucherfe251e22010-03-24 13:36:43 -04002080 break;
Alex Deucher439bd6c2010-11-22 17:56:31 -05002081 case CHIP_PALM:
2082 chip_name = "PALM";
2083 rlc_chip_name = "SUMO";
2084 break;
Alex Deucherd5c5a722011-05-31 15:42:48 -04002085 case CHIP_SUMO:
2086 chip_name = "SUMO";
2087 rlc_chip_name = "SUMO";
2088 break;
2089 case CHIP_SUMO2:
2090 chip_name = "SUMO2";
2091 rlc_chip_name = "SUMO";
2092 break;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002093 default: BUG();
2094 }
2095
Alex Deucherfe251e22010-03-24 13:36:43 -04002096 if (rdev->family >= CHIP_CEDAR) {
2097 pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
2098 me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
Alex Deucher45f9a392010-03-24 13:55:51 -04002099 rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
Alex Deucherfe251e22010-03-24 13:36:43 -04002100 } else if (rdev->family >= CHIP_RV770) {
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002101 pfp_req_size = R700_PFP_UCODE_SIZE * 4;
2102 me_req_size = R700_PM4_UCODE_SIZE * 4;
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002103 rlc_req_size = R700_RLC_UCODE_SIZE * 4;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002104 } else {
2105 pfp_req_size = PFP_UCODE_SIZE * 4;
2106 me_req_size = PM4_UCODE_SIZE * 12;
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002107 rlc_req_size = RLC_UCODE_SIZE * 4;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002108 }
2109
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002110 DRM_INFO("Loading %s Microcode\n", chip_name);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002111
2112 snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
2113 err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
2114 if (err)
2115 goto out;
2116 if (rdev->pfp_fw->size != pfp_req_size) {
2117 printk(KERN_ERR
2118 "r600_cp: Bogus length %zu in firmware \"%s\"\n",
2119 rdev->pfp_fw->size, fw_name);
2120 err = -EINVAL;
2121 goto out;
2122 }
2123
2124 snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
2125 err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
2126 if (err)
2127 goto out;
2128 if (rdev->me_fw->size != me_req_size) {
2129 printk(KERN_ERR
2130 "r600_cp: Bogus length %zu in firmware \"%s\"\n",
2131 rdev->me_fw->size, fw_name);
2132 err = -EINVAL;
2133 }
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002134
2135 snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
2136 err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev);
2137 if (err)
2138 goto out;
2139 if (rdev->rlc_fw->size != rlc_req_size) {
2140 printk(KERN_ERR
2141 "r600_rlc: Bogus length %zu in firmware \"%s\"\n",
2142 rdev->rlc_fw->size, fw_name);
2143 err = -EINVAL;
2144 }
2145
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002146out:
2147 platform_device_unregister(pdev);
2148
2149 if (err) {
2150 if (err != -EINVAL)
2151 printk(KERN_ERR
2152 "r600_cp: Failed to load firmware \"%s\"\n",
2153 fw_name);
2154 release_firmware(rdev->pfp_fw);
2155 rdev->pfp_fw = NULL;
2156 release_firmware(rdev->me_fw);
2157 rdev->me_fw = NULL;
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002158 release_firmware(rdev->rlc_fw);
2159 rdev->rlc_fw = NULL;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002160 }
2161 return err;
2162}
2163
2164static int r600_cp_load_microcode(struct radeon_device *rdev)
2165{
2166 const __be32 *fw_data;
2167 int i;
2168
2169 if (!rdev->me_fw || !rdev->pfp_fw)
2170 return -EINVAL;
2171
2172 r600_cp_stop(rdev);
2173
Cédric Cano4eace7f2011-02-11 19:45:38 -05002174 WREG32(CP_RB_CNTL,
2175#ifdef __BIG_ENDIAN
2176 BUF_SWAP_32BIT |
2177#endif
2178 RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002179
2180 /* Reset cp */
2181 WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
2182 RREG32(GRBM_SOFT_RESET);
2183 mdelay(15);
2184 WREG32(GRBM_SOFT_RESET, 0);
2185
2186 WREG32(CP_ME_RAM_WADDR, 0);
2187
2188 fw_data = (const __be32 *)rdev->me_fw->data;
2189 WREG32(CP_ME_RAM_WADDR, 0);
2190 for (i = 0; i < PM4_UCODE_SIZE * 3; i++)
2191 WREG32(CP_ME_RAM_DATA,
2192 be32_to_cpup(fw_data++));
2193
2194 fw_data = (const __be32 *)rdev->pfp_fw->data;
2195 WREG32(CP_PFP_UCODE_ADDR, 0);
2196 for (i = 0; i < PFP_UCODE_SIZE; i++)
2197 WREG32(CP_PFP_UCODE_DATA,
2198 be32_to_cpup(fw_data++));
2199
2200 WREG32(CP_PFP_UCODE_ADDR, 0);
2201 WREG32(CP_ME_RAM_WADDR, 0);
2202 WREG32(CP_ME_RAM_RADDR, 0);
2203 return 0;
2204}
2205
2206int r600_cp_start(struct radeon_device *rdev)
2207{
2208 int r;
2209 uint32_t cp_me;
2210
2211 r = radeon_ring_lock(rdev, 7);
2212 if (r) {
2213 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
2214 return r;
2215 }
2216 radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
2217 radeon_ring_write(rdev, 0x1);
Alex Deucher7e7b41d2010-09-02 21:32:32 -04002218 if (rdev->family >= CHIP_RV770) {
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002219 radeon_ring_write(rdev, 0x0);
2220 radeon_ring_write(rdev, rdev->config.rv770.max_hw_contexts - 1);
Alex Deucherfe251e22010-03-24 13:36:43 -04002221 } else {
2222 radeon_ring_write(rdev, 0x3);
2223 radeon_ring_write(rdev, rdev->config.r600.max_hw_contexts - 1);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002224 }
2225 radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
2226 radeon_ring_write(rdev, 0);
2227 radeon_ring_write(rdev, 0);
2228 radeon_ring_unlock_commit(rdev);
2229
2230 cp_me = 0xff;
2231 WREG32(R_0086D8_CP_ME_CNTL, cp_me);
2232 return 0;
2233}
2234
2235int r600_cp_resume(struct radeon_device *rdev)
2236{
2237 u32 tmp;
2238 u32 rb_bufsz;
2239 int r;
2240
2241 /* Reset cp */
2242 WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
2243 RREG32(GRBM_SOFT_RESET);
2244 mdelay(15);
2245 WREG32(GRBM_SOFT_RESET, 0);
2246
2247 /* Set ring buffer size */
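	/* RB_BUFSZ is log2 of the ring size in 8-byte units; e.g.
	 * (illustrative) a 1MB ring gives rb_bufsz = 17. */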
2248 rb_bufsz = drm_order(rdev->cp.ring_size / 8);
Alex Deucher724c80e2010-08-27 18:25:25 -04002249 tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002250#ifdef __BIG_ENDIAN
Alex Deucherd6f28932009-11-02 16:01:27 -05002251 tmp |= BUF_SWAP_32BIT;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002252#endif
Alex Deucherd6f28932009-11-02 16:01:27 -05002253 WREG32(CP_RB_CNTL, tmp);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002254 WREG32(CP_SEM_WAIT_TIMER, 0x4);
2255
2256 /* Set the write pointer delay */
2257 WREG32(CP_RB_WPTR_DELAY, 0);
2258
2259 /* Initialize the ring buffer's read and write pointers */
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002260 WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
2261 WREG32(CP_RB_RPTR_WR, 0);
Michel Dänzer87463ff2011-09-13 11:27:35 +02002262 rdev->cp.wptr = 0;
2263 WREG32(CP_RB_WPTR, rdev->cp.wptr);
Alex Deucher724c80e2010-08-27 18:25:25 -04002264
2265 /* set the wb address whether it's enabled or not */
Cédric Cano4eace7f2011-02-11 19:45:38 -05002266 WREG32(CP_RB_RPTR_ADDR,
Cédric Cano4eace7f2011-02-11 19:45:38 -05002267 ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
Alex Deucher724c80e2010-08-27 18:25:25 -04002268 WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
2269 WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
2270
2271 if (rdev->wb.enabled)
2272 WREG32(SCRATCH_UMSK, 0xff);
2273 else {
2274 tmp |= RB_NO_UPDATE;
2275 WREG32(SCRATCH_UMSK, 0);
2276 }
2277
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002278 mdelay(1);
2279 WREG32(CP_RB_CNTL, tmp);
2280
2281 WREG32(CP_RB_BASE, rdev->cp.gpu_addr >> 8);
2282 WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
2283
2284 rdev->cp.rptr = RREG32(CP_RB_RPTR);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002285
2286 r600_cp_start(rdev);
2287 rdev->cp.ready = true;
2288 r = radeon_ring_test(rdev);
2289 if (r) {
2290 rdev->cp.ready = false;
2291 return r;
2292 }
2293 return 0;
2294}
2295
2296void r600_cp_commit(struct radeon_device *rdev)
2297{
2298 WREG32(CP_RB_WPTR, rdev->cp.wptr);
2299 (void)RREG32(CP_RB_WPTR);
2300}
2301
2302void r600_ring_init(struct radeon_device *rdev, unsigned ring_size)
2303{
2304 u32 rb_bufsz;
2305
2306 /* Align ring size */
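	/* drm_order() rounds up to the next power of two, so the usable ring
	 * size is always a power-of-two number of bytes; e.g. (illustrative)
	 * a requested 96KB ring becomes 128KB. */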
2307 rb_bufsz = drm_order(ring_size / 8);
2308 ring_size = (1 << (rb_bufsz + 1)) * 4;
2309 rdev->cp.ring_size = ring_size;
2310 rdev->cp.align_mask = 16 - 1;
2311}
2312
Jerome Glisse655efd32010-02-02 11:51:45 +01002313void r600_cp_fini(struct radeon_device *rdev)
2314{
2315 r600_cp_stop(rdev);
2316 radeon_ring_fini(rdev);
2317}
2318
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002319
2320/*
2321 * GPU scratch registers helpers function.
2322 */
2323void r600_scratch_init(struct radeon_device *rdev)
2324{
2325 int i;
2326
2327 rdev->scratch.num_reg = 7;
Alex Deucher724c80e2010-08-27 18:25:25 -04002328 rdev->scratch.reg_base = SCRATCH_REG0;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002329 for (i = 0; i < rdev->scratch.num_reg; i++) {
2330 rdev->scratch.free[i] = true;
Alex Deucher724c80e2010-08-27 18:25:25 -04002331 rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002332 }
2333}
2334
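/* Basic CP sanity test: seed a scratch register with a magic value, then
 * overwrite it with a different value via a ring packet and poll until it
 * lands, proving the CP fetches and executes ring commands. */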
2335int r600_ring_test(struct radeon_device *rdev)
2336{
2337 uint32_t scratch;
2338 uint32_t tmp = 0;
2339 unsigned i;
2340 int r;
2341
2342 r = radeon_scratch_get(rdev, &scratch);
2343 if (r) {
2344 DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
2345 return r;
2346 }
2347 WREG32(scratch, 0xCAFEDEAD);
2348 r = radeon_ring_lock(rdev, 3);
2349 if (r) {
2350 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
2351 radeon_scratch_free(rdev, scratch);
2352 return r;
2353 }
2354 radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2355 radeon_ring_write(rdev, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
2356 radeon_ring_write(rdev, 0xDEADBEEF);
2357 radeon_ring_unlock_commit(rdev);
2358 for (i = 0; i < rdev->usec_timeout; i++) {
2359 tmp = RREG32(scratch);
2360 if (tmp == 0xDEADBEEF)
2361 break;
2362 DRM_UDELAY(1);
2363 }
2364 if (i < rdev->usec_timeout) {
2365 DRM_INFO("ring test succeeded in %d usecs\n", i);
2366 } else {
2367 DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n",
2368 scratch, tmp);
2369 r = -EINVAL;
2370 }
2371 radeon_scratch_free(rdev, scratch);
2372 return r;
2373}
2374
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002375void r600_fence_ring_emit(struct radeon_device *rdev,
2376 struct radeon_fence *fence)
2377{
Alex Deucherd0f8a852010-09-04 05:04:34 -04002378 if (rdev->wb.use_event) {
2379 u64 addr = rdev->wb.gpu_addr + R600_WB_EVENT_OFFSET +
2380 (u64)(rdev->fence_drv.scratch_reg - rdev->scratch.reg_base);
Jerome Glisse77b1bad2011-10-26 11:41:22 -04002381 /* flush read cache over gart */
2382 radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_SYNC, 3));
2383 radeon_ring_write(rdev, PACKET3_TC_ACTION_ENA |
2384 PACKET3_VC_ACTION_ENA |
2385 PACKET3_SH_ACTION_ENA);
2386 radeon_ring_write(rdev, 0xFFFFFFFF);
2387 radeon_ring_write(rdev, 0);
2388 radeon_ring_write(rdev, 10); /* poll interval */
Alex Deucherd0f8a852010-09-04 05:04:34 -04002389 /* EVENT_WRITE_EOP - flush caches, send int */
2390 radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
2391 radeon_ring_write(rdev, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
2392 radeon_ring_write(rdev, addr & 0xffffffff);
2393 radeon_ring_write(rdev, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
2394 radeon_ring_write(rdev, fence->seq);
2395 radeon_ring_write(rdev, 0);
2396 } else {
Jerome Glisse77b1bad2011-10-26 11:41:22 -04002397 /* flush read cache over gart */
2398 radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_SYNC, 3));
2399 radeon_ring_write(rdev, PACKET3_TC_ACTION_ENA |
2400 PACKET3_VC_ACTION_ENA |
2401 PACKET3_SH_ACTION_ENA);
2402 radeon_ring_write(rdev, 0xFFFFFFFF);
2403 radeon_ring_write(rdev, 0);
2404 radeon_ring_write(rdev, 10); /* poll interval */
Alex Deucherd0f8a852010-09-04 05:04:34 -04002405 radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0));
2406 radeon_ring_write(rdev, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0));
2407 /* wait for 3D idle clean */
2408 radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2409 radeon_ring_write(rdev, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
2410 radeon_ring_write(rdev, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
2411 /* Emit fence sequence & fire IRQ */
2412 radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2413 radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
2414 radeon_ring_write(rdev, fence->seq);
2415 /* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
2416 radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0));
2417 radeon_ring_write(rdev, RB_INT_STAT);
2418 }
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002419}
2420
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002421int r600_copy_blit(struct radeon_device *rdev,
Alex Deucher003cefe2011-09-16 12:04:08 -04002422 uint64_t src_offset,
2423 uint64_t dst_offset,
2424 unsigned num_gpu_pages,
2425 struct radeon_fence *fence)
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002426{
Jerome Glisseff82f052010-01-22 15:19:00 +01002427 int r;
2428
2429 mutex_lock(&rdev->r600_blit.mutex);
2430 rdev->r600_blit.vb_ib = NULL;
Dave Airlie017ed802011-10-18 10:54:30 +01002431 r = r600_blit_prepare_copy(rdev, num_gpu_pages);
Jerome Glisseff82f052010-01-22 15:19:00 +01002432 if (r) {
2433 if (rdev->r600_blit.vb_ib)
2434 radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
2435 mutex_unlock(&rdev->r600_blit.mutex);
2436 return r;
2437 }
Dave Airlie017ed802011-10-18 10:54:30 +01002438 r600_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002439 r600_blit_done_copy(rdev, fence);
Jerome Glisseff82f052010-01-22 15:19:00 +01002440 mutex_unlock(&rdev->r600_blit.mutex);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002441 return 0;
2442}
2443
Alex Deucher6ddddfe2011-10-14 10:51:22 -04002444void r600_blit_suspend(struct radeon_device *rdev)
2445{
2446 int r;
2447
2448 /* unpin shaders bo */
2449 if (rdev->r600_blit.shader_obj) {
2450 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
2451 if (!r) {
2452 radeon_bo_unpin(rdev->r600_blit.shader_obj);
2453 radeon_bo_unreserve(rdev->r600_blit.shader_obj);
2454 }
2455 }
2456}
2457
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002458int r600_set_surface_reg(struct radeon_device *rdev, int reg,
2459 uint32_t tiling_flags, uint32_t pitch,
2460 uint32_t offset, uint32_t obj_size)
2461{
2462 /* FIXME: implement */
2463 return 0;
2464}
2465
2466void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
2467{
2468 /* FIXME: implement */
2469}
2470
Dave Airliefc30b8e2009-09-18 15:19:37 +10002471int r600_startup(struct radeon_device *rdev)
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002472{
2473 int r;
2474
Alex Deucher9e46a482011-01-06 18:49:35 -05002475 /* enable pcie gen2 link */
2476 r600_pcie_gen2_enable(rdev);
2477
Alex Deucher779720a2009-12-09 19:31:44 -05002478 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
2479 r = r600_init_microcode(rdev);
2480 if (r) {
2481 DRM_ERROR("Failed to load firmware!\n");
2482 return r;
2483 }
2484 }
2485
Alex Deucher16cdf042011-10-28 10:30:02 -04002486 r = r600_vram_scratch_init(rdev);
2487 if (r)
2488 return r;
2489
Jerome Glissea3c19452009-10-01 18:02:13 +02002490 r600_mc_program(rdev);
Jerome Glisse1a029b72009-10-06 19:04:30 +02002491 if (rdev->flags & RADEON_IS_AGP) {
2492 r600_agp_enable(rdev);
2493 } else {
2494 r = r600_pcie_gart_enable(rdev);
2495 if (r)
2496 return r;
2497 }
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002498 r600_gpu_init(rdev);
Jerome Glissec38c7b62010-02-04 17:27:27 +01002499 r = r600_blit_init(rdev);
2500 if (r) {
2501 r600_blit_fini(rdev);
2502 rdev->asic->copy = NULL;
2503		dev_warn(rdev->dev, "failed blitter (%d), falling back to memcpy\n", r);
2504 }
Alex Deucherb70d6bb2010-08-06 21:36:58 -04002505
Alex Deucher724c80e2010-08-27 18:25:25 -04002506 /* allocate wb buffer */
2507 r = radeon_wb_init(rdev);
2508 if (r)
2509 return r;
2510
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002511 /* Enable IRQ */
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002512 r = r600_irq_init(rdev);
2513 if (r) {
2514 DRM_ERROR("radeon: IH init failed (%d).\n", r);
2515 radeon_irq_kms_fini(rdev);
2516 return r;
2517 }
2518 r600_irq_set(rdev);
2519
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002520 r = radeon_ring_init(rdev, rdev->cp.ring_size);
2521 if (r)
2522 return r;
2523 r = r600_cp_load_microcode(rdev);
2524 if (r)
2525 return r;
2526 r = r600_cp_resume(rdev);
2527 if (r)
2528 return r;
Alex Deucher724c80e2010-08-27 18:25:25 -04002529
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002530 return 0;
2531}
2532
Dave Airlie28d52042009-09-21 14:33:58 +10002533void r600_vga_set_state(struct radeon_device *rdev, bool state)
2534{
2535 uint32_t temp;
2536
2537 temp = RREG32(CONFIG_CNTL);
2538 if (state == false) {
2539 temp &= ~(1<<0);
2540 temp |= (1<<1);
2541 } else {
2542 temp &= ~(1<<1);
2543 }
2544 WREG32(CONFIG_CNTL, temp);
2545}
2546
Dave Airliefc30b8e2009-09-18 15:19:37 +10002547int r600_resume(struct radeon_device *rdev)
2548{
2549 int r;
2550
Jerome Glisse1a029b72009-10-06 19:04:30 +02002551	/* Do not reset the GPU before posting; on r600 hw, unlike on r500 hw,
2552	 * posting will perform the necessary tasks to bring the GPU back into
2553	 * good shape.
2554 */
Dave Airliefc30b8e2009-09-18 15:19:37 +10002555 /* post card */
Jerome Glissee7d40b92009-10-01 18:02:15 +02002556 atom_asic_init(rdev->mode_info.atom_context);
Dave Airliefc30b8e2009-09-18 15:19:37 +10002557
2558 r = r600_startup(rdev);
2559 if (r) {
2560 DRM_ERROR("r600 startup failed on resume\n");
2561 return r;
2562 }
2563
Jerome Glisse62a8ea32009-10-01 18:02:11 +02002564 r = r600_ib_test(rdev);
Dave Airliefc30b8e2009-09-18 15:19:37 +10002565 if (r) {
Paul Bolleec4f2ac2011-01-28 23:32:04 +01002566 DRM_ERROR("radeon: failed testing IB (%d).\n", r);
Dave Airliefc30b8e2009-09-18 15:19:37 +10002567 return r;
2568 }
Rafał Miłecki38fd2c62010-01-28 18:16:30 +01002569
2570 r = r600_audio_init(rdev);
2571 if (r) {
2572 DRM_ERROR("radeon: audio resume failed\n");
2573 return r;
2574 }
2575
Dave Airliefc30b8e2009-09-18 15:19:37 +10002576 return r;
2577}
2578
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002579int r600_suspend(struct radeon_device *rdev)
2580{
Rafał Miłecki38fd2c62010-01-28 18:16:30 +01002581 r600_audio_fini(rdev);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002582 /* FIXME: we should wait for ring to be empty */
2583 r600_cp_stop(rdev);
Dave Airliebc1a6312009-09-15 11:07:52 +10002584 rdev->cp.ready = false;
Jerome Glisse0c452492010-01-15 14:44:37 +01002585 r600_irq_suspend(rdev);
Alex Deucher724c80e2010-08-27 18:25:25 -04002586 radeon_wb_disable(rdev);
Jerome Glisse4aac0472009-09-14 18:29:49 +02002587 r600_pcie_gart_disable(rdev);
Alex Deucher6ddddfe2011-10-14 10:51:22 -04002588 r600_blit_suspend(rdev);
2589
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002590 return 0;
2591}
2592
2593/* The plan is to move initialization into that function and use
2594 * helper functions so that radeon_device_init does pretty much
2595 * nothing more than call ASIC-specific functions. This should
2596 * also allow us to remove a bunch of callback functions
2597 * like vram_info.
2598 */
2599int r600_init(struct radeon_device *rdev)
2600{
2601 int r;
2602
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002603 if (r600_debugfs_mc_info_init(rdev)) {
2604		DRM_ERROR("Failed to register debugfs file for mc!\n");
2605 }
2606	/* This doesn't do much */
2607 r = radeon_gem_init(rdev);
2608 if (r)
2609 return r;
2610 /* Read BIOS */
2611 if (!radeon_get_bios(rdev)) {
2612 if (ASIC_IS_AVIVO(rdev))
2613 return -EINVAL;
2614 }
2615 /* Must be an ATOMBIOS */
Jerome Glissee7d40b92009-10-01 18:02:15 +02002616 if (!rdev->is_atom_bios) {
2617 dev_err(rdev->dev, "Expecting atombios for R600 GPU\n");
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002618 return -EINVAL;
Jerome Glissee7d40b92009-10-01 18:02:15 +02002619 }
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002620 r = radeon_atombios_init(rdev);
2621 if (r)
2622 return r;
2623 /* Post card if necessary */
Alex Deucherfd909c32011-01-11 18:08:59 -05002624 if (!radeon_card_posted(rdev)) {
Dave Airlie72542d72009-12-01 14:06:31 +10002625 if (!rdev->bios) {
2626 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
2627 return -EINVAL;
2628 }
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002629		DRM_INFO("GPU not posted. Posting now...\n");
2630 atom_asic_init(rdev->mode_info.atom_context);
2631 }
2632 /* Initialize scratch registers */
2633 r600_scratch_init(rdev);
2634 /* Initialize surface registers */
2635 radeon_surface_init(rdev);
Rafał Miłecki74338742009-11-03 00:53:02 +01002636 /* Initialize clocks */
Michel Dänzer5e6dde72009-09-17 09:42:28 +02002637 radeon_get_clock_info(rdev->ddev);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002638 /* Fence driver */
2639 r = radeon_fence_driver_init(rdev);
2640 if (r)
2641 return r;
Jerome Glisse700a0cc2010-01-13 15:16:38 +01002642 if (rdev->flags & RADEON_IS_AGP) {
2643 r = radeon_agp_init(rdev);
2644 if (r)
2645 radeon_agp_disable(rdev);
2646 }
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002647 r = r600_mc_init(rdev);
Jerome Glisseb574f252009-10-06 19:04:29 +02002648 if (r)
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002649 return r;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002650 /* Memory manager */
Jerome Glisse4c788672009-11-20 14:29:23 +01002651 r = radeon_bo_init(rdev);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002652 if (r)
2653 return r;
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002654
2655 r = radeon_irq_kms_init(rdev);
2656 if (r)
2657 return r;
2658
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002659 rdev->cp.ring_obj = NULL;
2660 r600_ring_init(rdev, 1024 * 1024);
2661
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002662 rdev->ih.ring_obj = NULL;
2663 r600_ih_ring_init(rdev, 64 * 1024);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002664
Jerome Glisse4aac0472009-09-14 18:29:49 +02002665 r = r600_pcie_gart_init(rdev);
2666 if (r)
2667 return r;
2668
Alex Deucher779720a2009-12-09 19:31:44 -05002669 rdev->accel_working = true;
Dave Airliefc30b8e2009-09-18 15:19:37 +10002670 r = r600_startup(rdev);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002671 if (r) {
Jerome Glisse655efd32010-02-02 11:51:45 +01002672 dev_err(rdev->dev, "disabling GPU acceleration\n");
2673 r600_cp_fini(rdev);
Jerome Glisse655efd32010-02-02 11:51:45 +01002674 r600_irq_fini(rdev);
Alex Deucher724c80e2010-08-27 18:25:25 -04002675 radeon_wb_fini(rdev);
Jerome Glisse655efd32010-02-02 11:51:45 +01002676 radeon_irq_kms_fini(rdev);
Jerome Glisse75c81292009-10-01 18:02:14 +02002677 r600_pcie_gart_fini(rdev);
Jerome Glisse733289c2009-09-16 15:24:21 +02002678 rdev->accel_working = false;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002679 }
Jerome Glisse733289c2009-09-16 15:24:21 +02002680 if (rdev->accel_working) {
2681 r = radeon_ib_pool_init(rdev);
2682 if (r) {
Jerome Glissedb963802010-01-17 21:21:56 +01002683 dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
Jerome Glisse733289c2009-09-16 15:24:21 +02002684 rdev->accel_working = false;
Jerome Glissedb963802010-01-17 21:21:56 +01002685 } else {
2686 r = r600_ib_test(rdev);
2687 if (r) {
2688 dev_err(rdev->dev, "IB test failed (%d).\n", r);
2689 rdev->accel_working = false;
2690 }
Jerome Glisse733289c2009-09-16 15:24:21 +02002691 }
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002692 }
Christian Koenigdafc3bd2009-10-11 23:49:13 +02002693
2694 r = r600_audio_init(rdev);
2695 if (r)
2696 return r; /* TODO error handling */
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002697 return 0;
2698}
2699
2700void r600_fini(struct radeon_device *rdev)
2701{
Christian Koenigdafc3bd2009-10-11 23:49:13 +02002702 r600_audio_fini(rdev);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002703 r600_blit_fini(rdev);
Jerome Glisse655efd32010-02-02 11:51:45 +01002704 r600_cp_fini(rdev);
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002705 r600_irq_fini(rdev);
Alex Deucher724c80e2010-08-27 18:25:25 -04002706 radeon_wb_fini(rdev);
Jerome Glisseccd68952011-07-06 18:30:09 +00002707 radeon_ib_pool_fini(rdev);
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002708 radeon_irq_kms_fini(rdev);
Jerome Glisse4aac0472009-09-14 18:29:49 +02002709 r600_pcie_gart_fini(rdev);
Alex Deucher16cdf042011-10-28 10:30:02 -04002710 r600_vram_scratch_fini(rdev);
Jerome Glisse655efd32010-02-02 11:51:45 +01002711 radeon_agp_fini(rdev);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002712 radeon_gem_fini(rdev);
2713 radeon_fence_driver_fini(rdev);
Jerome Glisse4c788672009-11-20 14:29:23 +01002714 radeon_bo_fini(rdev);
Jerome Glissee7d40b92009-10-01 18:02:15 +02002715 radeon_atombios_fini(rdev);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002716 kfree(rdev->bios);
2717 rdev->bios = NULL;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002718}
2719
2720
2721/*
2722 * CS stuff
2723 */
2724void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
2725{
2726 /* FIXME: implement */
2727 radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
Cédric Cano4eace7f2011-02-11 19:45:38 -05002728 radeon_ring_write(rdev,
2729#ifdef __BIG_ENDIAN
2730 (2 << 0) |
2731#endif
2732 (ib->gpu_addr & 0xFFFFFFFC));
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002733 radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF);
2734 radeon_ring_write(rdev, ib->length_dw);
2735}
2736
2737int r600_ib_test(struct radeon_device *rdev)
2738{
2739 struct radeon_ib *ib;
2740 uint32_t scratch;
2741 uint32_t tmp = 0;
2742 unsigned i;
2743 int r;
2744
2745 r = radeon_scratch_get(rdev, &scratch);
2746 if (r) {
2747 DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
2748 return r;
2749 }
2750 WREG32(scratch, 0xCAFEDEAD);
2751 r = radeon_ib_get(rdev, &ib);
2752	if (r) {
2753		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
		radeon_scratch_free(rdev, scratch);
2754		return r;
2755 }
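	/* the test IB simply writes 0xDEADBEEF to the scratch register
	 * with a single SET_CONFIG_REG packet
	 */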
2756 ib->ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
2757 ib->ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
2758 ib->ptr[2] = 0xDEADBEEF;
2759	/* pad the IB to 16 dwords with type-2 NOPs */
	for (i = 3; i < 16; i++)
		ib->ptr[i] = PACKET2(0);
2772 ib->length_dw = 16;
2773 r = radeon_ib_schedule(rdev, ib);
2774 if (r) {
2775 radeon_scratch_free(rdev, scratch);
2776 radeon_ib_free(rdev, &ib);
2777 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
2778 return r;
2779 }
2780 r = radeon_fence_wait(ib->fence, false);
2781 if (r) {
2782		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
		radeon_scratch_free(rdev, scratch);
		radeon_ib_free(rdev, &ib);
2783		return r;
2784 }
2785 for (i = 0; i < rdev->usec_timeout; i++) {
2786 tmp = RREG32(scratch);
2787 if (tmp == 0xDEADBEEF)
2788 break;
2789 DRM_UDELAY(1);
2790 }
2791 if (i < rdev->usec_timeout) {
2792 DRM_INFO("ib test succeeded in %u usecs\n", i);
2793 } else {
Daniel J Blueman4417d7f2010-09-22 17:57:19 +01002794 DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002795 scratch, tmp);
2796 r = -EINVAL;
2797 }
2798 radeon_scratch_free(rdev, scratch);
2799 radeon_ib_free(rdev, &ib);
2800 return r;
2801}
2802
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002803/*
2804 * Interrupts
2805 *
2806 * Interrupts use a ring buffer on r6xx/r7xx hardware. It works pretty
2807 * much the same as the CP ring buffer, but in reverse: rather than the
2808 * CPU writing to the ring and the GPU consuming, the GPU writes to the
2809 * ring and the host consumes. As the host irq handler processes
2810 * interrupts, it increments the rptr. When the rptr catches up with the
2811 * wptr, all the current interrupts have been processed.
2812 */
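
/*
 * Minimal sketch (not used by the driver) of the consumer-side pointer
 * arithmetic described above. It assumes what r600_ih_ring_init() below
 * enforces: the ring size is a power of two, so (ring_size - 1) works as
 * a wrap mask, and rptr is counted in bytes with one 128-bit (16-byte)
 * vector consumed per interrupt.
 */
static inline u32 r600_ih_advance_rptr_sketch(u32 rptr, u32 ptr_mask)
{
	/* step over one 16-byte vector, wrapping at the end of the ring */
	return (rptr + 16) & ptr_mask;
}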
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002813
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002814void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size)
2815{
2816 u32 rb_bufsz;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002817
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002818 /* Align ring size */
2819 rb_bufsz = drm_order(ring_size / 4);
2820 ring_size = (1 << rb_bufsz) * 4;
2821 rdev->ih.ring_size = ring_size;
Jerome Glisse0c452492010-01-15 14:44:37 +01002822 rdev->ih.ptr_mask = rdev->ih.ring_size - 1;
2823 rdev->ih.rptr = 0;
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002824}
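
/*
 * Worked example of the alignment above, assuming drm_order() returns
 * ceil(log2(x)) as in the DRM core: a 64KB request gives
 * rb_bufsz = drm_order(65536 / 4) = 14, hence ring_size = (1 << 14) * 4 =
 * 64KB and ptr_mask = 0xffff, while a 40KB request would round up to the
 * same 64KB ring.
 */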
2825
Jerome Glisse0c452492010-01-15 14:44:37 +01002826static int r600_ih_ring_alloc(struct radeon_device *rdev)
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002827{
2828 int r;
2829
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002830 /* Allocate ring buffer */
2831 if (rdev->ih.ring_obj == NULL) {
Daniel Vetter441921d2011-02-18 17:59:16 +01002832 r = radeon_bo_create(rdev, rdev->ih.ring_size,
Alex Deucher268b2512010-11-17 19:00:26 -05002833 PAGE_SIZE, true,
Jerome Glisse4c788672009-11-20 14:29:23 +01002834 RADEON_GEM_DOMAIN_GTT,
2835 &rdev->ih.ring_obj);
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002836 if (r) {
2837 DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r);
2838 return r;
2839 }
Jerome Glisse4c788672009-11-20 14:29:23 +01002840 r = radeon_bo_reserve(rdev->ih.ring_obj, false);
2841 if (unlikely(r != 0))
2842 return r;
2843 r = radeon_bo_pin(rdev->ih.ring_obj,
2844 RADEON_GEM_DOMAIN_GTT,
2845 &rdev->ih.gpu_addr);
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002846 if (r) {
Jerome Glisse4c788672009-11-20 14:29:23 +01002847 radeon_bo_unreserve(rdev->ih.ring_obj);
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002848 DRM_ERROR("radeon: failed to pin ih ring buffer (%d).\n", r);
2849 return r;
2850 }
Jerome Glisse4c788672009-11-20 14:29:23 +01002851 r = radeon_bo_kmap(rdev->ih.ring_obj,
2852 (void **)&rdev->ih.ring);
2853 radeon_bo_unreserve(rdev->ih.ring_obj);
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002854 if (r) {
2855 DRM_ERROR("radeon: failed to map ih ring buffer (%d).\n", r);
2856 return r;
2857 }
2858 }
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002859 return 0;
2860}
2861
2862static void r600_ih_ring_fini(struct radeon_device *rdev)
2863{
Jerome Glisse4c788672009-11-20 14:29:23 +01002864 int r;
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002865 if (rdev->ih.ring_obj) {
Jerome Glisse4c788672009-11-20 14:29:23 +01002866 r = radeon_bo_reserve(rdev->ih.ring_obj, false);
2867 if (likely(r == 0)) {
2868 radeon_bo_kunmap(rdev->ih.ring_obj);
2869 radeon_bo_unpin(rdev->ih.ring_obj);
2870 radeon_bo_unreserve(rdev->ih.ring_obj);
2871 }
2872 radeon_bo_unref(&rdev->ih.ring_obj);
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002873 rdev->ih.ring = NULL;
2874 rdev->ih.ring_obj = NULL;
2875 }
2876}
2877
Alex Deucher45f9a392010-03-24 13:55:51 -04002878void r600_rlc_stop(struct radeon_device *rdev)
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002879{
2880
Alex Deucher45f9a392010-03-24 13:55:51 -04002881 if ((rdev->family >= CHIP_RV770) &&
2882 (rdev->family <= CHIP_RV740)) {
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002883 /* r7xx asics need to soft reset RLC before halting */
2884 WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC);
2885 RREG32(SRBM_SOFT_RESET);
2886		mdelay(15);
2887 WREG32(SRBM_SOFT_RESET, 0);
2888 RREG32(SRBM_SOFT_RESET);
2889 }
2890
2891 WREG32(RLC_CNTL, 0);
2892}
2893
2894static void r600_rlc_start(struct radeon_device *rdev)
2895{
2896 WREG32(RLC_CNTL, RLC_ENABLE);
2897}
2898
2899static int r600_rlc_init(struct radeon_device *rdev)
2900{
2901 u32 i;
2902 const __be32 *fw_data;
2903
2904 if (!rdev->rlc_fw)
2905 return -EINVAL;
2906
2907 r600_rlc_stop(rdev);
2908
2909 WREG32(RLC_HB_BASE, 0);
2910 WREG32(RLC_HB_CNTL, 0);
2911 WREG32(RLC_HB_RPTR, 0);
2912 WREG32(RLC_HB_WPTR, 0);
Alex Deucher12727802011-03-02 20:07:32 -05002913 if (rdev->family <= CHIP_CAICOS) {
2914 WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
2915 WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
2916 }
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002917 WREG32(RLC_MC_CNTL, 0);
2918 WREG32(RLC_UCODE_CNTL, 0);
2919
2920 fw_data = (const __be32 *)rdev->rlc_fw->data;
Alex Deucher12727802011-03-02 20:07:32 -05002921 if (rdev->family >= CHIP_CAYMAN) {
2922 for (i = 0; i < CAYMAN_RLC_UCODE_SIZE; i++) {
2923 WREG32(RLC_UCODE_ADDR, i);
2924 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
2925 }
2926 } else if (rdev->family >= CHIP_CEDAR) {
Alex Deucher45f9a392010-03-24 13:55:51 -04002927 for (i = 0; i < EVERGREEN_RLC_UCODE_SIZE; i++) {
2928 WREG32(RLC_UCODE_ADDR, i);
2929 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
2930 }
2931 } else if (rdev->family >= CHIP_RV770) {
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002932 for (i = 0; i < R700_RLC_UCODE_SIZE; i++) {
2933 WREG32(RLC_UCODE_ADDR, i);
2934 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
2935 }
2936 } else {
2937 for (i = 0; i < RLC_UCODE_SIZE; i++) {
2938 WREG32(RLC_UCODE_ADDR, i);
2939 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
2940 }
2941 }
2942 WREG32(RLC_UCODE_ADDR, 0);
2943
2944 r600_rlc_start(rdev);
2945
2946 return 0;
2947}
2948
2949static void r600_enable_interrupts(struct radeon_device *rdev)
2950{
2951 u32 ih_cntl = RREG32(IH_CNTL);
2952 u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
2953
2954 ih_cntl |= ENABLE_INTR;
2955 ih_rb_cntl |= IH_RB_ENABLE;
2956 WREG32(IH_CNTL, ih_cntl);
2957 WREG32(IH_RB_CNTL, ih_rb_cntl);
2958 rdev->ih.enabled = true;
2959}
2960
Alex Deucher45f9a392010-03-24 13:55:51 -04002961void r600_disable_interrupts(struct radeon_device *rdev)
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002962{
2963 u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
2964 u32 ih_cntl = RREG32(IH_CNTL);
2965
2966 ih_rb_cntl &= ~IH_RB_ENABLE;
2967 ih_cntl &= ~ENABLE_INTR;
2968 WREG32(IH_RB_CNTL, ih_rb_cntl);
2969 WREG32(IH_CNTL, ih_cntl);
2970 /* set rptr, wptr to 0 */
2971 WREG32(IH_RB_RPTR, 0);
2972 WREG32(IH_RB_WPTR, 0);
2973 rdev->ih.enabled = false;
2974 rdev->ih.wptr = 0;
2975 rdev->ih.rptr = 0;
2976}
2977
Alex Deuchere0df1ac2009-12-04 15:12:21 -05002978static void r600_disable_interrupt_state(struct radeon_device *rdev)
2979{
2980 u32 tmp;
2981
Alex Deucher3555e532010-10-08 12:09:12 -04002982 WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
Alex Deuchere0df1ac2009-12-04 15:12:21 -05002983 WREG32(GRBM_INT_CNTL, 0);
2984 WREG32(DxMODE_INT_MASK, 0);
Alex Deucher6f34be52010-11-21 10:59:01 -05002985 WREG32(D1GRPH_INTERRUPT_CONTROL, 0);
2986 WREG32(D2GRPH_INTERRUPT_CONTROL, 0);
Alex Deuchere0df1ac2009-12-04 15:12:21 -05002987 if (ASIC_IS_DCE3(rdev)) {
2988 WREG32(DCE3_DACA_AUTODETECT_INT_CONTROL, 0);
2989 WREG32(DCE3_DACB_AUTODETECT_INT_CONTROL, 0);
2990 tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2991 WREG32(DC_HPD1_INT_CONTROL, tmp);
2992 tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2993 WREG32(DC_HPD2_INT_CONTROL, tmp);
2994 tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2995 WREG32(DC_HPD3_INT_CONTROL, tmp);
2996 tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2997 WREG32(DC_HPD4_INT_CONTROL, tmp);
2998 if (ASIC_IS_DCE32(rdev)) {
2999 tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
Alex Deucher5898b1f2010-03-24 13:57:29 -04003000 WREG32(DC_HPD5_INT_CONTROL, tmp);
Alex Deuchere0df1ac2009-12-04 15:12:21 -05003001 tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
Alex Deucher5898b1f2010-03-24 13:57:29 -04003002 WREG32(DC_HPD6_INT_CONTROL, tmp);
Alex Deuchere0df1ac2009-12-04 15:12:21 -05003003 }
3004 } else {
3005 WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
3006 WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
3007 tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
Alex Deucher5898b1f2010-03-24 13:57:29 -04003008 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
Alex Deuchere0df1ac2009-12-04 15:12:21 -05003009 tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
Alex Deucher5898b1f2010-03-24 13:57:29 -04003010 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
Alex Deuchere0df1ac2009-12-04 15:12:21 -05003011 tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
Alex Deucher5898b1f2010-03-24 13:57:29 -04003012 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
Alex Deuchere0df1ac2009-12-04 15:12:21 -05003013 }
3014}
3015
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003016int r600_irq_init(struct radeon_device *rdev)
3017{
3018 int ret = 0;
3019 int rb_bufsz;
3020 u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
3021
3022 /* allocate ring */
Jerome Glisse0c452492010-01-15 14:44:37 +01003023 ret = r600_ih_ring_alloc(rdev);
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003024 if (ret)
3025 return ret;
3026
3027 /* disable irqs */
3028 r600_disable_interrupts(rdev);
3029
3030 /* init rlc */
3031 ret = r600_rlc_init(rdev);
3032 if (ret) {
3033 r600_ih_ring_fini(rdev);
3034 return ret;
3035 }
3036
3037 /* setup interrupt control */
3038 /* set dummy read address to ring address */
3039 WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
3040 interrupt_cntl = RREG32(INTERRUPT_CNTL);
3041 /* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
3042 * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
3043 */
3044 interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
3045 /* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
3046 interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
3047 WREG32(INTERRUPT_CNTL, interrupt_cntl);
3048
3049 WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
3050 rb_bufsz = drm_order(rdev->ih.ring_size / 4);
3051
3052 ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
3053 IH_WPTR_OVERFLOW_CLEAR |
3054 (rb_bufsz << 1));
Alex Deucher724c80e2010-08-27 18:25:25 -04003055
3056 if (rdev->wb.enabled)
3057 ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;
3058
3059 /* set the writeback address whether it's enabled or not */
3060 WREG32(IH_RB_WPTR_ADDR_LO, (rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFFFFFFFC);
3061 WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFF);
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003062
3063 WREG32(IH_RB_CNTL, ih_rb_cntl);
3064
3065 /* set rptr, wptr to 0 */
3066 WREG32(IH_RB_RPTR, 0);
3067 WREG32(IH_RB_WPTR, 0);
3068
3069 /* Default settings for IH_CNTL (disabled at first) */
3070 ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10);
3071 /* RPTR_REARM only works if msi's are enabled */
3072 if (rdev->msi_enabled)
3073 ih_cntl |= RPTR_REARM;
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003074 WREG32(IH_CNTL, ih_cntl);
3075
3076 /* force the active interrupt state to all disabled */
Alex Deucher45f9a392010-03-24 13:55:51 -04003077 if (rdev->family >= CHIP_CEDAR)
3078 evergreen_disable_interrupt_state(rdev);
3079 else
3080 r600_disable_interrupt_state(rdev);
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003081
3082 /* enable irqs */
3083 r600_enable_interrupts(rdev);
3084
3085 return ret;
3086}
3087
Jerome Glisse0c452492010-01-15 14:44:37 +01003088void r600_irq_suspend(struct radeon_device *rdev)
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003089{
Alex Deucher45f9a392010-03-24 13:55:51 -04003090 r600_irq_disable(rdev);
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003091 r600_rlc_stop(rdev);
Jerome Glisse0c452492010-01-15 14:44:37 +01003092}
3093
3094void r600_irq_fini(struct radeon_device *rdev)
3095{
3096 r600_irq_suspend(rdev);
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003097 r600_ih_ring_fini(rdev);
3098}
3099
3100int r600_irq_set(struct radeon_device *rdev)
3101{
Alex Deuchere0df1ac2009-12-04 15:12:21 -05003102 u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
3103 u32 mode_int = 0;
3104 u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
Alex Deucher2031f772010-04-22 12:52:11 -04003105 u32 grbm_int_cntl = 0;
Christian Koenigf2594932010-04-10 03:13:16 +02003106 u32 hdmi1, hdmi2;
Alex Deucher6f34be52010-11-21 10:59:01 -05003107 u32 d1grph = 0, d2grph = 0;
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003108
Jerome Glisse003e69f2010-01-07 15:39:14 +01003109 if (!rdev->irq.installed) {
Joe Perchesfce7d612010-10-30 21:08:30 +00003110 WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
Jerome Glisse003e69f2010-01-07 15:39:14 +01003111 return -EINVAL;
3112 }
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003113 /* don't enable anything if the ih is disabled */
Jerome Glisse79c2bbc2010-01-15 14:44:38 +01003114 if (!rdev->ih.enabled) {
3115 r600_disable_interrupts(rdev);
3116 /* force the active interrupt state to all disabled */
3117 r600_disable_interrupt_state(rdev);
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003118 return 0;
Jerome Glisse79c2bbc2010-01-15 14:44:38 +01003119 }
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003120
Christian Koenigf2594932010-04-10 03:13:16 +02003121 hdmi1 = RREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
Alex Deuchere0df1ac2009-12-04 15:12:21 -05003122 if (ASIC_IS_DCE3(rdev)) {
Christian Koenigf2594932010-04-10 03:13:16 +02003123 hdmi2 = RREG32(R600_HDMI_BLOCK3 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
Alex Deuchere0df1ac2009-12-04 15:12:21 -05003124 hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
3125 hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
3126 hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
3127 hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
3128 if (ASIC_IS_DCE32(rdev)) {
3129 hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
3130 hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
3131 }
3132 } else {
Christian Koenigf2594932010-04-10 03:13:16 +02003133 hdmi2 = RREG32(R600_HDMI_BLOCK2 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
Alex Deuchere0df1ac2009-12-04 15:12:21 -05003134 hpd1 = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN;
3135 hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN;
3136 hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN;
3137 }
3138
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003139 if (rdev->irq.sw_int) {
3140 DRM_DEBUG("r600_irq_set: sw int\n");
3141 cp_int_cntl |= RB_INT_ENABLE;
Alex Deucherd0f8a852010-09-04 05:04:34 -04003142 cp_int_cntl |= TIME_STAMP_INT_ENABLE;
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003143 }
Alex Deucher6f34be52010-11-21 10:59:01 -05003144 if (rdev->irq.crtc_vblank_int[0] ||
3145 rdev->irq.pflip[0]) {
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003146 DRM_DEBUG("r600_irq_set: vblank 0\n");
3147 mode_int |= D1MODE_VBLANK_INT_MASK;
3148 }
Alex Deucher6f34be52010-11-21 10:59:01 -05003149 if (rdev->irq.crtc_vblank_int[1] ||
3150 rdev->irq.pflip[1]) {
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003151 DRM_DEBUG("r600_irq_set: vblank 1\n");
3152 mode_int |= D2MODE_VBLANK_INT_MASK;
3153 }
Alex Deuchere0df1ac2009-12-04 15:12:21 -05003154 if (rdev->irq.hpd[0]) {
3155 DRM_DEBUG("r600_irq_set: hpd 1\n");
3156 hpd1 |= DC_HPDx_INT_EN;
3157 }
3158 if (rdev->irq.hpd[1]) {
3159 DRM_DEBUG("r600_irq_set: hpd 2\n");
3160 hpd2 |= DC_HPDx_INT_EN;
3161 }
3162 if (rdev->irq.hpd[2]) {
3163 DRM_DEBUG("r600_irq_set: hpd 3\n");
3164 hpd3 |= DC_HPDx_INT_EN;
3165 }
3166 if (rdev->irq.hpd[3]) {
3167 DRM_DEBUG("r600_irq_set: hpd 4\n");
3168 hpd4 |= DC_HPDx_INT_EN;
3169 }
3170 if (rdev->irq.hpd[4]) {
3171 DRM_DEBUG("r600_irq_set: hpd 5\n");
3172 hpd5 |= DC_HPDx_INT_EN;
3173 }
3174 if (rdev->irq.hpd[5]) {
3175 DRM_DEBUG("r600_irq_set: hpd 6\n");
3176 hpd6 |= DC_HPDx_INT_EN;
3177 }
Christian Koenigf2594932010-04-10 03:13:16 +02003178 if (rdev->irq.hdmi[0]) {
3179 DRM_DEBUG("r600_irq_set: hdmi 1\n");
3180 hdmi1 |= R600_HDMI_INT_EN;
3181 }
3182 if (rdev->irq.hdmi[1]) {
3183 DRM_DEBUG("r600_irq_set: hdmi 2\n");
3184 hdmi2 |= R600_HDMI_INT_EN;
3185 }
Alex Deucher2031f772010-04-22 12:52:11 -04003186 if (rdev->irq.gui_idle) {
3187 DRM_DEBUG("gui idle\n");
3188 grbm_int_cntl |= GUI_IDLE_INT_ENABLE;
3189 }
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003190
3191 WREG32(CP_INT_CNTL, cp_int_cntl);
3192 WREG32(DxMODE_INT_MASK, mode_int);
Alex Deucher6f34be52010-11-21 10:59:01 -05003193 WREG32(D1GRPH_INTERRUPT_CONTROL, d1grph);
3194 WREG32(D2GRPH_INTERRUPT_CONTROL, d2grph);
Alex Deucher2031f772010-04-22 12:52:11 -04003195 WREG32(GRBM_INT_CNTL, grbm_int_cntl);
Christian Koenigf2594932010-04-10 03:13:16 +02003196 WREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, hdmi1);
Alex Deuchere0df1ac2009-12-04 15:12:21 -05003197 if (ASIC_IS_DCE3(rdev)) {
Christian Koenigf2594932010-04-10 03:13:16 +02003198 WREG32(R600_HDMI_BLOCK3 + R600_HDMI_CNTL, hdmi2);
Alex Deuchere0df1ac2009-12-04 15:12:21 -05003199 WREG32(DC_HPD1_INT_CONTROL, hpd1);
3200 WREG32(DC_HPD2_INT_CONTROL, hpd2);
3201 WREG32(DC_HPD3_INT_CONTROL, hpd3);
3202 WREG32(DC_HPD4_INT_CONTROL, hpd4);
3203 if (ASIC_IS_DCE32(rdev)) {
3204 WREG32(DC_HPD5_INT_CONTROL, hpd5);
3205 WREG32(DC_HPD6_INT_CONTROL, hpd6);
3206 }
3207 } else {
Christian Koenigf2594932010-04-10 03:13:16 +02003208 WREG32(R600_HDMI_BLOCK2 + R600_HDMI_CNTL, hdmi2);
Alex Deuchere0df1ac2009-12-04 15:12:21 -05003209 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
3210 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
3211 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3);
3212 }
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003213
3214 return 0;
3215}
3216
Andi Kleence580fa2011-10-13 16:08:47 -07003217static void r600_irq_ack(struct radeon_device *rdev)
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003218{
Alex Deuchere0df1ac2009-12-04 15:12:21 -05003219 u32 tmp;
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003220
Alex Deuchere0df1ac2009-12-04 15:12:21 -05003221 if (ASIC_IS_DCE3(rdev)) {
Alex Deucher6f34be52010-11-21 10:59:01 -05003222 rdev->irq.stat_regs.r600.disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS);
3223 rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE);
3224 rdev->irq.stat_regs.r600.disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2);
Alex Deuchere0df1ac2009-12-04 15:12:21 -05003225 } else {
Alex Deucher6f34be52010-11-21 10:59:01 -05003226 rdev->irq.stat_regs.r600.disp_int = RREG32(DISP_INTERRUPT_STATUS);
3227 rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
3228 rdev->irq.stat_regs.r600.disp_int_cont2 = 0;
Alex Deuchere0df1ac2009-12-04 15:12:21 -05003229 }
Alex Deucher6f34be52010-11-21 10:59:01 -05003230 rdev->irq.stat_regs.r600.d1grph_int = RREG32(D1GRPH_INTERRUPT_STATUS);
3231 rdev->irq.stat_regs.r600.d2grph_int = RREG32(D2GRPH_INTERRUPT_STATUS);
Alex Deuchere0df1ac2009-12-04 15:12:21 -05003232
Alex Deucher6f34be52010-11-21 10:59:01 -05003233 if (rdev->irq.stat_regs.r600.d1grph_int & DxGRPH_PFLIP_INT_OCCURRED)
3234 WREG32(D1GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR);
3235 if (rdev->irq.stat_regs.r600.d2grph_int & DxGRPH_PFLIP_INT_OCCURRED)
3236 WREG32(D2GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR);
3237 if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT)
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003238 WREG32(D1MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
Alex Deucher6f34be52010-11-21 10:59:01 -05003239 if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT)
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003240 WREG32(D1MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
Alex Deucher6f34be52010-11-21 10:59:01 -05003241 if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT)
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003242 WREG32(D2MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
Alex Deucher6f34be52010-11-21 10:59:01 -05003243 if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT)
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003244 WREG32(D2MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
Alex Deucher6f34be52010-11-21 10:59:01 -05003245 if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
Alex Deuchere0df1ac2009-12-04 15:12:21 -05003246 if (ASIC_IS_DCE3(rdev)) {
3247 tmp = RREG32(DC_HPD1_INT_CONTROL);
3248 tmp |= DC_HPDx_INT_ACK;
3249 WREG32(DC_HPD1_INT_CONTROL, tmp);
3250 } else {
3251 tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
3252 tmp |= DC_HPDx_INT_ACK;
3253 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
3254 }
3255 }
Alex Deucher6f34be52010-11-21 10:59:01 -05003256 if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
Alex Deuchere0df1ac2009-12-04 15:12:21 -05003257 if (ASIC_IS_DCE3(rdev)) {
3258 tmp = RREG32(DC_HPD2_INT_CONTROL);
3259 tmp |= DC_HPDx_INT_ACK;
3260 WREG32(DC_HPD2_INT_CONTROL, tmp);
3261 } else {
3262 tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
3263 tmp |= DC_HPDx_INT_ACK;
3264 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
3265 }
3266 }
Alex Deucher6f34be52010-11-21 10:59:01 -05003267 if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
Alex Deuchere0df1ac2009-12-04 15:12:21 -05003268 if (ASIC_IS_DCE3(rdev)) {
3269 tmp = RREG32(DC_HPD3_INT_CONTROL);
3270 tmp |= DC_HPDx_INT_ACK;
3271 WREG32(DC_HPD3_INT_CONTROL, tmp);
3272 } else {
3273 tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
3274 tmp |= DC_HPDx_INT_ACK;
3275 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
3276 }
3277 }
Alex Deucher6f34be52010-11-21 10:59:01 -05003278 if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
Alex Deuchere0df1ac2009-12-04 15:12:21 -05003279 tmp = RREG32(DC_HPD4_INT_CONTROL);
3280 tmp |= DC_HPDx_INT_ACK;
3281 WREG32(DC_HPD4_INT_CONTROL, tmp);
3282 }
3283 if (ASIC_IS_DCE32(rdev)) {
Alex Deucher6f34be52010-11-21 10:59:01 -05003284 if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
Alex Deuchere0df1ac2009-12-04 15:12:21 -05003285 tmp = RREG32(DC_HPD5_INT_CONTROL);
3286 tmp |= DC_HPDx_INT_ACK;
3287 WREG32(DC_HPD5_INT_CONTROL, tmp);
3288 }
Alex Deucher6f34be52010-11-21 10:59:01 -05003289 if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
Alex Deuchere0df1ac2009-12-04 15:12:21 -05003290		tmp = RREG32(DC_HPD6_INT_CONTROL);
3291 tmp |= DC_HPDx_INT_ACK;
3292 WREG32(DC_HPD6_INT_CONTROL, tmp);
3293 }
3294 }
Christian Koenigf2594932010-04-10 03:13:16 +02003295 if (RREG32(R600_HDMI_BLOCK1 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
3296 WREG32_P(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
3297 }
3298 if (ASIC_IS_DCE3(rdev)) {
3299 if (RREG32(R600_HDMI_BLOCK3 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
3300 WREG32_P(R600_HDMI_BLOCK3 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
3301 }
3302 } else {
3303 if (RREG32(R600_HDMI_BLOCK2 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
3304 WREG32_P(R600_HDMI_BLOCK2 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
3305 }
3306 }
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003307}
3308
3309void r600_irq_disable(struct radeon_device *rdev)
3310{
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003311 r600_disable_interrupts(rdev);
3312 /* Wait and acknowledge irq */
3313 mdelay(1);
Alex Deucher6f34be52010-11-21 10:59:01 -05003314 r600_irq_ack(rdev);
Alex Deuchere0df1ac2009-12-04 15:12:21 -05003315 r600_disable_interrupt_state(rdev);
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003316}
3317
Andi Kleence580fa2011-10-13 16:08:47 -07003318static u32 r600_get_ih_wptr(struct radeon_device *rdev)
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003319{
3320 u32 wptr, tmp;
3321
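	/* prefer the CPU-visible writeback copy of the wptr when writeback
	 * is enabled; otherwise fall back to an MMIO register read
	 */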
Alex Deucher724c80e2010-08-27 18:25:25 -04003322 if (rdev->wb.enabled)
Cédric Cano204ae242011-04-19 11:07:13 -04003323 wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
Alex Deucher724c80e2010-08-27 18:25:25 -04003324 else
3325 wptr = RREG32(IH_RB_WPTR);
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003326
3327 if (wptr & RB_OVERFLOW) {
Jerome Glisse7924e5e2010-01-15 14:44:39 +01003328		/* When a ring buffer overflow happens, start parsing interrupts
3329		 * from the last not-yet-overwritten vector (wptr + 16). Hopefully
3330		 * this should allow us to catch up.
3331		 */
3332		dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
3333			wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
3334 rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003335 tmp = RREG32(IH_RB_CNTL);
3336 tmp |= IH_WPTR_OVERFLOW_CLEAR;
3337 WREG32(IH_RB_CNTL, tmp);
3338 }
Jerome Glisse0c452492010-01-15 14:44:37 +01003339 return (wptr & rdev->ih.ptr_mask);
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003340}
3341
3342/* r600 IV Ring
3343 * Each IV ring entry is 128 bits:
3344 * [7:0] - interrupt source id
3345 * [31:8] - reserved
3346 * [59:32] - interrupt source data
3347 * [127:60] - reserved
3348 *
3349 * The basic interrupt vector entries
3350 * are decoded as follows:
3351 * src_id src_data description
3352 * 1 0 D1 Vblank
3353 * 1 1 D1 Vline
3354 * 5 0 D2 Vblank
3355 * 5 1 D2 Vline
3356 * 19 0 FP Hot plug detection A
3357 * 19 1 FP Hot plug detection B
3358 * 19 2 DAC A auto-detection
3359 * 19 3 DAC B auto-detection
Christian Koenigf2594932010-04-10 03:13:16 +02003360 * 21 4 HDMI block A
3361 * 21 5 HDMI block B
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003362 * 176 - CP_INT RB
3363 * 177 - CP_INT IB1
3364 * 178 - CP_INT IB2
3365 * 181 - EOP Interrupt
3366 * 233 - GUI Idle
3367 *
3368 * Note, these are based on r600 and may need to be
3369 * adjusted or added to on newer asics
3370 */
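
/*
 * Illustrative sketch (not part of the driver) of how one 128-bit IV ring
 * entry is decoded from its first two little-endian dwords, mirroring the
 * masks used in r600_irq_process() below; the remaining bits and the upper
 * two dwords are reserved.
 */
static inline void r600_decode_iv_sketch(const u32 *ring, u32 rptr,
					 u32 *src_id, u32 *src_data)
{
	u32 ring_index = rptr / 4;	/* rptr is in bytes, ring in dwords */

	*src_id = le32_to_cpu(ring[ring_index]) & 0xff;		    /* bits [7:0] */
	*src_data = le32_to_cpu(ring[ring_index + 1]) & 0xfffffff; /* bits [59:32] */
}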
3371
3372int r600_irq_process(struct radeon_device *rdev)
3373{
Dave Airlie682f1a52011-06-18 03:59:51 +00003374 u32 wptr;
3375 u32 rptr;
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003376 u32 src_id, src_data;
Alex Deucher6f34be52010-11-21 10:59:01 -05003377 u32 ring_index;
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003378 unsigned long flags;
Alex Deucherd4877cf2009-12-04 16:56:37 -05003379 bool queue_hotplug = false;
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003380
Dave Airlie682f1a52011-06-18 03:59:51 +00003381 if (!rdev->ih.enabled || rdev->shutdown)
Jerome Glisse79c2bbc2010-01-15 14:44:38 +01003382 return IRQ_NONE;
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003383
Benjamin Herrenschmidtf6a56932011-07-13 06:28:22 +00003384 /* No MSIs, need a dummy read to flush PCI DMAs */
3385 if (!rdev->msi_enabled)
3386 RREG32(IH_RB_WPTR);
3387
Dave Airlie682f1a52011-06-18 03:59:51 +00003388 wptr = r600_get_ih_wptr(rdev);
3389 rptr = rdev->ih.rptr;
3390 DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
3391
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003392 spin_lock_irqsave(&rdev->ih.lock, flags);
3393
3394 if (rptr == wptr) {
3395 spin_unlock_irqrestore(&rdev->ih.lock, flags);
3396 return IRQ_NONE;
3397 }
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003398
3399restart_ih:
Benjamin Herrenschmidt964f6642011-07-13 16:28:19 +10003400 /* Order reading of wptr vs. reading of IH ring data */
3401 rmb();
3402
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003403 /* display interrupts */
Alex Deucher6f34be52010-11-21 10:59:01 -05003404 r600_irq_ack(rdev);
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003405
3406 rdev->ih.wptr = wptr;
3407 while (rptr != wptr) {
3408 /* wptr/rptr are in bytes! */
3409 ring_index = rptr / 4;
Cédric Cano4eace7f2011-02-11 19:45:38 -05003410 src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
3411 src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003412
3413 switch (src_id) {
3414 case 1: /* D1 vblank/vline */
3415 switch (src_data) {
3416 case 0: /* D1 vblank */
Alex Deucher6f34be52010-11-21 10:59:01 -05003417 if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT) {
Alex Deucher6f34be52010-11-21 10:59:01 -05003418 if (rdev->irq.crtc_vblank_int[0]) {
3419 drm_handle_vblank(rdev->ddev, 0);
3420 rdev->pm.vblank_sync = true;
3421 wake_up(&rdev->irq.vblank_queue);
3422 }
Mario Kleiner3e4ea742010-11-21 10:59:02 -05003423 if (rdev->irq.pflip[0])
3424 radeon_crtc_handle_flip(rdev, 0);
Alex Deucher6f34be52010-11-21 10:59:01 -05003425 rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003426 DRM_DEBUG("IH: D1 vblank\n");
3427 }
3428 break;
3429 case 1: /* D1 vline */
Alex Deucher6f34be52010-11-21 10:59:01 -05003430 if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT) {
3431 rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VLINE_INTERRUPT;
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003432 DRM_DEBUG("IH: D1 vline\n");
3433 }
3434 break;
3435 default:
Alex Deucherb0425892010-01-11 19:47:38 -05003436 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003437 break;
3438 }
3439 break;
3440 case 5: /* D2 vblank/vline */
3441 switch (src_data) {
3442 case 0: /* D2 vblank */
Alex Deucher6f34be52010-11-21 10:59:01 -05003443 if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT) {
Alex Deucher6f34be52010-11-21 10:59:01 -05003444 if (rdev->irq.crtc_vblank_int[1]) {
3445 drm_handle_vblank(rdev->ddev, 1);
3446 rdev->pm.vblank_sync = true;
3447 wake_up(&rdev->irq.vblank_queue);
3448 }
Mario Kleiner3e4ea742010-11-21 10:59:02 -05003449 if (rdev->irq.pflip[1])
3450 radeon_crtc_handle_flip(rdev, 1);
Alex Deucher6f34be52010-11-21 10:59:01 -05003451 rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT;
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003452 DRM_DEBUG("IH: D2 vblank\n");
3453 }
3454 break;
3455		case 1: /* D2 vline */
Alex Deucher6f34be52010-11-21 10:59:01 -05003456 if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT) {
3457 rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VLINE_INTERRUPT;
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003458 DRM_DEBUG("IH: D2 vline\n");
3459 }
3460 break;
3461 default:
Alex Deucherb0425892010-01-11 19:47:38 -05003462 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003463 break;
3464 }
3465 break;
Alex Deuchere0df1ac2009-12-04 15:12:21 -05003466 case 19: /* HPD/DAC hotplug */
3467 switch (src_data) {
3468 case 0:
Alex Deucher6f34be52010-11-21 10:59:01 -05003469 if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
3470 rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD1_INTERRUPT;
Alex Deucherd4877cf2009-12-04 16:56:37 -05003471 queue_hotplug = true;
3472 DRM_DEBUG("IH: HPD1\n");
Alex Deuchere0df1ac2009-12-04 15:12:21 -05003473 }
3474 break;
3475 case 1:
Alex Deucher6f34be52010-11-21 10:59:01 -05003476 if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
3477 rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD2_INTERRUPT;
Alex Deucherd4877cf2009-12-04 16:56:37 -05003478 queue_hotplug = true;
3479 DRM_DEBUG("IH: HPD2\n");
Alex Deuchere0df1ac2009-12-04 15:12:21 -05003480 }
3481 break;
3482 case 4:
Alex Deucher6f34be52010-11-21 10:59:01 -05003483 if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
3484 rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD3_INTERRUPT;
Alex Deucherd4877cf2009-12-04 16:56:37 -05003485 queue_hotplug = true;
3486 DRM_DEBUG("IH: HPD3\n");
Alex Deuchere0df1ac2009-12-04 15:12:21 -05003487 }
3488 break;
3489 case 5:
Alex Deucher6f34be52010-11-21 10:59:01 -05003490 if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
3491 rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD4_INTERRUPT;
Alex Deucherd4877cf2009-12-04 16:56:37 -05003492 queue_hotplug = true;
3493 DRM_DEBUG("IH: HPD4\n");
Alex Deuchere0df1ac2009-12-04 15:12:21 -05003494 }
3495 break;
3496 case 10:
Alex Deucher6f34be52010-11-21 10:59:01 -05003497 if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
3498 rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
Alex Deucherd4877cf2009-12-04 16:56:37 -05003499 queue_hotplug = true;
3500 DRM_DEBUG("IH: HPD5\n");
Alex Deuchere0df1ac2009-12-04 15:12:21 -05003501 }
3502 break;
3503 case 12:
Alex Deucher6f34be52010-11-21 10:59:01 -05003504 if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
3505 rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
Alex Deucherd4877cf2009-12-04 16:56:37 -05003506 queue_hotplug = true;
3507 DRM_DEBUG("IH: HPD6\n");
Alex Deuchere0df1ac2009-12-04 15:12:21 -05003508 }
3509 break;
3510 default:
Alex Deucherb0425892010-01-11 19:47:38 -05003511 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
Alex Deuchere0df1ac2009-12-04 15:12:21 -05003512 break;
3513 }
3514 break;
Christian Koenigf2594932010-04-10 03:13:16 +02003515 case 21: /* HDMI */
3516 DRM_DEBUG("IH: HDMI: 0x%x\n", src_data);
3517 r600_audio_schedule_polling(rdev);
3518 break;
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003519 case 176: /* CP_INT in ring buffer */
3520 case 177: /* CP_INT in IB1 */
3521 case 178: /* CP_INT in IB2 */
3522 DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
3523 radeon_fence_process(rdev);
3524 break;
3525 case 181: /* CP EOP event */
3526 DRM_DEBUG("IH: CP EOP\n");
Alex Deucherd0f8a852010-09-04 05:04:34 -04003527 radeon_fence_process(rdev);
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003528 break;
Alex Deucher2031f772010-04-22 12:52:11 -04003529 case 233: /* GUI IDLE */
Ilija Hadzic303c8052011-06-07 14:54:48 -04003530 DRM_DEBUG("IH: GUI idle\n");
Alex Deucher2031f772010-04-22 12:52:11 -04003531 rdev->pm.gui_idle = true;
3532 wake_up(&rdev->irq.idle_queue);
3533 break;
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003534 default:
Alex Deucherb0425892010-01-11 19:47:38 -05003535 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003536 break;
3537 }
3538
3539 /* wptr/rptr are in bytes! */
Jerome Glisse0c452492010-01-15 14:44:37 +01003540 rptr += 16;
3541 rptr &= rdev->ih.ptr_mask;
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003542 }
3543 /* make sure wptr hasn't changed while processing */
3544 wptr = r600_get_ih_wptr(rdev);
3545 if (wptr != rdev->ih.wptr)
3546 goto restart_ih;
Alex Deucherd4877cf2009-12-04 16:56:37 -05003547 if (queue_hotplug)
Tejun Heo32c87fc2011-01-03 14:49:32 +01003548 schedule_work(&rdev->hotplug_work);
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003549 rdev->ih.rptr = rptr;
3550 WREG32(IH_RB_RPTR, rdev->ih.rptr);
3551 spin_unlock_irqrestore(&rdev->ih.lock, flags);
3552 return IRQ_HANDLED;
3553}
Jerome Glisse3ce0a232009-09-08 10:10:24 +10003554
3555/*
3556 * Debugfs info
3557 */
3558#if defined(CONFIG_DEBUG_FS)
3559
3560static int r600_debugfs_cp_ring_info(struct seq_file *m, void *data)
3561{
3562 struct drm_info_node *node = (struct drm_info_node *) m->private;
3563 struct drm_device *dev = node->minor->dev;
3564 struct radeon_device *rdev = dev->dev_private;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10003565 unsigned count, i, j;
3566
3567 radeon_ring_free_size(rdev);
Rafał Miłeckid6840762009-11-10 22:26:21 +01003568 count = (rdev->cp.ring_size / 4) - rdev->cp.ring_free_dw;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10003569 seq_printf(m, "CP_STAT 0x%08x\n", RREG32(CP_STAT));
Rafał Miłeckid6840762009-11-10 22:26:21 +01003570 seq_printf(m, "CP_RB_WPTR 0x%08x\n", RREG32(CP_RB_WPTR));
3571 seq_printf(m, "CP_RB_RPTR 0x%08x\n", RREG32(CP_RB_RPTR));
3572 seq_printf(m, "driver's copy of the CP_RB_WPTR 0x%08x\n", rdev->cp.wptr);
3573 seq_printf(m, "driver's copy of the CP_RB_RPTR 0x%08x\n", rdev->cp.rptr);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10003574 seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw);
3575 seq_printf(m, "%u dwords in ring\n", count);
Rafał Miłeckid6840762009-11-10 22:26:21 +01003576 i = rdev->cp.rptr;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10003577 for (j = 0; j <= count; j++) {
Jerome Glisse3ce0a232009-09-08 10:10:24 +10003578 seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]);
Rafał Miłeckid6840762009-11-10 22:26:21 +01003579 i = (i + 1) & rdev->cp.ptr_mask;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10003580 }
3581 return 0;
3582}
3583
3584static int r600_debugfs_mc_info(struct seq_file *m, void *data)
3585{
3586 struct drm_info_node *node = (struct drm_info_node *) m->private;
3587 struct drm_device *dev = node->minor->dev;
3588 struct radeon_device *rdev = dev->dev_private;
3589
3590 DREG32_SYS(m, rdev, R_000E50_SRBM_STATUS);
3591 DREG32_SYS(m, rdev, VM_L2_STATUS);
3592 return 0;
3593}
3594
3595static struct drm_info_list r600_mc_info_list[] = {
3596 {"r600_mc_info", r600_debugfs_mc_info, 0, NULL},
3597 {"r600_ring_info", r600_debugfs_cp_ring_info, 0, NULL},
3598};
3599#endif
3600
3601int r600_debugfs_mc_info_init(struct radeon_device *rdev)
3602{
3603#if defined(CONFIG_DEBUG_FS)
3604 return radeon_debugfs_add_files(rdev, r600_mc_info_list, ARRAY_SIZE(r600_mc_info_list));
3605#else
3606 return 0;
3607#endif
Jerome Glisse771fe6b2009-06-05 14:42:42 +02003608}
Jerome Glisse062b3892010-02-04 20:36:39 +01003609
3610/**
3611 * r600_ioctl_wait_idle - flush host path cache on wait idle ioctl
3612 * @rdev: radeon device structure
3613 * @bo: buffer object struct which userspace is waiting on to go idle
3614 *
3615 * Some R6XX/R7XX chips don't seem to take the HDP flush performed
3616 * through the ring buffer into account, which leads to rendering
3617 * corruption (see http://bugzilla.kernel.org/show_bug.cgi?id=15186).
3618 * To avoid this we perform the HDP flush directly via an MMIO register write.
3619 */
3620void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
3621{
Alex Deucher812d0462010-07-26 18:51:53 -04003622	/* r7xx hw bug: write to HDP_DEBUG1 followed by an fb read
Alex Deucherf3886f82010-12-08 10:05:34 -05003623	 * rather than a write to HDP_REG_COHERENCY_FLUSH_CNTL.
3624	 * That method seems to cause problems on some AGP cards, so just
3625	 * use the old method for them.
Alex Deucher812d0462010-07-26 18:51:53 -04003626 */
Alex Deuchere4884592010-09-27 10:57:10 -04003627 if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
Alex Deucherf3886f82010-12-08 10:05:34 -05003628 rdev->vram_scratch.ptr && !(rdev->flags & RADEON_IS_AGP)) {
Alex Deucher87cbf8f2010-08-27 13:59:54 -04003629 void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
Alex Deucher812d0462010-07-26 18:51:53 -04003630 u32 tmp;
3631
3632 WREG32(HDP_DEBUG1, 0);
3633 tmp = readl((void __iomem *)ptr);
3634 } else
3635 WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
Jerome Glisse062b3892010-02-04 20:36:39 +01003636}
Alex Deucher3313e3d2011-01-06 18:49:34 -05003637
3638void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes)
3639{
3640 u32 link_width_cntl, mask, target_reg;
3641
3642 if (rdev->flags & RADEON_IS_IGP)
3643 return;
3644
3645 if (!(rdev->flags & RADEON_IS_PCIE))
3646 return;
3647
3648 /* x2 cards have a special sequence */
3649 if (ASIC_IS_X2(rdev))
3650 return;
3651
3652 /* FIXME wait for idle */
3653
3654 switch (lanes) {
3655 case 0:
3656 mask = RADEON_PCIE_LC_LINK_WIDTH_X0;
3657 break;
3658 case 1:
3659 mask = RADEON_PCIE_LC_LINK_WIDTH_X1;
3660 break;
3661 case 2:
3662 mask = RADEON_PCIE_LC_LINK_WIDTH_X2;
3663 break;
3664 case 4:
3665 mask = RADEON_PCIE_LC_LINK_WIDTH_X4;
3666 break;
3667 case 8:
3668 mask = RADEON_PCIE_LC_LINK_WIDTH_X8;
3669 break;
3670 case 12:
3671 mask = RADEON_PCIE_LC_LINK_WIDTH_X12;
3672 break;
3673 case 16:
3674 default:
3675 mask = RADEON_PCIE_LC_LINK_WIDTH_X16;
3676 break;
3677 }
3678
3679 link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
3680
3681 if ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) ==
3682 (mask << RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT))
3683 return;
3684
3685 if (link_width_cntl & R600_PCIE_LC_UPCONFIGURE_DIS)
3686 return;
3687
3688 link_width_cntl &= ~(RADEON_PCIE_LC_LINK_WIDTH_MASK |
3689 RADEON_PCIE_LC_RECONFIG_NOW |
3690 R600_PCIE_LC_RENEGOTIATE_EN |
3691 R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE);
3692 link_width_cntl |= mask;
3693
3694 WREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
3695
3696 /* some northbridges can renegotiate the link rather than requiring
3697 * a complete re-config.
3698 * e.g., AMD 780/790 northbridges (pci ids: 0x5956, 0x5957, 0x5958, etc.)
3699 */
3700 if (link_width_cntl & R600_PCIE_LC_RENEGOTIATION_SUPPORT)
3701 link_width_cntl |= R600_PCIE_LC_RENEGOTIATE_EN | R600_PCIE_LC_UPCONFIGURE_SUPPORT;
3702 else
3703 link_width_cntl |= R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE;
3704
3705 WREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL, (link_width_cntl |
3706 RADEON_PCIE_LC_RECONFIG_NOW));
3707
3708 if (rdev->family >= CHIP_RV770)
3709 target_reg = R700_TARGET_AND_CURRENT_PROFILE_INDEX;
3710 else
3711 target_reg = R600_TARGET_AND_CURRENT_PROFILE_INDEX;
3712
3713 /* wait for lane set to complete */
3714 link_width_cntl = RREG32(target_reg);
3715 while (link_width_cntl == 0xffffffff)
3716 link_width_cntl = RREG32(target_reg);
3717
3718}
3719
3720int r600_get_pcie_lanes(struct radeon_device *rdev)
3721{
3722 u32 link_width_cntl;
3723
3724 if (rdev->flags & RADEON_IS_IGP)
3725 return 0;
3726
3727 if (!(rdev->flags & RADEON_IS_PCIE))
3728 return 0;
3729
3730 /* x2 cards have a special sequence */
3731 if (ASIC_IS_X2(rdev))
3732 return 0;
3733
3734 /* FIXME wait for idle */
3735
3736 link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
3737
3738 switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) {
3739 case RADEON_PCIE_LC_LINK_WIDTH_X0:
3740 return 0;
3741 case RADEON_PCIE_LC_LINK_WIDTH_X1:
3742 return 1;
3743 case RADEON_PCIE_LC_LINK_WIDTH_X2:
3744 return 2;
3745 case RADEON_PCIE_LC_LINK_WIDTH_X4:
3746 return 4;
3747 case RADEON_PCIE_LC_LINK_WIDTH_X8:
3748 return 8;
3749 case RADEON_PCIE_LC_LINK_WIDTH_X16:
3750 default:
3751 return 16;
3752 }
3753}
3754
Alex Deucher9e46a482011-01-06 18:49:35 -05003755static void r600_pcie_gen2_enable(struct radeon_device *rdev)
3756{
3757 u32 link_width_cntl, lanes, speed_cntl, training_cntl, tmp;
3758 u16 link_cntl2;
3759
Alex Deucherd42dd572011-01-12 20:05:11 -05003760 if (radeon_pcie_gen2 == 0)
3761 return;
3762
Alex Deucher9e46a482011-01-06 18:49:35 -05003763 if (rdev->flags & RADEON_IS_IGP)
3764 return;
3765
3766 if (!(rdev->flags & RADEON_IS_PCIE))
3767 return;
3768
3769 /* x2 cards have a special sequence */
3770 if (ASIC_IS_X2(rdev))
3771 return;
3772
3773 /* only RV6xx+ chips are supported */
3774 if (rdev->family <= CHIP_R600)
3775 return;
3776
3777 /* 55 nm r6xx asics */
3778 if ((rdev->family == CHIP_RV670) ||
3779 (rdev->family == CHIP_RV620) ||
3780 (rdev->family == CHIP_RV635)) {
3781 /* advertise upconfig capability */
3782 link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
3783 link_width_cntl &= ~LC_UPCONFIGURE_DIS;
3784 WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
3785 link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
3786 if (link_width_cntl & LC_RENEGOTIATION_SUPPORT) {
3787 lanes = (link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT;
3788 link_width_cntl &= ~(LC_LINK_WIDTH_MASK |
3789 LC_RECONFIG_ARC_MISSING_ESCAPE);
3790 link_width_cntl |= lanes | LC_RECONFIG_NOW | LC_RENEGOTIATE_EN;
3791 WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
3792 } else {
3793 link_width_cntl |= LC_UPCONFIGURE_DIS;
3794 WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
3795 }
3796 }
3797
3798 speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
3799 if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
3800 (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
3801
3802 /* 55 nm r6xx asics */
3803 if ((rdev->family == CHIP_RV670) ||
3804 (rdev->family == CHIP_RV620) ||
3805 (rdev->family == CHIP_RV635)) {
3806 WREG32(MM_CFGREGS_CNTL, 0x8);
3807 link_cntl2 = RREG32(0x4088);
3808 WREG32(MM_CFGREGS_CNTL, 0);
3809 /* not supported yet */
3810 if (link_cntl2 & SELECTABLE_DEEMPHASIS)
3811 return;
3812 }
3813
3814 speed_cntl &= ~LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK;
3815 speed_cntl |= (0x3 << LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT);
3816 speed_cntl &= ~LC_VOLTAGE_TIMER_SEL_MASK;
3817 speed_cntl &= ~LC_FORCE_DIS_HW_SPEED_CHANGE;
3818 speed_cntl |= LC_FORCE_EN_HW_SPEED_CHANGE;
3819 WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
3820
3821 tmp = RREG32(0x541c);
3822 WREG32(0x541c, tmp | 0x8);
3823 WREG32(MM_CFGREGS_CNTL, MM_WR_TO_CFG_EN);
3824 link_cntl2 = RREG16(0x4088);
3825 link_cntl2 &= ~TARGET_LINK_SPEED_MASK;
3826 link_cntl2 |= 0x2;
3827 WREG16(0x4088, link_cntl2);
3828 WREG32(MM_CFGREGS_CNTL, 0);
3829
3830 if ((rdev->family == CHIP_RV670) ||
3831 (rdev->family == CHIP_RV620) ||
3832 (rdev->family == CHIP_RV635)) {
3833 training_cntl = RREG32_PCIE_P(PCIE_LC_TRAINING_CNTL);
3834 training_cntl &= ~LC_POINT_7_PLUS_EN;
3835 WREG32_PCIE_P(PCIE_LC_TRAINING_CNTL, training_cntl);
3836 } else {
3837 speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
3838 speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
3839 WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
3840 }
3841
3842 speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
3843 speed_cntl |= LC_GEN2_EN_STRAP;
3844 WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
3845
3846 } else {
3847 link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
3848 /* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
3849 if (1)
3850 link_width_cntl |= LC_UPCONFIGURE_DIS;
3851 else
3852 link_width_cntl &= ~LC_UPCONFIGURE_DIS;
3853 WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
3854 }
3855}