/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/firmware.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_mode.h"
#include "r600d.h"
#include "atom.h"
#include "avivod.h"

#define PFP_UCODE_SIZE 576
#define PM4_UCODE_SIZE 1792
#define RLC_UCODE_SIZE 768
#define R700_PFP_UCODE_SIZE 848
#define R700_PM4_UCODE_SIZE 1360
#define R700_RLC_UCODE_SIZE 1024
#define EVERGREEN_PFP_UCODE_SIZE 1120
#define EVERGREEN_PM4_UCODE_SIZE 1376
#define EVERGREEN_RLC_UCODE_SIZE 768
#define CAYMAN_RLC_UCODE_SIZE 1024

/* Firmware Names */
MODULE_FIRMWARE("radeon/R600_pfp.bin");
MODULE_FIRMWARE("radeon/R600_me.bin");
MODULE_FIRMWARE("radeon/RV610_pfp.bin");
MODULE_FIRMWARE("radeon/RV610_me.bin");
MODULE_FIRMWARE("radeon/RV630_pfp.bin");
MODULE_FIRMWARE("radeon/RV630_me.bin");
MODULE_FIRMWARE("radeon/RV620_pfp.bin");
MODULE_FIRMWARE("radeon/RV620_me.bin");
MODULE_FIRMWARE("radeon/RV635_pfp.bin");
MODULE_FIRMWARE("radeon/RV635_me.bin");
MODULE_FIRMWARE("radeon/RV670_pfp.bin");
MODULE_FIRMWARE("radeon/RV670_me.bin");
MODULE_FIRMWARE("radeon/RS780_pfp.bin");
MODULE_FIRMWARE("radeon/RS780_me.bin");
MODULE_FIRMWARE("radeon/RV770_pfp.bin");
MODULE_FIRMWARE("radeon/RV770_me.bin");
MODULE_FIRMWARE("radeon/RV730_pfp.bin");
MODULE_FIRMWARE("radeon/RV730_me.bin");
MODULE_FIRMWARE("radeon/RV710_pfp.bin");
MODULE_FIRMWARE("radeon/RV710_me.bin");
MODULE_FIRMWARE("radeon/R600_rlc.bin");
MODULE_FIRMWARE("radeon/R700_rlc.bin");
MODULE_FIRMWARE("radeon/CEDAR_pfp.bin");
MODULE_FIRMWARE("radeon/CEDAR_me.bin");
MODULE_FIRMWARE("radeon/CEDAR_rlc.bin");
MODULE_FIRMWARE("radeon/REDWOOD_pfp.bin");
MODULE_FIRMWARE("radeon/REDWOOD_me.bin");
MODULE_FIRMWARE("radeon/REDWOOD_rlc.bin");
MODULE_FIRMWARE("radeon/JUNIPER_pfp.bin");
MODULE_FIRMWARE("radeon/JUNIPER_me.bin");
MODULE_FIRMWARE("radeon/JUNIPER_rlc.bin");
MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin");
MODULE_FIRMWARE("radeon/CYPRESS_me.bin");
MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin");
MODULE_FIRMWARE("radeon/PALM_pfp.bin");
MODULE_FIRMWARE("radeon/PALM_me.bin");
MODULE_FIRMWARE("radeon/SUMO_rlc.bin");
MODULE_FIRMWARE("radeon/SUMO_pfp.bin");
MODULE_FIRMWARE("radeon/SUMO_me.bin");
MODULE_FIRMWARE("radeon/SUMO2_pfp.bin");
MODULE_FIRMWARE("radeon/SUMO2_me.bin");

int r600_debugfs_mc_info_init(struct radeon_device *rdev);

/* r600,rv610,rv630,rv620,rv635,rv670 */
int r600_mc_wait_for_idle(struct radeon_device *rdev);
void r600_gpu_init(struct radeon_device *rdev);
void r600_fini(struct radeon_device *rdev);
void r600_irq_disable(struct radeon_device *rdev);
static void r600_pcie_gen2_enable(struct radeon_device *rdev);

/* get temperature in millidegrees */
int rv6xx_get_temp(struct radeon_device *rdev)
{
	u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >>
		ASIC_T_SHIFT;
	int actual_temp = temp & 0xff;

	if (temp & 0x100)
		actual_temp -= 256;

	return actual_temp * 1000;
}

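/*
 * A worked example of the decode above (illustrative values, not from
 * the source): ASIC_T is a 9-bit two's-complement field, so a raw
 * reading of 0x1f0 gives actual_temp = 0xf0 = 240; the sign bit 0x100
 * is set, so 240 - 256 = -16 degrees C, returned as -16000
 * millidegrees.
 */
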
void r600_pm_get_dynpm_state(struct radeon_device *rdev)
{
	int i;

	rdev->pm.dynpm_can_upclock = true;
	rdev->pm.dynpm_can_downclock = true;

	/* power state array is low to high, default is first */
	if ((rdev->flags & RADEON_IS_IGP) || (rdev->family == CHIP_R600)) {
		int min_power_state_index = 0;

		if (rdev->pm.num_power_states > 2)
			min_power_state_index = 1;

		switch (rdev->pm.dynpm_planned_action) {
		case DYNPM_ACTION_MINIMUM:
			rdev->pm.requested_power_state_index = min_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_downclock = false;
			break;
		case DYNPM_ACTION_DOWNCLOCK:
			if (rdev->pm.current_power_state_index == min_power_state_index) {
				rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
				rdev->pm.dynpm_can_downclock = false;
			} else {
				if (rdev->pm.active_crtc_count > 1) {
					for (i = 0; i < rdev->pm.num_power_states; i++) {
						if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
							continue;
						else if (i >= rdev->pm.current_power_state_index) {
							rdev->pm.requested_power_state_index =
								rdev->pm.current_power_state_index;
							break;
						} else {
							rdev->pm.requested_power_state_index = i;
							break;
						}
					}
				} else {
					if (rdev->pm.current_power_state_index == 0)
						rdev->pm.requested_power_state_index =
							rdev->pm.num_power_states - 1;
					else
						rdev->pm.requested_power_state_index =
							rdev->pm.current_power_state_index - 1;
				}
			}
			rdev->pm.requested_clock_mode_index = 0;
			/* don't use the power state if crtcs are active and no display flag is set */
			if ((rdev->pm.active_crtc_count > 0) &&
			    (rdev->pm.power_state[rdev->pm.requested_power_state_index].
			     clock_info[rdev->pm.requested_clock_mode_index].flags &
			     RADEON_PM_MODE_NO_DISPLAY)) {
				rdev->pm.requested_power_state_index++;
			}
			break;
		case DYNPM_ACTION_UPCLOCK:
			if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
				rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
				rdev->pm.dynpm_can_upclock = false;
			} else {
				if (rdev->pm.active_crtc_count > 1) {
					for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
						if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
							continue;
						else if (i <= rdev->pm.current_power_state_index) {
							rdev->pm.requested_power_state_index =
								rdev->pm.current_power_state_index;
							break;
						} else {
							rdev->pm.requested_power_state_index = i;
							break;
						}
					}
				} else
					rdev->pm.requested_power_state_index =
						rdev->pm.current_power_state_index + 1;
			}
			rdev->pm.requested_clock_mode_index = 0;
			break;
		case DYNPM_ACTION_DEFAULT:
			rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_upclock = false;
			break;
		case DYNPM_ACTION_NONE:
		default:
			DRM_ERROR("Requested mode for undefined action\n");
			return;
		}
	} else {
		/* XXX select a power state based on AC/DC, single/dualhead, etc. */
		/* for now just select the first power state and switch between clock modes */
		/* power state array is low to high, default is first (0) */
		if (rdev->pm.active_crtc_count > 1) {
			rdev->pm.requested_power_state_index = -1;
			/* start at 1 as we don't want the default mode */
			for (i = 1; i < rdev->pm.num_power_states; i++) {
				if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
					continue;
				else if ((rdev->pm.power_state[i].type == POWER_STATE_TYPE_PERFORMANCE) ||
					 (rdev->pm.power_state[i].type == POWER_STATE_TYPE_BATTERY)) {
					rdev->pm.requested_power_state_index = i;
					break;
				}
			}
			/* if nothing selected, grab the default state. */
			if (rdev->pm.requested_power_state_index == -1)
				rdev->pm.requested_power_state_index = 0;
		} else
			rdev->pm.requested_power_state_index = 1;

		switch (rdev->pm.dynpm_planned_action) {
		case DYNPM_ACTION_MINIMUM:
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_downclock = false;
			break;
		case DYNPM_ACTION_DOWNCLOCK:
			if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
				if (rdev->pm.current_clock_mode_index == 0) {
					rdev->pm.requested_clock_mode_index = 0;
					rdev->pm.dynpm_can_downclock = false;
				} else
					rdev->pm.requested_clock_mode_index =
						rdev->pm.current_clock_mode_index - 1;
			} else {
				rdev->pm.requested_clock_mode_index = 0;
				rdev->pm.dynpm_can_downclock = false;
			}
			/* don't use the power state if crtcs are active and no display flag is set */
			if ((rdev->pm.active_crtc_count > 0) &&
			    (rdev->pm.power_state[rdev->pm.requested_power_state_index].
			     clock_info[rdev->pm.requested_clock_mode_index].flags &
			     RADEON_PM_MODE_NO_DISPLAY)) {
				rdev->pm.requested_clock_mode_index++;
			}
			break;
		case DYNPM_ACTION_UPCLOCK:
			if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
				if (rdev->pm.current_clock_mode_index ==
				    (rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1)) {
					rdev->pm.requested_clock_mode_index = rdev->pm.current_clock_mode_index;
					rdev->pm.dynpm_can_upclock = false;
				} else
					rdev->pm.requested_clock_mode_index =
						rdev->pm.current_clock_mode_index + 1;
			} else {
				rdev->pm.requested_clock_mode_index =
					rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1;
				rdev->pm.dynpm_can_upclock = false;
			}
			break;
		case DYNPM_ACTION_DEFAULT:
			rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_upclock = false;
			break;
		case DYNPM_ACTION_NONE:
		default:
			DRM_ERROR("Requested mode for undefined action\n");
			return;
		}
	}

	DRM_DEBUG_DRIVER("Requested: e: %d m: %d p: %d\n",
			 rdev->pm.power_state[rdev->pm.requested_power_state_index].
			 clock_info[rdev->pm.requested_clock_mode_index].sclk,
			 rdev->pm.power_state[rdev->pm.requested_power_state_index].
			 clock_info[rdev->pm.requested_clock_mode_index].mclk,
			 rdev->pm.power_state[rdev->pm.requested_power_state_index].
			 pcie_lanes);
}

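/*
 * An illustrative walk of the IGP/R600 branch above, assuming a
 * three-entry power state table (indices 0..2, low to high):
 * min_power_state_index is 1, so DYNPM_ACTION_MINIMUM requests state 1,
 * DYNPM_ACTION_DOWNCLOCK from state 2 requests state 1 (clearing
 * dynpm_can_downclock only once the minimum is already current), and
 * DYNPM_ACTION_UPCLOCK from state 1 requests state 2, clearing
 * dynpm_can_upclock at the top of the table.
 */
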
static int r600_pm_get_type_index(struct radeon_device *rdev,
				  enum radeon_pm_state_type ps_type,
				  int instance)
{
	int i;
	int found_instance = -1;

	for (i = 0; i < rdev->pm.num_power_states; i++) {
		if (rdev->pm.power_state[i].type == ps_type) {
			found_instance++;
			if (found_instance == instance)
				return i;
		}
	}
	/* return default if no match */
	return rdev->pm.default_power_state_index;
}

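/*
 * Example use (as in the profile setup below):
 *
 *	r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
 *
 * returns the index of the first battery-type entry in the power state
 * array, falling back to default_power_state_index when no entry of the
 * requested type exists.
 */
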
void rs780_pm_init_profile(struct radeon_device *rdev)
{
	if (rdev->pm.num_power_states == 2) {
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	} else if (rdev->pm.num_power_states == 3) {
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	} else {
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 3;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 3;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	}
}

void r600_pm_init_profile(struct radeon_device *rdev)
{
	if (rdev->family == CHIP_R600) {
		/* XXX */
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	} else {
		if (rdev->pm.num_power_states < 4) {
			/* default */
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
			/* low sh */
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
			/* mid sh */
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
			/* high sh */
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
			/* low mh */
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
			/* mid mh */
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
			/* high mh */
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
		} else {
			/* default */
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
			/* low sh */
			if (rdev->flags & RADEON_IS_MOBILITY) {
				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
			} else {
				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
			}
			/* mid sh */
			if (rdev->flags & RADEON_IS_MOBILITY) {
				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
			} else {
				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
			}
			/* high sh */
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx =
				r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx =
				r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
			/* low mh */
			if (rdev->flags & RADEON_IS_MOBILITY) {
				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
			} else {
				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
			}
			/* mid mh */
			if (rdev->flags & RADEON_IS_MOBILITY) {
				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
			} else {
				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
			}
			/* high mh */
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx =
				r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx =
				r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
		}
	}
}

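/*
 * Note on the tables above: each profile pairs a power state index
 * (ps_idx) with a clock mode index (cm_idx) for the dpms-off and
 * dpms-on cases, across low/mid/high profiles for single-head (sh) and
 * multi-head (mh) setups.  On R600 itself (see the XXX above) every
 * profile simply points at the default power state.
 */
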
void r600_pm_misc(struct radeon_device *rdev)
{
	int req_ps_idx = rdev->pm.requested_power_state_index;
	int req_cm_idx = rdev->pm.requested_clock_mode_index;
	struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
	struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;

	if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
		/* 0xff01 is a flag rather than an actual voltage */
		if (voltage->voltage == 0xff01)
			return;
		if (voltage->voltage != rdev->pm.current_vddc) {
			radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
			rdev->pm.current_vddc = voltage->voltage;
			DRM_DEBUG_DRIVER("Setting: v: %d\n", voltage->voltage);
		}
	}
}

bool r600_gui_idle(struct radeon_device *rdev)
{
	return !(RREG32(GRBM_STATUS) & GUI_ACTIVE);
}

/* hpd for digital panel detect/disconnect */
bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
	bool connected = false;

	if (ASIC_IS_DCE3(rdev)) {
		switch (hpd) {
		case RADEON_HPD_1:
			if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_2:
			if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_3:
			if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_4:
			if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		/* DCE 3.2 */
		case RADEON_HPD_5:
			if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_6:
			if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		default:
			break;
		}
	} else {
		switch (hpd) {
		case RADEON_HPD_1:
			if (RREG32(DC_HOT_PLUG_DETECT1_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_2:
			if (RREG32(DC_HOT_PLUG_DETECT2_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_3:
			if (RREG32(DC_HOT_PLUG_DETECT3_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		default:
			break;
		}
	}
	return connected;
}

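/*
 * Layout note: DCE3 parts expose up to six HPD pins through the DC_HPDx
 * blocks (HPD 5 and 6 are DCE 3.2 only), while older parts use the
 * three DC_HOT_PLUG_DETECTx blocks.  The same split is mirrored in
 * r600_hpd_set_polarity(), r600_hpd_init() and r600_hpd_fini() below.
 */
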
void r600_hpd_set_polarity(struct radeon_device *rdev,
			   enum radeon_hpd_id hpd)
{
	u32 tmp;
	bool connected = r600_hpd_sense(rdev, hpd);

	if (ASIC_IS_DCE3(rdev)) {
		switch (hpd) {
		case RADEON_HPD_1:
			tmp = RREG32(DC_HPD1_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD1_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_2:
			tmp = RREG32(DC_HPD2_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD2_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_3:
			tmp = RREG32(DC_HPD3_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD3_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_4:
			tmp = RREG32(DC_HPD4_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD4_INT_CONTROL, tmp);
			break;
		/* DCE 3.2 */
		case RADEON_HPD_5:
			tmp = RREG32(DC_HPD5_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD5_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_6:
			tmp = RREG32(DC_HPD6_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD6_INT_CONTROL, tmp);
			break;
		default:
			break;
		}
	} else {
		switch (hpd) {
		case RADEON_HPD_1:
			tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_2:
			tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_3:
			tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
			break;
		default:
			break;
		}
	}
}

void r600_hpd_init(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;

	if (ASIC_IS_DCE3(rdev)) {
		u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa);
		if (ASIC_IS_DCE32(rdev))
			tmp |= DC_HPDx_EN;

		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HPD1_CONTROL, tmp);
				rdev->irq.hpd[0] = true;
				break;
			case RADEON_HPD_2:
				WREG32(DC_HPD2_CONTROL, tmp);
				rdev->irq.hpd[1] = true;
				break;
			case RADEON_HPD_3:
				WREG32(DC_HPD3_CONTROL, tmp);
				rdev->irq.hpd[2] = true;
				break;
			case RADEON_HPD_4:
				WREG32(DC_HPD4_CONTROL, tmp);
				rdev->irq.hpd[3] = true;
				break;
			/* DCE 3.2 */
			case RADEON_HPD_5:
				WREG32(DC_HPD5_CONTROL, tmp);
				rdev->irq.hpd[4] = true;
				break;
			case RADEON_HPD_6:
				WREG32(DC_HPD6_CONTROL, tmp);
				rdev->irq.hpd[5] = true;
				break;
			default:
				break;
			}
		}
	} else {
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				rdev->irq.hpd[0] = true;
				break;
			case RADEON_HPD_2:
				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				rdev->irq.hpd[1] = true;
				break;
			case RADEON_HPD_3:
				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				rdev->irq.hpd[2] = true;
				break;
			default:
				break;
			}
		}
	}
	if (rdev->irq.installed)
		r600_irq_set(rdev);
}

void r600_hpd_fini(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;

	if (ASIC_IS_DCE3(rdev)) {
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HPD1_CONTROL, 0);
				rdev->irq.hpd[0] = false;
				break;
			case RADEON_HPD_2:
				WREG32(DC_HPD2_CONTROL, 0);
				rdev->irq.hpd[1] = false;
				break;
			case RADEON_HPD_3:
				WREG32(DC_HPD3_CONTROL, 0);
				rdev->irq.hpd[2] = false;
				break;
			case RADEON_HPD_4:
				WREG32(DC_HPD4_CONTROL, 0);
				rdev->irq.hpd[3] = false;
				break;
			/* DCE 3.2 */
			case RADEON_HPD_5:
				WREG32(DC_HPD5_CONTROL, 0);
				rdev->irq.hpd[4] = false;
				break;
			case RADEON_HPD_6:
				WREG32(DC_HPD6_CONTROL, 0);
				rdev->irq.hpd[5] = false;
				break;
			default:
				break;
			}
		}
	} else {
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, 0);
				rdev->irq.hpd[0] = false;
				break;
			case RADEON_HPD_2:
				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, 0);
				rdev->irq.hpd[1] = false;
				break;
			case RADEON_HPD_3:
				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, 0);
				rdev->irq.hpd[2] = false;
				break;
			default:
				break;
			}
		}
	}
}

/*
 * R600 PCIE GART
 */
void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	/* flush hdp cache so updates hit vram */
	if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
	    !(rdev->flags & RADEON_IS_AGP)) {
		void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
		u32 tmp;

		/* r7xx hw bug.  Write to HDP_DEBUG1 followed by an fb read
		 * rather than a write to HDP_REG_COHERENCY_FLUSH_CNTL.
		 * This seems to cause problems on some AGP cards. Just use the old
		 * method for them.
		 */
		WREG32(HDP_DEBUG1, 0);
		tmp = readl((void __iomem *)ptr);
	} else
		WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);

	WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (rdev->mc.gtt_end - 1) >> 12);
	WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
		tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
		if (tmp == 2) {
			printk(KERN_WARNING "[drm] r600 flush TLB failed\n");
			return;
		}
		if (tmp) {
			return;
		}
		udelay(1);
	}
}

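/*
 * Note on the poll above: the invalidation window is programmed in
 * 4KB pages (hence the ">> 12" shifts).  Per the loop logic, a
 * RESPONSE_TYPE of 2 is treated as a failed flush, while any other
 * non-zero response ends the wait.
 */
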
int r600_pcie_gart_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.table.vram.robj) {
		WARN(1, "R600 PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	if (r)
		return r;
	rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
	return radeon_gart_table_vram_alloc(rdev);
}

int r600_pcie_gart_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int r, i;

	if (rdev->gart.table.vram.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	radeon_gart_restore(rdev);

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
			(u32)(rdev->dummy_page.addr >> 12));
	for (i = 1; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	r600_pcie_gart_tlb_flush(rdev);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20),
		 (unsigned long long)rdev->gart.table_addr);
	rdev->gart.ready = true;
	return 0;
}

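/*
 * A note on the address math above: the VM context registers take
 * 4KB-page numbers (the ">> 12" shifts), with out-of-range accesses
 * redirected to the dummy page.  Combined with the 8-byte entries sized
 * in r600_pcie_gart_init(), a 512MB GTT (an illustrative figure) needs
 * a 1MB page table in VRAM.
 */
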
void r600_pcie_gart_disable(struct radeon_device *rdev)
{
	u32 tmp;
	int i, r;

	/* Disable all tables */
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	/* Disable L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup L1 TLB control */
	tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	if (rdev->gart.table.vram.robj) {
		r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(rdev->gart.table.vram.robj);
			radeon_bo_unpin(rdev->gart.table.vram.robj);
			radeon_bo_unreserve(rdev->gart.table.vram.robj);
		}
	}
}

void r600_pcie_gart_fini(struct radeon_device *rdev)
{
	radeon_gart_fini(rdev);
	r600_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
}

void r600_agp_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int i;

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
}

int r600_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(R_000E50_SRBM_STATUS) & 0x3F00;
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -1;
}

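/*
 * Typical use, as in r600_mc_program() below:
 *
 *	if (r600_mc_wait_for_idle(rdev))
 *		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
 *
 * i.e. poll the SRBM status until the MC reports idle before touching
 * the aperture registers.
 */
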
static void r600_mc_program(struct radeon_device *rdev)
{
	struct rv515_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}
	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);

	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	/* Lockout access through VGA aperture (doesn't exist before R600) */
	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
	/* Update configuration */
	if (rdev->flags & RADEON_IS_AGP) {
		if (rdev->mc.vram_start < rdev->mc.gtt_start) {
			/* VRAM before AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.vram_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.gtt_end >> 12);
		} else {
			/* VRAM after AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.gtt_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.vram_end >> 12);
		}
	} else {
		WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12);
		WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12);
	}
	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(MC_VM_FB_LOCATION, tmp);
	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
	WREG32(HDP_NONSURFACE_INFO, (2 << 7));
	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	if (rdev->flags & RADEON_IS_AGP) {
		WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22);
		WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22);
		WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
	} else {
		WREG32(MC_VM_AGP_BASE, 0);
		WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
		WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
	}
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	rv515_mc_resume(rdev, &save);
	/* we need to own VRAM, so turn off the VGA renderer here
	 * to stop it overwriting our objects */
	rv515_vga_render_disable(rdev);
}

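/*
 * Sequencing note for r600_mc_program() above: the framebuffer location
 * registers can only be reprogrammed safely while clients are stopped,
 * hence the rv515_mc_stop()/rv515_mc_resume() bracket and the MC idle
 * waits on both sides of the aperture writes.
 */
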
Jerome Glissed594e462010-02-17 21:54:29 +00001166/**
1167 * r600_vram_gtt_location - try to find VRAM & GTT location
1168 * @rdev: radeon device structure holding all necessary informations
1169 * @mc: memory controller structure holding memory informations
1170 *
1171 * Function will place try to place VRAM at same place as in CPU (PCI)
1172 * address space as some GPU seems to have issue when we reprogram at
1173 * different address space.
1174 *
1175 * If there is not enough space to fit the unvisible VRAM after the
1176 * aperture then we limit the VRAM size to the aperture.
1177 *
1178 * If we are using AGP then place VRAM adjacent to AGP aperture are we need
1179 * them to be in one from GPU point of view so that we can program GPU to
1180 * catch access outside them (weird GPU policy see ??).
1181 *
1182 * This function will never fails, worst case are limiting VRAM or GTT.
1183 *
1184 * Note: GTT start, end, size should be initialized before calling this
1185 * function on AGP platform.
1186 */
static void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
	u64 size_bf, size_af;

	if (mc->mc_vram_size > 0xE0000000) {
		/* leave room for at least 512M GTT */
		dev_warn(rdev->dev, "limiting VRAM\n");
		mc->real_vram_size = 0xE0000000;
		mc->mc_vram_size = 0xE0000000;
	}
	if (rdev->flags & RADEON_IS_AGP) {
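		/* size_bf/size_af: room available before and after the AGP
		 * aperture in the GPU's 32-bit address space */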
		size_bf = mc->gtt_start;
		size_af = 0xFFFFFFFF - mc->gtt_end + 1;
		if (size_bf > size_af) {
			if (mc->mc_vram_size > size_bf) {
				dev_warn(rdev->dev, "limiting VRAM\n");
				mc->real_vram_size = size_bf;
				mc->mc_vram_size = size_bf;
			}
			mc->vram_start = mc->gtt_start - mc->mc_vram_size;
		} else {
			if (mc->mc_vram_size > size_af) {
				dev_warn(rdev->dev, "limiting VRAM\n");
				mc->real_vram_size = size_af;
				mc->mc_vram_size = size_af;
			}
			mc->vram_start = mc->gtt_end;
		}
		mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
		dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
				mc->mc_vram_size >> 20, mc->vram_start,
				mc->vram_end, mc->real_vram_size >> 20);
	} else {
		u64 base = 0;
		if (rdev->flags & RADEON_IS_IGP) {
			base = RREG32(MC_VM_FB_LOCATION) & 0xFFFF;
			base <<= 24;
		}
		radeon_vram_location(rdev, &rdev->mc, base);
		rdev->mc.gtt_base_align = 0;
		radeon_gtt_location(rdev, mc);
	}
}

int r600_mc_init(struct radeon_device *rdev)
{
	u32 tmp;
	int chansize, numchan;

	/* Get VRAM information */
	rdev->mc.vram_is_ddr = true;
	tmp = RREG32(RAMCFG);
	if (tmp & CHANSIZE_OVERRIDE) {
		chansize = 16;
	} else if (tmp & CHANSIZE_MASK) {
		chansize = 64;
	} else {
		chansize = 32;
	}
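	/* NOOFCHAN encodes the number of memory channels (1/2/4/8);
	 * the VRAM bus width is channels * channel size */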
	tmp = RREG32(CHMAP);
	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
	case 0:
	default:
		numchan = 1;
		break;
	case 1:
		numchan = 2;
		break;
	case 2:
		numchan = 4;
		break;
	case 3:
		numchan = 8;
		break;
	}
	rdev->mc.vram_width = numchan * chansize;
	/* Could aperture size report 0? */
	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
	/* Setup GPU memory space */
	rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
	rdev->mc.visible_vram_size = rdev->mc.aper_size;
	r600_vram_gtt_location(rdev, &rdev->mc);

	if (rdev->flags & RADEON_IS_IGP) {
		rs690_pm_info(rdev);
		rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
	}
	radeon_update_bandwidth_info(rdev);
	return 0;
}

/* We don't check whether the GPU really needs a reset; we simply do the
 * reset. It's up to the caller to determine if the GPU needs one. We
 * might add a helper function to check that.
 */
int r600_gpu_soft_reset(struct radeon_device *rdev)
{
	struct rv515_mc_save save;
	u32 grbm_busy_mask = S_008010_VC_BUSY(1) | S_008010_VGT_BUSY_NO_DMA(1) |
				S_008010_VGT_BUSY(1) | S_008010_TA03_BUSY(1) |
				S_008010_TC_BUSY(1) | S_008010_SX_BUSY(1) |
				S_008010_SH_BUSY(1) | S_008010_SPI03_BUSY(1) |
				S_008010_SMX_BUSY(1) | S_008010_SC_BUSY(1) |
				S_008010_PA_BUSY(1) | S_008010_DB03_BUSY(1) |
				S_008010_CR_BUSY(1) | S_008010_CB03_BUSY(1) |
				S_008010_GUI_ACTIVE(1);
	u32 grbm2_busy_mask = S_008014_SPI0_BUSY(1) | S_008014_SPI1_BUSY(1) |
			S_008014_SPI2_BUSY(1) | S_008014_SPI3_BUSY(1) |
			S_008014_TA0_BUSY(1) | S_008014_TA1_BUSY(1) |
			S_008014_TA2_BUSY(1) | S_008014_TA3_BUSY(1) |
			S_008014_DB0_BUSY(1) | S_008014_DB1_BUSY(1) |
			S_008014_DB2_BUSY(1) | S_008014_DB3_BUSY(1) |
			S_008014_CB0_BUSY(1) | S_008014_CB1_BUSY(1) |
			S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1);
	u32 tmp;

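	/* if the GUI engine is already idle there is nothing to reset */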
	if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
		return 0;

	dev_info(rdev->dev, "GPU softreset\n");
	dev_info(rdev->dev, "  R_008010_GRBM_STATUS=0x%08X\n",
		RREG32(R_008010_GRBM_STATUS));
	dev_info(rdev->dev, "  R_008014_GRBM_STATUS2=0x%08X\n",
		RREG32(R_008014_GRBM_STATUS2));
	dev_info(rdev->dev, "  R_000E50_SRBM_STATUS=0x%08X\n",
		RREG32(R_000E50_SRBM_STATUS));
	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	/* Disable CP parsing/prefetching */
	WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
	/* Check if any of the rendering blocks is busy and reset it */
	if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) ||
	    (RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) {
		tmp = S_008020_SOFT_RESET_CR(1) |
			S_008020_SOFT_RESET_DB(1) |
			S_008020_SOFT_RESET_CB(1) |
			S_008020_SOFT_RESET_PA(1) |
			S_008020_SOFT_RESET_SC(1) |
			S_008020_SOFT_RESET_SMX(1) |
			S_008020_SOFT_RESET_SPI(1) |
			S_008020_SOFT_RESET_SX(1) |
			S_008020_SOFT_RESET_SH(1) |
			S_008020_SOFT_RESET_TC(1) |
			S_008020_SOFT_RESET_TA(1) |
			S_008020_SOFT_RESET_VC(1) |
			S_008020_SOFT_RESET_VGT(1);
		dev_info(rdev->dev, "  R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(R_008020_GRBM_SOFT_RESET, tmp);
		RREG32(R_008020_GRBM_SOFT_RESET);
		mdelay(15);
		WREG32(R_008020_GRBM_SOFT_RESET, 0);
	}
	/* Reset CP (we always reset CP) */
	tmp = S_008020_SOFT_RESET_CP(1);
	dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
	WREG32(R_008020_GRBM_SOFT_RESET, tmp);
	RREG32(R_008020_GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(R_008020_GRBM_SOFT_RESET, 0);
	/* Wait a little for things to settle down */
	mdelay(1);
	dev_info(rdev->dev, "  R_008010_GRBM_STATUS=0x%08X\n",
		RREG32(R_008010_GRBM_STATUS));
	dev_info(rdev->dev, "  R_008014_GRBM_STATUS2=0x%08X\n",
		RREG32(R_008014_GRBM_STATUS2));
	dev_info(rdev->dev, "  R_000E50_SRBM_STATUS=0x%08X\n",
		RREG32(R_000E50_SRBM_STATUS));
	rv515_mc_resume(rdev, &save);
	return 0;
}

bool r600_gpu_is_lockup(struct radeon_device *rdev)
{
	u32 srbm_status;
	u32 grbm_status;
	u32 grbm_status2;
	struct r100_gpu_lockup *lockup;
	int r;

	if (rdev->family >= CHIP_RV770)
		lockup = &rdev->config.rv770.lockup;
	else
		lockup = &rdev->config.r600.lockup;

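	/* if the GUI engine is idle the GPU cannot be locked up; otherwise
	 * push a couple of NOPs through the ring and let the lockup tracker
	 * judge whether the CP read pointer still makes progress */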
	srbm_status = RREG32(R_000E50_SRBM_STATUS);
	grbm_status = RREG32(R_008010_GRBM_STATUS);
	grbm_status2 = RREG32(R_008014_GRBM_STATUS2);
	if (!G_008010_GUI_ACTIVE(grbm_status)) {
		r100_gpu_lockup_update(lockup, &rdev->cp);
		return false;
	}
	/* force CP activities */
	r = radeon_ring_lock(rdev, 2);
	if (!r) {
		/* PACKET2 NOP */
		radeon_ring_write(rdev, 0x80000000);
		radeon_ring_write(rdev, 0x80000000);
		radeon_ring_unlock_commit(rdev);
	}
	rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
	return r100_gpu_cp_is_lockup(rdev, lockup, &rdev->cp);
}

int r600_asic_reset(struct radeon_device *rdev)
{
	return r600_gpu_soft_reset(rdev);
}

static u32 r600_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
					     u32 num_backends,
					     u32 backend_disable_mask)
{
	u32 backend_map = 0;
	u32 enabled_backends_mask;
	u32 enabled_backends_count;
	u32 cur_pipe;
	u32 swizzle_pipe[R6XX_MAX_PIPES];
	u32 cur_backend;
	u32 i;

	if (num_tile_pipes > R6XX_MAX_PIPES)
		num_tile_pipes = R6XX_MAX_PIPES;
	if (num_tile_pipes < 1)
		num_tile_pipes = 1;
	if (num_backends > R6XX_MAX_BACKENDS)
		num_backends = R6XX_MAX_BACKENDS;
	if (num_backends < 1)
		num_backends = 1;

	enabled_backends_mask = 0;
	enabled_backends_count = 0;
	for (i = 0; i < R6XX_MAX_BACKENDS; ++i) {
		if (((backend_disable_mask >> i) & 1) == 0) {
			enabled_backends_mask |= (1 << i);
			++enabled_backends_count;
		}
		if (enabled_backends_count == num_backends)
			break;
	}

	if (enabled_backends_count == 0) {
		enabled_backends_mask = 1;
		enabled_backends_count = 1;
	}

	if (enabled_backends_count != num_backends)
		num_backends = enabled_backends_count;

	memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R6XX_MAX_PIPES);
	switch (num_tile_pipes) {
	case 1:
		swizzle_pipe[0] = 0;
		break;
	case 2:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		break;
	case 3:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		swizzle_pipe[2] = 2;
		break;
	case 4:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		swizzle_pipe[2] = 2;
		swizzle_pipe[3] = 3;
		break;
	case 5:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		swizzle_pipe[2] = 2;
		swizzle_pipe[3] = 3;
		swizzle_pipe[4] = 4;
		break;
	case 6:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 2;
		swizzle_pipe[2] = 4;
		swizzle_pipe[3] = 5;
		swizzle_pipe[4] = 1;
		swizzle_pipe[5] = 3;
		break;
	case 7:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 2;
		swizzle_pipe[2] = 4;
		swizzle_pipe[3] = 6;
		swizzle_pipe[4] = 1;
		swizzle_pipe[5] = 3;
		swizzle_pipe[6] = 5;
		break;
	case 8:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 2;
		swizzle_pipe[2] = 4;
		swizzle_pipe[3] = 6;
		swizzle_pipe[4] = 1;
		swizzle_pipe[5] = 3;
		swizzle_pipe[6] = 5;
		swizzle_pipe[7] = 7;
		break;
	}

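	/* walk the pipes in swizzled order and hand each one the next
	 * enabled backend; each pipe's 2-bit backend id lands at bit
	 * position swizzle_pipe[pipe] * 2 in the map */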
	cur_backend = 0;
	for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
		while (((1 << cur_backend) & enabled_backends_mask) == 0)
			cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;

		backend_map |= (u32)(((cur_backend & 3) << (swizzle_pipe[cur_pipe] * 2)));

		cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;
	}

	return backend_map;
}

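/* popcount: number of bits set in val (used to count enabled pipes and
 * backends) */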
int r600_count_pipe_bits(uint32_t val)
{
	int i, ret = 0;

	for (i = 0; i < 32; i++) {
		ret += val & 1;
		val >>= 1;
	}
	return ret;
}

void r600_gpu_init(struct radeon_device *rdev)
{
	u32 tiling_config;
	u32 ramcfg;
	u32 backend_map;
	u32 cc_rb_backend_disable;
	u32 cc_gc_shader_pipe_config;
	u32 tmp;
	int i, j;
	u32 sq_config;
	u32 sq_gpr_resource_mgmt_1 = 0;
	u32 sq_gpr_resource_mgmt_2 = 0;
	u32 sq_thread_resource_mgmt = 0;
	u32 sq_stack_resource_mgmt_1 = 0;
	u32 sq_stack_resource_mgmt_2 = 0;

	/* FIXME: implement */
	switch (rdev->family) {
	case CHIP_R600:
		rdev->config.r600.max_pipes = 4;
		rdev->config.r600.max_tile_pipes = 8;
		rdev->config.r600.max_simds = 4;
		rdev->config.r600.max_backends = 4;
		rdev->config.r600.max_gprs = 256;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 256;
		rdev->config.r600.max_hw_contexts = 8;
		rdev->config.r600.max_gs_threads = 16;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 2;
		break;
	case CHIP_RV630:
	case CHIP_RV635:
		rdev->config.r600.max_pipes = 2;
		rdev->config.r600.max_tile_pipes = 2;
		rdev->config.r600.max_simds = 3;
		rdev->config.r600.max_backends = 1;
		rdev->config.r600.max_gprs = 128;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 128;
		rdev->config.r600.max_hw_contexts = 8;
		rdev->config.r600.max_gs_threads = 4;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 2;
		break;
	case CHIP_RV610:
	case CHIP_RV620:
	case CHIP_RS780:
	case CHIP_RS880:
		rdev->config.r600.max_pipes = 1;
		rdev->config.r600.max_tile_pipes = 1;
		rdev->config.r600.max_simds = 2;
		rdev->config.r600.max_backends = 1;
		rdev->config.r600.max_gprs = 128;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 128;
		rdev->config.r600.max_hw_contexts = 4;
		rdev->config.r600.max_gs_threads = 4;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 1;
		break;
	case CHIP_RV670:
		rdev->config.r600.max_pipes = 4;
		rdev->config.r600.max_tile_pipes = 4;
		rdev->config.r600.max_simds = 4;
		rdev->config.r600.max_backends = 4;
		rdev->config.r600.max_gprs = 192;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 256;
		rdev->config.r600.max_hw_contexts = 8;
		rdev->config.r600.max_gs_threads = 16;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 2;
		break;
	default:
		break;
	}

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}

	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));

	/* Setup tiling */
	tiling_config = 0;
	ramcfg = RREG32(RAMCFG);
	switch (rdev->config.r600.max_tile_pipes) {
	case 1:
		tiling_config |= PIPE_TILING(0);
		break;
	case 2:
		tiling_config |= PIPE_TILING(1);
		break;
	case 4:
		tiling_config |= PIPE_TILING(2);
		break;
	case 8:
		tiling_config |= PIPE_TILING(3);
		break;
	default:
		break;
	}
	rdev->config.r600.tiling_npipes = rdev->config.r600.max_tile_pipes;
	rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
	tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
	tiling_config |= GROUP_SIZE((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
	if ((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT)
		rdev->config.r600.tiling_group_size = 512;
	else
		rdev->config.r600.tiling_group_size = 256;
	tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
	if (tmp > 3) {
		tiling_config |= ROW_TILING(3);
		tiling_config |= SAMPLE_SPLIT(3);
	} else {
		tiling_config |= ROW_TILING(tmp);
		tiling_config |= SAMPLE_SPLIT(tmp);
	}
	tiling_config |= BANK_SWAPS(1);

	cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
	cc_rb_backend_disable |=
		BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << rdev->config.r600.max_backends) & R6XX_MAX_BACKENDS_MASK);

	cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
	cc_gc_shader_pipe_config |=
		INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << rdev->config.r600.max_pipes) & R6XX_MAX_PIPES_MASK);
	cc_gc_shader_pipe_config |=
		INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << rdev->config.r600.max_simds) & R6XX_MAX_SIMDS_MASK);

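	/* derive the pipe-to-backend map from the tile pipe count and
	 * whichever render backends are actually left enabled by the
	 * disable fuses read back above */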
	backend_map = r600_get_tile_pipe_to_backend_map(rdev->config.r600.max_tile_pipes,
							(R6XX_MAX_BACKENDS -
							 r600_count_pipe_bits((cc_rb_backend_disable &
									       R6XX_MAX_BACKENDS_MASK) >> 16)),
							(cc_rb_backend_disable >> 16));
	rdev->config.r600.tile_config = tiling_config;
	rdev->config.r600.backend_map = backend_map;
	tiling_config |= BACKEND_MAP(backend_map);
	WREG32(GB_TILING_CONFIG, tiling_config);
	WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
	WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);

	/* Setup pipes */
	WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
	WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
	WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);

	tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
	WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
	WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK);

	/* Setup some CP states */
	WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | ROQ_IB2_START(0x2b)));
	WREG32(CP_MEQ_THRESHOLDS, (MEQ_END(0x40) | ROQ_END(0x40)));

	WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO | SYNC_GRADIENT |
			     SYNC_WALKER | SYNC_ALIGNER));
	/* Setup various GPU states */
	if (rdev->family == CHIP_RV670)
		WREG32(ARB_GDEC_RD_CNTL, 0x00000021);

	tmp = RREG32(SX_DEBUG_1);
	tmp |= SMX_EVENT_RELEASE;
	if ((rdev->family > CHIP_R600))
		tmp |= ENABLE_NEW_SMX_ADDRESS;
	WREG32(SX_DEBUG_1, tmp);

	if (((rdev->family) == CHIP_R600) ||
	    ((rdev->family) == CHIP_RV630) ||
	    ((rdev->family) == CHIP_RV610) ||
	    ((rdev->family) == CHIP_RV620) ||
	    ((rdev->family) == CHIP_RS780) ||
	    ((rdev->family) == CHIP_RS880)) {
		WREG32(DB_DEBUG, PREZ_MUST_WAIT_FOR_POSTZ_DONE);
	} else {
		WREG32(DB_DEBUG, 0);
	}
	WREG32(DB_WATERMARKS, (DEPTH_FREE(4) | DEPTH_CACHELINE_FREE(16) |
			       DEPTH_FLUSH(16) | DEPTH_PENDING_FREE(4)));

	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
	WREG32(VGT_NUM_INSTANCES, 0);

	WREG32(SPI_CONFIG_CNTL, GPR_WRITE_PRIORITY(0));
	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(0));

	tmp = RREG32(SQ_MS_FIFO_SIZES);
	if (((rdev->family) == CHIP_RV610) ||
	    ((rdev->family) == CHIP_RV620) ||
	    ((rdev->family) == CHIP_RS780) ||
	    ((rdev->family) == CHIP_RS880)) {
		tmp = (CACHE_FIFO_SIZE(0xa) |
		       FETCH_FIFO_HIWATER(0xa) |
		       DONE_FIFO_HIWATER(0xe0) |
		       ALU_UPDATE_FIFO_HIWATER(0x8));
	} else if (((rdev->family) == CHIP_R600) ||
		   ((rdev->family) == CHIP_RV630)) {
		tmp &= ~DONE_FIFO_HIWATER(0xff);
		tmp |= DONE_FIFO_HIWATER(0x4);
	}
	WREG32(SQ_MS_FIFO_SIZES, tmp);

	/* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT
	 * should be adjusted as needed by the 2D/3D drivers. This just sets default values
	 */
	sq_config = RREG32(SQ_CONFIG);
	sq_config &= ~(PS_PRIO(3) |
		       VS_PRIO(3) |
		       GS_PRIO(3) |
		       ES_PRIO(3));
	sq_config |= (DX9_CONSTS |
		      VC_ENABLE |
		      PS_PRIO(0) |
		      VS_PRIO(1) |
		      GS_PRIO(2) |
		      ES_PRIO(3));

	if ((rdev->family) == CHIP_R600) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(124) |
					  NUM_VS_GPRS(124) |
					  NUM_CLAUSE_TEMP_GPRS(4));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(0) |
					  NUM_ES_GPRS(0));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(136) |
					   NUM_VS_THREADS(48) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(4));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(128) |
					    NUM_VS_STACK_ENTRIES(128));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(0) |
					    NUM_ES_STACK_ENTRIES(0));
	} else if (((rdev->family) == CHIP_RV610) ||
		   ((rdev->family) == CHIP_RV620) ||
		   ((rdev->family) == CHIP_RS780) ||
		   ((rdev->family) == CHIP_RS880)) {
		/* no vertex cache */
		sq_config &= ~VC_ENABLE;

		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_VS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
					  NUM_ES_GPRS(17));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
					    NUM_VS_STACK_ENTRIES(40));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
					    NUM_ES_STACK_ENTRIES(16));
	} else if (((rdev->family) == CHIP_RV630) ||
		   ((rdev->family) == CHIP_RV635)) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_VS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(18) |
					  NUM_ES_GPRS(18));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
					    NUM_VS_STACK_ENTRIES(40));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
					    NUM_ES_STACK_ENTRIES(16));
	} else if ((rdev->family) == CHIP_RV670) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_VS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
					  NUM_ES_GPRS(17));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(64) |
					    NUM_VS_STACK_ENTRIES(64));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(64) |
					    NUM_ES_STACK_ENTRIES(64));
	}

	WREG32(SQ_CONFIG, sq_config);
	WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1);
	WREG32(SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2);
	WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
	WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
	WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);

	if (((rdev->family) == CHIP_RV610) ||
	    ((rdev->family) == CHIP_RV620) ||
	    ((rdev->family) == CHIP_RS780) ||
	    ((rdev->family) == CHIP_RS880)) {
		WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(TC_ONLY));
	} else {
		WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC));
	}

	/* More default values. 2D/3D driver should adjust as needed */
	WREG32(PA_SC_AA_SAMPLE_LOCS_2S, (S0_X(0xc) | S0_Y(0x4) |
					 S1_X(0x4) | S1_Y(0xc)));
	WREG32(PA_SC_AA_SAMPLE_LOCS_4S, (S0_X(0xe) | S0_Y(0xe) |
					 S1_X(0x2) | S1_Y(0x2) |
					 S2_X(0xa) | S2_Y(0x6) |
					 S3_X(0x6) | S3_Y(0xa)));
	WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD0, (S0_X(0xe) | S0_Y(0xb) |
					     S1_X(0x4) | S1_Y(0xc) |
					     S2_X(0x1) | S2_Y(0x6) |
					     S3_X(0xa) | S3_Y(0xe)));
	WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD1, (S4_X(0x6) | S4_Y(0x1) |
					     S5_X(0x0) | S5_Y(0x0) |
					     S6_X(0xb) | S6_Y(0x4) |
					     S7_X(0x7) | S7_Y(0x8)));

	WREG32(VGT_STRMOUT_EN, 0);
	tmp = rdev->config.r600.max_pipes * 16;
	switch (rdev->family) {
	case CHIP_RV610:
	case CHIP_RV620:
	case CHIP_RS780:
	case CHIP_RS880:
		tmp += 32;
		break;
	case CHIP_RV670:
		tmp += 128;
		break;
	default:
		break;
	}
	if (tmp > 256) {
		tmp = 256;
	}
	WREG32(VGT_ES_PER_GS, 128);
	WREG32(VGT_GS_PER_ES, tmp);
	WREG32(VGT_GS_PER_VS, 2);
	WREG32(VGT_GS_VERTEX_REUSE, 16);

	/* more default values. 2D/3D driver should adjust as needed */
	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
	WREG32(VGT_STRMOUT_EN, 0);
	WREG32(SX_MISC, 0);
	WREG32(PA_SC_MODE_CNTL, 0);
	WREG32(PA_SC_AA_CONFIG, 0);
	WREG32(PA_SC_LINE_STIPPLE, 0);
	WREG32(SPI_INPUT_Z, 0);
	WREG32(SPI_PS_IN_CONTROL_0, NUM_INTERP(2));
	WREG32(CB_COLOR7_FRAG, 0);

	/* Clear render buffer base addresses */
	WREG32(CB_COLOR0_BASE, 0);
	WREG32(CB_COLOR1_BASE, 0);
	WREG32(CB_COLOR2_BASE, 0);
	WREG32(CB_COLOR3_BASE, 0);
	WREG32(CB_COLOR4_BASE, 0);
	WREG32(CB_COLOR5_BASE, 0);
	WREG32(CB_COLOR6_BASE, 0);
	WREG32(CB_COLOR7_BASE, 0);
	WREG32(CB_COLOR7_FRAG, 0);

	switch (rdev->family) {
	case CHIP_RV610:
	case CHIP_RV620:
	case CHIP_RS780:
	case CHIP_RS880:
		tmp = TC_L2_SIZE(8);
		break;
	case CHIP_RV630:
	case CHIP_RV635:
		tmp = TC_L2_SIZE(4);
		break;
	case CHIP_R600:
		tmp = TC_L2_SIZE(0) | L2_DISABLE_LATE_HIT;
		break;
	default:
		tmp = TC_L2_SIZE(0);
		break;
	}
	WREG32(TC_CNTL, tmp);

	tmp = RREG32(HDP_HOST_PATH_CNTL);
	WREG32(HDP_HOST_PATH_CNTL, tmp);

	tmp = RREG32(ARB_POP);
	tmp |= ENABLE_TC128;
	WREG32(ARB_POP, tmp);

	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
	WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
			       NUM_CLIP_SEQ(3)));
	WREG32(PA_SC_ENHANCE, FORCE_EOV_MAX_CLK_CNT(4095));
}

/*
 * Indirect registers accessor
 */
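/*
 * PCIE port registers are reached through an index/data pair: the
 * register offset goes into PCIE_PORT_INDEX, the payload into
 * PCIE_PORT_DATA. The dummy reads appear to be there to post the
 * preceding write before the next access.
 */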
u32 r600_pciep_rreg(struct radeon_device *rdev, u32 reg)
{
	u32 r;

	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
	(void)RREG32(PCIE_PORT_INDEX);
	r = RREG32(PCIE_PORT_DATA);
	return r;
}

void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
	(void)RREG32(PCIE_PORT_INDEX);
	WREG32(PCIE_PORT_DATA, (v));
	(void)RREG32(PCIE_PORT_DATA);
}

/*
 * CP & Ring
 */
void r600_cp_stop(struct radeon_device *rdev)
{
	radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
	WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
	WREG32(SCRATCH_UMSK, 0);
}

int r600_init_microcode(struct radeon_device *rdev)
{
	struct platform_device *pdev;
	const char *chip_name;
	const char *rlc_chip_name;
	size_t pfp_req_size, me_req_size, rlc_req_size;
	char fw_name[30];
	int err;

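	/* the PFP/ME microcode is per-chip, while several chips share a
	 * single RLC image, hence the separate rlc_chip_name */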
	DRM_DEBUG("\n");

	pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
	err = IS_ERR(pdev);
	if (err) {
		printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
		return -EINVAL;
	}

	switch (rdev->family) {
	case CHIP_R600:
		chip_name = "R600";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV610:
		chip_name = "RV610";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV630:
		chip_name = "RV630";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV620:
		chip_name = "RV620";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV635:
		chip_name = "RV635";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV670:
		chip_name = "RV670";
		rlc_chip_name = "R600";
		break;
	case CHIP_RS780:
	case CHIP_RS880:
		chip_name = "RS780";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV770:
		chip_name = "RV770";
		rlc_chip_name = "R700";
		break;
	case CHIP_RV730:
	case CHIP_RV740:
		chip_name = "RV730";
		rlc_chip_name = "R700";
		break;
	case CHIP_RV710:
		chip_name = "RV710";
		rlc_chip_name = "R700";
		break;
	case CHIP_CEDAR:
		chip_name = "CEDAR";
		rlc_chip_name = "CEDAR";
		break;
	case CHIP_REDWOOD:
		chip_name = "REDWOOD";
		rlc_chip_name = "REDWOOD";
		break;
	case CHIP_JUNIPER:
		chip_name = "JUNIPER";
		rlc_chip_name = "JUNIPER";
		break;
	case CHIP_CYPRESS:
	case CHIP_HEMLOCK:
		chip_name = "CYPRESS";
		rlc_chip_name = "CYPRESS";
		break;
	case CHIP_PALM:
		chip_name = "PALM";
		rlc_chip_name = "SUMO";
		break;
	case CHIP_SUMO:
		chip_name = "SUMO";
		rlc_chip_name = "SUMO";
		break;
	case CHIP_SUMO2:
		chip_name = "SUMO2";
		rlc_chip_name = "SUMO";
		break;
	default: BUG();
	}

	if (rdev->family >= CHIP_CEDAR) {
		pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
	} else if (rdev->family >= CHIP_RV770) {
		pfp_req_size = R700_PFP_UCODE_SIZE * 4;
		me_req_size = R700_PM4_UCODE_SIZE * 4;
		rlc_req_size = R700_RLC_UCODE_SIZE * 4;
	} else {
		pfp_req_size = PFP_UCODE_SIZE * 4;
		me_req_size = PM4_UCODE_SIZE * 12;
		rlc_req_size = RLC_UCODE_SIZE * 4;
	}

	DRM_INFO("Loading %s Microcode\n", chip_name);

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
	err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
	if (err)
		goto out;
	if (rdev->pfp_fw->size != pfp_req_size) {
		printk(KERN_ERR
		       "r600_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->pfp_fw->size, fw_name);
		err = -EINVAL;
		goto out;
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
	err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
	if (err)
		goto out;
	if (rdev->me_fw->size != me_req_size) {
		printk(KERN_ERR
		       "r600_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->me_fw->size, fw_name);
		err = -EINVAL;
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
	err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev);
	if (err)
		goto out;
	if (rdev->rlc_fw->size != rlc_req_size) {
		printk(KERN_ERR
		       "r600_rlc: Bogus length %zu in firmware \"%s\"\n",
		       rdev->rlc_fw->size, fw_name);
		err = -EINVAL;
	}

out:
	platform_device_unregister(pdev);

	if (err) {
		if (err != -EINVAL)
			printk(KERN_ERR
			       "r600_cp: Failed to load firmware \"%s\"\n",
			       fw_name);
		release_firmware(rdev->pfp_fw);
		rdev->pfp_fw = NULL;
		release_firmware(rdev->me_fw);
		rdev->me_fw = NULL;
		release_firmware(rdev->rlc_fw);
		rdev->rlc_fw = NULL;
	}
	return err;
}

static int r600_cp_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	int i;

	if (!rdev->me_fw || !rdev->pfp_fw)
		return -EINVAL;

	r600_cp_stop(rdev);

	WREG32(CP_RB_CNTL,
#ifdef __BIG_ENDIAN
	       BUF_SWAP_32BIT |
#endif
	       RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));

	/* Reset cp */
	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
	RREG32(GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(GRBM_SOFT_RESET, 0);

	WREG32(CP_ME_RAM_WADDR, 0);

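	/* stream the ME then PFP microcode in through their write ports;
	 * the firmware blobs are stored big-endian, hence be32_to_cpup() */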
	fw_data = (const __be32 *)rdev->me_fw->data;
	WREG32(CP_ME_RAM_WADDR, 0);
	for (i = 0; i < PM4_UCODE_SIZE * 3; i++)
		WREG32(CP_ME_RAM_DATA,
		       be32_to_cpup(fw_data++));

	fw_data = (const __be32 *)rdev->pfp_fw->data;
	WREG32(CP_PFP_UCODE_ADDR, 0);
	for (i = 0; i < PFP_UCODE_SIZE; i++)
		WREG32(CP_PFP_UCODE_DATA,
		       be32_to_cpup(fw_data++));

	WREG32(CP_PFP_UCODE_ADDR, 0);
	WREG32(CP_ME_RAM_WADDR, 0);
	WREG32(CP_ME_RAM_RADDR, 0);
	return 0;
}

int r600_cp_start(struct radeon_device *rdev)
{
	int r;
	uint32_t cp_me;

	r = radeon_ring_lock(rdev, 7);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}
	radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
	radeon_ring_write(rdev, 0x1);
	if (rdev->family >= CHIP_RV770) {
		radeon_ring_write(rdev, 0x0);
		radeon_ring_write(rdev, rdev->config.rv770.max_hw_contexts - 1);
	} else {
		radeon_ring_write(rdev, 0x3);
		radeon_ring_write(rdev, rdev->config.r600.max_hw_contexts - 1);
	}
	radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, 0);
	radeon_ring_unlock_commit(rdev);

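	/* any value with the CP_ME_HALT bit clear should un-halt the
	 * micro engine and let it start fetching */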
	cp_me = 0xff;
	WREG32(R_0086D8_CP_ME_CNTL, cp_me);
	return 0;
}

int r600_cp_resume(struct radeon_device *rdev)
{
	u32 tmp;
	u32 rb_bufsz;
	int r;

	/* Reset cp */
	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
	RREG32(GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(GRBM_SOFT_RESET, 0);

	/* Set ring buffer size */
	rb_bufsz = drm_order(rdev->cp.ring_size / 8);
	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
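	/* RB_BUFSZ (low bits) and RB_BLKSZ (bits 8+) appear to be log2
	 * sizes counted in 8-byte units */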
#ifdef __BIG_ENDIAN
	tmp |= BUF_SWAP_32BIT;
#endif
	WREG32(CP_RB_CNTL, tmp);
	WREG32(CP_SEM_WAIT_TIMER, 0x4);

	/* Set the write pointer delay */
	WREG32(CP_RB_WPTR_DELAY, 0);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
	WREG32(CP_RB_RPTR_WR, 0);
	rdev->cp.wptr = 0;
	WREG32(CP_RB_WPTR, rdev->cp.wptr);

	/* set the wb address whether it's enabled or not */
	WREG32(CP_RB_RPTR_ADDR,
	       ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
	WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
	WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);

	if (rdev->wb.enabled)
		WREG32(SCRATCH_UMSK, 0xff);
	else {
		tmp |= RB_NO_UPDATE;
		WREG32(SCRATCH_UMSK, 0);
	}

	mdelay(1);
	WREG32(CP_RB_CNTL, tmp);

	WREG32(CP_RB_BASE, rdev->cp.gpu_addr >> 8);
	WREG32(CP_DEBUG, (1 << 27) | (1 << 28));

	rdev->cp.rptr = RREG32(CP_RB_RPTR);

	r600_cp_start(rdev);
	rdev->cp.ready = true;
	r = radeon_ring_test(rdev);
	if (r) {
		rdev->cp.ready = false;
		return r;
	}
	return 0;
}

void r600_cp_commit(struct radeon_device *rdev)
{
	WREG32(CP_RB_WPTR, rdev->cp.wptr);
	(void)RREG32(CP_RB_WPTR);
}

void r600_ring_init(struct radeon_device *rdev, unsigned ring_size)
{
	u32 rb_bufsz;

	/* Align ring size */
	rb_bufsz = drm_order(ring_size / 8);
	ring_size = (1 << (rb_bufsz + 1)) * 4;
	rdev->cp.ring_size = ring_size;
	rdev->cp.align_mask = 16 - 1;
}

void r600_cp_fini(struct radeon_device *rdev)
{
	r600_cp_stop(rdev);
	radeon_ring_fini(rdev);
}


/*
 * GPU scratch registers helper functions.
 */
void r600_scratch_init(struct radeon_device *rdev)
{
	int i;

	rdev->scratch.num_reg = 7;
	rdev->scratch.reg_base = SCRATCH_REG0;
	for (i = 0; i < rdev->scratch.num_reg; i++) {
		rdev->scratch.free[i] = true;
		rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
	}
}

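/* basic CP sanity test: push a SET_CONFIG_REG write of a magic value to
 * a scratch register through the ring and poll until it shows up (or the
 * usec timeout expires) */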
int r600_ring_test(struct radeon_device *rdev)
{
	uint32_t scratch;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	r = radeon_scratch_get(rdev, &scratch);
	if (r) {
		DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
		return r;
	}
	WREG32(scratch, 0xCAFEDEAD);
	r = radeon_ring_lock(rdev, 3);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		radeon_scratch_free(rdev, scratch);
		return r;
	}
	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(rdev, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
	radeon_ring_write(rdev, 0xDEADBEEF);
	radeon_ring_unlock_commit(rdev);
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}
	if (i < rdev->usec_timeout) {
		DRM_INFO("ring test succeeded in %d usecs\n", i);
	} else {
		DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n",
			  scratch, tmp);
		r = -EINVAL;
	}
	radeon_scratch_free(rdev, scratch);
	return r;
}

void r600_fence_ring_emit(struct radeon_device *rdev,
			  struct radeon_fence *fence)
{
	if (rdev->wb.use_event) {
		u64 addr = rdev->wb.gpu_addr + R600_WB_EVENT_OFFSET +
			(u64)(rdev->fence_drv.scratch_reg - rdev->scratch.reg_base);
		/* EVENT_WRITE_EOP - flush caches, send int */
		radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
		radeon_ring_write(rdev, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
		radeon_ring_write(rdev, addr & 0xffffffff);
		radeon_ring_write(rdev, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
		radeon_ring_write(rdev, fence->seq);
		radeon_ring_write(rdev, 0);
	} else {
		radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0));
		radeon_ring_write(rdev, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0));
		/* wait for 3D idle clean */
		radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
		radeon_ring_write(rdev, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
		radeon_ring_write(rdev, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
		/* Emit fence sequence & fire IRQ */
		radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
		radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
		radeon_ring_write(rdev, fence->seq);
		/* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
		radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0));
		radeon_ring_write(rdev, RB_INT_STAT);
	}
}

int r600_copy_blit(struct radeon_device *rdev,
		   uint64_t src_offset,
		   uint64_t dst_offset,
		   unsigned num_gpu_pages,
		   struct radeon_fence *fence)
{
	int r;

	mutex_lock(&rdev->r600_blit.mutex);
	rdev->r600_blit.vb_ib = NULL;
	r = r600_blit_prepare_copy(rdev, num_gpu_pages);
	if (r) {
		if (rdev->r600_blit.vb_ib)
			radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
		mutex_unlock(&rdev->r600_blit.mutex);
		return r;
	}
	r600_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages);
	r600_blit_done_copy(rdev, fence);
	mutex_unlock(&rdev->r600_blit.mutex);
	return 0;
}

void r600_blit_suspend(struct radeon_device *rdev)
{
	int r;

	/* unpin shaders bo */
	if (rdev->r600_blit.shader_obj) {
		r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
		if (!r) {
			radeon_bo_unpin(rdev->r600_blit.shader_obj);
			radeon_bo_unreserve(rdev->r600_blit.shader_obj);
		}
	}
}

int r600_set_surface_reg(struct radeon_device *rdev, int reg,
			 uint32_t tiling_flags, uint32_t pitch,
			 uint32_t offset, uint32_t obj_size)
{
	/* FIXME: implement */
	return 0;
}

void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
{
	/* FIXME: implement */
}

int r600_startup(struct radeon_device *rdev)
{
	int r;

	/* enable pcie gen2 link */
	r600_pcie_gen2_enable(rdev);

	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
		r = r600_init_microcode(rdev);
		if (r) {
			DRM_ERROR("Failed to load firmware!\n");
			return r;
		}
	}

	r600_mc_program(rdev);
	if (rdev->flags & RADEON_IS_AGP) {
		r600_agp_enable(rdev);
	} else {
		r = r600_pcie_gart_enable(rdev);
		if (r)
			return r;
	}
	r600_gpu_init(rdev);
	r = r600_blit_init(rdev);
	if (r) {
		r600_blit_fini(rdev);
		rdev->asic->copy = NULL;
		dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
	}

	/* allocate wb buffer */
	r = radeon_wb_init(rdev);
	if (r)
		return r;

	/* Enable IRQ */
	r = r600_irq_init(rdev);
	if (r) {
		DRM_ERROR("radeon: IH init failed (%d).\n", r);
		radeon_irq_kms_fini(rdev);
		return r;
	}
	r600_irq_set(rdev);

	r = radeon_ring_init(rdev, rdev->cp.ring_size);
	if (r)
		return r;
	r = r600_cp_load_microcode(rdev);
	if (r)
		return r;
	r = r600_cp_resume(rdev);
	if (r)
		return r;

	return 0;
}

void r600_vga_set_state(struct radeon_device *rdev, bool state)
{
	uint32_t temp;

	temp = RREG32(CONFIG_CNTL);
	if (state == false) {
		temp &= ~(1<<0);
		temp |= (1<<1);
	} else {
		temp &= ~(1<<1);
	}
	WREG32(CONFIG_CNTL, temp);
}

int r600_resume(struct radeon_device *rdev)
{
	int r;

	/* Do not reset the GPU before posting; on r600 hw, unlike on
	 * r500 hw, posting performs the necessary tasks to bring the
	 * GPU back into good shape.
	 */
	/* post card */
	atom_asic_init(rdev->mode_info.atom_context);

	r = r600_startup(rdev);
	if (r) {
		DRM_ERROR("r600 startup failed on resume\n");
		return r;
	}

	r = r600_ib_test(rdev);
	if (r) {
		DRM_ERROR("radeon: failed testing IB (%d).\n", r);
		return r;
	}

	r = r600_audio_init(rdev);
	if (r) {
		DRM_ERROR("radeon: audio resume failed\n");
		return r;
	}

	return r;
}

int r600_suspend(struct radeon_device *rdev)
{
	r600_audio_fini(rdev);
	/* FIXME: we should wait for ring to be empty */
	r600_cp_stop(rdev);
	rdev->cp.ready = false;
	r600_irq_suspend(rdev);
	radeon_wb_disable(rdev);
	r600_pcie_gart_disable(rdev);
	r600_blit_suspend(rdev);

	return 0;
}

/* The plan is to move initialization into this function and use helper
 * functions so that radeon_device_init does pretty much nothing more
 * than call asic-specific functions. This should also allow us to
 * remove a bunch of callbacks like vram_info.
 */
int r600_init(struct radeon_device *rdev)
{
	int r;

	if (r600_debugfs_mc_info_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for mc !\n");
	}
	/* This doesn't do much */
	r = radeon_gem_init(rdev);
	if (r)
		return r;
	/* Read BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	/* Must be an ATOMBIOS */
	if (!rdev->is_atom_bios) {
		dev_err(rdev->dev, "Expecting atombios for R600 GPU\n");
		return -EINVAL;
	}
	r = radeon_atombios_init(rdev);
	if (r)
		return r;
	/* Post card if necessary */
	if (!radeon_card_posted(rdev)) {
		if (!rdev->bios) {
			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
			return -EINVAL;
		}
		DRM_INFO("GPU not posted. posting now...\n");
		atom_asic_init(rdev->mode_info.atom_context);
	}
	/* Initialize scratch registers */
	r600_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	if (rdev->flags & RADEON_IS_AGP) {
		r = radeon_agp_init(rdev);
		if (r)
			radeon_agp_disable(rdev);
	}
	r = r600_mc_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;

	r = radeon_irq_kms_init(rdev);
	if (r)
		return r;

	rdev->cp.ring_obj = NULL;
	r600_ring_init(rdev, 1024 * 1024);

	rdev->ih.ring_obj = NULL;
	r600_ih_ring_init(rdev, 64 * 1024);

	r = r600_pcie_gart_init(rdev);
	if (r)
		return r;

	rdev->accel_working = true;
	r = r600_startup(rdev);
	if (r) {
		dev_err(rdev->dev, "disabling GPU acceleration\n");
		r600_cp_fini(rdev);
		r600_irq_fini(rdev);
		radeon_wb_fini(rdev);
		radeon_irq_kms_fini(rdev);
		r600_pcie_gart_fini(rdev);
		rdev->accel_working = false;
	}
	if (rdev->accel_working) {
		r = radeon_ib_pool_init(rdev);
		if (r) {
			dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
			rdev->accel_working = false;
		} else {
			r = r600_ib_test(rdev);
			if (r) {
				dev_err(rdev->dev, "IB test failed (%d).\n", r);
				rdev->accel_working = false;
			}
		}
	}

	r = r600_audio_init(rdev);
	if (r)
		return r; /* TODO error handling */
	return 0;
}

void r600_fini(struct radeon_device *rdev)
{
	r600_audio_fini(rdev);
	r600_blit_fini(rdev);
	r600_cp_fini(rdev);
	r600_irq_fini(rdev);
	radeon_wb_fini(rdev);
	radeon_ib_pool_fini(rdev);
	radeon_irq_kms_fini(rdev);
	r600_pcie_gart_fini(rdev);
	radeon_agp_fini(rdev);
	radeon_gem_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;
}

/*
 * CS stuff
 */
void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
	/* FIXME: implement */
	radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
	radeon_ring_write(rdev,
#ifdef __BIG_ENDIAN
			  (2 << 0) |
#endif
			  (ib->gpu_addr & 0xFFFFFFFC));
	radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF);
	radeon_ring_write(rdev, ib->length_dw);
}

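/* IB sanity test: build a small indirect buffer that writes a magic value
 * to a scratch register, schedule it, wait on its fence and then poll the
 * register */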
2670int r600_ib_test(struct radeon_device *rdev)
2671{
2672 struct radeon_ib *ib;
2673 uint32_t scratch;
2674 uint32_t tmp = 0;
2675 unsigned i;
2676 int r;
2677
2678 r = radeon_scratch_get(rdev, &scratch);
2679 if (r) {
2680 DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
2681 return r;
2682 }
2683 WREG32(scratch, 0xCAFEDEAD);
2684 r = radeon_ib_get(rdev, &ib);
2685 if (r) {
2686 DRM_ERROR("radeon: failed to get ib (%d).\n", r);
2687 return r;
2688 }
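	/* The test IB writes 0xDEADBEEF to the scratch reg via a
	 * SET_CONFIG_REG packet and pads out to 16 dwords with
	 * type-2 NOP packets (PACKET2).
	 */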
	ib->ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
	ib->ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
	ib->ptr[2] = 0xDEADBEEF;
	ib->ptr[3] = PACKET2(0);
	ib->ptr[4] = PACKET2(0);
	ib->ptr[5] = PACKET2(0);
	ib->ptr[6] = PACKET2(0);
	ib->ptr[7] = PACKET2(0);
	ib->ptr[8] = PACKET2(0);
	ib->ptr[9] = PACKET2(0);
	ib->ptr[10] = PACKET2(0);
	ib->ptr[11] = PACKET2(0);
	ib->ptr[12] = PACKET2(0);
	ib->ptr[13] = PACKET2(0);
	ib->ptr[14] = PACKET2(0);
	ib->ptr[15] = PACKET2(0);
	ib->length_dw = 16;
	r = radeon_ib_schedule(rdev, ib);
	if (r) {
		radeon_scratch_free(rdev, scratch);
		radeon_ib_free(rdev, &ib);
		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
		return r;
	}
	r = radeon_fence_wait(ib->fence, false);
	if (r) {
		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
		radeon_scratch_free(rdev, scratch);
		radeon_ib_free(rdev, &ib);
		return r;
	}
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}
	if (i < rdev->usec_timeout) {
		DRM_INFO("ib test succeeded in %u usecs\n", i);
	} else {
		DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
			  scratch, tmp);
		r = -EINVAL;
	}
	radeon_scratch_free(rdev, scratch);
	radeon_ib_free(rdev, &ib);
	return r;
}

/*
 * Interrupts
 *
 * Interrupts use a ring buffer on r6xx/r7xx hardware. It works much
 * the same as the CP ring buffer, but in reverse. Rather than the CPU
 * writing to the ring and the GPU consuming, the GPU writes to the ring
 * and the host consumes. As the host irq handler processes interrupts,
 * it increments the rptr. When the rptr catches up with the wptr, all
 * the current interrupts have been processed.
 */
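/*
 * In outline, the consume loop in r600_irq_process() below boils down
 * to (a sketch of the flow, not a separate API):
 *
 *	wptr = r600_get_ih_wptr(rdev);
 *	while (rptr != wptr) {
 *		dispatch vector at ring[rptr / 4];   (each vector is 16 bytes)
 *		rptr = (rptr + 16) & rdev->ih.ptr_mask;
 *	}
 *	WREG32(IH_RB_RPTR, rptr);
 */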

void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size)
{
	u32 rb_bufsz;

	/* Align ring size */
	rb_bufsz = drm_order(ring_size / 4);
	ring_size = (1 << rb_bufsz) * 4;
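	/* e.g. a 24 KB request (6144 dwords) rounds up to 8192 dwords (32 KB) */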
	rdev->ih.ring_size = ring_size;
	rdev->ih.ptr_mask = rdev->ih.ring_size - 1;
	rdev->ih.rptr = 0;
}

static int r600_ih_ring_alloc(struct radeon_device *rdev)
{
	int r;

	/* Allocate ring buffer */
	if (rdev->ih.ring_obj == NULL) {
		r = radeon_bo_create(rdev, rdev->ih.ring_size,
				     PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT,
				     &rdev->ih.ring_obj);
		if (r) {
			DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r);
			return r;
		}
		r = radeon_bo_reserve(rdev->ih.ring_obj, false);
		if (unlikely(r != 0))
			return r;
		r = radeon_bo_pin(rdev->ih.ring_obj,
				  RADEON_GEM_DOMAIN_GTT,
				  &rdev->ih.gpu_addr);
		if (r) {
			radeon_bo_unreserve(rdev->ih.ring_obj);
			DRM_ERROR("radeon: failed to pin ih ring buffer (%d).\n", r);
			return r;
		}
		r = radeon_bo_kmap(rdev->ih.ring_obj,
				   (void **)&rdev->ih.ring);
		radeon_bo_unreserve(rdev->ih.ring_obj);
		if (r) {
			DRM_ERROR("radeon: failed to map ih ring buffer (%d).\n", r);
			return r;
		}
	}
	return 0;
}

static void r600_ih_ring_fini(struct radeon_device *rdev)
{
	int r;

	if (rdev->ih.ring_obj) {
		r = radeon_bo_reserve(rdev->ih.ring_obj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(rdev->ih.ring_obj);
			radeon_bo_unpin(rdev->ih.ring_obj);
			radeon_bo_unreserve(rdev->ih.ring_obj);
		}
		radeon_bo_unref(&rdev->ih.ring_obj);
		rdev->ih.ring = NULL;
		rdev->ih.ring_obj = NULL;
	}
}

void r600_rlc_stop(struct radeon_device *rdev)
{
	if ((rdev->family >= CHIP_RV770) &&
	    (rdev->family <= CHIP_RV740)) {
		/* r7xx asics need to soft reset RLC before halting */
		WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC);
		RREG32(SRBM_SOFT_RESET);
		udelay(15000);
		WREG32(SRBM_SOFT_RESET, 0);
		RREG32(SRBM_SOFT_RESET);
	}

	WREG32(RLC_CNTL, 0);
}

static void r600_rlc_start(struct radeon_device *rdev)
{
	WREG32(RLC_CNTL, RLC_ENABLE);
}

static int r600_rlc_init(struct radeon_device *rdev)
{
	u32 i;
	const __be32 *fw_data;

	if (!rdev->rlc_fw)
		return -EINVAL;

	r600_rlc_stop(rdev);

	WREG32(RLC_HB_BASE, 0);
	WREG32(RLC_HB_CNTL, 0);
	WREG32(RLC_HB_RPTR, 0);
	WREG32(RLC_HB_WPTR, 0);
	if (rdev->family <= CHIP_CAICOS) {
		WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
		WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
	}
	WREG32(RLC_MC_CNTL, 0);
	WREG32(RLC_UCODE_CNTL, 0);

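	/* stream the ucode image one dword at a time through the
	 * RLC_UCODE_ADDR/RLC_UCODE_DATA register pair; the word count
	 * depends on the asic family
	 */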
	fw_data = (const __be32 *)rdev->rlc_fw->data;
	if (rdev->family >= CHIP_CAYMAN) {
		for (i = 0; i < CAYMAN_RLC_UCODE_SIZE; i++) {
			WREG32(RLC_UCODE_ADDR, i);
			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
		}
	} else if (rdev->family >= CHIP_CEDAR) {
		for (i = 0; i < EVERGREEN_RLC_UCODE_SIZE; i++) {
			WREG32(RLC_UCODE_ADDR, i);
			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
		}
	} else if (rdev->family >= CHIP_RV770) {
		for (i = 0; i < R700_RLC_UCODE_SIZE; i++) {
			WREG32(RLC_UCODE_ADDR, i);
			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
		}
	} else {
		for (i = 0; i < RLC_UCODE_SIZE; i++) {
			WREG32(RLC_UCODE_ADDR, i);
			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
		}
	}
	WREG32(RLC_UCODE_ADDR, 0);

	r600_rlc_start(rdev);

	return 0;
}

static void r600_enable_interrupts(struct radeon_device *rdev)
{
	u32 ih_cntl = RREG32(IH_CNTL);
	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);

	ih_cntl |= ENABLE_INTR;
	ih_rb_cntl |= IH_RB_ENABLE;
	WREG32(IH_CNTL, ih_cntl);
	WREG32(IH_RB_CNTL, ih_rb_cntl);
	rdev->ih.enabled = true;
}

void r600_disable_interrupts(struct radeon_device *rdev)
{
	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
	u32 ih_cntl = RREG32(IH_CNTL);

	ih_rb_cntl &= ~IH_RB_ENABLE;
	ih_cntl &= ~ENABLE_INTR;
	WREG32(IH_RB_CNTL, ih_rb_cntl);
	WREG32(IH_CNTL, ih_cntl);
	/* set rptr, wptr to 0 */
	WREG32(IH_RB_RPTR, 0);
	WREG32(IH_RB_WPTR, 0);
	rdev->ih.enabled = false;
	rdev->ih.wptr = 0;
	rdev->ih.rptr = 0;
}

static void r600_disable_interrupt_state(struct radeon_device *rdev)
{
	u32 tmp;

	WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
	WREG32(GRBM_INT_CNTL, 0);
	WREG32(DxMODE_INT_MASK, 0);
	WREG32(D1GRPH_INTERRUPT_CONTROL, 0);
	WREG32(D2GRPH_INTERRUPT_CONTROL, 0);
	if (ASIC_IS_DCE3(rdev)) {
		WREG32(DCE3_DACA_AUTODETECT_INT_CONTROL, 0);
		WREG32(DCE3_DACB_AUTODETECT_INT_CONTROL, 0);
		tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD1_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD2_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD3_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD4_INT_CONTROL, tmp);
		if (ASIC_IS_DCE32(rdev)) {
			tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD5_INT_CONTROL, tmp);
			tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD6_INT_CONTROL, tmp);
		}
	} else {
		WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
		WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
		tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
		WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
		tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
		WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
		tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
		WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
	}
}

int r600_irq_init(struct radeon_device *rdev)
{
	int ret = 0;
	int rb_bufsz;
	u32 interrupt_cntl, ih_cntl, ih_rb_cntl;

	/* allocate ring */
	ret = r600_ih_ring_alloc(rdev);
	if (ret)
		return ret;

	/* disable irqs */
	r600_disable_interrupts(rdev);

	/* init rlc */
	ret = r600_rlc_init(rdev);
	if (ret) {
		r600_ih_ring_fini(rdev);
		return ret;
	}

	/* setup interrupt control */
	/* set dummy read address to ring address */
	WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
	interrupt_cntl = RREG32(INTERRUPT_CNTL);
	/* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
	 * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
	 */
	interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
	/* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
	interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
	WREG32(INTERRUPT_CNTL, interrupt_cntl);

	WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
	rb_bufsz = drm_order(rdev->ih.ring_size / 4);

	ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
		      IH_WPTR_OVERFLOW_CLEAR |
		      (rb_bufsz << 1));

	if (rdev->wb.enabled)
		ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;

	/* set the writeback address whether it's enabled or not */
	WREG32(IH_RB_WPTR_ADDR_LO, (rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFFFFFFFC);
	WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFF);

	WREG32(IH_RB_CNTL, ih_rb_cntl);

	/* set rptr, wptr to 0 */
	WREG32(IH_RB_RPTR, 0);
	WREG32(IH_RB_WPTR, 0);

	/* Default settings for IH_CNTL (disabled at first) */
	ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10);
	/* RPTR_REARM only works if msi's are enabled */
	if (rdev->msi_enabled)
		ih_cntl |= RPTR_REARM;
	WREG32(IH_CNTL, ih_cntl);

	/* force the active interrupt state to all disabled */
	if (rdev->family >= CHIP_CEDAR)
		evergreen_disable_interrupt_state(rdev);
	else
		r600_disable_interrupt_state(rdev);

	/* enable irqs */
	r600_enable_interrupts(rdev);

	return ret;
}

void r600_irq_suspend(struct radeon_device *rdev)
{
	r600_irq_disable(rdev);
	r600_rlc_stop(rdev);
}

void r600_irq_fini(struct radeon_device *rdev)
{
	r600_irq_suspend(rdev);
	r600_ih_ring_fini(rdev);
}

int r600_irq_set(struct radeon_device *rdev)
{
	u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
	u32 mode_int = 0;
	u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
	u32 grbm_int_cntl = 0;
	u32 hdmi1, hdmi2;
	u32 d1grph = 0, d2grph = 0;

	if (!rdev->irq.installed) {
		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
		return -EINVAL;
	}
	/* don't enable anything if the ih is disabled */
	if (!rdev->ih.enabled) {
		r600_disable_interrupts(rdev);
		/* force the active interrupt state to all disabled */
		r600_disable_interrupt_state(rdev);
		return 0;
	}

	hdmi1 = RREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
	if (ASIC_IS_DCE3(rdev)) {
		hdmi2 = RREG32(R600_HDMI_BLOCK3 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
		hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
		if (ASIC_IS_DCE32(rdev)) {
			hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
			hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
		}
	} else {
		hdmi2 = RREG32(R600_HDMI_BLOCK2 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
		hpd1 = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN;
	}

	if (rdev->irq.sw_int) {
		DRM_DEBUG("r600_irq_set: sw int\n");
		cp_int_cntl |= RB_INT_ENABLE;
		cp_int_cntl |= TIME_STAMP_INT_ENABLE;
	}
	if (rdev->irq.crtc_vblank_int[0] ||
	    rdev->irq.pflip[0]) {
		DRM_DEBUG("r600_irq_set: vblank 0\n");
		mode_int |= D1MODE_VBLANK_INT_MASK;
	}
	if (rdev->irq.crtc_vblank_int[1] ||
	    rdev->irq.pflip[1]) {
		DRM_DEBUG("r600_irq_set: vblank 1\n");
		mode_int |= D2MODE_VBLANK_INT_MASK;
	}
	if (rdev->irq.hpd[0]) {
		DRM_DEBUG("r600_irq_set: hpd 1\n");
		hpd1 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[1]) {
		DRM_DEBUG("r600_irq_set: hpd 2\n");
		hpd2 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[2]) {
		DRM_DEBUG("r600_irq_set: hpd 3\n");
		hpd3 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[3]) {
		DRM_DEBUG("r600_irq_set: hpd 4\n");
		hpd4 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[4]) {
		DRM_DEBUG("r600_irq_set: hpd 5\n");
		hpd5 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[5]) {
		DRM_DEBUG("r600_irq_set: hpd 6\n");
		hpd6 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hdmi[0]) {
		DRM_DEBUG("r600_irq_set: hdmi 1\n");
		hdmi1 |= R600_HDMI_INT_EN;
	}
	if (rdev->irq.hdmi[1]) {
		DRM_DEBUG("r600_irq_set: hdmi 2\n");
		hdmi2 |= R600_HDMI_INT_EN;
	}
	if (rdev->irq.gui_idle) {
		DRM_DEBUG("gui idle\n");
		grbm_int_cntl |= GUI_IDLE_INT_ENABLE;
	}

	WREG32(CP_INT_CNTL, cp_int_cntl);
	WREG32(DxMODE_INT_MASK, mode_int);
	WREG32(D1GRPH_INTERRUPT_CONTROL, d1grph);
	WREG32(D2GRPH_INTERRUPT_CONTROL, d2grph);
	WREG32(GRBM_INT_CNTL, grbm_int_cntl);
	WREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, hdmi1);
	if (ASIC_IS_DCE3(rdev)) {
		WREG32(R600_HDMI_BLOCK3 + R600_HDMI_CNTL, hdmi2);
		WREG32(DC_HPD1_INT_CONTROL, hpd1);
		WREG32(DC_HPD2_INT_CONTROL, hpd2);
		WREG32(DC_HPD3_INT_CONTROL, hpd3);
		WREG32(DC_HPD4_INT_CONTROL, hpd4);
		if (ASIC_IS_DCE32(rdev)) {
			WREG32(DC_HPD5_INT_CONTROL, hpd5);
			WREG32(DC_HPD6_INT_CONTROL, hpd6);
		}
	} else {
		WREG32(R600_HDMI_BLOCK2 + R600_HDMI_CNTL, hdmi2);
		WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
		WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
		WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3);
	}

	return 0;
}

static void r600_irq_ack(struct radeon_device *rdev)
{
	u32 tmp;

	if (ASIC_IS_DCE3(rdev)) {
		rdev->irq.stat_regs.r600.disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS);
		rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE);
		rdev->irq.stat_regs.r600.disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2);
	} else {
		rdev->irq.stat_regs.r600.disp_int = RREG32(DISP_INTERRUPT_STATUS);
		rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
		rdev->irq.stat_regs.r600.disp_int_cont2 = 0;
	}
	rdev->irq.stat_regs.r600.d1grph_int = RREG32(D1GRPH_INTERRUPT_STATUS);
	rdev->irq.stat_regs.r600.d2grph_int = RREG32(D2GRPH_INTERRUPT_STATUS);

	if (rdev->irq.stat_regs.r600.d1grph_int & DxGRPH_PFLIP_INT_OCCURRED)
		WREG32(D1GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR);
	if (rdev->irq.stat_regs.r600.d2grph_int & DxGRPH_PFLIP_INT_OCCURRED)
		WREG32(D2GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR);
	if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT)
		WREG32(D1MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
	if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT)
		WREG32(D1MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
	if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT)
		WREG32(D2MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
	if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT)
		WREG32(D2MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
	if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
		if (ASIC_IS_DCE3(rdev)) {
			tmp = RREG32(DC_HPD1_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD1_INT_CONTROL, tmp);
		} else {
			tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
		}
	}
	if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
		if (ASIC_IS_DCE3(rdev)) {
			tmp = RREG32(DC_HPD2_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD2_INT_CONTROL, tmp);
		} else {
			tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
		}
	}
	if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
		if (ASIC_IS_DCE3(rdev)) {
			tmp = RREG32(DC_HPD3_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD3_INT_CONTROL, tmp);
		} else {
			tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
		}
	}
	if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
		tmp = RREG32(DC_HPD4_INT_CONTROL);
		tmp |= DC_HPDx_INT_ACK;
		WREG32(DC_HPD4_INT_CONTROL, tmp);
	}
	if (ASIC_IS_DCE32(rdev)) {
		if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
			tmp = RREG32(DC_HPD5_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD5_INT_CONTROL, tmp);
		}
		if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
			tmp = RREG32(DC_HPD6_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD6_INT_CONTROL, tmp);
		}
	}
	if (RREG32(R600_HDMI_BLOCK1 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
		WREG32_P(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
	}
	if (ASIC_IS_DCE3(rdev)) {
		if (RREG32(R600_HDMI_BLOCK3 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
			WREG32_P(R600_HDMI_BLOCK3 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
		}
	} else {
		if (RREG32(R600_HDMI_BLOCK2 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
			WREG32_P(R600_HDMI_BLOCK2 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
		}
	}
}

void r600_irq_disable(struct radeon_device *rdev)
{
	r600_disable_interrupts(rdev);
	/* Wait and acknowledge irq */
	mdelay(1);
	r600_irq_ack(rdev);
	r600_disable_interrupt_state(rdev);
}

static u32 r600_get_ih_wptr(struct radeon_device *rdev)
{
	u32 wptr, tmp;

	if (rdev->wb.enabled)
		wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
	else
		wptr = RREG32(IH_RB_WPTR);

	if (wptr & RB_OVERFLOW) {
		/* When a ring buffer overflow happens, start parsing
		 * interrupts from the last vector that was not
		 * overwritten (wptr + 16). Hopefully this lets us
		 * catch up.
		 */
		dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
			 wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
		rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
		tmp = RREG32(IH_RB_CNTL);
		tmp |= IH_WPTR_OVERFLOW_CLEAR;
		WREG32(IH_RB_CNTL, tmp);
	}
	return (wptr & rdev->ih.ptr_mask);
}

/* r600 IV Ring
 * Each IV ring entry is 128 bits:
 * [7:0]    - interrupt source id
 * [31:8]   - reserved
 * [59:32]  - interrupt source data
 * [127:60] - reserved
 *
 * The basic interrupt vector entries
 * are decoded as follows:
 * src_id  src_data  description
 *      1         0  D1 Vblank
 *      1         1  D1 Vline
 *      5         0  D2 Vblank
 *      5         1  D2 Vline
 *     19         0  FP Hot plug detection A
 *     19         1  FP Hot plug detection B
 *     19         2  DAC A auto-detection
 *     19         3  DAC B auto-detection
 *     21         4  HDMI block A
 *     21         5  HDMI block B
 *    176         -  CP_INT RB
 *    177         -  CP_INT IB1
 *    178         -  CP_INT IB2
 *    181         -  EOP Interrupt
 *    233         -  GUI Idle
 *
 * Note, these are based on r600 and may need to be
 * adjusted or added to on newer asics
 */
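/*
 * Example: a D1 vblank event arrives as a vector whose first two dwords
 * decode to src_id = 1, src_data = 0; the loop below masks these fields
 * out of the ring words and dispatches to the matching case.
 */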

int r600_irq_process(struct radeon_device *rdev)
{
	u32 wptr;
	u32 rptr;
	u32 src_id, src_data;
	u32 ring_index;
	unsigned long flags;
	bool queue_hotplug = false;

	if (!rdev->ih.enabled || rdev->shutdown)
		return IRQ_NONE;

	/* No MSIs, need a dummy read to flush PCI DMAs */
	if (!rdev->msi_enabled)
		RREG32(IH_RB_WPTR);

	wptr = r600_get_ih_wptr(rdev);
	rptr = rdev->ih.rptr;
	DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);

	spin_lock_irqsave(&rdev->ih.lock, flags);

	if (rptr == wptr) {
		spin_unlock_irqrestore(&rdev->ih.lock, flags);
		return IRQ_NONE;
	}

restart_ih:
	/* Order reading of wptr vs. reading of IH ring data */
	rmb();

	/* display interrupts */
	r600_irq_ack(rdev);

	rdev->ih.wptr = wptr;
	while (rptr != wptr) {
		/* wptr/rptr are in bytes! */
		ring_index = rptr / 4;
		src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
		src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;

		switch (src_id) {
		case 1: /* D1 vblank/vline */
			switch (src_data) {
			case 0: /* D1 vblank */
				if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT) {
					if (rdev->irq.crtc_vblank_int[0]) {
						drm_handle_vblank(rdev->ddev, 0);
						rdev->pm.vblank_sync = true;
						wake_up(&rdev->irq.vblank_queue);
					}
					if (rdev->irq.pflip[0])
						radeon_crtc_handle_flip(rdev, 0);
					rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D1 vblank\n");
				}
				break;
			case 1: /* D1 vline */
				if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D1 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 5: /* D2 vblank/vline */
			switch (src_data) {
			case 0: /* D2 vblank */
				if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT) {
					if (rdev->irq.crtc_vblank_int[1]) {
						drm_handle_vblank(rdev->ddev, 1);
						rdev->pm.vblank_sync = true;
						wake_up(&rdev->irq.vblank_queue);
					}
					if (rdev->irq.pflip[1])
						radeon_crtc_handle_flip(rdev, 1);
					rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D2 vblank\n");
				}
				break;
			case 1: /* D2 vline */
				if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D2 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 19: /* HPD/DAC hotplug */
			switch (src_data) {
			case 0:
				if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD1_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD1\n");
				}
				break;
			case 1:
				if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD2_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD2\n");
				}
				break;
			case 4:
				if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD3_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD3\n");
				}
				break;
			case 5:
				if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD4_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD4\n");
				}
				break;
			case 10:
				if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD5\n");
				}
				break;
			case 12:
				if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD6\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 21: /* HDMI */
			DRM_DEBUG("IH: HDMI: 0x%x\n", src_data);
			r600_audio_schedule_polling(rdev);
			break;
		case 176: /* CP_INT in ring buffer */
		case 177: /* CP_INT in IB1 */
		case 178: /* CP_INT in IB2 */
			DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
			radeon_fence_process(rdev);
			break;
		case 181: /* CP EOP event */
			DRM_DEBUG("IH: CP EOP\n");
			radeon_fence_process(rdev);
			break;
		case 233: /* GUI IDLE */
			DRM_DEBUG("IH: GUI idle\n");
			rdev->pm.gui_idle = true;
			wake_up(&rdev->irq.idle_queue);
			break;
		default:
			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
			break;
		}

		/* wptr/rptr are in bytes! */
		rptr += 16;
		rptr &= rdev->ih.ptr_mask;
	}
	/* make sure wptr hasn't changed while processing */
	wptr = r600_get_ih_wptr(rdev);
	if (wptr != rdev->ih.wptr)
		goto restart_ih;
	if (queue_hotplug)
		schedule_work(&rdev->hotplug_work);
	rdev->ih.rptr = rptr;
	WREG32(IH_RB_RPTR, rdev->ih.rptr);
	spin_unlock_irqrestore(&rdev->ih.lock, flags);
	return IRQ_HANDLED;
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int r600_debugfs_cp_ring_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	unsigned count, i, j;

	radeon_ring_free_size(rdev);
	count = (rdev->cp.ring_size / 4) - rdev->cp.ring_free_dw;
	seq_printf(m, "CP_STAT 0x%08x\n", RREG32(CP_STAT));
	seq_printf(m, "CP_RB_WPTR 0x%08x\n", RREG32(CP_RB_WPTR));
	seq_printf(m, "CP_RB_RPTR 0x%08x\n", RREG32(CP_RB_RPTR));
	seq_printf(m, "driver's copy of the CP_RB_WPTR 0x%08x\n", rdev->cp.wptr);
	seq_printf(m, "driver's copy of the CP_RB_RPTR 0x%08x\n", rdev->cp.rptr);
	seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw);
	seq_printf(m, "%u dwords in ring\n", count);
	i = rdev->cp.rptr;
	for (j = 0; j <= count; j++) {
		seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]);
		i = (i + 1) & rdev->cp.ptr_mask;
	}
	return 0;
}

static int r600_debugfs_mc_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;

	DREG32_SYS(m, rdev, R_000E50_SRBM_STATUS);
	DREG32_SYS(m, rdev, VM_L2_STATUS);
	return 0;
}

static struct drm_info_list r600_mc_info_list[] = {
	{"r600_mc_info", r600_debugfs_mc_info, 0, NULL},
	{"r600_ring_info", r600_debugfs_cp_ring_info, 0, NULL},
};
#endif

int r600_debugfs_mc_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, r600_mc_info_list, ARRAY_SIZE(r600_mc_info_list));
#else
	return 0;
#endif
}

/**
 * r600_ioctl_wait_idle - flush host path cache on wait idle ioctl
 * @rdev: radeon device structure
 * @bo: buffer object struct which userspace is waiting for idle
 *
 * Some R6XX/R7XX chips don't seem to take into account an HDP flush
 * performed through the ring buffer, which leads to corruption in
 * rendering (see http://bugzilla.kernel.org/show_bug.cgi?id=15186).
 * To avoid this we perform the HDP flush directly by writing the
 * register through MMIO.
 */
void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
{
	/* r7xx hw bug. Write to HDP_DEBUG1 followed by an fb read
	 * rather than a write to HDP_REG_COHERENCY_FLUSH_CNTL.
	 * This seems to cause problems on some AGP cards. Just use the old
	 * method for them.
	 */
	if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
	    rdev->vram_scratch.ptr && !(rdev->flags & RADEON_IS_AGP)) {
		void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
		u32 tmp;

		WREG32(HDP_DEBUG1, 0);
		tmp = readl((void __iomem *)ptr);
	} else
		WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
}

void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes)
{
	u32 link_width_cntl, mask, target_reg;

	if (rdev->flags & RADEON_IS_IGP)
		return;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return;

	/* x2 cards have a special sequence */
	if (ASIC_IS_X2(rdev))
		return;

	/* FIXME wait for idle */

	switch (lanes) {
	case 0:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X0;
		break;
	case 1:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X1;
		break;
	case 2:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X2;
		break;
	case 4:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X4;
		break;
	case 8:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X8;
		break;
	case 12:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X12;
		break;
	case 16:
	default:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X16;
		break;
	}

	link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL);

	if ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) ==
	    (mask << RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT))
		return;

	if (link_width_cntl & R600_PCIE_LC_UPCONFIGURE_DIS)
		return;

	link_width_cntl &= ~(RADEON_PCIE_LC_LINK_WIDTH_MASK |
			     RADEON_PCIE_LC_RECONFIG_NOW |
			     R600_PCIE_LC_RENEGOTIATE_EN |
			     R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE);
	link_width_cntl |= mask;

	WREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);

	/* some northbridges can renegotiate the link rather than requiring
	 * a complete re-config.
	 * e.g., AMD 780/790 northbridges (pci ids: 0x5956, 0x5957, 0x5958, etc.)
	 */
	if (link_width_cntl & R600_PCIE_LC_RENEGOTIATION_SUPPORT)
		link_width_cntl |= R600_PCIE_LC_RENEGOTIATE_EN | R600_PCIE_LC_UPCONFIGURE_SUPPORT;
	else
		link_width_cntl |= R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE;

	WREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL, (link_width_cntl |
						       RADEON_PCIE_LC_RECONFIG_NOW));

	if (rdev->family >= CHIP_RV770)
		target_reg = R700_TARGET_AND_CURRENT_PROFILE_INDEX;
	else
		target_reg = R600_TARGET_AND_CURRENT_PROFILE_INDEX;

	/* wait for lane set to complete */
	link_width_cntl = RREG32(target_reg);
	while (link_width_cntl == 0xffffffff)
		link_width_cntl = RREG32(target_reg);
}
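/*
 * Typical use (a sketch; assuming the usual power-management caller,
 * which does not live in this file): request a narrow link such as
 * r600_set_pcie_lanes(rdev, 1) when idle, then restore full width with
 * r600_set_pcie_lanes(rdev, 16) under load.
 */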

int r600_get_pcie_lanes(struct radeon_device *rdev)
{
	u32 link_width_cntl;

	if (rdev->flags & RADEON_IS_IGP)
		return 0;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return 0;

	/* x2 cards have a special sequence */
	if (ASIC_IS_X2(rdev))
		return 0;

	/* FIXME wait for idle */

	link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL);

	switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) {
	case RADEON_PCIE_LC_LINK_WIDTH_X0:
		return 0;
	case RADEON_PCIE_LC_LINK_WIDTH_X1:
		return 1;
	case RADEON_PCIE_LC_LINK_WIDTH_X2:
		return 2;
	case RADEON_PCIE_LC_LINK_WIDTH_X4:
		return 4;
	case RADEON_PCIE_LC_LINK_WIDTH_X8:
		return 8;
	case RADEON_PCIE_LC_LINK_WIDTH_X16:
	default:
		return 16;
	}
}

static void r600_pcie_gen2_enable(struct radeon_device *rdev)
{
	u32 link_width_cntl, lanes, speed_cntl, training_cntl, tmp;
	u16 link_cntl2;

	if (radeon_pcie_gen2 == 0)
		return;

	if (rdev->flags & RADEON_IS_IGP)
		return;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return;

	/* x2 cards have a special sequence */
	if (ASIC_IS_X2(rdev))
		return;

	/* only RV6xx+ chips are supported */
	if (rdev->family <= CHIP_R600)
		return;

	/* 55 nm r6xx asics */
	if ((rdev->family == CHIP_RV670) ||
	    (rdev->family == CHIP_RV620) ||
	    (rdev->family == CHIP_RV635)) {
		/* advertise upconfig capability */
		link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
		link_width_cntl &= ~LC_UPCONFIGURE_DIS;
		WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
		link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
		if (link_width_cntl & LC_RENEGOTIATION_SUPPORT) {
			lanes = (link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT;
			link_width_cntl &= ~(LC_LINK_WIDTH_MASK |
					     LC_RECONFIG_ARC_MISSING_ESCAPE);
			link_width_cntl |= lanes | LC_RECONFIG_NOW | LC_RENEGOTIATE_EN;
			WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
		} else {
			link_width_cntl |= LC_UPCONFIGURE_DIS;
			WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
		}
	}

	speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
	if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
	    (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {

		/* 55 nm r6xx asics */
		if ((rdev->family == CHIP_RV670) ||
		    (rdev->family == CHIP_RV620) ||
		    (rdev->family == CHIP_RV635)) {
			WREG32(MM_CFGREGS_CNTL, 0x8);
			link_cntl2 = RREG32(0x4088);
			WREG32(MM_CFGREGS_CNTL, 0);
			/* not supported yet */
			if (link_cntl2 & SELECTABLE_DEEMPHASIS)
				return;
		}

		speed_cntl &= ~LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK;
		speed_cntl |= (0x3 << LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT);
		speed_cntl &= ~LC_VOLTAGE_TIMER_SEL_MASK;
		speed_cntl &= ~LC_FORCE_DIS_HW_SPEED_CHANGE;
		speed_cntl |= LC_FORCE_EN_HW_SPEED_CHANGE;
		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);

		tmp = RREG32(0x541c);
		WREG32(0x541c, tmp | 0x8);
		WREG32(MM_CFGREGS_CNTL, MM_WR_TO_CFG_EN);
		link_cntl2 = RREG16(0x4088);
		link_cntl2 &= ~TARGET_LINK_SPEED_MASK;
		link_cntl2 |= 0x2;
		WREG16(0x4088, link_cntl2);
		WREG32(MM_CFGREGS_CNTL, 0);

		if ((rdev->family == CHIP_RV670) ||
		    (rdev->family == CHIP_RV620) ||
		    (rdev->family == CHIP_RV635)) {
			training_cntl = RREG32_PCIE_P(PCIE_LC_TRAINING_CNTL);
			training_cntl &= ~LC_POINT_7_PLUS_EN;
			WREG32_PCIE_P(PCIE_LC_TRAINING_CNTL, training_cntl);
		} else {
			speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
			speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
			WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
		}

		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
		speed_cntl |= LC_GEN2_EN_STRAP;
		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);

	} else {
		link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
		/* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
		if (1)
			link_width_cntl |= LC_UPCONFIGURE_DIS;
		else
			link_width_cntl &= ~LC_UPCONFIGURE_DIS;
		WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
	}
}