/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/firmware.h>
#include <linux/platform_device.h>
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_mode.h"
#include "r600d.h"
#include "atom.h"
#include "avivod.h"

#define PFP_UCODE_SIZE 576
#define PM4_UCODE_SIZE 1792
#define RLC_UCODE_SIZE 768
#define R700_PFP_UCODE_SIZE 848
#define R700_PM4_UCODE_SIZE 1360
#define R700_RLC_UCODE_SIZE 1024
#define EVERGREEN_PFP_UCODE_SIZE 1120
#define EVERGREEN_PM4_UCODE_SIZE 1376
#define EVERGREEN_RLC_UCODE_SIZE 768

/* Firmware Names */
MODULE_FIRMWARE("radeon/R600_pfp.bin");
MODULE_FIRMWARE("radeon/R600_me.bin");
MODULE_FIRMWARE("radeon/RV610_pfp.bin");
MODULE_FIRMWARE("radeon/RV610_me.bin");
MODULE_FIRMWARE("radeon/RV630_pfp.bin");
MODULE_FIRMWARE("radeon/RV630_me.bin");
MODULE_FIRMWARE("radeon/RV620_pfp.bin");
MODULE_FIRMWARE("radeon/RV620_me.bin");
MODULE_FIRMWARE("radeon/RV635_pfp.bin");
MODULE_FIRMWARE("radeon/RV635_me.bin");
MODULE_FIRMWARE("radeon/RV670_pfp.bin");
MODULE_FIRMWARE("radeon/RV670_me.bin");
MODULE_FIRMWARE("radeon/RS780_pfp.bin");
MODULE_FIRMWARE("radeon/RS780_me.bin");
MODULE_FIRMWARE("radeon/RV770_pfp.bin");
MODULE_FIRMWARE("radeon/RV770_me.bin");
MODULE_FIRMWARE("radeon/RV730_pfp.bin");
MODULE_FIRMWARE("radeon/RV730_me.bin");
MODULE_FIRMWARE("radeon/RV710_pfp.bin");
MODULE_FIRMWARE("radeon/RV710_me.bin");
MODULE_FIRMWARE("radeon/R600_rlc.bin");
MODULE_FIRMWARE("radeon/R700_rlc.bin");
MODULE_FIRMWARE("radeon/CEDAR_pfp.bin");
MODULE_FIRMWARE("radeon/CEDAR_me.bin");
MODULE_FIRMWARE("radeon/CEDAR_rlc.bin");
MODULE_FIRMWARE("radeon/REDWOOD_pfp.bin");
MODULE_FIRMWARE("radeon/REDWOOD_me.bin");
MODULE_FIRMWARE("radeon/REDWOOD_rlc.bin");
MODULE_FIRMWARE("radeon/JUNIPER_pfp.bin");
MODULE_FIRMWARE("radeon/JUNIPER_me.bin");
MODULE_FIRMWARE("radeon/JUNIPER_rlc.bin");
MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin");
MODULE_FIRMWARE("radeon/CYPRESS_me.bin");
MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin");

int r600_debugfs_mc_info_init(struct radeon_device *rdev);

/* r600,rv610,rv630,rv620,rv635,rv670 */
int r600_mc_wait_for_idle(struct radeon_device *rdev);
void r600_gpu_init(struct radeon_device *rdev);
void r600_fini(struct radeon_device *rdev);
void r600_irq_disable(struct radeon_device *rdev);

/* get temperature in millidegrees */
u32 rv6xx_get_temp(struct radeon_device *rdev)
{
	u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >>
		ASIC_T_SHIFT;

	return temp * 1000;
}

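/**
 * r600_pm_get_dynpm_state - select the power state for a dynpm transition
 * @rdev: radeon device structure
 *
 * Fills rdev->pm.requested_power_state_index and
 * rdev->pm.requested_clock_mode_index for the planned dynpm action
 * (minimum, downclock, upclock or default).  IGPs and R600 switch
 * between whole power states; later asics pick one power state and
 * step through its clock modes.  States flagged single-display-only
 * are skipped while more than one crtc is active.
 */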
void r600_pm_get_dynpm_state(struct radeon_device *rdev)
{
	int i;

	rdev->pm.dynpm_can_upclock = true;
	rdev->pm.dynpm_can_downclock = true;

	/* power state array is low to high, default is first */
	if ((rdev->flags & RADEON_IS_IGP) || (rdev->family == CHIP_R600)) {
		int min_power_state_index = 0;

		if (rdev->pm.num_power_states > 2)
			min_power_state_index = 1;

		switch (rdev->pm.dynpm_planned_action) {
		case DYNPM_ACTION_MINIMUM:
			rdev->pm.requested_power_state_index = min_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_downclock = false;
			break;
		case DYNPM_ACTION_DOWNCLOCK:
			if (rdev->pm.current_power_state_index == min_power_state_index) {
				rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
				rdev->pm.dynpm_can_downclock = false;
			} else {
				if (rdev->pm.active_crtc_count > 1) {
					for (i = 0; i < rdev->pm.num_power_states; i++) {
						if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
							continue;
						else if (i >= rdev->pm.current_power_state_index) {
							rdev->pm.requested_power_state_index =
								rdev->pm.current_power_state_index;
							break;
						} else {
							rdev->pm.requested_power_state_index = i;
							break;
						}
					}
				} else {
					if (rdev->pm.current_power_state_index == 0)
						rdev->pm.requested_power_state_index =
							rdev->pm.num_power_states - 1;
					else
						rdev->pm.requested_power_state_index =
							rdev->pm.current_power_state_index - 1;
				}
			}
			rdev->pm.requested_clock_mode_index = 0;
			/* don't use the power state if crtcs are active and no display flag is set */
			if ((rdev->pm.active_crtc_count > 0) &&
			    (rdev->pm.power_state[rdev->pm.requested_power_state_index].
			     clock_info[rdev->pm.requested_clock_mode_index].flags &
			     RADEON_PM_MODE_NO_DISPLAY)) {
				rdev->pm.requested_power_state_index++;
			}
			break;
		case DYNPM_ACTION_UPCLOCK:
			if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
				rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
				rdev->pm.dynpm_can_upclock = false;
			} else {
				if (rdev->pm.active_crtc_count > 1) {
					for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
						if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
							continue;
						else if (i <= rdev->pm.current_power_state_index) {
							rdev->pm.requested_power_state_index =
								rdev->pm.current_power_state_index;
							break;
						} else {
							rdev->pm.requested_power_state_index = i;
							break;
						}
					}
				} else
					rdev->pm.requested_power_state_index =
						rdev->pm.current_power_state_index + 1;
			}
			rdev->pm.requested_clock_mode_index = 0;
			break;
		case DYNPM_ACTION_DEFAULT:
			rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_upclock = false;
			break;
		case DYNPM_ACTION_NONE:
		default:
			DRM_ERROR("Requested mode for not defined action\n");
			return;
		}
	} else {
		/* XXX select a power state based on AC/DC, single/dualhead, etc. */
		/* for now just select the first power state and switch between clock modes */
		/* power state array is low to high, default is first (0) */
		if (rdev->pm.active_crtc_count > 1) {
			rdev->pm.requested_power_state_index = -1;
			/* start at 1 as we don't want the default mode */
			for (i = 1; i < rdev->pm.num_power_states; i++) {
				if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
					continue;
				else if ((rdev->pm.power_state[i].type == POWER_STATE_TYPE_PERFORMANCE) ||
					 (rdev->pm.power_state[i].type == POWER_STATE_TYPE_BATTERY)) {
					rdev->pm.requested_power_state_index = i;
					break;
				}
			}
			/* if nothing selected, grab the default state. */
			if (rdev->pm.requested_power_state_index == -1)
				rdev->pm.requested_power_state_index = 0;
		} else
			rdev->pm.requested_power_state_index = 1;

		switch (rdev->pm.dynpm_planned_action) {
		case DYNPM_ACTION_MINIMUM:
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_downclock = false;
			break;
		case DYNPM_ACTION_DOWNCLOCK:
			if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
				if (rdev->pm.current_clock_mode_index == 0) {
					rdev->pm.requested_clock_mode_index = 0;
					rdev->pm.dynpm_can_downclock = false;
				} else
					rdev->pm.requested_clock_mode_index =
						rdev->pm.current_clock_mode_index - 1;
			} else {
				rdev->pm.requested_clock_mode_index = 0;
				rdev->pm.dynpm_can_downclock = false;
			}
			/* don't use the power state if crtcs are active and no display flag is set */
			if ((rdev->pm.active_crtc_count > 0) &&
			    (rdev->pm.power_state[rdev->pm.requested_power_state_index].
			     clock_info[rdev->pm.requested_clock_mode_index].flags &
			     RADEON_PM_MODE_NO_DISPLAY)) {
				rdev->pm.requested_clock_mode_index++;
			}
			break;
		case DYNPM_ACTION_UPCLOCK:
			if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
				if (rdev->pm.current_clock_mode_index ==
				    (rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1)) {
					rdev->pm.requested_clock_mode_index = rdev->pm.current_clock_mode_index;
					rdev->pm.dynpm_can_upclock = false;
				} else
					rdev->pm.requested_clock_mode_index =
						rdev->pm.current_clock_mode_index + 1;
			} else {
				rdev->pm.requested_clock_mode_index =
					rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1;
				rdev->pm.dynpm_can_upclock = false;
			}
			break;
		case DYNPM_ACTION_DEFAULT:
			rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_upclock = false;
			break;
		case DYNPM_ACTION_NONE:
		default:
			DRM_ERROR("Requested mode for not defined action\n");
			return;
		}
	}

	DRM_DEBUG_DRIVER("Requested: e: %d m: %d p: %d\n",
			 rdev->pm.power_state[rdev->pm.requested_power_state_index].
			 clock_info[rdev->pm.requested_clock_mode_index].sclk,
			 rdev->pm.power_state[rdev->pm.requested_power_state_index].
			 clock_info[rdev->pm.requested_clock_mode_index].mclk,
			 rdev->pm.power_state[rdev->pm.requested_power_state_index].
			 pcie_lanes);
}

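/**
 * r600_pm_get_type_index - find a power state index by type
 * @rdev: radeon device structure
 * @ps_type: power state type (battery, performance, ...)
 * @instance: which occurrence of that type to return
 *
 * Returns the index of the @instance'th power state of type @ps_type,
 * or the default power state index if there is no match.
 */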
static int r600_pm_get_type_index(struct radeon_device *rdev,
				  enum radeon_pm_state_type ps_type,
				  int instance)
{
	int i;
	int found_instance = -1;

	for (i = 0; i < rdev->pm.num_power_states; i++) {
		if (rdev->pm.power_state[i].type == ps_type) {
			found_instance++;
			if (found_instance == instance)
				return i;
		}
	}
	/* return default if no match */
	return rdev->pm.default_power_state_index;
}

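/**
 * rs780_pm_init_profile - build the PM profile table for RS780-class IGPs
 * @rdev: radeon device structure
 *
 * Fills the dpms on/off power state and clock mode indices for each
 * profile (default, low/mid/high single- and multi-head), picking
 * progressively higher states depending on how many power states the
 * bios reported (2, 3 or more).
 */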
void rs780_pm_init_profile(struct radeon_device *rdev)
{
	if (rdev->pm.num_power_states == 2) {
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	} else if (rdev->pm.num_power_states == 3) {
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	} else {
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 3;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 3;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	}
}

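/**
 * r600_pm_init_profile - build the PM profile table for r6xx/r7xx asics
 * @rdev: radeon device structure
 *
 * On R600 every profile points at the default power state.  Other asics
 * use battery states for the low/mid profiles on mobility parts and
 * performance states otherwise, looked up via r600_pm_get_type_index().
 */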
void r600_pm_init_profile(struct radeon_device *rdev)
{
	if (rdev->family == CHIP_R600) {
		/* XXX */
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	} else {
		if (rdev->pm.num_power_states < 4) {
			/* default */
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
			/* low sh */
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
			/* mid sh */
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
			/* high sh */
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
			/* low mh */
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
			/* mid mh */
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
			/* high mh */
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
		} else {
			/* default */
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
			/* low sh */
			if (rdev->flags & RADEON_IS_MOBILITY) {
				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
			} else {
				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
			}
			/* mid sh */
			if (rdev->flags & RADEON_IS_MOBILITY) {
				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
			} else {
				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
			}
			/* high sh */
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx =
				r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx =
				r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
			/* low mh */
			if (rdev->flags & RADEON_IS_MOBILITY) {
				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
			} else {
				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
			}
			/* mid mh */
			if (rdev->flags & RADEON_IS_MOBILITY) {
				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
			} else {
				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
			}
			/* high mh */
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx =
				r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx =
				r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
		}
	}
}

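/**
 * r600_pm_misc - apply side effects of a power state change
 * @rdev: radeon device structure
 *
 * Currently only programs the core (vddc) voltage through the atombios
 * tables when the requested state uses software voltage control and the
 * voltage differs from the current one.
 */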
void r600_pm_misc(struct radeon_device *rdev)
{
	int req_ps_idx = rdev->pm.requested_power_state_index;
	int req_cm_idx = rdev->pm.requested_clock_mode_index;
	struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
	struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;

	if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
		if (voltage->voltage != rdev->pm.current_vddc) {
			radeon_atom_set_voltage(rdev, voltage->voltage);
			rdev->pm.current_vddc = voltage->voltage;
			DRM_DEBUG_DRIVER("Setting: v: %d\n", voltage->voltage);
		}
	}
}

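/* report whether the GUI (graphics pipe) block of the GRBM is idle */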
bool r600_gui_idle(struct radeon_device *rdev)
{
	if (RREG32(GRBM_STATUS) & GUI_ACTIVE)
		return false;
	else
		return true;
}

/* hpd for digital panel detect/disconnect */
bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
	bool connected = false;

	if (ASIC_IS_DCE3(rdev)) {
		switch (hpd) {
		case RADEON_HPD_1:
			if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_2:
			if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_3:
			if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_4:
			if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
			/* DCE 3.2 */
		case RADEON_HPD_5:
			if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_6:
			if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		default:
			break;
		}
	} else {
		switch (hpd) {
		case RADEON_HPD_1:
			if (RREG32(DC_HOT_PLUG_DETECT1_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_2:
			if (RREG32(DC_HOT_PLUG_DETECT2_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_3:
			if (RREG32(DC_HOT_PLUG_DETECT3_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		default:
			break;
		}
	}
	return connected;
}

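/* program the hpd interrupt polarity from the current sense state so
 * that the next interrupt fires on a connect/disconnect transition
 */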
void r600_hpd_set_polarity(struct radeon_device *rdev,
			   enum radeon_hpd_id hpd)
{
	u32 tmp;
	bool connected = r600_hpd_sense(rdev, hpd);

	if (ASIC_IS_DCE3(rdev)) {
		switch (hpd) {
		case RADEON_HPD_1:
			tmp = RREG32(DC_HPD1_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD1_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_2:
			tmp = RREG32(DC_HPD2_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD2_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_3:
			tmp = RREG32(DC_HPD3_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD3_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_4:
			tmp = RREG32(DC_HPD4_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD4_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_5:
			tmp = RREG32(DC_HPD5_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD5_INT_CONTROL, tmp);
			break;
			/* DCE 3.2 */
		case RADEON_HPD_6:
			tmp = RREG32(DC_HPD6_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD6_INT_CONTROL, tmp);
			break;
		default:
			break;
		}
	} else {
		switch (hpd) {
		case RADEON_HPD_1:
			tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_2:
			tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_3:
			tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
			break;
		default:
			break;
		}
	}
}

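/* enable the hpd pins used by the connectors present and mark their
 * interrupt sources for r600_irq_set()
 */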
void r600_hpd_init(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;

	if (ASIC_IS_DCE3(rdev)) {
		u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa);
		if (ASIC_IS_DCE32(rdev))
			tmp |= DC_HPDx_EN;

		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HPD1_CONTROL, tmp);
				rdev->irq.hpd[0] = true;
				break;
			case RADEON_HPD_2:
				WREG32(DC_HPD2_CONTROL, tmp);
				rdev->irq.hpd[1] = true;
				break;
			case RADEON_HPD_3:
				WREG32(DC_HPD3_CONTROL, tmp);
				rdev->irq.hpd[2] = true;
				break;
			case RADEON_HPD_4:
				WREG32(DC_HPD4_CONTROL, tmp);
				rdev->irq.hpd[3] = true;
				break;
				/* DCE 3.2 */
			case RADEON_HPD_5:
				WREG32(DC_HPD5_CONTROL, tmp);
				rdev->irq.hpd[4] = true;
				break;
			case RADEON_HPD_6:
				WREG32(DC_HPD6_CONTROL, tmp);
				rdev->irq.hpd[5] = true;
				break;
			default:
				break;
			}
		}
	} else {
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				rdev->irq.hpd[0] = true;
				break;
			case RADEON_HPD_2:
				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				rdev->irq.hpd[1] = true;
				break;
			case RADEON_HPD_3:
				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				rdev->irq.hpd[2] = true;
				break;
			default:
				break;
			}
		}
	}
	if (rdev->irq.installed)
		r600_irq_set(rdev);
}

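/* disable the hpd pins and clear their interrupt source flags */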
void r600_hpd_fini(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;

	if (ASIC_IS_DCE3(rdev)) {
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HPD1_CONTROL, 0);
				rdev->irq.hpd[0] = false;
				break;
			case RADEON_HPD_2:
				WREG32(DC_HPD2_CONTROL, 0);
				rdev->irq.hpd[1] = false;
				break;
			case RADEON_HPD_3:
				WREG32(DC_HPD3_CONTROL, 0);
				rdev->irq.hpd[2] = false;
				break;
			case RADEON_HPD_4:
				WREG32(DC_HPD4_CONTROL, 0);
				rdev->irq.hpd[3] = false;
				break;
				/* DCE 3.2 */
			case RADEON_HPD_5:
				WREG32(DC_HPD5_CONTROL, 0);
				rdev->irq.hpd[4] = false;
				break;
			case RADEON_HPD_6:
				WREG32(DC_HPD6_CONTROL, 0);
				rdev->irq.hpd[5] = false;
				break;
			default:
				break;
			}
		}
	} else {
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, 0);
				rdev->irq.hpd[0] = false;
				break;
			case RADEON_HPD_2:
				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, 0);
				rdev->irq.hpd[1] = false;
				break;
			case RADEON_HPD_3:
				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, 0);
				rdev->irq.hpd[2] = false;
				break;
			default:
				break;
			}
		}
	}
}

/*
 * R600 PCIE GART
 */
void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	/* flush hdp cache so updates hit vram */
	if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
	    !(rdev->flags & RADEON_IS_AGP)) {
		void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
		u32 tmp;

		/* r7xx hw bug.  write to HDP_DEBUG1 followed by fb read
		 * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL.
		 * This seems to cause problems on some AGP cards. Just use the old
		 * method for them.
		 */
		WREG32(HDP_DEBUG1, 0);
		tmp = readl((void __iomem *)ptr);
	} else
		WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);

	WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (rdev->mc.gtt_end - 1) >> 12);
	WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
		tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
		if (tmp == 2) {
			printk(KERN_WARNING "[drm] r600 flush TLB failed\n");
			return;
		}
		if (tmp) {
			return;
		}
		udelay(1);
	}
}

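/* one-time GART setup: allocate the common gart structure and the
 * page table object in VRAM (8 bytes per GPU page)
 */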
int r600_pcie_gart_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.table.vram.robj) {
		WARN(1, "R600 PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	if (r)
		return r;
	rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
	return radeon_gart_table_vram_alloc(rdev);
}

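/* pin the GART table, program the L2 cache, L1 TLBs and VM context 0
 * to point at it, then flush the TLB
 */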
int r600_pcie_gart_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int r, i;

	if (rdev->gart.table.vram.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	radeon_gart_restore(rdev);

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
			(u32)(rdev->dummy_page.addr >> 12));
	for (i = 1; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	r600_pcie_gart_tlb_flush(rdev);
	rdev->gart.ready = true;
	return 0;
}

void r600_pcie_gart_disable(struct radeon_device *rdev)
{
	u32 tmp;
	int i, r;

	/* Disable all tables */
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	/* Disable L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup L1 TLB control */
	tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	if (rdev->gart.table.vram.robj) {
		r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(rdev->gart.table.vram.robj);
			radeon_bo_unpin(rdev->gart.table.vram.robj);
			radeon_bo_unreserve(rdev->gart.table.vram.robj);
		}
	}
}

void r600_pcie_gart_fini(struct radeon_device *rdev)
{
	radeon_gart_fini(rdev);
	r600_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
}

void r600_agp_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int i;

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
}

int r600_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(R_000E50_SRBM_STATUS) & 0x3F00;
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -1;
}

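/* program the memory controller apertures (VRAM, and AGP when present)
 * with the MC stopped via rv515_mc_stop()/rv515_mc_resume()
 */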
static void r600_mc_program(struct radeon_device *rdev)
{
	struct rv515_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}
	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);

	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
	}
	/* Lockout access through VGA aperture (doesn't exist before R600) */
	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
	/* Update configuration */
	if (rdev->flags & RADEON_IS_AGP) {
		if (rdev->mc.vram_start < rdev->mc.gtt_start) {
			/* VRAM before AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.vram_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.gtt_end >> 12);
		} else {
			/* VRAM after AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.gtt_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.vram_end >> 12);
		}
	} else {
		WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12);
		WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12);
	}
	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(MC_VM_FB_LOCATION, tmp);
	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
	WREG32(HDP_NONSURFACE_INFO, (2 << 7));
	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	if (rdev->flags & RADEON_IS_AGP) {
		WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22);
		WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22);
		WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
	} else {
		WREG32(MC_VM_AGP_BASE, 0);
		WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
		WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
	}
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
	}
	rv515_mc_resume(rdev, &save);
	/* we need to own VRAM, so turn off the VGA renderer here
	 * to stop it overwriting our objects */
	rv515_vga_render_disable(rdev);
}

/**
 * r600_vram_gtt_location - try to find VRAM & GTT location
 * @rdev: radeon device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * This function tries to place VRAM at the same address in the GPU
 * address space as it has in the CPU (PCI) address space, as some GPUs
 * seem to have issues when we reprogram it at a different address.
 *
 * If there is not enough space to fit the invisible VRAM after the
 * aperture, then we limit the VRAM size to the aperture.
 *
 * If we are using AGP, then VRAM is placed adjacent to the AGP aperture,
 * as we need them to appear as one region from the GPU's point of view
 * so that we can program the GPU to catch accesses outside them
 * (weird GPU policy, see ??).
 *
 * This function never fails; in the worst case VRAM or GTT is limited.
 *
 * Note: GTT start, end and size should be initialized before calling this
 * function on an AGP platform.
 */
void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
	u64 size_bf, size_af;

	if (mc->mc_vram_size > 0xE0000000) {
		/* leave room for at least 512M GTT */
		dev_warn(rdev->dev, "limiting VRAM\n");
		mc->real_vram_size = 0xE0000000;
		mc->mc_vram_size = 0xE0000000;
	}
	if (rdev->flags & RADEON_IS_AGP) {
		size_bf = mc->gtt_start;
		size_af = 0xFFFFFFFF - mc->gtt_end + 1;
		if (size_bf > size_af) {
			if (mc->mc_vram_size > size_bf) {
				dev_warn(rdev->dev, "limiting VRAM\n");
				mc->real_vram_size = size_bf;
				mc->mc_vram_size = size_bf;
			}
			mc->vram_start = mc->gtt_start - mc->mc_vram_size;
		} else {
			if (mc->mc_vram_size > size_af) {
				dev_warn(rdev->dev, "limiting VRAM\n");
				mc->real_vram_size = size_af;
				mc->mc_vram_size = size_af;
			}
			mc->vram_start = mc->gtt_end;
		}
		mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
		dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
				mc->mc_vram_size >> 20, mc->vram_start,
				mc->vram_end, mc->real_vram_size >> 20);
	} else {
		u64 base = 0;
		if (rdev->flags & RADEON_IS_IGP) {
			base = RREG32(MC_VM_FB_LOCATION) & 0xFFFF;
			base <<= 24;
		}
		radeon_vram_location(rdev, &rdev->mc, base);
		rdev->mc.gtt_base_align = 0;
		radeon_gtt_location(rdev, mc);
	}
}

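/* read the memory configuration (channel size and count, apertures,
 * VRAM size) and place VRAM and GTT in the GPU address space
 */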
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001211int r600_mc_init(struct radeon_device *rdev)
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001212{
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001213 u32 tmp;
Alex Deucher5885b7a2009-10-19 17:23:33 -04001214 int chansize, numchan;
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001215
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001216	/* Get VRAM information */
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001217 rdev->mc.vram_is_ddr = true;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001218 tmp = RREG32(RAMCFG);
1219 if (tmp & CHANSIZE_OVERRIDE) {
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001220 chansize = 16;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001221 } else if (tmp & CHANSIZE_MASK) {
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001222 chansize = 64;
1223 } else {
1224 chansize = 32;
1225 }
Alex Deucher5885b7a2009-10-19 17:23:33 -04001226 tmp = RREG32(CHMAP);
1227 switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
1228 case 0:
1229 default:
1230 numchan = 1;
1231 break;
1232 case 1:
1233 numchan = 2;
1234 break;
1235 case 2:
1236 numchan = 4;
1237 break;
1238 case 3:
1239 numchan = 8;
1240 break;
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001241 }
Alex Deucher5885b7a2009-10-19 17:23:33 -04001242 rdev->mc.vram_width = numchan * chansize;
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001243	/* Could the aperture size report 0? */
Jordan Crouse01d73a62010-05-27 13:40:24 -06001244 rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
1245 rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001246 /* Setup GPU memory space */
1247 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
1248 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
Jerome Glisse51e5fcd2010-02-19 14:33:54 +00001249 rdev->mc.visible_vram_size = rdev->mc.aper_size;
Jerome Glissec919b372010-08-10 17:41:31 -04001250 rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
Jerome Glissed594e462010-02-17 21:54:29 +00001251 r600_vram_gtt_location(rdev, &rdev->mc);
Alex Deucherf47299c2010-03-16 20:54:38 -04001252
Alex Deucherf8920342010-06-30 12:02:03 -04001253 if (rdev->flags & RADEON_IS_IGP) {
1254 rs690_pm_info(rdev);
Alex Deucher06b64762010-01-05 11:27:29 -05001255 rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
Alex Deucherf8920342010-06-30 12:02:03 -04001256 }
Alex Deucherf47299c2010-03-16 20:54:38 -04001257 radeon_update_bandwidth_info(rdev);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001258 return 0;
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001259}
1260
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001261/* We don't check whether the GPU really needs a reset; we simply do the
 1262 * reset. It's up to the caller to determine if the GPU needs one. We
 1263 * might add a helper function to check that.
1264 */
1265int r600_gpu_soft_reset(struct radeon_device *rdev)
1266{
Jerome Glissea3c19452009-10-01 18:02:13 +02001267 struct rv515_mc_save save;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001268 u32 grbm_busy_mask = S_008010_VC_BUSY(1) | S_008010_VGT_BUSY_NO_DMA(1) |
1269 S_008010_VGT_BUSY(1) | S_008010_TA03_BUSY(1) |
1270 S_008010_TC_BUSY(1) | S_008010_SX_BUSY(1) |
1271 S_008010_SH_BUSY(1) | S_008010_SPI03_BUSY(1) |
1272 S_008010_SMX_BUSY(1) | S_008010_SC_BUSY(1) |
1273 S_008010_PA_BUSY(1) | S_008010_DB03_BUSY(1) |
1274 S_008010_CR_BUSY(1) | S_008010_CB03_BUSY(1) |
1275 S_008010_GUI_ACTIVE(1);
1276 u32 grbm2_busy_mask = S_008014_SPI0_BUSY(1) | S_008014_SPI1_BUSY(1) |
1277 S_008014_SPI2_BUSY(1) | S_008014_SPI3_BUSY(1) |
1278 S_008014_TA0_BUSY(1) | S_008014_TA1_BUSY(1) |
1279 S_008014_TA2_BUSY(1) | S_008014_TA3_BUSY(1) |
1280 S_008014_DB0_BUSY(1) | S_008014_DB1_BUSY(1) |
1281 S_008014_DB2_BUSY(1) | S_008014_DB3_BUSY(1) |
1282 S_008014_CB0_BUSY(1) | S_008014_CB1_BUSY(1) |
1283 S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1);
Jerome Glissea3c19452009-10-01 18:02:13 +02001284 u32 tmp;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001285
Jerome Glisse1a029b72009-10-06 19:04:30 +02001286	dev_info(rdev->dev, "GPU softreset\n");
1287 dev_info(rdev->dev, " R_008010_GRBM_STATUS=0x%08X\n",
1288 RREG32(R_008010_GRBM_STATUS));
1289 dev_info(rdev->dev, " R_008014_GRBM_STATUS2=0x%08X\n",
Jerome Glissea3c19452009-10-01 18:02:13 +02001290 RREG32(R_008014_GRBM_STATUS2));
Jerome Glisse1a029b72009-10-06 19:04:30 +02001291 dev_info(rdev->dev, " R_000E50_SRBM_STATUS=0x%08X\n",
1292 RREG32(R_000E50_SRBM_STATUS));
Jerome Glissea3c19452009-10-01 18:02:13 +02001293 rv515_mc_stop(rdev, &save);
1294 if (r600_mc_wait_for_idle(rdev)) {
 1295 		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
1296 }
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001297 /* Disable CP parsing/prefetching */
Jerome Glisse90aca4d2010-03-09 14:45:12 +00001298 WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001299	/* Check if any of the rendering blocks are busy and reset them */
1300 if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) ||
1301 (RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) {
Jerome Glissea3c19452009-10-01 18:02:13 +02001302 tmp = S_008020_SOFT_RESET_CR(1) |
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001303 S_008020_SOFT_RESET_DB(1) |
1304 S_008020_SOFT_RESET_CB(1) |
1305 S_008020_SOFT_RESET_PA(1) |
1306 S_008020_SOFT_RESET_SC(1) |
1307 S_008020_SOFT_RESET_SMX(1) |
1308 S_008020_SOFT_RESET_SPI(1) |
1309 S_008020_SOFT_RESET_SX(1) |
1310 S_008020_SOFT_RESET_SH(1) |
1311 S_008020_SOFT_RESET_TC(1) |
1312 S_008020_SOFT_RESET_TA(1) |
1313 S_008020_SOFT_RESET_VC(1) |
Jerome Glissea3c19452009-10-01 18:02:13 +02001314 S_008020_SOFT_RESET_VGT(1);
Jerome Glisse1a029b72009-10-06 19:04:30 +02001315 dev_info(rdev->dev, " R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
Jerome Glissea3c19452009-10-01 18:02:13 +02001316 WREG32(R_008020_GRBM_SOFT_RESET, tmp);
Jerome Glisse90aca4d2010-03-09 14:45:12 +00001317 RREG32(R_008020_GRBM_SOFT_RESET);
1318 mdelay(15);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001319 WREG32(R_008020_GRBM_SOFT_RESET, 0);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001320 }
1321 /* Reset CP (we always reset CP) */
Jerome Glissea3c19452009-10-01 18:02:13 +02001322 tmp = S_008020_SOFT_RESET_CP(1);
 1323 	dev_info(rdev->dev, "  R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
1324 WREG32(R_008020_GRBM_SOFT_RESET, tmp);
Jerome Glisse90aca4d2010-03-09 14:45:12 +00001325 RREG32(R_008020_GRBM_SOFT_RESET);
1326 mdelay(15);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001327 WREG32(R_008020_GRBM_SOFT_RESET, 0);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001328 /* Wait a little for things to settle down */
Jerome Glisse225758d2010-03-09 14:45:10 +00001329 mdelay(1);
Jerome Glisse1a029b72009-10-06 19:04:30 +02001330 dev_info(rdev->dev, " R_008010_GRBM_STATUS=0x%08X\n",
1331 RREG32(R_008010_GRBM_STATUS));
1332 dev_info(rdev->dev, " R_008014_GRBM_STATUS2=0x%08X\n",
1333 RREG32(R_008014_GRBM_STATUS2));
1334 dev_info(rdev->dev, " R_000E50_SRBM_STATUS=0x%08X\n",
1335 RREG32(R_000E50_SRBM_STATUS));
Jerome Glissea3c19452009-10-01 18:02:13 +02001336 rv515_mc_resume(rdev, &save);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001337 return 0;
1338}
1339
Jerome Glisse225758d2010-03-09 14:45:10 +00001340bool r600_gpu_is_lockup(struct radeon_device *rdev)
1341{
1342 u32 srbm_status;
1343 u32 grbm_status;
1344 u32 grbm_status2;
1345 int r;
1346
1347 srbm_status = RREG32(R_000E50_SRBM_STATUS);
1348 grbm_status = RREG32(R_008010_GRBM_STATUS);
1349 grbm_status2 = RREG32(R_008014_GRBM_STATUS2);
1350 if (!G_008010_GUI_ACTIVE(grbm_status)) {
1351 r100_gpu_lockup_update(&rdev->config.r300.lockup, &rdev->cp);
1352 return false;
1353 }
1354 /* force CP activities */
1355 r = radeon_ring_lock(rdev, 2);
1356 if (!r) {
1357 /* PACKET2 NOP */
1358 radeon_ring_write(rdev, 0x80000000);
1359 radeon_ring_write(rdev, 0x80000000);
1360 radeon_ring_unlock_commit(rdev);
1361 }
1362 rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
1363 return r100_gpu_cp_is_lockup(rdev, &rdev->config.r300.lockup, &rdev->cp);
1364}
1365
Jerome Glissea2d07b72010-03-09 14:45:11 +00001366int r600_asic_reset(struct radeon_device *rdev)
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001367{
1368 return r600_gpu_soft_reset(rdev);
1369}
1370
1371static u32 r600_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
1372 u32 num_backends,
1373 u32 backend_disable_mask)
1374{
1375 u32 backend_map = 0;
1376 u32 enabled_backends_mask;
1377 u32 enabled_backends_count;
1378 u32 cur_pipe;
1379 u32 swizzle_pipe[R6XX_MAX_PIPES];
1380 u32 cur_backend;
1381 u32 i;
1382
1383 if (num_tile_pipes > R6XX_MAX_PIPES)
1384 num_tile_pipes = R6XX_MAX_PIPES;
1385 if (num_tile_pipes < 1)
1386 num_tile_pipes = 1;
1387 if (num_backends > R6XX_MAX_BACKENDS)
1388 num_backends = R6XX_MAX_BACKENDS;
1389 if (num_backends < 1)
1390 num_backends = 1;
1391
1392 enabled_backends_mask = 0;
1393 enabled_backends_count = 0;
1394 for (i = 0; i < R6XX_MAX_BACKENDS; ++i) {
1395 if (((backend_disable_mask >> i) & 1) == 0) {
1396 enabled_backends_mask |= (1 << i);
1397 ++enabled_backends_count;
1398 }
1399 if (enabled_backends_count == num_backends)
1400 break;
1401 }
1402
1403 if (enabled_backends_count == 0) {
1404 enabled_backends_mask = 1;
1405 enabled_backends_count = 1;
1406 }
1407
1408 if (enabled_backends_count != num_backends)
1409 num_backends = enabled_backends_count;
1410
1411 memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R6XX_MAX_PIPES);
1412 switch (num_tile_pipes) {
1413 case 1:
1414 swizzle_pipe[0] = 0;
1415 break;
1416 case 2:
1417 swizzle_pipe[0] = 0;
1418 swizzle_pipe[1] = 1;
1419 break;
1420 case 3:
1421 swizzle_pipe[0] = 0;
1422 swizzle_pipe[1] = 1;
1423 swizzle_pipe[2] = 2;
1424 break;
1425 case 4:
1426 swizzle_pipe[0] = 0;
1427 swizzle_pipe[1] = 1;
1428 swizzle_pipe[2] = 2;
1429 swizzle_pipe[3] = 3;
1430 break;
1431 case 5:
1432 swizzle_pipe[0] = 0;
1433 swizzle_pipe[1] = 1;
1434 swizzle_pipe[2] = 2;
1435 swizzle_pipe[3] = 3;
1436 swizzle_pipe[4] = 4;
1437 break;
1438 case 6:
1439 swizzle_pipe[0] = 0;
1440 swizzle_pipe[1] = 2;
1441 swizzle_pipe[2] = 4;
1442 swizzle_pipe[3] = 5;
1443 swizzle_pipe[4] = 1;
1444 swizzle_pipe[5] = 3;
1445 break;
1446 case 7:
1447 swizzle_pipe[0] = 0;
1448 swizzle_pipe[1] = 2;
1449 swizzle_pipe[2] = 4;
1450 swizzle_pipe[3] = 6;
1451 swizzle_pipe[4] = 1;
1452 swizzle_pipe[5] = 3;
1453 swizzle_pipe[6] = 5;
1454 break;
1455 case 8:
1456 swizzle_pipe[0] = 0;
1457 swizzle_pipe[1] = 2;
1458 swizzle_pipe[2] = 4;
1459 swizzle_pipe[3] = 6;
1460 swizzle_pipe[4] = 1;
1461 swizzle_pipe[5] = 3;
1462 swizzle_pipe[6] = 5;
1463 swizzle_pipe[7] = 7;
1464 break;
1465 }
1466
1467 cur_backend = 0;
1468 for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
1469 while (((1 << cur_backend) & enabled_backends_mask) == 0)
1470 cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;
1471
1472 backend_map |= (u32)(((cur_backend & 3) << (swizzle_pipe[cur_pipe] * 2)));
1473
1474 cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;
1475 }
1476
1477 return backend_map;
1478}
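/*
 * Worked example (illustrative): with num_tile_pipes = 4, num_backends = 4
 * and an empty disable mask, swizzle_pipe is {0, 1, 2, 3} and backends
 * 0..3 are assigned in order, two bits per pipe, giving
 * backend_map = (0 << 0) | (1 << 2) | (2 << 4) | (3 << 6) = 0xE4,
 * i.e. the identity mapping.
 */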
1479
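/* Plain population count; equivalent to the kernel's hweight32(). */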
1480int r600_count_pipe_bits(uint32_t val)
1481{
1482 int i, ret = 0;
1483
1484 for (i = 0; i < 32; i++) {
1485 ret += val & 1;
1486 val >>= 1;
1487 }
1488 return ret;
1489}
1490
1491void r600_gpu_init(struct radeon_device *rdev)
1492{
1493 u32 tiling_config;
1494 u32 ramcfg;
Alex Deucherd03f5d52010-02-19 16:22:31 -05001495 u32 backend_map;
1496 u32 cc_rb_backend_disable;
1497 u32 cc_gc_shader_pipe_config;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001498 u32 tmp;
1499 int i, j;
1500 u32 sq_config;
1501 u32 sq_gpr_resource_mgmt_1 = 0;
1502 u32 sq_gpr_resource_mgmt_2 = 0;
1503 u32 sq_thread_resource_mgmt = 0;
1504 u32 sq_stack_resource_mgmt_1 = 0;
1505 u32 sq_stack_resource_mgmt_2 = 0;
1506
1507 /* FIXME: implement */
1508 switch (rdev->family) {
1509 case CHIP_R600:
1510 rdev->config.r600.max_pipes = 4;
1511 rdev->config.r600.max_tile_pipes = 8;
1512 rdev->config.r600.max_simds = 4;
1513 rdev->config.r600.max_backends = 4;
1514 rdev->config.r600.max_gprs = 256;
1515 rdev->config.r600.max_threads = 192;
1516 rdev->config.r600.max_stack_entries = 256;
1517 rdev->config.r600.max_hw_contexts = 8;
1518 rdev->config.r600.max_gs_threads = 16;
1519 rdev->config.r600.sx_max_export_size = 128;
1520 rdev->config.r600.sx_max_export_pos_size = 16;
1521 rdev->config.r600.sx_max_export_smx_size = 128;
1522 rdev->config.r600.sq_num_cf_insts = 2;
1523 break;
1524 case CHIP_RV630:
1525 case CHIP_RV635:
1526 rdev->config.r600.max_pipes = 2;
1527 rdev->config.r600.max_tile_pipes = 2;
1528 rdev->config.r600.max_simds = 3;
1529 rdev->config.r600.max_backends = 1;
1530 rdev->config.r600.max_gprs = 128;
1531 rdev->config.r600.max_threads = 192;
1532 rdev->config.r600.max_stack_entries = 128;
1533 rdev->config.r600.max_hw_contexts = 8;
1534 rdev->config.r600.max_gs_threads = 4;
1535 rdev->config.r600.sx_max_export_size = 128;
1536 rdev->config.r600.sx_max_export_pos_size = 16;
1537 rdev->config.r600.sx_max_export_smx_size = 128;
1538 rdev->config.r600.sq_num_cf_insts = 2;
1539 break;
1540 case CHIP_RV610:
1541 case CHIP_RV620:
1542 case CHIP_RS780:
1543 case CHIP_RS880:
1544 rdev->config.r600.max_pipes = 1;
1545 rdev->config.r600.max_tile_pipes = 1;
1546 rdev->config.r600.max_simds = 2;
1547 rdev->config.r600.max_backends = 1;
1548 rdev->config.r600.max_gprs = 128;
1549 rdev->config.r600.max_threads = 192;
1550 rdev->config.r600.max_stack_entries = 128;
1551 rdev->config.r600.max_hw_contexts = 4;
1552 rdev->config.r600.max_gs_threads = 4;
1553 rdev->config.r600.sx_max_export_size = 128;
1554 rdev->config.r600.sx_max_export_pos_size = 16;
1555 rdev->config.r600.sx_max_export_smx_size = 128;
1556 rdev->config.r600.sq_num_cf_insts = 1;
1557 break;
1558 case CHIP_RV670:
1559 rdev->config.r600.max_pipes = 4;
1560 rdev->config.r600.max_tile_pipes = 4;
1561 rdev->config.r600.max_simds = 4;
1562 rdev->config.r600.max_backends = 4;
1563 rdev->config.r600.max_gprs = 192;
1564 rdev->config.r600.max_threads = 192;
1565 rdev->config.r600.max_stack_entries = 256;
1566 rdev->config.r600.max_hw_contexts = 8;
1567 rdev->config.r600.max_gs_threads = 16;
1568 rdev->config.r600.sx_max_export_size = 128;
1569 rdev->config.r600.sx_max_export_pos_size = 16;
1570 rdev->config.r600.sx_max_export_smx_size = 128;
1571 rdev->config.r600.sq_num_cf_insts = 2;
1572 break;
1573 default:
1574 break;
1575 }
1576
1577 /* Initialize HDP */
1578 for (i = 0, j = 0; i < 32; i++, j += 0x18) {
1579 WREG32((0x2c14 + j), 0x00000000);
1580 WREG32((0x2c18 + j), 0x00000000);
1581 WREG32((0x2c1c + j), 0x00000000);
1582 WREG32((0x2c20 + j), 0x00000000);
1583 WREG32((0x2c24 + j), 0x00000000);
1584 }
1585
1586 WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
1587
1588 /* Setup tiling */
1589 tiling_config = 0;
1590 ramcfg = RREG32(RAMCFG);
1591 switch (rdev->config.r600.max_tile_pipes) {
1592 case 1:
1593 tiling_config |= PIPE_TILING(0);
1594 break;
1595 case 2:
1596 tiling_config |= PIPE_TILING(1);
1597 break;
1598 case 4:
1599 tiling_config |= PIPE_TILING(2);
1600 break;
1601 case 8:
1602 tiling_config |= PIPE_TILING(3);
1603 break;
1604 default:
1605 break;
1606 }
Alex Deucherd03f5d52010-02-19 16:22:31 -05001607 rdev->config.r600.tiling_npipes = rdev->config.r600.max_tile_pipes;
Jerome Glisse961fb592010-02-10 22:30:05 +00001608 rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001609 tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
Alex Deucher881fe6c2010-10-18 23:54:56 -04001610 tiling_config |= GROUP_SIZE((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
1611 if ((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT)
1612 rdev->config.r600.tiling_group_size = 512;
1613 else
1614 rdev->config.r600.tiling_group_size = 256;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001615 tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
1616 if (tmp > 3) {
1617 tiling_config |= ROW_TILING(3);
1618 tiling_config |= SAMPLE_SPLIT(3);
1619 } else {
1620 tiling_config |= ROW_TILING(tmp);
1621 tiling_config |= SAMPLE_SPLIT(tmp);
1622 }
1623 tiling_config |= BANK_SWAPS(1);
Alex Deucherd03f5d52010-02-19 16:22:31 -05001624
1625 cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
1626 cc_rb_backend_disable |=
1627 BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << rdev->config.r600.max_backends) & R6XX_MAX_BACKENDS_MASK);
1628
1629 cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
1630 cc_gc_shader_pipe_config |=
1631 INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << rdev->config.r600.max_pipes) & R6XX_MAX_PIPES_MASK);
1632 cc_gc_shader_pipe_config |=
1633 INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << rdev->config.r600.max_simds) & R6XX_MAX_SIMDS_MASK);
1634
1635 backend_map = r600_get_tile_pipe_to_backend_map(rdev->config.r600.max_tile_pipes,
1636 (R6XX_MAX_BACKENDS -
1637 r600_count_pipe_bits((cc_rb_backend_disable &
1638 R6XX_MAX_BACKENDS_MASK) >> 16)),
1639 (cc_rb_backend_disable >> 16));
Alex Deuchere7aeeba2010-06-04 13:10:12 -04001640 rdev->config.r600.tile_config = tiling_config;
Alex Deucherd03f5d52010-02-19 16:22:31 -05001641 tiling_config |= BACKEND_MAP(backend_map);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001642 WREG32(GB_TILING_CONFIG, tiling_config);
1643 WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
1644 WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);
1645
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001646 /* Setup pipes */
Alex Deucherd03f5d52010-02-19 16:22:31 -05001647 WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
1648 WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
Alex Deucherf867c60d2010-03-05 14:50:37 -05001649 WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001650
Alex Deucherd03f5d52010-02-19 16:22:31 -05001651 tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001652 WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
1653 WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK);
1654
1655 /* Setup some CP states */
1656 WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | ROQ_IB2_START(0x2b)));
1657 WREG32(CP_MEQ_THRESHOLDS, (MEQ_END(0x40) | ROQ_END(0x40)));
1658
1659 WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO | SYNC_GRADIENT |
1660 SYNC_WALKER | SYNC_ALIGNER));
1661 /* Setup various GPU states */
1662 if (rdev->family == CHIP_RV670)
1663 WREG32(ARB_GDEC_RD_CNTL, 0x00000021);
1664
1665 tmp = RREG32(SX_DEBUG_1);
1666 tmp |= SMX_EVENT_RELEASE;
1667 if ((rdev->family > CHIP_R600))
1668 tmp |= ENABLE_NEW_SMX_ADDRESS;
1669 WREG32(SX_DEBUG_1, tmp);
1670
1671 if (((rdev->family) == CHIP_R600) ||
1672 ((rdev->family) == CHIP_RV630) ||
1673 ((rdev->family) == CHIP_RV610) ||
1674 ((rdev->family) == CHIP_RV620) ||
Alex Deucheree59f2b2009-11-05 13:11:46 -05001675 ((rdev->family) == CHIP_RS780) ||
1676 ((rdev->family) == CHIP_RS880)) {
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001677 WREG32(DB_DEBUG, PREZ_MUST_WAIT_FOR_POSTZ_DONE);
1678 } else {
1679 WREG32(DB_DEBUG, 0);
1680 }
1681 WREG32(DB_WATERMARKS, (DEPTH_FREE(4) | DEPTH_CACHELINE_FREE(16) |
1682 DEPTH_FLUSH(16) | DEPTH_PENDING_FREE(4)));
1683
1684 WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
1685 WREG32(VGT_NUM_INSTANCES, 0);
1686
1687 WREG32(SPI_CONFIG_CNTL, GPR_WRITE_PRIORITY(0));
1688 WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(0));
1689
1690 tmp = RREG32(SQ_MS_FIFO_SIZES);
1691 if (((rdev->family) == CHIP_RV610) ||
1692 ((rdev->family) == CHIP_RV620) ||
Alex Deucheree59f2b2009-11-05 13:11:46 -05001693 ((rdev->family) == CHIP_RS780) ||
1694 ((rdev->family) == CHIP_RS880)) {
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001695 tmp = (CACHE_FIFO_SIZE(0xa) |
1696 FETCH_FIFO_HIWATER(0xa) |
1697 DONE_FIFO_HIWATER(0xe0) |
1698 ALU_UPDATE_FIFO_HIWATER(0x8));
1699 } else if (((rdev->family) == CHIP_R600) ||
1700 ((rdev->family) == CHIP_RV630)) {
1701 tmp &= ~DONE_FIFO_HIWATER(0xff);
1702 tmp |= DONE_FIFO_HIWATER(0x4);
1703 }
1704 WREG32(SQ_MS_FIFO_SIZES, tmp);
1705
1706 /* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT
1707 * should be adjusted as needed by the 2D/3D drivers. This just sets default values
1708 */
1709 sq_config = RREG32(SQ_CONFIG);
1710 sq_config &= ~(PS_PRIO(3) |
1711 VS_PRIO(3) |
1712 GS_PRIO(3) |
1713 ES_PRIO(3));
1714 sq_config |= (DX9_CONSTS |
1715 VC_ENABLE |
1716 PS_PRIO(0) |
1717 VS_PRIO(1) |
1718 GS_PRIO(2) |
1719 ES_PRIO(3));
1720
1721 if ((rdev->family) == CHIP_R600) {
1722 sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(124) |
1723 NUM_VS_GPRS(124) |
1724 NUM_CLAUSE_TEMP_GPRS(4));
1725 sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(0) |
1726 NUM_ES_GPRS(0));
1727 sq_thread_resource_mgmt = (NUM_PS_THREADS(136) |
1728 NUM_VS_THREADS(48) |
1729 NUM_GS_THREADS(4) |
1730 NUM_ES_THREADS(4));
1731 sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(128) |
1732 NUM_VS_STACK_ENTRIES(128));
1733 sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(0) |
1734 NUM_ES_STACK_ENTRIES(0));
1735 } else if (((rdev->family) == CHIP_RV610) ||
1736 ((rdev->family) == CHIP_RV620) ||
Alex Deucheree59f2b2009-11-05 13:11:46 -05001737 ((rdev->family) == CHIP_RS780) ||
1738 ((rdev->family) == CHIP_RS880)) {
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001739 /* no vertex cache */
1740 sq_config &= ~VC_ENABLE;
1741
1742 sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
1743 NUM_VS_GPRS(44) |
1744 NUM_CLAUSE_TEMP_GPRS(2));
1745 sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
1746 NUM_ES_GPRS(17));
1747 sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
1748 NUM_VS_THREADS(78) |
1749 NUM_GS_THREADS(4) |
1750 NUM_ES_THREADS(31));
1751 sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
1752 NUM_VS_STACK_ENTRIES(40));
1753 sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
1754 NUM_ES_STACK_ENTRIES(16));
1755 } else if (((rdev->family) == CHIP_RV630) ||
1756 ((rdev->family) == CHIP_RV635)) {
1757 sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
1758 NUM_VS_GPRS(44) |
1759 NUM_CLAUSE_TEMP_GPRS(2));
1760 sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(18) |
1761 NUM_ES_GPRS(18));
1762 sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
1763 NUM_VS_THREADS(78) |
1764 NUM_GS_THREADS(4) |
1765 NUM_ES_THREADS(31));
1766 sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
1767 NUM_VS_STACK_ENTRIES(40));
1768 sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
1769 NUM_ES_STACK_ENTRIES(16));
1770 } else if ((rdev->family) == CHIP_RV670) {
1771 sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
1772 NUM_VS_GPRS(44) |
1773 NUM_CLAUSE_TEMP_GPRS(2));
1774 sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
1775 NUM_ES_GPRS(17));
1776 sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
1777 NUM_VS_THREADS(78) |
1778 NUM_GS_THREADS(4) |
1779 NUM_ES_THREADS(31));
1780 sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(64) |
1781 NUM_VS_STACK_ENTRIES(64));
1782 sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(64) |
1783 NUM_ES_STACK_ENTRIES(64));
1784 }
1785
1786 WREG32(SQ_CONFIG, sq_config);
1787 WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1);
1788 WREG32(SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2);
1789 WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
1790 WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
1791 WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);
1792
1793 if (((rdev->family) == CHIP_RV610) ||
1794 ((rdev->family) == CHIP_RV620) ||
Alex Deucheree59f2b2009-11-05 13:11:46 -05001795 ((rdev->family) == CHIP_RS780) ||
1796 ((rdev->family) == CHIP_RS880)) {
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001797 WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(TC_ONLY));
1798 } else {
1799 WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC));
1800 }
1801
1802 /* More default values. 2D/3D driver should adjust as needed */
1803 WREG32(PA_SC_AA_SAMPLE_LOCS_2S, (S0_X(0xc) | S0_Y(0x4) |
1804 S1_X(0x4) | S1_Y(0xc)));
1805 WREG32(PA_SC_AA_SAMPLE_LOCS_4S, (S0_X(0xe) | S0_Y(0xe) |
1806 S1_X(0x2) | S1_Y(0x2) |
1807 S2_X(0xa) | S2_Y(0x6) |
1808 S3_X(0x6) | S3_Y(0xa)));
1809 WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD0, (S0_X(0xe) | S0_Y(0xb) |
1810 S1_X(0x4) | S1_Y(0xc) |
1811 S2_X(0x1) | S2_Y(0x6) |
1812 S3_X(0xa) | S3_Y(0xe)));
1813 WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD1, (S4_X(0x6) | S4_Y(0x1) |
1814 S5_X(0x0) | S5_Y(0x0) |
1815 S6_X(0xb) | S6_Y(0x4) |
1816 S7_X(0x7) | S7_Y(0x8)));
1817
1818 WREG32(VGT_STRMOUT_EN, 0);
1819 tmp = rdev->config.r600.max_pipes * 16;
1820 switch (rdev->family) {
1821 case CHIP_RV610:
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001822 case CHIP_RV620:
Alex Deucheree59f2b2009-11-05 13:11:46 -05001823 case CHIP_RS780:
1824 case CHIP_RS880:
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001825 tmp += 32;
1826 break;
1827 case CHIP_RV670:
1828 tmp += 128;
1829 break;
1830 default:
1831 break;
1832 }
1833 if (tmp > 256) {
1834 tmp = 256;
1835 }
1836 WREG32(VGT_ES_PER_GS, 128);
1837 WREG32(VGT_GS_PER_ES, tmp);
1838 WREG32(VGT_GS_PER_VS, 2);
1839 WREG32(VGT_GS_VERTEX_REUSE, 16);
1840
1841 /* more default values. 2D/3D driver should adjust as needed */
1842 WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
1843 WREG32(VGT_STRMOUT_EN, 0);
1844 WREG32(SX_MISC, 0);
1845 WREG32(PA_SC_MODE_CNTL, 0);
1846 WREG32(PA_SC_AA_CONFIG, 0);
1847 WREG32(PA_SC_LINE_STIPPLE, 0);
1848 WREG32(SPI_INPUT_Z, 0);
1849 WREG32(SPI_PS_IN_CONTROL_0, NUM_INTERP(2));
1850 WREG32(CB_COLOR7_FRAG, 0);
1851
1852 /* Clear render buffer base addresses */
1853 WREG32(CB_COLOR0_BASE, 0);
1854 WREG32(CB_COLOR1_BASE, 0);
1855 WREG32(CB_COLOR2_BASE, 0);
1856 WREG32(CB_COLOR3_BASE, 0);
1857 WREG32(CB_COLOR4_BASE, 0);
1858 WREG32(CB_COLOR5_BASE, 0);
1859 WREG32(CB_COLOR6_BASE, 0);
1860 WREG32(CB_COLOR7_BASE, 0);
1861 WREG32(CB_COLOR7_FRAG, 0);
1862
1863 switch (rdev->family) {
1864 case CHIP_RV610:
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001865 case CHIP_RV620:
Alex Deucheree59f2b2009-11-05 13:11:46 -05001866 case CHIP_RS780:
1867 case CHIP_RS880:
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001868 tmp = TC_L2_SIZE(8);
1869 break;
1870 case CHIP_RV630:
1871 case CHIP_RV635:
1872 tmp = TC_L2_SIZE(4);
1873 break;
1874 case CHIP_R600:
1875 tmp = TC_L2_SIZE(0) | L2_DISABLE_LATE_HIT;
1876 break;
1877 default:
1878 tmp = TC_L2_SIZE(0);
1879 break;
1880 }
1881 WREG32(TC_CNTL, tmp);
1882
1883 tmp = RREG32(HDP_HOST_PATH_CNTL);
1884 WREG32(HDP_HOST_PATH_CNTL, tmp);
1885
1886 tmp = RREG32(ARB_POP);
1887 tmp |= ENABLE_TC128;
1888 WREG32(ARB_POP, tmp);
1889
1890 WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
1891 WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
1892 NUM_CLIP_SEQ(3)));
1893 WREG32(PA_SC_ENHANCE, FORCE_EOV_MAX_CLK_CNT(4095));
1894}
1895
1896
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001897/*
1898 * Indirect registers accessor
1899 */
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001900u32 r600_pciep_rreg(struct radeon_device *rdev, u32 reg)
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001901{
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001902 u32 r;
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001903
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001904 WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
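	/* posting read: make sure the index write has landed before
	 * touching the data port */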
1905 (void)RREG32(PCIE_PORT_INDEX);
1906 r = RREG32(PCIE_PORT_DATA);
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001907 return r;
1908}
1909
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001910void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001911{
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001912 WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
1913 (void)RREG32(PCIE_PORT_INDEX);
1914 WREG32(PCIE_PORT_DATA, (v));
1915 (void)RREG32(PCIE_PORT_DATA);
1916}
1917
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001918/*
1919 * CP & Ring
1920 */
1921void r600_cp_stop(struct radeon_device *rdev)
1922{
Jerome Glissec919b372010-08-10 17:41:31 -04001923 rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001924 WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
Alex Deucher724c80e2010-08-27 18:25:25 -04001925 WREG32(SCRATCH_UMSK, 0);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001926}
1927
Alex Deucherd8f60cf2009-12-01 13:43:46 -05001928int r600_init_microcode(struct radeon_device *rdev)
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001929{
1930 struct platform_device *pdev;
1931 const char *chip_name;
Alex Deucherd8f60cf2009-12-01 13:43:46 -05001932 const char *rlc_chip_name;
1933 size_t pfp_req_size, me_req_size, rlc_req_size;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001934 char fw_name[30];
1935 int err;
1936
1937 DRM_DEBUG("\n");
1938
1939 pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
1940 err = IS_ERR(pdev);
1941 if (err) {
1942 printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
1943 return -EINVAL;
1944 }
1945
1946 switch (rdev->family) {
Alex Deucherd8f60cf2009-12-01 13:43:46 -05001947 case CHIP_R600:
1948 chip_name = "R600";
1949 rlc_chip_name = "R600";
1950 break;
1951 case CHIP_RV610:
1952 chip_name = "RV610";
1953 rlc_chip_name = "R600";
1954 break;
1955 case CHIP_RV630:
1956 chip_name = "RV630";
1957 rlc_chip_name = "R600";
1958 break;
1959 case CHIP_RV620:
1960 chip_name = "RV620";
1961 rlc_chip_name = "R600";
1962 break;
1963 case CHIP_RV635:
1964 chip_name = "RV635";
1965 rlc_chip_name = "R600";
1966 break;
1967 case CHIP_RV670:
1968 chip_name = "RV670";
1969 rlc_chip_name = "R600";
1970 break;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001971 case CHIP_RS780:
Alex Deucherd8f60cf2009-12-01 13:43:46 -05001972 case CHIP_RS880:
1973 chip_name = "RS780";
1974 rlc_chip_name = "R600";
1975 break;
1976 case CHIP_RV770:
1977 chip_name = "RV770";
1978 rlc_chip_name = "R700";
1979 break;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001980 case CHIP_RV730:
Alex Deucherd8f60cf2009-12-01 13:43:46 -05001981 case CHIP_RV740:
1982 chip_name = "RV730";
1983 rlc_chip_name = "R700";
1984 break;
1985 case CHIP_RV710:
1986 chip_name = "RV710";
1987 rlc_chip_name = "R700";
1988 break;
Alex Deucherfe251e22010-03-24 13:36:43 -04001989 case CHIP_CEDAR:
1990 chip_name = "CEDAR";
Alex Deucher45f9a392010-03-24 13:55:51 -04001991 rlc_chip_name = "CEDAR";
Alex Deucherfe251e22010-03-24 13:36:43 -04001992 break;
1993 case CHIP_REDWOOD:
1994 chip_name = "REDWOOD";
Alex Deucher45f9a392010-03-24 13:55:51 -04001995 rlc_chip_name = "REDWOOD";
Alex Deucherfe251e22010-03-24 13:36:43 -04001996 break;
1997 case CHIP_JUNIPER:
1998 chip_name = "JUNIPER";
Alex Deucher45f9a392010-03-24 13:55:51 -04001999 rlc_chip_name = "JUNIPER";
Alex Deucherfe251e22010-03-24 13:36:43 -04002000 break;
2001 case CHIP_CYPRESS:
2002 case CHIP_HEMLOCK:
2003 chip_name = "CYPRESS";
Alex Deucher45f9a392010-03-24 13:55:51 -04002004 rlc_chip_name = "CYPRESS";
Alex Deucherfe251e22010-03-24 13:36:43 -04002005 break;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002006 default: BUG();
2007 }
2008
Alex Deucherfe251e22010-03-24 13:36:43 -04002009 if (rdev->family >= CHIP_CEDAR) {
2010 pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
2011 me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
Alex Deucher45f9a392010-03-24 13:55:51 -04002012 rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
Alex Deucherfe251e22010-03-24 13:36:43 -04002013 } else if (rdev->family >= CHIP_RV770) {
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002014 pfp_req_size = R700_PFP_UCODE_SIZE * 4;
2015 me_req_size = R700_PM4_UCODE_SIZE * 4;
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002016 rlc_req_size = R700_RLC_UCODE_SIZE * 4;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002017 } else {
2018 pfp_req_size = PFP_UCODE_SIZE * 4;
2019 me_req_size = PM4_UCODE_SIZE * 12;
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002020 rlc_req_size = RLC_UCODE_SIZE * 4;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002021 }
2022
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002023 DRM_INFO("Loading %s Microcode\n", chip_name);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002024
2025 snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
2026 err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
2027 if (err)
2028 goto out;
2029 if (rdev->pfp_fw->size != pfp_req_size) {
2030 printk(KERN_ERR
2031 "r600_cp: Bogus length %zu in firmware \"%s\"\n",
2032 rdev->pfp_fw->size, fw_name);
2033 err = -EINVAL;
2034 goto out;
2035 }
2036
2037 snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
2038 err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
2039 if (err)
2040 goto out;
2041 if (rdev->me_fw->size != me_req_size) {
2042 printk(KERN_ERR
2043 "r600_cp: Bogus length %zu in firmware \"%s\"\n",
2044 rdev->me_fw->size, fw_name);
2045 err = -EINVAL;
2046 }
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002047
2048 snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
2049 err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev);
2050 if (err)
2051 goto out;
2052 if (rdev->rlc_fw->size != rlc_req_size) {
2053 printk(KERN_ERR
2054 "r600_rlc: Bogus length %zu in firmware \"%s\"\n",
2055 rdev->rlc_fw->size, fw_name);
2056 err = -EINVAL;
2057 }
2058
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002059out:
2060 platform_device_unregister(pdev);
2061
2062 if (err) {
2063 if (err != -EINVAL)
2064 printk(KERN_ERR
2065 "r600_cp: Failed to load firmware \"%s\"\n",
2066 fw_name);
2067 release_firmware(rdev->pfp_fw);
2068 rdev->pfp_fw = NULL;
2069 release_firmware(rdev->me_fw);
2070 rdev->me_fw = NULL;
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002071 release_firmware(rdev->rlc_fw);
2072 rdev->rlc_fw = NULL;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002073 }
2074 return err;
2075}
2076
2077static int r600_cp_load_microcode(struct radeon_device *rdev)
2078{
2079 const __be32 *fw_data;
2080 int i;
2081
2082 if (!rdev->me_fw || !rdev->pfp_fw)
2083 return -EINVAL;
2084
2085 r600_cp_stop(rdev);
2086
2087 WREG32(CP_RB_CNTL, RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
2088
2089 /* Reset cp */
2090 WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
2091 RREG32(GRBM_SOFT_RESET);
2092 mdelay(15);
2093 WREG32(GRBM_SOFT_RESET, 0);
2094
2095 WREG32(CP_ME_RAM_WADDR, 0);
2096
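	/* firmware words are stored big-endian (__be32); be32_to_cpup()
	 * converts each dword to CPU byte order on its way into the
	 * ucode RAMs */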
2097 fw_data = (const __be32 *)rdev->me_fw->data;
2098 WREG32(CP_ME_RAM_WADDR, 0);
2099 for (i = 0; i < PM4_UCODE_SIZE * 3; i++)
2100 WREG32(CP_ME_RAM_DATA,
2101 be32_to_cpup(fw_data++));
2102
2103 fw_data = (const __be32 *)rdev->pfp_fw->data;
2104 WREG32(CP_PFP_UCODE_ADDR, 0);
2105 for (i = 0; i < PFP_UCODE_SIZE; i++)
2106 WREG32(CP_PFP_UCODE_DATA,
2107 be32_to_cpup(fw_data++));
2108
2109 WREG32(CP_PFP_UCODE_ADDR, 0);
2110 WREG32(CP_ME_RAM_WADDR, 0);
2111 WREG32(CP_ME_RAM_RADDR, 0);
2112 return 0;
2113}
2114
2115int r600_cp_start(struct radeon_device *rdev)
2116{
2117 int r;
2118 uint32_t cp_me;
2119
2120 r = radeon_ring_lock(rdev, 7);
2121 if (r) {
2122 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
2123 return r;
2124 }
2125 radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
2126 radeon_ring_write(rdev, 0x1);
Alex Deucher7e7b41d2010-09-02 21:32:32 -04002127 if (rdev->family >= CHIP_RV770) {
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002128 radeon_ring_write(rdev, 0x0);
2129 radeon_ring_write(rdev, rdev->config.rv770.max_hw_contexts - 1);
Alex Deucherfe251e22010-03-24 13:36:43 -04002130 } else {
2131 radeon_ring_write(rdev, 0x3);
2132 radeon_ring_write(rdev, rdev->config.r600.max_hw_contexts - 1);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002133 }
2134 radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
2135 radeon_ring_write(rdev, 0);
2136 radeon_ring_write(rdev, 0);
2137 radeon_ring_unlock_commit(rdev);
2138
2139 cp_me = 0xff;
2140 WREG32(R_0086D8_CP_ME_CNTL, cp_me);
2141 return 0;
2142}
2143
2144int r600_cp_resume(struct radeon_device *rdev)
2145{
2146 u32 tmp;
2147 u32 rb_bufsz;
2148 int r;
2149
2150 /* Reset cp */
2151 WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
2152 RREG32(GRBM_SOFT_RESET);
2153 mdelay(15);
2154 WREG32(GRBM_SOFT_RESET, 0);
2155
2156 /* Set ring buffer size */
2157 rb_bufsz = drm_order(rdev->cp.ring_size / 8);
Alex Deucher724c80e2010-08-27 18:25:25 -04002158 tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002159#ifdef __BIG_ENDIAN
Alex Deucherd6f28932009-11-02 16:01:27 -05002160 tmp |= BUF_SWAP_32BIT;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002161#endif
Alex Deucherd6f28932009-11-02 16:01:27 -05002162 WREG32(CP_RB_CNTL, tmp);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002163 WREG32(CP_SEM_WAIT_TIMER, 0x4);
2164
2165 /* Set the write pointer delay */
2166 WREG32(CP_RB_WPTR_DELAY, 0);
2167
2168 /* Initialize the ring buffer's read and write pointers */
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002169 WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
2170 WREG32(CP_RB_RPTR_WR, 0);
2171 WREG32(CP_RB_WPTR, 0);
Alex Deucher724c80e2010-08-27 18:25:25 -04002172
2173 /* set the wb address whether it's enabled or not */
2174 WREG32(CP_RB_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
2175 WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
2176 WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
2177
 2178 	if (rdev->wb.enabled) {
 2179 		WREG32(SCRATCH_UMSK, 0xff);
 2180 	} else {
2181 tmp |= RB_NO_UPDATE;
2182 WREG32(SCRATCH_UMSK, 0);
2183 }
2184
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002185 mdelay(1);
2186 WREG32(CP_RB_CNTL, tmp);
2187
2188 WREG32(CP_RB_BASE, rdev->cp.gpu_addr >> 8);
2189 WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
2190
2191 rdev->cp.rptr = RREG32(CP_RB_RPTR);
2192 rdev->cp.wptr = RREG32(CP_RB_WPTR);
2193
2194 r600_cp_start(rdev);
2195 rdev->cp.ready = true;
2196 r = radeon_ring_test(rdev);
2197 if (r) {
2198 rdev->cp.ready = false;
2199 return r;
2200 }
2201 return 0;
2202}
2203
2204void r600_cp_commit(struct radeon_device *rdev)
2205{
2206 WREG32(CP_RB_WPTR, rdev->cp.wptr);
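	/* posting read: flush the write pointer update through to the GPU */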
2207 (void)RREG32(CP_RB_WPTR);
2208}
2209
2210void r600_ring_init(struct radeon_device *rdev, unsigned ring_size)
2211{
2212 u32 rb_bufsz;
2213
2214 /* Align ring size */
2215 rb_bufsz = drm_order(ring_size / 8);
2216 ring_size = (1 << (rb_bufsz + 1)) * 4;
2217 rdev->cp.ring_size = ring_size;
2218 rdev->cp.align_mask = 16 - 1;
2219}
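/*
 * Example (illustrative): for the 1MB ring requested by r600_init(),
 * ring_size / 8 = 131072 so drm_order() returns 17, and the size is
 * re-aligned to (1 << 18) * 4 = 1MB, i.e. the request is already a
 * supported size.
 */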
2220
Jerome Glisse655efd32010-02-02 11:51:45 +01002221void r600_cp_fini(struct radeon_device *rdev)
2222{
2223 r600_cp_stop(rdev);
2224 radeon_ring_fini(rdev);
2225}
2226
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002227
2228/*
2229 * GPU scratch registers helpers function.
2230 */
2231void r600_scratch_init(struct radeon_device *rdev)
2232{
2233 int i;
2234
2235 rdev->scratch.num_reg = 7;
Alex Deucher724c80e2010-08-27 18:25:25 -04002236 rdev->scratch.reg_base = SCRATCH_REG0;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002237 for (i = 0; i < rdev->scratch.num_reg; i++) {
2238 rdev->scratch.free[i] = true;
Alex Deucher724c80e2010-08-27 18:25:25 -04002239 rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002240 }
2241}
2242
2243int r600_ring_test(struct radeon_device *rdev)
2244{
2245 uint32_t scratch;
2246 uint32_t tmp = 0;
2247 unsigned i;
2248 int r;
2249
2250 r = radeon_scratch_get(rdev, &scratch);
2251 if (r) {
2252 DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
2253 return r;
2254 }
2255 WREG32(scratch, 0xCAFEDEAD);
2256 r = radeon_ring_lock(rdev, 3);
2257 if (r) {
2258 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
2259 radeon_scratch_free(rdev, scratch);
2260 return r;
2261 }
2262 radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2263 radeon_ring_write(rdev, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
2264 radeon_ring_write(rdev, 0xDEADBEEF);
2265 radeon_ring_unlock_commit(rdev);
2266 for (i = 0; i < rdev->usec_timeout; i++) {
2267 tmp = RREG32(scratch);
2268 if (tmp == 0xDEADBEEF)
2269 break;
2270 DRM_UDELAY(1);
2271 }
2272 if (i < rdev->usec_timeout) {
2273 DRM_INFO("ring test succeeded in %d usecs\n", i);
2274 } else {
2275 DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n",
2276 scratch, tmp);
2277 r = -EINVAL;
2278 }
2279 radeon_scratch_free(rdev, scratch);
2280 return r;
2281}
2282
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002283void r600_fence_ring_emit(struct radeon_device *rdev,
2284 struct radeon_fence *fence)
2285{
Alex Deucherd0f8a852010-09-04 05:04:34 -04002286 if (rdev->wb.use_event) {
2287 u64 addr = rdev->wb.gpu_addr + R600_WB_EVENT_OFFSET +
2288 (u64)(rdev->fence_drv.scratch_reg - rdev->scratch.reg_base);
2289 /* EVENT_WRITE_EOP - flush caches, send int */
2290 radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
2291 radeon_ring_write(rdev, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
2292 radeon_ring_write(rdev, addr & 0xffffffff);
2293 radeon_ring_write(rdev, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
2294 radeon_ring_write(rdev, fence->seq);
2295 radeon_ring_write(rdev, 0);
2296 } else {
2297 radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0));
2298 radeon_ring_write(rdev, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0));
2299 /* wait for 3D idle clean */
2300 radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2301 radeon_ring_write(rdev, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
2302 radeon_ring_write(rdev, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
2303 /* Emit fence sequence & fire IRQ */
2304 radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2305 radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
2306 radeon_ring_write(rdev, fence->seq);
2307 /* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
2308 radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0));
2309 radeon_ring_write(rdev, RB_INT_STAT);
2310 }
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002311}
2312
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002313int r600_copy_blit(struct radeon_device *rdev,
2314 uint64_t src_offset, uint64_t dst_offset,
2315 unsigned num_pages, struct radeon_fence *fence)
2316{
Jerome Glisseff82f052010-01-22 15:19:00 +01002317 int r;
2318
2319 mutex_lock(&rdev->r600_blit.mutex);
2320 rdev->r600_blit.vb_ib = NULL;
2321 r = r600_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE);
2322 if (r) {
2323 if (rdev->r600_blit.vb_ib)
2324 radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
2325 mutex_unlock(&rdev->r600_blit.mutex);
2326 return r;
2327 }
Matt Turnera77f1712009-10-14 00:34:41 -04002328 r600_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002329 r600_blit_done_copy(rdev, fence);
Jerome Glisseff82f052010-01-22 15:19:00 +01002330 mutex_unlock(&rdev->r600_blit.mutex);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002331 return 0;
2332}
2333
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002334int r600_set_surface_reg(struct radeon_device *rdev, int reg,
2335 uint32_t tiling_flags, uint32_t pitch,
2336 uint32_t offset, uint32_t obj_size)
2337{
2338 /* FIXME: implement */
2339 return 0;
2340}
2341
2342void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
2343{
2344 /* FIXME: implement */
2345}
2346
2347
2348bool r600_card_posted(struct radeon_device *rdev)
2349{
2350 uint32_t reg;
2351
2352 /* first check CRTCs */
2353 reg = RREG32(D1CRTC_CONTROL) |
2354 RREG32(D2CRTC_CONTROL);
2355 if (reg & CRTC_EN)
2356 return true;
2357
2358 /* then check MEM_SIZE, in case the crtcs are off */
2359 if (RREG32(CONFIG_MEMSIZE))
2360 return true;
2361
2362 return false;
2363}
2364
Dave Airliefc30b8e2009-09-18 15:19:37 +10002365int r600_startup(struct radeon_device *rdev)
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002366{
2367 int r;
2368
Alex Deucher779720a2009-12-09 19:31:44 -05002369 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
2370 r = r600_init_microcode(rdev);
2371 if (r) {
2372 DRM_ERROR("Failed to load firmware!\n");
2373 return r;
2374 }
2375 }
2376
Jerome Glissea3c19452009-10-01 18:02:13 +02002377 r600_mc_program(rdev);
Jerome Glisse1a029b72009-10-06 19:04:30 +02002378 if (rdev->flags & RADEON_IS_AGP) {
2379 r600_agp_enable(rdev);
2380 } else {
2381 r = r600_pcie_gart_enable(rdev);
2382 if (r)
2383 return r;
2384 }
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002385 r600_gpu_init(rdev);
Jerome Glissec38c7b62010-02-04 17:27:27 +01002386 r = r600_blit_init(rdev);
2387 if (r) {
2388 r600_blit_fini(rdev);
2389 rdev->asic->copy = NULL;
2390 dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
2391 }
Alex Deucherb70d6bb2010-08-06 21:36:58 -04002392
Alex Deucher724c80e2010-08-27 18:25:25 -04002393 /* allocate wb buffer */
2394 r = radeon_wb_init(rdev);
2395 if (r)
2396 return r;
2397
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002398 /* Enable IRQ */
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002399 r = r600_irq_init(rdev);
2400 if (r) {
2401 DRM_ERROR("radeon: IH init failed (%d).\n", r);
2402 radeon_irq_kms_fini(rdev);
2403 return r;
2404 }
2405 r600_irq_set(rdev);
2406
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002407 r = radeon_ring_init(rdev, rdev->cp.ring_size);
2408 if (r)
2409 return r;
2410 r = r600_cp_load_microcode(rdev);
2411 if (r)
2412 return r;
2413 r = r600_cp_resume(rdev);
2414 if (r)
2415 return r;
Alex Deucher724c80e2010-08-27 18:25:25 -04002416
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002417 return 0;
2418}
2419
Dave Airlie28d52042009-09-21 14:33:58 +10002420void r600_vga_set_state(struct radeon_device *rdev, bool state)
2421{
2422 uint32_t temp;
2423
2424 temp = RREG32(CONFIG_CNTL);
2425 if (state == false) {
2426 temp &= ~(1<<0);
2427 temp |= (1<<1);
2428 } else {
2429 temp &= ~(1<<1);
2430 }
2431 WREG32(CONFIG_CNTL, temp);
2432}
2433
Dave Airliefc30b8e2009-09-18 15:19:37 +10002434int r600_resume(struct radeon_device *rdev)
2435{
2436 int r;
2437
Jerome Glisse1a029b72009-10-06 19:04:30 +02002438	/* Do not reset the GPU before posting; on r600 hardware, unlike r500,
 2439	 * posting performs the tasks needed to bring the GPU back into good
 2440	 * shape.
2441 */
Dave Airliefc30b8e2009-09-18 15:19:37 +10002442 /* post card */
Jerome Glissee7d40b92009-10-01 18:02:15 +02002443 atom_asic_init(rdev->mode_info.atom_context);
Dave Airliefc30b8e2009-09-18 15:19:37 +10002444
2445 r = r600_startup(rdev);
2446 if (r) {
2447 DRM_ERROR("r600 startup failed on resume\n");
2448 return r;
2449 }
2450
Jerome Glisse62a8ea32009-10-01 18:02:11 +02002451 r = r600_ib_test(rdev);
Dave Airliefc30b8e2009-09-18 15:19:37 +10002452 if (r) {
 2453 		DRM_ERROR("radeon: failed testing IB (%d).\n", r);
2454 return r;
2455 }
Rafał Miłecki38fd2c62010-01-28 18:16:30 +01002456
2457 r = r600_audio_init(rdev);
2458 if (r) {
2459 DRM_ERROR("radeon: audio resume failed\n");
2460 return r;
2461 }
2462
Dave Airliefc30b8e2009-09-18 15:19:37 +10002463 return r;
2464}
2465
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002466int r600_suspend(struct radeon_device *rdev)
2467{
Jerome Glisse4c788672009-11-20 14:29:23 +01002468 int r;
2469
Rafał Miłecki38fd2c62010-01-28 18:16:30 +01002470 r600_audio_fini(rdev);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002471 /* FIXME: we should wait for ring to be empty */
2472 r600_cp_stop(rdev);
Dave Airliebc1a6312009-09-15 11:07:52 +10002473 rdev->cp.ready = false;
Jerome Glisse0c452492010-01-15 14:44:37 +01002474 r600_irq_suspend(rdev);
Alex Deucher724c80e2010-08-27 18:25:25 -04002475 radeon_wb_disable(rdev);
Jerome Glisse4aac0472009-09-14 18:29:49 +02002476 r600_pcie_gart_disable(rdev);
Dave Airliebc1a6312009-09-15 11:07:52 +10002477 /* unpin shaders bo */
Jerome Glisse30d2d9a2010-01-13 10:29:27 +01002478 if (rdev->r600_blit.shader_obj) {
2479 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
2480 if (!r) {
2481 radeon_bo_unpin(rdev->r600_blit.shader_obj);
2482 radeon_bo_unreserve(rdev->r600_blit.shader_obj);
2483 }
2484 }
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002485 return 0;
2486}
2487
 2488/* The plan is to move initialization into this function and use
 2489 * helper functions so that radeon_device_init does pretty much
 2490 * nothing more than call ASIC-specific functions. This should
 2491 * also allow us to remove a bunch of callback functions
 2492 * like vram_info.
2493 */
2494int r600_init(struct radeon_device *rdev)
2495{
2496 int r;
2497
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002498 r = radeon_dummy_page_init(rdev);
2499 if (r)
2500 return r;
2501 if (r600_debugfs_mc_info_init(rdev)) {
2502 DRM_ERROR("Failed to register debugfs file for mc !\n");
2503 }
 2504 	/* This doesn't do much */
2505 r = radeon_gem_init(rdev);
2506 if (r)
2507 return r;
2508 /* Read BIOS */
2509 if (!radeon_get_bios(rdev)) {
2510 if (ASIC_IS_AVIVO(rdev))
2511 return -EINVAL;
2512 }
2513 /* Must be an ATOMBIOS */
Jerome Glissee7d40b92009-10-01 18:02:15 +02002514 if (!rdev->is_atom_bios) {
2515 dev_err(rdev->dev, "Expecting atombios for R600 GPU\n");
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002516 return -EINVAL;
Jerome Glissee7d40b92009-10-01 18:02:15 +02002517 }
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002518 r = radeon_atombios_init(rdev);
2519 if (r)
2520 return r;
2521 /* Post card if necessary */
Dave Airlie72542d72009-12-01 14:06:31 +10002522 if (!r600_card_posted(rdev)) {
2523 if (!rdev->bios) {
2524 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
2525 return -EINVAL;
2526 }
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002527 DRM_INFO("GPU not posted. posting now...\n");
2528 atom_asic_init(rdev->mode_info.atom_context);
2529 }
2530 /* Initialize scratch registers */
2531 r600_scratch_init(rdev);
2532 /* Initialize surface registers */
2533 radeon_surface_init(rdev);
Rafał Miłecki74338742009-11-03 00:53:02 +01002534 /* Initialize clocks */
Michel Dänzer5e6dde72009-09-17 09:42:28 +02002535 radeon_get_clock_info(rdev->ddev);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002536 /* Fence driver */
2537 r = radeon_fence_driver_init(rdev);
2538 if (r)
2539 return r;
Jerome Glisse700a0cc2010-01-13 15:16:38 +01002540 if (rdev->flags & RADEON_IS_AGP) {
2541 r = radeon_agp_init(rdev);
2542 if (r)
2543 radeon_agp_disable(rdev);
2544 }
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002545 r = r600_mc_init(rdev);
Jerome Glisseb574f252009-10-06 19:04:29 +02002546 if (r)
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002547 return r;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002548 /* Memory manager */
Jerome Glisse4c788672009-11-20 14:29:23 +01002549 r = radeon_bo_init(rdev);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002550 if (r)
2551 return r;
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002552
2553 r = radeon_irq_kms_init(rdev);
2554 if (r)
2555 return r;
2556
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002557 rdev->cp.ring_obj = NULL;
2558 r600_ring_init(rdev, 1024 * 1024);
2559
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002560 rdev->ih.ring_obj = NULL;
2561 r600_ih_ring_init(rdev, 64 * 1024);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002562
Jerome Glisse4aac0472009-09-14 18:29:49 +02002563 r = r600_pcie_gart_init(rdev);
2564 if (r)
2565 return r;
2566
Alex Deucher779720a2009-12-09 19:31:44 -05002567 rdev->accel_working = true;
Dave Airliefc30b8e2009-09-18 15:19:37 +10002568 r = r600_startup(rdev);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002569 if (r) {
Jerome Glisse655efd32010-02-02 11:51:45 +01002570 dev_err(rdev->dev, "disabling GPU acceleration\n");
2571 r600_cp_fini(rdev);
Jerome Glisse655efd32010-02-02 11:51:45 +01002572 r600_irq_fini(rdev);
Alex Deucher724c80e2010-08-27 18:25:25 -04002573 radeon_wb_fini(rdev);
Jerome Glisse655efd32010-02-02 11:51:45 +01002574 radeon_irq_kms_fini(rdev);
Jerome Glisse75c81292009-10-01 18:02:14 +02002575 r600_pcie_gart_fini(rdev);
Jerome Glisse733289c2009-09-16 15:24:21 +02002576 rdev->accel_working = false;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002577 }
Jerome Glisse733289c2009-09-16 15:24:21 +02002578 if (rdev->accel_working) {
2579 r = radeon_ib_pool_init(rdev);
2580 if (r) {
Jerome Glissedb963802010-01-17 21:21:56 +01002581 dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
Jerome Glisse733289c2009-09-16 15:24:21 +02002582 rdev->accel_working = false;
Jerome Glissedb963802010-01-17 21:21:56 +01002583 } else {
2584 r = r600_ib_test(rdev);
2585 if (r) {
2586 dev_err(rdev->dev, "IB test failed (%d).\n", r);
2587 rdev->accel_working = false;
2588 }
Jerome Glisse733289c2009-09-16 15:24:21 +02002589 }
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002590 }
Christian Koenigdafc3bd2009-10-11 23:49:13 +02002591
2592 r = r600_audio_init(rdev);
2593 if (r)
2594 return r; /* TODO error handling */
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002595 return 0;
2596}
2597
2598void r600_fini(struct radeon_device *rdev)
2599{
Christian Koenigdafc3bd2009-10-11 23:49:13 +02002600 r600_audio_fini(rdev);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002601 r600_blit_fini(rdev);
Jerome Glisse655efd32010-02-02 11:51:45 +01002602 r600_cp_fini(rdev);
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002603 r600_irq_fini(rdev);
Alex Deucher724c80e2010-08-27 18:25:25 -04002604 radeon_wb_fini(rdev);
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002605 radeon_irq_kms_fini(rdev);
Jerome Glisse4aac0472009-09-14 18:29:49 +02002606 r600_pcie_gart_fini(rdev);
Jerome Glisse655efd32010-02-02 11:51:45 +01002607 radeon_agp_fini(rdev);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002608 radeon_gem_fini(rdev);
2609 radeon_fence_driver_fini(rdev);
Jerome Glisse4c788672009-11-20 14:29:23 +01002610 radeon_bo_fini(rdev);
Jerome Glissee7d40b92009-10-01 18:02:15 +02002611 radeon_atombios_fini(rdev);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002612 kfree(rdev->bios);
2613 rdev->bios = NULL;
2614 radeon_dummy_page_fini(rdev);
2615}
2616
2617
2618/*
2619 * CS stuff
2620 */
2621void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
2622{
2623 /* FIXME: implement */
2624 radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
2625 radeon_ring_write(rdev, ib->gpu_addr & 0xFFFFFFFC);
2626 radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF);
2627 radeon_ring_write(rdev, ib->length_dw);
2628}
2629
2630int r600_ib_test(struct radeon_device *rdev)
2631{
2632 struct radeon_ib *ib;
2633 uint32_t scratch;
2634 uint32_t tmp = 0;
2635 unsigned i;
2636 int r;
2637
2638 r = radeon_scratch_get(rdev, &scratch);
2639 if (r) {
2640 DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
2641 return r;
2642 }
2643 WREG32(scratch, 0xCAFEDEAD);
2644 r = radeon_ib_get(rdev, &ib);
2645 if (r) {
2646 DRM_ERROR("radeon: failed to get ib (%d).\n", r);
2647 return r;
2648 }
2649 ib->ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
2650 ib->ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
2651 ib->ptr[2] = 0xDEADBEEF;
2652 ib->ptr[3] = PACKET2(0);
2653 ib->ptr[4] = PACKET2(0);
2654 ib->ptr[5] = PACKET2(0);
2655 ib->ptr[6] = PACKET2(0);
2656 ib->ptr[7] = PACKET2(0);
2657 ib->ptr[8] = PACKET2(0);
2658 ib->ptr[9] = PACKET2(0);
2659 ib->ptr[10] = PACKET2(0);
2660 ib->ptr[11] = PACKET2(0);
2661 ib->ptr[12] = PACKET2(0);
2662 ib->ptr[13] = PACKET2(0);
2663 ib->ptr[14] = PACKET2(0);
2664 ib->ptr[15] = PACKET2(0);
2665 ib->length_dw = 16;
2666 r = radeon_ib_schedule(rdev, ib);
2667 if (r) {
2668 radeon_scratch_free(rdev, scratch);
2669 radeon_ib_free(rdev, &ib);
2670 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
2671 return r;
2672 }
2673 r = radeon_fence_wait(ib->fence, false);
2674 if (r) {
2675 DRM_ERROR("radeon: fence wait failed (%d).\n", r);
2676 return r;
2677 }
2678 for (i = 0; i < rdev->usec_timeout; i++) {
2679 tmp = RREG32(scratch);
2680 if (tmp == 0xDEADBEEF)
2681 break;
2682 DRM_UDELAY(1);
2683 }
2684 if (i < rdev->usec_timeout) {
2685 DRM_INFO("ib test succeeded in %u usecs\n", i);
2686 } else {
Daniel J Blueman4417d7f2010-09-22 17:57:19 +01002687 DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002688 scratch, tmp);
2689 r = -EINVAL;
2690 }
2691 radeon_scratch_free(rdev, scratch);
2692 radeon_ib_free(rdev, &ib);
2693 return r;
2694}
2695
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002696/*
2697 * Interrupts
2698 *
 2699 * Interrupts use a ring buffer on r6xx/r7xx hardware. It works much
 2700 * the same as the CP ring buffer, but in reverse. Rather than the CPU
2701 * writing to the ring and the GPU consuming, the GPU writes to the ring
2702 * and host consumes. As the host irq handler processes interrupts, it
2703 * increments the rptr. When the rptr catches up with the wptr, all the
2704 * current interrupts have been processed.
2705 */
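/*
 * Minimal sketch of how a host-side consumer walks the IH ring
 * (illustrative only -- the driver's real handler, r600_irq_process(),
 * also handles acking, wptr overflow and interrupt re-arming). The
 * IH_RB_RPTR/IH_RB_WPTR register names are assumed from r600d.h; each
 * ring entry is four dwords on r6xx/r7xx.
 */
static void r600_ih_drain_sketch(struct radeon_device *rdev)
{
	u32 wptr = RREG32(IH_RB_WPTR) & rdev->ih.ptr_mask;
	u32 rptr = rdev->ih.rptr;

	while (rptr != wptr) {
		/* dword 0 holds the source id, dword 1 the source data */
		u32 src_id = rdev->ih.ring[rptr / 4] & 0xff;
		u32 src_data = rdev->ih.ring[rptr / 4 + 1] & 0xfffffff;

		DRM_DEBUG("IH: src_id %u, src_data 0x%x\n", src_id, src_data);
		/* a real handler dispatches on src_id/src_data here */
		rptr = (rptr + 16) & rdev->ih.ptr_mask;
	}
	/* publish the new read pointer so the GPU can reuse the entries */
	rdev->ih.rptr = rptr;
	WREG32(IH_RB_RPTR, rptr);
}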
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002706
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002707void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size)
2708{
2709 u32 rb_bufsz;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002710
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002711 /* Align ring size */
2712 rb_bufsz = drm_order(ring_size / 4);
2713 ring_size = (1 << rb_bufsz) * 4;
2714 rdev->ih.ring_size = ring_size;
Jerome Glisse0c452492010-01-15 14:44:37 +01002715 rdev->ih.ptr_mask = rdev->ih.ring_size - 1;
2716 rdev->ih.rptr = 0;
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002717}

static int r600_ih_ring_alloc(struct radeon_device *rdev)
{
	int r;

	/* Allocate ring buffer */
	if (rdev->ih.ring_obj == NULL) {
		r = radeon_bo_create(rdev, NULL, rdev->ih.ring_size,
				     PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT,
				     &rdev->ih.ring_obj);
		if (r) {
			DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r);
			return r;
		}
		r = radeon_bo_reserve(rdev->ih.ring_obj, false);
		if (unlikely(r != 0))
			return r;
		r = radeon_bo_pin(rdev->ih.ring_obj,
				  RADEON_GEM_DOMAIN_GTT,
				  &rdev->ih.gpu_addr);
		if (r) {
			radeon_bo_unreserve(rdev->ih.ring_obj);
			DRM_ERROR("radeon: failed to pin ih ring buffer (%d).\n", r);
			return r;
		}
		r = radeon_bo_kmap(rdev->ih.ring_obj,
				   (void **)&rdev->ih.ring);
		radeon_bo_unreserve(rdev->ih.ring_obj);
		if (r) {
			DRM_ERROR("radeon: failed to map ih ring buffer (%d).\n", r);
			return r;
		}
	}
	return 0;
}

static void r600_ih_ring_fini(struct radeon_device *rdev)
{
	int r;
	if (rdev->ih.ring_obj) {
		r = radeon_bo_reserve(rdev->ih.ring_obj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(rdev->ih.ring_obj);
			radeon_bo_unpin(rdev->ih.ring_obj);
			radeon_bo_unreserve(rdev->ih.ring_obj);
		}
		radeon_bo_unref(&rdev->ih.ring_obj);
		rdev->ih.ring = NULL;
		rdev->ih.ring_obj = NULL;
	}
}

void r600_rlc_stop(struct radeon_device *rdev)
{
	if ((rdev->family >= CHIP_RV770) &&
	    (rdev->family <= CHIP_RV740)) {
		/* r7xx asics need to soft reset RLC before halting */
		WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC);
		RREG32(SRBM_SOFT_RESET);
		udelay(15000);
		WREG32(SRBM_SOFT_RESET, 0);
		RREG32(SRBM_SOFT_RESET);
	}

	WREG32(RLC_CNTL, 0);
}

static void r600_rlc_start(struct radeon_device *rdev)
{
	WREG32(RLC_CNTL, RLC_ENABLE);
}

static int r600_rlc_init(struct radeon_device *rdev)
{
	u32 i;
	const __be32 *fw_data;

	if (!rdev->rlc_fw)
		return -EINVAL;

	r600_rlc_stop(rdev);

	WREG32(RLC_HB_BASE, 0);
	WREG32(RLC_HB_CNTL, 0);
	WREG32(RLC_HB_RPTR, 0);
	WREG32(RLC_HB_WPTR, 0);
	WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
	WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
	WREG32(RLC_MC_CNTL, 0);
	WREG32(RLC_UCODE_CNTL, 0);

	fw_data = (const __be32 *)rdev->rlc_fw->data;
	if (rdev->family >= CHIP_CEDAR) {
		for (i = 0; i < EVERGREEN_RLC_UCODE_SIZE; i++) {
			WREG32(RLC_UCODE_ADDR, i);
			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
		}
	} else if (rdev->family >= CHIP_RV770) {
		for (i = 0; i < R700_RLC_UCODE_SIZE; i++) {
			WREG32(RLC_UCODE_ADDR, i);
			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
		}
	} else {
		for (i = 0; i < RLC_UCODE_SIZE; i++) {
			WREG32(RLC_UCODE_ADDR, i);
			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
		}
	}
	WREG32(RLC_UCODE_ADDR, 0);

	r600_rlc_start(rdev);

	return 0;
}
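
/*
 * Note: the RLC microcode image is stored big-endian in the firmware
 * blob (hence the __be32 pointer and the be32_to_cpup() conversion on
 * each dword written through RLC_UCODE_ADDR/RLC_UCODE_DATA above).
 */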

static void r600_enable_interrupts(struct radeon_device *rdev)
{
	u32 ih_cntl = RREG32(IH_CNTL);
	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);

	ih_cntl |= ENABLE_INTR;
	ih_rb_cntl |= IH_RB_ENABLE;
	WREG32(IH_CNTL, ih_cntl);
	WREG32(IH_RB_CNTL, ih_rb_cntl);
	rdev->ih.enabled = true;
}

void r600_disable_interrupts(struct radeon_device *rdev)
{
	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
	u32 ih_cntl = RREG32(IH_CNTL);

	ih_rb_cntl &= ~IH_RB_ENABLE;
	ih_cntl &= ~ENABLE_INTR;
	WREG32(IH_RB_CNTL, ih_rb_cntl);
	WREG32(IH_CNTL, ih_cntl);
	/* set rptr, wptr to 0 */
	WREG32(IH_RB_RPTR, 0);
	WREG32(IH_RB_WPTR, 0);
	rdev->ih.enabled = false;
	rdev->ih.wptr = 0;
	rdev->ih.rptr = 0;
}

static void r600_disable_interrupt_state(struct radeon_device *rdev)
{
	u32 tmp;

	WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
	WREG32(GRBM_INT_CNTL, 0);
	WREG32(DxMODE_INT_MASK, 0);
	if (ASIC_IS_DCE3(rdev)) {
		WREG32(DCE3_DACA_AUTODETECT_INT_CONTROL, 0);
		WREG32(DCE3_DACB_AUTODETECT_INT_CONTROL, 0);
		tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD1_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD2_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD3_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD4_INT_CONTROL, tmp);
		if (ASIC_IS_DCE32(rdev)) {
			tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD5_INT_CONTROL, tmp);
			tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD6_INT_CONTROL, tmp);
		}
	} else {
		WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
		WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
		tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
		WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
		tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
		WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
		tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
		WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
	}
}
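
/*
 * Note: the read-modify-write sequences above keep only the
 * DC_HPDx_INT_POLARITY (or DC_HOT_PLUG_DETECTx_INT_POLARITY) bit, so
 * the configured hpd polarity survives while every enable bit is
 * cleared.
 */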

int r600_irq_init(struct radeon_device *rdev)
{
	int ret = 0;
	int rb_bufsz;
	u32 interrupt_cntl, ih_cntl, ih_rb_cntl;

	/* allocate ring */
	ret = r600_ih_ring_alloc(rdev);
	if (ret)
		return ret;

	/* disable irqs */
	r600_disable_interrupts(rdev);

	/* init rlc */
	ret = r600_rlc_init(rdev);
	if (ret) {
		r600_ih_ring_fini(rdev);
		return ret;
	}

	/* setup interrupt control */
	/* set dummy read address to ring address */
	WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
	interrupt_cntl = RREG32(INTERRUPT_CNTL);
	/* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
	 * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
	 */
	interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
	/* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
	interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
	WREG32(INTERRUPT_CNTL, interrupt_cntl);

	WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
	rb_bufsz = drm_order(rdev->ih.ring_size / 4);

	ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
		      IH_WPTR_OVERFLOW_CLEAR |
		      (rb_bufsz << 1));

	if (rdev->wb.enabled)
		ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;

	/* set the writeback address whether it's enabled or not */
	WREG32(IH_RB_WPTR_ADDR_LO, (rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFFFFFFFC);
	WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFF);

	WREG32(IH_RB_CNTL, ih_rb_cntl);

	/* set rptr, wptr to 0 */
	WREG32(IH_RB_RPTR, 0);
	WREG32(IH_RB_WPTR, 0);

	/* Default settings for IH_CNTL (disabled at first) */
	ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10);
	/* RPTR_REARM only works if msi's are enabled */
	if (rdev->msi_enabled)
		ih_cntl |= RPTR_REARM;

#ifdef __BIG_ENDIAN
	ih_cntl |= IH_MC_SWAP(IH_MC_SWAP_32BIT);
#endif
	WREG32(IH_CNTL, ih_cntl);

	/* force the active interrupt state to all disabled */
	if (rdev->family >= CHIP_CEDAR)
		evergreen_disable_interrupt_state(rdev);
	else
		r600_disable_interrupt_state(rdev);

	/* enable irqs */
	r600_enable_interrupts(rdev);

	return ret;
}
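
/*
 * Typical call flow (a summary of the functions in this file, not a new
 * API): r600_irq_init() runs once at startup to allocate the IH ring
 * and program the IH and RLC blocks, r600_irq_set() runs whenever the
 * set of requested interrupt sources changes, and r600_irq_process()
 * runs from the interrupt handler to drain the ring.
 */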

void r600_irq_suspend(struct radeon_device *rdev)
{
	r600_irq_disable(rdev);
	r600_rlc_stop(rdev);
}

void r600_irq_fini(struct radeon_device *rdev)
{
	r600_irq_suspend(rdev);
	r600_ih_ring_fini(rdev);
}

int r600_irq_set(struct radeon_device *rdev)
{
	u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
	u32 mode_int = 0;
	u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
	u32 grbm_int_cntl = 0;
	u32 hdmi1, hdmi2;

	if (!rdev->irq.installed) {
		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
		return -EINVAL;
	}
	/* don't enable anything if the ih is disabled */
	if (!rdev->ih.enabled) {
		r600_disable_interrupts(rdev);
		/* force the active interrupt state to all disabled */
		r600_disable_interrupt_state(rdev);
		return 0;
	}

	hdmi1 = RREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
	if (ASIC_IS_DCE3(rdev)) {
		hdmi2 = RREG32(R600_HDMI_BLOCK3 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
		hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
		if (ASIC_IS_DCE32(rdev)) {
			hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
			hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
		}
	} else {
		hdmi2 = RREG32(R600_HDMI_BLOCK2 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
		hpd1 = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN;
	}

	if (rdev->irq.sw_int) {
		DRM_DEBUG("r600_irq_set: sw int\n");
		cp_int_cntl |= RB_INT_ENABLE;
		cp_int_cntl |= TIME_STAMP_INT_ENABLE;
	}
	if (rdev->irq.crtc_vblank_int[0]) {
		DRM_DEBUG("r600_irq_set: vblank 0\n");
		mode_int |= D1MODE_VBLANK_INT_MASK;
	}
	if (rdev->irq.crtc_vblank_int[1]) {
		DRM_DEBUG("r600_irq_set: vblank 1\n");
		mode_int |= D2MODE_VBLANK_INT_MASK;
	}
	if (rdev->irq.hpd[0]) {
		DRM_DEBUG("r600_irq_set: hpd 1\n");
		hpd1 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[1]) {
		DRM_DEBUG("r600_irq_set: hpd 2\n");
		hpd2 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[2]) {
		DRM_DEBUG("r600_irq_set: hpd 3\n");
		hpd3 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[3]) {
		DRM_DEBUG("r600_irq_set: hpd 4\n");
		hpd4 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[4]) {
		DRM_DEBUG("r600_irq_set: hpd 5\n");
		hpd5 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[5]) {
		DRM_DEBUG("r600_irq_set: hpd 6\n");
		hpd6 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hdmi[0]) {
		DRM_DEBUG("r600_irq_set: hdmi 1\n");
		hdmi1 |= R600_HDMI_INT_EN;
	}
	if (rdev->irq.hdmi[1]) {
		DRM_DEBUG("r600_irq_set: hdmi 2\n");
		hdmi2 |= R600_HDMI_INT_EN;
	}
	if (rdev->irq.gui_idle) {
		DRM_DEBUG("gui idle\n");
		grbm_int_cntl |= GUI_IDLE_INT_ENABLE;
	}

	WREG32(CP_INT_CNTL, cp_int_cntl);
	WREG32(DxMODE_INT_MASK, mode_int);
	WREG32(GRBM_INT_CNTL, grbm_int_cntl);
	WREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, hdmi1);
	if (ASIC_IS_DCE3(rdev)) {
		WREG32(R600_HDMI_BLOCK3 + R600_HDMI_CNTL, hdmi2);
		WREG32(DC_HPD1_INT_CONTROL, hpd1);
		WREG32(DC_HPD2_INT_CONTROL, hpd2);
		WREG32(DC_HPD3_INT_CONTROL, hpd3);
		WREG32(DC_HPD4_INT_CONTROL, hpd4);
		if (ASIC_IS_DCE32(rdev)) {
			WREG32(DC_HPD5_INT_CONTROL, hpd5);
			WREG32(DC_HPD6_INT_CONTROL, hpd6);
		}
	} else {
		WREG32(R600_HDMI_BLOCK2 + R600_HDMI_CNTL, hdmi2);
		WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
		WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
		WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3);
	}

	return 0;
}

static inline void r600_irq_ack(struct radeon_device *rdev,
				u32 *disp_int,
				u32 *disp_int_cont,
				u32 *disp_int_cont2)
{
	u32 tmp;

	if (ASIC_IS_DCE3(rdev)) {
		*disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS);
		*disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE);
		*disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2);
	} else {
		*disp_int = RREG32(DISP_INTERRUPT_STATUS);
		*disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
		*disp_int_cont2 = 0;
	}

	if (*disp_int & LB_D1_VBLANK_INTERRUPT)
		WREG32(D1MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
	if (*disp_int & LB_D1_VLINE_INTERRUPT)
		WREG32(D1MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
	if (*disp_int & LB_D2_VBLANK_INTERRUPT)
		WREG32(D2MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
	if (*disp_int & LB_D2_VLINE_INTERRUPT)
		WREG32(D2MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
	if (*disp_int & DC_HPD1_INTERRUPT) {
		if (ASIC_IS_DCE3(rdev)) {
			tmp = RREG32(DC_HPD1_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD1_INT_CONTROL, tmp);
		} else {
			tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
		}
	}
	if (*disp_int & DC_HPD2_INTERRUPT) {
		if (ASIC_IS_DCE3(rdev)) {
			tmp = RREG32(DC_HPD2_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD2_INT_CONTROL, tmp);
		} else {
			tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
		}
	}
	if (*disp_int_cont & DC_HPD3_INTERRUPT) {
		if (ASIC_IS_DCE3(rdev)) {
			tmp = RREG32(DC_HPD3_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD3_INT_CONTROL, tmp);
		} else {
			tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
		}
	}
	if (*disp_int_cont & DC_HPD4_INTERRUPT) {
		tmp = RREG32(DC_HPD4_INT_CONTROL);
		tmp |= DC_HPDx_INT_ACK;
		WREG32(DC_HPD4_INT_CONTROL, tmp);
	}
	if (ASIC_IS_DCE32(rdev)) {
		if (*disp_int_cont2 & DC_HPD5_INTERRUPT) {
			tmp = RREG32(DC_HPD5_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD5_INT_CONTROL, tmp);
		}
		if (*disp_int_cont2 & DC_HPD6_INTERRUPT) {
			tmp = RREG32(DC_HPD6_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD6_INT_CONTROL, tmp);
		}
	}
	if (RREG32(R600_HDMI_BLOCK1 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
		WREG32_P(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
	}
	if (ASIC_IS_DCE3(rdev)) {
		if (RREG32(R600_HDMI_BLOCK3 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
			WREG32_P(R600_HDMI_BLOCK3 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
		}
	} else {
		if (RREG32(R600_HDMI_BLOCK2 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
			WREG32_P(R600_HDMI_BLOCK2 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
		}
	}
}

void r600_irq_disable(struct radeon_device *rdev)
{
	u32 disp_int, disp_int_cont, disp_int_cont2;

	r600_disable_interrupts(rdev);
	/* Wait and acknowledge irq */
	mdelay(1);
	r600_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2);
	r600_disable_interrupt_state(rdev);
}

static inline u32 r600_get_ih_wptr(struct radeon_device *rdev)
{
	u32 wptr, tmp;

	if (rdev->wb.enabled)
		wptr = rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4];
	else
		wptr = RREG32(IH_RB_WPTR);

	if (wptr & RB_OVERFLOW) {
		/* When a ring buffer overflow happens, start parsing interrupts
		 * from the last not-overwritten vector (wptr + 16).  Hopefully
		 * this should allow us to catch up.
		 */
		dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
			 wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
		rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
		tmp = RREG32(IH_RB_CNTL);
		tmp |= IH_WPTR_OVERFLOW_CLEAR;
		WREG32(IH_RB_CNTL, tmp);
	}
	return (wptr & rdev->ih.ptr_mask);
}

/* r600 IV Ring
 * Each IV ring entry is 128 bits:
 * [7:0]    - interrupt source id
 * [31:8]   - reserved
 * [59:32]  - interrupt source data
 * [127:60] - reserved
 *
 * The basic interrupt vector entries
 * are decoded as follows:
 * src_id  src_data  description
 *      1         0  D1 Vblank
 *      1         1  D1 Vline
 *      5         0  D2 Vblank
 *      5         1  D2 Vline
 *     19         0  FP Hot plug detection A
 *     19         1  FP Hot plug detection B
 *     19         2  DAC A auto-detection
 *     19         3  DAC B auto-detection
 *     21         4  HDMI block A
 *     21         5  HDMI block B
 *    176         -  CP_INT RB
 *    177         -  CP_INT IB1
 *    178         -  CP_INT IB2
 *    181         -  EOP Interrupt
 *    233         -  GUI Idle
 *
 * Note, these are based on r600 and may need to be
 * adjusted or added to on newer asics
 */
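
/*
 * Decoding sketch for a single vector, matching the layout above and
 * the loop in r600_irq_process() below ('entry' points at the vector's
 * first dword; the third and fourth dwords are reserved):
 */
#if 0
static void r600_ih_decode_sketch(const u32 *entry, u32 *src_id, u32 *src_data)
{
	*src_id = entry[0] & 0xff;		/* bits [7:0] */
	*src_data = entry[1] & 0xfffffff;	/* bits [59:32], 28 bits */
}
#endif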

int r600_irq_process(struct radeon_device *rdev)
{
	u32 wptr = r600_get_ih_wptr(rdev);
	u32 rptr = rdev->ih.rptr;
	u32 src_id, src_data;
	u32 ring_index, disp_int, disp_int_cont, disp_int_cont2;
	unsigned long flags;
	bool queue_hotplug = false;

	DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
	if (!rdev->ih.enabled)
		return IRQ_NONE;

	spin_lock_irqsave(&rdev->ih.lock, flags);

	if (rptr == wptr) {
		spin_unlock_irqrestore(&rdev->ih.lock, flags);
		return IRQ_NONE;
	}
	if (rdev->shutdown) {
		spin_unlock_irqrestore(&rdev->ih.lock, flags);
		return IRQ_NONE;
	}

restart_ih:
	/* display interrupts */
	r600_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2);

	rdev->ih.wptr = wptr;
	while (rptr != wptr) {
		/* wptr/rptr are in bytes! */
		ring_index = rptr / 4;
		src_id = rdev->ih.ring[ring_index] & 0xff;
		src_data = rdev->ih.ring[ring_index + 1] & 0xfffffff;

		switch (src_id) {
		case 1: /* D1 vblank/vline */
			switch (src_data) {
			case 0: /* D1 vblank */
				if (disp_int & LB_D1_VBLANK_INTERRUPT) {
					drm_handle_vblank(rdev->ddev, 0);
					rdev->pm.vblank_sync = true;
					wake_up(&rdev->irq.vblank_queue);
					disp_int &= ~LB_D1_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D1 vblank\n");
				}
				break;
			case 1: /* D1 vline */
				if (disp_int & LB_D1_VLINE_INTERRUPT) {
					disp_int &= ~LB_D1_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D1 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 5: /* D2 vblank/vline */
			switch (src_data) {
			case 0: /* D2 vblank */
				if (disp_int & LB_D2_VBLANK_INTERRUPT) {
					drm_handle_vblank(rdev->ddev, 1);
					rdev->pm.vblank_sync = true;
					wake_up(&rdev->irq.vblank_queue);
					disp_int &= ~LB_D2_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D2 vblank\n");
				}
				break;
			case 1: /* D2 vline */
				if (disp_int & LB_D2_VLINE_INTERRUPT) {
					disp_int &= ~LB_D2_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D2 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 19: /* HPD/DAC hotplug */
			switch (src_data) {
			case 0:
				if (disp_int & DC_HPD1_INTERRUPT) {
					disp_int &= ~DC_HPD1_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD1\n");
				}
				break;
			case 1:
				if (disp_int & DC_HPD2_INTERRUPT) {
					disp_int &= ~DC_HPD2_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD2\n");
				}
				break;
			case 4:
				if (disp_int_cont & DC_HPD3_INTERRUPT) {
					disp_int_cont &= ~DC_HPD3_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD3\n");
				}
				break;
			case 5:
				if (disp_int_cont & DC_HPD4_INTERRUPT) {
					disp_int_cont &= ~DC_HPD4_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD4\n");
				}
				break;
			case 10:
				if (disp_int_cont2 & DC_HPD5_INTERRUPT) {
					disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD5\n");
				}
				break;
			case 12:
				if (disp_int_cont2 & DC_HPD6_INTERRUPT) {
					disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD6\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 21: /* HDMI */
			DRM_DEBUG("IH: HDMI: 0x%x\n", src_data);
			r600_audio_schedule_polling(rdev);
			break;
		case 176: /* CP_INT in ring buffer */
		case 177: /* CP_INT in IB1 */
		case 178: /* CP_INT in IB2 */
			DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
			radeon_fence_process(rdev);
			break;
		case 181: /* CP EOP event */
			DRM_DEBUG("IH: CP EOP\n");
			radeon_fence_process(rdev);
			break;
		case 233: /* GUI IDLE */
			DRM_DEBUG("IH: GUI idle\n");
			rdev->pm.gui_idle = true;
			wake_up(&rdev->irq.idle_queue);
			break;
		default:
			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
			break;
		}

		/* wptr/rptr are in bytes! */
		rptr += 16;
		rptr &= rdev->ih.ptr_mask;
	}
	/* make sure wptr hasn't changed while processing */
	wptr = r600_get_ih_wptr(rdev);
	if (wptr != rdev->ih.wptr)
		goto restart_ih;
	if (queue_hotplug)
		queue_work(rdev->wq, &rdev->hotplug_work);
	rdev->ih.rptr = rptr;
	WREG32(IH_RB_RPTR, rdev->ih.rptr);
	spin_unlock_irqrestore(&rdev->ih.lock, flags);
	return IRQ_HANDLED;
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int r600_debugfs_cp_ring_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	unsigned count, i, j;

	radeon_ring_free_size(rdev);
	count = (rdev->cp.ring_size / 4) - rdev->cp.ring_free_dw;
	seq_printf(m, "CP_STAT 0x%08x\n", RREG32(CP_STAT));
	seq_printf(m, "CP_RB_WPTR 0x%08x\n", RREG32(CP_RB_WPTR));
	seq_printf(m, "CP_RB_RPTR 0x%08x\n", RREG32(CP_RB_RPTR));
	seq_printf(m, "driver's copy of the CP_RB_WPTR 0x%08x\n", rdev->cp.wptr);
	seq_printf(m, "driver's copy of the CP_RB_RPTR 0x%08x\n", rdev->cp.rptr);
	seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw);
	seq_printf(m, "%u dwords in ring\n", count);
	i = rdev->cp.rptr;
	for (j = 0; j <= count; j++) {
		seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]);
		i = (i + 1) & rdev->cp.ptr_mask;
	}
	return 0;
}

static int r600_debugfs_mc_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;

	DREG32_SYS(m, rdev, R_000E50_SRBM_STATUS);
	DREG32_SYS(m, rdev, VM_L2_STATUS);
	return 0;
}

static struct drm_info_list r600_mc_info_list[] = {
	{"r600_mc_info", r600_debugfs_mc_info, 0, NULL},
	{"r600_ring_info", r600_debugfs_cp_ring_info, 0, NULL},
};
#endif

int r600_debugfs_mc_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, r600_mc_info_list, ARRAY_SIZE(r600_mc_info_list));
#else
	return 0;
#endif
}

/**
 * r600_ioctl_wait_idle - flush host path cache on wait idle ioctl
 * @rdev: radeon device structure
 * @bo: buffer object struct which userspace is waiting for idle
 *
 * Some R6XX/R7XX ASICs don't seem to take into account the HDP flush
 * performed through the ring buffer, which leads to corruption in
 * rendering; see http://bugzilla.kernel.org/show_bug.cgi?id=15186.
 * To avoid this, perform the HDP flush directly by writing the register
 * through MMIO.
 */
void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
{
	/* r7xx hw bug.  Use a write to HDP_DEBUG1 followed by an fb read
	 * rather than a write to HDP_REG_COHERENCY_FLUSH_CNTL.
	 * That workaround seems to cause problems on some AGP cards,
	 * so just use the old method for them.
	 */
	if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
	    rdev->vram_scratch.ptr && !(rdev->flags & RADEON_IS_AGP)) {
		void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
		u32 tmp;

		WREG32(HDP_DEBUG1, 0);
		tmp = readl((void __iomem *)ptr);
	} else
		WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
}