/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/firmware.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_mode.h"
#include "r600d.h"
#include "atom.h"
#include "avivod.h"
#include "radeon_ucode.h"

/* Firmware Names */
MODULE_FIRMWARE("radeon/R600_pfp.bin");
MODULE_FIRMWARE("radeon/R600_me.bin");
MODULE_FIRMWARE("radeon/RV610_pfp.bin");
MODULE_FIRMWARE("radeon/RV610_me.bin");
MODULE_FIRMWARE("radeon/RV630_pfp.bin");
MODULE_FIRMWARE("radeon/RV630_me.bin");
MODULE_FIRMWARE("radeon/RV620_pfp.bin");
MODULE_FIRMWARE("radeon/RV620_me.bin");
MODULE_FIRMWARE("radeon/RV635_pfp.bin");
MODULE_FIRMWARE("radeon/RV635_me.bin");
MODULE_FIRMWARE("radeon/RV670_pfp.bin");
MODULE_FIRMWARE("radeon/RV670_me.bin");
MODULE_FIRMWARE("radeon/RS780_pfp.bin");
MODULE_FIRMWARE("radeon/RS780_me.bin");
MODULE_FIRMWARE("radeon/RV770_pfp.bin");
MODULE_FIRMWARE("radeon/RV770_me.bin");
MODULE_FIRMWARE("radeon/RV730_pfp.bin");
MODULE_FIRMWARE("radeon/RV730_me.bin");
MODULE_FIRMWARE("radeon/RV710_pfp.bin");
MODULE_FIRMWARE("radeon/RV710_me.bin");
MODULE_FIRMWARE("radeon/R600_rlc.bin");
MODULE_FIRMWARE("radeon/R700_rlc.bin");
MODULE_FIRMWARE("radeon/CEDAR_pfp.bin");
MODULE_FIRMWARE("radeon/CEDAR_me.bin");
MODULE_FIRMWARE("radeon/CEDAR_rlc.bin");
MODULE_FIRMWARE("radeon/REDWOOD_pfp.bin");
MODULE_FIRMWARE("radeon/REDWOOD_me.bin");
MODULE_FIRMWARE("radeon/REDWOOD_rlc.bin");
MODULE_FIRMWARE("radeon/JUNIPER_pfp.bin");
MODULE_FIRMWARE("radeon/JUNIPER_me.bin");
MODULE_FIRMWARE("radeon/JUNIPER_rlc.bin");
MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin");
MODULE_FIRMWARE("radeon/CYPRESS_me.bin");
MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin");
MODULE_FIRMWARE("radeon/PALM_pfp.bin");
MODULE_FIRMWARE("radeon/PALM_me.bin");
MODULE_FIRMWARE("radeon/SUMO_rlc.bin");
MODULE_FIRMWARE("radeon/SUMO_pfp.bin");
MODULE_FIRMWARE("radeon/SUMO_me.bin");
MODULE_FIRMWARE("radeon/SUMO2_pfp.bin");
MODULE_FIRMWARE("radeon/SUMO2_me.bin");

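/*
 * AVIVO CRTC register blocks are laid out at a fixed stride, so the
 * second CRTC's offset can be expressed as the distance between the
 * two H_TOTAL registers; per-CRTC registers are then addressed as
 * reg + crtc_offsets[crtc].
 */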
static const u32 crtc_offsets[2] =
{
	0,
	AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL
};

int r600_debugfs_mc_info_init(struct radeon_device *rdev);

/* r600,rv610,rv630,rv620,rv635,rv670 */
int r600_mc_wait_for_idle(struct radeon_device *rdev);
static void r600_gpu_init(struct radeon_device *rdev);
void r600_fini(struct radeon_device *rdev);
void r600_irq_disable(struct radeon_device *rdev);
static void r600_pcie_gen2_enable(struct radeon_device *rdev);
extern int evergreen_rlc_resume(struct radeon_device *rdev);

/**
 * r600_get_xclk - get the xclk
 *
 * @rdev: radeon_device pointer
 *
 * Returns the reference clock used by the gfx engine
 * (r6xx, IGPs, APUs).
 */
u32 r600_get_xclk(struct radeon_device *rdev)
{
	return rdev->clock.spll.reference_freq;
}

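/**
 * rv6xx_get_temp - get the current GPU temperature
 *
 * @rdev: radeon_device pointer
 *
 * Reads the ASIC_T field of CG_THERMAL_STATUS; the value is a 9-bit
 * signed quantity (bit 8 is the sign bit), so it is sign-extended
 * before being returned in millidegrees Celsius.
 */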
int rv6xx_get_temp(struct radeon_device *rdev)
{
	u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >>
		ASIC_T_SHIFT;
	int actual_temp = temp & 0xff;

	if (temp & 0x100)
		actual_temp -= 256;

	return actual_temp * 1000;
}

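/**
 * r600_pm_get_dynpm_state - look up the requested dynpm state
 *
 * @rdev: radeon_device pointer
 *
 * Selects the requested power state and clock mode indices for the
 * planned dynpm action (minimum, downclock, upclock or default),
 * skipping single-display-only states when several crtcs are active
 * and no-display states while any crtc is active.
 */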
void r600_pm_get_dynpm_state(struct radeon_device *rdev)
{
	int i;

	rdev->pm.dynpm_can_upclock = true;
	rdev->pm.dynpm_can_downclock = true;

	/* power state array is low to high, default is first */
	if ((rdev->flags & RADEON_IS_IGP) || (rdev->family == CHIP_R600)) {
		int min_power_state_index = 0;

		if (rdev->pm.num_power_states > 2)
			min_power_state_index = 1;

		switch (rdev->pm.dynpm_planned_action) {
		case DYNPM_ACTION_MINIMUM:
			rdev->pm.requested_power_state_index = min_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_downclock = false;
			break;
		case DYNPM_ACTION_DOWNCLOCK:
			if (rdev->pm.current_power_state_index == min_power_state_index) {
				rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
				rdev->pm.dynpm_can_downclock = false;
			} else {
				if (rdev->pm.active_crtc_count > 1) {
					for (i = 0; i < rdev->pm.num_power_states; i++) {
						if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
							continue;
						else if (i >= rdev->pm.current_power_state_index) {
							rdev->pm.requested_power_state_index =
								rdev->pm.current_power_state_index;
							break;
						} else {
							rdev->pm.requested_power_state_index = i;
							break;
						}
					}
				} else {
					if (rdev->pm.current_power_state_index == 0)
						rdev->pm.requested_power_state_index =
							rdev->pm.num_power_states - 1;
					else
						rdev->pm.requested_power_state_index =
							rdev->pm.current_power_state_index - 1;
				}
			}
			rdev->pm.requested_clock_mode_index = 0;
			/* don't use the power state if crtcs are active and no display flag is set */
			if ((rdev->pm.active_crtc_count > 0) &&
			    (rdev->pm.power_state[rdev->pm.requested_power_state_index].
			     clock_info[rdev->pm.requested_clock_mode_index].flags &
			     RADEON_PM_MODE_NO_DISPLAY)) {
				rdev->pm.requested_power_state_index++;
			}
			break;
		case DYNPM_ACTION_UPCLOCK:
			if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
				rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
				rdev->pm.dynpm_can_upclock = false;
			} else {
				if (rdev->pm.active_crtc_count > 1) {
					for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
						if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
							continue;
						else if (i <= rdev->pm.current_power_state_index) {
							rdev->pm.requested_power_state_index =
								rdev->pm.current_power_state_index;
							break;
						} else {
							rdev->pm.requested_power_state_index = i;
							break;
						}
					}
				} else
					rdev->pm.requested_power_state_index =
						rdev->pm.current_power_state_index + 1;
			}
			rdev->pm.requested_clock_mode_index = 0;
			break;
		case DYNPM_ACTION_DEFAULT:
			rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_upclock = false;
			break;
		case DYNPM_ACTION_NONE:
		default:
			DRM_ERROR("Requested mode for undefined action\n");
			return;
		}
	} else {
		/* XXX select a power state based on AC/DC, single/dualhead, etc. */
		/* for now just select the first power state and switch between clock modes */
		/* power state array is low to high, default is first (0) */
		if (rdev->pm.active_crtc_count > 1) {
			rdev->pm.requested_power_state_index = -1;
			/* start at 1 as we don't want the default mode */
			for (i = 1; i < rdev->pm.num_power_states; i++) {
				if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
					continue;
				else if ((rdev->pm.power_state[i].type == POWER_STATE_TYPE_PERFORMANCE) ||
					 (rdev->pm.power_state[i].type == POWER_STATE_TYPE_BATTERY)) {
					rdev->pm.requested_power_state_index = i;
					break;
				}
			}
			/* if nothing selected, grab the default state. */
			if (rdev->pm.requested_power_state_index == -1)
				rdev->pm.requested_power_state_index = 0;
		} else
			rdev->pm.requested_power_state_index = 1;

		switch (rdev->pm.dynpm_planned_action) {
		case DYNPM_ACTION_MINIMUM:
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_downclock = false;
			break;
		case DYNPM_ACTION_DOWNCLOCK:
			if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
				if (rdev->pm.current_clock_mode_index == 0) {
					rdev->pm.requested_clock_mode_index = 0;
					rdev->pm.dynpm_can_downclock = false;
				} else
					rdev->pm.requested_clock_mode_index =
						rdev->pm.current_clock_mode_index - 1;
			} else {
				rdev->pm.requested_clock_mode_index = 0;
				rdev->pm.dynpm_can_downclock = false;
			}
			/* don't use the power state if crtcs are active and no display flag is set */
			if ((rdev->pm.active_crtc_count > 0) &&
			    (rdev->pm.power_state[rdev->pm.requested_power_state_index].
			     clock_info[rdev->pm.requested_clock_mode_index].flags &
			     RADEON_PM_MODE_NO_DISPLAY)) {
				rdev->pm.requested_clock_mode_index++;
			}
			break;
		case DYNPM_ACTION_UPCLOCK:
			if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
				if (rdev->pm.current_clock_mode_index ==
				    (rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1)) {
					rdev->pm.requested_clock_mode_index = rdev->pm.current_clock_mode_index;
					rdev->pm.dynpm_can_upclock = false;
				} else
					rdev->pm.requested_clock_mode_index =
						rdev->pm.current_clock_mode_index + 1;
			} else {
				rdev->pm.requested_clock_mode_index =
					rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1;
				rdev->pm.dynpm_can_upclock = false;
			}
			break;
		case DYNPM_ACTION_DEFAULT:
			rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_upclock = false;
			break;
		case DYNPM_ACTION_NONE:
		default:
			DRM_ERROR("Requested mode for undefined action\n");
			return;
		}
	}

	DRM_DEBUG_DRIVER("Requested: e: %d m: %d p: %d\n",
			 rdev->pm.power_state[rdev->pm.requested_power_state_index].
			 clock_info[rdev->pm.requested_clock_mode_index].sclk,
			 rdev->pm.power_state[rdev->pm.requested_power_state_index].
			 clock_info[rdev->pm.requested_clock_mode_index].mclk,
			 rdev->pm.power_state[rdev->pm.requested_power_state_index].
			 pcie_lanes);
}

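/**
 * rs780_pm_init_profile - set up the pm profiles (RS780/RS880)
 *
 * @rdev: radeon_device pointer
 *
 * Fills in the default/low/mid/high single- and multi-head profile
 * indices according to how many power states the board exposes
 * (two, three, or more).
 */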
void rs780_pm_init_profile(struct radeon_device *rdev)
{
	if (rdev->pm.num_power_states == 2) {
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	} else if (rdev->pm.num_power_states == 3) {
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	} else {
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 3;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 3;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	}
}

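/**
 * r600_pm_init_profile - set up the pm profiles (r6xx/r7xx)
 *
 * @rdev: radeon_device pointer
 *
 * On R600 every profile maps to the default power state; boards with
 * at least four power states select states by type (battery vs.
 * performance) via radeon_pm_get_type_index().
 */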
void r600_pm_init_profile(struct radeon_device *rdev)
{
	int idx;

	if (rdev->family == CHIP_R600) {
		/* XXX */
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	} else {
		if (rdev->pm.num_power_states < 4) {
			/* default */
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
			/* low sh */
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
			/* mid sh */
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
			/* high sh */
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
			/* low mh */
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
			/* mid mh */
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
			/* high mh */
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
		} else {
			/* default */
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
			/* low sh */
			if (rdev->flags & RADEON_IS_MOBILITY)
				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
			else
				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
			/* mid sh */
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
			/* high sh */
			idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
			/* low mh */
			if (rdev->flags & RADEON_IS_MOBILITY)
				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
			else
				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
			/* mid mh */
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
			/* high mh */
			idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
		}
	}
}

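/**
 * r600_pm_misc - program non-clock pm settings
 *
 * @rdev: radeon_device pointer
 *
 * Programs the VDDC voltage of the requested power state when it
 * differs from the current one; 0xff01 is treated as a flag rather
 * than an actual voltage and is left alone.
 */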
void r600_pm_misc(struct radeon_device *rdev)
{
	int req_ps_idx = rdev->pm.requested_power_state_index;
	int req_cm_idx = rdev->pm.requested_clock_mode_index;
	struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
	struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;

	if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
		/* 0xff01 is a flag rather than an actual voltage */
		if (voltage->voltage == 0xff01)
			return;
		if (voltage->voltage != rdev->pm.current_vddc) {
			radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
			rdev->pm.current_vddc = voltage->voltage;
			DRM_DEBUG_DRIVER("Setting: v: %d\n", voltage->voltage);
		}
	}
}

bool r600_gui_idle(struct radeon_device *rdev)
{
	if (RREG32(GRBM_STATUS) & GUI_ACTIVE)
		return false;
	else
		return true;
}

/* hpd for digital panel detect/disconnect */
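/**
 * r600_hpd_sense - hpd sense callback
 *
 * @rdev: radeon_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Checks the sense bit of the requested hpd pin (four pins on DCE3,
 * six on DCE 3.2, three on older parts). Returns true if a display
 * is attached.
 */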
bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
	bool connected = false;

	if (ASIC_IS_DCE3(rdev)) {
		switch (hpd) {
		case RADEON_HPD_1:
			if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_2:
			if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_3:
			if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_4:
			if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
			/* DCE 3.2 */
		case RADEON_HPD_5:
			if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_6:
			if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		default:
			break;
		}
	} else {
		switch (hpd) {
		case RADEON_HPD_1:
			if (RREG32(DC_HOT_PLUG_DETECT1_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_2:
			if (RREG32(DC_HOT_PLUG_DETECT2_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_3:
			if (RREG32(DC_HOT_PLUG_DETECT3_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		default:
			break;
		}
	}
	return connected;
}

void r600_hpd_set_polarity(struct radeon_device *rdev,
			   enum radeon_hpd_id hpd)
{
	u32 tmp;
	bool connected = r600_hpd_sense(rdev, hpd);

	if (ASIC_IS_DCE3(rdev)) {
		switch (hpd) {
		case RADEON_HPD_1:
			tmp = RREG32(DC_HPD1_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD1_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_2:
			tmp = RREG32(DC_HPD2_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD2_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_3:
			tmp = RREG32(DC_HPD3_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD3_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_4:
			tmp = RREG32(DC_HPD4_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD4_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_5:
			tmp = RREG32(DC_HPD5_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD5_INT_CONTROL, tmp);
			break;
			/* DCE 3.2 */
		case RADEON_HPD_6:
			tmp = RREG32(DC_HPD6_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD6_INT_CONTROL, tmp);
			break;
		default:
			break;
		}
	} else {
		switch (hpd) {
		case RADEON_HPD_1:
			tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_2:
			tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_3:
			tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
			break;
		default:
			break;
		}
	}
}

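/**
 * r600_hpd_init - hpd setup callback
 *
 * @rdev: radeon_device pointer
 *
 * Enables the hpd pin for each connector, sets its interrupt polarity
 * and unmasks the hpd interrupts. eDP and LVDS connectors are skipped
 * to avoid disturbing the shared DP aux channel (see the bug link in
 * the comment below).
 */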
void r600_hpd_init(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;
	unsigned enable = 0;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);

		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
			/* don't try to enable hpd on eDP or LVDS to avoid
			 * breaking the aux dp channel on imacs; this helps
			 * (but does not completely fix)
			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
			 */
			continue;
		}
		if (ASIC_IS_DCE3(rdev)) {
			u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa);
			if (ASIC_IS_DCE32(rdev))
				tmp |= DC_HPDx_EN;

			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HPD1_CONTROL, tmp);
				break;
			case RADEON_HPD_2:
				WREG32(DC_HPD2_CONTROL, tmp);
				break;
			case RADEON_HPD_3:
				WREG32(DC_HPD3_CONTROL, tmp);
				break;
			case RADEON_HPD_4:
				WREG32(DC_HPD4_CONTROL, tmp);
				break;
				/* DCE 3.2 */
			case RADEON_HPD_5:
				WREG32(DC_HPD5_CONTROL, tmp);
				break;
			case RADEON_HPD_6:
				WREG32(DC_HPD6_CONTROL, tmp);
				break;
			default:
				break;
			}
		} else {
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				break;
			case RADEON_HPD_2:
				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				break;
			case RADEON_HPD_3:
				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				break;
			default:
				break;
			}
		}
		enable |= 1 << radeon_connector->hpd.hpd;
		radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
	}
	radeon_irq_kms_enable_hpd(rdev, enable);
}

void r600_hpd_fini(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;
	unsigned disable = 0;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		if (ASIC_IS_DCE3(rdev)) {
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HPD1_CONTROL, 0);
				break;
			case RADEON_HPD_2:
				WREG32(DC_HPD2_CONTROL, 0);
				break;
			case RADEON_HPD_3:
				WREG32(DC_HPD3_CONTROL, 0);
				break;
			case RADEON_HPD_4:
				WREG32(DC_HPD4_CONTROL, 0);
				break;
				/* DCE 3.2 */
			case RADEON_HPD_5:
				WREG32(DC_HPD5_CONTROL, 0);
				break;
			case RADEON_HPD_6:
				WREG32(DC_HPD6_CONTROL, 0);
				break;
			default:
				break;
			}
		} else {
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, 0);
				break;
			case RADEON_HPD_2:
				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, 0);
				break;
			case RADEON_HPD_3:
				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, 0);
				break;
			default:
				break;
			}
		}
		disable |= 1 << radeon_connector->hpd.hpd;
	}
	radeon_irq_kms_disable_hpd(rdev, disable);
}

/*
 * R600 PCIE GART
 */
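/**
 * r600_pcie_gart_tlb_flush - flush the GART TLB
 *
 * @rdev: radeon_device pointer
 *
 * Flushes the HDP cache so CPU writes reach VRAM (using the r7xx
 * HDP_DEBUG1 write + fb read workaround where required), then
 * invalidates the VM context 0 TLB and polls for completion.
 */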
void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	/* flush hdp cache so updates hit vram */
	if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
	    !(rdev->flags & RADEON_IS_AGP)) {
		void __iomem *ptr = (void *)rdev->gart.ptr;
		u32 tmp;

		/* r7xx hw bug.  write to HDP_DEBUG1 followed by fb read
		 * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL.
		 * This seems to cause problems on some AGP cards. Just use the old
		 * method for them.
		 */
		WREG32(HDP_DEBUG1, 0);
		tmp = readl((void __iomem *)ptr);
	} else
		WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);

	WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (rdev->mc.gtt_end - 1) >> 12);
	WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
		tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
		if (tmp == 2) {
			printk(KERN_WARNING "[drm] r600 flush TLB failed\n");
			return;
		}
		if (tmp) {
			return;
		}
		udelay(1);
	}
}

int r600_pcie_gart_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.robj) {
		WARN(1, "R600 PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	if (r)
		return r;
	rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
	return radeon_gart_table_vram_alloc(rdev);
}

static int r600_pcie_gart_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int r, i;

	if (rdev->gart.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	radeon_gart_restore(rdev);

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
			(u32)(rdev->dummy_page.addr >> 12));
	for (i = 1; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	r600_pcie_gart_tlb_flush(rdev);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20),
		 (unsigned long long)rdev->gart.table_addr);
	rdev->gart.ready = true;
	return 0;
}

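/**
 * r600_pcie_gart_disable - disable the PCIE GART
 *
 * @rdev: radeon_device pointer
 *
 * Disables all VM page table contexts and the L2 cache, then unpins
 * the GART table from VRAM.
 */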
static void r600_pcie_gart_disable(struct radeon_device *rdev)
{
	u32 tmp;
	int i;

	/* Disable all tables */
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	/* Disable L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup L1 TLB control */
	tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	radeon_gart_table_vram_unpin(rdev);
}

static void r600_pcie_gart_fini(struct radeon_device *rdev)
{
	radeon_gart_fini(rdev);
	r600_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
}

static void r600_agp_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int i;

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
}

int r600_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(R_000E50_SRBM_STATUS) & 0x3F00;
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -1;
}

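/*
 * RS780/RS880 MC registers are accessed indirectly: the register offset
 * is written to MC_INDEX (with the write-enable bit set for stores) and
 * the payload is transferred through MC_DATA.
 */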
uint32_t rs780_mc_rreg(struct radeon_device *rdev, uint32_t reg)
{
	uint32_t r;

	WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg));
	r = RREG32(R_0028FC_MC_DATA);
	WREG32(R_0028F8_MC_INDEX, ~C_0028F8_MC_IND_ADDR);
	return r;
}

void rs780_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
	WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg) |
		S_0028F8_MC_IND_WR_EN(1));
	WREG32(R_0028FC_MC_DATA, v);
	WREG32(R_0028F8_MC_INDEX, 0x7F);
}

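/**
 * r600_mc_program - program the memory controller
 *
 * @rdev: radeon_device pointer
 *
 * Stops the MC clients, waits for the MC to idle, programs the VRAM
 * and AGP apertures, then resumes the clients and turns off the VGA
 * renderer so the driver owns VRAM.
 */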
static void r600_mc_program(struct radeon_device *rdev)
{
	struct rv515_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}
	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);

	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	/* Lockout access through VGA aperture (doesn't exist before R600) */
	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
	/* Update configuration */
	if (rdev->flags & RADEON_IS_AGP) {
		if (rdev->mc.vram_start < rdev->mc.gtt_start) {
			/* VRAM before AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.vram_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.gtt_end >> 12);
		} else {
			/* VRAM after AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.gtt_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.vram_end >> 12);
		}
	} else {
		WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12);
		WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12);
	}
	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12);
	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(MC_VM_FB_LOCATION, tmp);
	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
	WREG32(HDP_NONSURFACE_INFO, (2 << 7));
	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	if (rdev->flags & RADEON_IS_AGP) {
		WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22);
		WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22);
		WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
	} else {
		WREG32(MC_VM_AGP_BASE, 0);
		WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
		WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
	}
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	rv515_mc_resume(rdev, &save);
	/* we need to own VRAM, so turn off the VGA renderer here
	 * to stop it overwriting our objects */
	rv515_vga_render_disable(rdev);
}

/**
 * r600_vram_gtt_location - try to find VRAM & GTT location
 * @rdev: radeon device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * This function tries to place VRAM at the same offset as in the CPU
 * (PCI) address space, as some GPUs seem to have issues when we
 * reprogram it at a different address space.
 *
 * If there is not enough space to fit the non-visible VRAM after the
 * aperture, then we limit the VRAM size to the aperture.
 *
 * If we are using AGP, then we place VRAM adjacent to the AGP aperture,
 * as we need them to be contiguous from the GPU's point of view so that
 * we can program the GPU to catch accesses outside them (weird GPU
 * policy, see ??).
 *
 * This function never fails; the worst case is limiting VRAM or GTT.
 *
 * Note: GTT start, end, and size should be initialized before calling
 * this function on AGP platforms.
 */
Alex Deucher0ef0c1f2010-11-22 17:56:26 -05001144static void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
Jerome Glissed594e462010-02-17 21:54:29 +00001145{
1146 u64 size_bf, size_af;
1147
1148 if (mc->mc_vram_size > 0xE0000000) {
1149 /* leave room for at least 512M GTT */
1150 dev_warn(rdev->dev, "limiting VRAM\n");
1151 mc->real_vram_size = 0xE0000000;
1152 mc->mc_vram_size = 0xE0000000;
1153 }
1154 if (rdev->flags & RADEON_IS_AGP) {
1155 size_bf = mc->gtt_start;
Alex Deucher9ed8b1f2013-04-08 11:13:01 -04001156 size_af = mc->mc_mask - mc->gtt_end;
Jerome Glissed594e462010-02-17 21:54:29 +00001157 if (size_bf > size_af) {
1158 if (mc->mc_vram_size > size_bf) {
1159 dev_warn(rdev->dev, "limiting VRAM\n");
1160 mc->real_vram_size = size_bf;
1161 mc->mc_vram_size = size_bf;
1162 }
1163 mc->vram_start = mc->gtt_start - mc->mc_vram_size;
1164 } else {
1165 if (mc->mc_vram_size > size_af) {
1166 dev_warn(rdev->dev, "limiting VRAM\n");
1167 mc->real_vram_size = size_af;
1168 mc->mc_vram_size = size_af;
1169 }
Jerome Glissedfc6ae52012-04-17 16:51:38 -04001170 mc->vram_start = mc->gtt_end + 1;
Jerome Glissed594e462010-02-17 21:54:29 +00001171 }
1172 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
1173 dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
1174 mc->mc_vram_size >> 20, mc->vram_start,
1175 mc->vram_end, mc->real_vram_size >> 20);
1176 } else {
1177 u64 base = 0;
Alex Deucher8961d522010-12-03 14:37:22 -05001178 if (rdev->flags & RADEON_IS_IGP) {
1179 base = RREG32(MC_VM_FB_LOCATION) & 0xFFFF;
1180 base <<= 24;
1181 }
Jerome Glissed594e462010-02-17 21:54:29 +00001182 radeon_vram_location(rdev, &rdev->mc, base);
Alex Deucher8d369bb2010-07-15 10:51:10 -04001183 rdev->mc.gtt_base_align = 0;
Jerome Glissed594e462010-02-17 21:54:29 +00001184 radeon_gtt_location(rdev, mc);
1185 }
1186}
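
/*
 * Worked example for r600_vram_gtt_location() (values assumed purely for
 * illustration): with an AGP GTT at 0x20000000 - 0x2FFFFFFF and a 32-bit
 * mc_mask, the space above the GTT (size_af) is larger than the space
 * below it (size_bf), so a 256MB card gets vram_start = gtt_end + 1 =
 * 0x30000000 and vram_end = 0x3FFFFFFF, keeping VRAM and GTT contiguous
 * from the GPU's point of view.
 */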
1187
Lauri Kasanen1109ca02012-08-31 13:43:50 -04001188static int r600_mc_init(struct radeon_device *rdev)
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001189{
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001190 u32 tmp;
Alex Deucher5885b7a2009-10-19 17:23:33 -04001191 int chansize, numchan;
Samuel Li65337e62013-04-05 17:50:53 -04001192 uint32_t h_addr, l_addr;
1193 unsigned long long k8_addr;
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001194
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001195	/* Get VRAM information */
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001196 rdev->mc.vram_is_ddr = true;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001197 tmp = RREG32(RAMCFG);
1198 if (tmp & CHANSIZE_OVERRIDE) {
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001199 chansize = 16;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001200 } else if (tmp & CHANSIZE_MASK) {
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001201 chansize = 64;
1202 } else {
1203 chansize = 32;
1204 }
Alex Deucher5885b7a2009-10-19 17:23:33 -04001205 tmp = RREG32(CHMAP);
1206 switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
1207 case 0:
1208 default:
1209 numchan = 1;
1210 break;
1211 case 1:
1212 numchan = 2;
1213 break;
1214 case 2:
1215 numchan = 4;
1216 break;
1217 case 3:
1218 numchan = 8;
1219 break;
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001220 }
Alex Deucher5885b7a2009-10-19 17:23:33 -04001221 rdev->mc.vram_width = numchan * chansize;
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001222	/* Could aperture size report 0? */
Jordan Crouse01d73a62010-05-27 13:40:24 -06001223 rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
1224 rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001225 /* Setup GPU memory space */
1226 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
1227 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
Jerome Glisse51e5fcd2010-02-19 14:33:54 +00001228 rdev->mc.visible_vram_size = rdev->mc.aper_size;
Jerome Glissed594e462010-02-17 21:54:29 +00001229 r600_vram_gtt_location(rdev, &rdev->mc);
Alex Deucherf47299c2010-03-16 20:54:38 -04001230
Alex Deucherf8920342010-06-30 12:02:03 -04001231 if (rdev->flags & RADEON_IS_IGP) {
1232 rs690_pm_info(rdev);
Alex Deucher06b64762010-01-05 11:27:29 -05001233 rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
Samuel Li65337e62013-04-05 17:50:53 -04001234
1235 if (rdev->family == CHIP_RS780 || rdev->family == CHIP_RS880) {
1236 /* Use K8 direct mapping for fast fb access. */
1237 rdev->fastfb_working = false;
1238 h_addr = G_000012_K8_ADDR_EXT(RREG32_MC(R_000012_MC_MISC_UMA_CNTL));
1239 l_addr = RREG32_MC(R_000011_K8_FB_LOCATION);
1240 k8_addr = ((unsigned long long)h_addr) << 32 | l_addr;
1241#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
1242 if (k8_addr + rdev->mc.visible_vram_size < 0x100000000ULL)
1243#endif
1244 {
1245			/* FastFB is meant to be used with UMA memory, so it is
1246			 * simply disabled here when sideport memory is present.
1247			 */
1248 if (rdev->mc.igp_sideport_enabled == false && radeon_fastfb == 1) {
1249 DRM_INFO("Direct mapping: aper base at 0x%llx, replaced by direct mapping base 0x%llx.\n",
1250 (unsigned long long)rdev->mc.aper_base, k8_addr);
1251 rdev->mc.aper_base = (resource_size_t)k8_addr;
1252 rdev->fastfb_working = true;
1253 }
1254 }
1255 }
Alex Deucherf8920342010-06-30 12:02:03 -04001256 }
Samuel Li65337e62013-04-05 17:50:53 -04001257
Alex Deucherf47299c2010-03-16 20:54:38 -04001258 radeon_update_bandwidth_info(rdev);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001259 return 0;
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001260}
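
/*
 * Editorial note: on RS780/RS880 the UMA carveout is also reachable
 * through the CPU's K8 direct-mapping window (K8_FB_LOCATION plus the
 * extension bits in MC_MISC_UMA_CNTL); when no sideport memory is
 * present and radeon_fastfb=1, aper_base is rewritten to that address
 * so that CPU access no longer has to go through the PCI aperture.
 */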
1261
Alex Deucher16cdf042011-10-28 10:30:02 -04001262int r600_vram_scratch_init(struct radeon_device *rdev)
1263{
1264 int r;
1265
1266 if (rdev->vram_scratch.robj == NULL) {
1267 r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE,
1268 PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
Alex Deucher40f5cf92012-05-10 18:33:13 -04001269 NULL, &rdev->vram_scratch.robj);
Alex Deucher16cdf042011-10-28 10:30:02 -04001270 if (r) {
1271 return r;
1272 }
1273 }
1274
1275 r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
1276 if (unlikely(r != 0))
1277 return r;
1278 r = radeon_bo_pin(rdev->vram_scratch.robj,
1279 RADEON_GEM_DOMAIN_VRAM, &rdev->vram_scratch.gpu_addr);
1280 if (r) {
1281 radeon_bo_unreserve(rdev->vram_scratch.robj);
1282 return r;
1283 }
1284 r = radeon_bo_kmap(rdev->vram_scratch.robj,
1285 (void **)&rdev->vram_scratch.ptr);
1286 if (r)
1287 radeon_bo_unpin(rdev->vram_scratch.robj);
1288 radeon_bo_unreserve(rdev->vram_scratch.robj);
1289
1290 return r;
1291}
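
/*
 * Usage sketch (illustration only): the scratch BO created above backs
 * MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, so stray MC accesses land in a
 * harmless VRAM page.  A caller would typically pair it like this:
 *
 *	r = r600_vram_scratch_init(rdev);
 *	if (r)
 *		return r;
 *	...
 *	r600_vram_scratch_fini(rdev);	(on teardown)
 */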
1292
1293void r600_vram_scratch_fini(struct radeon_device *rdev)
1294{
1295 int r;
1296
1297 if (rdev->vram_scratch.robj == NULL) {
1298 return;
1299 }
1300 r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
1301 if (likely(r == 0)) {
1302 radeon_bo_kunmap(rdev->vram_scratch.robj);
1303 radeon_bo_unpin(rdev->vram_scratch.robj);
1304 radeon_bo_unreserve(rdev->vram_scratch.robj);
1305 }
1306 radeon_bo_unref(&rdev->vram_scratch.robj);
1307}
1308
Alex Deucher410a3412013-01-18 13:05:39 -05001309void r600_set_bios_scratch_engine_hung(struct radeon_device *rdev, bool hung)
1310{
1311 u32 tmp = RREG32(R600_BIOS_3_SCRATCH);
1312
1313 if (hung)
1314 tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG;
1315 else
1316 tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG;
1317
1318 WREG32(R600_BIOS_3_SCRATCH, tmp);
1319}
1320
Alex Deucherd3cb7812013-01-18 13:53:37 -05001321static void r600_print_gpu_status_regs(struct radeon_device *rdev)
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001322{
Jerome Glisse64c56e82013-01-02 17:30:35 -05001323 dev_info(rdev->dev, " R_008010_GRBM_STATUS = 0x%08X\n",
Alex Deucherd3cb7812013-01-18 13:53:37 -05001324 RREG32(R_008010_GRBM_STATUS));
Jerome Glisse64c56e82013-01-02 17:30:35 -05001325 dev_info(rdev->dev, " R_008014_GRBM_STATUS2 = 0x%08X\n",
Alex Deucherd3cb7812013-01-18 13:53:37 -05001326 RREG32(R_008014_GRBM_STATUS2));
Jerome Glisse64c56e82013-01-02 17:30:35 -05001327 dev_info(rdev->dev, " R_000E50_SRBM_STATUS = 0x%08X\n",
Alex Deucherd3cb7812013-01-18 13:53:37 -05001328 RREG32(R_000E50_SRBM_STATUS));
Jerome Glisse440a7cd2012-06-27 12:25:01 -04001329 dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
Alex Deucherd3cb7812013-01-18 13:53:37 -05001330 RREG32(CP_STALLED_STAT1));
Jerome Glisse440a7cd2012-06-27 12:25:01 -04001331 dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n",
Alex Deucherd3cb7812013-01-18 13:53:37 -05001332 RREG32(CP_STALLED_STAT2));
Jerome Glisse440a7cd2012-06-27 12:25:01 -04001333 dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n",
Alex Deucherd3cb7812013-01-18 13:53:37 -05001334 RREG32(CP_BUSY_STAT));
Jerome Glisse440a7cd2012-06-27 12:25:01 -04001335 dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
Alex Deucherd3cb7812013-01-18 13:53:37 -05001336 RREG32(CP_STAT));
Alex Deucher71e3d152013-01-03 12:20:35 -05001337 dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
1338 RREG32(DMA_STATUS_REG));
1339}
1340
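/*
 * Editorial note on r600_is_display_hung() below: it samples the HV
 * counter of every enabled CRTC up to 10 times, 100us apart, and only
 * reports the display hung if none of the sampled counters advance
 * during that whole window.
 */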
Alex Deucherf13f7732013-01-18 18:12:22 -05001341static bool r600_is_display_hung(struct radeon_device *rdev)
1342{
1343 u32 crtc_hung = 0;
1344 u32 crtc_status[2];
1345 u32 i, j, tmp;
1346
1347 for (i = 0; i < rdev->num_crtc; i++) {
1348 if (RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]) & AVIVO_CRTC_EN) {
1349 crtc_status[i] = RREG32(AVIVO_D1CRTC_STATUS_HV_COUNT + crtc_offsets[i]);
1350 crtc_hung |= (1 << i);
1351 }
1352 }
1353
1354 for (j = 0; j < 10; j++) {
1355 for (i = 0; i < rdev->num_crtc; i++) {
1356 if (crtc_hung & (1 << i)) {
1357 tmp = RREG32(AVIVO_D1CRTC_STATUS_HV_COUNT + crtc_offsets[i]);
1358 if (tmp != crtc_status[i])
1359 crtc_hung &= ~(1 << i);
1360 }
1361 }
1362 if (crtc_hung == 0)
1363 return false;
1364 udelay(100);
1365 }
1366
1367 return true;
1368}
1369
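/*
 * Editorial note on r600_gpu_check_soft_reset() below: it derives a
 * RADEON_RESET_* mask from the GRBM, DMA and SRBM status registers,
 * mapping each busy/pending bit to the block that would need a soft
 * reset; the MC bit is cleared again at the end since a busy MC is
 * usually not a hung MC.
 */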
1370static u32 r600_gpu_check_soft_reset(struct radeon_device *rdev)
1371{
1372 u32 reset_mask = 0;
1373 u32 tmp;
1374
1375 /* GRBM_STATUS */
1376 tmp = RREG32(R_008010_GRBM_STATUS);
1377 if (rdev->family >= CHIP_RV770) {
1378 if (G_008010_PA_BUSY(tmp) | G_008010_SC_BUSY(tmp) |
1379 G_008010_SH_BUSY(tmp) | G_008010_SX_BUSY(tmp) |
1380 G_008010_TA_BUSY(tmp) | G_008010_VGT_BUSY(tmp) |
1381 G_008010_DB03_BUSY(tmp) | G_008010_CB03_BUSY(tmp) |
1382 G_008010_SPI03_BUSY(tmp) | G_008010_VGT_BUSY_NO_DMA(tmp))
1383 reset_mask |= RADEON_RESET_GFX;
1384 } else {
1385 if (G_008010_PA_BUSY(tmp) | G_008010_SC_BUSY(tmp) |
1386 G_008010_SH_BUSY(tmp) | G_008010_SX_BUSY(tmp) |
1387 G_008010_TA03_BUSY(tmp) | G_008010_VGT_BUSY(tmp) |
1388 G_008010_DB03_BUSY(tmp) | G_008010_CB03_BUSY(tmp) |
1389 G_008010_SPI03_BUSY(tmp) | G_008010_VGT_BUSY_NO_DMA(tmp))
1390 reset_mask |= RADEON_RESET_GFX;
1391 }
1392
1393 if (G_008010_CF_RQ_PENDING(tmp) | G_008010_PF_RQ_PENDING(tmp) |
1394 G_008010_CP_BUSY(tmp) | G_008010_CP_COHERENCY_BUSY(tmp))
1395 reset_mask |= RADEON_RESET_CP;
1396
1397 if (G_008010_GRBM_EE_BUSY(tmp))
1398 reset_mask |= RADEON_RESET_GRBM | RADEON_RESET_GFX | RADEON_RESET_CP;
1399
1400 /* DMA_STATUS_REG */
1401 tmp = RREG32(DMA_STATUS_REG);
1402 if (!(tmp & DMA_IDLE))
1403 reset_mask |= RADEON_RESET_DMA;
1404
1405 /* SRBM_STATUS */
1406 tmp = RREG32(R_000E50_SRBM_STATUS);
1407 if (G_000E50_RLC_RQ_PENDING(tmp) | G_000E50_RLC_BUSY(tmp))
1408 reset_mask |= RADEON_RESET_RLC;
1409
1410 if (G_000E50_IH_BUSY(tmp))
1411 reset_mask |= RADEON_RESET_IH;
1412
1413 if (G_000E50_SEM_BUSY(tmp))
1414 reset_mask |= RADEON_RESET_SEM;
1415
1416 if (G_000E50_GRBM_RQ_PENDING(tmp))
1417 reset_mask |= RADEON_RESET_GRBM;
1418
1419 if (G_000E50_VMC_BUSY(tmp))
1420 reset_mask |= RADEON_RESET_VMC;
1421
1422 if (G_000E50_MCB_BUSY(tmp) | G_000E50_MCDZ_BUSY(tmp) |
1423 G_000E50_MCDY_BUSY(tmp) | G_000E50_MCDX_BUSY(tmp) |
1424 G_000E50_MCDW_BUSY(tmp))
1425 reset_mask |= RADEON_RESET_MC;
1426
1427 if (r600_is_display_hung(rdev))
1428 reset_mask |= RADEON_RESET_DISPLAY;
1429
Alex Deucherd808fc82013-02-28 10:03:08 -05001430	/* Skip MC reset as it's most likely not hung, just busy */
1431 if (reset_mask & RADEON_RESET_MC) {
1432 DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
1433 reset_mask &= ~RADEON_RESET_MC;
1434 }
1435
Alex Deucherf13f7732013-01-18 18:12:22 -05001436 return reset_mask;
1437}
1438
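/*
 * Editorial note on r600_gpu_soft_reset() below: the sequence is the
 * usual r6xx one - halt the CP and RLC (and the DMA ring if requested),
 * quiesce the MC, pulse the per-block GRBM/SRBM soft-reset bits
 * (write 1, wait ~50us, write 0), then restore the MC and give the
 * hardware a moment to settle before re-reading the status registers.
 */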
1439static void r600_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
Alex Deucher71e3d152013-01-03 12:20:35 -05001440{
1441 struct rv515_mc_save save;
Alex Deucherd3cb7812013-01-18 13:53:37 -05001442 u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
1443 u32 tmp;
Alex Deucher19fc42e2013-01-14 11:04:39 -05001444
Alex Deucher71e3d152013-01-03 12:20:35 -05001445 if (reset_mask == 0)
Alex Deucherf13f7732013-01-18 18:12:22 -05001446 return;
Alex Deucher71e3d152013-01-03 12:20:35 -05001447
1448 dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
1449
Alex Deucherd3cb7812013-01-18 13:53:37 -05001450 r600_print_gpu_status_regs(rdev);
1451
Alex Deucherd3cb7812013-01-18 13:53:37 -05001452 /* Disable CP parsing/prefetching */
1453 if (rdev->family >= CHIP_RV770)
1454 WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1) | S_0086D8_CP_PFP_HALT(1));
1455 else
1456 WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
Alex Deucher71e3d152013-01-03 12:20:35 -05001457
Alex Deucherd3cb7812013-01-18 13:53:37 -05001458 /* disable the RLC */
1459 WREG32(RLC_CNTL, 0);
1460
1461 if (reset_mask & RADEON_RESET_DMA) {
1462 /* Disable DMA */
1463 tmp = RREG32(DMA_RB_CNTL);
1464 tmp &= ~DMA_RB_ENABLE;
1465 WREG32(DMA_RB_CNTL, tmp);
1466 }
1467
1468 mdelay(50);
1469
Alex Deucherca578022013-01-23 18:56:08 -05001470 rv515_mc_stop(rdev, &save);
1471 if (r600_mc_wait_for_idle(rdev)) {
1472		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
1473 }
1474
Alex Deucherd3cb7812013-01-18 13:53:37 -05001475 if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE)) {
1476 if (rdev->family >= CHIP_RV770)
1477 grbm_soft_reset |= S_008020_SOFT_RESET_DB(1) |
1478 S_008020_SOFT_RESET_CB(1) |
1479 S_008020_SOFT_RESET_PA(1) |
1480 S_008020_SOFT_RESET_SC(1) |
1481 S_008020_SOFT_RESET_SPI(1) |
1482 S_008020_SOFT_RESET_SX(1) |
1483 S_008020_SOFT_RESET_SH(1) |
1484 S_008020_SOFT_RESET_TC(1) |
1485 S_008020_SOFT_RESET_TA(1) |
1486 S_008020_SOFT_RESET_VC(1) |
1487 S_008020_SOFT_RESET_VGT(1);
1488 else
1489 grbm_soft_reset |= S_008020_SOFT_RESET_CR(1) |
1490 S_008020_SOFT_RESET_DB(1) |
1491 S_008020_SOFT_RESET_CB(1) |
1492 S_008020_SOFT_RESET_PA(1) |
1493 S_008020_SOFT_RESET_SC(1) |
1494 S_008020_SOFT_RESET_SMX(1) |
1495 S_008020_SOFT_RESET_SPI(1) |
1496 S_008020_SOFT_RESET_SX(1) |
1497 S_008020_SOFT_RESET_SH(1) |
1498 S_008020_SOFT_RESET_TC(1) |
1499 S_008020_SOFT_RESET_TA(1) |
1500 S_008020_SOFT_RESET_VC(1) |
1501 S_008020_SOFT_RESET_VGT(1);
1502 }
1503
1504 if (reset_mask & RADEON_RESET_CP) {
1505 grbm_soft_reset |= S_008020_SOFT_RESET_CP(1) |
1506 S_008020_SOFT_RESET_VGT(1);
1507
1508 srbm_soft_reset |= S_000E60_SOFT_RESET_GRBM(1);
1509 }
1510
1511 if (reset_mask & RADEON_RESET_DMA) {
1512 if (rdev->family >= CHIP_RV770)
1513 srbm_soft_reset |= RV770_SOFT_RESET_DMA;
1514 else
1515 srbm_soft_reset |= SOFT_RESET_DMA;
1516 }
1517
Alex Deucherf13f7732013-01-18 18:12:22 -05001518 if (reset_mask & RADEON_RESET_RLC)
1519 srbm_soft_reset |= S_000E60_SOFT_RESET_RLC(1);
1520
1521 if (reset_mask & RADEON_RESET_SEM)
1522 srbm_soft_reset |= S_000E60_SOFT_RESET_SEM(1);
1523
1524 if (reset_mask & RADEON_RESET_IH)
1525 srbm_soft_reset |= S_000E60_SOFT_RESET_IH(1);
1526
1527 if (reset_mask & RADEON_RESET_GRBM)
1528 srbm_soft_reset |= S_000E60_SOFT_RESET_GRBM(1);
1529
Alex Deucher24178ec2013-01-24 15:00:17 -05001530 if (!(rdev->flags & RADEON_IS_IGP)) {
1531 if (reset_mask & RADEON_RESET_MC)
1532 srbm_soft_reset |= S_000E60_SOFT_RESET_MC(1);
1533 }
Alex Deucherf13f7732013-01-18 18:12:22 -05001534
1535 if (reset_mask & RADEON_RESET_VMC)
1536 srbm_soft_reset |= S_000E60_SOFT_RESET_VMC(1);
1537
Alex Deucherd3cb7812013-01-18 13:53:37 -05001538 if (grbm_soft_reset) {
1539 tmp = RREG32(R_008020_GRBM_SOFT_RESET);
1540 tmp |= grbm_soft_reset;
1541 dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
1542 WREG32(R_008020_GRBM_SOFT_RESET, tmp);
1543 tmp = RREG32(R_008020_GRBM_SOFT_RESET);
1544
1545 udelay(50);
1546
1547 tmp &= ~grbm_soft_reset;
1548 WREG32(R_008020_GRBM_SOFT_RESET, tmp);
1549 tmp = RREG32(R_008020_GRBM_SOFT_RESET);
1550 }
1551
1552 if (srbm_soft_reset) {
1553 tmp = RREG32(SRBM_SOFT_RESET);
1554 tmp |= srbm_soft_reset;
1555 dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
1556 WREG32(SRBM_SOFT_RESET, tmp);
1557 tmp = RREG32(SRBM_SOFT_RESET);
1558
1559 udelay(50);
1560
1561 tmp &= ~srbm_soft_reset;
1562 WREG32(SRBM_SOFT_RESET, tmp);
1563 tmp = RREG32(SRBM_SOFT_RESET);
1564 }
Alex Deucher71e3d152013-01-03 12:20:35 -05001565
1566 /* Wait a little for things to settle down */
1567 mdelay(1);
1568
Jerome Glissea3c19452009-10-01 18:02:13 +02001569 rv515_mc_resume(rdev, &save);
Alex Deucherd3cb7812013-01-18 13:53:37 -05001570 udelay(50);
Alex Deucher410a3412013-01-18 13:05:39 -05001571
Alex Deucherd3cb7812013-01-18 13:53:37 -05001572 r600_print_gpu_status_regs(rdev);
Alex Deucherd3cb7812013-01-18 13:53:37 -05001573}
1574
1575int r600_asic_reset(struct radeon_device *rdev)
1576{
Alex Deucherf13f7732013-01-18 18:12:22 -05001577 u32 reset_mask;
1578
1579 reset_mask = r600_gpu_check_soft_reset(rdev);
1580
1581 if (reset_mask)
1582 r600_set_bios_scratch_engine_hung(rdev, true);
1583
1584 r600_gpu_soft_reset(rdev, reset_mask);
1585
1586 reset_mask = r600_gpu_check_soft_reset(rdev);
1587
1588 if (!reset_mask)
1589 r600_set_bios_scratch_engine_hung(rdev, false);
1590
1591 return 0;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001592}
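
/*
 * Note (editorial): r600_asic_reset() above returns 0 even if some
 * engines are still busy after the reset; the BIOS scratch "engine
 * hung" bit is only cleared when the second status check comes back
 * clean, and the per-ring lockup helpers below re-evaluate the state
 * on their next invocation.
 */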
1593
Alex Deucher123bc182013-01-24 11:37:19 -05001594/**
1595 * r600_gfx_is_lockup - Check if the GFX engine is locked up
1596 *
1597 * @rdev: radeon_device pointer
1598 * @ring: radeon_ring structure holding ring information
1599 *
1600 * Check if the GFX engine is locked up.
1601 * Returns true if the engine appears to be locked up, false if not.
1602 */
1603bool r600_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
Jerome Glisse225758d2010-03-09 14:45:10 +00001604{
Alex Deucher123bc182013-01-24 11:37:19 -05001605 u32 reset_mask = r600_gpu_check_soft_reset(rdev);
Jerome Glisse225758d2010-03-09 14:45:10 +00001606
Alex Deucher123bc182013-01-24 11:37:19 -05001607 if (!(reset_mask & (RADEON_RESET_GFX |
1608 RADEON_RESET_COMPUTE |
1609 RADEON_RESET_CP))) {
Christian König069211e2012-05-02 15:11:20 +02001610 radeon_ring_lockup_update(ring);
Jerome Glisse225758d2010-03-09 14:45:10 +00001611 return false;
1612 }
1613 /* force CP activities */
Christian König7b9ef162012-05-02 15:11:23 +02001614 radeon_ring_force_activity(rdev, ring);
Christian König069211e2012-05-02 15:11:20 +02001615 return radeon_ring_test_lockup(rdev, ring);
Jerome Glisse225758d2010-03-09 14:45:10 +00001616}
1617
Alex Deucher4d756582012-09-27 15:08:35 -04001618/**
1619 * r600_dma_is_lockup - Check if the DMA engine is locked up
1620 *
1621 * @rdev: radeon_device pointer
1622 * @ring: radeon_ring structure holding ring information
1623 *
Alex Deucher123bc182013-01-24 11:37:19 -05001624 * Check if the async DMA engine is locked up.
Alex Deucher4d756582012-09-27 15:08:35 -04001625 * Returns true if the engine appears to be locked up, false if not.
1626 */
1627bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
1628{
Alex Deucher123bc182013-01-24 11:37:19 -05001629 u32 reset_mask = r600_gpu_check_soft_reset(rdev);
Alex Deucher4d756582012-09-27 15:08:35 -04001630
Alex Deucher123bc182013-01-24 11:37:19 -05001631 if (!(reset_mask & RADEON_RESET_DMA)) {
Alex Deucher4d756582012-09-27 15:08:35 -04001632 radeon_ring_lockup_update(ring);
1633 return false;
1634 }
1635 /* force ring activities */
1636 radeon_ring_force_activity(rdev, ring);
1637 return radeon_ring_test_lockup(rdev, ring);
1638}
1639
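/*
 * Worked example for r6xx_remap_render_backend() below (values assumed
 * purely for illustration): with tiling_pipe_num = 3 (8 pipes),
 * max_rb_num = 4, total_max_rb_num = 8 and no RBs fused off, the mask
 * step yields disabled_rb_mask = 0xf0, so req_rb_num = 4 and
 * pipe_rb_ratio = 2; each of the 4 backends is then written twice into
 * 2-bit fields and the function returns 0xfa50 (backends 3,3,2,2,1,1,0,0
 * from MSB to LSB).
 */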
Alex Deucher416a2bd2012-05-31 19:00:25 -04001640u32 r6xx_remap_render_backend(struct radeon_device *rdev,
1641 u32 tiling_pipe_num,
1642 u32 max_rb_num,
1643 u32 total_max_rb_num,
1644 u32 disabled_rb_mask)
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001645{
Alex Deucher416a2bd2012-05-31 19:00:25 -04001646 u32 rendering_pipe_num, rb_num_width, req_rb_num;
Mikko Tiihonenf689e3a2013-01-30 14:10:04 -05001647 u32 pipe_rb_ratio, pipe_rb_remain, tmp;
Alex Deucher416a2bd2012-05-31 19:00:25 -04001648 u32 data = 0, mask = 1 << (max_rb_num - 1);
1649 unsigned i, j;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001650
Alex Deucher416a2bd2012-05-31 19:00:25 -04001651 /* mask out the RBs that don't exist on that asic */
Mikko Tiihonenf689e3a2013-01-30 14:10:04 -05001652 tmp = disabled_rb_mask | ((0xff << max_rb_num) & 0xff);
1653 /* make sure at least one RB is available */
1654 if ((tmp & 0xff) != 0xff)
1655 disabled_rb_mask = tmp;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001656
Alex Deucher416a2bd2012-05-31 19:00:25 -04001657 rendering_pipe_num = 1 << tiling_pipe_num;
1658 req_rb_num = total_max_rb_num - r600_count_pipe_bits(disabled_rb_mask);
1659 BUG_ON(rendering_pipe_num < req_rb_num);
1660
1661 pipe_rb_ratio = rendering_pipe_num / req_rb_num;
1662 pipe_rb_remain = rendering_pipe_num - pipe_rb_ratio * req_rb_num;
1663
1664 if (rdev->family <= CHIP_RV740) {
1665 /* r6xx/r7xx */
1666 rb_num_width = 2;
1667 } else {
1668 /* eg+ */
1669 rb_num_width = 4;
1670 }
1671
1672 for (i = 0; i < max_rb_num; i++) {
1673 if (!(mask & disabled_rb_mask)) {
1674 for (j = 0; j < pipe_rb_ratio; j++) {
1675 data <<= rb_num_width;
1676 data |= max_rb_num - i - 1;
1677 }
1678 if (pipe_rb_remain) {
1679 data <<= rb_num_width;
1680 data |= max_rb_num - i - 1;
1681 pipe_rb_remain--;
1682 }
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001683 }
Alex Deucher416a2bd2012-05-31 19:00:25 -04001684 mask >>= 1;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001685 }
1686
Alex Deucher416a2bd2012-05-31 19:00:25 -04001687 return data;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001688}
1689
1690int r600_count_pipe_bits(uint32_t val)
1691{
Akinobu Mitaef8cf3a2012-11-09 12:10:41 +00001692 return hweight32(val);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001693}
1694
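/*
 * Editorial note on r600_gpu_init() below: it first loads the
 * per-family shader/pipe limits into rdev->config.r600, then derives
 * the tiling configuration from RAMCFG, trims max_backends/max_pipes/
 * max_simds by whatever the fuses report as disabled, and finally
 * programs SQ/SPI/VGT defaults that the 2D/3D drivers are expected to
 * adjust later.
 */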
Lauri Kasanen1109ca02012-08-31 13:43:50 -04001695static void r600_gpu_init(struct radeon_device *rdev)
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001696{
1697 u32 tiling_config;
1698 u32 ramcfg;
Alex Deucherd03f5d52010-02-19 16:22:31 -05001699 u32 cc_rb_backend_disable;
1700 u32 cc_gc_shader_pipe_config;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001701 u32 tmp;
1702 int i, j;
1703 u32 sq_config;
1704 u32 sq_gpr_resource_mgmt_1 = 0;
1705 u32 sq_gpr_resource_mgmt_2 = 0;
1706 u32 sq_thread_resource_mgmt = 0;
1707 u32 sq_stack_resource_mgmt_1 = 0;
1708 u32 sq_stack_resource_mgmt_2 = 0;
Alex Deucher416a2bd2012-05-31 19:00:25 -04001709 u32 disabled_rb_mask;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001710
Alex Deucher416a2bd2012-05-31 19:00:25 -04001711 rdev->config.r600.tiling_group_size = 256;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001712 switch (rdev->family) {
1713 case CHIP_R600:
1714 rdev->config.r600.max_pipes = 4;
1715 rdev->config.r600.max_tile_pipes = 8;
1716 rdev->config.r600.max_simds = 4;
1717 rdev->config.r600.max_backends = 4;
1718 rdev->config.r600.max_gprs = 256;
1719 rdev->config.r600.max_threads = 192;
1720 rdev->config.r600.max_stack_entries = 256;
1721 rdev->config.r600.max_hw_contexts = 8;
1722 rdev->config.r600.max_gs_threads = 16;
1723 rdev->config.r600.sx_max_export_size = 128;
1724 rdev->config.r600.sx_max_export_pos_size = 16;
1725 rdev->config.r600.sx_max_export_smx_size = 128;
1726 rdev->config.r600.sq_num_cf_insts = 2;
1727 break;
1728 case CHIP_RV630:
1729 case CHIP_RV635:
1730 rdev->config.r600.max_pipes = 2;
1731 rdev->config.r600.max_tile_pipes = 2;
1732 rdev->config.r600.max_simds = 3;
1733 rdev->config.r600.max_backends = 1;
1734 rdev->config.r600.max_gprs = 128;
1735 rdev->config.r600.max_threads = 192;
1736 rdev->config.r600.max_stack_entries = 128;
1737 rdev->config.r600.max_hw_contexts = 8;
1738 rdev->config.r600.max_gs_threads = 4;
1739 rdev->config.r600.sx_max_export_size = 128;
1740 rdev->config.r600.sx_max_export_pos_size = 16;
1741 rdev->config.r600.sx_max_export_smx_size = 128;
1742 rdev->config.r600.sq_num_cf_insts = 2;
1743 break;
1744 case CHIP_RV610:
1745 case CHIP_RV620:
1746 case CHIP_RS780:
1747 case CHIP_RS880:
1748 rdev->config.r600.max_pipes = 1;
1749 rdev->config.r600.max_tile_pipes = 1;
1750 rdev->config.r600.max_simds = 2;
1751 rdev->config.r600.max_backends = 1;
1752 rdev->config.r600.max_gprs = 128;
1753 rdev->config.r600.max_threads = 192;
1754 rdev->config.r600.max_stack_entries = 128;
1755 rdev->config.r600.max_hw_contexts = 4;
1756 rdev->config.r600.max_gs_threads = 4;
1757 rdev->config.r600.sx_max_export_size = 128;
1758 rdev->config.r600.sx_max_export_pos_size = 16;
1759 rdev->config.r600.sx_max_export_smx_size = 128;
1760 rdev->config.r600.sq_num_cf_insts = 1;
1761 break;
1762 case CHIP_RV670:
1763 rdev->config.r600.max_pipes = 4;
1764 rdev->config.r600.max_tile_pipes = 4;
1765 rdev->config.r600.max_simds = 4;
1766 rdev->config.r600.max_backends = 4;
1767 rdev->config.r600.max_gprs = 192;
1768 rdev->config.r600.max_threads = 192;
1769 rdev->config.r600.max_stack_entries = 256;
1770 rdev->config.r600.max_hw_contexts = 8;
1771 rdev->config.r600.max_gs_threads = 16;
1772 rdev->config.r600.sx_max_export_size = 128;
1773 rdev->config.r600.sx_max_export_pos_size = 16;
1774 rdev->config.r600.sx_max_export_smx_size = 128;
1775 rdev->config.r600.sq_num_cf_insts = 2;
1776 break;
1777 default:
1778 break;
1779 }
1780
1781 /* Initialize HDP */
1782 for (i = 0, j = 0; i < 32; i++, j += 0x18) {
1783 WREG32((0x2c14 + j), 0x00000000);
1784 WREG32((0x2c18 + j), 0x00000000);
1785 WREG32((0x2c1c + j), 0x00000000);
1786 WREG32((0x2c20 + j), 0x00000000);
1787 WREG32((0x2c24 + j), 0x00000000);
1788 }
1789
1790 WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
1791
1792 /* Setup tiling */
1793 tiling_config = 0;
1794 ramcfg = RREG32(RAMCFG);
1795 switch (rdev->config.r600.max_tile_pipes) {
1796 case 1:
1797 tiling_config |= PIPE_TILING(0);
1798 break;
1799 case 2:
1800 tiling_config |= PIPE_TILING(1);
1801 break;
1802 case 4:
1803 tiling_config |= PIPE_TILING(2);
1804 break;
1805 case 8:
1806 tiling_config |= PIPE_TILING(3);
1807 break;
1808 default:
1809 break;
1810 }
Alex Deucherd03f5d52010-02-19 16:22:31 -05001811 rdev->config.r600.tiling_npipes = rdev->config.r600.max_tile_pipes;
Jerome Glisse961fb592010-02-10 22:30:05 +00001812 rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001813 tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
Alex Deucher881fe6c2010-10-18 23:54:56 -04001814 tiling_config |= GROUP_SIZE((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
Alex Deucher416a2bd2012-05-31 19:00:25 -04001815
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001816 tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
1817 if (tmp > 3) {
1818 tiling_config |= ROW_TILING(3);
1819 tiling_config |= SAMPLE_SPLIT(3);
1820 } else {
1821 tiling_config |= ROW_TILING(tmp);
1822 tiling_config |= SAMPLE_SPLIT(tmp);
1823 }
1824 tiling_config |= BANK_SWAPS(1);
Alex Deucherd03f5d52010-02-19 16:22:31 -05001825
1826 cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
Alex Deucher416a2bd2012-05-31 19:00:25 -04001827 tmp = R6XX_MAX_BACKENDS -
1828 r600_count_pipe_bits((cc_rb_backend_disable >> 16) & R6XX_MAX_BACKENDS_MASK);
1829 if (tmp < rdev->config.r600.max_backends) {
1830 rdev->config.r600.max_backends = tmp;
1831 }
Alex Deucherd03f5d52010-02-19 16:22:31 -05001832
Alex Deucher416a2bd2012-05-31 19:00:25 -04001833 cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0x00ffff00;
1834 tmp = R6XX_MAX_PIPES -
1835 r600_count_pipe_bits((cc_gc_shader_pipe_config >> 8) & R6XX_MAX_PIPES_MASK);
1836 if (tmp < rdev->config.r600.max_pipes) {
1837 rdev->config.r600.max_pipes = tmp;
1838 }
1839 tmp = R6XX_MAX_SIMDS -
1840 r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R6XX_MAX_SIMDS_MASK);
1841 if (tmp < rdev->config.r600.max_simds) {
1842 rdev->config.r600.max_simds = tmp;
1843 }
Alex Deucherd03f5d52010-02-19 16:22:31 -05001844
Alex Deucher416a2bd2012-05-31 19:00:25 -04001845 disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R6XX_MAX_BACKENDS_MASK;
1846 tmp = (tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT;
1847 tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.r600.max_backends,
1848 R6XX_MAX_BACKENDS, disabled_rb_mask);
1849 tiling_config |= tmp << 16;
1850 rdev->config.r600.backend_map = tmp;
1851
Alex Deuchere7aeeba2010-06-04 13:10:12 -04001852 rdev->config.r600.tile_config = tiling_config;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001853 WREG32(GB_TILING_CONFIG, tiling_config);
1854 WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
1855 WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);
Alex Deucher4d756582012-09-27 15:08:35 -04001856 WREG32(DMA_TILING_CONFIG, tiling_config & 0xffff);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001857
Alex Deucherd03f5d52010-02-19 16:22:31 -05001858 tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001859 WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
1860 WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK);
1861
1862 /* Setup some CP states */
1863 WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | ROQ_IB2_START(0x2b)));
1864 WREG32(CP_MEQ_THRESHOLDS, (MEQ_END(0x40) | ROQ_END(0x40)));
1865
1866 WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO | SYNC_GRADIENT |
1867 SYNC_WALKER | SYNC_ALIGNER));
1868 /* Setup various GPU states */
1869 if (rdev->family == CHIP_RV670)
1870 WREG32(ARB_GDEC_RD_CNTL, 0x00000021);
1871
1872 tmp = RREG32(SX_DEBUG_1);
1873 tmp |= SMX_EVENT_RELEASE;
1874 if ((rdev->family > CHIP_R600))
1875 tmp |= ENABLE_NEW_SMX_ADDRESS;
1876 WREG32(SX_DEBUG_1, tmp);
1877
1878 if (((rdev->family) == CHIP_R600) ||
1879 ((rdev->family) == CHIP_RV630) ||
1880 ((rdev->family) == CHIP_RV610) ||
1881 ((rdev->family) == CHIP_RV620) ||
Alex Deucheree59f2b2009-11-05 13:11:46 -05001882 ((rdev->family) == CHIP_RS780) ||
1883 ((rdev->family) == CHIP_RS880)) {
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001884 WREG32(DB_DEBUG, PREZ_MUST_WAIT_FOR_POSTZ_DONE);
1885 } else {
1886 WREG32(DB_DEBUG, 0);
1887 }
1888 WREG32(DB_WATERMARKS, (DEPTH_FREE(4) | DEPTH_CACHELINE_FREE(16) |
1889 DEPTH_FLUSH(16) | DEPTH_PENDING_FREE(4)));
1890
1891 WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
1892 WREG32(VGT_NUM_INSTANCES, 0);
1893
1894 WREG32(SPI_CONFIG_CNTL, GPR_WRITE_PRIORITY(0));
1895 WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(0));
1896
1897 tmp = RREG32(SQ_MS_FIFO_SIZES);
1898 if (((rdev->family) == CHIP_RV610) ||
1899 ((rdev->family) == CHIP_RV620) ||
Alex Deucheree59f2b2009-11-05 13:11:46 -05001900 ((rdev->family) == CHIP_RS780) ||
1901 ((rdev->family) == CHIP_RS880)) {
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001902 tmp = (CACHE_FIFO_SIZE(0xa) |
1903 FETCH_FIFO_HIWATER(0xa) |
1904 DONE_FIFO_HIWATER(0xe0) |
1905 ALU_UPDATE_FIFO_HIWATER(0x8));
1906 } else if (((rdev->family) == CHIP_R600) ||
1907 ((rdev->family) == CHIP_RV630)) {
1908 tmp &= ~DONE_FIFO_HIWATER(0xff);
1909 tmp |= DONE_FIFO_HIWATER(0x4);
1910 }
1911 WREG32(SQ_MS_FIFO_SIZES, tmp);
1912
1913 /* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT
1914 * should be adjusted as needed by the 2D/3D drivers. This just sets default values
1915 */
1916 sq_config = RREG32(SQ_CONFIG);
1917 sq_config &= ~(PS_PRIO(3) |
1918 VS_PRIO(3) |
1919 GS_PRIO(3) |
1920 ES_PRIO(3));
1921 sq_config |= (DX9_CONSTS |
1922 VC_ENABLE |
1923 PS_PRIO(0) |
1924 VS_PRIO(1) |
1925 GS_PRIO(2) |
1926 ES_PRIO(3));
1927
1928 if ((rdev->family) == CHIP_R600) {
1929 sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(124) |
1930 NUM_VS_GPRS(124) |
1931 NUM_CLAUSE_TEMP_GPRS(4));
1932 sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(0) |
1933 NUM_ES_GPRS(0));
1934 sq_thread_resource_mgmt = (NUM_PS_THREADS(136) |
1935 NUM_VS_THREADS(48) |
1936 NUM_GS_THREADS(4) |
1937 NUM_ES_THREADS(4));
1938 sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(128) |
1939 NUM_VS_STACK_ENTRIES(128));
1940 sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(0) |
1941 NUM_ES_STACK_ENTRIES(0));
1942 } else if (((rdev->family) == CHIP_RV610) ||
1943 ((rdev->family) == CHIP_RV620) ||
Alex Deucheree59f2b2009-11-05 13:11:46 -05001944 ((rdev->family) == CHIP_RS780) ||
1945 ((rdev->family) == CHIP_RS880)) {
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001946 /* no vertex cache */
1947 sq_config &= ~VC_ENABLE;
1948
1949 sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
1950 NUM_VS_GPRS(44) |
1951 NUM_CLAUSE_TEMP_GPRS(2));
1952 sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
1953 NUM_ES_GPRS(17));
1954 sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
1955 NUM_VS_THREADS(78) |
1956 NUM_GS_THREADS(4) |
1957 NUM_ES_THREADS(31));
1958 sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
1959 NUM_VS_STACK_ENTRIES(40));
1960 sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
1961 NUM_ES_STACK_ENTRIES(16));
1962 } else if (((rdev->family) == CHIP_RV630) ||
1963 ((rdev->family) == CHIP_RV635)) {
1964 sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
1965 NUM_VS_GPRS(44) |
1966 NUM_CLAUSE_TEMP_GPRS(2));
1967 sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(18) |
1968 NUM_ES_GPRS(18));
1969 sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
1970 NUM_VS_THREADS(78) |
1971 NUM_GS_THREADS(4) |
1972 NUM_ES_THREADS(31));
1973 sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
1974 NUM_VS_STACK_ENTRIES(40));
1975 sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
1976 NUM_ES_STACK_ENTRIES(16));
1977 } else if ((rdev->family) == CHIP_RV670) {
1978 sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
1979 NUM_VS_GPRS(44) |
1980 NUM_CLAUSE_TEMP_GPRS(2));
1981 sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
1982 NUM_ES_GPRS(17));
1983 sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
1984 NUM_VS_THREADS(78) |
1985 NUM_GS_THREADS(4) |
1986 NUM_ES_THREADS(31));
1987 sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(64) |
1988 NUM_VS_STACK_ENTRIES(64));
1989 sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(64) |
1990 NUM_ES_STACK_ENTRIES(64));
1991 }
1992
1993 WREG32(SQ_CONFIG, sq_config);
1994 WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1);
1995 WREG32(SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2);
1996 WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
1997 WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
1998 WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);
1999
2000 if (((rdev->family) == CHIP_RV610) ||
2001 ((rdev->family) == CHIP_RV620) ||
Alex Deucheree59f2b2009-11-05 13:11:46 -05002002 ((rdev->family) == CHIP_RS780) ||
2003 ((rdev->family) == CHIP_RS880)) {
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002004 WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(TC_ONLY));
2005 } else {
2006 WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC));
2007 }
2008
2009 /* More default values. 2D/3D driver should adjust as needed */
2010 WREG32(PA_SC_AA_SAMPLE_LOCS_2S, (S0_X(0xc) | S0_Y(0x4) |
2011 S1_X(0x4) | S1_Y(0xc)));
2012 WREG32(PA_SC_AA_SAMPLE_LOCS_4S, (S0_X(0xe) | S0_Y(0xe) |
2013 S1_X(0x2) | S1_Y(0x2) |
2014 S2_X(0xa) | S2_Y(0x6) |
2015 S3_X(0x6) | S3_Y(0xa)));
2016 WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD0, (S0_X(0xe) | S0_Y(0xb) |
2017 S1_X(0x4) | S1_Y(0xc) |
2018 S2_X(0x1) | S2_Y(0x6) |
2019 S3_X(0xa) | S3_Y(0xe)));
2020 WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD1, (S4_X(0x6) | S4_Y(0x1) |
2021 S5_X(0x0) | S5_Y(0x0) |
2022 S6_X(0xb) | S6_Y(0x4) |
2023 S7_X(0x7) | S7_Y(0x8)));
2024
2025 WREG32(VGT_STRMOUT_EN, 0);
2026 tmp = rdev->config.r600.max_pipes * 16;
2027 switch (rdev->family) {
2028 case CHIP_RV610:
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002029 case CHIP_RV620:
Alex Deucheree59f2b2009-11-05 13:11:46 -05002030 case CHIP_RS780:
2031 case CHIP_RS880:
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002032 tmp += 32;
2033 break;
2034 case CHIP_RV670:
2035 tmp += 128;
2036 break;
2037 default:
2038 break;
2039 }
2040 if (tmp > 256) {
2041 tmp = 256;
2042 }
2043 WREG32(VGT_ES_PER_GS, 128);
2044 WREG32(VGT_GS_PER_ES, tmp);
2045 WREG32(VGT_GS_PER_VS, 2);
2046 WREG32(VGT_GS_VERTEX_REUSE, 16);
2047
2048 /* more default values. 2D/3D driver should adjust as needed */
2049 WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
2050 WREG32(VGT_STRMOUT_EN, 0);
2051 WREG32(SX_MISC, 0);
2052 WREG32(PA_SC_MODE_CNTL, 0);
2053 WREG32(PA_SC_AA_CONFIG, 0);
2054 WREG32(PA_SC_LINE_STIPPLE, 0);
2055 WREG32(SPI_INPUT_Z, 0);
2056 WREG32(SPI_PS_IN_CONTROL_0, NUM_INTERP(2));
2057 WREG32(CB_COLOR7_FRAG, 0);
2058
2059 /* Clear render buffer base addresses */
2060 WREG32(CB_COLOR0_BASE, 0);
2061 WREG32(CB_COLOR1_BASE, 0);
2062 WREG32(CB_COLOR2_BASE, 0);
2063 WREG32(CB_COLOR3_BASE, 0);
2064 WREG32(CB_COLOR4_BASE, 0);
2065 WREG32(CB_COLOR5_BASE, 0);
2066 WREG32(CB_COLOR6_BASE, 0);
2067 WREG32(CB_COLOR7_BASE, 0);
2068 WREG32(CB_COLOR7_FRAG, 0);
2069
2070 switch (rdev->family) {
2071 case CHIP_RV610:
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002072 case CHIP_RV620:
Alex Deucheree59f2b2009-11-05 13:11:46 -05002073 case CHIP_RS780:
2074 case CHIP_RS880:
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002075 tmp = TC_L2_SIZE(8);
2076 break;
2077 case CHIP_RV630:
2078 case CHIP_RV635:
2079 tmp = TC_L2_SIZE(4);
2080 break;
2081 case CHIP_R600:
2082 tmp = TC_L2_SIZE(0) | L2_DISABLE_LATE_HIT;
2083 break;
2084 default:
2085 tmp = TC_L2_SIZE(0);
2086 break;
2087 }
2088 WREG32(TC_CNTL, tmp);
2089
2090 tmp = RREG32(HDP_HOST_PATH_CNTL);
2091 WREG32(HDP_HOST_PATH_CNTL, tmp);
2092
2093 tmp = RREG32(ARB_POP);
2094 tmp |= ENABLE_TC128;
2095 WREG32(ARB_POP, tmp);
2096
2097 WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
2098 WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
2099 NUM_CLIP_SEQ(3)));
2100 WREG32(PA_SC_ENHANCE, FORCE_EOV_MAX_CLK_CNT(4095));
Alex Deucherb866d132012-06-14 22:06:36 +02002101 WREG32(VC_ENHANCE, 0);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002102}
2103
2104
Jerome Glisse771fe6b2009-06-05 14:42:42 +02002105/*
2106 * Indirect registers accessor
2107 */
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002108u32 r600_pciep_rreg(struct radeon_device *rdev, u32 reg)
Jerome Glisse771fe6b2009-06-05 14:42:42 +02002109{
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002110 u32 r;
Jerome Glisse771fe6b2009-06-05 14:42:42 +02002111
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002112 WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
2113 (void)RREG32(PCIE_PORT_INDEX);
2114 r = RREG32(PCIE_PORT_DATA);
Jerome Glisse771fe6b2009-06-05 14:42:42 +02002115 return r;
2116}
2117
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002118void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
Jerome Glisse771fe6b2009-06-05 14:42:42 +02002119{
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002120 WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
2121 (void)RREG32(PCIE_PORT_INDEX);
2122 WREG32(PCIE_PORT_DATA, (v));
2123 (void)RREG32(PCIE_PORT_DATA);
2124}
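
/*
 * Note (editorial): PCIE_PORT_INDEX/PCIE_PORT_DATA form a classic
 * index/data register window; the (void) reads after each write appear
 * to act as read-backs that flush the posted write before the access
 * is relied upon.
 */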
2125
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002126/*
2127 * CP & Ring
2128 */
2129void r600_cp_stop(struct radeon_device *rdev)
2130{
Dave Airlie53595332011-03-14 09:47:24 +10002131 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002132 WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
Alex Deucher724c80e2010-08-27 18:25:25 -04002133 WREG32(SCRATCH_UMSK, 0);
Alex Deucher4d756582012-09-27 15:08:35 -04002134 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002135}
2136
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002137int r600_init_microcode(struct radeon_device *rdev)
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002138{
2139 struct platform_device *pdev;
2140 const char *chip_name;
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002141 const char *rlc_chip_name;
2142 size_t pfp_req_size, me_req_size, rlc_req_size;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002143 char fw_name[30];
2144 int err;
2145
2146 DRM_DEBUG("\n");
2147
2148 pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
2149 err = IS_ERR(pdev);
2150 if (err) {
2151 printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
2152 return -EINVAL;
2153 }
2154
2155 switch (rdev->family) {
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002156 case CHIP_R600:
2157 chip_name = "R600";
2158 rlc_chip_name = "R600";
2159 break;
2160 case CHIP_RV610:
2161 chip_name = "RV610";
2162 rlc_chip_name = "R600";
2163 break;
2164 case CHIP_RV630:
2165 chip_name = "RV630";
2166 rlc_chip_name = "R600";
2167 break;
2168 case CHIP_RV620:
2169 chip_name = "RV620";
2170 rlc_chip_name = "R600";
2171 break;
2172 case CHIP_RV635:
2173 chip_name = "RV635";
2174 rlc_chip_name = "R600";
2175 break;
2176 case CHIP_RV670:
2177 chip_name = "RV670";
2178 rlc_chip_name = "R600";
2179 break;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002180 case CHIP_RS780:
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002181 case CHIP_RS880:
2182 chip_name = "RS780";
2183 rlc_chip_name = "R600";
2184 break;
2185 case CHIP_RV770:
2186 chip_name = "RV770";
2187 rlc_chip_name = "R700";
2188 break;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002189 case CHIP_RV730:
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002190 case CHIP_RV740:
2191 chip_name = "RV730";
2192 rlc_chip_name = "R700";
2193 break;
2194 case CHIP_RV710:
2195 chip_name = "RV710";
2196 rlc_chip_name = "R700";
2197 break;
Alex Deucherfe251e22010-03-24 13:36:43 -04002198 case CHIP_CEDAR:
2199 chip_name = "CEDAR";
Alex Deucher45f9a392010-03-24 13:55:51 -04002200 rlc_chip_name = "CEDAR";
Alex Deucherfe251e22010-03-24 13:36:43 -04002201 break;
2202 case CHIP_REDWOOD:
2203 chip_name = "REDWOOD";
Alex Deucher45f9a392010-03-24 13:55:51 -04002204 rlc_chip_name = "REDWOOD";
Alex Deucherfe251e22010-03-24 13:36:43 -04002205 break;
2206 case CHIP_JUNIPER:
2207 chip_name = "JUNIPER";
Alex Deucher45f9a392010-03-24 13:55:51 -04002208 rlc_chip_name = "JUNIPER";
Alex Deucherfe251e22010-03-24 13:36:43 -04002209 break;
2210 case CHIP_CYPRESS:
2211 case CHIP_HEMLOCK:
2212 chip_name = "CYPRESS";
Alex Deucher45f9a392010-03-24 13:55:51 -04002213 rlc_chip_name = "CYPRESS";
Alex Deucherfe251e22010-03-24 13:36:43 -04002214 break;
Alex Deucher439bd6c2010-11-22 17:56:31 -05002215 case CHIP_PALM:
2216 chip_name = "PALM";
2217 rlc_chip_name = "SUMO";
2218 break;
Alex Deucherd5c5a722011-05-31 15:42:48 -04002219 case CHIP_SUMO:
2220 chip_name = "SUMO";
2221 rlc_chip_name = "SUMO";
2222 break;
2223 case CHIP_SUMO2:
2224 chip_name = "SUMO2";
2225 rlc_chip_name = "SUMO";
2226 break;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002227 default: BUG();
2228 }
2229
Alex Deucherfe251e22010-03-24 13:36:43 -04002230 if (rdev->family >= CHIP_CEDAR) {
2231 pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
2232 me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
Alex Deucher45f9a392010-03-24 13:55:51 -04002233 rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
Alex Deucherfe251e22010-03-24 13:36:43 -04002234 } else if (rdev->family >= CHIP_RV770) {
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002235 pfp_req_size = R700_PFP_UCODE_SIZE * 4;
2236 me_req_size = R700_PM4_UCODE_SIZE * 4;
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002237 rlc_req_size = R700_RLC_UCODE_SIZE * 4;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002238 } else {
Alex Deucher138e4e12013-01-11 15:33:13 -05002239 pfp_req_size = R600_PFP_UCODE_SIZE * 4;
2240 me_req_size = R600_PM4_UCODE_SIZE * 12;
2241 rlc_req_size = R600_RLC_UCODE_SIZE * 4;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002242 }
2243
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002244 DRM_INFO("Loading %s Microcode\n", chip_name);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002245
2246 snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
2247 err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
2248 if (err)
2249 goto out;
2250 if (rdev->pfp_fw->size != pfp_req_size) {
2251 printk(KERN_ERR
2252 "r600_cp: Bogus length %zu in firmware \"%s\"\n",
2253 rdev->pfp_fw->size, fw_name);
2254 err = -EINVAL;
2255 goto out;
2256 }
2257
2258 snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
2259 err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
2260 if (err)
2261 goto out;
2262 if (rdev->me_fw->size != me_req_size) {
2263 printk(KERN_ERR
2264 "r600_cp: Bogus length %zu in firmware \"%s\"\n",
2265 rdev->me_fw->size, fw_name);
2266 err = -EINVAL;
2267 }
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002268
2269 snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
2270 err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev);
2271 if (err)
2272 goto out;
2273 if (rdev->rlc_fw->size != rlc_req_size) {
2274 printk(KERN_ERR
2275 "r600_rlc: Bogus length %zu in firmware \"%s\"\n",
2276 rdev->rlc_fw->size, fw_name);
2277 err = -EINVAL;
2278 }
2279
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002280out:
2281 platform_device_unregister(pdev);
2282
2283 if (err) {
2284 if (err != -EINVAL)
2285 printk(KERN_ERR
2286 "r600_cp: Failed to load firmware \"%s\"\n",
2287 fw_name);
2288 release_firmware(rdev->pfp_fw);
2289 rdev->pfp_fw = NULL;
2290 release_firmware(rdev->me_fw);
2291 rdev->me_fw = NULL;
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002292 release_firmware(rdev->rlc_fw);
2293 rdev->rlc_fw = NULL;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002294 }
2295 return err;
2296}
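
/*
 * Example (derived from the switch above, for illustration): on an
 * RV770 the requests resolve to "radeon/RV770_pfp.bin",
 * "radeon/RV770_me.bin" and "radeon/R700_rlc.bin", and each blob is
 * rejected with -EINVAL if its size does not match the expected
 * *_req_size for the family.
 */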
2297
2298static int r600_cp_load_microcode(struct radeon_device *rdev)
2299{
2300 const __be32 *fw_data;
2301 int i;
2302
2303 if (!rdev->me_fw || !rdev->pfp_fw)
2304 return -EINVAL;
2305
2306 r600_cp_stop(rdev);
2307
Cédric Cano4eace7f2011-02-11 19:45:38 -05002308 WREG32(CP_RB_CNTL,
2309#ifdef __BIG_ENDIAN
2310 BUF_SWAP_32BIT |
2311#endif
2312 RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002313
2314 /* Reset cp */
2315 WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
2316 RREG32(GRBM_SOFT_RESET);
2317 mdelay(15);
2318 WREG32(GRBM_SOFT_RESET, 0);
2319
2320 WREG32(CP_ME_RAM_WADDR, 0);
2321
2322 fw_data = (const __be32 *)rdev->me_fw->data;
2323 WREG32(CP_ME_RAM_WADDR, 0);
Alex Deucher138e4e12013-01-11 15:33:13 -05002324 for (i = 0; i < R600_PM4_UCODE_SIZE * 3; i++)
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002325 WREG32(CP_ME_RAM_DATA,
2326 be32_to_cpup(fw_data++));
2327
2328 fw_data = (const __be32 *)rdev->pfp_fw->data;
2329 WREG32(CP_PFP_UCODE_ADDR, 0);
Alex Deucher138e4e12013-01-11 15:33:13 -05002330 for (i = 0; i < R600_PFP_UCODE_SIZE; i++)
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002331 WREG32(CP_PFP_UCODE_DATA,
2332 be32_to_cpup(fw_data++));
2333
2334 WREG32(CP_PFP_UCODE_ADDR, 0);
2335 WREG32(CP_ME_RAM_WADDR, 0);
2336 WREG32(CP_ME_RAM_RADDR, 0);
2337 return 0;
2338}
2339
2340int r600_cp_start(struct radeon_device *rdev)
2341{
Christian Könige32eb502011-10-23 12:56:27 +02002342 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002343 int r;
2344 uint32_t cp_me;
2345
Christian Könige32eb502011-10-23 12:56:27 +02002346 r = radeon_ring_lock(rdev, ring, 7);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002347 if (r) {
2348 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
2349 return r;
2350 }
Christian Könige32eb502011-10-23 12:56:27 +02002351 radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
2352 radeon_ring_write(ring, 0x1);
Alex Deucher7e7b41d2010-09-02 21:32:32 -04002353 if (rdev->family >= CHIP_RV770) {
Christian Könige32eb502011-10-23 12:56:27 +02002354 radeon_ring_write(ring, 0x0);
2355 radeon_ring_write(ring, rdev->config.rv770.max_hw_contexts - 1);
Alex Deucherfe251e22010-03-24 13:36:43 -04002356 } else {
Christian Könige32eb502011-10-23 12:56:27 +02002357 radeon_ring_write(ring, 0x3);
2358 radeon_ring_write(ring, rdev->config.r600.max_hw_contexts - 1);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002359 }
Christian Könige32eb502011-10-23 12:56:27 +02002360 radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
2361 radeon_ring_write(ring, 0);
2362 radeon_ring_write(ring, 0);
2363 radeon_ring_unlock_commit(rdev, ring);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002364
2365 cp_me = 0xff;
2366 WREG32(R_0086D8_CP_ME_CNTL, cp_me);
2367 return 0;
2368}
2369
2370int r600_cp_resume(struct radeon_device *rdev)
2371{
Christian Könige32eb502011-10-23 12:56:27 +02002372 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002373 u32 tmp;
2374 u32 rb_bufsz;
2375 int r;
2376
2377 /* Reset cp */
2378 WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
2379 RREG32(GRBM_SOFT_RESET);
2380 mdelay(15);
2381 WREG32(GRBM_SOFT_RESET, 0);
2382
2383 /* Set ring buffer size */
Christian Könige32eb502011-10-23 12:56:27 +02002384 rb_bufsz = drm_order(ring->ring_size / 8);
Alex Deucher724c80e2010-08-27 18:25:25 -04002385 tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002386#ifdef __BIG_ENDIAN
Alex Deucherd6f28932009-11-02 16:01:27 -05002387 tmp |= BUF_SWAP_32BIT;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002388#endif
Alex Deucherd6f28932009-11-02 16:01:27 -05002389 WREG32(CP_RB_CNTL, tmp);
Christian König15d33322011-09-15 19:02:22 +02002390 WREG32(CP_SEM_WAIT_TIMER, 0x0);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002391
2392 /* Set the write pointer delay */
2393 WREG32(CP_RB_WPTR_DELAY, 0);
2394
2395 /* Initialize the ring buffer's read and write pointers */
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002396 WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
2397 WREG32(CP_RB_RPTR_WR, 0);
Christian Könige32eb502011-10-23 12:56:27 +02002398 ring->wptr = 0;
2399 WREG32(CP_RB_WPTR, ring->wptr);
Alex Deucher724c80e2010-08-27 18:25:25 -04002400
2401 /* set the wb address whether it's enabled or not */
Cédric Cano4eace7f2011-02-11 19:45:38 -05002402 WREG32(CP_RB_RPTR_ADDR,
Cédric Cano4eace7f2011-02-11 19:45:38 -05002403 ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
Alex Deucher724c80e2010-08-27 18:25:25 -04002404 WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
2405 WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
2406
2407 if (rdev->wb.enabled)
2408 WREG32(SCRATCH_UMSK, 0xff);
2409 else {
2410 tmp |= RB_NO_UPDATE;
2411 WREG32(SCRATCH_UMSK, 0);
2412 }
2413
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002414 mdelay(1);
2415 WREG32(CP_RB_CNTL, tmp);
2416
Christian Könige32eb502011-10-23 12:56:27 +02002417 WREG32(CP_RB_BASE, ring->gpu_addr >> 8);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002418 WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
2419
Christian Könige32eb502011-10-23 12:56:27 +02002420 ring->rptr = RREG32(CP_RB_RPTR);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002421
2422 r600_cp_start(rdev);
Christian Könige32eb502011-10-23 12:56:27 +02002423 ring->ready = true;
Alex Deucherf7128122012-02-23 17:53:45 -05002424 r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002425 if (r) {
Christian Könige32eb502011-10-23 12:56:27 +02002426 ring->ready = false;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002427 return r;
2428 }
2429 return 0;
2430}
2431
Christian Könige32eb502011-10-23 12:56:27 +02002432void r600_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size)
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002433{
2434 u32 rb_bufsz;
Christian König45df6802012-07-06 16:22:55 +02002435 int r;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002436
2437 /* Align ring size */
2438 rb_bufsz = drm_order(ring_size / 8);
2439 ring_size = (1 << (rb_bufsz + 1)) * 4;
Christian Könige32eb502011-10-23 12:56:27 +02002440 ring->ring_size = ring_size;
2441 ring->align_mask = 16 - 1;
Christian König45df6802012-07-06 16:22:55 +02002442
Alex Deucher89d35802012-07-17 14:02:31 -04002443 if (radeon_ring_supports_scratch_reg(rdev, ring)) {
2444 r = radeon_scratch_get(rdev, &ring->rptr_save_reg);
2445 if (r) {
2446 DRM_ERROR("failed to get scratch reg for rptr save (%d).\n", r);
2447 ring->rptr_save_reg = 0;
2448 }
Christian König45df6802012-07-06 16:22:55 +02002449 }
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002450}
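
/*
 * Worked example (values assumed): asking r600_ring_init() for a 1MB
 * ring gives rb_bufsz = drm_order(1048576 / 8) = 17 and an aligned
 * size of (1 << 18) * 4 = 1048576 bytes, i.e. a power-of-two request
 * passes through unchanged.
 */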
2451
Jerome Glisse655efd32010-02-02 11:51:45 +01002452void r600_cp_fini(struct radeon_device *rdev)
2453{
Christian König45df6802012-07-06 16:22:55 +02002454 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
Jerome Glisse655efd32010-02-02 11:51:45 +01002455 r600_cp_stop(rdev);
Christian König45df6802012-07-06 16:22:55 +02002456 radeon_ring_fini(rdev, ring);
2457 radeon_scratch_free(rdev, ring->rptr_save_reg);
Jerome Glisse655efd32010-02-02 11:51:45 +01002458}
2459
Alex Deucher4d756582012-09-27 15:08:35 -04002460/*
2461 * DMA
2462 * Starting with R600, the GPU has an asynchronous
2463 * DMA engine. The programming model is very similar
2464 * to the 3D engine (ring buffer, IBs, etc.), but the
2465 * DMA controller has its own packet format that is
2466 * different from the PM4 format used by the 3D engine.
2467 * It supports copying data, writing embedded data,
2468 * solid fills, and a number of other things. It also
2469 * has support for tiling/detiling of buffers.
2470 */
2471/**
2472 * r600_dma_stop - stop the async dma engine
2473 *
2474 * @rdev: radeon_device pointer
2475 *
2476 * Stop the async dma engine (r6xx-evergreen).
2477 */
2478void r600_dma_stop(struct radeon_device *rdev)
2479{
2480 u32 rb_cntl = RREG32(DMA_RB_CNTL);
2481
2482 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
2483
2484 rb_cntl &= ~DMA_RB_ENABLE;
2485 WREG32(DMA_RB_CNTL, rb_cntl);
2486
2487 rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
2488}
2489
2490/**
2491 * r600_dma_resume - setup and start the async dma engine
2492 *
2493 * @rdev: radeon_device pointer
2494 *
2495 * Set up the DMA ring buffer and enable it. (r6xx-evergreen).
2496 * Returns 0 for success, error for failure.
2497 */
2498int r600_dma_resume(struct radeon_device *rdev)
2499{
2500 struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
Michel Dänzerb3dfcb22013-01-24 19:02:01 +01002501 u32 rb_cntl, dma_cntl, ib_cntl;
Alex Deucher4d756582012-09-27 15:08:35 -04002502 u32 rb_bufsz;
2503 int r;
2504
2505 /* Reset dma */
2506 if (rdev->family >= CHIP_RV770)
2507 WREG32(SRBM_SOFT_RESET, RV770_SOFT_RESET_DMA);
2508 else
2509 WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA);
2510 RREG32(SRBM_SOFT_RESET);
2511 udelay(50);
2512 WREG32(SRBM_SOFT_RESET, 0);
2513
2514 WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL, 0);
2515 WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL, 0);
2516
2517 /* Set ring buffer size in dwords */
2518 rb_bufsz = drm_order(ring->ring_size / 4);
2519 rb_cntl = rb_bufsz << 1;
2520#ifdef __BIG_ENDIAN
2521 rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
2522#endif
2523 WREG32(DMA_RB_CNTL, rb_cntl);
2524
2525 /* Initialize the ring buffer's read and write pointers */
2526 WREG32(DMA_RB_RPTR, 0);
2527 WREG32(DMA_RB_WPTR, 0);
2528
2529 /* set the wb address whether it's enabled or not */
2530 WREG32(DMA_RB_RPTR_ADDR_HI,
2531 upper_32_bits(rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFF);
2532 WREG32(DMA_RB_RPTR_ADDR_LO,
2533 ((rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFFFFFFFC));
2534
2535 if (rdev->wb.enabled)
2536 rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;
2537
2538 WREG32(DMA_RB_BASE, ring->gpu_addr >> 8);
2539
2540 /* enable DMA IBs */
Michel Dänzerb3dfcb22013-01-24 19:02:01 +01002541 ib_cntl = DMA_IB_ENABLE;
2542#ifdef __BIG_ENDIAN
2543 ib_cntl |= DMA_IB_SWAP_ENABLE;
2544#endif
2545 WREG32(DMA_IB_CNTL, ib_cntl);
Alex Deucher4d756582012-09-27 15:08:35 -04002546
2547 dma_cntl = RREG32(DMA_CNTL);
2548 dma_cntl &= ~CTXEMPTY_INT_ENABLE;
2549 WREG32(DMA_CNTL, dma_cntl);
2550
2551 if (rdev->family >= CHIP_RV770)
2552 WREG32(DMA_MODE, 1);
2553
2554 ring->wptr = 0;
2555 WREG32(DMA_RB_WPTR, ring->wptr << 2);
2556
2557 ring->rptr = RREG32(DMA_RB_RPTR) >> 2;
2558
2559 WREG32(DMA_RB_CNTL, rb_cntl | DMA_RB_ENABLE);
2560
2561 ring->ready = true;
2562
2563 r = radeon_ring_test(rdev, R600_RING_TYPE_DMA_INDEX, ring);
2564 if (r) {
2565 ring->ready = false;
2566 return r;
2567 }
2568
2569 radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
2570
2571 return 0;
2572}
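
/*
 * Note (editorial): unlike the CP ring, the DMA ring size above is
 * programmed as log2 of the size in dwords (ring_size / 4), and the
 * rptr writeback and IB enables live in DMA_RB_CNTL/DMA_IB_CNTL rather
 * than behind a scratch mask.
 */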
2573
2574/**
2575 * r600_dma_fini - tear down the async dma engine
2576 *
2577 * @rdev: radeon_device pointer
2578 *
2579 * Stop the async dma engine and free the ring (r6xx-evergreen).
2580 */
2581void r600_dma_fini(struct radeon_device *rdev)
2582{
2583 r600_dma_stop(rdev);
2584 radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
2585}
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002586
2587/*
Christian Königf2ba57b2013-04-08 12:41:29 +02002588 * UVD
2589 */
2590int r600_uvd_rbc_start(struct radeon_device *rdev)
2591{
2592 struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
2593 uint64_t rptr_addr;
2594 uint32_t rb_bufsz, tmp;
2595 int r;
2596
2597 rptr_addr = rdev->wb.gpu_addr + R600_WB_UVD_RPTR_OFFSET;
2598
2599 if (upper_32_bits(rptr_addr) != upper_32_bits(ring->gpu_addr)) {
2600 DRM_ERROR("UVD ring and rptr not in the same 4GB segment!\n");
2601 return -EINVAL;
2602 }
2603
2604 /* force RBC into idle state */
2605 WREG32(UVD_RBC_RB_CNTL, 0x11010101);
2606
2607 /* Set the write pointer delay */
2608 WREG32(UVD_RBC_RB_WPTR_CNTL, 0);
2609
2610 /* set the wb address */
2611 WREG32(UVD_RBC_RB_RPTR_ADDR, rptr_addr >> 2);
2612
2613	/* program the 4GB memory segment for rptr and ring buffer */
2614 WREG32(UVD_LMI_EXT40_ADDR, upper_32_bits(rptr_addr) |
2615 (0x7 << 16) | (0x1 << 31));
2616
2617 /* Initialize the ring buffer's read and write pointers */
2618 WREG32(UVD_RBC_RB_RPTR, 0x0);
2619
2620 ring->wptr = ring->rptr = RREG32(UVD_RBC_RB_RPTR);
2621 WREG32(UVD_RBC_RB_WPTR, ring->wptr);
2622
2623 /* set the ring address */
2624 WREG32(UVD_RBC_RB_BASE, ring->gpu_addr);
2625
2626 /* Set ring buffer size */
2627 rb_bufsz = drm_order(ring->ring_size);
2628 rb_bufsz = (0x1 << 8) | rb_bufsz;
2629 WREG32(UVD_RBC_RB_CNTL, rb_bufsz);
2630
2631 ring->ready = true;
2632 r = radeon_ring_test(rdev, R600_RING_TYPE_UVD_INDEX, ring);
2633 if (r) {
2634 ring->ready = false;
2635 return r;
2636 }
2637
2638 r = radeon_ring_lock(rdev, ring, 10);
2639 if (r) {
2640		DRM_ERROR("radeon: failed to lock UVD ring (%d).\n", r);
2641 return r;
2642 }
2643
2644 tmp = PACKET0(UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
2645 radeon_ring_write(ring, tmp);
2646 radeon_ring_write(ring, 0xFFFFF);
2647
2648 tmp = PACKET0(UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
2649 radeon_ring_write(ring, tmp);
2650 radeon_ring_write(ring, 0xFFFFF);
2651
2652 tmp = PACKET0(UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
2653 radeon_ring_write(ring, tmp);
2654 radeon_ring_write(ring, 0xFFFFF);
2655
2656 /* Clear timeout status bits */
2657 radeon_ring_write(ring, PACKET0(UVD_SEMA_TIMEOUT_STATUS, 0));
2658 radeon_ring_write(ring, 0x8);
2659
2660 radeon_ring_write(ring, PACKET0(UVD_SEMA_CNTL, 0));
Christian König03708b052013-04-23 11:01:31 +02002661 radeon_ring_write(ring, 3);
Christian Königf2ba57b2013-04-08 12:41:29 +02002662
2663 radeon_ring_unlock_commit(rdev, ring);
2664
2665 return 0;
2666}
2667
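/*
 * Sketch (assumptions noted in comments, not driver code): the check in
 * r600_uvd_rbc_start() above works because the UVD block can only
 * address one 4 GB window, programmed through UVD_LMI_EXT40_ADDR, so
 * the rptr writeback address and the ring base must share their upper
 * 32 bits. A minimal stand-in for that test:
 */
static inline bool sketch_same_4gb_segment(u64 a, u64 b)
{
	return (a >> 32) == (b >> 32);	/* same upper_32_bits() => same window */
}
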
2668void r600_uvd_rbc_stop(struct radeon_device *rdev)
2669{
2670 struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
2671
2672 /* force RBC into idle state */
2673 WREG32(UVD_RBC_RB_CNTL, 0x11010101);
2674 ring->ready = false;
2675}
2676
2677int r600_uvd_init(struct radeon_device *rdev)
2678{
2679 int i, j, r;
Alex Deucher9b1be4d2013-06-07 10:04:54 -04002680 /* disable byte swapping */
2681 u32 lmi_swap_cntl = 0;
2682 u32 mp_swap_cntl = 0;
Christian Königf2ba57b2013-04-08 12:41:29 +02002683
Christian Königb05e9e42013-04-19 16:14:19 +02002684 /* raise clocks while booting up the VCPU */
2685 radeon_set_uvd_clocks(rdev, 53300, 40000);
2686
Christian Königf2ba57b2013-04-08 12:41:29 +02002687 /* disable clock gating */
2688 WREG32(UVD_CGC_GATE, 0);
2689
2690	/* disable interrupts */
2691 WREG32_P(UVD_MASTINT_EN, 0, ~(1 << 1));
2692
2693 /* put LMI, VCPU, RBC etc... into reset */
2694 WREG32(UVD_SOFT_RESET, LMI_SOFT_RESET | VCPU_SOFT_RESET |
2695 LBSI_SOFT_RESET | RBC_SOFT_RESET | CSM_SOFT_RESET |
2696 CXW_SOFT_RESET | TAP_SOFT_RESET | LMI_UMC_SOFT_RESET);
2697 mdelay(5);
2698
2699 /* take UVD block out of reset */
2700 WREG32_P(SRBM_SOFT_RESET, 0, ~SOFT_RESET_UVD);
2701 mdelay(5);
2702
2703 /* initialize UVD memory controller */
2704 WREG32(UVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) |
2705 (1 << 21) | (1 << 9) | (1 << 20));
2706
Alex Deucher9b1be4d2013-06-07 10:04:54 -04002707#ifdef __BIG_ENDIAN
2708 /* swap (8 in 32) RB and IB */
2709 lmi_swap_cntl = 0xa;
2710 mp_swap_cntl = 0;
2711#endif
2712 WREG32(UVD_LMI_SWAP_CNTL, lmi_swap_cntl);
2713 WREG32(UVD_MP_SWAP_CNTL, mp_swap_cntl);
Christian Königf2ba57b2013-04-08 12:41:29 +02002714
2715 WREG32(UVD_MPC_SET_MUXA0, 0x40c2040);
2716 WREG32(UVD_MPC_SET_MUXA1, 0x0);
2717 WREG32(UVD_MPC_SET_MUXB0, 0x40c2040);
2718 WREG32(UVD_MPC_SET_MUXB1, 0x0);
2719 WREG32(UVD_MPC_SET_ALU, 0);
2720 WREG32(UVD_MPC_SET_MUX, 0x88);
2721
2722 /* Stall UMC */
2723 WREG32_P(UVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
2724 WREG32_P(UVD_RB_ARB_CTRL, 1 << 3, ~(1 << 3));
2725
2726 /* take all subblocks out of reset, except VCPU */
2727 WREG32(UVD_SOFT_RESET, VCPU_SOFT_RESET);
2728 mdelay(5);
2729
2730 /* enable VCPU clock */
2731 WREG32(UVD_VCPU_CNTL, 1 << 9);
2732
2733 /* enable UMC */
2734 WREG32_P(UVD_LMI_CTRL2, 0, ~(1 << 8));
2735
2736 /* boot up the VCPU */
2737 WREG32(UVD_SOFT_RESET, 0);
2738 mdelay(10);
2739
2740 WREG32_P(UVD_RB_ARB_CTRL, 0, ~(1 << 3));
2741
2742 for (i = 0; i < 10; ++i) {
2743 uint32_t status;
2744 for (j = 0; j < 100; ++j) {
2745 status = RREG32(UVD_STATUS);
2746 if (status & 2)
2747 break;
2748 mdelay(10);
2749 }
2750 r = 0;
2751 if (status & 2)
2752 break;
2753
2754 DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
2755 WREG32_P(UVD_SOFT_RESET, VCPU_SOFT_RESET, ~VCPU_SOFT_RESET);
2756 mdelay(10);
2757 WREG32_P(UVD_SOFT_RESET, 0, ~VCPU_SOFT_RESET);
2758 mdelay(10);
2759 r = -1;
2760 }
Christian Königb05e9e42013-04-19 16:14:19 +02002761
Christian Königf2ba57b2013-04-08 12:41:29 +02002762 if (r) {
2763 DRM_ERROR("UVD not responding, giving up!!!\n");
Christian Königb05e9e42013-04-19 16:14:19 +02002764 radeon_set_uvd_clocks(rdev, 0, 0);
Christian Königf2ba57b2013-04-08 12:41:29 +02002765 return r;
2766 }
Christian Königb05e9e42013-04-19 16:14:19 +02002767
Christian Königf2ba57b2013-04-08 12:41:29 +02002768	/* enable interrupts */
2769 WREG32_P(UVD_MASTINT_EN, 3<<1, ~(3 << 1));
2770
2771 r = r600_uvd_rbc_start(rdev);
Christian Königb05e9e42013-04-19 16:14:19 +02002772 if (!r)
2773 DRM_INFO("UVD initialized successfully.\n");
Christian Königf2ba57b2013-04-08 12:41:29 +02002774
Christian Königb05e9e42013-04-19 16:14:19 +02002775 /* lower clocks again */
2776 radeon_set_uvd_clocks(rdev, 0, 0);
2777
2778 return r;
Christian Königf2ba57b2013-04-08 12:41:29 +02002779}
2780
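/*
 * Sketch of the VCPU boot-poll pattern used in r600_uvd_init() above
 * (illustrative only): poll a ready bit with a bounded inner wait and,
 * on timeout, soft-reset the VCPU and retry a bounded number of times.
 * The function pointers stand in for RREG32(UVD_STATUS) and the
 * VCPU_SOFT_RESET toggle; the 10 x 100 bounds mirror the loop above.
 */
static int sketch_uvd_wait_boot(u32 (*read_status)(void),
				void (*reset_vcpu)(void))
{
	int try, poll;

	for (try = 0; try < 10; try++) {
		for (poll = 0; poll < 100; poll++) {
			if (read_status() & 2)	/* "VCPU running" bit, per above */
				return 0;
			/* mdelay(10) in the real code */
		}
		reset_vcpu();			/* pulse VCPU_SOFT_RESET */
	}
	return -1;				/* caller reports "giving up" */
}
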
2781/*
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002782 * GPU scratch registers helpers function.
2783 */
2784void r600_scratch_init(struct radeon_device *rdev)
2785{
2786 int i;
2787
2788 rdev->scratch.num_reg = 7;
Alex Deucher724c80e2010-08-27 18:25:25 -04002789 rdev->scratch.reg_base = SCRATCH_REG0;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002790 for (i = 0; i < rdev->scratch.num_reg; i++) {
2791 rdev->scratch.free[i] = true;
Alex Deucher724c80e2010-08-27 18:25:25 -04002792 rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002793 }
2794}
2795
Christian Könige32eb502011-10-23 12:56:27 +02002796int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002797{
2798 uint32_t scratch;
2799 uint32_t tmp = 0;
Alex Deucher8b25ed32012-07-17 14:02:30 -04002800 unsigned i;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002801 int r;
2802
2803 r = radeon_scratch_get(rdev, &scratch);
2804 if (r) {
2805 DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
2806 return r;
2807 }
2808 WREG32(scratch, 0xCAFEDEAD);
Christian Könige32eb502011-10-23 12:56:27 +02002809 r = radeon_ring_lock(rdev, ring, 3);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002810 if (r) {
Alex Deucher8b25ed32012-07-17 14:02:30 -04002811 DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n", ring->idx, r);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002812 radeon_scratch_free(rdev, scratch);
2813 return r;
2814 }
Christian Könige32eb502011-10-23 12:56:27 +02002815 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2816 radeon_ring_write(ring, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
2817 radeon_ring_write(ring, 0xDEADBEEF);
2818 radeon_ring_unlock_commit(rdev, ring);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002819 for (i = 0; i < rdev->usec_timeout; i++) {
2820 tmp = RREG32(scratch);
2821 if (tmp == 0xDEADBEEF)
2822 break;
2823 DRM_UDELAY(1);
2824 }
2825 if (i < rdev->usec_timeout) {
Alex Deucher8b25ed32012-07-17 14:02:30 -04002826 DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002827 } else {
Christian Königbf852792011-10-13 13:19:22 +02002828 DRM_ERROR("radeon: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
Alex Deucher8b25ed32012-07-17 14:02:30 -04002829 ring->idx, scratch, tmp);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002830 r = -EINVAL;
2831 }
2832 radeon_scratch_free(rdev, scratch);
2833 return r;
2834}
2835
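/*
 * Sketch of the scratch-register handshake above (illustrative, names
 * hypothetical): the CPU seeds a scratch register with 0xCAFEDEAD,
 * queues a packet asking the engine to overwrite it with 0xDEADBEEF,
 * then polls until the new value shows up or the usec timeout expires.
 */
static int sketch_scratch_poll(u32 (*read_scratch)(void), unsigned timeout_us)
{
	unsigned i;

	for (i = 0; i < timeout_us; i++) {
		if (read_scratch() == 0xDEADBEEF)
			return 0;	/* the engine executed our packet */
		/* DRM_UDELAY(1) in the real code */
	}
	return -EINVAL;
}
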
Alex Deucher4d756582012-09-27 15:08:35 -04002836/**
2837 * r600_dma_ring_test - simple async dma engine test
2838 *
2839 * @rdev: radeon_device pointer
2840 * @ring: radeon_ring structure holding ring information
2841 *
2842 * Test the DMA engine by using it to write a value
2843 * to memory (r6xx-SI).
2844 * Returns 0 for success, error for failure.
2845 */
2846int r600_dma_ring_test(struct radeon_device *rdev,
2847 struct radeon_ring *ring)
2848{
2849 unsigned i;
2850 int r;
2851 void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
2852 u32 tmp;
2853
2854 if (!ptr) {
2855 DRM_ERROR("invalid vram scratch pointer\n");
2856 return -EINVAL;
2857 }
2858
2859 tmp = 0xCAFEDEAD;
2860 writel(tmp, ptr);
2861
2862 r = radeon_ring_lock(rdev, ring, 4);
2863 if (r) {
2864 DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r);
2865 return r;
2866 }
2867 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
2868 radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc);
2869 radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff);
2870 radeon_ring_write(ring, 0xDEADBEEF);
2871 radeon_ring_unlock_commit(rdev, ring);
2872
2873 for (i = 0; i < rdev->usec_timeout; i++) {
2874 tmp = readl(ptr);
2875 if (tmp == 0xDEADBEEF)
2876 break;
2877 DRM_UDELAY(1);
2878 }
2879
2880 if (i < rdev->usec_timeout) {
2881 DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
2882 } else {
2883 DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
2884 ring->idx, tmp);
2885 r = -EINVAL;
2886 }
2887 return r;
2888}
2889
Christian Königf2ba57b2013-04-08 12:41:29 +02002890int r600_uvd_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
2891{
2892 uint32_t tmp = 0;
2893 unsigned i;
2894 int r;
2895
2896 WREG32(UVD_CONTEXT_ID, 0xCAFEDEAD);
2897 r = radeon_ring_lock(rdev, ring, 3);
2898 if (r) {
2899		DRM_ERROR("radeon: failed to lock UVD ring %d (%d).\n",
2900 ring->idx, r);
2901 return r;
2902 }
2903 radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0));
2904 radeon_ring_write(ring, 0xDEADBEEF);
2905 radeon_ring_unlock_commit(rdev, ring);
2906 for (i = 0; i < rdev->usec_timeout; i++) {
2907 tmp = RREG32(UVD_CONTEXT_ID);
2908 if (tmp == 0xDEADBEEF)
2909 break;
2910 DRM_UDELAY(1);
2911 }
2912
2913 if (i < rdev->usec_timeout) {
2914 DRM_INFO("ring test on %d succeeded in %d usecs\n",
2915 ring->idx, i);
2916 } else {
2917 DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
2918 ring->idx, tmp);
2919 r = -EINVAL;
2920 }
2921 return r;
2922}
2923
Alex Deucher4d756582012-09-27 15:08:35 -04002924/*
2925 * CP fences/semaphores
2926 */
2927
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002928void r600_fence_ring_emit(struct radeon_device *rdev,
2929 struct radeon_fence *fence)
2930{
Christian Könige32eb502011-10-23 12:56:27 +02002931 struct radeon_ring *ring = &rdev->ring[fence->ring];
Christian König7b1f2482011-09-23 15:11:23 +02002932
Alex Deucherd0f8a852010-09-04 05:04:34 -04002933 if (rdev->wb.use_event) {
Jerome Glisse30eb77f2011-11-20 20:45:34 +00002934 u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
Jerome Glisse77b1bad2011-10-26 11:41:22 -04002935 /* flush read cache over gart */
Christian Könige32eb502011-10-23 12:56:27 +02002936 radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
2937 radeon_ring_write(ring, PACKET3_TC_ACTION_ENA |
2938 PACKET3_VC_ACTION_ENA |
2939 PACKET3_SH_ACTION_ENA);
2940 radeon_ring_write(ring, 0xFFFFFFFF);
2941 radeon_ring_write(ring, 0);
2942 radeon_ring_write(ring, 10); /* poll interval */
Alex Deucherd0f8a852010-09-04 05:04:34 -04002943 /* EVENT_WRITE_EOP - flush caches, send int */
Christian Könige32eb502011-10-23 12:56:27 +02002944 radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
2945 radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
2946 radeon_ring_write(ring, addr & 0xffffffff);
2947 radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
2948 radeon_ring_write(ring, fence->seq);
2949 radeon_ring_write(ring, 0);
Alex Deucherd0f8a852010-09-04 05:04:34 -04002950 } else {
Jerome Glisse77b1bad2011-10-26 11:41:22 -04002951 /* flush read cache over gart */
Christian Könige32eb502011-10-23 12:56:27 +02002952 radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
2953 radeon_ring_write(ring, PACKET3_TC_ACTION_ENA |
2954 PACKET3_VC_ACTION_ENA |
2955 PACKET3_SH_ACTION_ENA);
2956 radeon_ring_write(ring, 0xFFFFFFFF);
2957 radeon_ring_write(ring, 0);
2958 radeon_ring_write(ring, 10); /* poll interval */
2959 radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
2960 radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0));
Alex Deucherd0f8a852010-09-04 05:04:34 -04002961 /* wait for 3D idle clean */
Christian Könige32eb502011-10-23 12:56:27 +02002962 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2963 radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
2964 radeon_ring_write(ring, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
Alex Deucherd0f8a852010-09-04 05:04:34 -04002965 /* Emit fence sequence & fire IRQ */
Christian Könige32eb502011-10-23 12:56:27 +02002966 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2967 radeon_ring_write(ring, ((rdev->fence_drv[fence->ring].scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
2968 radeon_ring_write(ring, fence->seq);
Alex Deucherd0f8a852010-09-04 05:04:34 -04002969 /* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
Christian Könige32eb502011-10-23 12:56:27 +02002970 radeon_ring_write(ring, PACKET0(CP_INT_STATUS, 0));
2971 radeon_ring_write(ring, RB_INT_STAT);
Alex Deucherd0f8a852010-09-04 05:04:34 -04002972 }
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002973}
2974
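/*
 * Sketch (illustrative): r6xx ring packets carry 40-bit GPU addresses
 * split as in the fence emit above -- the low 32 bits in one dword and
 * the top 8 bits packed into the next alongside control fields such as
 * DATA_SEL()/INT_SEL(). A stand-in for that split:
 */
static inline void sketch_split_gpu_addr(u64 addr, u32 *lo, u32 *hi8)
{
	*lo = (u32)(addr & 0xffffffffULL);
	*hi8 = (u32)((addr >> 32) & 0xff);	/* upper_32_bits(addr) & 0xff */
}
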
Christian Königf2ba57b2013-04-08 12:41:29 +02002975void r600_uvd_fence_emit(struct radeon_device *rdev,
2976 struct radeon_fence *fence)
2977{
2978 struct radeon_ring *ring = &rdev->ring[fence->ring];
2979 uint32_t addr = rdev->fence_drv[fence->ring].gpu_addr;
2980
2981 radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0));
2982 radeon_ring_write(ring, fence->seq);
2983 radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA0, 0));
2984 radeon_ring_write(ring, addr & 0xffffffff);
2985 radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA1, 0));
2986 radeon_ring_write(ring, upper_32_bits(addr) & 0xff);
2987 radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0));
2988 radeon_ring_write(ring, 0);
2989
2990 radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA0, 0));
2991 radeon_ring_write(ring, 0);
2992 radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA1, 0));
2993 radeon_ring_write(ring, 0);
2994 radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0));
2995 radeon_ring_write(ring, 2);
2996 return;
2997}
2998
Christian König15d33322011-09-15 19:02:22 +02002999void r600_semaphore_ring_emit(struct radeon_device *rdev,
Christian Könige32eb502011-10-23 12:56:27 +02003000 struct radeon_ring *ring,
Christian König15d33322011-09-15 19:02:22 +02003001 struct radeon_semaphore *semaphore,
Christian König7b1f2482011-09-23 15:11:23 +02003002 bool emit_wait)
Christian König15d33322011-09-15 19:02:22 +02003003{
3004 uint64_t addr = semaphore->gpu_addr;
3005 unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;
3006
Christian König0be70432012-03-07 11:28:57 +01003007 if (rdev->family < CHIP_CAYMAN)
3008 sel |= PACKET3_SEM_WAIT_ON_SIGNAL;
3009
Christian Könige32eb502011-10-23 12:56:27 +02003010 radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
3011 radeon_ring_write(ring, addr & 0xffffffff);
3012 radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);
Christian König15d33322011-09-15 19:02:22 +02003013}
3014
Alex Deucher4d756582012-09-27 15:08:35 -04003015/*
3016 * DMA fences/semaphores
3017 */
3018
3019/**
3020 * r600_dma_fence_ring_emit - emit a fence on the DMA ring
3021 *
3022 * @rdev: radeon_device pointer
3023 * @fence: radeon fence object
3024 *
3025 * Add a DMA fence packet to the ring to write
3026 * the fence seq number and a DMA trap packet to generate
3027 * an interrupt if needed (r6xx-r7xx).
3028 */
3029void r600_dma_fence_ring_emit(struct radeon_device *rdev,
3030 struct radeon_fence *fence)
3031{
3032 struct radeon_ring *ring = &rdev->ring[fence->ring];
3033 u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
Jerome Glisse86a18812012-12-12 16:43:15 -05003034
Alex Deucher4d756582012-09-27 15:08:35 -04003035 /* write the fence */
3036 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0));
3037 radeon_ring_write(ring, addr & 0xfffffffc);
3038 radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
Jerome Glisse86a18812012-12-12 16:43:15 -05003039 radeon_ring_write(ring, lower_32_bits(fence->seq));
Alex Deucher4d756582012-09-27 15:08:35 -04003040 /* generate an interrupt */
3041 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0));
3042}
3043
3044/**
3045 * r600_dma_semaphore_ring_emit - emit a semaphore on the dma ring
3046 *
3047 * @rdev: radeon_device pointer
3048 * @ring: radeon_ring structure holding ring information
3049 * @semaphore: radeon semaphore object
3050 * @emit_wait: wait or signal semaphore
3051 *
3052 * Add a DMA semaphore packet to the ring to wait on or signal
3053 * other rings (r6xx-SI).
3054 */
3055void r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
3056 struct radeon_ring *ring,
3057 struct radeon_semaphore *semaphore,
3058 bool emit_wait)
3059{
3060 u64 addr = semaphore->gpu_addr;
3061 u32 s = emit_wait ? 0 : 1;
3062
3063 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SEMAPHORE, 0, s, 0));
3064 radeon_ring_write(ring, addr & 0xfffffffc);
3065 radeon_ring_write(ring, upper_32_bits(addr) & 0xff);
3066}
3067
Christian Königf2ba57b2013-04-08 12:41:29 +02003068void r600_uvd_semaphore_emit(struct radeon_device *rdev,
3069 struct radeon_ring *ring,
3070 struct radeon_semaphore *semaphore,
3071 bool emit_wait)
3072{
3073 uint64_t addr = semaphore->gpu_addr;
3074
3075 radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0));
3076 radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF);
3077
3078 radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0));
3079 radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF);
3080
3081 radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
3082 radeon_ring_write(ring, emit_wait ? 1 : 0);
3083}
3084
Jerome Glisse3ce0a232009-09-08 10:10:24 +10003085int r600_copy_blit(struct radeon_device *rdev,
Alex Deucher003cefe2011-09-16 12:04:08 -04003086 uint64_t src_offset,
3087 uint64_t dst_offset,
3088 unsigned num_gpu_pages,
Christian König876dc9f2012-05-08 14:24:01 +02003089 struct radeon_fence **fence)
Jerome Glisse3ce0a232009-09-08 10:10:24 +10003090{
Christian König220907d2012-05-10 16:46:43 +02003091 struct radeon_semaphore *sem = NULL;
Christian Königf2377502012-05-09 15:35:01 +02003092 struct radeon_sa_bo *vb = NULL;
Jerome Glisseff82f052010-01-22 15:19:00 +01003093 int r;
3094
Christian König220907d2012-05-10 16:46:43 +02003095 r = r600_blit_prepare_copy(rdev, num_gpu_pages, fence, &vb, &sem);
Jerome Glisseff82f052010-01-22 15:19:00 +01003096 if (r) {
Jerome Glisseff82f052010-01-22 15:19:00 +01003097 return r;
3098 }
Christian Königf2377502012-05-09 15:35:01 +02003099 r600_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages, vb);
Christian König220907d2012-05-10 16:46:43 +02003100 r600_blit_done_copy(rdev, fence, vb, sem);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10003101 return 0;
3102}
3103
Alex Deucher4d756582012-09-27 15:08:35 -04003104/**
3105 * r600_copy_dma - copy pages using the DMA engine
3106 *
3107 * @rdev: radeon_device pointer
3108 * @src_offset: src GPU address
3109 * @dst_offset: dst GPU address
3110 * @num_gpu_pages: number of GPU pages to xfer
3111 * @fence: radeon fence object
3112 *
Alex Deucher43fb7782013-01-04 09:24:18 -05003113 * Copy GPU pages using the DMA engine (r6xx).
Alex Deucher4d756582012-09-27 15:08:35 -04003114 * Used by the radeon ttm implementation to move pages if
3115 * registered as the asic copy callback.
3116 */
3117int r600_copy_dma(struct radeon_device *rdev,
3118 uint64_t src_offset, uint64_t dst_offset,
3119 unsigned num_gpu_pages,
3120 struct radeon_fence **fence)
3121{
3122 struct radeon_semaphore *sem = NULL;
3123 int ring_index = rdev->asic->copy.dma_ring_index;
3124 struct radeon_ring *ring = &rdev->ring[ring_index];
3125 u32 size_in_dw, cur_size_in_dw;
3126 int i, num_loops;
3127 int r = 0;
3128
3129 r = radeon_semaphore_create(rdev, &sem);
3130 if (r) {
3131 DRM_ERROR("radeon: moving bo (%d).\n", r);
3132 return r;
3133 }
3134
3135 size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
Alex Deucher43fb7782013-01-04 09:24:18 -05003136 num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFE);
3137 r = radeon_ring_lock(rdev, ring, num_loops * 4 + 8);
Alex Deucher4d756582012-09-27 15:08:35 -04003138 if (r) {
3139 DRM_ERROR("radeon: moving bo (%d).\n", r);
3140 radeon_semaphore_free(rdev, &sem, NULL);
3141 return r;
3142 }
3143
3144 if (radeon_fence_need_sync(*fence, ring->idx)) {
3145 radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
3146 ring->idx);
3147 radeon_fence_note_sync(*fence, ring->idx);
3148 } else {
3149 radeon_semaphore_free(rdev, &sem, NULL);
3150 }
3151
3152 for (i = 0; i < num_loops; i++) {
3153 cur_size_in_dw = size_in_dw;
Alex Deucher909d9eb2013-01-02 18:30:21 -05003154 if (cur_size_in_dw > 0xFFFE)
3155 cur_size_in_dw = 0xFFFE;
Alex Deucher4d756582012-09-27 15:08:35 -04003156 size_in_dw -= cur_size_in_dw;
3157 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
3158 radeon_ring_write(ring, dst_offset & 0xfffffffc);
3159 radeon_ring_write(ring, src_offset & 0xfffffffc);
Alex Deucher43fb7782013-01-04 09:24:18 -05003160 radeon_ring_write(ring, (((upper_32_bits(dst_offset) & 0xff) << 16) |
3161 (upper_32_bits(src_offset) & 0xff)));
Alex Deucher4d756582012-09-27 15:08:35 -04003162 src_offset += cur_size_in_dw * 4;
3163 dst_offset += cur_size_in_dw * 4;
3164 }
3165
3166 r = radeon_fence_emit(rdev, fence, ring->idx);
3167 if (r) {
3168 radeon_ring_unlock_undo(rdev, ring);
3169 return r;
3170 }
3171
3172 radeon_ring_unlock_commit(rdev, ring);
3173 radeon_semaphore_free(rdev, &sem, *fence);
3174
3175 return r;
3176}
3177
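/*
 * Sketch of the chunking math in r600_copy_dma() above (illustrative):
 * each DMA_PACKET_COPY moves at most 0xFFFE dwords, so the transfer is
 * split into DIV_ROUND_UP(size_in_dw, 0xFFFE) packets and the ring is
 * reserved at 4 dwords per packet plus 8 for semaphore/fence overhead.
 * The page shift of 12 (4 KB GPU pages) is an assumption here.
 */
static inline u32 sketch_dma_copy_reserve(u32 num_gpu_pages, u32 *num_loops)
{
	u32 size_in_dw = (num_gpu_pages << 12) / 4;

	*num_loops = (size_in_dw + 0xFFFE - 1) / 0xFFFE;  /* DIV_ROUND_UP */
	return *num_loops * 4 + 8;	/* dwords passed to radeon_ring_lock */
}
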
Jerome Glisse3ce0a232009-09-08 10:10:24 +10003178int r600_set_surface_reg(struct radeon_device *rdev, int reg,
3179 uint32_t tiling_flags, uint32_t pitch,
3180 uint32_t offset, uint32_t obj_size)
3181{
3182 /* FIXME: implement */
3183 return 0;
3184}
3185
3186void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
3187{
3188 /* FIXME: implement */
3189}
3190
Lauri Kasanen1109ca02012-08-31 13:43:50 -04003191static int r600_startup(struct radeon_device *rdev)
Jerome Glisse3ce0a232009-09-08 10:10:24 +10003192{
Alex Deucher4d756582012-09-27 15:08:35 -04003193 struct radeon_ring *ring;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10003194 int r;
3195
Alex Deucher9e46a482011-01-06 18:49:35 -05003196 /* enable pcie gen2 link */
3197 r600_pcie_gen2_enable(rdev);
3198
Alex Deucher779720a2009-12-09 19:31:44 -05003199 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
3200 r = r600_init_microcode(rdev);
3201 if (r) {
3202 DRM_ERROR("Failed to load firmware!\n");
3203 return r;
3204 }
3205 }
3206
Alex Deucher16cdf042011-10-28 10:30:02 -04003207 r = r600_vram_scratch_init(rdev);
3208 if (r)
3209 return r;
3210
Jerome Glissea3c19452009-10-01 18:02:13 +02003211 r600_mc_program(rdev);
Jerome Glisse1a029b72009-10-06 19:04:30 +02003212 if (rdev->flags & RADEON_IS_AGP) {
3213 r600_agp_enable(rdev);
3214 } else {
3215 r = r600_pcie_gart_enable(rdev);
3216 if (r)
3217 return r;
3218 }
Jerome Glisse3ce0a232009-09-08 10:10:24 +10003219 r600_gpu_init(rdev);
Jerome Glissec38c7b62010-02-04 17:27:27 +01003220 r = r600_blit_init(rdev);
3221 if (r) {
3222 r600_blit_fini(rdev);
Alex Deucher27cd7762012-02-23 17:53:42 -05003223 rdev->asic->copy.copy = NULL;
Jerome Glissec38c7b62010-02-04 17:27:27 +01003224 dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
3225 }
Alex Deucherb70d6bb2010-08-06 21:36:58 -04003226
Alex Deucher724c80e2010-08-27 18:25:25 -04003227 /* allocate wb buffer */
3228 r = radeon_wb_init(rdev);
3229 if (r)
3230 return r;
3231
Jerome Glisse30eb77f2011-11-20 20:45:34 +00003232 r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
3233 if (r) {
3234 dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
3235 return r;
3236 }
3237
Alex Deucher4d756582012-09-27 15:08:35 -04003238 r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
3239 if (r) {
3240 dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
3241 return r;
3242 }
3243
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003244 /* Enable IRQ */
Adis Hamziće49f3952013-06-02 16:47:54 +02003245 if (!rdev->irq.installed) {
3246 r = radeon_irq_kms_init(rdev);
3247 if (r)
3248 return r;
3249 }
3250
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003251 r = r600_irq_init(rdev);
3252 if (r) {
3253 DRM_ERROR("radeon: IH init failed (%d).\n", r);
3254 radeon_irq_kms_fini(rdev);
3255 return r;
3256 }
3257 r600_irq_set(rdev);
3258
Alex Deucher4d756582012-09-27 15:08:35 -04003259 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
Christian Könige32eb502011-10-23 12:56:27 +02003260 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
Alex Deucher78c55602011-11-17 14:25:56 -05003261 R600_CP_RB_RPTR, R600_CP_RB_WPTR,
3262 0, 0xfffff, RADEON_CP_PACKET2);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10003263 if (r)
3264 return r;
Alex Deucher4d756582012-09-27 15:08:35 -04003265
3266 ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
3267 r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
3268 DMA_RB_RPTR, DMA_RB_WPTR,
3269 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
3270 if (r)
3271 return r;
3272
Jerome Glisse3ce0a232009-09-08 10:10:24 +10003273 r = r600_cp_load_microcode(rdev);
3274 if (r)
3275 return r;
3276 r = r600_cp_resume(rdev);
3277 if (r)
3278 return r;
Alex Deucher724c80e2010-08-27 18:25:25 -04003279
Alex Deucher4d756582012-09-27 15:08:35 -04003280 r = r600_dma_resume(rdev);
3281 if (r)
3282 return r;
3283
Christian König2898c342012-07-05 11:55:34 +02003284 r = radeon_ib_pool_init(rdev);
3285 if (r) {
3286 dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
Jerome Glisseb15ba512011-11-15 11:48:34 -05003287 return r;
Christian König2898c342012-07-05 11:55:34 +02003288 }
Jerome Glisseb15ba512011-11-15 11:48:34 -05003289
Alex Deucherd4e30ef2012-06-04 17:18:51 -04003290 r = r600_audio_init(rdev);
3291 if (r) {
3292 DRM_ERROR("radeon: audio init failed\n");
3293 return r;
3294 }
3295
Jerome Glisse3ce0a232009-09-08 10:10:24 +10003296 return 0;
3297}
3298
Dave Airlie28d52042009-09-21 14:33:58 +10003299void r600_vga_set_state(struct radeon_device *rdev, bool state)
3300{
3301 uint32_t temp;
3302
3303 temp = RREG32(CONFIG_CNTL);
3304 if (state == false) {
3305 temp &= ~(1<<0);
3306 temp |= (1<<1);
3307 } else {
3308 temp &= ~(1<<1);
3309 }
3310 WREG32(CONFIG_CNTL, temp);
3311}
3312
Dave Airliefc30b8e2009-09-18 15:19:37 +10003313int r600_resume(struct radeon_device *rdev)
3314{
3315 int r;
3316
Jerome Glisse1a029b72009-10-06 19:04:30 +02003317	/* Do not reset the GPU before posting; on r600 hw, unlike r500 hw,
3318	 * posting performs the tasks necessary to bring the GPU back into
3319	 * good shape.
3320 */
Dave Airliefc30b8e2009-09-18 15:19:37 +10003321 /* post card */
Jerome Glissee7d40b92009-10-01 18:02:15 +02003322 atom_asic_init(rdev->mode_info.atom_context);
Dave Airliefc30b8e2009-09-18 15:19:37 +10003323
Jerome Glisseb15ba512011-11-15 11:48:34 -05003324 rdev->accel_working = true;
Dave Airliefc30b8e2009-09-18 15:19:37 +10003325 r = r600_startup(rdev);
3326 if (r) {
3327 DRM_ERROR("r600 startup failed on resume\n");
Jerome Glisse6b7746e2012-02-20 17:57:20 -05003328 rdev->accel_working = false;
Dave Airliefc30b8e2009-09-18 15:19:37 +10003329 return r;
3330 }
3331
Dave Airliefc30b8e2009-09-18 15:19:37 +10003332 return r;
3333}
3334
Jerome Glisse3ce0a232009-09-08 10:10:24 +10003335int r600_suspend(struct radeon_device *rdev)
3336{
Rafał Miłecki38fd2c62010-01-28 18:16:30 +01003337 r600_audio_fini(rdev);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10003338 r600_cp_stop(rdev);
Alex Deucher4d756582012-09-27 15:08:35 -04003339 r600_dma_stop(rdev);
Jerome Glisse0c452492010-01-15 14:44:37 +01003340 r600_irq_suspend(rdev);
Alex Deucher724c80e2010-08-27 18:25:25 -04003341 radeon_wb_disable(rdev);
Jerome Glisse4aac0472009-09-14 18:29:49 +02003342 r600_pcie_gart_disable(rdev);
Alex Deucher6ddddfe2011-10-14 10:51:22 -04003343
Jerome Glisse3ce0a232009-09-08 10:10:24 +10003344 return 0;
3345}
3346
3347/* The plan is to move initialization into this function and to use
3348 * helper functions so that radeon_device_init does pretty much
3349 * nothing more than call asic-specific functions. This should
3350 * also allow us to remove a bunch of callback functions
3351 * like vram_info.
3352 */
3353int r600_init(struct radeon_device *rdev)
3354{
3355 int r;
3356
Jerome Glisse3ce0a232009-09-08 10:10:24 +10003357 if (r600_debugfs_mc_info_init(rdev)) {
3358 DRM_ERROR("Failed to register debugfs file for mc !\n");
3359 }
Jerome Glisse3ce0a232009-09-08 10:10:24 +10003360 /* Read BIOS */
3361 if (!radeon_get_bios(rdev)) {
3362 if (ASIC_IS_AVIVO(rdev))
3363 return -EINVAL;
3364 }
3365 /* Must be an ATOMBIOS */
Jerome Glissee7d40b92009-10-01 18:02:15 +02003366 if (!rdev->is_atom_bios) {
3367 dev_err(rdev->dev, "Expecting atombios for R600 GPU\n");
Jerome Glisse3ce0a232009-09-08 10:10:24 +10003368 return -EINVAL;
Jerome Glissee7d40b92009-10-01 18:02:15 +02003369 }
Jerome Glisse3ce0a232009-09-08 10:10:24 +10003370 r = radeon_atombios_init(rdev);
3371 if (r)
3372 return r;
3373 /* Post card if necessary */
Alex Deucherfd909c32011-01-11 18:08:59 -05003374 if (!radeon_card_posted(rdev)) {
Dave Airlie72542d72009-12-01 14:06:31 +10003375 if (!rdev->bios) {
3376 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
3377 return -EINVAL;
3378 }
Jerome Glisse3ce0a232009-09-08 10:10:24 +10003379 DRM_INFO("GPU not posted. posting now...\n");
3380 atom_asic_init(rdev->mode_info.atom_context);
3381 }
3382 /* Initialize scratch registers */
3383 r600_scratch_init(rdev);
3384 /* Initialize surface registers */
3385 radeon_surface_init(rdev);
Rafał Miłecki74338742009-11-03 00:53:02 +01003386 /* Initialize clocks */
Michel Dänzer5e6dde72009-09-17 09:42:28 +02003387 radeon_get_clock_info(rdev->ddev);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10003388 /* Fence driver */
Jerome Glisse30eb77f2011-11-20 20:45:34 +00003389 r = radeon_fence_driver_init(rdev);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10003390 if (r)
3391 return r;
Jerome Glisse700a0cc2010-01-13 15:16:38 +01003392 if (rdev->flags & RADEON_IS_AGP) {
3393 r = radeon_agp_init(rdev);
3394 if (r)
3395 radeon_agp_disable(rdev);
3396 }
Jerome Glisse3ce0a232009-09-08 10:10:24 +10003397 r = r600_mc_init(rdev);
Jerome Glisseb574f252009-10-06 19:04:29 +02003398 if (r)
Jerome Glisse3ce0a232009-09-08 10:10:24 +10003399 return r;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10003400 /* Memory manager */
Jerome Glisse4c788672009-11-20 14:29:23 +01003401 r = radeon_bo_init(rdev);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10003402 if (r)
3403 return r;
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003404
Christian Könige32eb502011-10-23 12:56:27 +02003405 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
3406 r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10003407
Alex Deucher4d756582012-09-27 15:08:35 -04003408 rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL;
3409 r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024);
3410
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003411 rdev->ih.ring_obj = NULL;
3412 r600_ih_ring_init(rdev, 64 * 1024);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10003413
Jerome Glisse4aac0472009-09-14 18:29:49 +02003414 r = r600_pcie_gart_init(rdev);
3415 if (r)
3416 return r;
3417
Alex Deucher779720a2009-12-09 19:31:44 -05003418 rdev->accel_working = true;
Dave Airliefc30b8e2009-09-18 15:19:37 +10003419 r = r600_startup(rdev);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10003420 if (r) {
Jerome Glisse655efd32010-02-02 11:51:45 +01003421 dev_err(rdev->dev, "disabling GPU acceleration\n");
3422 r600_cp_fini(rdev);
Alex Deucher4d756582012-09-27 15:08:35 -04003423 r600_dma_fini(rdev);
Jerome Glisse655efd32010-02-02 11:51:45 +01003424 r600_irq_fini(rdev);
Alex Deucher724c80e2010-08-27 18:25:25 -04003425 radeon_wb_fini(rdev);
Christian König2898c342012-07-05 11:55:34 +02003426 radeon_ib_pool_fini(rdev);
Jerome Glisse655efd32010-02-02 11:51:45 +01003427 radeon_irq_kms_fini(rdev);
Jerome Glisse75c81292009-10-01 18:02:14 +02003428 r600_pcie_gart_fini(rdev);
Jerome Glisse733289c2009-09-16 15:24:21 +02003429 rdev->accel_working = false;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10003430 }
Christian Koenigdafc3bd2009-10-11 23:49:13 +02003431
Jerome Glisse3ce0a232009-09-08 10:10:24 +10003432 return 0;
3433}
3434
3435void r600_fini(struct radeon_device *rdev)
3436{
Christian Koenigdafc3bd2009-10-11 23:49:13 +02003437 r600_audio_fini(rdev);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10003438 r600_blit_fini(rdev);
Jerome Glisse655efd32010-02-02 11:51:45 +01003439 r600_cp_fini(rdev);
Alex Deucher4d756582012-09-27 15:08:35 -04003440 r600_dma_fini(rdev);
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003441 r600_irq_fini(rdev);
Alex Deucher724c80e2010-08-27 18:25:25 -04003442 radeon_wb_fini(rdev);
Christian König2898c342012-07-05 11:55:34 +02003443 radeon_ib_pool_fini(rdev);
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003444 radeon_irq_kms_fini(rdev);
Jerome Glisse4aac0472009-09-14 18:29:49 +02003445 r600_pcie_gart_fini(rdev);
Alex Deucher16cdf042011-10-28 10:30:02 -04003446 r600_vram_scratch_fini(rdev);
Jerome Glisse655efd32010-02-02 11:51:45 +01003447 radeon_agp_fini(rdev);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10003448 radeon_gem_fini(rdev);
3449 radeon_fence_driver_fini(rdev);
Jerome Glisse4c788672009-11-20 14:29:23 +01003450 radeon_bo_fini(rdev);
Jerome Glissee7d40b92009-10-01 18:02:15 +02003451 radeon_atombios_fini(rdev);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10003452 kfree(rdev->bios);
3453 rdev->bios = NULL;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10003454}
3455
3456
3457/*
3458 * CS stuff
3459 */
3460void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
3461{
Christian König876dc9f2012-05-08 14:24:01 +02003462 struct radeon_ring *ring = &rdev->ring[ib->ring];
Alex Deucher89d35802012-07-17 14:02:31 -04003463 u32 next_rptr;
Christian König7b1f2482011-09-23 15:11:23 +02003464
Christian König45df6802012-07-06 16:22:55 +02003465 if (ring->rptr_save_reg) {
Alex Deucher89d35802012-07-17 14:02:31 -04003466 next_rptr = ring->wptr + 3 + 4;
Christian König45df6802012-07-06 16:22:55 +02003467 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
3468 radeon_ring_write(ring, ((ring->rptr_save_reg -
3469 PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
3470 radeon_ring_write(ring, next_rptr);
Alex Deucher89d35802012-07-17 14:02:31 -04003471 } else if (rdev->wb.enabled) {
3472 next_rptr = ring->wptr + 5 + 4;
3473 radeon_ring_write(ring, PACKET3(PACKET3_MEM_WRITE, 3));
3474 radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
3475 radeon_ring_write(ring, (upper_32_bits(ring->next_rptr_gpu_addr) & 0xff) | (1 << 18));
3476 radeon_ring_write(ring, next_rptr);
3477 radeon_ring_write(ring, 0);
Christian König45df6802012-07-06 16:22:55 +02003478 }
3479
Christian Könige32eb502011-10-23 12:56:27 +02003480 radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
3481 radeon_ring_write(ring,
Cédric Cano4eace7f2011-02-11 19:45:38 -05003482#ifdef __BIG_ENDIAN
3483 (2 << 0) |
3484#endif
3485 (ib->gpu_addr & 0xFFFFFFFC));
Christian Könige32eb502011-10-23 12:56:27 +02003486 radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
3487 radeon_ring_write(ring, ib->length_dw);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10003488}
3489
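/*
 * Sketch (illustrative): the next_rptr bookkeeping above precomputes
 * where the read pointer will land once this IB packet is fetched --
 * the current wptr plus the dwords about to be written (3 for the
 * SET_CONFIG_REG update or 5 for the MEM_WRITE update, plus 4 for the
 * INDIRECT_BUFFER packet) -- so the last good rptr is preserved even
 * if the IB hangs the engine.
 */
static inline u32 sketch_next_rptr(u32 wptr, u32 rptr_update_dw)
{
	return wptr + rptr_update_dw + 4;	/* 4-dword INDIRECT_BUFFER */
}
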
Christian Königf2ba57b2013-04-08 12:41:29 +02003490void r600_uvd_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
3491{
3492 struct radeon_ring *ring = &rdev->ring[ib->ring];
3493
3494 radeon_ring_write(ring, PACKET0(UVD_RBC_IB_BASE, 0));
3495 radeon_ring_write(ring, ib->gpu_addr);
3496 radeon_ring_write(ring, PACKET0(UVD_RBC_IB_SIZE, 0));
3497 radeon_ring_write(ring, ib->length_dw);
3498}
3499
Alex Deucherf7128122012-02-23 17:53:45 -05003500int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
Jerome Glisse3ce0a232009-09-08 10:10:24 +10003501{
Jerome Glissef2e39222012-05-09 15:35:02 +02003502 struct radeon_ib ib;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10003503 uint32_t scratch;
3504 uint32_t tmp = 0;
3505 unsigned i;
3506 int r;
3507
3508 r = radeon_scratch_get(rdev, &scratch);
3509 if (r) {
3510 DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
3511 return r;
3512 }
3513 WREG32(scratch, 0xCAFEDEAD);
Christian König4bf3dd92012-08-06 18:57:44 +02003514 r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10003515 if (r) {
3516 DRM_ERROR("radeon: failed to get ib (%d).\n", r);
Michel Dänzeraf026c52012-09-20 10:31:10 +02003517 goto free_scratch;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10003518 }
Jerome Glissef2e39222012-05-09 15:35:02 +02003519 ib.ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
3520 ib.ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
3521 ib.ptr[2] = 0xDEADBEEF;
3522 ib.length_dw = 3;
Christian König4ef72562012-07-13 13:06:00 +02003523 r = radeon_ib_schedule(rdev, &ib, NULL);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10003524 if (r) {
Jerome Glisse3ce0a232009-09-08 10:10:24 +10003525 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
Michel Dänzeraf026c52012-09-20 10:31:10 +02003526 goto free_ib;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10003527 }
Jerome Glissef2e39222012-05-09 15:35:02 +02003528 r = radeon_fence_wait(ib.fence, false);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10003529 if (r) {
3530 DRM_ERROR("radeon: fence wait failed (%d).\n", r);
Michel Dänzeraf026c52012-09-20 10:31:10 +02003531 goto free_ib;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10003532 }
3533 for (i = 0; i < rdev->usec_timeout; i++) {
3534 tmp = RREG32(scratch);
3535 if (tmp == 0xDEADBEEF)
3536 break;
3537 DRM_UDELAY(1);
3538 }
3539 if (i < rdev->usec_timeout) {
Jerome Glissef2e39222012-05-09 15:35:02 +02003540 DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10003541 } else {
Daniel J Blueman4417d7f2010-09-22 17:57:19 +01003542 DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
Jerome Glisse3ce0a232009-09-08 10:10:24 +10003543 scratch, tmp);
3544 r = -EINVAL;
3545 }
Michel Dänzeraf026c52012-09-20 10:31:10 +02003546free_ib:
Jerome Glisse3ce0a232009-09-08 10:10:24 +10003547 radeon_ib_free(rdev, &ib);
Michel Dänzeraf026c52012-09-20 10:31:10 +02003548free_scratch:
3549 radeon_scratch_free(rdev, scratch);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10003550 return r;
3551}
3552
Alex Deucher4d756582012-09-27 15:08:35 -04003553/**
3554 * r600_dma_ib_test - test an IB on the DMA engine
3555 *
3556 * @rdev: radeon_device pointer
3557 * @ring: radeon_ring structure holding ring information
3558 *
3559 * Test a simple IB in the DMA ring (r6xx-SI).
3560 * Returns 0 on success, error on failure.
3561 */
3562int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
3563{
3564 struct radeon_ib ib;
3565 unsigned i;
3566 int r;
3567 void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
3568 u32 tmp = 0;
3569
3570 if (!ptr) {
3571 DRM_ERROR("invalid vram scratch pointer\n");
3572 return -EINVAL;
3573 }
3574
3575 tmp = 0xCAFEDEAD;
3576 writel(tmp, ptr);
3577
3578 r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
3579 if (r) {
3580 DRM_ERROR("radeon: failed to get ib (%d).\n", r);
3581 return r;
3582 }
3583
3584 ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1);
3585 ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc;
3586 ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff;
3587 ib.ptr[3] = 0xDEADBEEF;
3588 ib.length_dw = 4;
3589
3590 r = radeon_ib_schedule(rdev, &ib, NULL);
3591 if (r) {
3592 radeon_ib_free(rdev, &ib);
3593 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
3594 return r;
3595 }
3596 r = radeon_fence_wait(ib.fence, false);
3597 if (r) {
3598 DRM_ERROR("radeon: fence wait failed (%d).\n", r);
3599 return r;
3600 }
3601 for (i = 0; i < rdev->usec_timeout; i++) {
3602 tmp = readl(ptr);
3603 if (tmp == 0xDEADBEEF)
3604 break;
3605 DRM_UDELAY(1);
3606 }
3607 if (i < rdev->usec_timeout) {
3608 DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
3609 } else {
3610 DRM_ERROR("radeon: ib test failed (0x%08X)\n", tmp);
3611 r = -EINVAL;
3612 }
3613 radeon_ib_free(rdev, &ib);
3614 return r;
3615}
3616
Christian Königf2ba57b2013-04-08 12:41:29 +02003617int r600_uvd_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
3618{
Christian Königb05e9e42013-04-19 16:14:19 +02003619 struct radeon_fence *fence = NULL;
Christian Königf2ba57b2013-04-08 12:41:29 +02003620 int r;
3621
Christian Königb05e9e42013-04-19 16:14:19 +02003622 r = radeon_set_uvd_clocks(rdev, 53300, 40000);
3623 if (r) {
3624 DRM_ERROR("radeon: failed to raise UVD clocks (%d).\n", r);
3625 return r;
3626 }
3627
Christian Königf2ba57b2013-04-08 12:41:29 +02003628 r = radeon_uvd_get_create_msg(rdev, ring->idx, 1, NULL);
3629 if (r) {
3630 DRM_ERROR("radeon: failed to get create msg (%d).\n", r);
Christian Königb05e9e42013-04-19 16:14:19 +02003631 goto error;
Christian Königf2ba57b2013-04-08 12:41:29 +02003632 }
3633
3634 r = radeon_uvd_get_destroy_msg(rdev, ring->idx, 1, &fence);
3635 if (r) {
3636 DRM_ERROR("radeon: failed to get destroy ib (%d).\n", r);
Christian Königb05e9e42013-04-19 16:14:19 +02003637 goto error;
Christian Königf2ba57b2013-04-08 12:41:29 +02003638 }
3639
3640 r = radeon_fence_wait(fence, false);
3641 if (r) {
3642 DRM_ERROR("radeon: fence wait failed (%d).\n", r);
Christian Königb05e9e42013-04-19 16:14:19 +02003643 goto error;
Christian Königf2ba57b2013-04-08 12:41:29 +02003644 }
3645 DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
Christian Königb05e9e42013-04-19 16:14:19 +02003646error:
Christian Königf2ba57b2013-04-08 12:41:29 +02003647 radeon_fence_unref(&fence);
Christian Königb05e9e42013-04-19 16:14:19 +02003648 radeon_set_uvd_clocks(rdev, 0, 0);
Christian Königf2ba57b2013-04-08 12:41:29 +02003649 return r;
3650}
3651
Alex Deucher4d756582012-09-27 15:08:35 -04003652/**
3653 * r600_dma_ring_ib_execute - Schedule an IB on the DMA engine
3654 *
3655 * @rdev: radeon_device pointer
3656 * @ib: IB object to schedule
3657 *
3658 * Schedule an IB in the DMA ring (r6xx-r7xx).
3659 */
3660void r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
3661{
3662 struct radeon_ring *ring = &rdev->ring[ib->ring];
3663
3664 if (rdev->wb.enabled) {
3665 u32 next_rptr = ring->wptr + 4;
3666 while ((next_rptr & 7) != 5)
3667 next_rptr++;
3668 next_rptr += 3;
3669 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
3670 radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
3671 radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
3672 radeon_ring_write(ring, next_rptr);
3673 }
3674
3675 /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
3676 * Pad as necessary with NOPs.
3677 */
3678 while ((ring->wptr & 7) != 5)
3679 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
3680 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0, 0));
3681 radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
3682 radeon_ring_write(ring, (ib->length_dw << 16) | (upper_32_bits(ib->gpu_addr) & 0xFF));
3683
3684}
3685
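/*
 * Sketch (illustrative): DMA_PACKET_INDIRECT_BUFFER is emitted as 3
 * dwords above and must end on an 8-dword boundary, hence the padding
 * until (wptr & 7) == 5, since 5 + 3 == 8. A stand-in for counting the
 * NOPs required:
 */
static inline u32 sketch_dma_ib_pad(u32 wptr)
{
	u32 nops = 0;

	while ((wptr & 7) != 5) {	/* each NOP advances wptr one dword */
		wptr++;
		nops++;
	}
	return nops;
}
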
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003686/*
3687 * Interrupts
3688 *
3689 * Interrupts use a ring buffer on r6xx/r7xx hardware. It works pretty
3690 * much the same as the CP ring buffer, but in reverse. Rather than the CPU
3691 * writing to the ring and the GPU consuming, the GPU writes to the ring
3692 * and the host consumes. As the host irq handler processes interrupts, it
3693 * increments the rptr. When the rptr catches up with the wptr, all the
3694 * current interrupts have been processed.
3695 */
Jerome Glisse3ce0a232009-09-08 10:10:24 +10003696
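/*
 * Sketch of the consumer model described above (illustrative): the GPU
 * advances wptr as it posts interrupt vectors and the host walks rptr
 * up to wptr, wrapping with the power-of-two mask prepared in
 * r600_ih_ring_init() below. The 16-byte vector stride is how the r6xx
 * IH packs entries, per the handler elsewhere in this file.
 */
static inline u32 sketch_ih_drain(u32 rptr, u32 wptr, u32 ptr_mask)
{
	while (rptr != wptr) {
		/* decode ring[rptr / 4] here in the real handler */
		rptr = (rptr + 16) & ptr_mask;
	}
	return rptr;	/* written back to IH_RB_RPTR when done */
}
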
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003697void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size)
3698{
3699 u32 rb_bufsz;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10003700
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003701 /* Align ring size */
3702 rb_bufsz = drm_order(ring_size / 4);
3703 ring_size = (1 << rb_bufsz) * 4;
3704 rdev->ih.ring_size = ring_size;
Jerome Glisse0c452492010-01-15 14:44:37 +01003705 rdev->ih.ptr_mask = rdev->ih.ring_size - 1;
3706 rdev->ih.rptr = 0;
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003707}
3708
Alex Deucher25a857f2012-03-20 17:18:22 -04003709int r600_ih_ring_alloc(struct radeon_device *rdev)
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003710{
3711 int r;
3712
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003713 /* Allocate ring buffer */
3714 if (rdev->ih.ring_obj == NULL) {
Daniel Vetter441921d2011-02-18 17:59:16 +01003715 r = radeon_bo_create(rdev, rdev->ih.ring_size,
Alex Deucher268b2512010-11-17 19:00:26 -05003716 PAGE_SIZE, true,
Jerome Glisse4c788672009-11-20 14:29:23 +01003717 RADEON_GEM_DOMAIN_GTT,
Alex Deucher40f5cf92012-05-10 18:33:13 -04003718 NULL, &rdev->ih.ring_obj);
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003719 if (r) {
3720 DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r);
3721 return r;
3722 }
Jerome Glisse4c788672009-11-20 14:29:23 +01003723 r = radeon_bo_reserve(rdev->ih.ring_obj, false);
3724 if (unlikely(r != 0))
3725 return r;
3726 r = radeon_bo_pin(rdev->ih.ring_obj,
3727 RADEON_GEM_DOMAIN_GTT,
3728 &rdev->ih.gpu_addr);
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003729 if (r) {
Jerome Glisse4c788672009-11-20 14:29:23 +01003730 radeon_bo_unreserve(rdev->ih.ring_obj);
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003731 DRM_ERROR("radeon: failed to pin ih ring buffer (%d).\n", r);
3732 return r;
3733 }
Jerome Glisse4c788672009-11-20 14:29:23 +01003734 r = radeon_bo_kmap(rdev->ih.ring_obj,
3735 (void **)&rdev->ih.ring);
3736 radeon_bo_unreserve(rdev->ih.ring_obj);
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003737 if (r) {
3738 DRM_ERROR("radeon: failed to map ih ring buffer (%d).\n", r);
3739 return r;
3740 }
3741 }
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003742 return 0;
3743}
3744
Alex Deucher25a857f2012-03-20 17:18:22 -04003745void r600_ih_ring_fini(struct radeon_device *rdev)
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003746{
Jerome Glisse4c788672009-11-20 14:29:23 +01003747 int r;
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003748 if (rdev->ih.ring_obj) {
Jerome Glisse4c788672009-11-20 14:29:23 +01003749 r = radeon_bo_reserve(rdev->ih.ring_obj, false);
3750 if (likely(r == 0)) {
3751 radeon_bo_kunmap(rdev->ih.ring_obj);
3752 radeon_bo_unpin(rdev->ih.ring_obj);
3753 radeon_bo_unreserve(rdev->ih.ring_obj);
3754 }
3755 radeon_bo_unref(&rdev->ih.ring_obj);
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003756 rdev->ih.ring = NULL;
3757 rdev->ih.ring_obj = NULL;
3758 }
3759}
3760
Alex Deucher45f9a392010-03-24 13:55:51 -04003761void r600_rlc_stop(struct radeon_device *rdev)
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003762{
3763
Alex Deucher45f9a392010-03-24 13:55:51 -04003764 if ((rdev->family >= CHIP_RV770) &&
3765 (rdev->family <= CHIP_RV740)) {
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003766 /* r7xx asics need to soft reset RLC before halting */
3767 WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC);
3768 RREG32(SRBM_SOFT_RESET);
Arnd Bergmann4de833c2012-04-05 12:58:22 -06003769 mdelay(15);
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003770 WREG32(SRBM_SOFT_RESET, 0);
3771 RREG32(SRBM_SOFT_RESET);
3772 }
3773
3774 WREG32(RLC_CNTL, 0);
3775}
3776
3777static void r600_rlc_start(struct radeon_device *rdev)
3778{
3779 WREG32(RLC_CNTL, RLC_ENABLE);
3780}
3781
Alex Deucher2948f5e2013-04-12 13:52:52 -04003782static int r600_rlc_resume(struct radeon_device *rdev)
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003783{
3784 u32 i;
3785 const __be32 *fw_data;
3786
3787 if (!rdev->rlc_fw)
3788 return -EINVAL;
3789
3790 r600_rlc_stop(rdev);
3791
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003792 WREG32(RLC_HB_CNTL, 0);
Alex Deucherc420c742012-03-20 17:18:39 -04003793
Alex Deucher2948f5e2013-04-12 13:52:52 -04003794 WREG32(RLC_HB_BASE, 0);
3795 WREG32(RLC_HB_RPTR, 0);
3796 WREG32(RLC_HB_WPTR, 0);
3797 WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
3798 WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003799 WREG32(RLC_MC_CNTL, 0);
3800 WREG32(RLC_UCODE_CNTL, 0);
3801
3802 fw_data = (const __be32 *)rdev->rlc_fw->data;
Alex Deucher2948f5e2013-04-12 13:52:52 -04003803 if (rdev->family >= CHIP_RV770) {
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003804 for (i = 0; i < R700_RLC_UCODE_SIZE; i++) {
3805 WREG32(RLC_UCODE_ADDR, i);
3806 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
3807 }
3808 } else {
Alex Deucher138e4e12013-01-11 15:33:13 -05003809 for (i = 0; i < R600_RLC_UCODE_SIZE; i++) {
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003810 WREG32(RLC_UCODE_ADDR, i);
3811 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
3812 }
3813 }
3814 WREG32(RLC_UCODE_ADDR, 0);
3815
3816 r600_rlc_start(rdev);
3817
3818 return 0;
3819}
3820
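/*
 * Sketch (illustrative): the microcode upload above streams big-endian
 * firmware words through the RLC_UCODE_ADDR/RLC_UCODE_DATA index/data
 * pair, converting each word with be32_to_cpup(). The stand-in below
 * shows just the byte-order conversion.
 */
static inline u32 sketch_be32_to_cpu(const u8 *p)
{
	return ((u32)p[0] << 24) | ((u32)p[1] << 16) |
	       ((u32)p[2] << 8)  |  (u32)p[3];
}
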
3821static void r600_enable_interrupts(struct radeon_device *rdev)
3822{
3823 u32 ih_cntl = RREG32(IH_CNTL);
3824 u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
3825
3826 ih_cntl |= ENABLE_INTR;
3827 ih_rb_cntl |= IH_RB_ENABLE;
3828 WREG32(IH_CNTL, ih_cntl);
3829 WREG32(IH_RB_CNTL, ih_rb_cntl);
3830 rdev->ih.enabled = true;
3831}
3832
Alex Deucher45f9a392010-03-24 13:55:51 -04003833void r600_disable_interrupts(struct radeon_device *rdev)
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003834{
3835 u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
3836 u32 ih_cntl = RREG32(IH_CNTL);
3837
3838 ih_rb_cntl &= ~IH_RB_ENABLE;
3839 ih_cntl &= ~ENABLE_INTR;
3840 WREG32(IH_RB_CNTL, ih_rb_cntl);
3841 WREG32(IH_CNTL, ih_cntl);
3842 /* set rptr, wptr to 0 */
3843 WREG32(IH_RB_RPTR, 0);
3844 WREG32(IH_RB_WPTR, 0);
3845 rdev->ih.enabled = false;
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003846 rdev->ih.rptr = 0;
3847}
3848
Alex Deuchere0df1ac2009-12-04 15:12:21 -05003849static void r600_disable_interrupt_state(struct radeon_device *rdev)
3850{
3851 u32 tmp;
3852
Alex Deucher3555e532010-10-08 12:09:12 -04003853 WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
Alex Deucher4d756582012-09-27 15:08:35 -04003854 tmp = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
3855 WREG32(DMA_CNTL, tmp);
Alex Deuchere0df1ac2009-12-04 15:12:21 -05003856 WREG32(GRBM_INT_CNTL, 0);
3857 WREG32(DxMODE_INT_MASK, 0);
Alex Deucher6f34be52010-11-21 10:59:01 -05003858 WREG32(D1GRPH_INTERRUPT_CONTROL, 0);
3859 WREG32(D2GRPH_INTERRUPT_CONTROL, 0);
Alex Deuchere0df1ac2009-12-04 15:12:21 -05003860 if (ASIC_IS_DCE3(rdev)) {
3861 WREG32(DCE3_DACA_AUTODETECT_INT_CONTROL, 0);
3862 WREG32(DCE3_DACB_AUTODETECT_INT_CONTROL, 0);
3863 tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
3864 WREG32(DC_HPD1_INT_CONTROL, tmp);
3865 tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
3866 WREG32(DC_HPD2_INT_CONTROL, tmp);
3867 tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
3868 WREG32(DC_HPD3_INT_CONTROL, tmp);
3869 tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
3870 WREG32(DC_HPD4_INT_CONTROL, tmp);
3871 if (ASIC_IS_DCE32(rdev)) {
3872 tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
Alex Deucher5898b1f2010-03-24 13:57:29 -04003873 WREG32(DC_HPD5_INT_CONTROL, tmp);
Alex Deuchere0df1ac2009-12-04 15:12:21 -05003874 tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
Alex Deucher5898b1f2010-03-24 13:57:29 -04003875 WREG32(DC_HPD6_INT_CONTROL, tmp);
Rafał Miłeckic6543a62012-04-28 23:35:24 +02003876 tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3877 WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, tmp);
3878 tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3879 WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, tmp);
Alex Deucherf122c612012-03-30 08:59:57 -04003880 } else {
3881 tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3882 WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
3883 tmp = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3884 WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, tmp);
Alex Deuchere0df1ac2009-12-04 15:12:21 -05003885 }
3886 } else {
3887 WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
3888 WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
3889 tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
Alex Deucher5898b1f2010-03-24 13:57:29 -04003890 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
Alex Deuchere0df1ac2009-12-04 15:12:21 -05003891 tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
Alex Deucher5898b1f2010-03-24 13:57:29 -04003892 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
Alex Deuchere0df1ac2009-12-04 15:12:21 -05003893 tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
Alex Deucher5898b1f2010-03-24 13:57:29 -04003894 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
Alex Deucherf122c612012-03-30 08:59:57 -04003895 tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3896 WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
3897 tmp = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3898 WREG32(HDMI1_AUDIO_PACKET_CONTROL, tmp);
Alex Deuchere0df1ac2009-12-04 15:12:21 -05003899 }
3900}
3901
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003902int r600_irq_init(struct radeon_device *rdev)
3903{
3904 int ret = 0;
3905 int rb_bufsz;
3906 u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
3907
3908 /* allocate ring */
Jerome Glisse0c452492010-01-15 14:44:37 +01003909 ret = r600_ih_ring_alloc(rdev);
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003910 if (ret)
3911 return ret;
3912
3913 /* disable irqs */
3914 r600_disable_interrupts(rdev);
3915
3916 /* init rlc */
Alex Deucher2948f5e2013-04-12 13:52:52 -04003917 if (rdev->family >= CHIP_CEDAR)
3918 ret = evergreen_rlc_resume(rdev);
3919 else
3920 ret = r600_rlc_resume(rdev);
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003921 if (ret) {
3922 r600_ih_ring_fini(rdev);
3923 return ret;
3924 }
3925
3926 /* setup interrupt control */
3927 /* set dummy read address to ring address */
3928 WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
3929 interrupt_cntl = RREG32(INTERRUPT_CNTL);
3930 /* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
3931 * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
3932 */
3933 interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
3934 /* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
3935 interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
3936 WREG32(INTERRUPT_CNTL, interrupt_cntl);
3937
3938 WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
3939 rb_bufsz = drm_order(rdev->ih.ring_size / 4);
3940
3941 ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
3942 IH_WPTR_OVERFLOW_CLEAR |
3943 (rb_bufsz << 1));
Alex Deucher724c80e2010-08-27 18:25:25 -04003944
3945 if (rdev->wb.enabled)
3946 ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;
3947
3948 /* set the writeback address whether it's enabled or not */
3949 WREG32(IH_RB_WPTR_ADDR_LO, (rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFFFFFFFC);
3950 WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFF);
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003951
3952 WREG32(IH_RB_CNTL, ih_rb_cntl);
3953
3954 /* set rptr, wptr to 0 */
3955 WREG32(IH_RB_RPTR, 0);
3956 WREG32(IH_RB_WPTR, 0);
3957
3958 /* Default settings for IH_CNTL (disabled at first) */
3959 ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10);
3960 /* RPTR_REARM only works if msi's are enabled */
3961 if (rdev->msi_enabled)
3962 ih_cntl |= RPTR_REARM;
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003963 WREG32(IH_CNTL, ih_cntl);
3964
3965 /* force the active interrupt state to all disabled */
Alex Deucher45f9a392010-03-24 13:55:51 -04003966 if (rdev->family >= CHIP_CEDAR)
3967 evergreen_disable_interrupt_state(rdev);
3968 else
3969 r600_disable_interrupt_state(rdev);
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003970
Dave Airlie20998102012-04-03 11:53:05 +01003971 /* at this point everything should be setup correctly to enable master */
3972 pci_set_master(rdev->pdev);
3973
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003974 /* enable irqs */
3975 r600_enable_interrupts(rdev);
3976
3977 return ret;
3978}
3979
Jerome Glisse0c452492010-01-15 14:44:37 +01003980void r600_irq_suspend(struct radeon_device *rdev)
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003981{
Alex Deucher45f9a392010-03-24 13:55:51 -04003982 r600_irq_disable(rdev);
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003983 r600_rlc_stop(rdev);
Jerome Glisse0c452492010-01-15 14:44:37 +01003984}
3985
3986void r600_irq_fini(struct radeon_device *rdev)
3987{
3988 r600_irq_suspend(rdev);
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003989 r600_ih_ring_fini(rdev);
3990}
3991
int r600_irq_set(struct radeon_device *rdev)
{
	u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
	u32 mode_int = 0;
	u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
	u32 grbm_int_cntl = 0;
	u32 hdmi0, hdmi1;
	u32 d1grph = 0, d2grph = 0;
	u32 dma_cntl;

	if (!rdev->irq.installed) {
		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
		return -EINVAL;
	}
	/* don't enable anything if the ih is disabled */
	if (!rdev->ih.enabled) {
		r600_disable_interrupts(rdev);
		/* force the active interrupt state to all disabled */
		r600_disable_interrupt_state(rdev);
		return 0;
	}

	if (ASIC_IS_DCE3(rdev)) {
		hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
		if (ASIC_IS_DCE32(rdev)) {
			hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
			hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
			hdmi0 = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
			hdmi1 = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
		} else {
			hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
			hdmi1 = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
		}
	} else {
		hpd1 = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
		hdmi1 = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
	}
	dma_cntl = RREG32(DMA_CNTL) & ~TRAP_ENABLE;

	if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
		DRM_DEBUG("r600_irq_set: sw int\n");
		cp_int_cntl |= RB_INT_ENABLE;
		cp_int_cntl |= TIME_STAMP_INT_ENABLE;
	}

	if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) {
		DRM_DEBUG("r600_irq_set: sw int dma\n");
		dma_cntl |= TRAP_ENABLE;
	}

	if (rdev->irq.crtc_vblank_int[0] ||
	    atomic_read(&rdev->irq.pflip[0])) {
		DRM_DEBUG("r600_irq_set: vblank 0\n");
		mode_int |= D1MODE_VBLANK_INT_MASK;
	}
	if (rdev->irq.crtc_vblank_int[1] ||
	    atomic_read(&rdev->irq.pflip[1])) {
		DRM_DEBUG("r600_irq_set: vblank 1\n");
		mode_int |= D2MODE_VBLANK_INT_MASK;
	}
	if (rdev->irq.hpd[0]) {
		DRM_DEBUG("r600_irq_set: hpd 1\n");
		hpd1 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[1]) {
		DRM_DEBUG("r600_irq_set: hpd 2\n");
		hpd2 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[2]) {
		DRM_DEBUG("r600_irq_set: hpd 3\n");
		hpd3 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[3]) {
		DRM_DEBUG("r600_irq_set: hpd 4\n");
		hpd4 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[4]) {
		DRM_DEBUG("r600_irq_set: hpd 5\n");
		hpd5 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[5]) {
		DRM_DEBUG("r600_irq_set: hpd 6\n");
		hpd6 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.afmt[0]) {
		DRM_DEBUG("r600_irq_set: hdmi 0\n");
		hdmi0 |= HDMI0_AZ_FORMAT_WTRIG_MASK;
	}
	if (rdev->irq.afmt[1]) {
		DRM_DEBUG("r600_irq_set: hdmi 1\n");
		hdmi1 |= HDMI0_AZ_FORMAT_WTRIG_MASK;
	}

	WREG32(CP_INT_CNTL, cp_int_cntl);
	WREG32(DMA_CNTL, dma_cntl);
	WREG32(DxMODE_INT_MASK, mode_int);
	WREG32(D1GRPH_INTERRUPT_CONTROL, d1grph);
	WREG32(D2GRPH_INTERRUPT_CONTROL, d2grph);
	WREG32(GRBM_INT_CNTL, grbm_int_cntl);
	if (ASIC_IS_DCE3(rdev)) {
		WREG32(DC_HPD1_INT_CONTROL, hpd1);
		WREG32(DC_HPD2_INT_CONTROL, hpd2);
		WREG32(DC_HPD3_INT_CONTROL, hpd3);
		WREG32(DC_HPD4_INT_CONTROL, hpd4);
		if (ASIC_IS_DCE32(rdev)) {
			WREG32(DC_HPD5_INT_CONTROL, hpd5);
			WREG32(DC_HPD6_INT_CONTROL, hpd6);
			WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, hdmi0);
			WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, hdmi1);
		} else {
			WREG32(HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
			WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, hdmi1);
		}
	} else {
		WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
		WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
		WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3);
		WREG32(HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
		WREG32(HDMI1_AUDIO_PACKET_CONTROL, hdmi1);
	}

	return 0;
}

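/**
 * r600_irq_ack - snapshot and acknowledge interrupt status
 *
 * @rdev: radeon_device pointer
 *
 * Latches the display/HPD/HDMI status registers into
 * rdev->irq.stat_regs and writes back the ack bits so the
 * sources can trigger again.
 */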
static void r600_irq_ack(struct radeon_device *rdev)
{
	u32 tmp;

	if (ASIC_IS_DCE3(rdev)) {
		rdev->irq.stat_regs.r600.disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS);
		rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE);
		rdev->irq.stat_regs.r600.disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2);
		if (ASIC_IS_DCE32(rdev)) {
			rdev->irq.stat_regs.r600.hdmi0_status = RREG32(AFMT_STATUS + DCE3_HDMI_OFFSET0);
			rdev->irq.stat_regs.r600.hdmi1_status = RREG32(AFMT_STATUS + DCE3_HDMI_OFFSET1);
		} else {
			rdev->irq.stat_regs.r600.hdmi0_status = RREG32(HDMI0_STATUS);
			rdev->irq.stat_regs.r600.hdmi1_status = RREG32(DCE3_HDMI1_STATUS);
		}
	} else {
		rdev->irq.stat_regs.r600.disp_int = RREG32(DISP_INTERRUPT_STATUS);
		rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
		rdev->irq.stat_regs.r600.disp_int_cont2 = 0;
		rdev->irq.stat_regs.r600.hdmi0_status = RREG32(HDMI0_STATUS);
		rdev->irq.stat_regs.r600.hdmi1_status = RREG32(HDMI1_STATUS);
	}
	rdev->irq.stat_regs.r600.d1grph_int = RREG32(D1GRPH_INTERRUPT_STATUS);
	rdev->irq.stat_regs.r600.d2grph_int = RREG32(D2GRPH_INTERRUPT_STATUS);

	if (rdev->irq.stat_regs.r600.d1grph_int & DxGRPH_PFLIP_INT_OCCURRED)
		WREG32(D1GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR);
	if (rdev->irq.stat_regs.r600.d2grph_int & DxGRPH_PFLIP_INT_OCCURRED)
		WREG32(D2GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR);
	if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT)
		WREG32(D1MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
	if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT)
		WREG32(D1MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
	if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT)
		WREG32(D2MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
	if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT)
		WREG32(D2MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
	if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
		if (ASIC_IS_DCE3(rdev)) {
			tmp = RREG32(DC_HPD1_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD1_INT_CONTROL, tmp);
		} else {
			tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
		}
	}
	if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
		if (ASIC_IS_DCE3(rdev)) {
			tmp = RREG32(DC_HPD2_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD2_INT_CONTROL, tmp);
		} else {
			tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
		}
	}
	if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
		if (ASIC_IS_DCE3(rdev)) {
			tmp = RREG32(DC_HPD3_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD3_INT_CONTROL, tmp);
		} else {
			tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
		}
	}
	if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
		tmp = RREG32(DC_HPD4_INT_CONTROL);
		tmp |= DC_HPDx_INT_ACK;
		WREG32(DC_HPD4_INT_CONTROL, tmp);
	}
	if (ASIC_IS_DCE32(rdev)) {
		if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
			tmp = RREG32(DC_HPD5_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD5_INT_CONTROL, tmp);
		}
		if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
			tmp = RREG32(DC_HPD6_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD6_INT_CONTROL, tmp);
		}
		if (rdev->irq.stat_regs.r600.hdmi0_status & AFMT_AZ_FORMAT_WTRIG) {
			tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0);
			tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
			WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, tmp);
		}
		if (rdev->irq.stat_regs.r600.hdmi1_status & AFMT_AZ_FORMAT_WTRIG) {
			tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1);
			tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
			WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, tmp);
		}
	} else {
		if (rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG) {
			tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL);
			tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
			WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
		}
		if (rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG) {
			if (ASIC_IS_DCE3(rdev)) {
				tmp = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL);
				tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
				WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, tmp);
			} else {
				tmp = RREG32(HDMI1_AUDIO_PACKET_CONTROL);
				tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
				WREG32(HDMI1_AUDIO_PACKET_CONTROL, tmp);
			}
		}
	}
}

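/**
 * r600_irq_disable - disable all interrupt sources
 *
 * @rdev: radeon_device pointer
 *
 * Disables interrupt generation, acknowledges anything already
 * latched, and forces the active interrupt state to all disabled.
 */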
void r600_irq_disable(struct radeon_device *rdev)
{
	r600_disable_interrupts(rdev);
	/* Wait and acknowledge irq */
	mdelay(1);
	r600_irq_ack(rdev);
	r600_disable_interrupt_state(rdev);
}

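/**
 * r600_get_ih_wptr - fetch the current IH ring write pointer
 *
 * @rdev: radeon_device pointer
 *
 * Reads the write pointer from the writeback page if enabled,
 * otherwise from the IH_RB_WPTR register, and handles ring
 * overflow by skipping past the overwritten entries.
 */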
static u32 r600_get_ih_wptr(struct radeon_device *rdev)
{
	u32 wptr, tmp;

	if (rdev->wb.enabled)
		wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
	else
		wptr = RREG32(IH_RB_WPTR);

	if (wptr & RB_OVERFLOW) {
		/* When a ring buffer overflow happens, start parsing the
		 * interrupts from the last not-overwritten vector (wptr + 16).
		 * Hopefully this allows us to catch up.
		 */
		dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
			 wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
		rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
		tmp = RREG32(IH_RB_CNTL);
		tmp |= IH_WPTR_OVERFLOW_CLEAR;
		WREG32(IH_RB_CNTL, tmp);
	}
	return (wptr & rdev->ih.ptr_mask);
}

/* r600 IV Ring
 * Each IV ring entry is 128 bits:
 * [7:0]    - interrupt source id
 * [31:8]   - reserved
 * [59:32]  - interrupt source data
 * [127:60] - reserved
 *
 * The basic interrupt vector entries
 * are decoded as follows:
 * src_id  src_data  description
 *      1         0  D1 Vblank
 *      1         1  D1 Vline
 *      5         0  D2 Vblank
 *      5         1  D2 Vline
 *     19         0  FP Hot plug detection A
 *     19         1  FP Hot plug detection B
 *     19         2  DAC A auto-detection
 *     19         3  DAC B auto-detection
 *     21         4  HDMI block A
 *     21         5  HDMI block B
 *    176         -  CP_INT RB
 *    177         -  CP_INT IB1
 *    178         -  CP_INT IB2
 *    181         -  EOP Interrupt
 *    233         -  GUI Idle
 *
 * Note, these are based on r600 and may need to be
 * adjusted or added to on newer asics
 */

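/**
 * r600_irq_process - walk the IH ring and dispatch interrupts
 *
 * @rdev: radeon_device pointer
 *
 * Decodes each IV ring entry between rptr and wptr per the table
 * above and dispatches it (vblank, hotplug, hdmi audio, fences),
 * then updates the read pointer. Returns IRQ_HANDLED if entries
 * were processed, IRQ_NONE otherwise.
 */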
int r600_irq_process(struct radeon_device *rdev)
{
	u32 wptr;
	u32 rptr;
	u32 src_id, src_data;
	u32 ring_index;
	bool queue_hotplug = false;
	bool queue_hdmi = false;

	if (!rdev->ih.enabled || rdev->shutdown)
		return IRQ_NONE;

	/* No MSIs, need a dummy read to flush PCI DMAs */
	if (!rdev->msi_enabled)
		RREG32(IH_RB_WPTR);

	wptr = r600_get_ih_wptr(rdev);

restart_ih:
	/* is somebody else already processing irqs? */
	if (atomic_xchg(&rdev->ih.lock, 1))
		return IRQ_NONE;

	rptr = rdev->ih.rptr;
	DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);

	/* Order reading of wptr vs. reading of IH ring data */
	rmb();

	/* display interrupts */
	r600_irq_ack(rdev);

	while (rptr != wptr) {
		/* wptr/rptr are in bytes! */
		ring_index = rptr / 4;
		src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
		src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;

		switch (src_id) {
		case 1: /* D1 vblank/vline */
			switch (src_data) {
			case 0: /* D1 vblank */
				if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT) {
					if (rdev->irq.crtc_vblank_int[0]) {
						drm_handle_vblank(rdev->ddev, 0);
						rdev->pm.vblank_sync = true;
						wake_up(&rdev->irq.vblank_queue);
					}
					if (atomic_read(&rdev->irq.pflip[0]))
						radeon_crtc_handle_flip(rdev, 0);
					rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D1 vblank\n");
				}
				break;
			case 1: /* D1 vline */
				if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D1 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 5: /* D2 vblank/vline */
			switch (src_data) {
			case 0: /* D2 vblank */
				if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT) {
					if (rdev->irq.crtc_vblank_int[1]) {
						drm_handle_vblank(rdev->ddev, 1);
						rdev->pm.vblank_sync = true;
						wake_up(&rdev->irq.vblank_queue);
					}
					if (atomic_read(&rdev->irq.pflip[1]))
						radeon_crtc_handle_flip(rdev, 1);
					rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D2 vblank\n");
				}
				break;
			case 1: /* D2 vline */
				if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D2 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 19: /* HPD/DAC hotplug */
			switch (src_data) {
			case 0:
				if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD1_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD1\n");
				}
				break;
			case 1:
				if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD2_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD2\n");
				}
				break;
			case 4:
				if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD3_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD3\n");
				}
				break;
			case 5:
				if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD4_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD4\n");
				}
				break;
			case 10:
				if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD5\n");
				}
				break;
			case 12:
				if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD6\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 21: /* hdmi */
			switch (src_data) {
			case 4:
				if (rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG) {
					rdev->irq.stat_regs.r600.hdmi0_status &= ~HDMI0_AZ_FORMAT_WTRIG;
					queue_hdmi = true;
					DRM_DEBUG("IH: HDMI0\n");
				}
				break;
			case 5:
				if (rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG) {
					rdev->irq.stat_regs.r600.hdmi1_status &= ~HDMI0_AZ_FORMAT_WTRIG;
					queue_hdmi = true;
					DRM_DEBUG("IH: HDMI1\n");
				}
				break;
			default:
				DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 176: /* CP_INT in ring buffer */
		case 177: /* CP_INT in IB1 */
		case 178: /* CP_INT in IB2 */
			DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
			break;
		case 181: /* CP EOP event */
			DRM_DEBUG("IH: CP EOP\n");
			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
			break;
		case 224: /* DMA trap event */
			DRM_DEBUG("IH: DMA trap\n");
			radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
			break;
		case 233: /* GUI IDLE */
			DRM_DEBUG("IH: GUI idle\n");
			break;
		default:
			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
			break;
		}

		/* wptr/rptr are in bytes! */
		rptr += 16;
		rptr &= rdev->ih.ptr_mask;
	}
	if (queue_hotplug)
		schedule_work(&rdev->hotplug_work);
	if (queue_hdmi)
		schedule_work(&rdev->audio_work);
	rdev->ih.rptr = rptr;
	WREG32(IH_RB_RPTR, rdev->ih.rptr);
	atomic_set(&rdev->ih.lock, 0);

	/* make sure wptr hasn't changed while processing */
	wptr = r600_get_ih_wptr(rdev);
	if (wptr != rptr)
		goto restart_ih;

	return IRQ_HANDLED;
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int r600_debugfs_mc_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;

	DREG32_SYS(m, rdev, R_000E50_SRBM_STATUS);
	DREG32_SYS(m, rdev, VM_L2_STATUS);
	return 0;
}

static struct drm_info_list r600_mc_info_list[] = {
	{"r600_mc_info", r600_debugfs_mc_info, 0, NULL},
};
#endif

int r600_debugfs_mc_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, r600_mc_info_list, ARRAY_SIZE(r600_mc_info_list));
#else
	return 0;
#endif
}

/**
 * r600_ioctl_wait_idle - flush host path cache on wait idle ioctl
 * @rdev: radeon device structure
 * @bo: buffer object struct which userspace is waiting for idle
 *
 * Some R6XX/R7XX ASICs don't seem to take into account HDP flushes
 * performed through the ring buffer, which leads to corruption in
 * rendering, see http://bugzilla.kernel.org/show_bug.cgi?id=15186.
 * To avoid this we perform the HDP flush directly by writing the
 * register through MMIO.
 */
void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
{
	/* r7xx hw bug.  write to HDP_DEBUG1 followed by fb read
	 * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL.
	 * This seems to cause problems on some AGP cards. Just use the old
	 * method for them.
	 */
	if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
	    rdev->vram_scratch.ptr && !(rdev->flags & RADEON_IS_AGP)) {
		void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
		u32 tmp;

		WREG32(HDP_DEBUG1, 0);
		tmp = readl((void __iomem *)ptr);
	} else
		WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
}

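/**
 * r600_set_pcie_lanes - request a new PCIE lane width
 *
 * @rdev: radeon_device pointer
 * @lanes: requested lane count (0/1/2/4/8/12/16)
 *
 * Programs PCIE_LC_LINK_WIDTH_CNTL to renegotiate the link
 * width. Skipped on IGPs, non-PCIE boards and X2 cards.
 */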
void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes)
{
	u32 link_width_cntl, mask;

	if (rdev->flags & RADEON_IS_IGP)
		return;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return;

	/* x2 cards have a special sequence */
	if (ASIC_IS_X2(rdev))
		return;

	radeon_gui_idle(rdev);

	switch (lanes) {
	case 0:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X0;
		break;
	case 1:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X1;
		break;
	case 2:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X2;
		break;
	case 4:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X4;
		break;
	case 8:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X8;
		break;
	case 12:
		/* not actually supported */
		mask = RADEON_PCIE_LC_LINK_WIDTH_X12;
		break;
	case 16:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X16;
		break;
	default:
		DRM_ERROR("invalid pcie lane request: %d\n", lanes);
		return;
	}

	link_width_cntl = RREG32_PCIE_PORT(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
	link_width_cntl &= ~RADEON_PCIE_LC_LINK_WIDTH_MASK;
	link_width_cntl |= mask << RADEON_PCIE_LC_LINK_WIDTH_SHIFT;
	link_width_cntl |= (RADEON_PCIE_LC_RECONFIG_NOW |
			    R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE);

	WREG32_PCIE_PORT(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
}

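/**
 * r600_get_pcie_lanes - report the current PCIE lane width
 *
 * @rdev: radeon_device pointer
 *
 * Reads the negotiated lane count from PCIE_LC_LINK_WIDTH_CNTL.
 * Returns the lane count, or 0 on IGPs, non-PCIE and X2 cards.
 */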
int r600_get_pcie_lanes(struct radeon_device *rdev)
{
	u32 link_width_cntl;

	if (rdev->flags & RADEON_IS_IGP)
		return 0;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return 0;

	/* x2 cards have a special sequence */
	if (ASIC_IS_X2(rdev))
		return 0;

	radeon_gui_idle(rdev);

	link_width_cntl = RREG32_PCIE_PORT(RADEON_PCIE_LC_LINK_WIDTH_CNTL);

	switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) {
	case RADEON_PCIE_LC_LINK_WIDTH_X1:
		return 1;
	case RADEON_PCIE_LC_LINK_WIDTH_X2:
		return 2;
	case RADEON_PCIE_LC_LINK_WIDTH_X4:
		return 4;
	case RADEON_PCIE_LC_LINK_WIDTH_X8:
		return 8;
	case RADEON_PCIE_LC_LINK_WIDTH_X12:
		/* not actually supported */
		return 12;
	case RADEON_PCIE_LC_LINK_WIDTH_X0:
	case RADEON_PCIE_LC_LINK_WIDTH_X16:
	default:
		return 16;
	}
}

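/**
 * r600_pcie_gen2_enable - try to switch the link to gen2 speeds
 *
 * @rdev: radeon_device pointer
 *
 * Enables PCIE gen2 link speeds on RV6xx+ PCIE dGPUs when the
 * upstream bridge supports 5.0 GT/s or faster and the
 * radeon_pcie_gen2 module parameter is not set to 0.
 */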
static void r600_pcie_gen2_enable(struct radeon_device *rdev)
{
	u32 link_width_cntl, lanes, speed_cntl, training_cntl, tmp;
	u16 link_cntl2;

	if (radeon_pcie_gen2 == 0)
		return;

	if (rdev->flags & RADEON_IS_IGP)
		return;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return;

	/* x2 cards have a special sequence */
	if (ASIC_IS_X2(rdev))
		return;

	/* only RV6xx+ chips are supported */
	if (rdev->family <= CHIP_R600)
		return;

	if ((rdev->pdev->bus->max_bus_speed != PCIE_SPEED_5_0GT) &&
	    (rdev->pdev->bus->max_bus_speed != PCIE_SPEED_8_0GT))
		return;

	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
	if (speed_cntl & LC_CURRENT_DATA_RATE) {
		DRM_INFO("PCIE gen 2 link speeds already enabled\n");
		return;
	}

	DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");

	/* 55 nm r6xx asics */
	if ((rdev->family == CHIP_RV670) ||
	    (rdev->family == CHIP_RV620) ||
	    (rdev->family == CHIP_RV635)) {
		/* advertise upconfig capability */
		link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
		link_width_cntl &= ~LC_UPCONFIGURE_DIS;
		WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
		link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
		if (link_width_cntl & LC_RENEGOTIATION_SUPPORT) {
			lanes = (link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT;
			link_width_cntl &= ~(LC_LINK_WIDTH_MASK |
					     LC_RECONFIG_ARC_MISSING_ESCAPE);
			link_width_cntl |= lanes | LC_RECONFIG_NOW | LC_RENEGOTIATE_EN;
			WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
		} else {
			link_width_cntl |= LC_UPCONFIGURE_DIS;
			WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
		}
	}

	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
	if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
	    (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {

		/* 55 nm r6xx asics */
		if ((rdev->family == CHIP_RV670) ||
		    (rdev->family == CHIP_RV620) ||
		    (rdev->family == CHIP_RV635)) {
			WREG32(MM_CFGREGS_CNTL, 0x8);
			link_cntl2 = RREG32(0x4088);
			WREG32(MM_CFGREGS_CNTL, 0);
			/* not supported yet */
			if (link_cntl2 & SELECTABLE_DEEMPHASIS)
				return;
		}

		speed_cntl &= ~LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK;
		speed_cntl |= (0x3 << LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT);
		speed_cntl &= ~LC_VOLTAGE_TIMER_SEL_MASK;
		speed_cntl &= ~LC_FORCE_DIS_HW_SPEED_CHANGE;
		speed_cntl |= LC_FORCE_EN_HW_SPEED_CHANGE;
		WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);

		tmp = RREG32(0x541c);
		WREG32(0x541c, tmp | 0x8);
		WREG32(MM_CFGREGS_CNTL, MM_WR_TO_CFG_EN);
		link_cntl2 = RREG16(0x4088);
		link_cntl2 &= ~TARGET_LINK_SPEED_MASK;
		link_cntl2 |= 0x2;
		WREG16(0x4088, link_cntl2);
		WREG32(MM_CFGREGS_CNTL, 0);

		if ((rdev->family == CHIP_RV670) ||
		    (rdev->family == CHIP_RV620) ||
		    (rdev->family == CHIP_RV635)) {
			training_cntl = RREG32_PCIE_PORT(PCIE_LC_TRAINING_CNTL);
			training_cntl &= ~LC_POINT_7_PLUS_EN;
			WREG32_PCIE_PORT(PCIE_LC_TRAINING_CNTL, training_cntl);
		} else {
			speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
			speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
			WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
		}

		speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
		speed_cntl |= LC_GEN2_EN_STRAP;
		WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);

	} else {
		link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
		/* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
		if (1)
			link_width_cntl |= LC_UPCONFIGURE_DIS;
		else
			link_width_cntl &= ~LC_UPCONFIGURE_DIS;
		WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
	}
}

/**
 * r600_get_gpu_clock_counter - return GPU clock counter snapshot
 *
 * @rdev: radeon_device pointer
 *
 * Fetches a GPU clock counter snapshot (R6xx-cayman).
 * Returns the 64 bit clock counter snapshot.
 */
uint64_t r600_get_gpu_clock_counter(struct radeon_device *rdev)
{
	uint64_t clock;

	mutex_lock(&rdev->gpu_clock_mutex);
	WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1);
	clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) |
		((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
	mutex_unlock(&rdev->gpu_clock_mutex);
	return clock;
}