/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
/* RS600 / Radeon X1250/X1270 integrated GPU
 *
 * This file gathers the functions specific to RS600, the IGP of the
 * X1250/X1270 family supporting Intel CPUs (while RS690/RS740 is the
 * X1250/X1270 supporting AMD CPUs). The display engine is the AVIVO one,
 * the BIOS is an ATOMBIOS, and the 3D blocks are those of the R4XX family.
 * The GART is different from the RS400 one and is very close to the one
 * of the R600 family (R600 likely being an evolution of the RS600 GART
 * block).
 */
#include "drmP.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "atom.h"
#include "rs600d.h"

#include "rs600_reg_safe.h"

void rs600_gpu_init(struct radeon_device *rdev);
int rs600_mc_wait_for_idle(struct radeon_device *rdev);

void rs600_pre_page_flip(struct radeon_device *rdev, int crtc)
{
	/* enable the pflip int */
	radeon_irq_kms_pflip_irq_get(rdev, crtc);
}

void rs600_post_page_flip(struct radeon_device *rdev, int crtc)
{
	/* disable the pflip int */
	radeon_irq_kms_pflip_irq_put(rdev, crtc);
}

u32 rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
	u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);
	int i;

	/* Lock the graphics update lock */
	tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
	WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);

	/* update the scanout addresses */
	WREG32(AVIVO_D1GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
	       (u32)crtc_base);
	WREG32(AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
	       (u32)crtc_base);

	/* Wait for update_pending to go high. */
	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING)
			break;
		udelay(1);
	}
	DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");

	/* Unlock the lock, so double-buffering can take place inside vblank */
	tmp &= ~AVIVO_D1GRPH_UPDATE_LOCK;
	WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);

	/* Return current update_pending status: */
	return RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING;
}

void rs600_pm_misc(struct radeon_device *rdev)
{
	int requested_index = rdev->pm.requested_power_state_index;
	struct radeon_power_state *ps = &rdev->pm.power_state[requested_index];
	struct radeon_voltage *voltage = &ps->clock_info[0].voltage;
	u32 tmp, dyn_pwrmgt_sclk_length, dyn_sclk_vol_cntl;
	u32 hdp_dyn_cntl, /*mc_host_dyn_cntl,*/ dyn_backbias_cntl;

	if ((voltage->type == VOLTAGE_GPIO) && (voltage->gpio.valid)) {
		if (ps->misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
			tmp = RREG32(voltage->gpio.reg);
			if (voltage->active_high)
				tmp |= voltage->gpio.mask;
			else
				tmp &= ~(voltage->gpio.mask);
			WREG32(voltage->gpio.reg, tmp);
			if (voltage->delay)
				udelay(voltage->delay);
		} else {
			tmp = RREG32(voltage->gpio.reg);
			if (voltage->active_high)
				tmp &= ~voltage->gpio.mask;
			else
				tmp |= voltage->gpio.mask;
			WREG32(voltage->gpio.reg, tmp);
			if (voltage->delay)
				udelay(voltage->delay);
		}
	} else if (voltage->type == VOLTAGE_VDDC)
		radeon_atom_set_voltage(rdev, voltage->vddc_id, SET_VOLTAGE_TYPE_ASIC_VDDC);

	dyn_pwrmgt_sclk_length = RREG32_PLL(DYN_PWRMGT_SCLK_LENGTH);
	dyn_pwrmgt_sclk_length &= ~REDUCED_POWER_SCLK_HILEN(0xf);
	dyn_pwrmgt_sclk_length &= ~REDUCED_POWER_SCLK_LOLEN(0xf);
	if (ps->misc & ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN) {
		if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_2) {
			dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_HILEN(2);
			dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_LOLEN(2);
		} else if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_4) {
			dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_HILEN(4);
			dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_LOLEN(4);
		}
	} else {
		dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_HILEN(1);
		dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_LOLEN(1);
	}
	WREG32_PLL(DYN_PWRMGT_SCLK_LENGTH, dyn_pwrmgt_sclk_length);

	dyn_sclk_vol_cntl = RREG32_PLL(DYN_SCLK_VOL_CNTL);
	if (ps->misc & ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN) {
		dyn_sclk_vol_cntl |= IO_CG_VOLTAGE_DROP;
		if (voltage->delay) {
			dyn_sclk_vol_cntl |= VOLTAGE_DROP_SYNC;
			dyn_sclk_vol_cntl |= VOLTAGE_DELAY_SEL(voltage->delay);
		} else
			dyn_sclk_vol_cntl &= ~VOLTAGE_DROP_SYNC;
	} else
		dyn_sclk_vol_cntl &= ~IO_CG_VOLTAGE_DROP;
	WREG32_PLL(DYN_SCLK_VOL_CNTL, dyn_sclk_vol_cntl);

	hdp_dyn_cntl = RREG32_PLL(HDP_DYN_CNTL);
	if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN)
		hdp_dyn_cntl &= ~HDP_FORCEON;
	else
		hdp_dyn_cntl |= HDP_FORCEON;
	WREG32_PLL(HDP_DYN_CNTL, hdp_dyn_cntl);
#if 0
	/* mc_host_dyn seems to cause hangs from time to time */
	mc_host_dyn_cntl = RREG32_PLL(MC_HOST_DYN_CNTL);
	if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_MC_HOST_BLOCK_EN)
		mc_host_dyn_cntl &= ~MC_HOST_FORCEON;
	else
		mc_host_dyn_cntl |= MC_HOST_FORCEON;
	WREG32_PLL(MC_HOST_DYN_CNTL, mc_host_dyn_cntl);
#endif
	dyn_backbias_cntl = RREG32_PLL(DYN_BACKBIAS_CNTL);
	if (ps->misc & ATOM_PM_MISCINFO2_DYNAMIC_BACK_BIAS_EN)
		dyn_backbias_cntl |= IO_CG_BACKBIAS_EN;
	else
		dyn_backbias_cntl &= ~IO_CG_BACKBIAS_EN;
	WREG32_PLL(DYN_BACKBIAS_CNTL, dyn_backbias_cntl);

	/* set pcie lanes */
	if ((rdev->flags & RADEON_IS_PCIE) &&
	    !(rdev->flags & RADEON_IS_IGP) &&
	    rdev->asic->set_pcie_lanes &&
	    (ps->pcie_lanes !=
	     rdev->pm.power_state[rdev->pm.current_power_state_index].pcie_lanes)) {
		radeon_set_pcie_lanes(rdev,
				      ps->pcie_lanes);
		DRM_DEBUG("Setting: p: %d\n", ps->pcie_lanes);
	}
}

void rs600_pm_prepare(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	u32 tmp;

	/* disable any active CRTCs */
	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
		radeon_crtc = to_radeon_crtc(crtc);
		if (radeon_crtc->enabled) {
			tmp = RREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset);
			tmp |= AVIVO_CRTC_DISP_READ_REQUEST_DISABLE;
			WREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
		}
	}
}

void rs600_pm_finish(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	u32 tmp;

	/* enable any active CRTCs */
	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
		radeon_crtc = to_radeon_crtc(crtc);
		if (radeon_crtc->enabled) {
			tmp = RREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset);
			tmp &= ~AVIVO_CRTC_DISP_READ_REQUEST_DISABLE;
			WREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
		}
	}
}

/* hpd for digital panel detect/disconnect */
bool rs600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
	u32 tmp;
	bool connected = false;

	switch (hpd) {
	case RADEON_HPD_1:
		tmp = RREG32(R_007D04_DC_HOT_PLUG_DETECT1_INT_STATUS);
		if (G_007D04_DC_HOT_PLUG_DETECT1_SENSE(tmp))
			connected = true;
		break;
	case RADEON_HPD_2:
		tmp = RREG32(R_007D14_DC_HOT_PLUG_DETECT2_INT_STATUS);
		if (G_007D14_DC_HOT_PLUG_DETECT2_SENSE(tmp))
			connected = true;
		break;
	default:
		break;
	}
	return connected;
}

void rs600_hpd_set_polarity(struct radeon_device *rdev,
			    enum radeon_hpd_id hpd)
{
	u32 tmp;
	bool connected = rs600_hpd_sense(rdev, hpd);

	switch (hpd) {
	case RADEON_HPD_1:
		tmp = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL);
		if (connected)
			tmp &= ~S_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(1);
		else
			tmp |= S_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(1);
		WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
		break;
	case RADEON_HPD_2:
		tmp = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL);
		if (connected)
			tmp &= ~S_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(1);
		else
			tmp |= S_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(1);
		WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
		break;
	default:
		break;
	}
}

void rs600_hpd_init(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		switch (radeon_connector->hpd.hpd) {
		case RADEON_HPD_1:
			WREG32(R_007D00_DC_HOT_PLUG_DETECT1_CONTROL,
			       S_007D00_DC_HOT_PLUG_DETECT1_EN(1));
			rdev->irq.hpd[0] = true;
			break;
		case RADEON_HPD_2:
			WREG32(R_007D10_DC_HOT_PLUG_DETECT2_CONTROL,
			       S_007D10_DC_HOT_PLUG_DETECT2_EN(1));
			rdev->irq.hpd[1] = true;
			break;
		default:
			break;
		}
		radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
	}
	if (rdev->irq.installed)
		rs600_irq_set(rdev);
}

void rs600_hpd_fini(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		switch (radeon_connector->hpd.hpd) {
		case RADEON_HPD_1:
			WREG32(R_007D00_DC_HOT_PLUG_DETECT1_CONTROL,
			       S_007D00_DC_HOT_PLUG_DETECT1_EN(0));
			rdev->irq.hpd[0] = false;
			break;
		case RADEON_HPD_2:
			WREG32(R_007D10_DC_HOT_PLUG_DETECT2_CONTROL,
			       S_007D10_DC_HOT_PLUG_DETECT2_EN(0));
			rdev->irq.hpd[1] = false;
			break;
		default:
			break;
		}
	}
}

void rs600_bm_disable(struct radeon_device *rdev)
{
	u32 tmp;

	/* disable bus mastering */
	pci_read_config_word(rdev->pdev, 0x4, (u16 *)&tmp);
	pci_write_config_word(rdev->pdev, 0x4, tmp & 0xFFFB);
	mdelay(1);
}

int rs600_asic_reset(struct radeon_device *rdev)
{
	struct rv515_mc_save save;
	u32 status, tmp;
	int ret = 0;

	status = RREG32(R_000E40_RBBM_STATUS);
	if (!G_000E40_GUI_ACTIVE(status)) {
		return 0;
	}
	/* Stops all mc clients */
	rv515_mc_stop(rdev, &save);
	status = RREG32(R_000E40_RBBM_STATUS);
	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
	/* stop CP */
	WREG32(RADEON_CP_CSQ_CNTL, 0);
	tmp = RREG32(RADEON_CP_RB_CNTL);
	WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
	WREG32(RADEON_CP_RB_RPTR_WR, 0);
	WREG32(RADEON_CP_RB_WPTR, 0);
	WREG32(RADEON_CP_RB_CNTL, tmp);
	pci_save_state(rdev->pdev);
	/* disable bus mastering */
	rs600_bm_disable(rdev);
	/* reset GA+VAP */
	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_VAP(1) |
					S_0000F0_SOFT_RESET_GA(1));
	RREG32(R_0000F0_RBBM_SOFT_RESET);
	mdelay(500);
	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
	mdelay(1);
	status = RREG32(R_000E40_RBBM_STATUS);
	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
	/* reset CP */
	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1));
	RREG32(R_0000F0_RBBM_SOFT_RESET);
	mdelay(500);
	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
	mdelay(1);
	status = RREG32(R_000E40_RBBM_STATUS);
	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
	/* reset MC */
	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_MC(1));
	RREG32(R_0000F0_RBBM_SOFT_RESET);
	mdelay(500);
	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
	mdelay(1);
	status = RREG32(R_000E40_RBBM_STATUS);
	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
	/* restore PCI & busmastering */
	pci_restore_state(rdev->pdev);
	/* Check if GPU is idle */
	if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) {
		dev_err(rdev->dev, "failed to reset GPU\n");
		rdev->gpu_lockup = true;
		ret = -1;
	} else
		dev_info(rdev->dev, "GPU reset succeeded\n");
	rv515_mc_resume(rdev, &save);
	return ret;
}

/*
 * GART.
 */
void rs600_gart_tlb_flush(struct radeon_device *rdev)
{
	uint32_t tmp;

	tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
	tmp &= C_000100_INVALIDATE_ALL_L1_TLBS & C_000100_INVALIDATE_L2_CACHE;
	WREG32_MC(R_000100_MC_PT0_CNTL, tmp);

	tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
	tmp |= S_000100_INVALIDATE_ALL_L1_TLBS(1) | S_000100_INVALIDATE_L2_CACHE(1);
	WREG32_MC(R_000100_MC_PT0_CNTL, tmp);

	tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
	tmp &= C_000100_INVALIDATE_ALL_L1_TLBS & C_000100_INVALIDATE_L2_CACHE;
	WREG32_MC(R_000100_MC_PT0_CNTL, tmp);
	tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
}

int rs600_gart_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.robj) {
		WARN(1, "RS600 GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	if (r) {
		return r;
	}
	rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
	return radeon_gart_table_vram_alloc(rdev);
}

static int rs600_gart_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int r, i;

	if (rdev->gart.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	radeon_gart_restore(rdev);
	/* Enable bus master */
	tmp = RREG32(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS;
	WREG32(RADEON_BUS_CNTL, tmp);
	/* FIXME: setup default page */
	WREG32_MC(R_000100_MC_PT0_CNTL,
		  (S_000100_EFFECTIVE_L2_CACHE_SIZE(6) |
		   S_000100_EFFECTIVE_L2_QUEUE_SIZE(6)));

	for (i = 0; i < 19; i++) {
		WREG32_MC(R_00016C_MC_PT0_CLIENT0_CNTL + i,
			  S_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE(1) |
			  S_00016C_SYSTEM_ACCESS_MODE_MASK(
				  V_00016C_SYSTEM_ACCESS_MODE_NOT_IN_SYS) |
			  S_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS(
				  V_00016C_SYSTEM_APERTURE_UNMAPPED_PASSTHROUGH) |
			  S_00016C_EFFECTIVE_L1_CACHE_SIZE(3) |
			  S_00016C_ENABLE_FRAGMENT_PROCESSING(1) |
			  S_00016C_EFFECTIVE_L1_QUEUE_SIZE(3));
	}
	/* enable first context */
	WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL,
		  S_000102_ENABLE_PAGE_TABLE(1) |
		  S_000102_PAGE_TABLE_DEPTH(V_000102_PAGE_TABLE_FLAT));

	/* disable all other contexts */
	for (i = 1; i < 8; i++)
		WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL + i, 0);

	/* setup the page table */
	WREG32_MC(R_00012C_MC_PT0_CONTEXT0_FLAT_BASE_ADDR,
		  rdev->gart.table_addr);
	WREG32_MC(R_00013C_MC_PT0_CONTEXT0_FLAT_START_ADDR, rdev->mc.gtt_start);
	WREG32_MC(R_00014C_MC_PT0_CONTEXT0_FLAT_END_ADDR, rdev->mc.gtt_end);
	WREG32_MC(R_00011C_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR, 0);

	/* System context maps to VRAM space */
	WREG32_MC(R_000112_MC_PT0_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start);
	WREG32_MC(R_000114_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end);

	/* enable page tables */
	tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
	WREG32_MC(R_000100_MC_PT0_CNTL, (tmp | S_000100_ENABLE_PT(1)));
	tmp = RREG32_MC(R_000009_MC_CNTL1);
	WREG32_MC(R_000009_MC_CNTL1, (tmp | S_000009_ENABLE_PAGE_TABLES(1)));
	rs600_gart_tlb_flush(rdev);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20),
		 (unsigned long long)rdev->gart.table_addr);
	rdev->gart.ready = true;
	return 0;
}

void rs600_gart_disable(struct radeon_device *rdev)
{
	u32 tmp;

	/* FIXME: disable out of gart access */
	WREG32_MC(R_000100_MC_PT0_CNTL, 0);
	tmp = RREG32_MC(R_000009_MC_CNTL1);
	WREG32_MC(R_000009_MC_CNTL1, tmp & C_000009_ENABLE_PAGE_TABLES);
	radeon_gart_table_vram_unpin(rdev);
}

void rs600_gart_fini(struct radeon_device *rdev)
{
	radeon_gart_fini(rdev);
	rs600_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
}

/* GART page table entry flag bits (the RS600 GART uses R600-style PTEs) */
#define R600_PTE_VALID     (1 << 0)
#define R600_PTE_SYSTEM    (1 << 1)
#define R600_PTE_SNOOPED   (1 << 2)
#define R600_PTE_READABLE  (1 << 5)
#define R600_PTE_WRITEABLE (1 << 6)

int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{
	void __iomem *ptr = (void *)rdev->gart.ptr;

	if (i < 0 || i > rdev->gart.num_gpu_pages) {
		return -EINVAL;
	}
	addr = addr & 0xFFFFFFFFFFFFF000ULL;
	addr |= R600_PTE_VALID | R600_PTE_SYSTEM | R600_PTE_SNOOPED;
	addr |= R600_PTE_READABLE | R600_PTE_WRITEABLE;
	writeq(addr, ptr + (i * 8));
	return 0;
}

int rs600_irq_set(struct radeon_device *rdev)
{
	uint32_t tmp = 0;
	uint32_t mode_int = 0;
	u32 hpd1 = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL) &
		~S_007D08_DC_HOT_PLUG_DETECT1_INT_EN(1);
	u32 hpd2 = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL) &
		~S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1);

	if (!rdev->irq.installed) {
		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
		WREG32(R_000040_GEN_INT_CNTL, 0);
		return -EINVAL;
	}
	if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
		tmp |= S_000040_SW_INT_EN(1);
	}
	if (rdev->irq.gui_idle) {
		tmp |= S_000040_GUI_IDLE(1);
	}
	if (rdev->irq.crtc_vblank_int[0] ||
	    rdev->irq.pflip[0]) {
		mode_int |= S_006540_D1MODE_VBLANK_INT_MASK(1);
	}
	if (rdev->irq.crtc_vblank_int[1] ||
	    rdev->irq.pflip[1]) {
		mode_int |= S_006540_D2MODE_VBLANK_INT_MASK(1);
	}
	if (rdev->irq.hpd[0]) {
		hpd1 |= S_007D08_DC_HOT_PLUG_DETECT1_INT_EN(1);
	}
	if (rdev->irq.hpd[1]) {
		hpd2 |= S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1);
	}
	WREG32(R_000040_GEN_INT_CNTL, tmp);
	WREG32(R_006540_DxMODE_INT_MASK, mode_int);
	WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
	WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
	return 0;
}

static inline u32 rs600_irq_ack(struct radeon_device *rdev)
{
	uint32_t irqs = RREG32(R_000044_GEN_INT_STATUS);
	uint32_t irq_mask = S_000044_SW_INT(1);
	u32 tmp;

	/* the interrupt works, but the status bit is permanently asserted */
	if (rdev->irq.gui_idle && radeon_gui_idle(rdev)) {
		if (!rdev->irq.gui_idle_acked)
			irq_mask |= S_000044_GUI_IDLE_STAT(1);
	}

	if (G_000044_DISPLAY_INT_STAT(irqs)) {
		rdev->irq.stat_regs.r500.disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS);
		if (G_007EDC_LB_D1_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
			WREG32(R_006534_D1MODE_VBLANK_STATUS,
			       S_006534_D1MODE_VBLANK_ACK(1));
		}
		if (G_007EDC_LB_D2_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
			WREG32(R_006D34_D2MODE_VBLANK_STATUS,
			       S_006D34_D2MODE_VBLANK_ACK(1));
		}
		if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
			tmp = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL);
			tmp |= S_007D08_DC_HOT_PLUG_DETECT1_INT_ACK(1);
			WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
		}
		if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
			tmp = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL);
			tmp |= S_007D18_DC_HOT_PLUG_DETECT2_INT_ACK(1);
			WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
		}
	} else {
		rdev->irq.stat_regs.r500.disp_int = 0;
	}

	if (irqs) {
		WREG32(R_000044_GEN_INT_STATUS, irqs);
	}
	return irqs & irq_mask;
}

void rs600_irq_disable(struct radeon_device *rdev)
{
	WREG32(R_000040_GEN_INT_CNTL, 0);
	WREG32(R_006540_DxMODE_INT_MASK, 0);
	/* Wait and acknowledge irq */
	mdelay(1);
	rs600_irq_ack(rdev);
}

int rs600_irq_process(struct radeon_device *rdev)
{
	u32 status, msi_rearm;
	bool queue_hotplug = false;

	/* reset gui idle ack. the status bit is broken */
	rdev->irq.gui_idle_acked = false;

	status = rs600_irq_ack(rdev);
	if (!status && !rdev->irq.stat_regs.r500.disp_int) {
		return IRQ_NONE;
	}
	while (status || rdev->irq.stat_regs.r500.disp_int) {
		/* SW interrupt */
		if (G_000044_SW_INT(status)) {
			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
		}
		/* GUI idle */
		if (G_000040_GUI_IDLE(status)) {
			rdev->irq.gui_idle_acked = true;
			rdev->pm.gui_idle = true;
			wake_up(&rdev->irq.idle_queue);
		}
		/* Vertical blank interrupts */
		if (G_007EDC_LB_D1_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
			if (rdev->irq.crtc_vblank_int[0]) {
				drm_handle_vblank(rdev->ddev, 0);
				rdev->pm.vblank_sync = true;
				wake_up(&rdev->irq.vblank_queue);
			}
			if (rdev->irq.pflip[0])
				radeon_crtc_handle_flip(rdev, 0);
		}
		if (G_007EDC_LB_D2_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
			if (rdev->irq.crtc_vblank_int[1]) {
				drm_handle_vblank(rdev->ddev, 1);
				rdev->pm.vblank_sync = true;
				wake_up(&rdev->irq.vblank_queue);
			}
			if (rdev->irq.pflip[1])
				radeon_crtc_handle_flip(rdev, 1);
		}
		if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
			queue_hotplug = true;
			DRM_DEBUG("HPD1\n");
		}
		if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
			queue_hotplug = true;
			DRM_DEBUG("HPD2\n");
		}
		status = rs600_irq_ack(rdev);
	}
	/* reset gui idle ack. the status bit is broken */
	rdev->irq.gui_idle_acked = false;
	if (queue_hotplug)
		schedule_work(&rdev->hotplug_work);
	if (rdev->msi_enabled) {
		switch (rdev->family) {
		case CHIP_RS600:
		case CHIP_RS690:
		case CHIP_RS740:
			msi_rearm = RREG32(RADEON_BUS_CNTL) & ~RS600_MSI_REARM;
			WREG32(RADEON_BUS_CNTL, msi_rearm);
			WREG32(RADEON_BUS_CNTL, msi_rearm | RS600_MSI_REARM);
			break;
		default:
			msi_rearm = RREG32(RADEON_MSI_REARM_EN) & ~RV370_MSI_REARM_EN;
			WREG32(RADEON_MSI_REARM_EN, msi_rearm);
			WREG32(RADEON_MSI_REARM_EN, msi_rearm | RV370_MSI_REARM_EN);
			break;
		}
	}
	return IRQ_HANDLED;
}

u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc)
{
	if (crtc == 0)
		return RREG32(R_0060A4_D1CRTC_STATUS_FRAME_COUNT);
	else
		return RREG32(R_0068A4_D2CRTC_STATUS_FRAME_COUNT);
}

int rs600_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (G_000000_MC_IDLE(RREG32_MC(R_000000_MC_STATUS)))
			return 0;
		udelay(1);
	}
	return -1;
}

void rs600_gpu_init(struct radeon_device *rdev)
{
	r420_pipes_init(rdev);
	/* Wait for mc idle */
	if (rs600_mc_wait_for_idle(rdev))
		dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");
}

void rs600_mc_init(struct radeon_device *rdev)
{
	u64 base;

	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
	rdev->mc.vram_is_ddr = true;
	rdev->mc.vram_width = 128;
	rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
	rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
	rdev->mc.visible_vram_size = rdev->mc.aper_size;
	rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
	base = RREG32_MC(R_000004_MC_FB_LOCATION);
	base = G_000004_MC_FB_START(base) << 16;
	radeon_vram_location(rdev, &rdev->mc, base);
	rdev->mc.gtt_base_align = 0;
	radeon_gtt_location(rdev, &rdev->mc);
	radeon_update_bandwidth_info(rdev);
}

void rs600_bandwidth_update(struct radeon_device *rdev)
{
	struct drm_display_mode *mode0 = NULL;
	struct drm_display_mode *mode1 = NULL;
	u32 d1mode_priority_a_cnt, d2mode_priority_a_cnt;
	/* FIXME: implement full support */

	radeon_update_display_priority(rdev);

	if (rdev->mode_info.crtcs[0]->base.enabled)
		mode0 = &rdev->mode_info.crtcs[0]->base.mode;
	if (rdev->mode_info.crtcs[1]->base.enabled)
		mode1 = &rdev->mode_info.crtcs[1]->base.mode;

	rs690_line_buffer_adjust(rdev, mode0, mode1);

	if (rdev->disp_priority == 2) {
		d1mode_priority_a_cnt = RREG32(R_006548_D1MODE_PRIORITY_A_CNT);
		d2mode_priority_a_cnt = RREG32(R_006D48_D2MODE_PRIORITY_A_CNT);
		d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
		d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1);
		WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
		WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt);
		WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
		WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
	}
}

uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg)
{
	WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) |
	       S_000070_MC_IND_CITF_ARB0(1));
	return RREG32(R_000074_MC_IND_DATA);
}

void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
	WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) |
	       S_000070_MC_IND_CITF_ARB0(1) | S_000070_MC_IND_WR_EN(1));
	WREG32(R_000074_MC_IND_DATA, v);
}

void rs600_debugfs(struct radeon_device *rdev)
{
	if (r100_debugfs_rbbm_init(rdev))
		DRM_ERROR("Failed to register debugfs file for RBBM !\n");
}

void rs600_set_safe_registers(struct radeon_device *rdev)
{
	rdev->config.r300.reg_safe_bm = rs600_reg_safe_bm;
	rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(rs600_reg_safe_bm);
}

static void rs600_mc_program(struct radeon_device *rdev)
{
	struct rv515_mc_save save;

	/* Stops all mc clients */
	rv515_mc_stop(rdev, &save);

	/* Wait for mc idle */
	if (rs600_mc_wait_for_idle(rdev))
		dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");

	/* FIXME: What does AGP mean for such a chipset? */
	WREG32_MC(R_000005_MC_AGP_LOCATION, 0x0FFFFFFF);
	WREG32_MC(R_000006_AGP_BASE, 0);
	WREG32_MC(R_000007_AGP_BASE_2, 0);
	/* Program MC */
	WREG32_MC(R_000004_MC_FB_LOCATION,
		  S_000004_MC_FB_START(rdev->mc.vram_start >> 16) |
		  S_000004_MC_FB_TOP(rdev->mc.vram_end >> 16));
	WREG32(R_000134_HDP_FB_LOCATION,
	       S_000134_HDP_FB_START(rdev->mc.vram_start >> 16));

	rv515_mc_resume(rdev, &save);
}

static int rs600_startup(struct radeon_device *rdev)
{
	int r;

	rs600_mc_program(rdev);
	/* Resume clock */
	rv515_clock_startup(rdev);
	/* Initialize GPU configuration (# pipes, ...) */
	rs600_gpu_init(rdev);
	/* Initialize GART (initialize after TTM so we can allocate
	 * memory through TTM but finalize after TTM) */
	r = rs600_gart_enable(rdev);
	if (r)
		return r;

	/* allocate wb buffer */
	r = radeon_wb_init(rdev);
	if (r)
		return r;

	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	/* Enable IRQ */
	rs600_irq_set(rdev);
	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
	/* 1M ring buffer */
	r = r100_cp_init(rdev, 1024 * 1024);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
		return r;
	}
	r = r100_ib_init(rdev);
	if (r) {
		dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
		return r;
	}

	r = r600_audio_init(rdev);
	if (r) {
		dev_err(rdev->dev, "failed initializing audio\n");
		return r;
	}

	return 0;
}

int rs600_resume(struct radeon_device *rdev)
{
	/* Make sure GART is not working */
	rs600_gart_disable(rdev);
	/* Resume clock before doing reset */
	rv515_clock_startup(rdev);
	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
	if (radeon_asic_reset(rdev)) {
		dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
			RREG32(R_000E40_RBBM_STATUS),
			RREG32(R_0007C0_CP_STAT));
	}
	/* post */
	atom_asic_init(rdev->mode_info.atom_context);
	/* Resume clock after posting */
	rv515_clock_startup(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	return rs600_startup(rdev);
}

int rs600_suspend(struct radeon_device *rdev)
{
	r600_audio_fini(rdev);
	r100_cp_disable(rdev);
	radeon_wb_disable(rdev);
	rs600_irq_disable(rdev);
	rs600_gart_disable(rdev);
	return 0;
}

void rs600_fini(struct radeon_device *rdev)
{
	r600_audio_fini(rdev);
	r100_cp_fini(rdev);
	radeon_wb_fini(rdev);
	r100_ib_fini(rdev);
	radeon_gem_fini(rdev);
	rs600_gart_fini(rdev);
	radeon_irq_kms_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;
}

int rs600_init(struct radeon_device *rdev)
{
	int r;

	/* Disable VGA */
	rv515_vga_render_disable(rdev);
	/* Initialize scratch registers */
	radeon_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* restore some register to sane defaults */
	r100_restore_sanity(rdev);
	/* BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	if (rdev->is_atom_bios) {
		r = radeon_atombios_init(rdev);
		if (r)
			return r;
	} else {
		dev_err(rdev->dev, "Expecting atombios for RS600 GPU\n");
		return -EINVAL;
	}
	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
	if (radeon_asic_reset(rdev)) {
		dev_warn(rdev->dev,
			"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
			RREG32(R_000E40_RBBM_STATUS),
			RREG32(R_0007C0_CP_STAT));
	}
	/* check if cards are posted or not */
	if (radeon_boot_test_post_card(rdev) == false)
		return -EINVAL;

	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	/* initialize memory controller */
	rs600_mc_init(rdev);
	rs600_debugfs(rdev);
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	r = radeon_irq_kms_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;
	r = rs600_gart_init(rdev);
	if (r)
		return r;
	rs600_set_safe_registers(rdev);
	rdev->accel_working = true;
	r = rs600_startup(rdev);
	if (r) {
		/* Something went wrong with the accel init, so stop accel */
		dev_err(rdev->dev, "Disabling GPU acceleration\n");
		r100_cp_fini(rdev);
		radeon_wb_fini(rdev);
		r100_ib_fini(rdev);
		rs600_gart_fini(rdev);
		radeon_irq_kms_fini(rdev);
		rdev->accel_working = false;
	}
	return 0;
}