/*
 * Copyright 2010 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <linux/firmware.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include <drm/radeon_drm.h>
#include "evergreend.h"
#include "atom.h"
#include "avivod.h"
#include "evergreen_reg.h"
#include "evergreen_blit_shaders.h"

#define EVERGREEN_PFP_UCODE_SIZE 1120
#define EVERGREEN_PM4_UCODE_SIZE 1376

static const u32 crtc_offsets[6] =
{
	EVERGREEN_CRTC0_REGISTER_OFFSET,
	EVERGREEN_CRTC1_REGISTER_OFFSET,
	EVERGREEN_CRTC2_REGISTER_OFFSET,
	EVERGREEN_CRTC3_REGISTER_OFFSET,
	EVERGREEN_CRTC4_REGISTER_OFFSET,
	EVERGREEN_CRTC5_REGISTER_OFFSET
};

static void evergreen_gpu_init(struct radeon_device *rdev);
void evergreen_fini(struct radeon_device *rdev);
void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
extern void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
				     int ring, u32 cp_int_cntl);

void evergreen_tiling_fields(unsigned tiling_flags, unsigned *bankw,
			     unsigned *bankh, unsigned *mtaspect,
			     unsigned *tile_split)
{
	*bankw = (tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
	*bankh = (tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
	*mtaspect = (tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
	*tile_split = (tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
	switch (*bankw) {
	default:
	case 1: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_1; break;
	case 2: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_2; break;
	case 4: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_4; break;
	case 8: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_8; break;
	}
	switch (*bankh) {
	default:
	case 1: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_1; break;
	case 2: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_2; break;
	case 4: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_4; break;
	case 8: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_8; break;
	}
	switch (*mtaspect) {
	default:
	case 1: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_1; break;
	case 2: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_2; break;
	case 4: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_4; break;
	case 8: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_8; break;
	}
}

void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
{
	u16 ctl, v;
	int err;

	err = pcie_capability_read_word(rdev->pdev, PCI_EXP_DEVCTL, &ctl);
	if (err)
		return;

	v = (ctl & PCI_EXP_DEVCTL_READRQ) >> 12;

	/* if the BIOS or OS sets MAX_READ_REQUEST_SIZE to an invalid value,
	 * fix it to avoid hangs or performance issues
	 */
	if ((v == 0) || (v == 6) || (v == 7)) {
		ctl &= ~PCI_EXP_DEVCTL_READRQ;
		ctl |= (2 << 12);
		pcie_capability_write_word(rdev->pdev, PCI_EXP_DEVCTL, ctl);
	}
}

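/*
 * Note: per the PCIe spec, the READRQ field encodes the maximum read
 * request size as 128 << v bytes, so the value 2 written above restores
 * the 512-byte default, while 6 and 7 are reserved encodings.
 * Illustrative example: v == 5 would mean 128 << 5 == 4096 bytes.
 */
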
/**
 * dce4_wait_for_vblank - vblank wait asic callback.
 *
 * @rdev: radeon_device pointer
 * @crtc: crtc to wait for vblank on
 *
 * Wait for vblank on the requested crtc (evergreen+).
 */
void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc)
{
	int i;

	if (crtc >= rdev->num_crtc)
		return;

	if (RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[crtc]) & EVERGREEN_CRTC_MASTER_EN) {
		for (i = 0; i < rdev->usec_timeout; i++) {
			if (!(RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK))
				break;
			udelay(1);
		}
		for (i = 0; i < rdev->usec_timeout; i++) {
			if (RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK)
				break;
			udelay(1);
		}
	}
}

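/*
 * The two polling loops above are intentional: the first waits until any
 * vblank already in progress has ended, so the second is guaranteed to
 * catch the start of a fresh vblank interval rather than the tail of one.
 */
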
/**
 * evergreen_pre_page_flip - pre-pageflip callback.
 *
 * @rdev: radeon_device pointer
 * @crtc: crtc to prepare for pageflip on
 *
 * Pre-pageflip callback (evergreen+).
 * Enables the pageflip irq (vblank irq).
 */
void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc)
{
	/* enable the pflip int */
	radeon_irq_kms_pflip_irq_get(rdev, crtc);
}

/**
 * evergreen_post_page_flip - post-pageflip callback.
 *
 * @rdev: radeon_device pointer
 * @crtc: crtc to cleanup pageflip on
 *
 * Post-pageflip callback (evergreen+).
 * Disables the pageflip irq (vblank irq).
 */
void evergreen_post_page_flip(struct radeon_device *rdev, int crtc)
{
	/* disable the pflip int */
	radeon_irq_kms_pflip_irq_put(rdev, crtc);
}

/**
 * evergreen_page_flip - pageflip callback.
 *
 * @rdev: radeon_device pointer
 * @crtc_id: crtc to cleanup pageflip on
 * @crtc_base: new address of the crtc (GPU MC address)
 *
 * Does the actual pageflip (evergreen+).
 * During vblank we take the crtc lock and wait for the update_pending
 * bit to go high; when it does, we release the lock and allow the
 * double buffered update to take place.
 * Returns the current update pending status.
 */
u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
	u32 tmp = RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset);
	int i;

	/* Lock the graphics update lock */
	tmp |= EVERGREEN_GRPH_UPDATE_LOCK;
	WREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);

	/* update the scanout addresses */
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
	       upper_32_bits(crtc_base));
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
	       (u32)crtc_base);

	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
	       upper_32_bits(crtc_base));
	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
	       (u32)crtc_base);

	/* Wait for update_pending to go high. */
	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING)
			break;
		udelay(1);
	}
	DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");

	/* Unlock the lock, so double-buffering can take place inside vblank */
	tmp &= ~EVERGREEN_GRPH_UPDATE_LOCK;
	WREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);

	/* Return current update_pending status: */
	return RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING;
}

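/*
 * GRPH_UPDATE_LOCK keeps the hardware from latching the double-buffered
 * surface addresses while both the primary and secondary registers are
 * being reprogrammed, so the flip is applied atomically at the next
 * vblank once the lock is released.
 */
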
/* get temperature in millidegrees */
int evergreen_get_temp(struct radeon_device *rdev)
{
	u32 temp, toffset;
	int actual_temp = 0;

	if (rdev->family == CHIP_JUNIPER) {
		toffset = (RREG32(CG_THERMAL_CTRL) & TOFFSET_MASK) >>
			TOFFSET_SHIFT;
		temp = (RREG32(CG_TS0_STATUS) & TS0_ADC_DOUT_MASK) >>
			TS0_ADC_DOUT_SHIFT;

		if (toffset & 0x100)
			actual_temp = temp / 2 - (0x200 - toffset);
		else
			actual_temp = temp / 2 + toffset;

		actual_temp = actual_temp * 1000;

	} else {
		temp = (RREG32(CG_MULT_THERMAL_STATUS) & ASIC_T_MASK) >>
			ASIC_T_SHIFT;

		if (temp & 0x400)
			actual_temp = -256;
		else if (temp & 0x200)
			actual_temp = 255;
		else if (temp & 0x100) {
			actual_temp = temp & 0x1ff;
			actual_temp |= ~0x1ff;
		} else
			actual_temp = temp & 0xff;

		actual_temp = (actual_temp * 1000) / 2;
	}

	return actual_temp;
}

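/*
 * Worked example for the Juniper path above (illustrative values only):
 * temp = 0xa0 and toffset = 0x10 give 0xa0 / 2 + 0x10 = 96, returned as
 * 96000 millidegrees.  On the other ASICs the raw value is sign-extended
 * and halved instead.
 */
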
int sumo_get_temp(struct radeon_device *rdev)
{
	u32 temp = RREG32(CG_THERMAL_STATUS) & 0xff;
	int actual_temp = temp - 49;

	return actual_temp * 1000;
}

/**
 * sumo_pm_init_profile - Initialize power profiles callback.
 *
 * @rdev: radeon_device pointer
 *
 * Initialize the power states used in profile mode
 * (sumo, trinity, SI).
 * Used for profile mode only.
 */
void sumo_pm_init_profile(struct radeon_device *rdev)
{
	int idx;

	/* default */
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;

	/* low,mid sh/mh */
	if (rdev->flags & RADEON_IS_MOBILITY)
		idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
	else
		idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);

	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;

	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;

	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;

	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;

	/* high sh/mh */
	idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx =
		rdev->pm.power_state[idx].num_clock_modes - 1;

	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx =
		rdev->pm.power_state[idx].num_clock_modes - 1;
}

/**
 * btc_pm_init_profile - Initialize power profiles callback.
 *
 * @rdev: radeon_device pointer
 *
 * Initialize the power states used in profile mode
 * (BTC, cayman).
 * Used for profile mode only.
 */
void btc_pm_init_profile(struct radeon_device *rdev)
{
	int idx;

	/* default */
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
	/* starting with BTC, there is one state that is used for both
	 * MH and SH.  Difference is that we always use the high clock index for
	 * mclk.
	 */
	if (rdev->flags & RADEON_IS_MOBILITY)
		idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
	else
		idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
	/* low sh */
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
	/* mid sh */
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
	/* high sh */
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
	/* low mh */
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
	/* mid mh */
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
	/* high mh */
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
}

/**
 * evergreen_pm_misc - set additional pm hw parameters callback.
 *
 * @rdev: radeon_device pointer
 *
 * Set non-clock parameters associated with a power state
 * (voltage, etc.) (evergreen+).
 */
void evergreen_pm_misc(struct radeon_device *rdev)
{
	int req_ps_idx = rdev->pm.requested_power_state_index;
	int req_cm_idx = rdev->pm.requested_clock_mode_index;
	struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
	struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;

	if (voltage->type == VOLTAGE_SW) {
		/* 0xff01 is a flag rather than an actual voltage */
		if (voltage->voltage == 0xff01)
			return;
		if (voltage->voltage && (voltage->voltage != rdev->pm.current_vddc)) {
			radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
			rdev->pm.current_vddc = voltage->voltage;
			DRM_DEBUG("Setting: vddc: %d\n", voltage->voltage);
		}
		/* 0xff01 is a flag rather than an actual voltage */
		if (voltage->vddci == 0xff01)
			return;
		if (voltage->vddci && (voltage->vddci != rdev->pm.current_vddci)) {
			radeon_atom_set_voltage(rdev, voltage->vddci, SET_VOLTAGE_TYPE_ASIC_VDDCI);
			rdev->pm.current_vddci = voltage->vddci;
			DRM_DEBUG("Setting: vddci: %d\n", voltage->vddci);
		}
	}
}

/**
 * evergreen_pm_prepare - pre-power state change callback.
 *
 * @rdev: radeon_device pointer
 *
 * Prepare for a power state change (evergreen+).
 */
void evergreen_pm_prepare(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	u32 tmp;

	/* disable any active CRTCs */
	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
		radeon_crtc = to_radeon_crtc(crtc);
		if (radeon_crtc->enabled) {
			tmp = RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset);
			tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
			WREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
		}
	}
}

/**
 * evergreen_pm_finish - post-power state change callback.
 *
 * @rdev: radeon_device pointer
 *
 * Clean up after a power state change (evergreen+).
 */
void evergreen_pm_finish(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	u32 tmp;

	/* enable any active CRTCs */
	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
		radeon_crtc = to_radeon_crtc(crtc);
		if (radeon_crtc->enabled) {
			tmp = RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset);
			tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
			WREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
		}
	}
}

/**
 * evergreen_hpd_sense - hpd sense callback.
 *
 * @rdev: radeon_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Checks if a digital monitor is connected (evergreen+).
 * Returns true if connected, false if not connected.
 */
bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
	bool connected = false;

	switch (hpd) {
	case RADEON_HPD_1:
		if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	case RADEON_HPD_2:
		if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	case RADEON_HPD_3:
		if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	case RADEON_HPD_4:
		if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	case RADEON_HPD_5:
		if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	case RADEON_HPD_6:
		if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	default:
		break;
	}

	return connected;
}

/**
 * evergreen_hpd_set_polarity - hpd set polarity callback.
 *
 * @rdev: radeon_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Set the polarity of the hpd pin (evergreen+).
 */
void evergreen_hpd_set_polarity(struct radeon_device *rdev,
				enum radeon_hpd_id hpd)
{
	u32 tmp;
	bool connected = evergreen_hpd_sense(rdev, hpd);

	switch (hpd) {
	case RADEON_HPD_1:
		tmp = RREG32(DC_HPD1_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD1_INT_CONTROL, tmp);
		break;
	case RADEON_HPD_2:
		tmp = RREG32(DC_HPD2_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD2_INT_CONTROL, tmp);
		break;
	case RADEON_HPD_3:
		tmp = RREG32(DC_HPD3_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD3_INT_CONTROL, tmp);
		break;
	case RADEON_HPD_4:
		tmp = RREG32(DC_HPD4_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD4_INT_CONTROL, tmp);
		break;
	case RADEON_HPD_5:
		tmp = RREG32(DC_HPD5_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD5_INT_CONTROL, tmp);
		break;
	case RADEON_HPD_6:
		tmp = RREG32(DC_HPD6_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD6_INT_CONTROL, tmp);
		break;
	default:
		break;
	}
}

/**
 * evergreen_hpd_init - hpd setup callback.
 *
 * @rdev: radeon_device pointer
 *
 * Setup the hpd pins used by the card (evergreen+).
 * Enable the pin, set the polarity, and enable the hpd interrupts.
 */
void evergreen_hpd_init(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;
	unsigned enabled = 0;
	u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) |
		DC_HPDx_RX_INT_TIMER(0xfa) | DC_HPDx_EN;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		switch (radeon_connector->hpd.hpd) {
		case RADEON_HPD_1:
			WREG32(DC_HPD1_CONTROL, tmp);
			break;
		case RADEON_HPD_2:
			WREG32(DC_HPD2_CONTROL, tmp);
			break;
		case RADEON_HPD_3:
			WREG32(DC_HPD3_CONTROL, tmp);
			break;
		case RADEON_HPD_4:
			WREG32(DC_HPD4_CONTROL, tmp);
			break;
		case RADEON_HPD_5:
			WREG32(DC_HPD5_CONTROL, tmp);
			break;
		case RADEON_HPD_6:
			WREG32(DC_HPD6_CONTROL, tmp);
			break;
		default:
			break;
		}
		radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
		enabled |= 1 << radeon_connector->hpd.hpd;
	}
	radeon_irq_kms_enable_hpd(rdev, enabled);
}

/**
 * evergreen_hpd_fini - hpd tear down callback.
 *
 * @rdev: radeon_device pointer
 *
 * Tear down the hpd pins used by the card (evergreen+).
 * Disable the hpd interrupts.
 */
void evergreen_hpd_fini(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;
	unsigned disabled = 0;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		switch (radeon_connector->hpd.hpd) {
		case RADEON_HPD_1:
			WREG32(DC_HPD1_CONTROL, 0);
			break;
		case RADEON_HPD_2:
			WREG32(DC_HPD2_CONTROL, 0);
			break;
		case RADEON_HPD_3:
			WREG32(DC_HPD3_CONTROL, 0);
			break;
		case RADEON_HPD_4:
			WREG32(DC_HPD4_CONTROL, 0);
			break;
		case RADEON_HPD_5:
			WREG32(DC_HPD5_CONTROL, 0);
			break;
		case RADEON_HPD_6:
			WREG32(DC_HPD6_CONTROL, 0);
			break;
		default:
			break;
		}
		disabled |= 1 << radeon_connector->hpd.hpd;
	}
	radeon_irq_kms_disable_hpd(rdev, disabled);
}

/* watermark setup */

static u32 evergreen_line_buffer_adjust(struct radeon_device *rdev,
					struct radeon_crtc *radeon_crtc,
					struct drm_display_mode *mode,
					struct drm_display_mode *other_mode)
{
	u32 tmp;
	/*
	 * Line Buffer Setup
	 * There are 3 line buffers, each one shared by 2 display controllers.
	 * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between
	 * the display controllers.  The partitioning is done via one of four
	 * preset allocations specified in bits 2:0:
	 * first display controller
	 *  0 - first half of lb (3840 * 2)
	 *  1 - first 3/4 of lb (5760 * 2)
	 *  2 - whole lb (7680 * 2), other crtc must be disabled
	 *  3 - first 1/4 of lb (1920 * 2)
	 * second display controller
	 *  4 - second half of lb (3840 * 2)
	 *  5 - second 3/4 of lb (5760 * 2)
	 *  6 - whole lb (7680 * 2), other crtc must be disabled
	 *  7 - last 1/4 of lb (1920 * 2)
	 */
	/* this can get tricky if we have two large displays on a paired group
	 * of crtcs.  Ideally for multiple large displays we'd assign them to
	 * non-linked crtcs for maximum line buffer allocation.
	 */
	if (radeon_crtc->base.enabled && mode) {
		if (other_mode)
			tmp = 0; /* 1/2 */
		else
			tmp = 2; /* whole */
	} else
		tmp = 0;

	/* second controller of the pair uses second half of the lb */
	if (radeon_crtc->crtc_id % 2)
		tmp += 4;
	WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset, tmp);

	if (radeon_crtc->base.enabled && mode) {
		switch (tmp) {
		case 0:
		case 4:
		default:
			if (ASIC_IS_DCE5(rdev))
				return 4096 * 2;
			else
				return 3840 * 2;
		case 1:
		case 5:
			if (ASIC_IS_DCE5(rdev))
				return 6144 * 2;
			else
				return 5760 * 2;
		case 2:
		case 6:
			if (ASIC_IS_DCE5(rdev))
				return 8192 * 2;
			else
				return 7680 * 2;
		case 3:
		case 7:
			if (ASIC_IS_DCE5(rdev))
				return 2048 * 2;
			else
				return 1920 * 2;
		}
	}

	/* controller not enabled, so no lb used */
	return 0;
}

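/*
 * Illustrative example: two active 1920x1080 heads on a paired group each
 * get the 1/2 allocation, i.e. 3840 * 2 entries on DCE4 (4096 * 2 on
 * DCE5) -- enough for a 1920 pixel wide viewport with room to spare.
 */
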
u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev)
{
	u32 tmp = RREG32(MC_SHARED_CHMAP);

	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
	case 0:
	default:
		return 1;
	case 1:
		return 2;
	case 2:
		return 4;
	case 3:
		return 8;
	}
}

struct evergreen_wm_params {
	u32 dram_channels; /* number of dram channels */
	u32 yclk; /* bandwidth per dram data pin in kHz */
	u32 sclk; /* engine clock in kHz */
	u32 disp_clk; /* display clock in kHz */
	u32 src_width; /* viewport width */
	u32 active_time; /* active display time in ns */
	u32 blank_time; /* blank time in ns */
	bool interlaced; /* mode is interlaced */
	fixed20_12 vsc; /* vertical scale ratio */
	u32 num_heads; /* number of active crtcs */
	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
	u32 lb_size; /* line buffer allocated to pipe */
	u32 vtaps; /* vertical scaler taps */
};

static u32 evergreen_dram_bandwidth(struct evergreen_wm_params *wm)
{
	/* Calculate DRAM Bandwidth and the part allocated to display. */
	fixed20_12 dram_efficiency; /* 0.7 */
	fixed20_12 yclk, dram_channels, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	yclk.full = dfixed_const(wm->yclk);
	yclk.full = dfixed_div(yclk, a);
	dram_channels.full = dfixed_const(wm->dram_channels * 4);
	a.full = dfixed_const(10);
	dram_efficiency.full = dfixed_const(7);
	dram_efficiency.full = dfixed_div(dram_efficiency, a);
	bandwidth.full = dfixed_mul(dram_channels, yclk);
	bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);

	return dfixed_trunc(bandwidth);
}

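/*
 * In plain terms (a sketch of the fixed-point math above):
 * dram_bandwidth [MB/s] ~= yclk[MHz] * dram_channels * 4 bytes * 0.7.
 * E.g. yclk = 1000 MHz with 2 channels gives 1000 * 8 * 0.7 = 5600.
 */
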
static u32 evergreen_dram_bandwidth_for_display(struct evergreen_wm_params *wm)
{
	/* Calculate DRAM Bandwidth and the part allocated to display. */
	fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
	fixed20_12 yclk, dram_channels, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	yclk.full = dfixed_const(wm->yclk);
	yclk.full = dfixed_div(yclk, a);
	dram_channels.full = dfixed_const(wm->dram_channels * 4);
	a.full = dfixed_const(10);
	disp_dram_allocation.full = dfixed_const(3); /* XXX worst-case value 0.3 */
	disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
	bandwidth.full = dfixed_mul(dram_channels, yclk);
	bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);

	return dfixed_trunc(bandwidth);
}

static u32 evergreen_data_return_bandwidth(struct evergreen_wm_params *wm)
{
	/* Calculate the display Data return Bandwidth */
	fixed20_12 return_efficiency; /* 0.8 */
	fixed20_12 sclk, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	sclk.full = dfixed_const(wm->sclk);
	sclk.full = dfixed_div(sclk, a);
	a.full = dfixed_const(10);
	return_efficiency.full = dfixed_const(8);
	return_efficiency.full = dfixed_div(return_efficiency, a);
	a.full = dfixed_const(32);
	bandwidth.full = dfixed_mul(a, sclk);
	bandwidth.full = dfixed_mul(bandwidth, return_efficiency);

	return dfixed_trunc(bandwidth);
}

static u32 evergreen_dmif_request_bandwidth(struct evergreen_wm_params *wm)
{
	/* Calculate the DMIF Request Bandwidth */
	fixed20_12 disp_clk_request_efficiency; /* 0.8 */
	fixed20_12 disp_clk, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	disp_clk.full = dfixed_const(wm->disp_clk);
	disp_clk.full = dfixed_div(disp_clk, a);
	a.full = dfixed_const(10);
	disp_clk_request_efficiency.full = dfixed_const(8);
	disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
	a.full = dfixed_const(32);
	bandwidth.full = dfixed_mul(a, disp_clk);
	bandwidth.full = dfixed_mul(bandwidth, disp_clk_request_efficiency);

	return dfixed_trunc(bandwidth);
}

static u32 evergreen_available_bandwidth(struct evergreen_wm_params *wm)
{
	/* Calculate the Available bandwidth.  Display can use this temporarily but not in average. */
	u32 dram_bandwidth = evergreen_dram_bandwidth(wm);
	u32 data_return_bandwidth = evergreen_data_return_bandwidth(wm);
	u32 dmif_req_bandwidth = evergreen_dmif_request_bandwidth(wm);

	return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
}

static u32 evergreen_average_bandwidth(struct evergreen_wm_params *wm)
{
	/* Calculate the display mode Average Bandwidth
	 * DisplayMode should contain the source and destination dimensions,
	 * timing, etc.
	 */
	fixed20_12 bpp;
	fixed20_12 line_time;
	fixed20_12 src_width;
	fixed20_12 bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	line_time.full = dfixed_const(wm->active_time + wm->blank_time);
	line_time.full = dfixed_div(line_time, a);
	bpp.full = dfixed_const(wm->bytes_per_pixel);
	src_width.full = dfixed_const(wm->src_width);
	bandwidth.full = dfixed_mul(src_width, bpp);
	bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
	bandwidth.full = dfixed_div(bandwidth, line_time);

	return dfixed_trunc(bandwidth);
}

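/*
 * Sketch of the above: average bandwidth ~= src_width * bytes_per_pixel *
 * vsc / line_time -- the bytes a head must fetch for one scanline divided
 * by the time one scanline (active + blank) is on screen.
 */
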
static u32 evergreen_latency_watermark(struct evergreen_wm_params *wm)
{
	/* First calculate the latency in ns */
	u32 mc_latency = 2000; /* 2000 ns. */
	u32 available_bandwidth = evergreen_available_bandwidth(wm);
	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
		(wm->num_heads * cursor_line_pair_return_time);
	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
	fixed20_12 a, b, c;

	if (wm->num_heads == 0)
		return 0;

	a.full = dfixed_const(2);
	b.full = dfixed_const(1);
	if ((wm->vsc.full > a.full) ||
	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
	    (wm->vtaps >= 5) ||
	    ((wm->vsc.full >= a.full) && wm->interlaced))
		max_src_lines_per_dst_line = 4;
	else
		max_src_lines_per_dst_line = 2;

	a.full = dfixed_const(available_bandwidth);
	b.full = dfixed_const(wm->num_heads);
	a.full = dfixed_div(a, b);

	b.full = dfixed_const(1000);
	c.full = dfixed_const(wm->disp_clk);
	b.full = dfixed_div(c, b);
	c.full = dfixed_const(wm->bytes_per_pixel);
	b.full = dfixed_mul(b, c);

	lb_fill_bw = min(dfixed_trunc(a), dfixed_trunc(b));

	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
	b.full = dfixed_const(1000);
	c.full = dfixed_const(lb_fill_bw);
	b.full = dfixed_div(c, b);
	a.full = dfixed_div(a, b);
	line_fill_time = dfixed_trunc(a);

	if (line_fill_time < wm->active_time)
		return latency;
	else
		return latency + (line_fill_time - wm->active_time);

}

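/*
 * The returned watermark is the worst-case request latency in ns: fixed
 * MC latency, plus the time other heads' chunk and cursor returns occupy
 * the data path, plus DC pipe latency -- extended when the line buffer
 * cannot be refilled (line_fill_time) within the active display time.
 */
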
static bool evergreen_average_bandwidth_vs_dram_bandwidth_for_display(struct evergreen_wm_params *wm)
{
	if (evergreen_average_bandwidth(wm) <=
	    (evergreen_dram_bandwidth_for_display(wm) / wm->num_heads))
		return true;
	else
		return false;
};

static bool evergreen_average_bandwidth_vs_available_bandwidth(struct evergreen_wm_params *wm)
{
	if (evergreen_average_bandwidth(wm) <=
	    (evergreen_available_bandwidth(wm) / wm->num_heads))
		return true;
	else
		return false;
};

static bool evergreen_check_latency_hiding(struct evergreen_wm_params *wm)
{
	u32 lb_partitions = wm->lb_size / wm->src_width;
	u32 line_time = wm->active_time + wm->blank_time;
	u32 latency_tolerant_lines;
	u32 latency_hiding;
	fixed20_12 a;

	a.full = dfixed_const(1);
	if (wm->vsc.full > a.full)
		latency_tolerant_lines = 1;
	else {
		if (lb_partitions <= (wm->vtaps + 1))
			latency_tolerant_lines = 1;
		else
			latency_tolerant_lines = 2;
	}

	latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);

	if (evergreen_latency_watermark(wm) <= latency_hiding)
		return true;
	else
		return false;
}

static void evergreen_program_watermarks(struct radeon_device *rdev,
					 struct radeon_crtc *radeon_crtc,
					 u32 lb_size, u32 num_heads)
{
	struct drm_display_mode *mode = &radeon_crtc->base.mode;
	struct evergreen_wm_params wm;
	u32 pixel_period;
	u32 line_time = 0;
	u32 latency_watermark_a = 0, latency_watermark_b = 0;
	u32 priority_a_mark = 0, priority_b_mark = 0;
	u32 priority_a_cnt = PRIORITY_OFF;
	u32 priority_b_cnt = PRIORITY_OFF;
	u32 pipe_offset = radeon_crtc->crtc_id * 16;
	u32 tmp, arb_control3;
	fixed20_12 a, b, c;

	if (radeon_crtc->base.enabled && num_heads && mode) {
		pixel_period = 1000000 / (u32)mode->clock;
		line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
		priority_a_cnt = 0;
		priority_b_cnt = 0;

		wm.yclk = rdev->pm.current_mclk * 10;
		wm.sclk = rdev->pm.current_sclk * 10;
		wm.disp_clk = mode->clock;
		wm.src_width = mode->crtc_hdisplay;
		wm.active_time = mode->crtc_hdisplay * pixel_period;
		wm.blank_time = line_time - wm.active_time;
		wm.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm.interlaced = true;
		wm.vsc = radeon_crtc->vsc;
		wm.vtaps = 1;
		if (radeon_crtc->rmx_type != RMX_OFF)
			wm.vtaps = 2;
		wm.bytes_per_pixel = 4; /* XXX: get this from fb config */
		wm.lb_size = lb_size;
		wm.dram_channels = evergreen_get_number_of_dram_channels(rdev);
		wm.num_heads = num_heads;

		/* set for high clocks */
		latency_watermark_a = min(evergreen_latency_watermark(&wm), (u32)65535);
		/* set for low clocks */
		/* wm.yclk = low clk; wm.sclk = low clk */
		latency_watermark_b = min(evergreen_latency_watermark(&wm), (u32)65535);

		/* possibly force display priority to high */
		/* should really do this at mode validation time... */
		if (!evergreen_average_bandwidth_vs_dram_bandwidth_for_display(&wm) ||
		    !evergreen_average_bandwidth_vs_available_bandwidth(&wm) ||
		    !evergreen_check_latency_hiding(&wm) ||
		    (rdev->disp_priority == 2)) {
			DRM_DEBUG_KMS("force priority to high\n");
			priority_a_cnt |= PRIORITY_ALWAYS_ON;
			priority_b_cnt |= PRIORITY_ALWAYS_ON;
		}

		a.full = dfixed_const(1000);
		b.full = dfixed_const(mode->clock);
		b.full = dfixed_div(b, a);
		c.full = dfixed_const(latency_watermark_a);
		c.full = dfixed_mul(c, b);
		c.full = dfixed_mul(c, radeon_crtc->hsc);
		c.full = dfixed_div(c, a);
		a.full = dfixed_const(16);
		c.full = dfixed_div(c, a);
		priority_a_mark = dfixed_trunc(c);
		priority_a_cnt |= priority_a_mark & PRIORITY_MARK_MASK;

		a.full = dfixed_const(1000);
		b.full = dfixed_const(mode->clock);
		b.full = dfixed_div(b, a);
		c.full = dfixed_const(latency_watermark_b);
		c.full = dfixed_mul(c, b);
		c.full = dfixed_mul(c, radeon_crtc->hsc);
		c.full = dfixed_div(c, a);
		a.full = dfixed_const(16);
		c.full = dfixed_div(c, a);
		priority_b_mark = dfixed_trunc(c);
		priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
	}

	/* select wm A */
	arb_control3 = RREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset);
	tmp = arb_control3;
	tmp &= ~LATENCY_WATERMARK_MASK(3);
	tmp |= LATENCY_WATERMARK_MASK(1);
	WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, tmp);
	WREG32(PIPE0_LATENCY_CONTROL + pipe_offset,
	       (LATENCY_LOW_WATERMARK(latency_watermark_a) |
		LATENCY_HIGH_WATERMARK(line_time)));
	/* select wm B */
	tmp = RREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset);
	tmp &= ~LATENCY_WATERMARK_MASK(3);
	tmp |= LATENCY_WATERMARK_MASK(2);
	WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, tmp);
	WREG32(PIPE0_LATENCY_CONTROL + pipe_offset,
	       (LATENCY_LOW_WATERMARK(latency_watermark_b) |
		LATENCY_HIGH_WATERMARK(line_time)));
	/* restore original selection */
	WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, arb_control3);

	/* write the priority marks */
	WREG32(PRIORITY_A_CNT + radeon_crtc->crtc_offset, priority_a_cnt);
	WREG32(PRIORITY_B_CNT + radeon_crtc->crtc_offset, priority_b_cnt);

}

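/*
 * The priority marks above are in 16-pixel units: latency[ns] multiplied
 * by the pixel rate (and horizontal scale ratio) gives the pixels a head
 * consumes while a request is outstanding; dividing by 16 yields the
 * programmed mark.
 */
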
/**
 * evergreen_bandwidth_update - update display watermarks callback.
 *
 * @rdev: radeon_device pointer
 *
 * Update the display watermarks based on the requested mode(s)
 * (evergreen+).
 */
void evergreen_bandwidth_update(struct radeon_device *rdev)
{
	struct drm_display_mode *mode0 = NULL;
	struct drm_display_mode *mode1 = NULL;
	u32 num_heads = 0, lb_size;
	int i;

	radeon_update_display_priority(rdev);

	for (i = 0; i < rdev->num_crtc; i++) {
		if (rdev->mode_info.crtcs[i]->base.enabled)
			num_heads++;
	}
	for (i = 0; i < rdev->num_crtc; i += 2) {
		mode0 = &rdev->mode_info.crtcs[i]->base.mode;
		mode1 = &rdev->mode_info.crtcs[i+1]->base.mode;
		lb_size = evergreen_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i], mode0, mode1);
		evergreen_program_watermarks(rdev, rdev->mode_info.crtcs[i], lb_size, num_heads);
		lb_size = evergreen_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i+1], mode1, mode0);
		evergreen_program_watermarks(rdev, rdev->mode_info.crtcs[i+1], lb_size, num_heads);
	}
}

/**
 * evergreen_mc_wait_for_idle - wait for MC idle callback.
 *
 * @rdev: radeon_device pointer
 *
 * Wait for the MC (memory controller) to be idle.
 * (evergreen+).
 * Returns 0 if the MC is idle, -1 if not.
 */
int evergreen_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(SRBM_STATUS) & 0x1F00;
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -1;
}

/*
 * GART
 */
void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);

	WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
		tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
		if (tmp == 2) {
			printk(KERN_WARNING "[drm] r600 flush TLB failed\n");
			return;
		}
		if (tmp) {
			return;
		}
		udelay(1);
	}
}

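/*
 * Reading the code flow above: a response type of 2 appears to signal a
 * failed flush request, while any other non-zero response indicates the
 * TLB flush completed and the poll can exit.
 */
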
static int evergreen_pcie_gart_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int r;

	if (rdev->gart.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	radeon_gart_restore(rdev);
	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
	if (rdev->flags & RADEON_IS_IGP) {
		WREG32(FUS_MC_VM_MD_L1_TLB0_CNTL, tmp);
		WREG32(FUS_MC_VM_MD_L1_TLB1_CNTL, tmp);
		WREG32(FUS_MC_VM_MD_L1_TLB2_CNTL, tmp);
	} else {
		WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
		WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
		WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
		if ((rdev->family == CHIP_JUNIPER) ||
		    (rdev->family == CHIP_CYPRESS) ||
		    (rdev->family == CHIP_HEMLOCK) ||
		    (rdev->family == CHIP_BARTS))
			WREG32(MC_VM_MD_L1_TLB3_CNTL, tmp);
	}
	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(rdev->dummy_page.addr >> 12));
	WREG32(VM_CONTEXT1_CNTL, 0);

	evergreen_pcie_gart_tlb_flush(rdev);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20),
		 (unsigned long long)rdev->gart.table_addr);
	rdev->gart.ready = true;
	return 0;
}

static void evergreen_pcie_gart_disable(struct radeon_device *rdev)
{
	u32 tmp;

	/* Disable all tables */
	WREG32(VM_CONTEXT0_CNTL, 0);
	WREG32(VM_CONTEXT1_CNTL, 0);

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
	/* Setup TLB control */
	tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
	WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
	radeon_gart_table_vram_unpin(rdev);
}

static void evergreen_pcie_gart_fini(struct radeon_device *rdev)
{
	evergreen_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
	radeon_gart_fini(rdev);
}


static void evergreen_agp_enable(struct radeon_device *rdev)
{
	u32 tmp;

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
	WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
	WREG32(VM_CONTEXT0_CNTL, 0);
	WREG32(VM_CONTEXT1_CNTL, 0);
}

Alex Deucherbcc1c2a2010-01-12 17:54:34 -05001297{
Alex Deucher62444b72012-08-15 17:18:42 -04001298 u32 crtc_enabled, tmp, frame_count, blackout;
1299 int i, j;
1300
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05001301 save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
1302 save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05001303
Alex Deucher62444b72012-08-15 17:18:42 -04001304 /* disable VGA render */
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05001305 WREG32(VGA_RENDER_CONTROL, 0);
Alex Deucher62444b72012-08-15 17:18:42 -04001306 /* blank the display controllers */
1307 for (i = 0; i < rdev->num_crtc; i++) {
1308 crtc_enabled = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]) & EVERGREEN_CRTC_MASTER_EN;
1309 if (crtc_enabled) {
1310 save->crtc_enabled[i] = true;
1311 if (ASIC_IS_DCE6(rdev)) {
1312 tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
1313 if (!(tmp & EVERGREEN_CRTC_BLANK_DATA_EN)) {
1314 radeon_wait_for_vblank(rdev, i);
1315 tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
Christopher Staitebb5888202013-01-26 11:10:58 -05001316 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
Alex Deucher62444b72012-08-15 17:18:42 -04001317 WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
Christopher Staitebb5888202013-01-26 11:10:58 -05001318 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
Alex Deucher62444b72012-08-15 17:18:42 -04001319 }
1320 } else {
1321 tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
1322 if (!(tmp & EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE)) {
1323 radeon_wait_for_vblank(rdev, i);
1324 tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
Christopher Staitebb5888202013-01-26 11:10:58 -05001325 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
Alex Deucher62444b72012-08-15 17:18:42 -04001326 WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
Christopher Staitebb5888202013-01-26 11:10:58 -05001327 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
Alex Deucher62444b72012-08-15 17:18:42 -04001328 }
1329 }
1330 /* wait for the next frame */
1331 frame_count = radeon_get_vblank_counter(rdev, i);
1332 for (j = 0; j < rdev->usec_timeout; j++) {
1333 if (radeon_get_vblank_counter(rdev, i) != frame_count)
1334 break;
1335 udelay(1);
1336 }
Alex Deucher804cc4a02012-11-19 09:11:27 -05001337 } else {
1338 save->crtc_enabled[i] = false;
Alex Deucher62444b72012-08-15 17:18:42 -04001339 }
Alex Deucher18007402010-11-22 17:56:28 -05001340 }
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05001341
Alex Deucher62444b72012-08-15 17:18:42 -04001342 radeon_mc_wait_for_idle(rdev);
1343
1344 blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
1345 if ((blackout & BLACKOUT_MODE_MASK) != 1) {
1346 /* Block CPU access */
1347 WREG32(BIF_FB_EN, 0);
1348 /* blackout the MC */
1349 blackout &= ~BLACKOUT_MODE_MASK;
1350 WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1);
Alex Deucherb7eff392011-07-08 11:44:56 -04001351 }
Alex Deuchered39fad2013-01-31 09:00:52 -05001352 /* wait for the MC to settle */
1353 udelay(100);
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05001354}
1355
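/*
 * Sequence sketch for the save/restore pair: blank every active CRTC,
 * wait one full frame so in-flight scanout requests drain, then black
 * out the MC and block CPU framebuffer access so VRAM can be safely
 * reprogrammed; evergreen_mc_resume() reverses these steps.
 */
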
Alex Deucherb9952a82011-03-02 20:07:33 -05001356void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save)
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05001357{
Alex Deucher62444b72012-08-15 17:18:42 -04001358 u32 tmp, frame_count;
1359 int i, j;
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05001360
Alex Deucher62444b72012-08-15 17:18:42 -04001361 /* update crtc base addresses */
1362 for (i = 0; i < rdev->num_crtc; i++) {
1363 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
Alex Deucher18007402010-11-22 17:56:28 -05001364 upper_32_bits(rdev->mc.vram_start));
Alex Deucher62444b72012-08-15 17:18:42 -04001365 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
Alex Deucher18007402010-11-22 17:56:28 -05001366 upper_32_bits(rdev->mc.vram_start));
Alex Deucher62444b72012-08-15 17:18:42 -04001367 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
Alex Deucher18007402010-11-22 17:56:28 -05001368 (u32)rdev->mc.vram_start);
Alex Deucher62444b72012-08-15 17:18:42 -04001369 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
Alex Deucher18007402010-11-22 17:56:28 -05001370 (u32)rdev->mc.vram_start);
Alex Deucherb7eff392011-07-08 11:44:56 -04001371 }
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05001372 WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start));
1373 WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);
Alex Deucher62444b72012-08-15 17:18:42 -04001374
1375 /* unblackout the MC */
1376 tmp = RREG32(MC_SHARED_BLACKOUT_CNTL);
1377 tmp &= ~BLACKOUT_MODE_MASK;
1378 WREG32(MC_SHARED_BLACKOUT_CNTL, tmp);
1379 /* allow CPU access */
1380 WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN);
1381
1382 for (i = 0; i < rdev->num_crtc; i++) {
Alex Deucher695ddeb2012-11-05 16:34:58 +00001383 if (save->crtc_enabled[i]) {
Alex Deucher62444b72012-08-15 17:18:42 -04001384 if (ASIC_IS_DCE6(rdev)) {
1385 tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
1386 tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
Christopher Staitebb5888202013-01-26 11:10:58 -05001387 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
Alex Deucher62444b72012-08-15 17:18:42 -04001388 WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
Christopher Staitebb5888202013-01-26 11:10:58 -05001389 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
Alex Deucher62444b72012-08-15 17:18:42 -04001390 } else {
1391 tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
1392 tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
Christopher Staitebb5888202013-01-26 11:10:58 -05001393 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
Alex Deucher62444b72012-08-15 17:18:42 -04001394 WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
Christopher Staitebb5888202013-01-26 11:10:58 -05001395 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
Alex Deucher62444b72012-08-15 17:18:42 -04001396 }
1397 /* wait for the next frame */
1398 frame_count = radeon_get_vblank_counter(rdev, i);
1399 for (j = 0; j < rdev->usec_timeout; j++) {
1400 if (radeon_get_vblank_counter(rdev, i) != frame_count)
1401 break;
1402 udelay(1);
1403 }
1404 }
1405 }
1406 /* Unlock vga access */
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05001407 WREG32(VGA_HDP_CONTROL, save->vga_hdp_control);
1408 mdelay(1);
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05001409 WREG32(VGA_RENDER_CONTROL, save->vga_render_control);
1410}
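/*
 * Note on the UPDATE_LOCK bracketing used in both this function and
 * evergreen_mc_stop(): CRTC control updates are double-buffered, so
 * holding EVERGREEN_CRTC_UPDATE_LOCK across the write lets the change
 * latch atomically (behaviour inferred from the lock/write/unlock
 * pattern rather than documented here).
 */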
1411
Alex Deucher755d8192011-03-02 20:07:34 -05001412void evergreen_mc_program(struct radeon_device *rdev)
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05001413{
1414 struct evergreen_mc_save save;
1415 u32 tmp;
1416 int i, j;
1417
1418 /* Initialize HDP */
1419 for (i = 0, j = 0; i < 32; i++, j += 0x18) {
1420 WREG32((0x2c14 + j), 0x00000000);
1421 WREG32((0x2c18 + j), 0x00000000);
1422 WREG32((0x2c1c + j), 0x00000000);
1423 WREG32((0x2c20 + j), 0x00000000);
1424 WREG32((0x2c24 + j), 0x00000000);
1425 }
1426 WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);
1427
1428 evergreen_mc_stop(rdev, &save);
1429 if (evergreen_mc_wait_for_idle(rdev)) {
 1430	dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
1431 }
 1432	/* Lock out access through the VGA aperture */
1433 WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
1434 /* Update configuration */
1435 if (rdev->flags & RADEON_IS_AGP) {
1436 if (rdev->mc.vram_start < rdev->mc.gtt_start) {
1437 /* VRAM before AGP */
1438 WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
1439 rdev->mc.vram_start >> 12);
1440 WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
1441 rdev->mc.gtt_end >> 12);
1442 } else {
1443 /* VRAM after AGP */
1444 WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
1445 rdev->mc.gtt_start >> 12);
1446 WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
1447 rdev->mc.vram_end >> 12);
1448 }
1449 } else {
1450 WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
1451 rdev->mc.vram_start >> 12);
1452 WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
1453 rdev->mc.vram_end >> 12);
1454 }
Alex Deucher3b9832f2011-11-10 08:59:39 -05001455 WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12);
Alex Deucher05b3ef62012-03-20 17:18:37 -04001456 /* llano/ontario only */
1457 if ((rdev->family == CHIP_PALM) ||
1458 (rdev->family == CHIP_SUMO) ||
1459 (rdev->family == CHIP_SUMO2)) {
Alex Deucherb4183e32010-12-15 11:04:10 -05001460 tmp = RREG32(MC_FUS_VM_FB_OFFSET) & 0x000FFFFF;
1461 tmp |= ((rdev->mc.vram_end >> 20) & 0xF) << 24;
1462 tmp |= ((rdev->mc.vram_start >> 20) & 0xF) << 20;
1463 WREG32(MC_FUS_VM_FB_OFFSET, tmp);
1464 }
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05001465 tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
1466 tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
1467 WREG32(MC_VM_FB_LOCATION, tmp);
1468 WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
Alex Deucherc46cb4d2011-01-06 19:12:37 -05001469 WREG32(HDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
Jerome Glisse46fcd2b2010-06-03 19:34:48 +02001470 WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05001471 if (rdev->flags & RADEON_IS_AGP) {
1472 WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16);
1473 WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16);
1474 WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
1475 } else {
1476 WREG32(MC_VM_AGP_BASE, 0);
1477 WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
1478 WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
1479 }
1480 if (evergreen_mc_wait_for_idle(rdev)) {
 1481	dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
1482 }
1483 evergreen_mc_resume(rdev, &save);
1484 /* we need to own VRAM, so turn off the VGA renderer here
 1485	 * to stop it from overwriting our objects */
1486 rv515_vga_render_disable(rdev);
1487}
1488
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05001489/*
1490 * CP.
1491 */
Alex Deucher12920592011-02-02 12:37:40 -05001492void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
1493{
Christian König876dc9f2012-05-08 14:24:01 +02001494 struct radeon_ring *ring = &rdev->ring[ib->ring];
Alex Deucher89d35802012-07-17 14:02:31 -04001495 u32 next_rptr;
Christian König7b1f2482011-09-23 15:11:23 +02001496
Alex Deucher12920592011-02-02 12:37:40 -05001497 /* set to DX10/11 mode */
Christian Könige32eb502011-10-23 12:56:27 +02001498 radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
1499 radeon_ring_write(ring, 1);
Christian König45df6802012-07-06 16:22:55 +02001500
1501 if (ring->rptr_save_reg) {
Alex Deucher89d35802012-07-17 14:02:31 -04001502		next_rptr = ring->wptr + 3 + 4; /* 3 dwords for this write + 4 for the IB packet below */
Christian König45df6802012-07-06 16:22:55 +02001503 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
1504 radeon_ring_write(ring, ((ring->rptr_save_reg -
1505 PACKET3_SET_CONFIG_REG_START) >> 2));
1506 radeon_ring_write(ring, next_rptr);
Alex Deucher89d35802012-07-17 14:02:31 -04001507 } else if (rdev->wb.enabled) {
 1508		next_rptr = ring->wptr + 5 + 4; /* 5 dwords for the MEM_WRITE + 4 for the IB packet below */
1509 radeon_ring_write(ring, PACKET3(PACKET3_MEM_WRITE, 3));
1510 radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
1511 radeon_ring_write(ring, (upper_32_bits(ring->next_rptr_gpu_addr) & 0xff) | (1 << 18));
1512 radeon_ring_write(ring, next_rptr);
1513 radeon_ring_write(ring, 0);
Christian König45df6802012-07-06 16:22:55 +02001514 }
1515
Christian Könige32eb502011-10-23 12:56:27 +02001516 radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
1517 radeon_ring_write(ring,
Alex Deucher0f234f5f2011-02-13 19:06:33 -05001518#ifdef __BIG_ENDIAN
1519 (2 << 0) |
1520#endif
1521 (ib->gpu_addr & 0xFFFFFFFC));
Christian Könige32eb502011-10-23 12:56:27 +02001522 radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
1523 radeon_ring_write(ring, ib->length_dw);
Alex Deucher12920592011-02-02 12:37:40 -05001524}
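/*
 * For reference, the dword stream emitted above when write-back is
 * enabled looks like this (derived directly from the writes; the
 * offsets are why next_rptr is ring->wptr + 5 + 4):
 *
 *   +0  PACKET3(PACKET3_MEM_WRITE, 3)    +5  PACKET3(PACKET3_INDIRECT_BUFFER, 2)
 *   +1  next_rptr address, low bits      +6  IB address, low bits
 *   +2  address high | (1 << 18)         +7  IB address, high bits
 *   +3  next_rptr value                  +8  IB length in dwords
 *   +4  0
 */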
1525
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05001526
1527static int evergreen_cp_load_microcode(struct radeon_device *rdev)
1528{
Alex Deucherfe251e22010-03-24 13:36:43 -04001529 const __be32 *fw_data;
1530 int i;
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05001531
Alex Deucherfe251e22010-03-24 13:36:43 -04001532 if (!rdev->me_fw || !rdev->pfp_fw)
1533 return -EINVAL;
1534
1535 r700_cp_stop(rdev);
Alex Deucher0f234f5f2011-02-13 19:06:33 -05001536 WREG32(CP_RB_CNTL,
1537#ifdef __BIG_ENDIAN
1538 BUF_SWAP_32BIT |
1539#endif
1540 RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
Alex Deucherfe251e22010-03-24 13:36:43 -04001541
1542 fw_data = (const __be32 *)rdev->pfp_fw->data;
1543 WREG32(CP_PFP_UCODE_ADDR, 0);
1544 for (i = 0; i < EVERGREEN_PFP_UCODE_SIZE; i++)
1545 WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
1546 WREG32(CP_PFP_UCODE_ADDR, 0);
1547
1548 fw_data = (const __be32 *)rdev->me_fw->data;
1549 WREG32(CP_ME_RAM_WADDR, 0);
1550 for (i = 0; i < EVERGREEN_PM4_UCODE_SIZE; i++)
1551 WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
1552
1553 WREG32(CP_PFP_UCODE_ADDR, 0);
1554 WREG32(CP_ME_RAM_WADDR, 0);
1555 WREG32(CP_ME_RAM_RADDR, 0);
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05001556 return 0;
1557}
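/*
 * Minimal sketch of the upload pattern used above (hypothetical helper,
 * not part of the driver): stream big-endian firmware words into a
 * ucode RAM through an address/data register pair, e.g.
 * example_load_ucode(rdev, CP_PFP_UCODE_ADDR, CP_PFP_UCODE_DATA,
 * fw_data, EVERGREEN_PFP_UCODE_SIZE).
 */
static void example_load_ucode(struct radeon_device *rdev,
			       u32 addr_reg, u32 data_reg,
			       const __be32 *fw, int count)
{
	int i;

	WREG32(addr_reg, 0);		/* rewind the ucode write pointer */
	for (i = 0; i < count; i++)
		WREG32(data_reg, be32_to_cpup(fw++));
	WREG32(addr_reg, 0);		/* leave it rewound for the CP */
}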
1558
Alex Deucher7e7b41d2010-09-02 21:32:32 -04001559static int evergreen_cp_start(struct radeon_device *rdev)
1560{
Christian Könige32eb502011-10-23 12:56:27 +02001561 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
Alex Deucher2281a372010-10-21 13:31:38 -04001562 int r, i;
Alex Deucher7e7b41d2010-09-02 21:32:32 -04001563 uint32_t cp_me;
1564
Christian Könige32eb502011-10-23 12:56:27 +02001565 r = radeon_ring_lock(rdev, ring, 7);
Alex Deucher7e7b41d2010-09-02 21:32:32 -04001566 if (r) {
1567 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
1568 return r;
1569 }
Christian Könige32eb502011-10-23 12:56:27 +02001570 radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
1571 radeon_ring_write(ring, 0x1);
1572 radeon_ring_write(ring, 0x0);
1573 radeon_ring_write(ring, rdev->config.evergreen.max_hw_contexts - 1);
1574 radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
1575 radeon_ring_write(ring, 0);
1576 radeon_ring_write(ring, 0);
1577 radeon_ring_unlock_commit(rdev, ring);
Alex Deucher7e7b41d2010-09-02 21:32:32 -04001578
1579 cp_me = 0xff;
1580 WREG32(CP_ME_CNTL, cp_me);
1581
Christian Könige32eb502011-10-23 12:56:27 +02001582 r = radeon_ring_lock(rdev, ring, evergreen_default_size + 19);
Alex Deucher7e7b41d2010-09-02 21:32:32 -04001583 if (r) {
1584 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
1585 return r;
1586 }
Alex Deucher2281a372010-10-21 13:31:38 -04001587
1588 /* setup clear context state */
Christian Könige32eb502011-10-23 12:56:27 +02001589 radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
1590 radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
Alex Deucher2281a372010-10-21 13:31:38 -04001591
1592 for (i = 0; i < evergreen_default_size; i++)
Christian Könige32eb502011-10-23 12:56:27 +02001593 radeon_ring_write(ring, evergreen_default_state[i]);
Alex Deucher2281a372010-10-21 13:31:38 -04001594
Christian Könige32eb502011-10-23 12:56:27 +02001595 radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
1596 radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
Alex Deucher2281a372010-10-21 13:31:38 -04001597
1598 /* set clear context state */
Christian Könige32eb502011-10-23 12:56:27 +02001599 radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
1600 radeon_ring_write(ring, 0);
Alex Deucher2281a372010-10-21 13:31:38 -04001601
1602 /* SQ_VTX_BASE_VTX_LOC */
Christian Könige32eb502011-10-23 12:56:27 +02001603 radeon_ring_write(ring, 0xc0026f00);
1604 radeon_ring_write(ring, 0x00000000);
1605 radeon_ring_write(ring, 0x00000000);
1606 radeon_ring_write(ring, 0x00000000);
Alex Deucher2281a372010-10-21 13:31:38 -04001607
1608 /* Clear consts */
Christian Könige32eb502011-10-23 12:56:27 +02001609 radeon_ring_write(ring, 0xc0036f00);
1610 radeon_ring_write(ring, 0x00000bc4);
1611 radeon_ring_write(ring, 0xffffffff);
1612 radeon_ring_write(ring, 0xffffffff);
1613 radeon_ring_write(ring, 0xffffffff);
Alex Deucher2281a372010-10-21 13:31:38 -04001614
Christian Könige32eb502011-10-23 12:56:27 +02001615 radeon_ring_write(ring, 0xc0026900);
1616 radeon_ring_write(ring, 0x00000316);
1617 radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
 1618	radeon_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */
Alex Deucher18ff84d2011-02-02 12:37:41 -05001619
Christian Könige32eb502011-10-23 12:56:27 +02001620 radeon_ring_unlock_commit(rdev, ring);
Alex Deucher7e7b41d2010-09-02 21:32:32 -04001621
1622 return 0;
1623}
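/*
 * The raw 0xc0026f00/0xc0036f00/0xc0026900 words above are PACKET3
 * headers written without the macro.  A sketch of the encoding used by
 * the PACKET3() macro elsewhere in this file (the opcode names are from
 * evergreend.h; count is the payload length minus one):
 *
 *   0xc0026f00 == example_packet3(0x6f, 2)  (SET_CTL_CONST, 3 dwords)
 *   0xc0026900 == example_packet3(0x69, 2)  (SET_CONTEXT_REG, 3 dwords)
 */
static inline u32 example_packet3(u32 opcode, u32 count)
{
	return (3U << 30) | ((count & 0x3fff) << 16) | ((opcode & 0xff) << 8);
}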
1624
Lauri Kasanen1109ca02012-08-31 13:43:50 -04001625static int evergreen_cp_resume(struct radeon_device *rdev)
Alex Deucherfe251e22010-03-24 13:36:43 -04001626{
Christian Könige32eb502011-10-23 12:56:27 +02001627 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
Alex Deucherfe251e22010-03-24 13:36:43 -04001628 u32 tmp;
1629 u32 rb_bufsz;
1630 int r;
1631
1632 /* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
1633 WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
1634 SOFT_RESET_PA |
1635 SOFT_RESET_SH |
1636 SOFT_RESET_VGT |
Jerome Glissea49a50d2011-08-24 20:00:17 +00001637 SOFT_RESET_SPI |
Alex Deucherfe251e22010-03-24 13:36:43 -04001638 SOFT_RESET_SX));
1639 RREG32(GRBM_SOFT_RESET);
1640 mdelay(15);
1641 WREG32(GRBM_SOFT_RESET, 0);
1642 RREG32(GRBM_SOFT_RESET);
1643
1644 /* Set ring buffer size */
Christian Könige32eb502011-10-23 12:56:27 +02001645 rb_bufsz = drm_order(ring->ring_size / 8);
Alex Deucher724c80e2010-08-27 18:25:25 -04001646 tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
Alex Deucherfe251e22010-03-24 13:36:43 -04001647#ifdef __BIG_ENDIAN
1648 tmp |= BUF_SWAP_32BIT;
Alex Deucher32fcdbf2010-03-24 13:33:47 -04001649#endif
Alex Deucherfe251e22010-03-24 13:36:43 -04001650 WREG32(CP_RB_CNTL, tmp);
Christian König15d33322011-09-15 19:02:22 +02001651 WREG32(CP_SEM_WAIT_TIMER, 0x0);
Alex Deucher11ef3f12012-01-20 14:47:43 -05001652 WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
Alex Deucherfe251e22010-03-24 13:36:43 -04001653
1654 /* Set the write pointer delay */
1655 WREG32(CP_RB_WPTR_DELAY, 0);
1656
1657 /* Initialize the ring buffer's read and write pointers */
1658 WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
1659 WREG32(CP_RB_RPTR_WR, 0);
Christian Könige32eb502011-10-23 12:56:27 +02001660 ring->wptr = 0;
1661 WREG32(CP_RB_WPTR, ring->wptr);
Alex Deucher724c80e2010-08-27 18:25:25 -04001662
Adam Buchbinder48fc7f72012-09-19 21:48:00 -04001663 /* set the wb address whether it's enabled or not */
Alex Deucher0f234f5f2011-02-13 19:06:33 -05001664 WREG32(CP_RB_RPTR_ADDR,
Alex Deucher0f234f5f2011-02-13 19:06:33 -05001665 ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
Alex Deucher724c80e2010-08-27 18:25:25 -04001666 WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
1667 WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
1668
1669 if (rdev->wb.enabled)
1670 WREG32(SCRATCH_UMSK, 0xff);
1671 else {
1672 tmp |= RB_NO_UPDATE;
1673 WREG32(SCRATCH_UMSK, 0);
1674 }
1675
Alex Deucherfe251e22010-03-24 13:36:43 -04001676 mdelay(1);
1677 WREG32(CP_RB_CNTL, tmp);
1678
Christian Könige32eb502011-10-23 12:56:27 +02001679 WREG32(CP_RB_BASE, ring->gpu_addr >> 8);
Alex Deucherfe251e22010-03-24 13:36:43 -04001680 WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
1681
Christian Könige32eb502011-10-23 12:56:27 +02001682 ring->rptr = RREG32(CP_RB_RPTR);
Alex Deucherfe251e22010-03-24 13:36:43 -04001683
Alex Deucher7e7b41d2010-09-02 21:32:32 -04001684 evergreen_cp_start(rdev);
Christian Könige32eb502011-10-23 12:56:27 +02001685 ring->ready = true;
Alex Deucherf7128122012-02-23 17:53:45 -05001686 r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
Alex Deucherfe251e22010-03-24 13:36:43 -04001687 if (r) {
Christian Könige32eb502011-10-23 12:56:27 +02001688 ring->ready = false;
Alex Deucherfe251e22010-03-24 13:36:43 -04001689 return r;
1690 }
1691 return 0;
1692}
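/*
 * Note on the size encoding above: the value programmed into RB_BUFSZ
 * is log2(ring_size / 8), so e.g. a 1 MiB ring gives
 * drm_order(1048576 / 8) == 17.
 */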
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05001693
1694/*
1695 * Core functions
1696 */
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05001697static void evergreen_gpu_init(struct radeon_device *rdev)
1698{
Alex Deucher416a2bd2012-05-31 19:00:25 -04001699 u32 gb_addr_config;
Alex Deucher32fcdbf2010-03-24 13:33:47 -04001700 u32 mc_shared_chmap, mc_arb_ramcfg;
Alex Deucher32fcdbf2010-03-24 13:33:47 -04001701 u32 sx_debug_1;
1702 u32 smx_dc_ctl0;
1703 u32 sq_config;
1704 u32 sq_lds_resource_mgmt;
1705 u32 sq_gpr_resource_mgmt_1;
1706 u32 sq_gpr_resource_mgmt_2;
1707 u32 sq_gpr_resource_mgmt_3;
1708 u32 sq_thread_resource_mgmt;
1709 u32 sq_thread_resource_mgmt_2;
1710 u32 sq_stack_resource_mgmt_1;
1711 u32 sq_stack_resource_mgmt_2;
1712 u32 sq_stack_resource_mgmt_3;
1713 u32 vgt_cache_invalidation;
Alex Deucherf25a5c62011-05-19 11:07:57 -04001714 u32 hdp_host_path_cntl, tmp;
Alex Deucher416a2bd2012-05-31 19:00:25 -04001715 u32 disabled_rb_mask;
Alex Deucher32fcdbf2010-03-24 13:33:47 -04001716 int i, j, num_shader_engines, ps_thread_count;
1717
1718 switch (rdev->family) {
1719 case CHIP_CYPRESS:
1720 case CHIP_HEMLOCK:
1721 rdev->config.evergreen.num_ses = 2;
1722 rdev->config.evergreen.max_pipes = 4;
1723 rdev->config.evergreen.max_tile_pipes = 8;
1724 rdev->config.evergreen.max_simds = 10;
1725 rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
1726 rdev->config.evergreen.max_gprs = 256;
1727 rdev->config.evergreen.max_threads = 248;
1728 rdev->config.evergreen.max_gs_threads = 32;
1729 rdev->config.evergreen.max_stack_entries = 512;
1730 rdev->config.evergreen.sx_num_of_sets = 4;
1731 rdev->config.evergreen.sx_max_export_size = 256;
1732 rdev->config.evergreen.sx_max_export_pos_size = 64;
1733 rdev->config.evergreen.sx_max_export_smx_size = 192;
1734 rdev->config.evergreen.max_hw_contexts = 8;
1735 rdev->config.evergreen.sq_num_cf_insts = 2;
1736
1737 rdev->config.evergreen.sc_prim_fifo_size = 0x100;
1738 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1739 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
Alex Deucher416a2bd2012-05-31 19:00:25 -04001740 gb_addr_config = CYPRESS_GB_ADDR_CONFIG_GOLDEN;
Alex Deucher32fcdbf2010-03-24 13:33:47 -04001741 break;
1742 case CHIP_JUNIPER:
1743 rdev->config.evergreen.num_ses = 1;
1744 rdev->config.evergreen.max_pipes = 4;
1745 rdev->config.evergreen.max_tile_pipes = 4;
1746 rdev->config.evergreen.max_simds = 10;
1747 rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
1748 rdev->config.evergreen.max_gprs = 256;
1749 rdev->config.evergreen.max_threads = 248;
1750 rdev->config.evergreen.max_gs_threads = 32;
1751 rdev->config.evergreen.max_stack_entries = 512;
1752 rdev->config.evergreen.sx_num_of_sets = 4;
1753 rdev->config.evergreen.sx_max_export_size = 256;
1754 rdev->config.evergreen.sx_max_export_pos_size = 64;
1755 rdev->config.evergreen.sx_max_export_smx_size = 192;
1756 rdev->config.evergreen.max_hw_contexts = 8;
1757 rdev->config.evergreen.sq_num_cf_insts = 2;
1758
1759 rdev->config.evergreen.sc_prim_fifo_size = 0x100;
1760 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1761 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
Alex Deucher416a2bd2012-05-31 19:00:25 -04001762 gb_addr_config = JUNIPER_GB_ADDR_CONFIG_GOLDEN;
Alex Deucher32fcdbf2010-03-24 13:33:47 -04001763 break;
1764 case CHIP_REDWOOD:
1765 rdev->config.evergreen.num_ses = 1;
1766 rdev->config.evergreen.max_pipes = 4;
1767 rdev->config.evergreen.max_tile_pipes = 4;
1768 rdev->config.evergreen.max_simds = 5;
1769 rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
1770 rdev->config.evergreen.max_gprs = 256;
1771 rdev->config.evergreen.max_threads = 248;
1772 rdev->config.evergreen.max_gs_threads = 32;
1773 rdev->config.evergreen.max_stack_entries = 256;
1774 rdev->config.evergreen.sx_num_of_sets = 4;
1775 rdev->config.evergreen.sx_max_export_size = 256;
1776 rdev->config.evergreen.sx_max_export_pos_size = 64;
1777 rdev->config.evergreen.sx_max_export_smx_size = 192;
1778 rdev->config.evergreen.max_hw_contexts = 8;
1779 rdev->config.evergreen.sq_num_cf_insts = 2;
1780
1781 rdev->config.evergreen.sc_prim_fifo_size = 0x100;
1782 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1783 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
Alex Deucher416a2bd2012-05-31 19:00:25 -04001784 gb_addr_config = REDWOOD_GB_ADDR_CONFIG_GOLDEN;
Alex Deucher32fcdbf2010-03-24 13:33:47 -04001785 break;
1786 case CHIP_CEDAR:
1787 default:
1788 rdev->config.evergreen.num_ses = 1;
1789 rdev->config.evergreen.max_pipes = 2;
1790 rdev->config.evergreen.max_tile_pipes = 2;
1791 rdev->config.evergreen.max_simds = 2;
1792 rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
1793 rdev->config.evergreen.max_gprs = 256;
1794 rdev->config.evergreen.max_threads = 192;
1795 rdev->config.evergreen.max_gs_threads = 16;
1796 rdev->config.evergreen.max_stack_entries = 256;
1797 rdev->config.evergreen.sx_num_of_sets = 4;
1798 rdev->config.evergreen.sx_max_export_size = 128;
1799 rdev->config.evergreen.sx_max_export_pos_size = 32;
1800 rdev->config.evergreen.sx_max_export_smx_size = 96;
1801 rdev->config.evergreen.max_hw_contexts = 4;
1802 rdev->config.evergreen.sq_num_cf_insts = 1;
1803
1804 rdev->config.evergreen.sc_prim_fifo_size = 0x40;
1805 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1806 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
Alex Deucher416a2bd2012-05-31 19:00:25 -04001807 gb_addr_config = CEDAR_GB_ADDR_CONFIG_GOLDEN;
Alex Deucher32fcdbf2010-03-24 13:33:47 -04001808 break;
Alex Deucherd5e455e2010-11-22 17:56:29 -05001809 case CHIP_PALM:
1810 rdev->config.evergreen.num_ses = 1;
1811 rdev->config.evergreen.max_pipes = 2;
1812 rdev->config.evergreen.max_tile_pipes = 2;
1813 rdev->config.evergreen.max_simds = 2;
1814 rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
1815 rdev->config.evergreen.max_gprs = 256;
1816 rdev->config.evergreen.max_threads = 192;
1817 rdev->config.evergreen.max_gs_threads = 16;
1818 rdev->config.evergreen.max_stack_entries = 256;
1819 rdev->config.evergreen.sx_num_of_sets = 4;
1820 rdev->config.evergreen.sx_max_export_size = 128;
1821 rdev->config.evergreen.sx_max_export_pos_size = 32;
1822 rdev->config.evergreen.sx_max_export_smx_size = 96;
1823 rdev->config.evergreen.max_hw_contexts = 4;
1824 rdev->config.evergreen.sq_num_cf_insts = 1;
1825
1826 rdev->config.evergreen.sc_prim_fifo_size = 0x40;
1827 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1828 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
Alex Deucher416a2bd2012-05-31 19:00:25 -04001829 gb_addr_config = CEDAR_GB_ADDR_CONFIG_GOLDEN;
Alex Deucherd5e455e2010-11-22 17:56:29 -05001830 break;
Alex Deucherd5c5a722011-05-31 15:42:48 -04001831 case CHIP_SUMO:
1832 rdev->config.evergreen.num_ses = 1;
1833 rdev->config.evergreen.max_pipes = 4;
Jerome Glissebd25f072012-12-11 11:56:52 -05001834 rdev->config.evergreen.max_tile_pipes = 4;
Alex Deucherd5c5a722011-05-31 15:42:48 -04001835 if (rdev->pdev->device == 0x9648)
1836 rdev->config.evergreen.max_simds = 3;
1837 else if ((rdev->pdev->device == 0x9647) ||
1838 (rdev->pdev->device == 0x964a))
1839 rdev->config.evergreen.max_simds = 4;
1840 else
1841 rdev->config.evergreen.max_simds = 5;
1842 rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
1843 rdev->config.evergreen.max_gprs = 256;
1844 rdev->config.evergreen.max_threads = 248;
1845 rdev->config.evergreen.max_gs_threads = 32;
1846 rdev->config.evergreen.max_stack_entries = 256;
1847 rdev->config.evergreen.sx_num_of_sets = 4;
1848 rdev->config.evergreen.sx_max_export_size = 256;
1849 rdev->config.evergreen.sx_max_export_pos_size = 64;
1850 rdev->config.evergreen.sx_max_export_smx_size = 192;
1851 rdev->config.evergreen.max_hw_contexts = 8;
1852 rdev->config.evergreen.sq_num_cf_insts = 2;
1853
1854 rdev->config.evergreen.sc_prim_fifo_size = 0x40;
1855 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1856 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
Jerome Glissebd25f072012-12-11 11:56:52 -05001857 gb_addr_config = SUMO_GB_ADDR_CONFIG_GOLDEN;
Alex Deucherd5c5a722011-05-31 15:42:48 -04001858 break;
1859 case CHIP_SUMO2:
1860 rdev->config.evergreen.num_ses = 1;
1861 rdev->config.evergreen.max_pipes = 4;
1862 rdev->config.evergreen.max_tile_pipes = 4;
1863 rdev->config.evergreen.max_simds = 2;
1864 rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
1865 rdev->config.evergreen.max_gprs = 256;
1866 rdev->config.evergreen.max_threads = 248;
1867 rdev->config.evergreen.max_gs_threads = 32;
1868 rdev->config.evergreen.max_stack_entries = 512;
1869 rdev->config.evergreen.sx_num_of_sets = 4;
1870 rdev->config.evergreen.sx_max_export_size = 256;
1871 rdev->config.evergreen.sx_max_export_pos_size = 64;
1872 rdev->config.evergreen.sx_max_export_smx_size = 192;
1873 rdev->config.evergreen.max_hw_contexts = 8;
1874 rdev->config.evergreen.sq_num_cf_insts = 2;
1875
1876 rdev->config.evergreen.sc_prim_fifo_size = 0x40;
1877 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1878 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
Jerome Glissebd25f072012-12-11 11:56:52 -05001879 gb_addr_config = SUMO2_GB_ADDR_CONFIG_GOLDEN;
Alex Deucherd5c5a722011-05-31 15:42:48 -04001880 break;
Alex Deucheradb68fa2011-01-06 21:19:24 -05001881 case CHIP_BARTS:
1882 rdev->config.evergreen.num_ses = 2;
1883 rdev->config.evergreen.max_pipes = 4;
1884 rdev->config.evergreen.max_tile_pipes = 8;
1885 rdev->config.evergreen.max_simds = 7;
1886 rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
1887 rdev->config.evergreen.max_gprs = 256;
1888 rdev->config.evergreen.max_threads = 248;
1889 rdev->config.evergreen.max_gs_threads = 32;
1890 rdev->config.evergreen.max_stack_entries = 512;
1891 rdev->config.evergreen.sx_num_of_sets = 4;
1892 rdev->config.evergreen.sx_max_export_size = 256;
1893 rdev->config.evergreen.sx_max_export_pos_size = 64;
1894 rdev->config.evergreen.sx_max_export_smx_size = 192;
1895 rdev->config.evergreen.max_hw_contexts = 8;
1896 rdev->config.evergreen.sq_num_cf_insts = 2;
1897
1898 rdev->config.evergreen.sc_prim_fifo_size = 0x100;
1899 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1900 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
Alex Deucher416a2bd2012-05-31 19:00:25 -04001901 gb_addr_config = BARTS_GB_ADDR_CONFIG_GOLDEN;
Alex Deucheradb68fa2011-01-06 21:19:24 -05001902 break;
1903 case CHIP_TURKS:
1904 rdev->config.evergreen.num_ses = 1;
1905 rdev->config.evergreen.max_pipes = 4;
1906 rdev->config.evergreen.max_tile_pipes = 4;
1907 rdev->config.evergreen.max_simds = 6;
1908 rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
1909 rdev->config.evergreen.max_gprs = 256;
1910 rdev->config.evergreen.max_threads = 248;
1911 rdev->config.evergreen.max_gs_threads = 32;
1912 rdev->config.evergreen.max_stack_entries = 256;
1913 rdev->config.evergreen.sx_num_of_sets = 4;
1914 rdev->config.evergreen.sx_max_export_size = 256;
1915 rdev->config.evergreen.sx_max_export_pos_size = 64;
1916 rdev->config.evergreen.sx_max_export_smx_size = 192;
1917 rdev->config.evergreen.max_hw_contexts = 8;
1918 rdev->config.evergreen.sq_num_cf_insts = 2;
1919
1920 rdev->config.evergreen.sc_prim_fifo_size = 0x100;
1921 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1922 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
Alex Deucher416a2bd2012-05-31 19:00:25 -04001923 gb_addr_config = TURKS_GB_ADDR_CONFIG_GOLDEN;
Alex Deucheradb68fa2011-01-06 21:19:24 -05001924 break;
1925 case CHIP_CAICOS:
1926 rdev->config.evergreen.num_ses = 1;
Jerome Glissebd25f072012-12-11 11:56:52 -05001927 rdev->config.evergreen.max_pipes = 2;
Alex Deucheradb68fa2011-01-06 21:19:24 -05001928 rdev->config.evergreen.max_tile_pipes = 2;
1929 rdev->config.evergreen.max_simds = 2;
1930 rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
1931 rdev->config.evergreen.max_gprs = 256;
1932 rdev->config.evergreen.max_threads = 192;
1933 rdev->config.evergreen.max_gs_threads = 16;
1934 rdev->config.evergreen.max_stack_entries = 256;
1935 rdev->config.evergreen.sx_num_of_sets = 4;
1936 rdev->config.evergreen.sx_max_export_size = 128;
1937 rdev->config.evergreen.sx_max_export_pos_size = 32;
1938 rdev->config.evergreen.sx_max_export_smx_size = 96;
1939 rdev->config.evergreen.max_hw_contexts = 4;
1940 rdev->config.evergreen.sq_num_cf_insts = 1;
1941
1942 rdev->config.evergreen.sc_prim_fifo_size = 0x40;
1943 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1944 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
Alex Deucher416a2bd2012-05-31 19:00:25 -04001945 gb_addr_config = CAICOS_GB_ADDR_CONFIG_GOLDEN;
Alex Deucheradb68fa2011-01-06 21:19:24 -05001946 break;
Alex Deucher32fcdbf2010-03-24 13:33:47 -04001947 }
1948
1949 /* Initialize HDP */
1950 for (i = 0, j = 0; i < 32; i++, j += 0x18) {
1951 WREG32((0x2c14 + j), 0x00000000);
1952 WREG32((0x2c18 + j), 0x00000000);
1953 WREG32((0x2c1c + j), 0x00000000);
1954 WREG32((0x2c20 + j), 0x00000000);
1955 WREG32((0x2c24 + j), 0x00000000);
1956 }
1957
1958 WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
1959
Alex Deucherd054ac12011-09-01 17:46:15 +00001960 evergreen_fix_pci_max_read_req_size(rdev);
1961
Alex Deucher32fcdbf2010-03-24 13:33:47 -04001962 mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
Alex Deucher05b3ef62012-03-20 17:18:37 -04001963 if ((rdev->family == CHIP_PALM) ||
1964 (rdev->family == CHIP_SUMO) ||
1965 (rdev->family == CHIP_SUMO2))
Alex Deucherd9282fc2011-05-11 03:15:24 -04001966 mc_arb_ramcfg = RREG32(FUS_MC_ARB_RAMCFG);
1967 else
1968 mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
Alex Deucher32fcdbf2010-03-24 13:33:47 -04001969
Alex Deucher1aa52bd2010-11-17 12:11:03 -05001970 /* setup tiling info dword. gb_addr_config is not adequate since it does
1971 * not have bank info, so create a custom tiling dword.
1972 * bits 3:0 num_pipes
1973 * bits 7:4 num_banks
1974 * bits 11:8 group_size
1975 * bits 15:12 row_size
1976 */
1977 rdev->config.evergreen.tile_config = 0;
1978 switch (rdev->config.evergreen.max_tile_pipes) {
1979 case 1:
1980 default:
1981 rdev->config.evergreen.tile_config |= (0 << 0);
1982 break;
1983 case 2:
1984 rdev->config.evergreen.tile_config |= (1 << 0);
1985 break;
1986 case 4:
1987 rdev->config.evergreen.tile_config |= (2 << 0);
1988 break;
1989 case 8:
1990 rdev->config.evergreen.tile_config |= (3 << 0);
1991 break;
1992 }
Alex Deucherd698a342011-06-23 00:49:29 -04001993	/* num banks is 8 on all fusion ASICs; encoding: 0 = 4, 1 = 8, 2 = 16 */
Alex Deucher5bfa4872011-05-20 12:35:22 -04001994 if (rdev->flags & RADEON_IS_IGP)
Alex Deucherd698a342011-06-23 00:49:29 -04001995 rdev->config.evergreen.tile_config |= 1 << 4;
Alex Deucher29d65402012-05-31 18:53:36 -04001996 else {
Alex Deucherc8d15ed2012-07-31 11:01:10 -04001997 switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) {
1998 case 0: /* four banks */
Alex Deucher29d65402012-05-31 18:53:36 -04001999 rdev->config.evergreen.tile_config |= 0 << 4;
Alex Deucherc8d15ed2012-07-31 11:01:10 -04002000 break;
2001 case 1: /* eight banks */
2002 rdev->config.evergreen.tile_config |= 1 << 4;
2003 break;
2004 case 2: /* sixteen banks */
2005 default:
2006 rdev->config.evergreen.tile_config |= 2 << 4;
2007 break;
2008 }
Alex Deucher29d65402012-05-31 18:53:36 -04002009 }
Alex Deucher416a2bd2012-05-31 19:00:25 -04002010 rdev->config.evergreen.tile_config |= 0 << 8;
Alex Deucher1aa52bd2010-11-17 12:11:03 -05002011 rdev->config.evergreen.tile_config |=
2012 ((gb_addr_config & 0x30000000) >> 28) << 12;
2013
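/*
 * Illustrative decode of the custom tile_config dword assembled above
 * (hypothetical helper, not driver code).  The pipe/bank decodings
 * follow the switch statements; the group/row size decodings are
 * assumptions based on the r600-family log2 encodings.
 */
static inline void example_decode_tile_config(u32 tc, u32 *pipes, u32 *banks,
					      u32 *group, u32 *row)
{
	*pipes = 1 << (tc & 0xf);		/* bits 3:0: 0=1, 1=2, 2=4, 3=8 */
	*banks = 4 << ((tc >> 4) & 0xf);	/* bits 7:4: 0=4, 1=8, 2=16 */
	*group = 256 << ((tc >> 8) & 0xf);	/* bits 11:8 (assumed: 0=256B) */
	*row = 1024 << ((tc >> 12) & 0xf);	/* bits 15:12 (assumed: 0=1KB) */
}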
Alex Deucher416a2bd2012-05-31 19:00:25 -04002014	num_shader_engines = ((gb_addr_config & NUM_SHADER_ENGINES(3)) >> 12) + 1;
2015
2016 if ((rdev->family >= CHIP_CEDAR) && (rdev->family <= CHIP_HEMLOCK)) {
2017 u32 efuse_straps_4;
2018 u32 efuse_straps_3;
2019
2020 WREG32(RCU_IND_INDEX, 0x204);
2021 efuse_straps_4 = RREG32(RCU_IND_DATA);
2022 WREG32(RCU_IND_INDEX, 0x203);
2023 efuse_straps_3 = RREG32(RCU_IND_DATA);
2024 tmp = (((efuse_straps_4 & 0xf) << 4) |
2025 ((efuse_straps_3 & 0xf0000000) >> 28));
2026 } else {
2027 tmp = 0;
2028 for (i = (rdev->config.evergreen.num_ses - 1); i >= 0; i--) {
2029 u32 rb_disable_bitmap;
2030
2031 WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
2032 WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
2033 rb_disable_bitmap = (RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000) >> 16;
2034 tmp <<= 4;
2035 tmp |= rb_disable_bitmap;
2036 }
2037 }
 2038	/* the enabled RBs are simply the ones not disabled */
2039 disabled_rb_mask = tmp;
2040
2041 WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
2042 WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
2043
Alex Deucher32fcdbf2010-03-24 13:33:47 -04002044 WREG32(GB_ADDR_CONFIG, gb_addr_config);
2045 WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
2046 WREG32(HDP_ADDR_CONFIG, gb_addr_config);
Alex Deucher233d1ad2012-12-04 15:25:59 -05002047 WREG32(DMA_TILING_CONFIG, gb_addr_config);
Alex Deucher32fcdbf2010-03-24 13:33:47 -04002048
Alex Deucherf7eb9732013-01-30 13:57:40 -05002049 if ((rdev->config.evergreen.max_backends == 1) &&
2050 (rdev->flags & RADEON_IS_IGP)) {
2051 if ((disabled_rb_mask & 3) == 1) {
2052 /* RB0 disabled, RB1 enabled */
2053 tmp = 0x11111111;
2054 } else {
2055 /* RB1 disabled, RB0 enabled */
2056 tmp = 0x00000000;
2057 }
2058 } else {
2059 tmp = gb_addr_config & NUM_PIPES_MASK;
2060 tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.evergreen.max_backends,
2061 EVERGREEN_MAX_BACKENDS, disabled_rb_mask);
2062 }
Alex Deucher416a2bd2012-05-31 19:00:25 -04002063 WREG32(GB_BACKEND_MAP, tmp);
Alex Deucher32fcdbf2010-03-24 13:33:47 -04002064
2065 WREG32(CGTS_SYS_TCC_DISABLE, 0);
2066 WREG32(CGTS_TCC_DISABLE, 0);
2067 WREG32(CGTS_USER_SYS_TCC_DISABLE, 0);
2068 WREG32(CGTS_USER_TCC_DISABLE, 0);
2069
2070 /* set HW defaults for 3D engine */
2071 WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
2072 ROQ_IB2_START(0x2b)));
2073
2074 WREG32(CP_MEQ_THRESHOLDS, STQ_SPLIT(0x30));
2075
2076 WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO |
2077 SYNC_GRADIENT |
2078 SYNC_WALKER |
2079 SYNC_ALIGNER));
2080
2081 sx_debug_1 = RREG32(SX_DEBUG_1);
2082 sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
2083 WREG32(SX_DEBUG_1, sx_debug_1);
2084
2085
2086 smx_dc_ctl0 = RREG32(SMX_DC_CTL0);
2087 smx_dc_ctl0 &= ~NUMBER_OF_SETS(0x1ff);
2088 smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.evergreen.sx_num_of_sets);
2089 WREG32(SMX_DC_CTL0, smx_dc_ctl0);
2090
Alex Deucherb866d132012-06-14 22:06:36 +02002091 if (rdev->family <= CHIP_SUMO2)
2092 WREG32(SMX_SAR_CTL0, 0x00010000);
2093
Alex Deucher32fcdbf2010-03-24 13:33:47 -04002094 WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_size / 4) - 1) |
2095 POSITION_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_pos_size / 4) - 1) |
2096 SMX_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_smx_size / 4) - 1)));
2097
2098 WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.evergreen.sc_prim_fifo_size) |
2099 SC_HIZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_hiz_tile_fifo_size) |
2100 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_earlyz_tile_fifo_size)));
2101
2102 WREG32(VGT_NUM_INSTANCES, 1);
2103 WREG32(SPI_CONFIG_CNTL, 0);
2104 WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));
2105 WREG32(CP_PERFMON_CNTL, 0);
2106
2107 WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.evergreen.sq_num_cf_insts) |
2108 FETCH_FIFO_HIWATER(0x4) |
2109 DONE_FIFO_HIWATER(0xe0) |
2110 ALU_UPDATE_FIFO_HIWATER(0x8)));
2111
2112 sq_config = RREG32(SQ_CONFIG);
2113 sq_config &= ~(PS_PRIO(3) |
2114 VS_PRIO(3) |
2115 GS_PRIO(3) |
2116 ES_PRIO(3));
2117 sq_config |= (VC_ENABLE |
2118 EXPORT_SRC_C |
2119 PS_PRIO(0) |
2120 VS_PRIO(1) |
2121 GS_PRIO(2) |
2122 ES_PRIO(3));
2123
Alex Deucherd5e455e2010-11-22 17:56:29 -05002124 switch (rdev->family) {
2125 case CHIP_CEDAR:
2126 case CHIP_PALM:
Alex Deucherd5c5a722011-05-31 15:42:48 -04002127 case CHIP_SUMO:
2128 case CHIP_SUMO2:
Alex Deucheradb68fa2011-01-06 21:19:24 -05002129 case CHIP_CAICOS:
Alex Deucher32fcdbf2010-03-24 13:33:47 -04002130 /* no vertex cache */
2131 sq_config &= ~VC_ENABLE;
Alex Deucherd5e455e2010-11-22 17:56:29 -05002132 break;
2133 default:
2134 break;
2135 }
Alex Deucher32fcdbf2010-03-24 13:33:47 -04002136
2137 sq_lds_resource_mgmt = RREG32(SQ_LDS_RESOURCE_MGMT);
2138
2139 sq_gpr_resource_mgmt_1 = NUM_PS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2))* 12 / 32);
2140 sq_gpr_resource_mgmt_1 |= NUM_VS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 6 / 32);
2141 sq_gpr_resource_mgmt_1 |= NUM_CLAUSE_TEMP_GPRS(4);
2142 sq_gpr_resource_mgmt_2 = NUM_GS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32);
2143 sq_gpr_resource_mgmt_2 |= NUM_ES_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32);
2144 sq_gpr_resource_mgmt_3 = NUM_HS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32);
2145 sq_gpr_resource_mgmt_3 |= NUM_LS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32);
2146
Alex Deucherd5e455e2010-11-22 17:56:29 -05002147 switch (rdev->family) {
2148 case CHIP_CEDAR:
2149 case CHIP_PALM:
Alex Deucherd5c5a722011-05-31 15:42:48 -04002150 case CHIP_SUMO:
2151 case CHIP_SUMO2:
Alex Deucher32fcdbf2010-03-24 13:33:47 -04002152 ps_thread_count = 96;
Alex Deucherd5e455e2010-11-22 17:56:29 -05002153 break;
2154 default:
Alex Deucher32fcdbf2010-03-24 13:33:47 -04002155 ps_thread_count = 128;
Alex Deucherd5e455e2010-11-22 17:56:29 -05002156 break;
2157 }
Alex Deucher32fcdbf2010-03-24 13:33:47 -04002158
2159 sq_thread_resource_mgmt = NUM_PS_THREADS(ps_thread_count);
Alex Deucherf96b35c2010-06-16 12:24:07 -04002160 sq_thread_resource_mgmt |= NUM_VS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
2161 sq_thread_resource_mgmt |= NUM_GS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
2162 sq_thread_resource_mgmt |= NUM_ES_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
2163 sq_thread_resource_mgmt_2 = NUM_HS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
2164 sq_thread_resource_mgmt_2 |= NUM_LS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
Alex Deucher32fcdbf2010-03-24 13:33:47 -04002165
2166 sq_stack_resource_mgmt_1 = NUM_PS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
2167 sq_stack_resource_mgmt_1 |= NUM_VS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
2168 sq_stack_resource_mgmt_2 = NUM_GS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
2169 sq_stack_resource_mgmt_2 |= NUM_ES_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
2170 sq_stack_resource_mgmt_3 = NUM_HS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
2171 sq_stack_resource_mgmt_3 |= NUM_LS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
2172
2173 WREG32(SQ_CONFIG, sq_config);
2174 WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1);
2175 WREG32(SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2);
2176 WREG32(SQ_GPR_RESOURCE_MGMT_3, sq_gpr_resource_mgmt_3);
2177 WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
2178 WREG32(SQ_THREAD_RESOURCE_MGMT_2, sq_thread_resource_mgmt_2);
2179 WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
2180 WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);
2181 WREG32(SQ_STACK_RESOURCE_MGMT_3, sq_stack_resource_mgmt_3);
2182 WREG32(SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, 0);
2183 WREG32(SQ_LDS_RESOURCE_MGMT, sq_lds_resource_mgmt);
2184
2185 WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
2186 FORCE_EOV_MAX_REZ_CNT(255)));
2187
Alex Deucherd5e455e2010-11-22 17:56:29 -05002188 switch (rdev->family) {
2189 case CHIP_CEDAR:
2190 case CHIP_PALM:
Alex Deucherd5c5a722011-05-31 15:42:48 -04002191 case CHIP_SUMO:
2192 case CHIP_SUMO2:
Alex Deucheradb68fa2011-01-06 21:19:24 -05002193 case CHIP_CAICOS:
Alex Deucher32fcdbf2010-03-24 13:33:47 -04002194 vgt_cache_invalidation = CACHE_INVALIDATION(TC_ONLY);
Alex Deucherd5e455e2010-11-22 17:56:29 -05002195 break;
2196 default:
Alex Deucher32fcdbf2010-03-24 13:33:47 -04002197 vgt_cache_invalidation = CACHE_INVALIDATION(VC_AND_TC);
Alex Deucherd5e455e2010-11-22 17:56:29 -05002198 break;
2199 }
Alex Deucher32fcdbf2010-03-24 13:33:47 -04002200 vgt_cache_invalidation |= AUTO_INVLD_EN(ES_AND_GS_AUTO);
2201 WREG32(VGT_CACHE_INVALIDATION, vgt_cache_invalidation);
2202
2203 WREG32(VGT_GS_VERTEX_REUSE, 16);
Alex Deucher12920592011-02-02 12:37:40 -05002204 WREG32(PA_SU_LINE_STIPPLE_VALUE, 0);
Alex Deucher32fcdbf2010-03-24 13:33:47 -04002205 WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
2206
Alex Deucher60a4a3e2010-06-29 17:03:35 -04002207 WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, 14);
2208 WREG32(VGT_OUT_DEALLOC_CNTL, 16);
2209
Alex Deucher32fcdbf2010-03-24 13:33:47 -04002210 WREG32(CB_PERF_CTR0_SEL_0, 0);
2211 WREG32(CB_PERF_CTR0_SEL_1, 0);
2212 WREG32(CB_PERF_CTR1_SEL_0, 0);
2213 WREG32(CB_PERF_CTR1_SEL_1, 0);
2214 WREG32(CB_PERF_CTR2_SEL_0, 0);
2215 WREG32(CB_PERF_CTR2_SEL_1, 0);
2216 WREG32(CB_PERF_CTR3_SEL_0, 0);
2217 WREG32(CB_PERF_CTR3_SEL_1, 0);
2218
Alex Deucher60a4a3e2010-06-29 17:03:35 -04002219 /* clear render buffer base addresses */
2220 WREG32(CB_COLOR0_BASE, 0);
2221 WREG32(CB_COLOR1_BASE, 0);
2222 WREG32(CB_COLOR2_BASE, 0);
2223 WREG32(CB_COLOR3_BASE, 0);
2224 WREG32(CB_COLOR4_BASE, 0);
2225 WREG32(CB_COLOR5_BASE, 0);
2226 WREG32(CB_COLOR6_BASE, 0);
2227 WREG32(CB_COLOR7_BASE, 0);
2228 WREG32(CB_COLOR8_BASE, 0);
2229 WREG32(CB_COLOR9_BASE, 0);
2230 WREG32(CB_COLOR10_BASE, 0);
2231 WREG32(CB_COLOR11_BASE, 0);
2232
2233 /* set the shader const cache sizes to 0 */
2234 for (i = SQ_ALU_CONST_BUFFER_SIZE_PS_0; i < 0x28200; i += 4)
2235 WREG32(i, 0);
2236 for (i = SQ_ALU_CONST_BUFFER_SIZE_HS_0; i < 0x29000; i += 4)
2237 WREG32(i, 0);
2238
Alex Deucherf25a5c62011-05-19 11:07:57 -04002239 tmp = RREG32(HDP_MISC_CNTL);
2240 tmp |= HDP_FLUSH_INVALIDATE_CACHE;
2241 WREG32(HDP_MISC_CNTL, tmp);
2242
Alex Deucher32fcdbf2010-03-24 13:33:47 -04002243 hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
2244 WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
2245
2246 WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));
2247
2248 udelay(50);
2249
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05002250}
2251
2252int evergreen_mc_init(struct radeon_device *rdev)
2253{
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05002254 u32 tmp;
2255 int chansize, numchan;
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05002256
 2257	/* Get VRAM information */
2258 rdev->mc.vram_is_ddr = true;
Alex Deucher05b3ef62012-03-20 17:18:37 -04002259 if ((rdev->family == CHIP_PALM) ||
2260 (rdev->family == CHIP_SUMO) ||
2261 (rdev->family == CHIP_SUMO2))
Alex Deucher82084412011-07-01 13:18:28 -04002262 tmp = RREG32(FUS_MC_ARB_RAMCFG);
2263 else
2264 tmp = RREG32(MC_ARB_RAMCFG);
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05002265 if (tmp & CHANSIZE_OVERRIDE) {
2266 chansize = 16;
2267 } else if (tmp & CHANSIZE_MASK) {
2268 chansize = 64;
2269 } else {
2270 chansize = 32;
2271 }
2272 tmp = RREG32(MC_SHARED_CHMAP);
2273 switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
2274 case 0:
2275 default:
2276 numchan = 1;
2277 break;
2278 case 1:
2279 numchan = 2;
2280 break;
2281 case 2:
2282 numchan = 4;
2283 break;
2284 case 3:
2285 numchan = 8;
2286 break;
2287 }
 2288	rdev->mc.vram_width = numchan * chansize; /* e.g. 2 channels x 64 bits = 128-bit bus */
 2289	/* Could the aperture size report 0? */
Jordan Crouse01d73a62010-05-27 13:40:24 -06002290 rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
2291 rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05002292 /* Setup GPU memory space */
Alex Deucher05b3ef62012-03-20 17:18:37 -04002293 if ((rdev->family == CHIP_PALM) ||
2294 (rdev->family == CHIP_SUMO) ||
2295 (rdev->family == CHIP_SUMO2)) {
Alex Deucher6eb18f82010-11-22 17:56:27 -05002296 /* size in bytes on fusion */
2297 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
2298 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
2299 } else {
Alex Deucher05b3ef62012-03-20 17:18:37 -04002300 /* size in MB on evergreen/cayman/tn */
Alex Deucher6eb18f82010-11-22 17:56:27 -05002301 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
2302 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
2303 }
Jerome Glisse51e5fcd2010-02-19 14:33:54 +00002304 rdev->mc.visible_vram_size = rdev->mc.aper_size;
Alex Deucher0ef0c1f2010-11-22 17:56:26 -05002305 r700_vram_gtt_location(rdev, &rdev->mc);
Alex Deucherf47299c2010-03-16 20:54:38 -04002306 radeon_update_bandwidth_info(rdev);
2307
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05002308 return 0;
2309}
Jerome Glissed594e462010-02-17 21:54:29 +00002310
Alex Deucher187e3592013-01-18 14:51:38 -05002311void evergreen_print_gpu_status_regs(struct radeon_device *rdev)
Alex Deucher747943e2010-03-24 13:26:36 -04002312{
Jerome Glisse64c56e82013-01-02 17:30:35 -05002313 dev_info(rdev->dev, " GRBM_STATUS = 0x%08X\n",
Alex Deucher747943e2010-03-24 13:26:36 -04002314 RREG32(GRBM_STATUS));
Jerome Glisse64c56e82013-01-02 17:30:35 -05002315 dev_info(rdev->dev, " GRBM_STATUS_SE0 = 0x%08X\n",
Alex Deucher747943e2010-03-24 13:26:36 -04002316 RREG32(GRBM_STATUS_SE0));
Jerome Glisse64c56e82013-01-02 17:30:35 -05002317 dev_info(rdev->dev, " GRBM_STATUS_SE1 = 0x%08X\n",
Alex Deucher747943e2010-03-24 13:26:36 -04002318 RREG32(GRBM_STATUS_SE1));
Jerome Glisse64c56e82013-01-02 17:30:35 -05002319 dev_info(rdev->dev, " SRBM_STATUS = 0x%08X\n",
Alex Deucher747943e2010-03-24 13:26:36 -04002320 RREG32(SRBM_STATUS));
Alex Deuchera65a4362013-01-18 18:55:54 -05002321 dev_info(rdev->dev, " SRBM_STATUS2 = 0x%08X\n",
2322 RREG32(SRBM_STATUS2));
Jerome Glisse440a7cd2012-06-27 12:25:01 -04002323 dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
2324 RREG32(CP_STALLED_STAT1));
2325 dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n",
2326 RREG32(CP_STALLED_STAT2));
2327 dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n",
2328 RREG32(CP_BUSY_STAT));
2329 dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
2330 RREG32(CP_STAT));
Alex Deucher0ecebb92013-01-03 12:40:13 -05002331 dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
2332 RREG32(DMA_STATUS_REG));
Alex Deucher168757e2013-01-18 19:17:22 -05002333 if (rdev->family >= CHIP_CAYMAN) {
2334 dev_info(rdev->dev, " R_00D834_DMA_STATUS_REG = 0x%08X\n",
2335 RREG32(DMA_STATUS_REG + 0x800));
2336 }
Alex Deucher0ecebb92013-01-03 12:40:13 -05002337}
2338
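/**
 * evergreen_is_display_hung - check whether any enabled CRTC looks hung
 *
 * @rdev: radeon_device pointer
 *
 * Samples the H/V position counter of every enabled CRTC ten times,
 * 100 usecs apart.  Returns true if at least one enabled CRTC's counter
 * never advanced across the samples, false otherwise.
 */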
Alex Deucher168757e2013-01-18 19:17:22 -05002339bool evergreen_is_display_hung(struct radeon_device *rdev)
Alex Deuchera65a4362013-01-18 18:55:54 -05002340{
2341 u32 crtc_hung = 0;
2342 u32 crtc_status[6];
2343 u32 i, j, tmp;
2344
2345 for (i = 0; i < rdev->num_crtc; i++) {
2346 if (RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]) & EVERGREEN_CRTC_MASTER_EN) {
2347 crtc_status[i] = RREG32(EVERGREEN_CRTC_STATUS_HV_COUNT + crtc_offsets[i]);
2348 crtc_hung |= (1 << i);
2349 }
2350 }
2351
2352 for (j = 0; j < 10; j++) {
2353 for (i = 0; i < rdev->num_crtc; i++) {
2354 if (crtc_hung & (1 << i)) {
2355 tmp = RREG32(EVERGREEN_CRTC_STATUS_HV_COUNT + crtc_offsets[i]);
2356 if (tmp != crtc_status[i])
2357 crtc_hung &= ~(1 << i);
2358 }
2359 }
2360 if (crtc_hung == 0)
2361 return false;
2362 udelay(100);
2363 }
2364
2365 return true;
2366}
2367
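/**
 * evergreen_gpu_check_soft_reset - determine which blocks need a reset
 *
 * @rdev: radeon_device pointer
 *
 * Inspects the GRBM, SRBM, DMA, display, and VM L2 status registers and
 * builds a mask of RADEON_RESET_* flags for each block that still
 * reports busy.  A zero return means no soft reset is required.
 */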
2368static u32 evergreen_gpu_check_soft_reset(struct radeon_device *rdev)
2369{
2370 u32 reset_mask = 0;
2371 u32 tmp;
2372
2373 /* GRBM_STATUS */
2374 tmp = RREG32(GRBM_STATUS);
2375 if (tmp & (PA_BUSY | SC_BUSY |
2376 SH_BUSY | SX_BUSY |
2377 TA_BUSY | VGT_BUSY |
2378 DB_BUSY | CB_BUSY |
2379 SPI_BUSY | VGT_BUSY_NO_DMA))
2380 reset_mask |= RADEON_RESET_GFX;
2381
2382 if (tmp & (CF_RQ_PENDING | PF_RQ_PENDING |
2383 CP_BUSY | CP_COHERENCY_BUSY))
2384 reset_mask |= RADEON_RESET_CP;
2385
2386 if (tmp & GRBM_EE_BUSY)
2387 reset_mask |= RADEON_RESET_GRBM | RADEON_RESET_GFX | RADEON_RESET_CP;
2388
2389 /* DMA_STATUS_REG */
2390 tmp = RREG32(DMA_STATUS_REG);
2391 if (!(tmp & DMA_IDLE))
2392 reset_mask |= RADEON_RESET_DMA;
2393
2394 /* SRBM_STATUS2 */
2395 tmp = RREG32(SRBM_STATUS2);
2396 if (tmp & DMA_BUSY)
2397 reset_mask |= RADEON_RESET_DMA;
2398
2399 /* SRBM_STATUS */
2400 tmp = RREG32(SRBM_STATUS);
2401 if (tmp & (RLC_RQ_PENDING | RLC_BUSY))
2402 reset_mask |= RADEON_RESET_RLC;
2403
2404 if (tmp & IH_BUSY)
2405 reset_mask |= RADEON_RESET_IH;
2406
2407 if (tmp & SEM_BUSY)
2408 reset_mask |= RADEON_RESET_SEM;
2409
2410 if (tmp & GRBM_RQ_PENDING)
2411 reset_mask |= RADEON_RESET_GRBM;
2412
2413 if (tmp & VMC_BUSY)
2414 reset_mask |= RADEON_RESET_VMC;
2415
2416 if (tmp & (MCB_BUSY | MCB_NON_DISPLAY_BUSY |
2417 MCC_BUSY | MCD_BUSY))
2418 reset_mask |= RADEON_RESET_MC;
2419
2420 if (evergreen_is_display_hung(rdev))
2421 reset_mask |= RADEON_RESET_DISPLAY;
2422
2423 /* VM_L2_STATUS */
2424 tmp = RREG32(VM_L2_STATUS);
2425 if (tmp & L2_BUSY)
2426 reset_mask |= RADEON_RESET_VMC;
2427
2428 return reset_mask;
2429}
2430
2431static void evergreen_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
Alex Deucher0ecebb92013-01-03 12:40:13 -05002432{
2433 struct evergreen_mc_save save;
Alex Deucherb7630472013-01-18 14:28:41 -05002434 u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
2435 u32 tmp;
Alex Deucher19fc42e2013-01-14 11:04:39 -05002436
Alex Deucher0ecebb92013-01-03 12:40:13 -05002437 if (reset_mask == 0)
Alex Deuchera65a4362013-01-18 18:55:54 -05002438 return;
Alex Deucher0ecebb92013-01-03 12:40:13 -05002439
2440 dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
2441
Alex Deucherb7630472013-01-18 14:28:41 -05002442 evergreen_print_gpu_status_regs(rdev);
2443
Alex Deucherb7630472013-01-18 14:28:41 -05002444 /* Disable CP parsing/prefetching */
2445 WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
2446
2447 if (reset_mask & RADEON_RESET_DMA) {
2448 /* Disable DMA */
2449 tmp = RREG32(DMA_RB_CNTL);
2450 tmp &= ~DMA_RB_ENABLE;
2451 WREG32(DMA_RB_CNTL, tmp);
2452 }
2453
Alex Deucherb21b6e72013-01-23 18:57:56 -05002454 udelay(50);
2455
2456 evergreen_mc_stop(rdev, &save);
2457 if (evergreen_mc_wait_for_idle(rdev)) {
 2458	dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
2459 }
2460
Alex Deucherb7630472013-01-18 14:28:41 -05002461 if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE)) {
2462 grbm_soft_reset |= SOFT_RESET_DB |
2463 SOFT_RESET_CB |
2464 SOFT_RESET_PA |
2465 SOFT_RESET_SC |
2466 SOFT_RESET_SPI |
2467 SOFT_RESET_SX |
2468 SOFT_RESET_SH |
2469 SOFT_RESET_TC |
2470 SOFT_RESET_TA |
2471 SOFT_RESET_VC |
2472 SOFT_RESET_VGT;
2473 }
2474
2475 if (reset_mask & RADEON_RESET_CP) {
2476 grbm_soft_reset |= SOFT_RESET_CP |
2477 SOFT_RESET_VGT;
2478
2479 srbm_soft_reset |= SOFT_RESET_GRBM;
2480 }
Alex Deucher0ecebb92013-01-03 12:40:13 -05002481
2482 if (reset_mask & RADEON_RESET_DMA)
Alex Deucherb7630472013-01-18 14:28:41 -05002483 srbm_soft_reset |= SOFT_RESET_DMA;
2484
Alex Deuchera65a4362013-01-18 18:55:54 -05002485 if (reset_mask & RADEON_RESET_DISPLAY)
2486 srbm_soft_reset |= SOFT_RESET_DC;
2487
2488 if (reset_mask & RADEON_RESET_RLC)
2489 srbm_soft_reset |= SOFT_RESET_RLC;
2490
2491 if (reset_mask & RADEON_RESET_SEM)
2492 srbm_soft_reset |= SOFT_RESET_SEM;
2493
2494 if (reset_mask & RADEON_RESET_IH)
2495 srbm_soft_reset |= SOFT_RESET_IH;
2496
2497 if (reset_mask & RADEON_RESET_GRBM)
2498 srbm_soft_reset |= SOFT_RESET_GRBM;
2499
2500 if (reset_mask & RADEON_RESET_VMC)
2501 srbm_soft_reset |= SOFT_RESET_VMC;
2502
2503 if (reset_mask & RADEON_RESET_MC)
2504 srbm_soft_reset |= SOFT_RESET_MC;
2505
Alex Deucherb7630472013-01-18 14:28:41 -05002506 if (grbm_soft_reset) {
2507 tmp = RREG32(GRBM_SOFT_RESET);
2508 tmp |= grbm_soft_reset;
2509 dev_info(rdev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
2510 WREG32(GRBM_SOFT_RESET, tmp);
2511 tmp = RREG32(GRBM_SOFT_RESET);
2512
2513 udelay(50);
2514
2515 tmp &= ~grbm_soft_reset;
2516 WREG32(GRBM_SOFT_RESET, tmp);
2517 tmp = RREG32(GRBM_SOFT_RESET);
2518 }
2519
2520 if (srbm_soft_reset) {
2521 tmp = RREG32(SRBM_SOFT_RESET);
2522 tmp |= srbm_soft_reset;
2523 dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
2524 WREG32(SRBM_SOFT_RESET, tmp);
2525 tmp = RREG32(SRBM_SOFT_RESET);
2526
2527 udelay(50);
2528
2529 tmp &= ~srbm_soft_reset;
2530 WREG32(SRBM_SOFT_RESET, tmp);
2531 tmp = RREG32(SRBM_SOFT_RESET);
2532 }
Alex Deucher0ecebb92013-01-03 12:40:13 -05002533
2534 /* Wait a little for things to settle down */
2535 udelay(50);
2536
Alex Deucher747943e2010-03-24 13:26:36 -04002537 evergreen_mc_resume(rdev, &save);
Alex Deucherb7630472013-01-18 14:28:41 -05002538 udelay(50);
Alex Deucher410a3412013-01-18 13:05:39 -05002539
Alex Deucherb7630472013-01-18 14:28:41 -05002540 evergreen_print_gpu_status_regs(rdev);
Alex Deucher747943e2010-03-24 13:26:36 -04002541}
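/*
 * Sketch of the reset pulse applied twice above (illustrative helper,
 * not driver code): assert the reset bits, read back to post the write,
 * hold for ~50 usecs, then deassert and read back again.
 */
static inline void example_soft_reset_pulse(struct radeon_device *rdev,
					    u32 reg, u32 bits)
{
	u32 tmp = RREG32(reg);

	tmp |= bits;
	WREG32(reg, tmp);
	RREG32(reg);		/* post the write */
	udelay(50);
	tmp &= ~bits;
	WREG32(reg, tmp);
	RREG32(reg);
}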
2542
Jerome Glissea2d07b72010-03-09 14:45:11 +00002543int evergreen_asic_reset(struct radeon_device *rdev)
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05002544{
Alex Deuchera65a4362013-01-18 18:55:54 -05002545 u32 reset_mask;
2546
2547 reset_mask = evergreen_gpu_check_soft_reset(rdev);
2548
2549 if (reset_mask)
2550 r600_set_bios_scratch_engine_hung(rdev, true);
2551
2552 evergreen_gpu_soft_reset(rdev, reset_mask);
2553
2554 reset_mask = evergreen_gpu_check_soft_reset(rdev);
2555
2556 if (!reset_mask)
2557 r600_set_bios_scratch_engine_hung(rdev, false);
2558
2559 return 0;
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05002560}
2561
Alex Deucher123bc182013-01-24 11:37:19 -05002562/**
2563 * evergreen_gfx_is_lockup - Check if the GFX engine is locked up
2564 *
2565 * @rdev: radeon_device pointer
2566 * @ring: radeon_ring structure holding ring information
2567 *
2568 * Check if the GFX engine is locked up.
2569 * Returns true if the engine appears to be locked up, false if not.
2570 */
2571bool evergreen_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
2572{
2573 u32 reset_mask = evergreen_gpu_check_soft_reset(rdev);
2574
2575 if (!(reset_mask & (RADEON_RESET_GFX |
2576 RADEON_RESET_COMPUTE |
2577 RADEON_RESET_CP))) {
2578 radeon_ring_lockup_update(ring);
2579 return false;
2580 }
2581 /* force CP activities */
2582 radeon_ring_force_activity(rdev, ring);
2583 return radeon_ring_test_lockup(rdev, ring);
2584}
2585
2586/**
2587 * evergreen_dma_is_lockup - Check if the DMA engine is locked up
2588 *
2589 * @rdev: radeon_device pointer
2590 * @ring: radeon_ring structure holding ring information
2591 *
2592 * Check if the async DMA engine is locked up.
2593 * Returns true if the engine appears to be locked up, false if not.
2594 */
2595bool evergreen_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
2596{
2597 u32 reset_mask = evergreen_gpu_check_soft_reset(rdev);
2598
2599 if (!(reset_mask & RADEON_RESET_DMA)) {
2600 radeon_ring_lockup_update(ring);
2601 return false;
2602 }
2603 /* force ring activities */
2604 radeon_ring_force_activity(rdev, ring);
2605 return radeon_ring_test_lockup(rdev, ring);
2606}
2607
Alex Deucher45f9a392010-03-24 13:55:51 -04002608/* Interrupts */
2609
2610u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc)
2611{
Alex Deucher46437052012-08-15 17:10:32 -04002612 if (crtc >= rdev->num_crtc)
Alex Deucher45f9a392010-03-24 13:55:51 -04002613 return 0;
Alex Deucher46437052012-08-15 17:10:32 -04002614 else
2615 return RREG32(CRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
Alex Deucher45f9a392010-03-24 13:55:51 -04002616}
2617
2618void evergreen_disable_interrupt_state(struct radeon_device *rdev)
2619{
2620 u32 tmp;
2621
Alex Deucher1b370782011-11-17 20:13:28 -05002622 if (rdev->family >= CHIP_CAYMAN) {
2623 cayman_cp_int_cntl_setup(rdev, 0,
2624 CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
2625 cayman_cp_int_cntl_setup(rdev, 1, 0);
2626 cayman_cp_int_cntl_setup(rdev, 2, 0);
Alex Deucherf60cbd12012-12-04 15:27:33 -05002627 tmp = RREG32(CAYMAN_DMA1_CNTL) & ~TRAP_ENABLE;
2628 WREG32(CAYMAN_DMA1_CNTL, tmp);
Alex Deucher1b370782011-11-17 20:13:28 -05002629 } else
2630 WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
Alex Deucher233d1ad2012-12-04 15:25:59 -05002631 tmp = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
2632 WREG32(DMA_CNTL, tmp);
Alex Deucher45f9a392010-03-24 13:55:51 -04002633 WREG32(GRBM_INT_CNTL, 0);
2634 WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
2635 WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
Alex Deucherb7eff392011-07-08 11:44:56 -04002636 if (rdev->num_crtc >= 4) {
Alex Deucher18007402010-11-22 17:56:28 -05002637 WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
2638 WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
Alex Deucherb7eff392011-07-08 11:44:56 -04002639 }
2640 if (rdev->num_crtc >= 6) {
Alex Deucher18007402010-11-22 17:56:28 -05002641 WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
2642 WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
2643 }
Alex Deucher45f9a392010-03-24 13:55:51 -04002644
2645 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
2646 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
Alex Deucherb7eff392011-07-08 11:44:56 -04002647 if (rdev->num_crtc >= 4) {
Alex Deucher18007402010-11-22 17:56:28 -05002648 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
2649 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
Alex Deucherb7eff392011-07-08 11:44:56 -04002650 }
2651 if (rdev->num_crtc >= 6) {
Alex Deucher18007402010-11-22 17:56:28 -05002652 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
2653 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
2654 }
Alex Deucher45f9a392010-03-24 13:55:51 -04002655
Alex Deucher05b3ef62012-03-20 17:18:37 -04002656 /* only one DAC on DCE6 */
2657 if (!ASIC_IS_DCE6(rdev))
2658 WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
Alex Deucher45f9a392010-03-24 13:55:51 -04002659 WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
2660
 2661	tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY; /* keep polarity, clear the enable */
2662 WREG32(DC_HPD1_INT_CONTROL, tmp);
2663 tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2664 WREG32(DC_HPD2_INT_CONTROL, tmp);
2665 tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2666 WREG32(DC_HPD3_INT_CONTROL, tmp);
2667 tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2668 WREG32(DC_HPD4_INT_CONTROL, tmp);
2669 tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2670 WREG32(DC_HPD5_INT_CONTROL, tmp);
2671 tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2672 WREG32(DC_HPD6_INT_CONTROL, tmp);
2673
2674}
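
/* Note on the DC_HPDx writes above: masking the read value with
 * DC_HPDx_INT_POLARITY deliberately preserves the configured
 * hotplug-detect polarity while clearing every enable and acknowledge
 * bit in the register.
 */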

int evergreen_irq_set(struct radeon_device *rdev)
{
	u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
	u32 cp_int_cntl1 = 0, cp_int_cntl2 = 0;
	u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
	u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
	u32 grbm_int_cntl = 0;
	u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;
	u32 afmt1 = 0, afmt2 = 0, afmt3 = 0, afmt4 = 0, afmt5 = 0, afmt6 = 0;
	u32 dma_cntl, dma_cntl1 = 0;

	if (!rdev->irq.installed) {
		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
		return -EINVAL;
	}
	/* don't enable anything if the ih is disabled */
	if (!rdev->ih.enabled) {
		r600_disable_interrupts(rdev);
		/* force the active interrupt state to all disabled */
		evergreen_disable_interrupt_state(rdev);
		return 0;
	}

	hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
	hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
	hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
	hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
	hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
	hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;

	afmt1 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
	afmt2 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
	afmt3 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
	afmt4 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
	afmt5 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
	afmt6 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;

	dma_cntl = RREG32(DMA_CNTL) & ~TRAP_ENABLE;

	if (rdev->family >= CHIP_CAYMAN) {
		/* enable CP interrupts on all rings */
		if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
			DRM_DEBUG("evergreen_irq_set: sw int gfx\n");
			cp_int_cntl |= TIME_STAMP_INT_ENABLE;
		}
		if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP1_INDEX])) {
			DRM_DEBUG("evergreen_irq_set: sw int cp1\n");
			cp_int_cntl1 |= TIME_STAMP_INT_ENABLE;
		}
		if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP2_INDEX])) {
			DRM_DEBUG("evergreen_irq_set: sw int cp2\n");
			cp_int_cntl2 |= TIME_STAMP_INT_ENABLE;
		}
	} else {
		if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
			DRM_DEBUG("evergreen_irq_set: sw int gfx\n");
			cp_int_cntl |= RB_INT_ENABLE;
			cp_int_cntl |= TIME_STAMP_INT_ENABLE;
		}
	}

	if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) {
		DRM_DEBUG("evergreen_irq_set: sw int dma\n");
		dma_cntl |= TRAP_ENABLE;
	}

	if (rdev->family >= CHIP_CAYMAN) {
		dma_cntl1 = RREG32(CAYMAN_DMA1_CNTL) & ~TRAP_ENABLE;
		if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_DMA1_INDEX])) {
			DRM_DEBUG("evergreen_irq_set: sw int dma1\n");
			dma_cntl1 |= TRAP_ENABLE;
		}
	}

	if (rdev->irq.crtc_vblank_int[0] ||
	    atomic_read(&rdev->irq.pflip[0])) {
		DRM_DEBUG("evergreen_irq_set: vblank 0\n");
		crtc1 |= VBLANK_INT_MASK;
	}
	if (rdev->irq.crtc_vblank_int[1] ||
	    atomic_read(&rdev->irq.pflip[1])) {
		DRM_DEBUG("evergreen_irq_set: vblank 1\n");
		crtc2 |= VBLANK_INT_MASK;
	}
	if (rdev->irq.crtc_vblank_int[2] ||
	    atomic_read(&rdev->irq.pflip[2])) {
		DRM_DEBUG("evergreen_irq_set: vblank 2\n");
		crtc3 |= VBLANK_INT_MASK;
	}
	if (rdev->irq.crtc_vblank_int[3] ||
	    atomic_read(&rdev->irq.pflip[3])) {
		DRM_DEBUG("evergreen_irq_set: vblank 3\n");
		crtc4 |= VBLANK_INT_MASK;
	}
	if (rdev->irq.crtc_vblank_int[4] ||
	    atomic_read(&rdev->irq.pflip[4])) {
		DRM_DEBUG("evergreen_irq_set: vblank 4\n");
		crtc5 |= VBLANK_INT_MASK;
	}
	if (rdev->irq.crtc_vblank_int[5] ||
	    atomic_read(&rdev->irq.pflip[5])) {
		DRM_DEBUG("evergreen_irq_set: vblank 5\n");
		crtc6 |= VBLANK_INT_MASK;
	}
	if (rdev->irq.hpd[0]) {
		DRM_DEBUG("evergreen_irq_set: hpd 1\n");
		hpd1 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[1]) {
		DRM_DEBUG("evergreen_irq_set: hpd 2\n");
		hpd2 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[2]) {
		DRM_DEBUG("evergreen_irq_set: hpd 3\n");
		hpd3 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[3]) {
		DRM_DEBUG("evergreen_irq_set: hpd 4\n");
		hpd4 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[4]) {
		DRM_DEBUG("evergreen_irq_set: hpd 5\n");
		hpd5 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[5]) {
		DRM_DEBUG("evergreen_irq_set: hpd 6\n");
		hpd6 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.afmt[0]) {
		DRM_DEBUG("evergreen_irq_set: hdmi 0\n");
		afmt1 |= AFMT_AZ_FORMAT_WTRIG_MASK;
	}
	if (rdev->irq.afmt[1]) {
		DRM_DEBUG("evergreen_irq_set: hdmi 1\n");
		afmt2 |= AFMT_AZ_FORMAT_WTRIG_MASK;
	}
	if (rdev->irq.afmt[2]) {
		DRM_DEBUG("evergreen_irq_set: hdmi 2\n");
		afmt3 |= AFMT_AZ_FORMAT_WTRIG_MASK;
	}
	if (rdev->irq.afmt[3]) {
		DRM_DEBUG("evergreen_irq_set: hdmi 3\n");
		afmt4 |= AFMT_AZ_FORMAT_WTRIG_MASK;
	}
	if (rdev->irq.afmt[4]) {
		DRM_DEBUG("evergreen_irq_set: hdmi 4\n");
		afmt5 |= AFMT_AZ_FORMAT_WTRIG_MASK;
	}
	if (rdev->irq.afmt[5]) {
		DRM_DEBUG("evergreen_irq_set: hdmi 5\n");
		afmt6 |= AFMT_AZ_FORMAT_WTRIG_MASK;
	}

	if (rdev->family >= CHIP_CAYMAN) {
		cayman_cp_int_cntl_setup(rdev, 0, cp_int_cntl);
		cayman_cp_int_cntl_setup(rdev, 1, cp_int_cntl1);
		cayman_cp_int_cntl_setup(rdev, 2, cp_int_cntl2);
	} else
		WREG32(CP_INT_CNTL, cp_int_cntl);

	WREG32(DMA_CNTL, dma_cntl);

	if (rdev->family >= CHIP_CAYMAN)
		WREG32(CAYMAN_DMA1_CNTL, dma_cntl1);

	WREG32(GRBM_INT_CNTL, grbm_int_cntl);

	WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
	WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2);
	if (rdev->num_crtc >= 4) {
		WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3);
		WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4);
	}
	if (rdev->num_crtc >= 6) {
		WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5);
		WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
	}

	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1);
	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2);
	if (rdev->num_crtc >= 4) {
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3);
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4);
	}
	if (rdev->num_crtc >= 6) {
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, grph5);
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6);
	}

	WREG32(DC_HPD1_INT_CONTROL, hpd1);
	WREG32(DC_HPD2_INT_CONTROL, hpd2);
	WREG32(DC_HPD3_INT_CONTROL, hpd3);
	WREG32(DC_HPD4_INT_CONTROL, hpd4);
	WREG32(DC_HPD5_INT_CONTROL, hpd5);
	WREG32(DC_HPD6_INT_CONTROL, hpd6);

	WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, afmt1);
	WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, afmt2);
	WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, afmt3);
	WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, afmt4);
	WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, afmt5);
	WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, afmt6);

	return 0;
}

static void evergreen_irq_ack(struct radeon_device *rdev)
{
	u32 tmp;

	rdev->irq.stat_regs.evergreen.disp_int = RREG32(DISP_INTERRUPT_STATUS);
	rdev->irq.stat_regs.evergreen.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
	rdev->irq.stat_regs.evergreen.disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2);
	rdev->irq.stat_regs.evergreen.disp_int_cont3 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE3);
	rdev->irq.stat_regs.evergreen.disp_int_cont4 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE4);
	rdev->irq.stat_regs.evergreen.disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5);
	rdev->irq.stat_regs.evergreen.d1grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET);
	rdev->irq.stat_regs.evergreen.d2grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET);
	if (rdev->num_crtc >= 4) {
		rdev->irq.stat_regs.evergreen.d3grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET);
		rdev->irq.stat_regs.evergreen.d4grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET);
	}
	if (rdev->num_crtc >= 6) {
		rdev->irq.stat_regs.evergreen.d5grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET);
		rdev->irq.stat_regs.evergreen.d6grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET);
	}

	rdev->irq.stat_regs.evergreen.afmt_status1 = RREG32(AFMT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET);
	rdev->irq.stat_regs.evergreen.afmt_status2 = RREG32(AFMT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET);
	rdev->irq.stat_regs.evergreen.afmt_status3 = RREG32(AFMT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET);
	rdev->irq.stat_regs.evergreen.afmt_status4 = RREG32(AFMT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET);
	rdev->irq.stat_regs.evergreen.afmt_status5 = RREG32(AFMT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET);
	rdev->irq.stat_regs.evergreen.afmt_status6 = RREG32(AFMT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET);

	if (rdev->irq.stat_regs.evergreen.d1grph_int & GRPH_PFLIP_INT_OCCURRED)
		WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
	if (rdev->irq.stat_regs.evergreen.d2grph_int & GRPH_PFLIP_INT_OCCURRED)
		WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
	if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT)
		WREG32(VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK);
	if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT)
		WREG32(VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VLINE_ACK);
	if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT)
		WREG32(VBLANK_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VBLANK_ACK);
	if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT)
		WREG32(VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK);

	if (rdev->num_crtc >= 4) {
		if (rdev->irq.stat_regs.evergreen.d3grph_int & GRPH_PFLIP_INT_OCCURRED)
			WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
		if (rdev->irq.stat_regs.evergreen.d4grph_int & GRPH_PFLIP_INT_OCCURRED)
			WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
		if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)
			WREG32(VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK);
		if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT)
			WREG32(VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VLINE_ACK);
		if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT)
			WREG32(VBLANK_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VBLANK_ACK);
		if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT)
			WREG32(VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VLINE_ACK);
	}

	if (rdev->num_crtc >= 6) {
		if (rdev->irq.stat_regs.evergreen.d5grph_int & GRPH_PFLIP_INT_OCCURRED)
			WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
		if (rdev->irq.stat_regs.evergreen.d6grph_int & GRPH_PFLIP_INT_OCCURRED)
			WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
		if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)
			WREG32(VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK);
		if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT)
			WREG32(VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VLINE_ACK);
		if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT)
			WREG32(VBLANK_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VBLANK_ACK);
		if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT)
			WREG32(VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VLINE_ACK);
	}

	if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
		tmp = RREG32(DC_HPD1_INT_CONTROL);
		tmp |= DC_HPDx_INT_ACK;
		WREG32(DC_HPD1_INT_CONTROL, tmp);
	}
	if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
		tmp = RREG32(DC_HPD2_INT_CONTROL);
		tmp |= DC_HPDx_INT_ACK;
		WREG32(DC_HPD2_INT_CONTROL, tmp);
	}
	if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
		tmp = RREG32(DC_HPD3_INT_CONTROL);
		tmp |= DC_HPDx_INT_ACK;
		WREG32(DC_HPD3_INT_CONTROL, tmp);
	}
	if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
		tmp = RREG32(DC_HPD4_INT_CONTROL);
		tmp |= DC_HPDx_INT_ACK;
		WREG32(DC_HPD4_INT_CONTROL, tmp);
	}
	if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
		tmp = RREG32(DC_HPD5_INT_CONTROL);
		tmp |= DC_HPDx_INT_ACK;
		WREG32(DC_HPD5_INT_CONTROL, tmp);
	}
	if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
		tmp = RREG32(DC_HPD6_INT_CONTROL);
		tmp |= DC_HPDx_INT_ACK;
		WREG32(DC_HPD6_INT_CONTROL, tmp);
	}
	if (rdev->irq.stat_regs.evergreen.afmt_status1 & AFMT_AZ_FORMAT_WTRIG) {
		tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET);
		tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
		WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, tmp);
	}
	if (rdev->irq.stat_regs.evergreen.afmt_status2 & AFMT_AZ_FORMAT_WTRIG) {
		tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
		tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
		WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, tmp);
	}
	if (rdev->irq.stat_regs.evergreen.afmt_status3 & AFMT_AZ_FORMAT_WTRIG) {
		tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET);
		tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
		WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, tmp);
	}
	if (rdev->irq.stat_regs.evergreen.afmt_status4 & AFMT_AZ_FORMAT_WTRIG) {
		tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
		tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
		WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, tmp);
	}
	if (rdev->irq.stat_regs.evergreen.afmt_status5 & AFMT_AZ_FORMAT_WTRIG) {
		tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET);
		tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
		WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, tmp);
	}
	if (rdev->irq.stat_regs.evergreen.afmt_status6 & AFMT_AZ_FORMAT_WTRIG) {
		tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
		tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
		WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, tmp);
	}
}

static void evergreen_irq_disable(struct radeon_device *rdev)
{
	r600_disable_interrupts(rdev);
	/* Wait and acknowledge irq */
	mdelay(1);
	evergreen_irq_ack(rdev);
	evergreen_disable_interrupt_state(rdev);
}

void evergreen_irq_suspend(struct radeon_device *rdev)
{
	evergreen_irq_disable(rdev);
	r600_rlc_stop(rdev);
}

static u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
{
	u32 wptr, tmp;

	if (rdev->wb.enabled)
		wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
	else
		wptr = RREG32(IH_RB_WPTR);

	if (wptr & RB_OVERFLOW) {
		/* When a ring buffer overflow happens, start parsing from
		 * the last vector not yet overwritten (wptr + 16). This
		 * should allow us to catch up.
		 */
		dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
			 wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
		rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
		tmp = RREG32(IH_RB_CNTL);
		tmp |= IH_WPTR_OVERFLOW_CLEAR;
		WREG32(IH_RB_CNTL, tmp);
	}
	return (wptr & rdev->ih.ptr_mask);
}
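
/* A sketch of the IH ring math used above and in evergreen_irq_process()
 * below: each IH vector is 16 bytes (4 dwords), with dword 0 carrying
 * src_id and dword 1 carrying src_data. On overflow, rptr is resynced to
 * (wptr + 16) & ptr_mask, i.e. the oldest vector not yet overwritten.
 * Hypothetical numbers: with ptr_mask = 0x3fff (a 16KB ring) and
 * wptr = 0x20 flagged with RB_OVERFLOW, processing restarts at
 * rptr = (0x20 + 16) & 0x3fff = 0x30.
 */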

int evergreen_irq_process(struct radeon_device *rdev)
{
	u32 wptr;
	u32 rptr;
	u32 src_id, src_data;
	u32 ring_index;
	bool queue_hotplug = false;
	bool queue_hdmi = false;

	if (!rdev->ih.enabled || rdev->shutdown)
		return IRQ_NONE;

	wptr = evergreen_get_ih_wptr(rdev);

restart_ih:
	/* is somebody else already processing irqs? */
	if (atomic_xchg(&rdev->ih.lock, 1))
		return IRQ_NONE;

	rptr = rdev->ih.rptr;
	DRM_DEBUG("evergreen_irq_process start: rptr %d, wptr %d\n", rptr, wptr);

	/* Order reading of wptr vs. reading of IH ring data */
	rmb();

	/* display interrupts */
	evergreen_irq_ack(rdev);

	while (rptr != wptr) {
		/* wptr/rptr are in bytes! */
		ring_index = rptr / 4;
		src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
		src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;

		switch (src_id) {
		case 1: /* D1 vblank/vline */
			switch (src_data) {
			case 0: /* D1 vblank */
				if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) {
					if (rdev->irq.crtc_vblank_int[0]) {
						drm_handle_vblank(rdev->ddev, 0);
						rdev->pm.vblank_sync = true;
						wake_up(&rdev->irq.vblank_queue);
					}
					if (atomic_read(&rdev->irq.pflip[0]))
						radeon_crtc_handle_flip(rdev, 0);
					rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D1 vblank\n");
				}
				break;
			case 1: /* D1 vline */
				if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D1 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 2: /* D2 vblank/vline */
			switch (src_data) {
			case 0: /* D2 vblank */
				if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
					if (rdev->irq.crtc_vblank_int[1]) {
						drm_handle_vblank(rdev->ddev, 1);
						rdev->pm.vblank_sync = true;
						wake_up(&rdev->irq.vblank_queue);
					}
					if (atomic_read(&rdev->irq.pflip[1]))
						radeon_crtc_handle_flip(rdev, 1);
					rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D2 vblank\n");
				}
				break;
			case 1: /* D2 vline */
				if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D2 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 3: /* D3 vblank/vline */
			switch (src_data) {
			case 0: /* D3 vblank */
				if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
					if (rdev->irq.crtc_vblank_int[2]) {
						drm_handle_vblank(rdev->ddev, 2);
						rdev->pm.vblank_sync = true;
						wake_up(&rdev->irq.vblank_queue);
					}
					if (atomic_read(&rdev->irq.pflip[2]))
						radeon_crtc_handle_flip(rdev, 2);
					rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D3 vblank\n");
				}
				break;
			case 1: /* D3 vline */
				if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D3 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 4: /* D4 vblank/vline */
			switch (src_data) {
			case 0: /* D4 vblank */
				if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
					if (rdev->irq.crtc_vblank_int[3]) {
						drm_handle_vblank(rdev->ddev, 3);
						rdev->pm.vblank_sync = true;
						wake_up(&rdev->irq.vblank_queue);
					}
					if (atomic_read(&rdev->irq.pflip[3]))
						radeon_crtc_handle_flip(rdev, 3);
					rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D4 vblank\n");
				}
				break;
			case 1: /* D4 vline */
				if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D4 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 5: /* D5 vblank/vline */
			switch (src_data) {
			case 0: /* D5 vblank */
				if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
					if (rdev->irq.crtc_vblank_int[4]) {
						drm_handle_vblank(rdev->ddev, 4);
						rdev->pm.vblank_sync = true;
						wake_up(&rdev->irq.vblank_queue);
					}
					if (atomic_read(&rdev->irq.pflip[4]))
						radeon_crtc_handle_flip(rdev, 4);
					rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D5 vblank\n");
				}
				break;
			case 1: /* D5 vline */
				if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D5 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 6: /* D6 vblank/vline */
			switch (src_data) {
			case 0: /* D6 vblank */
				if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
					if (rdev->irq.crtc_vblank_int[5]) {
						drm_handle_vblank(rdev->ddev, 5);
						rdev->pm.vblank_sync = true;
						wake_up(&rdev->irq.vblank_queue);
					}
					if (atomic_read(&rdev->irq.pflip[5]))
						radeon_crtc_handle_flip(rdev, 5);
					rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D6 vblank\n");
				}
				break;
			case 1: /* D6 vline */
				if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D6 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 42: /* HPD hotplug */
			switch (src_data) {
			case 0:
				if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD1\n");
				}
				break;
			case 1:
				if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD2\n");
				}
				break;
			case 2:
				if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD3\n");
				}
				break;
			case 3:
				if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD4\n");
				}
				break;
			case 4:
				if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD5\n");
				}
				break;
			case 5:
				if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD6\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 44: /* hdmi */
			switch (src_data) {
			case 0:
				if (rdev->irq.stat_regs.evergreen.afmt_status1 & AFMT_AZ_FORMAT_WTRIG) {
					rdev->irq.stat_regs.evergreen.afmt_status1 &= ~AFMT_AZ_FORMAT_WTRIG;
					queue_hdmi = true;
					DRM_DEBUG("IH: HDMI0\n");
				}
				break;
			case 1:
				if (rdev->irq.stat_regs.evergreen.afmt_status2 & AFMT_AZ_FORMAT_WTRIG) {
					rdev->irq.stat_regs.evergreen.afmt_status2 &= ~AFMT_AZ_FORMAT_WTRIG;
					queue_hdmi = true;
					DRM_DEBUG("IH: HDMI1\n");
				}
				break;
			case 2:
				if (rdev->irq.stat_regs.evergreen.afmt_status3 & AFMT_AZ_FORMAT_WTRIG) {
					rdev->irq.stat_regs.evergreen.afmt_status3 &= ~AFMT_AZ_FORMAT_WTRIG;
					queue_hdmi = true;
					DRM_DEBUG("IH: HDMI2\n");
				}
				break;
			case 3:
				if (rdev->irq.stat_regs.evergreen.afmt_status4 & AFMT_AZ_FORMAT_WTRIG) {
					rdev->irq.stat_regs.evergreen.afmt_status4 &= ~AFMT_AZ_FORMAT_WTRIG;
					queue_hdmi = true;
					DRM_DEBUG("IH: HDMI3\n");
				}
				break;
			case 4:
				if (rdev->irq.stat_regs.evergreen.afmt_status5 & AFMT_AZ_FORMAT_WTRIG) {
					rdev->irq.stat_regs.evergreen.afmt_status5 &= ~AFMT_AZ_FORMAT_WTRIG;
					queue_hdmi = true;
					DRM_DEBUG("IH: HDMI4\n");
				}
				break;
			case 5:
				if (rdev->irq.stat_regs.evergreen.afmt_status6 & AFMT_AZ_FORMAT_WTRIG) {
					rdev->irq.stat_regs.evergreen.afmt_status6 &= ~AFMT_AZ_FORMAT_WTRIG;
					queue_hdmi = true;
					DRM_DEBUG("IH: HDMI5\n");
				}
				break;
			default:
				DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 146:
		case 147:
			dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
			dev_err(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
				RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR));
			dev_err(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
				RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));
			/* reset addr and status */
			WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
			break;
		case 176: /* CP_INT in ring buffer */
		case 177: /* CP_INT in IB1 */
		case 178: /* CP_INT in IB2 */
			DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
			break;
		case 181: /* CP EOP event */
			DRM_DEBUG("IH: CP EOP\n");
			if (rdev->family >= CHIP_CAYMAN) {
				switch (src_data) {
				case 0:
					radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
					break;
				case 1:
					radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
					break;
				case 2:
					radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
					break;
				}
			} else
				radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
			break;
		case 224: /* DMA trap event */
			DRM_DEBUG("IH: DMA trap\n");
			radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
			break;
		case 233: /* GUI IDLE */
			DRM_DEBUG("IH: GUI idle\n");
			break;
		case 244: /* DMA1 trap event (cayman and newer) */
			if (rdev->family >= CHIP_CAYMAN) {
				DRM_DEBUG("IH: DMA1 trap\n");
				radeon_fence_process(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
			}
			break;
		default:
			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
			break;
		}

		/* wptr/rptr are in bytes! */
		rptr += 16;
		rptr &= rdev->ih.ptr_mask;
	}
	if (queue_hotplug)
		schedule_work(&rdev->hotplug_work);
	if (queue_hdmi)
		schedule_work(&rdev->audio_work);
	rdev->ih.rptr = rptr;
	WREG32(IH_RB_RPTR, rdev->ih.rptr);
	atomic_set(&rdev->ih.lock, 0);

	/* make sure wptr hasn't changed while processing */
	wptr = evergreen_get_ih_wptr(rdev);
	if (wptr != rptr)
		goto restart_ih;

	return IRQ_HANDLED;
}

/**
 * evergreen_dma_fence_ring_emit - emit a fence on the DMA ring
 *
 * @rdev: radeon_device pointer
 * @fence: radeon fence object
 *
 * Add a DMA fence packet to the ring to write the fence sequence
 * number, followed by a DMA trap packet to generate an interrupt
 * if needed (evergreen-SI).
 */
void evergreen_dma_fence_ring_emit(struct radeon_device *rdev,
				   struct radeon_fence *fence)
{
	struct radeon_ring *ring = &rdev->ring[fence->ring];
	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
	/* write the fence */
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0));
	radeon_ring_write(ring, addr & 0xfffffffc);
	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
	radeon_ring_write(ring, fence->seq);
	/* generate an interrupt */
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0));
	/* flush HDP */
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0));
	radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
	radeon_ring_write(ring, 1);
}
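
/* For reference, the eight dwords emitted above form this stream (a
 * descriptive sketch of the writes, not additional packets):
 *
 *   DMA_PACKET(DMA_PACKET_FENCE, 0, 0)
 *   addr & 0xfffffffc                  -- fence address, low 32 bits
 *   upper_32_bits(addr) & 0xff         -- fence address, high 8 bits
 *   fence->seq                         -- sequence number the engine writes
 *   DMA_PACKET(DMA_PACKET_TRAP, 0, 0)  -- raises the DMA trap interrupt
 *   DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0)
 *   (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2)
 *   1                                  -- SRBM write that flushes HDP
 */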

/**
 * evergreen_dma_ring_ib_execute - schedule an IB on the DMA engine
 *
 * @rdev: radeon_device pointer
 * @ib: IB object to schedule
 *
 * Schedule an IB in the DMA ring (evergreen).
 */
void evergreen_dma_ring_ib_execute(struct radeon_device *rdev,
				   struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->ring];

	if (rdev->wb.enabled) {
		u32 next_rptr = ring->wptr + 4;
		while ((next_rptr & 7) != 5)
			next_rptr++;
		next_rptr += 3;
		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 1));
		radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
		radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
		radeon_ring_write(ring, next_rptr);
	}

	/* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
	 * Pad as necessary with NOPs.
	 */
	while ((ring->wptr & 7) != 5)
		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0));
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0));
	radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
	radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
}
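
/* Worked example of the 8-DW alignment rule above (hypothetical wptr
 * values): the INDIRECT_BUFFER packet is 3 dwords, so it ends on an
 * 8-dword boundary exactly when it starts at (wptr & 7) == 5. If
 * wptr & 7 == 2 when the IB is scheduled, three NOPs are emitted
 * (advancing wptr through 3 and 4 to 5), then dwords 5, 6 and 7 carry
 * the packet header, the IB base address and the IB length.
 */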

/**
 * evergreen_copy_dma - copy pages using the DMA engine
 *
 * @rdev: radeon_device pointer
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @num_gpu_pages: number of GPU pages to xfer
 * @fence: radeon fence object
 *
 * Copy GPU pages using the DMA engine (evergreen-cayman).
 * Used by the radeon ttm implementation to move pages if
 * registered as the asic copy callback.
 */
int evergreen_copy_dma(struct radeon_device *rdev,
		       uint64_t src_offset, uint64_t dst_offset,
		       unsigned num_gpu_pages,
		       struct radeon_fence **fence)
{
	struct radeon_semaphore *sem = NULL;
	int ring_index = rdev->asic->copy.dma_ring_index;
	struct radeon_ring *ring = &rdev->ring[ring_index];
	u32 size_in_dw, cur_size_in_dw;
	int i, num_loops;
	int r = 0;

	r = radeon_semaphore_create(rdev, &sem);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d).\n", r);
		return r;
	}

	size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
	num_loops = DIV_ROUND_UP(size_in_dw, 0xfffff);
	r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d).\n", r);
		radeon_semaphore_free(rdev, &sem, NULL);
		return r;
	}

	if (radeon_fence_need_sync(*fence, ring->idx)) {
		radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
					    ring->idx);
		radeon_fence_note_sync(*fence, ring->idx);
	} else {
		radeon_semaphore_free(rdev, &sem, NULL);
	}

	for (i = 0; i < num_loops; i++) {
		cur_size_in_dw = size_in_dw;
		if (cur_size_in_dw > 0xFFFFF)
			cur_size_in_dw = 0xFFFFF;
		size_in_dw -= cur_size_in_dw;
		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, cur_size_in_dw));
		radeon_ring_write(ring, dst_offset & 0xfffffffc);
		radeon_ring_write(ring, src_offset & 0xfffffffc);
		radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
		radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
		src_offset += cur_size_in_dw * 4;
		dst_offset += cur_size_in_dw * 4;
	}

	r = radeon_fence_emit(rdev, fence, ring->idx);
	if (r) {
		radeon_ring_unlock_undo(rdev, ring);
		return r;
	}

	radeon_ring_unlock_commit(rdev, ring);
	radeon_semaphore_free(rdev, &sem, *fence);

	return r;
}
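
/* Sizing sketch for the ring reservation above, with hypothetical
 * numbers and assuming 4KB GPU pages (RADEON_GPU_PAGE_SHIFT == 12):
 * copying 1024 pages is (1024 << 12) / 4 = 0x100000 dwords. At most
 * 0xfffff dwords fit in one COPY packet, so num_loops = 2 and the lock
 * requests 2 * 5 + 11 = 21 dwords: 5 per copy packet plus room for the
 * semaphore sync and the fence emitted around the loop.
 */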

static int evergreen_startup(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	int r;

	/* enable pcie gen2 link */
	evergreen_pcie_gen2_enable(rdev);

	if (ASIC_IS_DCE5(rdev)) {
		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
			r = ni_init_microcode(rdev);
			if (r) {
				DRM_ERROR("Failed to load firmware!\n");
				return r;
			}
		}
		r = ni_mc_load_microcode(rdev);
		if (r) {
			DRM_ERROR("Failed to load MC firmware!\n");
			return r;
		}
	} else {
		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
			r = r600_init_microcode(rdev);
			if (r) {
				DRM_ERROR("Failed to load firmware!\n");
				return r;
			}
		}
	}

	r = r600_vram_scratch_init(rdev);
	if (r)
		return r;

	evergreen_mc_program(rdev);
	if (rdev->flags & RADEON_IS_AGP) {
		evergreen_agp_enable(rdev);
	} else {
		r = evergreen_pcie_gart_enable(rdev);
		if (r)
			return r;
	}
	evergreen_gpu_init(rdev);

	r = evergreen_blit_init(rdev);
	if (r) {
		r600_blit_fini(rdev);
		rdev->asic->copy.copy = NULL;
		dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
	}

	/* allocate wb buffer */
	r = radeon_wb_init(rdev);
	if (r)
		return r;

	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
		return r;
	}

	/* Enable IRQ */
	r = r600_irq_init(rdev);
	if (r) {
		DRM_ERROR("radeon: IH init failed (%d).\n", r);
		radeon_irq_kms_fini(rdev);
		return r;
	}
	evergreen_irq_set(rdev);

	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
			     R600_CP_RB_RPTR, R600_CP_RB_WPTR,
			     0, 0xfffff, RADEON_CP_PACKET2);
	if (r)
		return r;

	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
	r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
			     DMA_RB_RPTR, DMA_RB_WPTR,
			     2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0));
	if (r)
		return r;

	r = evergreen_cp_load_microcode(rdev);
	if (r)
		return r;
	r = evergreen_cp_resume(rdev);
	if (r)
		return r;
	r = r600_dma_resume(rdev);
	if (r)
		return r;

	r = radeon_ib_pool_init(rdev);
	if (r) {
		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
		return r;
	}

	r = r600_audio_init(rdev);
	if (r) {
		DRM_ERROR("radeon: audio init failed\n");
		return r;
	}

	return 0;
}

int evergreen_resume(struct radeon_device *rdev)
{
	int r;

	/* reset the asic, the gfx blocks are often in a bad state
	 * after the driver is unloaded or after a resume
	 */
	if (radeon_asic_reset(rdev))
		dev_warn(rdev->dev, "GPU reset failed!\n");
	/* Do not reset the GPU before posting; on rv770 hw, unlike r500 hw,
	 * posting performs the tasks needed to bring the GPU back into
	 * good shape.
	 */
	/* post card */
	atom_asic_init(rdev->mode_info.atom_context);

	rdev->accel_working = true;
	r = evergreen_startup(rdev);
	if (r) {
		DRM_ERROR("evergreen startup failed on resume\n");
		rdev->accel_working = false;
		return r;
	}

	return r;
}

int evergreen_suspend(struct radeon_device *rdev)
{
	r600_audio_fini(rdev);
	r700_cp_stop(rdev);
	r600_dma_stop(rdev);
	evergreen_irq_suspend(rdev);
	radeon_wb_disable(rdev);
	evergreen_pcie_gart_disable(rdev);

	return 0;
}

/* The plan is to move initialization into this function and use
 * helper functions so that radeon_device_init does little more than
 * calling asic-specific functions. This should also allow removing
 * a bunch of callbacks like vram_info.
 */
int evergreen_init(struct radeon_device *rdev)
{
	int r;

	/* Read BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	/* Must be an ATOMBIOS */
	if (!rdev->is_atom_bios) {
		dev_err(rdev->dev, "Expecting atombios for evergreen GPU\n");
		return -EINVAL;
	}
	r = radeon_atombios_init(rdev);
	if (r)
		return r;
	/* reset the asic, the gfx blocks are often in a bad state
	 * after the driver is unloaded or after a resume
	 */
	if (radeon_asic_reset(rdev))
		dev_warn(rdev->dev, "GPU reset failed!\n");
	/* Post card if necessary */
	if (!radeon_card_posted(rdev)) {
		if (!rdev->bios) {
			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
			return -EINVAL;
		}
		DRM_INFO("GPU not posted. posting now...\n");
		atom_asic_init(rdev->mode_info.atom_context);
	}
	/* Initialize scratch registers */
	r600_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	/* initialize AGP */
	if (rdev->flags & RADEON_IS_AGP) {
		r = radeon_agp_init(rdev);
		if (r)
			radeon_agp_disable(rdev);
	}
	/* initialize memory controller */
	r = evergreen_mc_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;

	r = radeon_irq_kms_init(rdev);
	if (r)
		return r;

	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
	r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);

	rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL;
	r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024);

	rdev->ih.ring_obj = NULL;
	r600_ih_ring_init(rdev, 64 * 1024);

	r = r600_pcie_gart_init(rdev);
	if (r)
		return r;

	rdev->accel_working = true;
	r = evergreen_startup(rdev);
	if (r) {
		dev_err(rdev->dev, "disabling GPU acceleration\n");
		r700_cp_fini(rdev);
		r600_dma_fini(rdev);
		r600_irq_fini(rdev);
		radeon_wb_fini(rdev);
		radeon_ib_pool_fini(rdev);
		radeon_irq_kms_fini(rdev);
		evergreen_pcie_gart_fini(rdev);
		rdev->accel_working = false;
	}

	/* Don't start up if the MC ucode is missing on BTC parts.
	 * The default clocks and voltages before the MC ucode
	 * is loaded are not sufficient for advanced operations.
	 */
	if (ASIC_IS_DCE5(rdev)) {
		if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) {
			DRM_ERROR("radeon: MC ucode required for NI+.\n");
			return -EINVAL;
		}
	}

	return 0;
}

void evergreen_fini(struct radeon_device *rdev)
{
	r600_audio_fini(rdev);
	r600_blit_fini(rdev);
	r700_cp_fini(rdev);
	r600_dma_fini(rdev);
	r600_irq_fini(rdev);
	radeon_wb_fini(rdev);
	radeon_ib_pool_fini(rdev);
	radeon_irq_kms_fini(rdev);
	evergreen_pcie_gart_fini(rdev);
	r600_vram_scratch_fini(rdev);
	radeon_gem_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_agp_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;
}

void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
{
	u32 link_width_cntl, speed_cntl, mask;
	int ret;

	if (radeon_pcie_gen2 == 0)
		return;

	if (rdev->flags & RADEON_IS_IGP)
		return;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return;

	/* x2 cards have a special sequence */
	if (ASIC_IS_X2(rdev))
		return;

	ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
	if (ret != 0)
		return;

	if (!(mask & DRM_PCIE_SPEED_50))
		return;

	speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
	if (speed_cntl & LC_CURRENT_DATA_RATE) {
		DRM_INFO("PCIE gen 2 link speeds already enabled\n");
		return;
	}

	DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");

	if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) ||
	    (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {

		link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
		link_width_cntl &= ~LC_UPCONFIGURE_DIS;
		WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);

		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
		speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);

		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
		speed_cntl |= LC_CLR_FAILED_SPD_CHANGE_CNT;
		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);

		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
		speed_cntl &= ~LC_CLR_FAILED_SPD_CHANGE_CNT;
		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);

		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
		speed_cntl |= LC_GEN2_EN_STRAP;
		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);

	} else {
		link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
		/* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
		if (1)
			link_width_cntl |= LC_UPCONFIGURE_DIS;
		else
			link_width_cntl &= ~LC_UPCONFIGURE_DIS;
		WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
	}
}