blob: 9eebf1f7e223bfdeadb3d4d53516831b0b7a4625 [file] [log] [blame]
Alex Deuchercc8dbbb2013-08-14 01:03:41 -04001/*
2 * Copyright 2013 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include "drmP.h"
25#include "radeon.h"
26#include "cikd.h"
27#include "r600_dpm.h"
28#include "ci_dpm.h"
29#include "atom.h"
Alex Deucher94b4adc2013-07-15 17:34:33 -040030#include <linux/seq_file.h>
Alex Deuchercc8dbbb2013-08-14 01:03:41 -040031
32#define MC_CG_ARB_FREQ_F0 0x0a
33#define MC_CG_ARB_FREQ_F1 0x0b
34#define MC_CG_ARB_FREQ_F2 0x0c
35#define MC_CG_ARB_FREQ_F3 0x0d
36
37#define SMC_RAM_END 0x40000
38
39#define VOLTAGE_SCALE 4
40#define VOLTAGE_VID_OFFSET_SCALE1 625
41#define VOLTAGE_VID_OFFSET_SCALE2 100
42
43static const struct ci_pt_defaults defaults_bonaire_xt =
44{
45 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
46 { 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61 },
47 { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
48};
49
50static const struct ci_pt_defaults defaults_bonaire_pro =
51{
52 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x65062,
53 { 0x8C, 0x23F, 0x244, 0xA6, 0x83, 0x85, 0x86, 0x86, 0x83, 0xDB, 0xDB, 0xDA, 0x67, 0x60, 0x5F },
54 { 0x187, 0x193, 0x193, 0x1C7, 0x1D1, 0x1D1, 0x210, 0x219, 0x219, 0x266, 0x26C, 0x26C, 0x2C9, 0x2CB, 0x2CB }
55};
56
57static const struct ci_pt_defaults defaults_saturn_xt =
58{
59 1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x70000,
60 { 0x8C, 0x247, 0x249, 0xA6, 0x80, 0x81, 0x8B, 0x89, 0x86, 0xC9, 0xCA, 0xC9, 0x4D, 0x4D, 0x4D },
61 { 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 }
62};
63
64static const struct ci_pt_defaults defaults_saturn_pro =
65{
66 1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x30000,
67 { 0x96, 0x21D, 0x23B, 0xA1, 0x85, 0x87, 0x83, 0x84, 0x81, 0xE6, 0xE6, 0xE6, 0x71, 0x6A, 0x6A },
68 { 0x193, 0x19E, 0x19E, 0x1D2, 0x1DC, 0x1DC, 0x21A, 0x223, 0x223, 0x26E, 0x27E, 0x274, 0x2CF, 0x2D2, 0x2D2 }
69};
70
71static const struct ci_pt_config_reg didt_config_ci[] =
72{
73 { 0x10, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
74 { 0x10, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
75 { 0x10, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
76 { 0x10, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
77 { 0x11, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
78 { 0x11, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
79 { 0x11, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
80 { 0x11, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
81 { 0x12, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
82 { 0x12, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
83 { 0x12, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
84 { 0x12, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
85 { 0x2, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
86 { 0x2, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
87 { 0x2, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
88 { 0x1, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
89 { 0x1, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
90 { 0x0, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
91 { 0x30, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
92 { 0x30, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
93 { 0x30, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
94 { 0x30, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
95 { 0x31, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
96 { 0x31, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
97 { 0x31, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
98 { 0x31, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
99 { 0x32, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
100 { 0x32, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
101 { 0x32, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
102 { 0x32, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
103 { 0x22, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
104 { 0x22, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
105 { 0x22, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
106 { 0x21, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
107 { 0x21, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
108 { 0x20, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
109 { 0x50, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
110 { 0x50, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
111 { 0x50, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
112 { 0x50, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
113 { 0x51, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
114 { 0x51, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
115 { 0x51, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
116 { 0x51, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
117 { 0x52, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
118 { 0x52, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
119 { 0x52, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
120 { 0x52, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
121 { 0x42, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
122 { 0x42, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
123 { 0x42, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
124 { 0x41, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
125 { 0x41, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
126 { 0x40, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
127 { 0x70, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
128 { 0x70, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
129 { 0x70, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
130 { 0x70, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
131 { 0x71, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
132 { 0x71, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
133 { 0x71, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
134 { 0x71, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
135 { 0x72, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
136 { 0x72, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
137 { 0x72, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
138 { 0x72, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
139 { 0x62, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
140 { 0x62, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
141 { 0x62, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
142 { 0x61, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
143 { 0x61, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
144 { 0x60, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
145 { 0xFFFFFFFF }
146};
147
148extern u8 rv770_get_memory_module_index(struct radeon_device *rdev);
149extern int ni_copy_and_switch_arb_sets(struct radeon_device *rdev,
150 u32 arb_freq_src, u32 arb_freq_dest);
151extern u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock);
152extern u8 si_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode);
153extern void si_trim_voltage_table_to_fit_state_table(struct radeon_device *rdev,
154 u32 max_voltage_steps,
155 struct atom_voltage_table *voltage_table);
156extern void cik_enter_rlc_safe_mode(struct radeon_device *rdev);
157extern void cik_exit_rlc_safe_mode(struct radeon_device *rdev);
158
159static int ci_get_std_voltage_value_sidd(struct radeon_device *rdev,
160 struct atom_voltage_table_entry *voltage_table,
161 u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd);
162static int ci_set_power_limit(struct radeon_device *rdev, u32 n);
163static int ci_set_overdrive_target_tdp(struct radeon_device *rdev,
164 u32 target_tdp);
165static int ci_update_uvd_dpm(struct radeon_device *rdev, bool gate);
166
167static struct ci_power_info *ci_get_pi(struct radeon_device *rdev)
168{
169 struct ci_power_info *pi = rdev->pm.dpm.priv;
170
171 return pi;
172}
173
174static struct ci_ps *ci_get_ps(struct radeon_ps *rps)
175{
176 struct ci_ps *ps = rps->ps_priv;
177
178 return ps;
179}
180
181static void ci_initialize_powertune_defaults(struct radeon_device *rdev)
182{
183 struct ci_power_info *pi = ci_get_pi(rdev);
184
185 switch (rdev->pdev->device) {
186 case 0x6650:
187 case 0x6658:
188 case 0x665C:
189 default:
190 pi->powertune_defaults = &defaults_bonaire_xt;
191 break;
192 case 0x6651:
193 case 0x665D:
194 pi->powertune_defaults = &defaults_bonaire_pro;
195 break;
196 case 0x6640:
197 pi->powertune_defaults = &defaults_saturn_xt;
198 break;
199 case 0x6641:
200 pi->powertune_defaults = &defaults_saturn_pro;
201 break;
202 }
203
204 pi->dte_tj_offset = 0;
205
206 pi->caps_power_containment = true;
207 pi->caps_cac = false;
208 pi->caps_sq_ramping = false;
209 pi->caps_db_ramping = false;
210 pi->caps_td_ramping = false;
211 pi->caps_tcp_ramping = false;
212
213 if (pi->caps_power_containment) {
214 pi->caps_cac = true;
215 pi->enable_bapm_feature = true;
216 pi->enable_tdc_limit_feature = true;
217 pi->enable_pkg_pwr_tracking_feature = true;
218 }
219}
220
221static u8 ci_convert_to_vid(u16 vddc)
222{
223 return (6200 - (vddc * VOLTAGE_SCALE)) / 25;
224}
225
226static int ci_populate_bapm_vddc_vid_sidd(struct radeon_device *rdev)
227{
228 struct ci_power_info *pi = ci_get_pi(rdev);
229 u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
230 u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
231 u8 *hi2_vid = pi->smc_powertune_table.BapmVddCVidHiSidd2;
232 u32 i;
233
234 if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries == NULL)
235 return -EINVAL;
236 if (rdev->pm.dpm.dyn_state.cac_leakage_table.count > 8)
237 return -EINVAL;
238 if (rdev->pm.dpm.dyn_state.cac_leakage_table.count !=
239 rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count)
240 return -EINVAL;
241
242 for (i = 0; i < rdev->pm.dpm.dyn_state.cac_leakage_table.count; i++) {
243 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
244 lo_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1);
245 hi_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2);
246 hi2_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3);
247 } else {
248 lo_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc);
249 hi_vid[i] = ci_convert_to_vid((u16)rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage);
250 }
251 }
252 return 0;
253}
254
255static int ci_populate_vddc_vid(struct radeon_device *rdev)
256{
257 struct ci_power_info *pi = ci_get_pi(rdev);
258 u8 *vid = pi->smc_powertune_table.VddCVid;
259 u32 i;
260
261 if (pi->vddc_voltage_table.count > 8)
262 return -EINVAL;
263
264 for (i = 0; i < pi->vddc_voltage_table.count; i++)
265 vid[i] = ci_convert_to_vid(pi->vddc_voltage_table.entries[i].value);
266
267 return 0;
268}
269
270static int ci_populate_svi_load_line(struct radeon_device *rdev)
271{
272 struct ci_power_info *pi = ci_get_pi(rdev);
273 const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
274
275 pi->smc_powertune_table.SviLoadLineEn = pt_defaults->svi_load_line_en;
276 pi->smc_powertune_table.SviLoadLineVddC = pt_defaults->svi_load_line_vddc;
277 pi->smc_powertune_table.SviLoadLineTrimVddC = 3;
278 pi->smc_powertune_table.SviLoadLineOffsetVddC = 0;
279
280 return 0;
281}
282
283static int ci_populate_tdc_limit(struct radeon_device *rdev)
284{
285 struct ci_power_info *pi = ci_get_pi(rdev);
286 const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
287 u16 tdc_limit;
288
289 tdc_limit = rdev->pm.dpm.dyn_state.cac_tdp_table->tdc * 256;
290 pi->smc_powertune_table.TDC_VDDC_PkgLimit = cpu_to_be16(tdc_limit);
291 pi->smc_powertune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
292 pt_defaults->tdc_vddc_throttle_release_limit_perc;
293 pi->smc_powertune_table.TDC_MAWt = pt_defaults->tdc_mawt;
294
295 return 0;
296}
297
298static int ci_populate_dw8(struct radeon_device *rdev)
299{
300 struct ci_power_info *pi = ci_get_pi(rdev);
301 const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
302 int ret;
303
304 ret = ci_read_smc_sram_dword(rdev,
305 SMU7_FIRMWARE_HEADER_LOCATION +
306 offsetof(SMU7_Firmware_Header, PmFuseTable) +
307 offsetof(SMU7_Discrete_PmFuses, TdcWaterfallCtl),
308 (u32 *)&pi->smc_powertune_table.TdcWaterfallCtl,
309 pi->sram_end);
310 if (ret)
311 return -EINVAL;
312 else
313 pi->smc_powertune_table.TdcWaterfallCtl = pt_defaults->tdc_waterfall_ctl;
314
315 return 0;
316}
317
318static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct radeon_device *rdev)
319{
320 struct ci_power_info *pi = ci_get_pi(rdev);
321 u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
322 u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
323 int i, min, max;
324
325 min = max = hi_vid[0];
326 for (i = 0; i < 8; i++) {
327 if (0 != hi_vid[i]) {
328 if (min > hi_vid[i])
329 min = hi_vid[i];
330 if (max < hi_vid[i])
331 max = hi_vid[i];
332 }
333
334 if (0 != lo_vid[i]) {
335 if (min > lo_vid[i])
336 min = lo_vid[i];
337 if (max < lo_vid[i])
338 max = lo_vid[i];
339 }
340 }
341
342 if ((min == 0) || (max == 0))
343 return -EINVAL;
344 pi->smc_powertune_table.GnbLPMLMaxVid = (u8)max;
345 pi->smc_powertune_table.GnbLPMLMinVid = (u8)min;
346
347 return 0;
348}
349
350static int ci_populate_bapm_vddc_base_leakage_sidd(struct radeon_device *rdev)
351{
352 struct ci_power_info *pi = ci_get_pi(rdev);
353 u16 hi_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd;
354 u16 lo_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd;
355 struct radeon_cac_tdp_table *cac_tdp_table =
356 rdev->pm.dpm.dyn_state.cac_tdp_table;
357
358 hi_sidd = cac_tdp_table->high_cac_leakage / 100 * 256;
359 lo_sidd = cac_tdp_table->low_cac_leakage / 100 * 256;
360
361 pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd = cpu_to_be16(hi_sidd);
362 pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd = cpu_to_be16(lo_sidd);
363
364 return 0;
365}
366
367static int ci_populate_bapm_parameters_in_dpm_table(struct radeon_device *rdev)
368{
369 struct ci_power_info *pi = ci_get_pi(rdev);
370 const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
371 SMU7_Discrete_DpmTable *dpm_table = &pi->smc_state_table;
372 struct radeon_cac_tdp_table *cac_tdp_table =
373 rdev->pm.dpm.dyn_state.cac_tdp_table;
374 struct radeon_ppm_table *ppm = rdev->pm.dpm.dyn_state.ppm_table;
375 int i, j, k;
376 const u16 *def1;
377 const u16 *def2;
378
379 dpm_table->DefaultTdp = cac_tdp_table->tdp * 256;
380 dpm_table->TargetTdp = cac_tdp_table->configurable_tdp * 256;
381
382 dpm_table->DTETjOffset = (u8)pi->dte_tj_offset;
383 dpm_table->GpuTjMax =
384 (u8)(pi->thermal_temp_setting.temperature_high / 1000);
385 dpm_table->GpuTjHyst = 8;
386
387 dpm_table->DTEAmbientTempBase = pt_defaults->dte_ambient_temp_base;
388
389 if (ppm) {
390 dpm_table->PPM_PkgPwrLimit = cpu_to_be16((u16)ppm->dgpu_tdp * 256 / 1000);
391 dpm_table->PPM_TemperatureLimit = cpu_to_be16((u16)ppm->tj_max * 256);
392 } else {
393 dpm_table->PPM_PkgPwrLimit = cpu_to_be16(0);
394 dpm_table->PPM_TemperatureLimit = cpu_to_be16(0);
395 }
396
397 dpm_table->BAPM_TEMP_GRADIENT = cpu_to_be32(pt_defaults->bapm_temp_gradient);
398 def1 = pt_defaults->bapmti_r;
399 def2 = pt_defaults->bapmti_rc;
400
401 for (i = 0; i < SMU7_DTE_ITERATIONS; i++) {
402 for (j = 0; j < SMU7_DTE_SOURCES; j++) {
403 for (k = 0; k < SMU7_DTE_SINKS; k++) {
404 dpm_table->BAPMTI_R[i][j][k] = cpu_to_be16(*def1);
405 dpm_table->BAPMTI_RC[i][j][k] = cpu_to_be16(*def2);
406 def1++;
407 def2++;
408 }
409 }
410 }
411
412 return 0;
413}
414
415static int ci_populate_pm_base(struct radeon_device *rdev)
416{
417 struct ci_power_info *pi = ci_get_pi(rdev);
418 u32 pm_fuse_table_offset;
419 int ret;
420
421 if (pi->caps_power_containment) {
422 ret = ci_read_smc_sram_dword(rdev,
423 SMU7_FIRMWARE_HEADER_LOCATION +
424 offsetof(SMU7_Firmware_Header, PmFuseTable),
425 &pm_fuse_table_offset, pi->sram_end);
426 if (ret)
427 return ret;
428 ret = ci_populate_bapm_vddc_vid_sidd(rdev);
429 if (ret)
430 return ret;
431 ret = ci_populate_vddc_vid(rdev);
432 if (ret)
433 return ret;
434 ret = ci_populate_svi_load_line(rdev);
435 if (ret)
436 return ret;
437 ret = ci_populate_tdc_limit(rdev);
438 if (ret)
439 return ret;
440 ret = ci_populate_dw8(rdev);
441 if (ret)
442 return ret;
443 ret = ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(rdev);
444 if (ret)
445 return ret;
446 ret = ci_populate_bapm_vddc_base_leakage_sidd(rdev);
447 if (ret)
448 return ret;
449 ret = ci_copy_bytes_to_smc(rdev, pm_fuse_table_offset,
450 (u8 *)&pi->smc_powertune_table,
451 sizeof(SMU7_Discrete_PmFuses), pi->sram_end);
452 if (ret)
453 return ret;
454 }
455
456 return 0;
457}
458
459static void ci_do_enable_didt(struct radeon_device *rdev, const bool enable)
460{
461 struct ci_power_info *pi = ci_get_pi(rdev);
462 u32 data;
463
464 if (pi->caps_sq_ramping) {
465 data = RREG32_DIDT(DIDT_SQ_CTRL0);
466 if (enable)
467 data |= DIDT_CTRL_EN;
468 else
469 data &= ~DIDT_CTRL_EN;
470 WREG32_DIDT(DIDT_SQ_CTRL0, data);
471 }
472
473 if (pi->caps_db_ramping) {
474 data = RREG32_DIDT(DIDT_DB_CTRL0);
475 if (enable)
476 data |= DIDT_CTRL_EN;
477 else
478 data &= ~DIDT_CTRL_EN;
479 WREG32_DIDT(DIDT_DB_CTRL0, data);
480 }
481
482 if (pi->caps_td_ramping) {
483 data = RREG32_DIDT(DIDT_TD_CTRL0);
484 if (enable)
485 data |= DIDT_CTRL_EN;
486 else
487 data &= ~DIDT_CTRL_EN;
488 WREG32_DIDT(DIDT_TD_CTRL0, data);
489 }
490
491 if (pi->caps_tcp_ramping) {
492 data = RREG32_DIDT(DIDT_TCP_CTRL0);
493 if (enable)
494 data |= DIDT_CTRL_EN;
495 else
496 data &= ~DIDT_CTRL_EN;
497 WREG32_DIDT(DIDT_TCP_CTRL0, data);
498 }
499}
500
501static int ci_program_pt_config_registers(struct radeon_device *rdev,
502 const struct ci_pt_config_reg *cac_config_regs)
503{
504 const struct ci_pt_config_reg *config_regs = cac_config_regs;
505 u32 data;
506 u32 cache = 0;
507
508 if (config_regs == NULL)
509 return -EINVAL;
510
511 while (config_regs->offset != 0xFFFFFFFF) {
512 if (config_regs->type == CISLANDS_CONFIGREG_CACHE) {
513 cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
514 } else {
515 switch (config_regs->type) {
516 case CISLANDS_CONFIGREG_SMC_IND:
517 data = RREG32_SMC(config_regs->offset);
518 break;
519 case CISLANDS_CONFIGREG_DIDT_IND:
520 data = RREG32_DIDT(config_regs->offset);
521 break;
522 default:
523 data = RREG32(config_regs->offset << 2);
524 break;
525 }
526
527 data &= ~config_regs->mask;
528 data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
529 data |= cache;
530
531 switch (config_regs->type) {
532 case CISLANDS_CONFIGREG_SMC_IND:
533 WREG32_SMC(config_regs->offset, data);
534 break;
535 case CISLANDS_CONFIGREG_DIDT_IND:
536 WREG32_DIDT(config_regs->offset, data);
537 break;
538 default:
539 WREG32(config_regs->offset << 2, data);
540 break;
541 }
542 cache = 0;
543 }
544 config_regs++;
545 }
546 return 0;
547}
548
549static int ci_enable_didt(struct radeon_device *rdev, bool enable)
550{
551 struct ci_power_info *pi = ci_get_pi(rdev);
552 int ret;
553
554 if (pi->caps_sq_ramping || pi->caps_db_ramping ||
555 pi->caps_td_ramping || pi->caps_tcp_ramping) {
556 cik_enter_rlc_safe_mode(rdev);
557
558 if (enable) {
559 ret = ci_program_pt_config_registers(rdev, didt_config_ci);
560 if (ret) {
561 cik_exit_rlc_safe_mode(rdev);
562 return ret;
563 }
564 }
565
566 ci_do_enable_didt(rdev, enable);
567
568 cik_exit_rlc_safe_mode(rdev);
569 }
570
571 return 0;
572}
573
574static int ci_enable_power_containment(struct radeon_device *rdev, bool enable)
575{
576 struct ci_power_info *pi = ci_get_pi(rdev);
577 PPSMC_Result smc_result;
578 int ret = 0;
579
580 if (enable) {
581 pi->power_containment_features = 0;
582 if (pi->caps_power_containment) {
583 if (pi->enable_bapm_feature) {
584 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableDTE);
585 if (smc_result != PPSMC_Result_OK)
586 ret = -EINVAL;
587 else
588 pi->power_containment_features |= POWERCONTAINMENT_FEATURE_BAPM;
589 }
590
591 if (pi->enable_tdc_limit_feature) {
592 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_TDCLimitEnable);
593 if (smc_result != PPSMC_Result_OK)
594 ret = -EINVAL;
595 else
596 pi->power_containment_features |= POWERCONTAINMENT_FEATURE_TDCLimit;
597 }
598
599 if (pi->enable_pkg_pwr_tracking_feature) {
600 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PkgPwrLimitEnable);
601 if (smc_result != PPSMC_Result_OK) {
602 ret = -EINVAL;
603 } else {
604 struct radeon_cac_tdp_table *cac_tdp_table =
605 rdev->pm.dpm.dyn_state.cac_tdp_table;
606 u32 default_pwr_limit =
607 (u32)(cac_tdp_table->maximum_power_delivery_limit * 256);
608
609 pi->power_containment_features |= POWERCONTAINMENT_FEATURE_PkgPwrLimit;
610
611 ci_set_power_limit(rdev, default_pwr_limit);
612 }
613 }
614 }
615 } else {
616 if (pi->caps_power_containment && pi->power_containment_features) {
617 if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_TDCLimit)
618 ci_send_msg_to_smc(rdev, PPSMC_MSG_TDCLimitDisable);
619
620 if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_BAPM)
621 ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableDTE);
622
623 if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit)
624 ci_send_msg_to_smc(rdev, PPSMC_MSG_PkgPwrLimitDisable);
625 pi->power_containment_features = 0;
626 }
627 }
628
629 return ret;
630}
631
632static int ci_enable_smc_cac(struct radeon_device *rdev, bool enable)
633{
634 struct ci_power_info *pi = ci_get_pi(rdev);
635 PPSMC_Result smc_result;
636 int ret = 0;
637
638 if (pi->caps_cac) {
639 if (enable) {
640 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableCac);
641 if (smc_result != PPSMC_Result_OK) {
642 ret = -EINVAL;
643 pi->cac_enabled = false;
644 } else {
645 pi->cac_enabled = true;
646 }
647 } else if (pi->cac_enabled) {
648 ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableCac);
649 pi->cac_enabled = false;
650 }
651 }
652
653 return ret;
654}
655
656static int ci_power_control_set_level(struct radeon_device *rdev)
657{
658 struct ci_power_info *pi = ci_get_pi(rdev);
659 struct radeon_cac_tdp_table *cac_tdp_table =
660 rdev->pm.dpm.dyn_state.cac_tdp_table;
661 s32 adjust_percent;
662 s32 target_tdp;
663 int ret = 0;
664 bool adjust_polarity = false; /* ??? */
665
666 if (pi->caps_power_containment &&
667 (pi->power_containment_features & POWERCONTAINMENT_FEATURE_BAPM)) {
668 adjust_percent = adjust_polarity ?
669 rdev->pm.dpm.tdp_adjustment : (-1 * rdev->pm.dpm.tdp_adjustment);
670 target_tdp = ((100 + adjust_percent) *
671 (s32)cac_tdp_table->configurable_tdp) / 100;
672 target_tdp *= 256;
673
674 ret = ci_set_overdrive_target_tdp(rdev, (u32)target_tdp);
675 }
676
677 return ret;
678}
679
680static void ci_dpm_powergate_uvd(struct radeon_device *rdev, bool gate)
681{
682 ci_update_uvd_dpm(rdev, gate);
683}
684
685static void ci_apply_state_adjust_rules(struct radeon_device *rdev,
686 struct radeon_ps *rps)
687{
688 struct ci_ps *ps = ci_get_ps(rps);
689 struct ci_power_info *pi = ci_get_pi(rdev);
690 struct radeon_clock_and_voltage_limits *max_limits;
691 bool disable_mclk_switching;
692 u32 sclk, mclk;
693 int i;
694
695 if (rdev->pm.dpm.new_active_crtc_count > 1)
696 disable_mclk_switching = true;
697 else
698 disable_mclk_switching = false;
699
700 if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
701 pi->battery_state = true;
702 else
703 pi->battery_state = false;
704
705 if (rdev->pm.dpm.ac_power)
706 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
707 else
708 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
709
710 if (rdev->pm.dpm.ac_power == false) {
711 for (i = 0; i < ps->performance_level_count; i++) {
712 if (ps->performance_levels[i].mclk > max_limits->mclk)
713 ps->performance_levels[i].mclk = max_limits->mclk;
714 if (ps->performance_levels[i].sclk > max_limits->sclk)
715 ps->performance_levels[i].sclk = max_limits->sclk;
716 }
717 }
718
719 /* XXX validate the min clocks required for display */
720
721 if (disable_mclk_switching) {
722 mclk = ps->performance_levels[ps->performance_level_count - 1].mclk;
723 sclk = ps->performance_levels[0].sclk;
724 } else {
725 mclk = ps->performance_levels[0].mclk;
726 sclk = ps->performance_levels[0].sclk;
727 }
728
729 ps->performance_levels[0].sclk = sclk;
730 ps->performance_levels[0].mclk = mclk;
731
732 if (ps->performance_levels[1].sclk < ps->performance_levels[0].sclk)
733 ps->performance_levels[1].sclk = ps->performance_levels[0].sclk;
734
735 if (disable_mclk_switching) {
736 if (ps->performance_levels[0].mclk < ps->performance_levels[1].mclk)
737 ps->performance_levels[0].mclk = ps->performance_levels[1].mclk;
738 } else {
739 if (ps->performance_levels[1].mclk < ps->performance_levels[0].mclk)
740 ps->performance_levels[1].mclk = ps->performance_levels[0].mclk;
741 }
742}
743
744static int ci_set_thermal_temperature_range(struct radeon_device *rdev,
745 int min_temp, int max_temp)
746{
747 int low_temp = 0 * 1000;
748 int high_temp = 255 * 1000;
749 u32 tmp;
750
751 if (low_temp < min_temp)
752 low_temp = min_temp;
753 if (high_temp > max_temp)
754 high_temp = max_temp;
755 if (high_temp < low_temp) {
756 DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
757 return -EINVAL;
758 }
759
760 tmp = RREG32_SMC(CG_THERMAL_INT);
761 tmp &= ~(CI_DIG_THERM_INTH_MASK | CI_DIG_THERM_INTL_MASK);
762 tmp |= CI_DIG_THERM_INTH(high_temp / 1000) |
763 CI_DIG_THERM_INTL(low_temp / 1000);
764 WREG32_SMC(CG_THERMAL_INT, tmp);
765
766#if 0
767 /* XXX: need to figure out how to handle this properly */
768 tmp = RREG32_SMC(CG_THERMAL_CTRL);
769 tmp &= DIG_THERM_DPM_MASK;
770 tmp |= DIG_THERM_DPM(high_temp / 1000);
771 WREG32_SMC(CG_THERMAL_CTRL, tmp);
772#endif
773
774 return 0;
775}
776
777#if 0
778static int ci_read_smc_soft_register(struct radeon_device *rdev,
779 u16 reg_offset, u32 *value)
780{
781 struct ci_power_info *pi = ci_get_pi(rdev);
782
783 return ci_read_smc_sram_dword(rdev,
784 pi->soft_regs_start + reg_offset,
785 value, pi->sram_end);
786}
787#endif
788
789static int ci_write_smc_soft_register(struct radeon_device *rdev,
790 u16 reg_offset, u32 value)
791{
792 struct ci_power_info *pi = ci_get_pi(rdev);
793
794 return ci_write_smc_sram_dword(rdev,
795 pi->soft_regs_start + reg_offset,
796 value, pi->sram_end);
797}
798
799static void ci_init_fps_limits(struct radeon_device *rdev)
800{
801 struct ci_power_info *pi = ci_get_pi(rdev);
802 SMU7_Discrete_DpmTable *table = &pi->smc_state_table;
803
804 if (pi->caps_fps) {
805 u16 tmp;
806
807 tmp = 45;
808 table->FpsHighT = cpu_to_be16(tmp);
809
810 tmp = 30;
811 table->FpsLowT = cpu_to_be16(tmp);
812 }
813}
814
815static int ci_update_sclk_t(struct radeon_device *rdev)
816{
817 struct ci_power_info *pi = ci_get_pi(rdev);
818 int ret = 0;
819 u32 low_sclk_interrupt_t = 0;
820
821 if (pi->caps_sclk_throttle_low_notification) {
822 low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t);
823
824 ret = ci_copy_bytes_to_smc(rdev,
825 pi->dpm_table_start +
826 offsetof(SMU7_Discrete_DpmTable, LowSclkInterruptT),
827 (u8 *)&low_sclk_interrupt_t,
828 sizeof(u32), pi->sram_end);
829
830 }
831
832 return ret;
833}
834
835static void ci_get_leakage_voltages(struct radeon_device *rdev)
836{
837 struct ci_power_info *pi = ci_get_pi(rdev);
838 u16 leakage_id, virtual_voltage_id;
839 u16 vddc, vddci;
840 int i;
841
842 pi->vddc_leakage.count = 0;
843 pi->vddci_leakage.count = 0;
844
845 if (radeon_atom_get_leakage_id_from_vbios(rdev, &leakage_id) == 0) {
846 for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
847 virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
848 if (radeon_atom_get_leakage_vddc_based_on_leakage_params(rdev, &vddc, &vddci,
849 virtual_voltage_id,
850 leakage_id) == 0) {
851 if (vddc != 0 && vddc != virtual_voltage_id) {
852 pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
853 pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
854 pi->vddc_leakage.count++;
855 }
856 if (vddci != 0 && vddci != virtual_voltage_id) {
857 pi->vddci_leakage.actual_voltage[pi->vddci_leakage.count] = vddci;
858 pi->vddci_leakage.leakage_id[pi->vddci_leakage.count] = virtual_voltage_id;
859 pi->vddci_leakage.count++;
860 }
861 }
862 }
863 }
864}
865
866static void ci_set_dpm_event_sources(struct radeon_device *rdev, u32 sources)
867{
868 struct ci_power_info *pi = ci_get_pi(rdev);
869 bool want_thermal_protection;
870 enum radeon_dpm_event_src dpm_event_src;
871 u32 tmp;
872
873 switch (sources) {
874 case 0:
875 default:
876 want_thermal_protection = false;
877 break;
878 case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL):
879 want_thermal_protection = true;
880 dpm_event_src = RADEON_DPM_EVENT_SRC_DIGITAL;
881 break;
882 case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
883 want_thermal_protection = true;
884 dpm_event_src = RADEON_DPM_EVENT_SRC_EXTERNAL;
885 break;
886 case ((1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
887 (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL)):
888 want_thermal_protection = true;
889 dpm_event_src = RADEON_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL;
890 break;
891 }
892
893 if (want_thermal_protection) {
894#if 0
895 /* XXX: need to figure out how to handle this properly */
896 tmp = RREG32_SMC(CG_THERMAL_CTRL);
897 tmp &= DPM_EVENT_SRC_MASK;
898 tmp |= DPM_EVENT_SRC(dpm_event_src);
899 WREG32_SMC(CG_THERMAL_CTRL, tmp);
900#endif
901
902 tmp = RREG32_SMC(GENERAL_PWRMGT);
903 if (pi->thermal_protection)
904 tmp &= ~THERMAL_PROTECTION_DIS;
905 else
906 tmp |= THERMAL_PROTECTION_DIS;
907 WREG32_SMC(GENERAL_PWRMGT, tmp);
908 } else {
909 tmp = RREG32_SMC(GENERAL_PWRMGT);
910 tmp |= THERMAL_PROTECTION_DIS;
911 WREG32_SMC(GENERAL_PWRMGT, tmp);
912 }
913}
914
915static void ci_enable_auto_throttle_source(struct radeon_device *rdev,
916 enum radeon_dpm_auto_throttle_src source,
917 bool enable)
918{
919 struct ci_power_info *pi = ci_get_pi(rdev);
920
921 if (enable) {
922 if (!(pi->active_auto_throttle_sources & (1 << source))) {
923 pi->active_auto_throttle_sources |= 1 << source;
924 ci_set_dpm_event_sources(rdev, pi->active_auto_throttle_sources);
925 }
926 } else {
927 if (pi->active_auto_throttle_sources & (1 << source)) {
928 pi->active_auto_throttle_sources &= ~(1 << source);
929 ci_set_dpm_event_sources(rdev, pi->active_auto_throttle_sources);
930 }
931 }
932}
933
934static void ci_enable_vr_hot_gpio_interrupt(struct radeon_device *rdev)
935{
936 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT)
937 ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableVRHotGPIOInterrupt);
938}
939
940static int ci_unfreeze_sclk_mclk_dpm(struct radeon_device *rdev)
941{
942 struct ci_power_info *pi = ci_get_pi(rdev);
943 PPSMC_Result smc_result;
944
945 if (!pi->need_update_smu7_dpm_table)
946 return 0;
947
948 if ((!pi->sclk_dpm_key_disabled) &&
949 (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
950 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_SCLKDPM_UnfreezeLevel);
951 if (smc_result != PPSMC_Result_OK)
952 return -EINVAL;
953 }
954
955 if ((!pi->mclk_dpm_key_disabled) &&
956 (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
957 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_UnfreezeLevel);
958 if (smc_result != PPSMC_Result_OK)
959 return -EINVAL;
960 }
961
962 pi->need_update_smu7_dpm_table = 0;
963 return 0;
964}
965
966static int ci_enable_sclk_mclk_dpm(struct radeon_device *rdev, bool enable)
967{
968 struct ci_power_info *pi = ci_get_pi(rdev);
969 PPSMC_Result smc_result;
970
971 if (enable) {
972 if (!pi->sclk_dpm_key_disabled) {
973 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_DPM_Enable);
974 if (smc_result != PPSMC_Result_OK)
975 return -EINVAL;
976 }
977
978 if (!pi->mclk_dpm_key_disabled) {
979 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_Enable);
980 if (smc_result != PPSMC_Result_OK)
981 return -EINVAL;
982
983 WREG32_P(MC_SEQ_CNTL_3, CAC_EN, ~CAC_EN);
984
985 WREG32_SMC(LCAC_MC0_CNTL, 0x05);
986 WREG32_SMC(LCAC_MC1_CNTL, 0x05);
987 WREG32_SMC(LCAC_CPL_CNTL, 0x100005);
988
989 udelay(10);
990
991 WREG32_SMC(LCAC_MC0_CNTL, 0x400005);
992 WREG32_SMC(LCAC_MC1_CNTL, 0x400005);
993 WREG32_SMC(LCAC_CPL_CNTL, 0x500005);
994 }
995 } else {
996 if (!pi->sclk_dpm_key_disabled) {
997 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_DPM_Disable);
998 if (smc_result != PPSMC_Result_OK)
999 return -EINVAL;
1000 }
1001
1002 if (!pi->mclk_dpm_key_disabled) {
1003 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_Disable);
1004 if (smc_result != PPSMC_Result_OK)
1005 return -EINVAL;
1006 }
1007 }
1008
1009 return 0;
1010}
1011
1012static int ci_start_dpm(struct radeon_device *rdev)
1013{
1014 struct ci_power_info *pi = ci_get_pi(rdev);
1015 PPSMC_Result smc_result;
1016 int ret;
1017 u32 tmp;
1018
1019 tmp = RREG32_SMC(GENERAL_PWRMGT);
1020 tmp |= GLOBAL_PWRMGT_EN;
1021 WREG32_SMC(GENERAL_PWRMGT, tmp);
1022
1023 tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
1024 tmp |= DYNAMIC_PM_EN;
1025 WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
1026
1027 ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, VoltageChangeTimeout), 0x1000);
1028
1029 WREG32_P(BIF_LNCNT_RESET, 0, ~RESET_LNCNT_EN);
1030
1031 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Voltage_Cntl_Enable);
1032 if (smc_result != PPSMC_Result_OK)
1033 return -EINVAL;
1034
1035 ret = ci_enable_sclk_mclk_dpm(rdev, true);
1036 if (ret)
1037 return ret;
1038
1039 if (!pi->pcie_dpm_key_disabled) {
1040 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_Enable);
1041 if (smc_result != PPSMC_Result_OK)
1042 return -EINVAL;
1043 }
1044
1045 return 0;
1046}
1047
1048static int ci_freeze_sclk_mclk_dpm(struct radeon_device *rdev)
1049{
1050 struct ci_power_info *pi = ci_get_pi(rdev);
1051 PPSMC_Result smc_result;
1052
1053 if (!pi->need_update_smu7_dpm_table)
1054 return 0;
1055
1056 if ((!pi->sclk_dpm_key_disabled) &&
1057 (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
1058 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_SCLKDPM_FreezeLevel);
1059 if (smc_result != PPSMC_Result_OK)
1060 return -EINVAL;
1061 }
1062
1063 if ((!pi->mclk_dpm_key_disabled) &&
1064 (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
1065 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_FreezeLevel);
1066 if (smc_result != PPSMC_Result_OK)
1067 return -EINVAL;
1068 }
1069
1070 return 0;
1071}
1072
1073static int ci_stop_dpm(struct radeon_device *rdev)
1074{
1075 struct ci_power_info *pi = ci_get_pi(rdev);
1076 PPSMC_Result smc_result;
1077 int ret;
1078 u32 tmp;
1079
1080 tmp = RREG32_SMC(GENERAL_PWRMGT);
1081 tmp &= ~GLOBAL_PWRMGT_EN;
1082 WREG32_SMC(GENERAL_PWRMGT, tmp);
1083
1084 tmp = RREG32(SCLK_PWRMGT_CNTL);
1085 tmp &= ~DYNAMIC_PM_EN;
1086 WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
1087
1088 if (!pi->pcie_dpm_key_disabled) {
1089 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_Disable);
1090 if (smc_result != PPSMC_Result_OK)
1091 return -EINVAL;
1092 }
1093
1094 ret = ci_enable_sclk_mclk_dpm(rdev, false);
1095 if (ret)
1096 return ret;
1097
1098 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Voltage_Cntl_Disable);
1099 if (smc_result != PPSMC_Result_OK)
1100 return -EINVAL;
1101
1102 return 0;
1103}
1104
1105static void ci_enable_sclk_control(struct radeon_device *rdev, bool enable)
1106{
1107 u32 tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
1108
1109 if (enable)
1110 tmp &= ~SCLK_PWRMGT_OFF;
1111 else
1112 tmp |= SCLK_PWRMGT_OFF;
1113 WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
1114}
1115
1116#if 0
1117static int ci_notify_hw_of_power_source(struct radeon_device *rdev,
1118 bool ac_power)
1119{
1120 struct ci_power_info *pi = ci_get_pi(rdev);
1121 struct radeon_cac_tdp_table *cac_tdp_table =
1122 rdev->pm.dpm.dyn_state.cac_tdp_table;
1123 u32 power_limit;
1124
1125 if (ac_power)
1126 power_limit = (u32)(cac_tdp_table->maximum_power_delivery_limit * 256);
1127 else
1128 power_limit = (u32)(cac_tdp_table->battery_power_limit * 256);
1129
1130 ci_set_power_limit(rdev, power_limit);
1131
1132 if (pi->caps_automatic_dc_transition) {
1133 if (ac_power)
1134 ci_send_msg_to_smc(rdev, PPSMC_MSG_RunningOnAC);
1135 else
1136 ci_send_msg_to_smc(rdev, PPSMC_MSG_Remove_DC_Clamp);
1137 }
1138
1139 return 0;
1140}
1141#endif
1142
1143static PPSMC_Result ci_send_msg_to_smc_with_parameter(struct radeon_device *rdev,
1144 PPSMC_Msg msg, u32 parameter)
1145{
1146 WREG32(SMC_MSG_ARG_0, parameter);
1147 return ci_send_msg_to_smc(rdev, msg);
1148}
1149
1150static PPSMC_Result ci_send_msg_to_smc_return_parameter(struct radeon_device *rdev,
1151 PPSMC_Msg msg, u32 *parameter)
1152{
1153 PPSMC_Result smc_result;
1154
1155 smc_result = ci_send_msg_to_smc(rdev, msg);
1156
1157 if ((smc_result == PPSMC_Result_OK) && parameter)
1158 *parameter = RREG32(SMC_MSG_ARG_0);
1159
1160 return smc_result;
1161}
1162
1163static int ci_dpm_force_state_sclk(struct radeon_device *rdev, u32 n)
1164{
1165 struct ci_power_info *pi = ci_get_pi(rdev);
1166
1167 if (!pi->sclk_dpm_key_disabled) {
1168 PPSMC_Result smc_result =
1169 ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, n);
1170 if (smc_result != PPSMC_Result_OK)
1171 return -EINVAL;
1172 }
1173
1174 return 0;
1175}
1176
1177static int ci_dpm_force_state_mclk(struct radeon_device *rdev, u32 n)
1178{
1179 struct ci_power_info *pi = ci_get_pi(rdev);
1180
1181 if (!pi->mclk_dpm_key_disabled) {
1182 PPSMC_Result smc_result =
1183 ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_MCLKDPM_ForceState, n);
1184 if (smc_result != PPSMC_Result_OK)
1185 return -EINVAL;
1186 }
1187
1188 return 0;
1189}
1190
1191static int ci_dpm_force_state_pcie(struct radeon_device *rdev, u32 n)
1192{
1193 struct ci_power_info *pi = ci_get_pi(rdev);
1194
1195 if (!pi->pcie_dpm_key_disabled) {
1196 PPSMC_Result smc_result =
1197 ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_PCIeDPM_ForceLevel, n);
1198 if (smc_result != PPSMC_Result_OK)
1199 return -EINVAL;
1200 }
1201
1202 return 0;
1203}
1204
1205static int ci_set_power_limit(struct radeon_device *rdev, u32 n)
1206{
1207 struct ci_power_info *pi = ci_get_pi(rdev);
1208
1209 if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit) {
1210 PPSMC_Result smc_result =
1211 ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_PkgPwrSetLimit, n);
1212 if (smc_result != PPSMC_Result_OK)
1213 return -EINVAL;
1214 }
1215
1216 return 0;
1217}
1218
1219static int ci_set_overdrive_target_tdp(struct radeon_device *rdev,
1220 u32 target_tdp)
1221{
1222 PPSMC_Result smc_result =
1223 ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_OverDriveSetTargetTdp, target_tdp);
1224 if (smc_result != PPSMC_Result_OK)
1225 return -EINVAL;
1226 return 0;
1227}
1228
1229static int ci_set_boot_state(struct radeon_device *rdev)
1230{
1231 return ci_enable_sclk_mclk_dpm(rdev, false);
1232}
1233
1234static u32 ci_get_average_sclk_freq(struct radeon_device *rdev)
1235{
1236 u32 sclk_freq;
1237 PPSMC_Result smc_result =
1238 ci_send_msg_to_smc_return_parameter(rdev,
1239 PPSMC_MSG_API_GetSclkFrequency,
1240 &sclk_freq);
1241 if (smc_result != PPSMC_Result_OK)
1242 sclk_freq = 0;
1243
1244 return sclk_freq;
1245}
1246
1247static u32 ci_get_average_mclk_freq(struct radeon_device *rdev)
1248{
1249 u32 mclk_freq;
1250 PPSMC_Result smc_result =
1251 ci_send_msg_to_smc_return_parameter(rdev,
1252 PPSMC_MSG_API_GetMclkFrequency,
1253 &mclk_freq);
1254 if (smc_result != PPSMC_Result_OK)
1255 mclk_freq = 0;
1256
1257 return mclk_freq;
1258}
1259
1260static void ci_dpm_start_smc(struct radeon_device *rdev)
1261{
1262 int i;
1263
1264 ci_program_jump_on_start(rdev);
1265 ci_start_smc_clock(rdev);
1266 ci_start_smc(rdev);
1267 for (i = 0; i < rdev->usec_timeout; i++) {
1268 if (RREG32_SMC(FIRMWARE_FLAGS) & INTERRUPTS_ENABLED)
1269 break;
1270 }
1271}
1272
1273static void ci_dpm_stop_smc(struct radeon_device *rdev)
1274{
1275 ci_reset_smc(rdev);
1276 ci_stop_smc_clock(rdev);
1277}
1278
1279static int ci_process_firmware_header(struct radeon_device *rdev)
1280{
1281 struct ci_power_info *pi = ci_get_pi(rdev);
1282 u32 tmp;
1283 int ret;
1284
1285 ret = ci_read_smc_sram_dword(rdev,
1286 SMU7_FIRMWARE_HEADER_LOCATION +
1287 offsetof(SMU7_Firmware_Header, DpmTable),
1288 &tmp, pi->sram_end);
1289 if (ret)
1290 return ret;
1291
1292 pi->dpm_table_start = tmp;
1293
1294 ret = ci_read_smc_sram_dword(rdev,
1295 SMU7_FIRMWARE_HEADER_LOCATION +
1296 offsetof(SMU7_Firmware_Header, SoftRegisters),
1297 &tmp, pi->sram_end);
1298 if (ret)
1299 return ret;
1300
1301 pi->soft_regs_start = tmp;
1302
1303 ret = ci_read_smc_sram_dword(rdev,
1304 SMU7_FIRMWARE_HEADER_LOCATION +
1305 offsetof(SMU7_Firmware_Header, mcRegisterTable),
1306 &tmp, pi->sram_end);
1307 if (ret)
1308 return ret;
1309
1310 pi->mc_reg_table_start = tmp;
1311
1312 ret = ci_read_smc_sram_dword(rdev,
1313 SMU7_FIRMWARE_HEADER_LOCATION +
1314 offsetof(SMU7_Firmware_Header, FanTable),
1315 &tmp, pi->sram_end);
1316 if (ret)
1317 return ret;
1318
1319 pi->fan_table_start = tmp;
1320
1321 ret = ci_read_smc_sram_dword(rdev,
1322 SMU7_FIRMWARE_HEADER_LOCATION +
1323 offsetof(SMU7_Firmware_Header, mcArbDramTimingTable),
1324 &tmp, pi->sram_end);
1325 if (ret)
1326 return ret;
1327
1328 pi->arb_table_start = tmp;
1329
1330 return 0;
1331}
1332
1333static void ci_read_clock_registers(struct radeon_device *rdev)
1334{
1335 struct ci_power_info *pi = ci_get_pi(rdev);
1336
1337 pi->clock_registers.cg_spll_func_cntl =
1338 RREG32_SMC(CG_SPLL_FUNC_CNTL);
1339 pi->clock_registers.cg_spll_func_cntl_2 =
1340 RREG32_SMC(CG_SPLL_FUNC_CNTL_2);
1341 pi->clock_registers.cg_spll_func_cntl_3 =
1342 RREG32_SMC(CG_SPLL_FUNC_CNTL_3);
1343 pi->clock_registers.cg_spll_func_cntl_4 =
1344 RREG32_SMC(CG_SPLL_FUNC_CNTL_4);
1345 pi->clock_registers.cg_spll_spread_spectrum =
1346 RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM);
1347 pi->clock_registers.cg_spll_spread_spectrum_2 =
1348 RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM_2);
1349 pi->clock_registers.dll_cntl = RREG32(DLL_CNTL);
1350 pi->clock_registers.mclk_pwrmgt_cntl = RREG32(MCLK_PWRMGT_CNTL);
1351 pi->clock_registers.mpll_ad_func_cntl = RREG32(MPLL_AD_FUNC_CNTL);
1352 pi->clock_registers.mpll_dq_func_cntl = RREG32(MPLL_DQ_FUNC_CNTL);
1353 pi->clock_registers.mpll_func_cntl = RREG32(MPLL_FUNC_CNTL);
1354 pi->clock_registers.mpll_func_cntl_1 = RREG32(MPLL_FUNC_CNTL_1);
1355 pi->clock_registers.mpll_func_cntl_2 = RREG32(MPLL_FUNC_CNTL_2);
1356 pi->clock_registers.mpll_ss1 = RREG32(MPLL_SS1);
1357 pi->clock_registers.mpll_ss2 = RREG32(MPLL_SS2);
1358}
1359
1360static void ci_init_sclk_t(struct radeon_device *rdev)
1361{
1362 struct ci_power_info *pi = ci_get_pi(rdev);
1363
1364 pi->low_sclk_interrupt_t = 0;
1365}
1366
1367static void ci_enable_thermal_protection(struct radeon_device *rdev,
1368 bool enable)
1369{
1370 u32 tmp = RREG32_SMC(GENERAL_PWRMGT);
1371
1372 if (enable)
1373 tmp &= ~THERMAL_PROTECTION_DIS;
1374 else
1375 tmp |= THERMAL_PROTECTION_DIS;
1376 WREG32_SMC(GENERAL_PWRMGT, tmp);
1377}
1378
1379static void ci_enable_acpi_power_management(struct radeon_device *rdev)
1380{
1381 u32 tmp = RREG32_SMC(GENERAL_PWRMGT);
1382
1383 tmp |= STATIC_PM_EN;
1384
1385 WREG32_SMC(GENERAL_PWRMGT, tmp);
1386}
1387
1388#if 0
1389static int ci_enter_ulp_state(struct radeon_device *rdev)
1390{
1391
1392 WREG32(SMC_MESSAGE_0, PPSMC_MSG_SwitchToMinimumPower);
1393
1394 udelay(25000);
1395
1396 return 0;
1397}
1398
1399static int ci_exit_ulp_state(struct radeon_device *rdev)
1400{
1401 int i;
1402
1403 WREG32(SMC_MESSAGE_0, PPSMC_MSG_ResumeFromMinimumPower);
1404
1405 udelay(7000);
1406
1407 for (i = 0; i < rdev->usec_timeout; i++) {
1408 if (RREG32(SMC_RESP_0) == 1)
1409 break;
1410 udelay(1000);
1411 }
1412
1413 return 0;
1414}
1415#endif
1416
1417static int ci_notify_smc_display_change(struct radeon_device *rdev,
1418 bool has_display)
1419{
1420 PPSMC_Msg msg = has_display ? PPSMC_MSG_HasDisplay : PPSMC_MSG_NoDisplay;
1421
1422 return (ci_send_msg_to_smc(rdev, msg) == PPSMC_Result_OK) ? 0 : -EINVAL;
1423}
1424
1425static int ci_enable_ds_master_switch(struct radeon_device *rdev,
1426 bool enable)
1427{
1428 struct ci_power_info *pi = ci_get_pi(rdev);
1429
1430 if (enable) {
1431 if (pi->caps_sclk_ds) {
1432 if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_ON) != PPSMC_Result_OK)
1433 return -EINVAL;
1434 } else {
1435 if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
1436 return -EINVAL;
1437 }
1438 } else {
1439 if (pi->caps_sclk_ds) {
1440 if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
1441 return -EINVAL;
1442 }
1443 }
1444
1445 return 0;
1446}
1447
1448static void ci_program_display_gap(struct radeon_device *rdev)
1449{
1450 u32 tmp = RREG32_SMC(CG_DISPLAY_GAP_CNTL);
1451 u32 pre_vbi_time_in_us;
1452 u32 frame_time_in_us;
1453 u32 ref_clock = rdev->clock.spll.reference_freq;
1454 u32 refresh_rate = r600_dpm_get_vrefresh(rdev);
1455 u32 vblank_time = r600_dpm_get_vblank_time(rdev);
1456
1457 tmp &= ~DISP_GAP_MASK;
1458 if (rdev->pm.dpm.new_active_crtc_count > 0)
1459 tmp |= DISP_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM);
1460 else
1461 tmp |= DISP_GAP(R600_PM_DISPLAY_GAP_IGNORE);
1462 WREG32_SMC(CG_DISPLAY_GAP_CNTL, tmp);
1463
1464 if (refresh_rate == 0)
1465 refresh_rate = 60;
1466 if (vblank_time == 0xffffffff)
1467 vblank_time = 500;
1468 frame_time_in_us = 1000000 / refresh_rate;
1469 pre_vbi_time_in_us =
1470 frame_time_in_us - 200 - vblank_time;
1471 tmp = pre_vbi_time_in_us * (ref_clock / 100);
1472
1473 WREG32_SMC(CG_DISPLAY_GAP_CNTL2, tmp);
1474 ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, PreVBlankGap), 0x64);
1475 ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us));
1476
1477
1478 ci_notify_smc_display_change(rdev, (rdev->pm.dpm.new_active_crtc_count == 1));
1479
1480}
1481
1482static void ci_enable_spread_spectrum(struct radeon_device *rdev, bool enable)
1483{
1484 struct ci_power_info *pi = ci_get_pi(rdev);
1485 u32 tmp;
1486
1487 if (enable) {
1488 if (pi->caps_sclk_ss_support) {
1489 tmp = RREG32_SMC(GENERAL_PWRMGT);
1490 tmp |= DYN_SPREAD_SPECTRUM_EN;
1491 WREG32_SMC(GENERAL_PWRMGT, tmp);
1492 }
1493 } else {
1494 tmp = RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM);
1495 tmp &= ~SSEN;
1496 WREG32_SMC(CG_SPLL_SPREAD_SPECTRUM, tmp);
1497
1498 tmp = RREG32_SMC(GENERAL_PWRMGT);
1499 tmp &= ~DYN_SPREAD_SPECTRUM_EN;
1500 WREG32_SMC(GENERAL_PWRMGT, tmp);
1501 }
1502}
1503
1504static void ci_program_sstp(struct radeon_device *rdev)
1505{
1506 WREG32_SMC(CG_SSP, (SSTU(R600_SSTU_DFLT) | SST(R600_SST_DFLT)));
1507}
1508
1509static void ci_enable_display_gap(struct radeon_device *rdev)
1510{
1511 u32 tmp = RREG32_SMC(CG_DISPLAY_GAP_CNTL);
1512
1513 tmp &= ~(DISP_GAP_MASK | DISP_GAP_MCHG_MASK);
1514 tmp |= (DISP_GAP(R600_PM_DISPLAY_GAP_IGNORE) |
1515 DISP_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK));
1516
1517 WREG32_SMC(CG_DISPLAY_GAP_CNTL, tmp);
1518}
1519
1520static void ci_program_vc(struct radeon_device *rdev)
1521{
1522 u32 tmp;
1523
1524 tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
1525 tmp &= ~(RESET_SCLK_CNT | RESET_BUSY_CNT);
1526 WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
1527
1528 WREG32_SMC(CG_FTV_0, CISLANDS_VRC_DFLT0);
1529 WREG32_SMC(CG_FTV_1, CISLANDS_VRC_DFLT1);
1530 WREG32_SMC(CG_FTV_2, CISLANDS_VRC_DFLT2);
1531 WREG32_SMC(CG_FTV_3, CISLANDS_VRC_DFLT3);
1532 WREG32_SMC(CG_FTV_4, CISLANDS_VRC_DFLT4);
1533 WREG32_SMC(CG_FTV_5, CISLANDS_VRC_DFLT5);
1534 WREG32_SMC(CG_FTV_6, CISLANDS_VRC_DFLT6);
1535 WREG32_SMC(CG_FTV_7, CISLANDS_VRC_DFLT7);
1536}
1537
1538static void ci_clear_vc(struct radeon_device *rdev)
1539{
1540 u32 tmp;
1541
1542 tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
1543 tmp |= (RESET_SCLK_CNT | RESET_BUSY_CNT);
1544 WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
1545
1546 WREG32_SMC(CG_FTV_0, 0);
1547 WREG32_SMC(CG_FTV_1, 0);
1548 WREG32_SMC(CG_FTV_2, 0);
1549 WREG32_SMC(CG_FTV_3, 0);
1550 WREG32_SMC(CG_FTV_4, 0);
1551 WREG32_SMC(CG_FTV_5, 0);
1552 WREG32_SMC(CG_FTV_6, 0);
1553 WREG32_SMC(CG_FTV_7, 0);
1554}
1555
1556static int ci_upload_firmware(struct radeon_device *rdev)
1557{
1558 struct ci_power_info *pi = ci_get_pi(rdev);
1559 int i, ret;
1560
1561 for (i = 0; i < rdev->usec_timeout; i++) {
1562 if (RREG32_SMC(RCU_UC_EVENTS) & BOOT_SEQ_DONE)
1563 break;
1564 }
1565 WREG32_SMC(SMC_SYSCON_MISC_CNTL, 1);
1566
1567 ci_stop_smc_clock(rdev);
1568 ci_reset_smc(rdev);
1569
1570 ret = ci_load_smc_ucode(rdev, pi->sram_end);
1571
1572 return ret;
1573
1574}
1575
1576static int ci_get_svi2_voltage_table(struct radeon_device *rdev,
1577 struct radeon_clock_voltage_dependency_table *voltage_dependency_table,
1578 struct atom_voltage_table *voltage_table)
1579{
1580 u32 i;
1581
1582 if (voltage_dependency_table == NULL)
1583 return -EINVAL;
1584
1585 voltage_table->mask_low = 0;
1586 voltage_table->phase_delay = 0;
1587
1588 voltage_table->count = voltage_dependency_table->count;
1589 for (i = 0; i < voltage_table->count; i++) {
1590 voltage_table->entries[i].value = voltage_dependency_table->entries[i].v;
1591 voltage_table->entries[i].smio_low = 0;
1592 }
1593
1594 return 0;
1595}
1596
1597static int ci_construct_voltage_tables(struct radeon_device *rdev)
1598{
1599 struct ci_power_info *pi = ci_get_pi(rdev);
1600 int ret;
1601
1602 if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
1603 ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDC,
1604 VOLTAGE_OBJ_GPIO_LUT,
1605 &pi->vddc_voltage_table);
1606 if (ret)
1607 return ret;
1608 } else if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
1609 ret = ci_get_svi2_voltage_table(rdev,
1610 &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
1611 &pi->vddc_voltage_table);
1612 if (ret)
1613 return ret;
1614 }
1615
1616 if (pi->vddc_voltage_table.count > SMU7_MAX_LEVELS_VDDC)
1617 si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_VDDC,
1618 &pi->vddc_voltage_table);
1619
1620 if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
1621 ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDCI,
1622 VOLTAGE_OBJ_GPIO_LUT,
1623 &pi->vddci_voltage_table);
1624 if (ret)
1625 return ret;
1626 } else if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
1627 ret = ci_get_svi2_voltage_table(rdev,
1628 &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
1629 &pi->vddci_voltage_table);
1630 if (ret)
1631 return ret;
1632 }
1633
1634 if (pi->vddci_voltage_table.count > SMU7_MAX_LEVELS_VDDCI)
1635 si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_VDDCI,
1636 &pi->vddci_voltage_table);
1637
1638 if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
1639 ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_MVDDC,
1640 VOLTAGE_OBJ_GPIO_LUT,
1641 &pi->mvdd_voltage_table);
1642 if (ret)
1643 return ret;
1644 } else if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
1645 ret = ci_get_svi2_voltage_table(rdev,
1646 &rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
1647 &pi->mvdd_voltage_table);
1648 if (ret)
1649 return ret;
1650 }
1651
1652 if (pi->mvdd_voltage_table.count > SMU7_MAX_LEVELS_MVDD)
1653 si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_MVDD,
1654 &pi->mvdd_voltage_table);
1655
1656 return 0;
1657}
1658
1659static void ci_populate_smc_voltage_table(struct radeon_device *rdev,
1660 struct atom_voltage_table_entry *voltage_table,
1661 SMU7_Discrete_VoltageLevel *smc_voltage_table)
1662{
1663 int ret;
1664
1665 ret = ci_get_std_voltage_value_sidd(rdev, voltage_table,
1666 &smc_voltage_table->StdVoltageHiSidd,
1667 &smc_voltage_table->StdVoltageLoSidd);
1668
1669 if (ret) {
1670 smc_voltage_table->StdVoltageHiSidd = voltage_table->value * VOLTAGE_SCALE;
1671 smc_voltage_table->StdVoltageLoSidd = voltage_table->value * VOLTAGE_SCALE;
1672 }
1673
1674 smc_voltage_table->Voltage = cpu_to_be16(voltage_table->value * VOLTAGE_SCALE);
1675 smc_voltage_table->StdVoltageHiSidd =
1676 cpu_to_be16(smc_voltage_table->StdVoltageHiSidd);
1677 smc_voltage_table->StdVoltageLoSidd =
1678 cpu_to_be16(smc_voltage_table->StdVoltageLoSidd);
1679}
1680
1681static int ci_populate_smc_vddc_table(struct radeon_device *rdev,
1682 SMU7_Discrete_DpmTable *table)
1683{
1684 struct ci_power_info *pi = ci_get_pi(rdev);
1685 unsigned int count;
1686
1687 table->VddcLevelCount = pi->vddc_voltage_table.count;
1688 for (count = 0; count < table->VddcLevelCount; count++) {
1689 ci_populate_smc_voltage_table(rdev,
1690 &pi->vddc_voltage_table.entries[count],
1691 &table->VddcLevel[count]);
1692
1693 if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
1694 table->VddcLevel[count].Smio |=
1695 pi->vddc_voltage_table.entries[count].smio_low;
1696 else
1697 table->VddcLevel[count].Smio = 0;
1698 }
1699 table->VddcLevelCount = cpu_to_be32(table->VddcLevelCount);
1700
1701 return 0;
1702}
1703
1704static int ci_populate_smc_vddci_table(struct radeon_device *rdev,
1705 SMU7_Discrete_DpmTable *table)
1706{
1707 unsigned int count;
1708 struct ci_power_info *pi = ci_get_pi(rdev);
1709
1710 table->VddciLevelCount = pi->vddci_voltage_table.count;
1711 for (count = 0; count < table->VddciLevelCount; count++) {
1712 ci_populate_smc_voltage_table(rdev,
1713 &pi->vddci_voltage_table.entries[count],
1714 &table->VddciLevel[count]);
1715
1716 if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
1717 table->VddciLevel[count].Smio |=
1718 pi->vddci_voltage_table.entries[count].smio_low;
1719 else
1720 table->VddciLevel[count].Smio = 0;
1721 }
1722 table->VddciLevelCount = cpu_to_be32(table->VddciLevelCount);
1723
1724 return 0;
1725}
1726
1727static int ci_populate_smc_mvdd_table(struct radeon_device *rdev,
1728 SMU7_Discrete_DpmTable *table)
1729{
1730 struct ci_power_info *pi = ci_get_pi(rdev);
1731 unsigned int count;
1732
1733 table->MvddLevelCount = pi->mvdd_voltage_table.count;
1734 for (count = 0; count < table->MvddLevelCount; count++) {
1735 ci_populate_smc_voltage_table(rdev,
1736 &pi->mvdd_voltage_table.entries[count],
1737 &table->MvddLevel[count]);
1738
1739 if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
1740 table->MvddLevel[count].Smio |=
1741 pi->mvdd_voltage_table.entries[count].smio_low;
1742 else
1743 table->MvddLevel[count].Smio = 0;
1744 }
1745 table->MvddLevelCount = cpu_to_be32(table->MvddLevelCount);
1746
1747 return 0;
1748}
1749
1750static int ci_populate_smc_voltage_tables(struct radeon_device *rdev,
1751 SMU7_Discrete_DpmTable *table)
1752{
1753 int ret;
1754
1755 ret = ci_populate_smc_vddc_table(rdev, table);
1756 if (ret)
1757 return ret;
1758
1759 ret = ci_populate_smc_vddci_table(rdev, table);
1760 if (ret)
1761 return ret;
1762
1763 ret = ci_populate_smc_mvdd_table(rdev, table);
1764 if (ret)
1765 return ret;
1766
1767 return 0;
1768}
1769
1770static int ci_populate_mvdd_value(struct radeon_device *rdev, u32 mclk,
1771 SMU7_Discrete_VoltageLevel *voltage)
1772{
1773 struct ci_power_info *pi = ci_get_pi(rdev);
1774 u32 i = 0;
1775
1776 if (pi->mvdd_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
1777 for (i = 0; i < rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count; i++) {
1778 if (mclk <= rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries[i].clk) {
1779 voltage->Voltage = pi->mvdd_voltage_table.entries[i].value;
1780 break;
1781 }
1782 }
1783
1784 if (i >= rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count)
1785 return -EINVAL;
1786 }
1787
1788 return -EINVAL;
1789}
1790
1791static int ci_get_std_voltage_value_sidd(struct radeon_device *rdev,
1792 struct atom_voltage_table_entry *voltage_table,
1793 u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd)
1794{
1795 u16 v_index, idx;
1796 bool voltage_found = false;
1797 *std_voltage_hi_sidd = voltage_table->value * VOLTAGE_SCALE;
1798 *std_voltage_lo_sidd = voltage_table->value * VOLTAGE_SCALE;
1799
1800 if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries == NULL)
1801 return -EINVAL;
1802
1803 if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries) {
1804 for (v_index = 0; (u32)v_index < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
1805 if (voltage_table->value ==
1806 rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
1807 voltage_found = true;
1808 if ((u32)v_index < rdev->pm.dpm.dyn_state.cac_leakage_table.count)
1809 idx = v_index;
1810 else
1811 idx = rdev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
1812 *std_voltage_lo_sidd =
1813 rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
1814 *std_voltage_hi_sidd =
1815 rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
1816 break;
1817 }
1818 }
1819
1820 if (!voltage_found) {
1821 for (v_index = 0; (u32)v_index < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
1822 if (voltage_table->value <=
1823 rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
1824 voltage_found = true;
1825 if ((u32)v_index < rdev->pm.dpm.dyn_state.cac_leakage_table.count)
1826 idx = v_index;
1827 else
1828 idx = rdev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
1829 *std_voltage_lo_sidd =
1830 rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
1831 *std_voltage_hi_sidd =
1832 rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
1833 break;
1834 }
1835 }
1836 }
1837 }
1838
1839 return 0;
1840}
1841
1842static void ci_populate_phase_value_based_on_sclk(struct radeon_device *rdev,
1843 const struct radeon_phase_shedding_limits_table *limits,
1844 u32 sclk,
1845 u32 *phase_shedding)
1846{
1847 unsigned int i;
1848
1849 *phase_shedding = 1;
1850
1851 for (i = 0; i < limits->count; i++) {
1852 if (sclk < limits->entries[i].sclk) {
1853 *phase_shedding = i;
1854 break;
1855 }
1856 }
1857}
1858
1859static void ci_populate_phase_value_based_on_mclk(struct radeon_device *rdev,
1860 const struct radeon_phase_shedding_limits_table *limits,
1861 u32 mclk,
1862 u32 *phase_shedding)
1863{
1864 unsigned int i;
1865
1866 *phase_shedding = 1;
1867
1868 for (i = 0; i < limits->count; i++) {
1869 if (mclk < limits->entries[i].mclk) {
1870 *phase_shedding = i;
1871 break;
1872 }
1873 }
1874}
1875
1876static int ci_init_arb_table_index(struct radeon_device *rdev)
1877{
1878 struct ci_power_info *pi = ci_get_pi(rdev);
1879 u32 tmp;
1880 int ret;
1881
1882 ret = ci_read_smc_sram_dword(rdev, pi->arb_table_start,
1883 &tmp, pi->sram_end);
1884 if (ret)
1885 return ret;
1886
1887 tmp &= 0x00FFFFFF;
1888 tmp |= MC_CG_ARB_FREQ_F1 << 24;
1889
1890 return ci_write_smc_sram_dword(rdev, pi->arb_table_start,
1891 tmp, pi->sram_end);
1892}
1893
1894static int ci_get_dependency_volt_by_clk(struct radeon_device *rdev,
1895 struct radeon_clock_voltage_dependency_table *allowed_clock_voltage_table,
1896 u32 clock, u32 *voltage)
1897{
1898 u32 i = 0;
1899
1900 if (allowed_clock_voltage_table->count == 0)
1901 return -EINVAL;
1902
1903 for (i = 0; i < allowed_clock_voltage_table->count; i++) {
1904 if (allowed_clock_voltage_table->entries[i].clk >= clock) {
1905 *voltage = allowed_clock_voltage_table->entries[i].v;
1906 return 0;
1907 }
1908 }
1909
1910 *voltage = allowed_clock_voltage_table->entries[i-1].v;
1911
1912 return 0;
1913}
1914
1915static u8 ci_get_sleep_divider_id_from_clock(struct radeon_device *rdev,
1916 u32 sclk, u32 min_sclk_in_sr)
1917{
1918 u32 i;
1919 u32 tmp;
1920 u32 min = (min_sclk_in_sr > CISLAND_MINIMUM_ENGINE_CLOCK) ?
1921 min_sclk_in_sr : CISLAND_MINIMUM_ENGINE_CLOCK;
1922
1923 if (sclk < min)
1924 return 0;
1925
1926 for (i = CISLAND_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) {
1927 tmp = sclk / (1 << i);
1928 if (tmp >= min || i == 0)
1929 break;
1930 }
1931
1932 return (u8)i;
1933}
1934
1935static int ci_initial_switch_from_arb_f0_to_f1(struct radeon_device *rdev)
1936{
1937 return ni_copy_and_switch_arb_sets(rdev, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
1938}
1939
1940static int ci_reset_to_default(struct radeon_device *rdev)
1941{
1942 return (ci_send_msg_to_smc(rdev, PPSMC_MSG_ResetToDefaults) == PPSMC_Result_OK) ?
1943 0 : -EINVAL;
1944}
1945
1946static int ci_force_switch_to_arb_f0(struct radeon_device *rdev)
1947{
1948 u32 tmp;
1949
1950 tmp = (RREG32_SMC(SMC_SCRATCH9) & 0x0000ff00) >> 8;
1951
1952 if (tmp == MC_CG_ARB_FREQ_F0)
1953 return 0;
1954
1955 return ni_copy_and_switch_arb_sets(rdev, tmp, MC_CG_ARB_FREQ_F0);
1956}
1957
1958static int ci_populate_memory_timing_parameters(struct radeon_device *rdev,
1959 u32 sclk,
1960 u32 mclk,
1961 SMU7_Discrete_MCArbDramTimingTableEntry *arb_regs)
1962{
1963 u32 dram_timing;
1964 u32 dram_timing2;
1965 u32 burst_time;
1966
1967 radeon_atom_set_engine_dram_timings(rdev, sclk, mclk);
1968
1969 dram_timing = RREG32(MC_ARB_DRAM_TIMING);
1970 dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
1971 burst_time = RREG32(MC_ARB_BURST_TIME) & STATE0_MASK;
1972
1973 arb_regs->McArbDramTiming = cpu_to_be32(dram_timing);
1974 arb_regs->McArbDramTiming2 = cpu_to_be32(dram_timing2);
1975 arb_regs->McArbBurstTime = (u8)burst_time;
1976
1977 return 0;
1978}
1979
1980static int ci_do_program_memory_timing_parameters(struct radeon_device *rdev)
1981{
1982 struct ci_power_info *pi = ci_get_pi(rdev);
1983 SMU7_Discrete_MCArbDramTimingTable arb_regs;
1984 u32 i, j;
1985 int ret = 0;
1986
1987 memset(&arb_regs, 0, sizeof(SMU7_Discrete_MCArbDramTimingTable));
1988
1989 for (i = 0; i < pi->dpm_table.sclk_table.count; i++) {
1990 for (j = 0; j < pi->dpm_table.mclk_table.count; j++) {
1991 ret = ci_populate_memory_timing_parameters(rdev,
1992 pi->dpm_table.sclk_table.dpm_levels[i].value,
1993 pi->dpm_table.mclk_table.dpm_levels[j].value,
1994 &arb_regs.entries[i][j]);
1995 if (ret)
1996 break;
1997 }
1998 }
1999
2000 if (ret == 0)
2001 ret = ci_copy_bytes_to_smc(rdev,
2002 pi->arb_table_start,
2003 (u8 *)&arb_regs,
2004 sizeof(SMU7_Discrete_MCArbDramTimingTable),
2005 pi->sram_end);
2006
2007 return ret;
2008}
2009
2010static int ci_program_memory_timing_parameters(struct radeon_device *rdev)
2011{
2012 struct ci_power_info *pi = ci_get_pi(rdev);
2013
2014 if (pi->need_update_smu7_dpm_table == 0)
2015 return 0;
2016
2017 return ci_do_program_memory_timing_parameters(rdev);
2018}
2019
2020static void ci_populate_smc_initial_state(struct radeon_device *rdev,
2021 struct radeon_ps *radeon_boot_state)
2022{
2023 struct ci_ps *boot_state = ci_get_ps(radeon_boot_state);
2024 struct ci_power_info *pi = ci_get_pi(rdev);
2025 u32 level = 0;
2026
2027 for (level = 0; level < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; level++) {
2028 if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[level].clk >=
2029 boot_state->performance_levels[0].sclk) {
2030 pi->smc_state_table.GraphicsBootLevel = level;
2031 break;
2032 }
2033 }
2034
2035 for (level = 0; level < rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.count; level++) {
2036 if (rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries[level].clk >=
2037 boot_state->performance_levels[0].mclk) {
2038 pi->smc_state_table.MemoryBootLevel = level;
2039 break;
2040 }
2041 }
2042}
2043
2044static u32 ci_get_dpm_level_enable_mask_value(struct ci_single_dpm_table *dpm_table)
2045{
2046 u32 i;
2047 u32 mask_value = 0;
2048
2049 for (i = dpm_table->count; i > 0; i--) {
2050 mask_value = mask_value << 1;
2051 if (dpm_table->dpm_levels[i-1].enabled)
2052 mask_value |= 0x1;
2053 else
2054 mask_value &= 0xFFFFFFFE;
2055 }
2056
2057 return mask_value;
2058}
2059
2060static void ci_populate_smc_link_level(struct radeon_device *rdev,
2061 SMU7_Discrete_DpmTable *table)
2062{
2063 struct ci_power_info *pi = ci_get_pi(rdev);
2064 struct ci_dpm_table *dpm_table = &pi->dpm_table;
2065 u32 i;
2066
2067 for (i = 0; i < dpm_table->pcie_speed_table.count; i++) {
2068 table->LinkLevel[i].PcieGenSpeed =
2069 (u8)dpm_table->pcie_speed_table.dpm_levels[i].value;
2070 table->LinkLevel[i].PcieLaneCount =
2071 r600_encode_pci_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
2072 table->LinkLevel[i].EnabledForActivity = 1;
2073 table->LinkLevel[i].DownT = cpu_to_be32(5);
2074 table->LinkLevel[i].UpT = cpu_to_be32(30);
2075 }
2076
2077 pi->smc_state_table.LinkLevelCount = (u8)dpm_table->pcie_speed_table.count;
2078 pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
2079 ci_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
2080}
2081
2082static int ci_populate_smc_uvd_level(struct radeon_device *rdev,
2083 SMU7_Discrete_DpmTable *table)
2084{
2085 u32 count;
2086 struct atom_clock_dividers dividers;
2087 int ret = -EINVAL;
2088
2089 table->UvdLevelCount =
2090 rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count;
2091
2092 for (count = 0; count < table->UvdLevelCount; count++) {
2093 table->UvdLevel[count].VclkFrequency =
2094 rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].vclk;
2095 table->UvdLevel[count].DclkFrequency =
2096 rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].dclk;
2097 table->UvdLevel[count].MinVddc =
2098 rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2099 table->UvdLevel[count].MinVddcPhases = 1;
2100
2101 ret = radeon_atom_get_clock_dividers(rdev,
2102 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2103 table->UvdLevel[count].VclkFrequency, false, &dividers);
2104 if (ret)
2105 return ret;
2106
2107 table->UvdLevel[count].VclkDivider = (u8)dividers.post_divider;
2108
2109 ret = radeon_atom_get_clock_dividers(rdev,
2110 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2111 table->UvdLevel[count].DclkFrequency, false, &dividers);
2112 if (ret)
2113 return ret;
2114
2115 table->UvdLevel[count].DclkDivider = (u8)dividers.post_divider;
2116
2117 table->UvdLevel[count].VclkFrequency = cpu_to_be32(table->UvdLevel[count].VclkFrequency);
2118 table->UvdLevel[count].DclkFrequency = cpu_to_be32(table->UvdLevel[count].DclkFrequency);
2119 table->UvdLevel[count].MinVddc = cpu_to_be16(table->UvdLevel[count].MinVddc);
2120 }
2121
2122 return ret;
2123}
2124
2125static int ci_populate_smc_vce_level(struct radeon_device *rdev,
2126 SMU7_Discrete_DpmTable *table)
2127{
2128 u32 count;
2129 struct atom_clock_dividers dividers;
2130 int ret = -EINVAL;
2131
2132 table->VceLevelCount =
2133 rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count;
2134
2135 for (count = 0; count < table->VceLevelCount; count++) {
2136 table->VceLevel[count].Frequency =
2137 rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].evclk;
2138 table->VceLevel[count].MinVoltage =
2139 (u16)rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2140 table->VceLevel[count].MinPhases = 1;
2141
2142 ret = radeon_atom_get_clock_dividers(rdev,
2143 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2144 table->VceLevel[count].Frequency, false, &dividers);
2145 if (ret)
2146 return ret;
2147
2148 table->VceLevel[count].Divider = (u8)dividers.post_divider;
2149
2150 table->VceLevel[count].Frequency = cpu_to_be32(table->VceLevel[count].Frequency);
2151 table->VceLevel[count].MinVoltage = cpu_to_be16(table->VceLevel[count].MinVoltage);
2152 }
2153
2154 return ret;
2155
2156}
2157
2158static int ci_populate_smc_acp_level(struct radeon_device *rdev,
2159 SMU7_Discrete_DpmTable *table)
2160{
2161 u32 count;
2162 struct atom_clock_dividers dividers;
2163 int ret = -EINVAL;
2164
2165 table->AcpLevelCount = (u8)
2166 (rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count);
2167
2168 for (count = 0; count < table->AcpLevelCount; count++) {
2169 table->AcpLevel[count].Frequency =
2170 rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].clk;
2171 table->AcpLevel[count].MinVoltage =
2172 rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].v;
2173 table->AcpLevel[count].MinPhases = 1;
2174
2175 ret = radeon_atom_get_clock_dividers(rdev,
2176 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2177 table->AcpLevel[count].Frequency, false, &dividers);
2178 if (ret)
2179 return ret;
2180
2181 table->AcpLevel[count].Divider = (u8)dividers.post_divider;
2182
2183 table->AcpLevel[count].Frequency = cpu_to_be32(table->AcpLevel[count].Frequency);
2184 table->AcpLevel[count].MinVoltage = cpu_to_be16(table->AcpLevel[count].MinVoltage);
2185 }
2186
2187 return ret;
2188}
2189
2190static int ci_populate_smc_samu_level(struct radeon_device *rdev,
2191 SMU7_Discrete_DpmTable *table)
2192{
2193 u32 count;
2194 struct atom_clock_dividers dividers;
2195 int ret = -EINVAL;
2196
2197 table->SamuLevelCount =
2198 rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count;
2199
2200 for (count = 0; count < table->SamuLevelCount; count++) {
2201 table->SamuLevel[count].Frequency =
2202 rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].clk;
2203 table->SamuLevel[count].MinVoltage =
2204 rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2205 table->SamuLevel[count].MinPhases = 1;
2206
2207 ret = radeon_atom_get_clock_dividers(rdev,
2208 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2209 table->SamuLevel[count].Frequency, false, &dividers);
2210 if (ret)
2211 return ret;
2212
2213 table->SamuLevel[count].Divider = (u8)dividers.post_divider;
2214
2215 table->SamuLevel[count].Frequency = cpu_to_be32(table->SamuLevel[count].Frequency);
2216 table->SamuLevel[count].MinVoltage = cpu_to_be16(table->SamuLevel[count].MinVoltage);
2217 }
2218
2219 return ret;
2220}
2221
2222static int ci_calculate_mclk_params(struct radeon_device *rdev,
2223 u32 memory_clock,
2224 SMU7_Discrete_MemoryLevel *mclk,
2225 bool strobe_mode,
2226 bool dll_state_on)
2227{
2228 struct ci_power_info *pi = ci_get_pi(rdev);
2229 u32 dll_cntl = pi->clock_registers.dll_cntl;
2230 u32 mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
2231 u32 mpll_ad_func_cntl = pi->clock_registers.mpll_ad_func_cntl;
2232 u32 mpll_dq_func_cntl = pi->clock_registers.mpll_dq_func_cntl;
2233 u32 mpll_func_cntl = pi->clock_registers.mpll_func_cntl;
2234 u32 mpll_func_cntl_1 = pi->clock_registers.mpll_func_cntl_1;
2235 u32 mpll_func_cntl_2 = pi->clock_registers.mpll_func_cntl_2;
2236 u32 mpll_ss1 = pi->clock_registers.mpll_ss1;
2237 u32 mpll_ss2 = pi->clock_registers.mpll_ss2;
2238 struct atom_mpll_param mpll_param;
2239 int ret;
2240
2241 ret = radeon_atom_get_memory_pll_dividers(rdev, memory_clock, strobe_mode, &mpll_param);
2242 if (ret)
2243 return ret;
2244
2245 mpll_func_cntl &= ~BWCTRL_MASK;
2246 mpll_func_cntl |= BWCTRL(mpll_param.bwcntl);
2247
2248 mpll_func_cntl_1 &= ~(CLKF_MASK | CLKFRAC_MASK | VCO_MODE_MASK);
2249 mpll_func_cntl_1 |= CLKF(mpll_param.clkf) |
2250 CLKFRAC(mpll_param.clkfrac) | VCO_MODE(mpll_param.vco_mode);
2251
2252 mpll_ad_func_cntl &= ~YCLK_POST_DIV_MASK;
2253 mpll_ad_func_cntl |= YCLK_POST_DIV(mpll_param.post_div);
2254
2255 if (pi->mem_gddr5) {
2256 mpll_dq_func_cntl &= ~(YCLK_SEL_MASK | YCLK_POST_DIV_MASK);
2257 mpll_dq_func_cntl |= YCLK_SEL(mpll_param.yclk_sel) |
2258 YCLK_POST_DIV(mpll_param.post_div);
2259 }
2260
2261 if (pi->caps_mclk_ss_support) {
2262 struct radeon_atom_ss ss;
2263 u32 freq_nom;
2264 u32 tmp;
2265 u32 reference_clock = rdev->clock.mpll.reference_freq;
2266
2267 if (pi->mem_gddr5)
2268 freq_nom = memory_clock * 4;
2269 else
2270 freq_nom = memory_clock * 2;
2271
2272 tmp = (freq_nom / reference_clock);
2273 tmp = tmp * tmp;
2274 if (radeon_atombios_get_asic_ss_info(rdev, &ss,
2275 ASIC_INTERNAL_MEMORY_SS, freq_nom)) {
2276 u32 clks = reference_clock * 5 / ss.rate;
2277 u32 clkv = (u32)((((131 * ss.percentage * ss.rate) / 100) * tmp) / freq_nom);
2278
2279 mpll_ss1 &= ~CLKV_MASK;
2280 mpll_ss1 |= CLKV(clkv);
2281
2282 mpll_ss2 &= ~CLKS_MASK;
2283 mpll_ss2 |= CLKS(clks);
2284 }
2285 }
2286
2287 mclk_pwrmgt_cntl &= ~DLL_SPEED_MASK;
2288 mclk_pwrmgt_cntl |= DLL_SPEED(mpll_param.dll_speed);
2289
2290 if (dll_state_on)
2291 mclk_pwrmgt_cntl |= MRDCK0_PDNB | MRDCK1_PDNB;
2292 else
2293 mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);
2294
2295 mclk->MclkFrequency = memory_clock;
2296 mclk->MpllFuncCntl = mpll_func_cntl;
2297 mclk->MpllFuncCntl_1 = mpll_func_cntl_1;
2298 mclk->MpllFuncCntl_2 = mpll_func_cntl_2;
2299 mclk->MpllAdFuncCntl = mpll_ad_func_cntl;
2300 mclk->MpllDqFuncCntl = mpll_dq_func_cntl;
2301 mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl;
2302 mclk->DllCntl = dll_cntl;
2303 mclk->MpllSs1 = mpll_ss1;
2304 mclk->MpllSs2 = mpll_ss2;
2305
2306 return 0;
2307}
2308
2309static int ci_populate_single_memory_level(struct radeon_device *rdev,
2310 u32 memory_clock,
2311 SMU7_Discrete_MemoryLevel *memory_level)
2312{
2313 struct ci_power_info *pi = ci_get_pi(rdev);
2314 int ret;
2315 bool dll_state_on;
2316
2317 if (rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries) {
2318 ret = ci_get_dependency_volt_by_clk(rdev,
2319 &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
2320 memory_clock, &memory_level->MinVddc);
2321 if (ret)
2322 return ret;
2323 }
2324
2325 if (rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries) {
2326 ret = ci_get_dependency_volt_by_clk(rdev,
2327 &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
2328 memory_clock, &memory_level->MinVddci);
2329 if (ret)
2330 return ret;
2331 }
2332
2333 if (rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries) {
2334 ret = ci_get_dependency_volt_by_clk(rdev,
2335 &rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
2336 memory_clock, &memory_level->MinMvdd);
2337 if (ret)
2338 return ret;
2339 }
2340
2341 memory_level->MinVddcPhases = 1;
2342
2343 if (pi->vddc_phase_shed_control)
2344 ci_populate_phase_value_based_on_mclk(rdev,
2345 &rdev->pm.dpm.dyn_state.phase_shedding_limits_table,
2346 memory_clock,
2347 &memory_level->MinVddcPhases);
2348
2349 memory_level->EnabledForThrottle = 1;
2350 memory_level->EnabledForActivity = 1;
2351 memory_level->UpH = 0;
2352 memory_level->DownH = 100;
2353 memory_level->VoltageDownH = 0;
2354 memory_level->ActivityLevel = (u16)pi->mclk_activity_target;
2355
2356 memory_level->StutterEnable = false;
2357 memory_level->StrobeEnable = false;
2358 memory_level->EdcReadEnable = false;
2359 memory_level->EdcWriteEnable = false;
2360 memory_level->RttEnable = false;
2361
2362 memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
2363
2364 if (pi->mclk_stutter_mode_threshold &&
2365 (memory_clock <= pi->mclk_stutter_mode_threshold) &&
2366 (pi->uvd_enabled == false) &&
2367 (RREG32(DPG_PIPE_STUTTER_CONTROL) & STUTTER_ENABLE) &&
2368 (rdev->pm.dpm.new_active_crtc_count <= 2))
2369 memory_level->StutterEnable = true;
2370
2371 if (pi->mclk_strobe_mode_threshold &&
2372 (memory_clock <= pi->mclk_strobe_mode_threshold))
2373 memory_level->StrobeEnable = 1;
2374
2375 if (pi->mem_gddr5) {
2376 memory_level->StrobeRatio =
2377 si_get_mclk_frequency_ratio(memory_clock, memory_level->StrobeEnable);
2378 if (pi->mclk_edc_enable_threshold &&
2379 (memory_clock > pi->mclk_edc_enable_threshold))
2380 memory_level->EdcReadEnable = true;
2381
2382 if (pi->mclk_edc_wr_enable_threshold &&
2383 (memory_clock > pi->mclk_edc_wr_enable_threshold))
2384 memory_level->EdcWriteEnable = true;
2385
2386 if (memory_level->StrobeEnable) {
2387 if (si_get_mclk_frequency_ratio(memory_clock, true) >=
2388 ((RREG32(MC_SEQ_MISC7) >> 16) & 0xf))
2389 dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
2390 else
2391 dll_state_on = ((RREG32(MC_SEQ_MISC6) >> 1) & 0x1) ? true : false;
2392 } else {
2393 dll_state_on = pi->dll_default_on;
2394 }
2395 } else {
2396 memory_level->StrobeRatio = si_get_ddr3_mclk_frequency_ratio(memory_clock);
2397 dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
2398 }
2399
2400 ret = ci_calculate_mclk_params(rdev, memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);
2401 if (ret)
2402 return ret;
2403
2404 memory_level->MinVddc = cpu_to_be32(memory_level->MinVddc * VOLTAGE_SCALE);
2405 memory_level->MinVddcPhases = cpu_to_be32(memory_level->MinVddcPhases);
2406 memory_level->MinVddci = cpu_to_be32(memory_level->MinVddci * VOLTAGE_SCALE);
2407 memory_level->MinMvdd = cpu_to_be32(memory_level->MinMvdd * VOLTAGE_SCALE);
2408
2409 memory_level->MclkFrequency = cpu_to_be32(memory_level->MclkFrequency);
2410 memory_level->ActivityLevel = cpu_to_be16(memory_level->ActivityLevel);
2411 memory_level->MpllFuncCntl = cpu_to_be32(memory_level->MpllFuncCntl);
2412 memory_level->MpllFuncCntl_1 = cpu_to_be32(memory_level->MpllFuncCntl_1);
2413 memory_level->MpllFuncCntl_2 = cpu_to_be32(memory_level->MpllFuncCntl_2);
2414 memory_level->MpllAdFuncCntl = cpu_to_be32(memory_level->MpllAdFuncCntl);
2415 memory_level->MpllDqFuncCntl = cpu_to_be32(memory_level->MpllDqFuncCntl);
2416 memory_level->MclkPwrmgtCntl = cpu_to_be32(memory_level->MclkPwrmgtCntl);
2417 memory_level->DllCntl = cpu_to_be32(memory_level->DllCntl);
2418 memory_level->MpllSs1 = cpu_to_be32(memory_level->MpllSs1);
2419 memory_level->MpllSs2 = cpu_to_be32(memory_level->MpllSs2);
2420
2421 return 0;
2422}
2423
2424static int ci_populate_smc_acpi_level(struct radeon_device *rdev,
2425 SMU7_Discrete_DpmTable *table)
2426{
2427 struct ci_power_info *pi = ci_get_pi(rdev);
2428 struct atom_clock_dividers dividers;
2429 SMU7_Discrete_VoltageLevel voltage_level;
2430 u32 spll_func_cntl = pi->clock_registers.cg_spll_func_cntl;
2431 u32 spll_func_cntl_2 = pi->clock_registers.cg_spll_func_cntl_2;
2432 u32 dll_cntl = pi->clock_registers.dll_cntl;
2433 u32 mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
2434 int ret;
2435
2436 table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
2437
2438 if (pi->acpi_vddc)
2439 table->ACPILevel.MinVddc = cpu_to_be32(pi->acpi_vddc * VOLTAGE_SCALE);
2440 else
2441 table->ACPILevel.MinVddc = cpu_to_be32(pi->min_vddc_in_pp_table * VOLTAGE_SCALE);
2442
2443 table->ACPILevel.MinVddcPhases = pi->vddc_phase_shed_control ? 0 : 1;
2444
2445 table->ACPILevel.SclkFrequency = rdev->clock.spll.reference_freq;
2446
2447 ret = radeon_atom_get_clock_dividers(rdev,
2448 COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
2449 table->ACPILevel.SclkFrequency, false, &dividers);
2450 if (ret)
2451 return ret;
2452
2453 table->ACPILevel.SclkDid = (u8)dividers.post_divider;
2454 table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
2455 table->ACPILevel.DeepSleepDivId = 0;
2456
2457 spll_func_cntl &= ~SPLL_PWRON;
2458 spll_func_cntl |= SPLL_RESET;
2459
2460 spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
2461 spll_func_cntl_2 |= SCLK_MUX_SEL(4);
2462
2463 table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
2464 table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
2465 table->ACPILevel.CgSpllFuncCntl3 = pi->clock_registers.cg_spll_func_cntl_3;
2466 table->ACPILevel.CgSpllFuncCntl4 = pi->clock_registers.cg_spll_func_cntl_4;
2467 table->ACPILevel.SpllSpreadSpectrum = pi->clock_registers.cg_spll_spread_spectrum;
2468 table->ACPILevel.SpllSpreadSpectrum2 = pi->clock_registers.cg_spll_spread_spectrum_2;
2469 table->ACPILevel.CcPwrDynRm = 0;
2470 table->ACPILevel.CcPwrDynRm1 = 0;
2471
2472 table->ACPILevel.Flags = cpu_to_be32(table->ACPILevel.Flags);
2473 table->ACPILevel.MinVddcPhases = cpu_to_be32(table->ACPILevel.MinVddcPhases);
2474 table->ACPILevel.SclkFrequency = cpu_to_be32(table->ACPILevel.SclkFrequency);
2475 table->ACPILevel.CgSpllFuncCntl = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl);
2476 table->ACPILevel.CgSpllFuncCntl2 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl2);
2477 table->ACPILevel.CgSpllFuncCntl3 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl3);
2478 table->ACPILevel.CgSpllFuncCntl4 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl4);
2479 table->ACPILevel.SpllSpreadSpectrum = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum);
2480 table->ACPILevel.SpllSpreadSpectrum2 = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum2);
2481 table->ACPILevel.CcPwrDynRm = cpu_to_be32(table->ACPILevel.CcPwrDynRm);
2482 table->ACPILevel.CcPwrDynRm1 = cpu_to_be32(table->ACPILevel.CcPwrDynRm1);
2483
2484 table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc;
2485 table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;
2486
2487 if (pi->vddci_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
2488 if (pi->acpi_vddci)
2489 table->MemoryACPILevel.MinVddci =
2490 cpu_to_be32(pi->acpi_vddci * VOLTAGE_SCALE);
2491 else
2492 table->MemoryACPILevel.MinVddci =
2493 cpu_to_be32(pi->min_vddci_in_pp_table * VOLTAGE_SCALE);
2494 }
2495
2496 if (ci_populate_mvdd_value(rdev, 0, &voltage_level))
2497 table->MemoryACPILevel.MinMvdd = 0;
2498 else
2499 table->MemoryACPILevel.MinMvdd =
2500 cpu_to_be32(voltage_level.Voltage * VOLTAGE_SCALE);
2501
2502 mclk_pwrmgt_cntl |= MRDCK0_RESET | MRDCK1_RESET;
2503 mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);
2504
2505 dll_cntl &= ~(MRDCK0_BYPASS | MRDCK1_BYPASS);
2506
2507 table->MemoryACPILevel.DllCntl = cpu_to_be32(dll_cntl);
2508 table->MemoryACPILevel.MclkPwrmgtCntl = cpu_to_be32(mclk_pwrmgt_cntl);
2509 table->MemoryACPILevel.MpllAdFuncCntl =
2510 cpu_to_be32(pi->clock_registers.mpll_ad_func_cntl);
2511 table->MemoryACPILevel.MpllDqFuncCntl =
2512 cpu_to_be32(pi->clock_registers.mpll_dq_func_cntl);
2513 table->MemoryACPILevel.MpllFuncCntl =
2514 cpu_to_be32(pi->clock_registers.mpll_func_cntl);
2515 table->MemoryACPILevel.MpllFuncCntl_1 =
2516 cpu_to_be32(pi->clock_registers.mpll_func_cntl_1);
2517 table->MemoryACPILevel.MpllFuncCntl_2 =
2518 cpu_to_be32(pi->clock_registers.mpll_func_cntl_2);
2519 table->MemoryACPILevel.MpllSs1 = cpu_to_be32(pi->clock_registers.mpll_ss1);
2520 table->MemoryACPILevel.MpllSs2 = cpu_to_be32(pi->clock_registers.mpll_ss2);
2521
2522 table->MemoryACPILevel.EnabledForThrottle = 0;
2523 table->MemoryACPILevel.EnabledForActivity = 0;
2524 table->MemoryACPILevel.UpH = 0;
2525 table->MemoryACPILevel.DownH = 100;
2526 table->MemoryACPILevel.VoltageDownH = 0;
2527 table->MemoryACPILevel.ActivityLevel =
2528 cpu_to_be16((u16)pi->mclk_activity_target);
2529
2530 table->MemoryACPILevel.StutterEnable = false;
2531 table->MemoryACPILevel.StrobeEnable = false;
2532 table->MemoryACPILevel.EdcReadEnable = false;
2533 table->MemoryACPILevel.EdcWriteEnable = false;
2534 table->MemoryACPILevel.RttEnable = false;
2535
2536 return 0;
2537}
2538
2539
2540static int ci_enable_ulv(struct radeon_device *rdev, bool enable)
2541{
2542 struct ci_power_info *pi = ci_get_pi(rdev);
2543 struct ci_ulv_parm *ulv = &pi->ulv;
2544
2545 if (ulv->supported) {
2546 if (enable)
2547 return (ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableULV) == PPSMC_Result_OK) ?
2548 0 : -EINVAL;
2549 else
2550 return (ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableULV) == PPSMC_Result_OK) ?
2551 0 : -EINVAL;
2552 }
2553
2554 return 0;
2555}
2556
2557static int ci_populate_ulv_level(struct radeon_device *rdev,
2558 SMU7_Discrete_Ulv *state)
2559{
2560 struct ci_power_info *pi = ci_get_pi(rdev);
2561 u16 ulv_voltage = rdev->pm.dpm.backbias_response_time;
2562
2563 state->CcPwrDynRm = 0;
2564 state->CcPwrDynRm1 = 0;
2565
2566 if (ulv_voltage == 0) {
2567 pi->ulv.supported = false;
2568 return 0;
2569 }
2570
2571 if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
2572 if (ulv_voltage > rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
2573 state->VddcOffset = 0;
2574 else
2575 state->VddcOffset =
2576 rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage;
2577 } else {
2578 if (ulv_voltage > rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
2579 state->VddcOffsetVid = 0;
2580 else
2581 state->VddcOffsetVid = (u8)
2582 ((rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage) *
2583 VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
2584 }
2585 state->VddcPhase = pi->vddc_phase_shed_control ? 0 : 1;
2586
2587 state->CcPwrDynRm = cpu_to_be32(state->CcPwrDynRm);
2588 state->CcPwrDynRm1 = cpu_to_be32(state->CcPwrDynRm1);
2589 state->VddcOffset = cpu_to_be16(state->VddcOffset);
2590
2591 return 0;
2592}
2593
2594static int ci_calculate_sclk_params(struct radeon_device *rdev,
2595 u32 engine_clock,
2596 SMU7_Discrete_GraphicsLevel *sclk)
2597{
2598 struct ci_power_info *pi = ci_get_pi(rdev);
2599 struct atom_clock_dividers dividers;
2600 u32 spll_func_cntl_3 = pi->clock_registers.cg_spll_func_cntl_3;
2601 u32 spll_func_cntl_4 = pi->clock_registers.cg_spll_func_cntl_4;
2602 u32 cg_spll_spread_spectrum = pi->clock_registers.cg_spll_spread_spectrum;
2603 u32 cg_spll_spread_spectrum_2 = pi->clock_registers.cg_spll_spread_spectrum_2;
2604 u32 reference_clock = rdev->clock.spll.reference_freq;
2605 u32 reference_divider;
2606 u32 fbdiv;
2607 int ret;
2608
2609 ret = radeon_atom_get_clock_dividers(rdev,
2610 COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
2611 engine_clock, false, &dividers);
2612 if (ret)
2613 return ret;
2614
2615 reference_divider = 1 + dividers.ref_div;
2616 fbdiv = dividers.fb_div & 0x3FFFFFF;
2617
2618 spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK;
2619 spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv);
2620 spll_func_cntl_3 |= SPLL_DITHEN;
2621
2622 if (pi->caps_sclk_ss_support) {
2623 struct radeon_atom_ss ss;
2624 u32 vco_freq = engine_clock * dividers.post_div;
2625
2626 if (radeon_atombios_get_asic_ss_info(rdev, &ss,
2627 ASIC_INTERNAL_ENGINE_SS, vco_freq)) {
2628 u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
2629 u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000);
2630
2631 cg_spll_spread_spectrum &= ~CLK_S_MASK;
2632 cg_spll_spread_spectrum |= CLK_S(clk_s);
2633 cg_spll_spread_spectrum |= SSEN;
2634
2635 cg_spll_spread_spectrum_2 &= ~CLK_V_MASK;
2636 cg_spll_spread_spectrum_2 |= CLK_V(clk_v);
2637 }
2638 }
2639
2640 sclk->SclkFrequency = engine_clock;
2641 sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
2642 sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
2643 sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
2644 sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
2645 sclk->SclkDid = (u8)dividers.post_divider;
2646
2647 return 0;
2648}
2649
2650static int ci_populate_single_graphic_level(struct radeon_device *rdev,
2651 u32 engine_clock,
2652 u16 sclk_activity_level_t,
2653 SMU7_Discrete_GraphicsLevel *graphic_level)
2654{
2655 struct ci_power_info *pi = ci_get_pi(rdev);
2656 int ret;
2657
2658 ret = ci_calculate_sclk_params(rdev, engine_clock, graphic_level);
2659 if (ret)
2660 return ret;
2661
2662 ret = ci_get_dependency_volt_by_clk(rdev,
2663 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
2664 engine_clock, &graphic_level->MinVddc);
2665 if (ret)
2666 return ret;
2667
2668 graphic_level->SclkFrequency = engine_clock;
2669
2670 graphic_level->Flags = 0;
2671 graphic_level->MinVddcPhases = 1;
2672
2673 if (pi->vddc_phase_shed_control)
2674 ci_populate_phase_value_based_on_sclk(rdev,
2675 &rdev->pm.dpm.dyn_state.phase_shedding_limits_table,
2676 engine_clock,
2677 &graphic_level->MinVddcPhases);
2678
2679 graphic_level->ActivityLevel = sclk_activity_level_t;
2680
2681 graphic_level->CcPwrDynRm = 0;
2682 graphic_level->CcPwrDynRm1 = 0;
2683 graphic_level->EnabledForActivity = 1;
2684 graphic_level->EnabledForThrottle = 1;
2685 graphic_level->UpH = 0;
2686 graphic_level->DownH = 0;
2687 graphic_level->VoltageDownH = 0;
2688 graphic_level->PowerThrottle = 0;
2689
2690 if (pi->caps_sclk_ds)
2691 graphic_level->DeepSleepDivId = ci_get_sleep_divider_id_from_clock(rdev,
2692 engine_clock,
2693 CISLAND_MINIMUM_ENGINE_CLOCK);
2694
2695 graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
2696
2697 graphic_level->Flags = cpu_to_be32(graphic_level->Flags);
2698 graphic_level->MinVddc = cpu_to_be32(graphic_level->MinVddc * VOLTAGE_SCALE);
2699 graphic_level->MinVddcPhases = cpu_to_be32(graphic_level->MinVddcPhases);
2700 graphic_level->SclkFrequency = cpu_to_be32(graphic_level->SclkFrequency);
2701 graphic_level->ActivityLevel = cpu_to_be16(graphic_level->ActivityLevel);
2702 graphic_level->CgSpllFuncCntl3 = cpu_to_be32(graphic_level->CgSpllFuncCntl3);
2703 graphic_level->CgSpllFuncCntl4 = cpu_to_be32(graphic_level->CgSpllFuncCntl4);
2704 graphic_level->SpllSpreadSpectrum = cpu_to_be32(graphic_level->SpllSpreadSpectrum);
2705 graphic_level->SpllSpreadSpectrum2 = cpu_to_be32(graphic_level->SpllSpreadSpectrum2);
2706 graphic_level->CcPwrDynRm = cpu_to_be32(graphic_level->CcPwrDynRm);
2707 graphic_level->CcPwrDynRm1 = cpu_to_be32(graphic_level->CcPwrDynRm1);
2708
2709 return 0;
2710}
2711
2712static int ci_populate_all_graphic_levels(struct radeon_device *rdev)
2713{
2714 struct ci_power_info *pi = ci_get_pi(rdev);
2715 struct ci_dpm_table *dpm_table = &pi->dpm_table;
2716 u32 level_array_address = pi->dpm_table_start +
2717 offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
2718 u32 level_array_size = sizeof(SMU7_Discrete_GraphicsLevel) *
2719 SMU7_MAX_LEVELS_GRAPHICS;
2720 SMU7_Discrete_GraphicsLevel *levels = pi->smc_state_table.GraphicsLevel;
2721 u32 i, ret;
2722
2723 memset(levels, 0, level_array_size);
2724
2725 for (i = 0; i < dpm_table->sclk_table.count; i++) {
2726 ret = ci_populate_single_graphic_level(rdev,
2727 dpm_table->sclk_table.dpm_levels[i].value,
2728 (u16)pi->activity_target[i],
2729 &pi->smc_state_table.GraphicsLevel[i]);
2730 if (ret)
2731 return ret;
2732 if (i == (dpm_table->sclk_table.count - 1))
2733 pi->smc_state_table.GraphicsLevel[i].DisplayWatermark =
2734 PPSMC_DISPLAY_WATERMARK_HIGH;
2735 }
2736
2737 pi->smc_state_table.GraphicsDpmLevelCount = (u8)dpm_table->sclk_table.count;
2738 pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
2739 ci_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
2740
2741 ret = ci_copy_bytes_to_smc(rdev, level_array_address,
2742 (u8 *)levels, level_array_size,
2743 pi->sram_end);
2744 if (ret)
2745 return ret;
2746
2747 return 0;
2748}
2749
2750static int ci_populate_ulv_state(struct radeon_device *rdev,
2751 SMU7_Discrete_Ulv *ulv_level)
2752{
2753 return ci_populate_ulv_level(rdev, ulv_level);
2754}
2755
2756static int ci_populate_all_memory_levels(struct radeon_device *rdev)
2757{
2758 struct ci_power_info *pi = ci_get_pi(rdev);
2759 struct ci_dpm_table *dpm_table = &pi->dpm_table;
2760 u32 level_array_address = pi->dpm_table_start +
2761 offsetof(SMU7_Discrete_DpmTable, MemoryLevel);
2762 u32 level_array_size = sizeof(SMU7_Discrete_MemoryLevel) *
2763 SMU7_MAX_LEVELS_MEMORY;
2764 SMU7_Discrete_MemoryLevel *levels = pi->smc_state_table.MemoryLevel;
2765 u32 i, ret;
2766
2767 memset(levels, 0, level_array_size);
2768
2769 for (i = 0; i < dpm_table->mclk_table.count; i++) {
2770 if (dpm_table->mclk_table.dpm_levels[i].value == 0)
2771 return -EINVAL;
2772 ret = ci_populate_single_memory_level(rdev,
2773 dpm_table->mclk_table.dpm_levels[i].value,
2774 &pi->smc_state_table.MemoryLevel[i]);
2775 if (ret)
2776 return ret;
2777 }
2778
2779 pi->smc_state_table.MemoryLevel[0].ActivityLevel = cpu_to_be16(0x1F);
2780
2781 pi->smc_state_table.MemoryDpmLevelCount = (u8)dpm_table->mclk_table.count;
2782 pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
2783 ci_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
2784
2785 pi->smc_state_table.MemoryLevel[dpm_table->mclk_table.count - 1].DisplayWatermark =
2786 PPSMC_DISPLAY_WATERMARK_HIGH;
2787
2788 ret = ci_copy_bytes_to_smc(rdev, level_array_address,
2789 (u8 *)levels, level_array_size,
2790 pi->sram_end);
2791 if (ret)
2792 return ret;
2793
2794 return 0;
2795}
2796
2797static void ci_reset_single_dpm_table(struct radeon_device *rdev,
2798 struct ci_single_dpm_table* dpm_table,
2799 u32 count)
2800{
2801 u32 i;
2802
2803 dpm_table->count = count;
2804 for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++)
2805 dpm_table->dpm_levels[i].enabled = false;
2806}
2807
2808static void ci_setup_pcie_table_entry(struct ci_single_dpm_table* dpm_table,
2809 u32 index, u32 pcie_gen, u32 pcie_lanes)
2810{
2811 dpm_table->dpm_levels[index].value = pcie_gen;
2812 dpm_table->dpm_levels[index].param1 = pcie_lanes;
2813 dpm_table->dpm_levels[index].enabled = true;
2814}
2815
2816static int ci_setup_default_pcie_tables(struct radeon_device *rdev)
2817{
2818 struct ci_power_info *pi = ci_get_pi(rdev);
2819
2820 if (!pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels)
2821 return -EINVAL;
2822
2823 if (pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels) {
2824 pi->pcie_gen_powersaving = pi->pcie_gen_performance;
2825 pi->pcie_lane_powersaving = pi->pcie_lane_performance;
2826 } else if (!pi->use_pcie_performance_levels && pi->use_pcie_powersaving_levels) {
2827 pi->pcie_gen_performance = pi->pcie_gen_powersaving;
2828 pi->pcie_lane_performance = pi->pcie_lane_powersaving;
2829 }
2830
2831 ci_reset_single_dpm_table(rdev,
2832 &pi->dpm_table.pcie_speed_table,
2833 SMU7_MAX_LEVELS_LINK);
2834
2835 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
2836 pi->pcie_gen_powersaving.min,
2837 pi->pcie_lane_powersaving.min);
2838 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 1,
2839 pi->pcie_gen_performance.min,
2840 pi->pcie_lane_performance.min);
2841 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 2,
2842 pi->pcie_gen_powersaving.min,
2843 pi->pcie_lane_powersaving.max);
2844 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 3,
2845 pi->pcie_gen_performance.min,
2846 pi->pcie_lane_performance.max);
2847 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 4,
2848 pi->pcie_gen_powersaving.max,
2849 pi->pcie_lane_powersaving.max);
2850 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 5,
2851 pi->pcie_gen_performance.max,
2852 pi->pcie_lane_performance.max);
2853
2854 pi->dpm_table.pcie_speed_table.count = 6;
2855
2856 return 0;
2857}
2858
2859static int ci_setup_default_dpm_tables(struct radeon_device *rdev)
2860{
2861 struct ci_power_info *pi = ci_get_pi(rdev);
2862 struct radeon_clock_voltage_dependency_table *allowed_sclk_vddc_table =
2863 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
2864 struct radeon_clock_voltage_dependency_table *allowed_mclk_table =
2865 &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
2866 struct radeon_cac_leakage_table *std_voltage_table =
2867 &rdev->pm.dpm.dyn_state.cac_leakage_table;
2868 u32 i;
2869
2870 if (allowed_sclk_vddc_table == NULL)
2871 return -EINVAL;
2872 if (allowed_sclk_vddc_table->count < 1)
2873 return -EINVAL;
2874 if (allowed_mclk_table == NULL)
2875 return -EINVAL;
2876 if (allowed_mclk_table->count < 1)
2877 return -EINVAL;
2878
2879 memset(&pi->dpm_table, 0, sizeof(struct ci_dpm_table));
2880
2881 ci_reset_single_dpm_table(rdev,
2882 &pi->dpm_table.sclk_table,
2883 SMU7_MAX_LEVELS_GRAPHICS);
2884 ci_reset_single_dpm_table(rdev,
2885 &pi->dpm_table.mclk_table,
2886 SMU7_MAX_LEVELS_MEMORY);
2887 ci_reset_single_dpm_table(rdev,
2888 &pi->dpm_table.vddc_table,
2889 SMU7_MAX_LEVELS_VDDC);
2890 ci_reset_single_dpm_table(rdev,
2891 &pi->dpm_table.vddci_table,
2892 SMU7_MAX_LEVELS_VDDCI);
2893 ci_reset_single_dpm_table(rdev,
2894 &pi->dpm_table.mvdd_table,
2895 SMU7_MAX_LEVELS_MVDD);
2896
2897 pi->dpm_table.sclk_table.count = 0;
2898 for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
2899 if ((i == 0) ||
2900 (pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count-1].value !=
2901 allowed_sclk_vddc_table->entries[i].clk)) {
2902 pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].value =
2903 allowed_sclk_vddc_table->entries[i].clk;
2904 pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].enabled = true;
2905 pi->dpm_table.sclk_table.count++;
2906 }
2907 }
2908
2909 pi->dpm_table.mclk_table.count = 0;
2910 for (i = 0; i < allowed_mclk_table->count; i++) {
2911 if ((i==0) ||
2912 (pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count-1].value !=
2913 allowed_mclk_table->entries[i].clk)) {
2914 pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].value =
2915 allowed_mclk_table->entries[i].clk;
2916 pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].enabled = true;
2917 pi->dpm_table.mclk_table.count++;
2918 }
2919 }
2920
2921 for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
2922 pi->dpm_table.vddc_table.dpm_levels[i].value =
2923 allowed_sclk_vddc_table->entries[i].v;
2924 pi->dpm_table.vddc_table.dpm_levels[i].param1 =
2925 std_voltage_table->entries[i].leakage;
2926 pi->dpm_table.vddc_table.dpm_levels[i].enabled = true;
2927 }
2928 pi->dpm_table.vddc_table.count = allowed_sclk_vddc_table->count;
2929
2930 allowed_mclk_table = &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
2931 if (allowed_mclk_table) {
2932 for (i = 0; i < allowed_mclk_table->count; i++) {
2933 pi->dpm_table.vddci_table.dpm_levels[i].value =
2934 allowed_mclk_table->entries[i].v;
2935 pi->dpm_table.vddci_table.dpm_levels[i].enabled = true;
2936 }
2937 pi->dpm_table.vddci_table.count = allowed_mclk_table->count;
2938 }
2939
2940 allowed_mclk_table = &rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk;
2941 if (allowed_mclk_table) {
2942 for (i = 0; i < allowed_mclk_table->count; i++) {
2943 pi->dpm_table.mvdd_table.dpm_levels[i].value =
2944 allowed_mclk_table->entries[i].v;
2945 pi->dpm_table.mvdd_table.dpm_levels[i].enabled = true;
2946 }
2947 pi->dpm_table.mvdd_table.count = allowed_mclk_table->count;
2948 }
2949
2950 ci_setup_default_pcie_tables(rdev);
2951
2952 return 0;
2953}
2954
2955static int ci_find_boot_level(struct ci_single_dpm_table *table,
2956 u32 value, u32 *boot_level)
2957{
2958 u32 i;
2959 int ret = -EINVAL;
2960
2961 for(i = 0; i < table->count; i++) {
2962 if (value == table->dpm_levels[i].value) {
2963 *boot_level = i;
2964 ret = 0;
2965 }
2966 }
2967
2968 return ret;
2969}
2970
2971static int ci_init_smc_table(struct radeon_device *rdev)
2972{
2973 struct ci_power_info *pi = ci_get_pi(rdev);
2974 struct ci_ulv_parm *ulv = &pi->ulv;
2975 struct radeon_ps *radeon_boot_state = rdev->pm.dpm.boot_ps;
2976 SMU7_Discrete_DpmTable *table = &pi->smc_state_table;
2977 int ret;
2978
2979 ret = ci_setup_default_dpm_tables(rdev);
2980 if (ret)
2981 return ret;
2982
2983 if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE)
2984 ci_populate_smc_voltage_tables(rdev, table);
2985
2986 ci_init_fps_limits(rdev);
2987
2988 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC)
2989 table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
2990
2991 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
2992 table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
2993
2994 if (pi->mem_gddr5)
2995 table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
2996
2997 if (ulv->supported) {
2998 ret = ci_populate_ulv_state(rdev, &pi->smc_state_table.Ulv);
2999 if (ret)
3000 return ret;
3001 WREG32_SMC(CG_ULV_PARAMETER, ulv->cg_ulv_parameter);
3002 }
3003
3004 ret = ci_populate_all_graphic_levels(rdev);
3005 if (ret)
3006 return ret;
3007
3008 ret = ci_populate_all_memory_levels(rdev);
3009 if (ret)
3010 return ret;
3011
3012 ci_populate_smc_link_level(rdev, table);
3013
3014 ret = ci_populate_smc_acpi_level(rdev, table);
3015 if (ret)
3016 return ret;
3017
3018 ret = ci_populate_smc_vce_level(rdev, table);
3019 if (ret)
3020 return ret;
3021
3022 ret = ci_populate_smc_acp_level(rdev, table);
3023 if (ret)
3024 return ret;
3025
3026 ret = ci_populate_smc_samu_level(rdev, table);
3027 if (ret)
3028 return ret;
3029
3030 ret = ci_do_program_memory_timing_parameters(rdev);
3031 if (ret)
3032 return ret;
3033
3034 ret = ci_populate_smc_uvd_level(rdev, table);
3035 if (ret)
3036 return ret;
3037
3038 table->UvdBootLevel = 0;
3039 table->VceBootLevel = 0;
3040 table->AcpBootLevel = 0;
3041 table->SamuBootLevel = 0;
3042 table->GraphicsBootLevel = 0;
3043 table->MemoryBootLevel = 0;
3044
3045 ret = ci_find_boot_level(&pi->dpm_table.sclk_table,
3046 pi->vbios_boot_state.sclk_bootup_value,
3047 (u32 *)&pi->smc_state_table.GraphicsBootLevel);
3048
3049 ret = ci_find_boot_level(&pi->dpm_table.mclk_table,
3050 pi->vbios_boot_state.mclk_bootup_value,
3051 (u32 *)&pi->smc_state_table.MemoryBootLevel);
3052
3053 table->BootVddc = pi->vbios_boot_state.vddc_bootup_value;
3054 table->BootVddci = pi->vbios_boot_state.vddci_bootup_value;
3055 table->BootMVdd = pi->vbios_boot_state.mvdd_bootup_value;
3056
3057 ci_populate_smc_initial_state(rdev, radeon_boot_state);
3058
3059 ret = ci_populate_bapm_parameters_in_dpm_table(rdev);
3060 if (ret)
3061 return ret;
3062
3063 table->UVDInterval = 1;
3064 table->VCEInterval = 1;
3065 table->ACPInterval = 1;
3066 table->SAMUInterval = 1;
3067 table->GraphicsVoltageChangeEnable = 1;
3068 table->GraphicsThermThrottleEnable = 1;
3069 table->GraphicsInterval = 1;
3070 table->VoltageInterval = 1;
3071 table->ThermalInterval = 1;
3072 table->TemperatureLimitHigh = (u16)((pi->thermal_temp_setting.temperature_high *
3073 CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
3074 table->TemperatureLimitLow = (u16)((pi->thermal_temp_setting.temperature_low *
3075 CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
3076 table->MemoryVoltageChangeEnable = 1;
3077 table->MemoryInterval = 1;
3078 table->VoltageResponseTime = 0;
3079 table->VddcVddciDelta = 4000;
3080 table->PhaseResponseTime = 0;
3081 table->MemoryThermThrottleEnable = 1;
3082 table->PCIeBootLinkLevel = 0;
3083 table->PCIeGenInterval = 1;
3084 if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2)
3085 table->SVI2Enable = 1;
3086 else
3087 table->SVI2Enable = 0;
3088
3089 table->ThermGpio = 17;
3090 table->SclkStepSize = 0x4000;
3091
3092 table->SystemFlags = cpu_to_be32(table->SystemFlags);
3093 table->SmioMaskVddcVid = cpu_to_be32(table->SmioMaskVddcVid);
3094 table->SmioMaskVddcPhase = cpu_to_be32(table->SmioMaskVddcPhase);
3095 table->SmioMaskVddciVid = cpu_to_be32(table->SmioMaskVddciVid);
3096 table->SmioMaskMvddVid = cpu_to_be32(table->SmioMaskMvddVid);
3097 table->SclkStepSize = cpu_to_be32(table->SclkStepSize);
3098 table->TemperatureLimitHigh = cpu_to_be16(table->TemperatureLimitHigh);
3099 table->TemperatureLimitLow = cpu_to_be16(table->TemperatureLimitLow);
3100 table->VddcVddciDelta = cpu_to_be16(table->VddcVddciDelta);
3101 table->VoltageResponseTime = cpu_to_be16(table->VoltageResponseTime);
3102 table->PhaseResponseTime = cpu_to_be16(table->PhaseResponseTime);
3103 table->BootVddc = cpu_to_be16(table->BootVddc * VOLTAGE_SCALE);
3104 table->BootVddci = cpu_to_be16(table->BootVddci * VOLTAGE_SCALE);
3105 table->BootMVdd = cpu_to_be16(table->BootMVdd * VOLTAGE_SCALE);
3106
3107 ret = ci_copy_bytes_to_smc(rdev,
3108 pi->dpm_table_start +
3109 offsetof(SMU7_Discrete_DpmTable, SystemFlags),
3110 (u8 *)&table->SystemFlags,
3111 sizeof(SMU7_Discrete_DpmTable) - 3 * sizeof(SMU7_PIDController),
3112 pi->sram_end);
3113 if (ret)
3114 return ret;
3115
3116 return 0;
3117}
3118
3119static void ci_trim_single_dpm_states(struct radeon_device *rdev,
3120 struct ci_single_dpm_table *dpm_table,
3121 u32 low_limit, u32 high_limit)
3122{
3123 u32 i;
3124
3125 for (i = 0; i < dpm_table->count; i++) {
3126 if ((dpm_table->dpm_levels[i].value < low_limit) ||
3127 (dpm_table->dpm_levels[i].value > high_limit))
3128 dpm_table->dpm_levels[i].enabled = false;
3129 else
3130 dpm_table->dpm_levels[i].enabled = true;
3131 }
3132}
3133
3134static void ci_trim_pcie_dpm_states(struct radeon_device *rdev,
3135 u32 speed_low, u32 lanes_low,
3136 u32 speed_high, u32 lanes_high)
3137{
3138 struct ci_power_info *pi = ci_get_pi(rdev);
3139 struct ci_single_dpm_table *pcie_table = &pi->dpm_table.pcie_speed_table;
3140 u32 i, j;
3141
3142 for (i = 0; i < pcie_table->count; i++) {
3143 if ((pcie_table->dpm_levels[i].value < speed_low) ||
3144 (pcie_table->dpm_levels[i].param1 < lanes_low) ||
3145 (pcie_table->dpm_levels[i].value > speed_high) ||
3146 (pcie_table->dpm_levels[i].param1 > lanes_high))
3147 pcie_table->dpm_levels[i].enabled = false;
3148 else
3149 pcie_table->dpm_levels[i].enabled = true;
3150 }
3151
3152 for (i = 0; i < pcie_table->count; i++) {
3153 if (pcie_table->dpm_levels[i].enabled) {
3154 for (j = i + 1; j < pcie_table->count; j++) {
3155 if (pcie_table->dpm_levels[j].enabled) {
3156 if ((pcie_table->dpm_levels[i].value == pcie_table->dpm_levels[j].value) &&
3157 (pcie_table->dpm_levels[i].param1 == pcie_table->dpm_levels[j].param1))
3158 pcie_table->dpm_levels[j].enabled = false;
3159 }
3160 }
3161 }
3162 }
3163}
3164
3165static int ci_trim_dpm_states(struct radeon_device *rdev,
3166 struct radeon_ps *radeon_state)
3167{
3168 struct ci_ps *state = ci_get_ps(radeon_state);
3169 struct ci_power_info *pi = ci_get_pi(rdev);
3170 u32 high_limit_count;
3171
3172 if (state->performance_level_count < 1)
3173 return -EINVAL;
3174
3175 if (state->performance_level_count == 1)
3176 high_limit_count = 0;
3177 else
3178 high_limit_count = 1;
3179
3180 ci_trim_single_dpm_states(rdev,
3181 &pi->dpm_table.sclk_table,
3182 state->performance_levels[0].sclk,
3183 state->performance_levels[high_limit_count].sclk);
3184
3185 ci_trim_single_dpm_states(rdev,
3186 &pi->dpm_table.mclk_table,
3187 state->performance_levels[0].mclk,
3188 state->performance_levels[high_limit_count].mclk);
3189
3190 ci_trim_pcie_dpm_states(rdev,
3191 state->performance_levels[0].pcie_gen,
3192 state->performance_levels[0].pcie_lane,
3193 state->performance_levels[high_limit_count].pcie_gen,
3194 state->performance_levels[high_limit_count].pcie_lane);
3195
3196 return 0;
3197}
3198
3199static int ci_apply_disp_minimum_voltage_request(struct radeon_device *rdev)
3200{
3201 struct radeon_clock_voltage_dependency_table *disp_voltage_table =
3202 &rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk;
3203 struct radeon_clock_voltage_dependency_table *vddc_table =
3204 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
3205 u32 requested_voltage = 0;
3206 u32 i;
3207
3208 if (disp_voltage_table == NULL)
3209 return -EINVAL;
3210 if (!disp_voltage_table->count)
3211 return -EINVAL;
3212
3213 for (i = 0; i < disp_voltage_table->count; i++) {
3214 if (rdev->clock.current_dispclk == disp_voltage_table->entries[i].clk)
3215 requested_voltage = disp_voltage_table->entries[i].v;
3216 }
3217
3218 for (i = 0; i < vddc_table->count; i++) {
3219 if (requested_voltage <= vddc_table->entries[i].v) {
3220 requested_voltage = vddc_table->entries[i].v;
3221 return (ci_send_msg_to_smc_with_parameter(rdev,
3222 PPSMC_MSG_VddC_Request,
3223 requested_voltage * VOLTAGE_SCALE) == PPSMC_Result_OK) ?
3224 0 : -EINVAL;
3225 }
3226 }
3227
3228 return -EINVAL;
3229}
3230
3231static int ci_upload_dpm_level_enable_mask(struct radeon_device *rdev)
3232{
3233 struct ci_power_info *pi = ci_get_pi(rdev);
3234 PPSMC_Result result;
3235
3236 if (!pi->sclk_dpm_key_disabled) {
3237 if (pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
3238 result = ci_send_msg_to_smc_with_parameter(rdev,
3239 PPSMC_MSG_SCLKDPM_SetEnabledMask,
3240 pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
3241 if (result != PPSMC_Result_OK)
3242 return -EINVAL;
3243 }
3244 }
3245
3246 if (!pi->mclk_dpm_key_disabled) {
3247 if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
3248 result = ci_send_msg_to_smc_with_parameter(rdev,
3249 PPSMC_MSG_MCLKDPM_SetEnabledMask,
3250 pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
3251 if (result != PPSMC_Result_OK)
3252 return -EINVAL;
3253 }
3254 }
3255
3256 if (!pi->pcie_dpm_key_disabled) {
3257 if (pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
3258 result = ci_send_msg_to_smc_with_parameter(rdev,
3259 PPSMC_MSG_PCIeDPM_SetEnabledMask,
3260 pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
3261 if (result != PPSMC_Result_OK)
3262 return -EINVAL;
3263 }
3264 }
3265
3266 ci_apply_disp_minimum_voltage_request(rdev);
3267
3268 return 0;
3269}
3270
3271static void ci_find_dpm_states_clocks_in_dpm_table(struct radeon_device *rdev,
3272 struct radeon_ps *radeon_state)
3273{
3274 struct ci_power_info *pi = ci_get_pi(rdev);
3275 struct ci_ps *state = ci_get_ps(radeon_state);
3276 struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table;
3277 u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
3278 struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table;
3279 u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
3280 u32 i;
3281
3282 pi->need_update_smu7_dpm_table = 0;
3283
3284 for (i = 0; i < sclk_table->count; i++) {
3285 if (sclk == sclk_table->dpm_levels[i].value)
3286 break;
3287 }
3288
3289 if (i >= sclk_table->count) {
3290 pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
3291 } else {
3292 /* XXX check display min clock requirements */
3293 if (0 != CISLAND_MINIMUM_ENGINE_CLOCK)
3294 pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
3295 }
3296
3297 for (i = 0; i < mclk_table->count; i++) {
3298 if (mclk == mclk_table->dpm_levels[i].value)
3299 break;
3300 }
3301
3302 if (i >= mclk_table->count)
3303 pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
3304
3305 if (rdev->pm.dpm.current_active_crtc_count !=
3306 rdev->pm.dpm.new_active_crtc_count)
3307 pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
3308}
3309
3310static int ci_populate_and_upload_sclk_mclk_dpm_levels(struct radeon_device *rdev,
3311 struct radeon_ps *radeon_state)
3312{
3313 struct ci_power_info *pi = ci_get_pi(rdev);
3314 struct ci_ps *state = ci_get_ps(radeon_state);
3315 u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
3316 u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
3317 struct ci_dpm_table *dpm_table = &pi->dpm_table;
3318 int ret;
3319
3320 if (!pi->need_update_smu7_dpm_table)
3321 return 0;
3322
3323 if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK)
3324 dpm_table->sclk_table.dpm_levels[dpm_table->sclk_table.count-1].value = sclk;
3325
3326 if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)
3327 dpm_table->mclk_table.dpm_levels[dpm_table->mclk_table.count-1].value = mclk;
3328
3329 if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK)) {
3330 ret = ci_populate_all_graphic_levels(rdev);
3331 if (ret)
3332 return ret;
3333 }
3334
3335 if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_MCLK | DPMTABLE_UPDATE_MCLK)) {
3336 ret = ci_populate_all_memory_levels(rdev);
3337 if (ret)
3338 return ret;
3339 }
3340
3341 return 0;
3342}
3343
3344static int ci_enable_uvd_dpm(struct radeon_device *rdev, bool enable)
3345{
3346 struct ci_power_info *pi = ci_get_pi(rdev);
3347 const struct radeon_clock_and_voltage_limits *max_limits;
3348 int i;
3349
3350 if (rdev->pm.dpm.ac_power)
3351 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
3352 else
3353 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
3354
3355 if (enable) {
3356 pi->dpm_level_enable_mask.uvd_dpm_enable_mask = 0;
3357
3358 for (i = rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
3359 if (rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
3360 pi->dpm_level_enable_mask.uvd_dpm_enable_mask |= 1 << i;
3361
3362 if (!pi->caps_uvd_dpm)
3363 break;
3364 }
3365 }
3366
3367 ci_send_msg_to_smc_with_parameter(rdev,
3368 PPSMC_MSG_UVDDPM_SetEnabledMask,
3369 pi->dpm_level_enable_mask.uvd_dpm_enable_mask);
3370
3371 if (pi->last_mclk_dpm_enable_mask & 0x1) {
3372 pi->uvd_enabled = true;
3373 pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
3374 ci_send_msg_to_smc_with_parameter(rdev,
3375 PPSMC_MSG_MCLKDPM_SetEnabledMask,
3376 pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
3377 }
3378 } else {
3379 if (pi->last_mclk_dpm_enable_mask & 0x1) {
3380 pi->uvd_enabled = false;
3381 pi->dpm_level_enable_mask.mclk_dpm_enable_mask |= 1;
3382 ci_send_msg_to_smc_with_parameter(rdev,
3383 PPSMC_MSG_MCLKDPM_SetEnabledMask,
3384 pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
3385 }
3386 }
3387
3388 return (ci_send_msg_to_smc(rdev, enable ?
3389 PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable) == PPSMC_Result_OK) ?
3390 0 : -EINVAL;
3391}
3392
3393#if 0
3394static int ci_enable_vce_dpm(struct radeon_device *rdev, bool enable)
3395{
3396 struct ci_power_info *pi = ci_get_pi(rdev);
3397 const struct radeon_clock_and_voltage_limits *max_limits;
3398 int i;
3399
3400 if (rdev->pm.dpm.ac_power)
3401 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
3402 else
3403 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
3404
3405 if (enable) {
3406 pi->dpm_level_enable_mask.vce_dpm_enable_mask = 0;
3407 for (i = rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
3408 if (rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
3409 pi->dpm_level_enable_mask.vce_dpm_enable_mask |= 1 << i;
3410
3411 if (!pi->caps_vce_dpm)
3412 break;
3413 }
3414 }
3415
3416 ci_send_msg_to_smc_with_parameter(rdev,
3417 PPSMC_MSG_VCEDPM_SetEnabledMask,
3418 pi->dpm_level_enable_mask.vce_dpm_enable_mask);
3419 }
3420
3421 return (ci_send_msg_to_smc(rdev, enable ?
3422 PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable) == PPSMC_Result_OK) ?
3423 0 : -EINVAL;
3424}
3425
3426static int ci_enable_samu_dpm(struct radeon_device *rdev, bool enable)
3427{
3428 struct ci_power_info *pi = ci_get_pi(rdev);
3429 const struct radeon_clock_and_voltage_limits *max_limits;
3430 int i;
3431
3432 if (rdev->pm.dpm.ac_power)
3433 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
3434 else
3435 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
3436
3437 if (enable) {
3438 pi->dpm_level_enable_mask.samu_dpm_enable_mask = 0;
3439 for (i = rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
3440 if (rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
3441 pi->dpm_level_enable_mask.samu_dpm_enable_mask |= 1 << i;
3442
3443 if (!pi->caps_samu_dpm)
3444 break;
3445 }
3446 }
3447
3448 ci_send_msg_to_smc_with_parameter(rdev,
3449 PPSMC_MSG_SAMUDPM_SetEnabledMask,
3450 pi->dpm_level_enable_mask.samu_dpm_enable_mask);
3451 }
3452 return (ci_send_msg_to_smc(rdev, enable ?
3453 PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable) == PPSMC_Result_OK) ?
3454 0 : -EINVAL;
3455}
3456
3457static int ci_enable_acp_dpm(struct radeon_device *rdev, bool enable)
3458{
3459 struct ci_power_info *pi = ci_get_pi(rdev);
3460 const struct radeon_clock_and_voltage_limits *max_limits;
3461 int i;
3462
3463 if (rdev->pm.dpm.ac_power)
3464 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
3465 else
3466 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
3467
3468 if (enable) {
3469 pi->dpm_level_enable_mask.acp_dpm_enable_mask = 0;
3470 for (i = rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
3471 if (rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
3472 pi->dpm_level_enable_mask.acp_dpm_enable_mask |= 1 << i;
3473
3474 if (!pi->caps_acp_dpm)
3475 break;
3476 }
3477 }
3478
3479 ci_send_msg_to_smc_with_parameter(rdev,
3480 PPSMC_MSG_ACPDPM_SetEnabledMask,
3481 pi->dpm_level_enable_mask.acp_dpm_enable_mask);
3482 }
3483
3484 return (ci_send_msg_to_smc(rdev, enable ?
3485 PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable) == PPSMC_Result_OK) ?
3486 0 : -EINVAL;
3487}
3488#endif
3489
3490static int ci_update_uvd_dpm(struct radeon_device *rdev, bool gate)
3491{
3492 struct ci_power_info *pi = ci_get_pi(rdev);
3493 u32 tmp;
3494
3495 if (!gate) {
3496 if (pi->caps_uvd_dpm ||
3497 (rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count <= 0))
3498 pi->smc_state_table.UvdBootLevel = 0;
3499 else
3500 pi->smc_state_table.UvdBootLevel =
3501 rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1;
3502
3503 tmp = RREG32_SMC(DPM_TABLE_475);
3504 tmp &= ~UvdBootLevel_MASK;
3505 tmp |= UvdBootLevel(pi->smc_state_table.UvdBootLevel);
3506 WREG32_SMC(DPM_TABLE_475, tmp);
3507 }
3508
3509 return ci_enable_uvd_dpm(rdev, !gate);
3510}
3511
3512#if 0
3513static u8 ci_get_vce_boot_level(struct radeon_device *rdev)
3514{
3515 u8 i;
3516 u32 min_evclk = 30000; /* ??? */
3517 struct radeon_vce_clock_voltage_dependency_table *table =
3518 &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
3519
3520 for (i = 0; i < table->count; i++) {
3521 if (table->entries[i].evclk >= min_evclk)
3522 return i;
3523 }
3524
3525 return table->count - 1;
3526}
3527
3528static int ci_update_vce_dpm(struct radeon_device *rdev,
3529 struct radeon_ps *radeon_new_state,
3530 struct radeon_ps *radeon_current_state)
3531{
3532 struct ci_power_info *pi = ci_get_pi(rdev);
3533 bool new_vce_clock_non_zero = (radeon_new_state->evclk != 0);
3534 bool old_vce_clock_non_zero = (radeon_current_state->evclk != 0);
3535 int ret = 0;
3536 u32 tmp;
3537
3538 if (new_vce_clock_non_zero != old_vce_clock_non_zero) {
3539 if (new_vce_clock_non_zero) {
3540 pi->smc_state_table.VceBootLevel = ci_get_vce_boot_level(rdev);
3541
3542 tmp = RREG32_SMC(DPM_TABLE_475);
3543 tmp &= ~VceBootLevel_MASK;
3544 tmp |= VceBootLevel(pi->smc_state_table.VceBootLevel);
3545 WREG32_SMC(DPM_TABLE_475, tmp);
3546
3547 ret = ci_enable_vce_dpm(rdev, true);
3548 } else {
3549 ret = ci_enable_vce_dpm(rdev, false);
3550 }
3551 }
3552 return ret;
3553}
3554
3555static int ci_update_samu_dpm(struct radeon_device *rdev, bool gate)
3556{
3557 return ci_enable_samu_dpm(rdev, gate);
3558}
3559
3560static int ci_update_acp_dpm(struct radeon_device *rdev, bool gate)
3561{
3562 struct ci_power_info *pi = ci_get_pi(rdev);
3563 u32 tmp;
3564
3565 if (!gate) {
3566 pi->smc_state_table.AcpBootLevel = 0;
3567
3568 tmp = RREG32_SMC(DPM_TABLE_475);
3569 tmp &= ~AcpBootLevel_MASK;
3570 tmp |= AcpBootLevel(pi->smc_state_table.AcpBootLevel);
3571 WREG32_SMC(DPM_TABLE_475, tmp);
3572 }
3573
3574 return ci_enable_acp_dpm(rdev, !gate);
3575}
3576#endif
3577
3578static int ci_generate_dpm_level_enable_mask(struct radeon_device *rdev,
3579 struct radeon_ps *radeon_state)
3580{
3581 struct ci_power_info *pi = ci_get_pi(rdev);
3582 int ret;
3583
3584 ret = ci_trim_dpm_states(rdev, radeon_state);
3585 if (ret)
3586 return ret;
3587
3588 pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
3589 ci_get_dpm_level_enable_mask_value(&pi->dpm_table.sclk_table);
3590 pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
3591 ci_get_dpm_level_enable_mask_value(&pi->dpm_table.mclk_table);
3592 pi->last_mclk_dpm_enable_mask =
3593 pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
3594 if (pi->uvd_enabled) {
3595 if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask & 1)
3596 pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
3597 }
3598 pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
3599 ci_get_dpm_level_enable_mask_value(&pi->dpm_table.pcie_speed_table);
3600
3601 return 0;
3602}
3603
Alex Deucher89536fd2013-07-15 18:14:24 -04003604static u32 ci_get_lowest_enabled_level(struct radeon_device *rdev,
3605 u32 level_mask)
3606{
3607 u32 level = 0;
3608
3609 while ((level_mask & (1 << level)) == 0)
3610 level++;
3611
3612 return level;
3613}
3614
3615
3616int ci_dpm_force_performance_level(struct radeon_device *rdev,
3617 enum radeon_dpm_forced_level level)
3618{
3619 struct ci_power_info *pi = ci_get_pi(rdev);
3620 PPSMC_Result smc_result;
3621 u32 tmp, levels, i;
3622 int ret;
3623
3624 if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
3625 if ((!pi->sclk_dpm_key_disabled) &&
3626 pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
3627 levels = 0;
3628 tmp = pi->dpm_level_enable_mask.sclk_dpm_enable_mask;
3629 while (tmp >>= 1)
3630 levels++;
3631 if (levels) {
3632 ret = ci_dpm_force_state_sclk(rdev, levels);
3633 if (ret)
3634 return ret;
3635 for (i = 0; i < rdev->usec_timeout; i++) {
3636 tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
3637 CURR_SCLK_INDEX_MASK) >> CURR_SCLK_INDEX_SHIFT;
3638 if (tmp == levels)
3639 break;
3640 udelay(1);
3641 }
3642 }
3643 }
3644 if ((!pi->mclk_dpm_key_disabled) &&
3645 pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
3646 levels = 0;
3647 tmp = pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
3648 while (tmp >>= 1)
3649 levels++;
3650 if (levels) {
3651 ret = ci_dpm_force_state_mclk(rdev, levels);
3652 if (ret)
3653 return ret;
3654 for (i = 0; i < rdev->usec_timeout; i++) {
3655 tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
3656 CURR_MCLK_INDEX_MASK) >> CURR_MCLK_INDEX_SHIFT;
3657 if (tmp == levels)
3658 break;
3659 udelay(1);
3660 }
3661 }
3662 }
3663 if ((!pi->pcie_dpm_key_disabled) &&
3664 pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
3665 levels = 0;
3666 tmp = pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
3667 while (tmp >>= 1)
3668 levels++;
3669 if (levels) {
3670 ret = ci_dpm_force_state_pcie(rdev, level);
3671 if (ret)
3672 return ret;
3673 for (i = 0; i < rdev->usec_timeout; i++) {
3674 tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX_1) &
3675 CURR_PCIE_INDEX_MASK) >> CURR_PCIE_INDEX_SHIFT;
3676 if (tmp == levels)
3677 break;
3678 udelay(1);
3679 }
3680 }
3681 }
3682 } else if (level == RADEON_DPM_FORCED_LEVEL_LOW) {
3683 if ((!pi->sclk_dpm_key_disabled) &&
3684 pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
3685 levels = ci_get_lowest_enabled_level(rdev,
3686 pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
3687 ret = ci_dpm_force_state_sclk(rdev, levels);
3688 if (ret)
3689 return ret;
3690 for (i = 0; i < rdev->usec_timeout; i++) {
3691 tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
3692 CURR_SCLK_INDEX_MASK) >> CURR_SCLK_INDEX_SHIFT;
3693 if (tmp == levels)
3694 break;
3695 udelay(1);
3696 }
3697 }
3698 if ((!pi->mclk_dpm_key_disabled) &&
3699 pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
3700 levels = ci_get_lowest_enabled_level(rdev,
3701 pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
3702 ret = ci_dpm_force_state_mclk(rdev, levels);
3703 if (ret)
3704 return ret;
3705 for (i = 0; i < rdev->usec_timeout; i++) {
3706 tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
3707 CURR_MCLK_INDEX_MASK) >> CURR_MCLK_INDEX_SHIFT;
3708 if (tmp == levels)
3709 break;
3710 udelay(1);
3711 }
3712 }
3713 if ((!pi->pcie_dpm_key_disabled) &&
3714 pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
3715 levels = ci_get_lowest_enabled_level(rdev,
3716 pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
3717 ret = ci_dpm_force_state_pcie(rdev, levels);
3718 if (ret)
3719 return ret;
3720 for (i = 0; i < rdev->usec_timeout; i++) {
3721 tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX_1) &
3722 CURR_PCIE_INDEX_MASK) >> CURR_PCIE_INDEX_SHIFT;
3723 if (tmp == levels)
3724 break;
3725 udelay(1);
3726 }
3727 }
3728 } else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) {
3729 if (!pi->sclk_dpm_key_disabled) {
3730 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_NoForcedLevel);
3731 if (smc_result != PPSMC_Result_OK)
3732 return -EINVAL;
3733 }
3734 if (!pi->mclk_dpm_key_disabled) {
3735 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_NoForcedLevel);
3736 if (smc_result != PPSMC_Result_OK)
3737 return -EINVAL;
3738 }
3739 if (!pi->pcie_dpm_key_disabled) {
3740 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_UnForceLevel);
3741 if (smc_result != PPSMC_Result_OK)
3742 return -EINVAL;
3743 }
3744 }
3745
3746 rdev->pm.dpm.forced_level = level;
3747
3748 return 0;
3749}
3750
Alex Deuchercc8dbbb2013-08-14 01:03:41 -04003751static int ci_set_mc_special_registers(struct radeon_device *rdev,
3752 struct ci_mc_reg_table *table)
3753{
3754 struct ci_power_info *pi = ci_get_pi(rdev);
3755 u8 i, j, k;
3756 u32 temp_reg;
3757
3758 for (i = 0, j = table->last; i < table->last; i++) {
3759 if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
3760 return -EINVAL;
3761 switch(table->mc_reg_address[i].s1 << 2) {
3762 case MC_SEQ_MISC1:
3763 temp_reg = RREG32(MC_PMG_CMD_EMRS);
3764 table->mc_reg_address[j].s1 = MC_PMG_CMD_EMRS >> 2;
3765 table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
3766 for (k = 0; k < table->num_entries; k++) {
3767 table->mc_reg_table_entry[k].mc_data[j] =
3768 ((temp_reg & 0xffff0000)) | ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
3769 }
3770 j++;
3771 if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
3772 return -EINVAL;
3773
3774 temp_reg = RREG32(MC_PMG_CMD_MRS);
3775 table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS >> 2;
3776 table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS_LP >> 2;
3777 for (k = 0; k < table->num_entries; k++) {
3778 table->mc_reg_table_entry[k].mc_data[j] =
3779 (temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
3780 if (!pi->mem_gddr5)
3781 table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
3782 }
3783 j++;
3784 if (j > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
3785 return -EINVAL;
3786
3787 if (!pi->mem_gddr5) {
3788 table->mc_reg_address[j].s1 = MC_PMG_AUTO_CMD >> 2;
3789 table->mc_reg_address[j].s0 = MC_PMG_AUTO_CMD >> 2;
3790 for (k = 0; k < table->num_entries; k++) {
3791 table->mc_reg_table_entry[k].mc_data[j] =
3792 (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
3793 }
3794 j++;
3795 if (j > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
3796 return -EINVAL;
3797 }
3798 break;
3799 case MC_SEQ_RESERVE_M:
3800 temp_reg = RREG32(MC_PMG_CMD_MRS1);
3801 table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS1 >> 2;
3802 table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
3803 for (k = 0; k < table->num_entries; k++) {
3804 table->mc_reg_table_entry[k].mc_data[j] =
3805 (temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
3806 }
3807 j++;
3808 if (j > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
3809 return -EINVAL;
3810 break;
3811 default:
3812 break;
3813 }
3814
3815 }
3816
3817 table->last = j;
3818
3819 return 0;
3820}
3821
3822static bool ci_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
3823{
3824 bool result = true;
3825
3826 switch(in_reg) {
3827 case MC_SEQ_RAS_TIMING >> 2:
3828 *out_reg = MC_SEQ_RAS_TIMING_LP >> 2;
3829 break;
3830 case MC_SEQ_DLL_STBY >> 2:
3831 *out_reg = MC_SEQ_DLL_STBY_LP >> 2;
3832 break;
3833 case MC_SEQ_G5PDX_CMD0 >> 2:
3834 *out_reg = MC_SEQ_G5PDX_CMD0_LP >> 2;
3835 break;
3836 case MC_SEQ_G5PDX_CMD1 >> 2:
3837 *out_reg = MC_SEQ_G5PDX_CMD1_LP >> 2;
3838 break;
3839 case MC_SEQ_G5PDX_CTRL >> 2:
3840 *out_reg = MC_SEQ_G5PDX_CTRL_LP >> 2;
3841 break;
3842 case MC_SEQ_CAS_TIMING >> 2:
3843 *out_reg = MC_SEQ_CAS_TIMING_LP >> 2;
3844 break;
3845 case MC_SEQ_MISC_TIMING >> 2:
3846 *out_reg = MC_SEQ_MISC_TIMING_LP >> 2;
3847 break;
3848 case MC_SEQ_MISC_TIMING2 >> 2:
3849 *out_reg = MC_SEQ_MISC_TIMING2_LP >> 2;
3850 break;
3851 case MC_SEQ_PMG_DVS_CMD >> 2:
3852 *out_reg = MC_SEQ_PMG_DVS_CMD_LP >> 2;
3853 break;
3854 case MC_SEQ_PMG_DVS_CTL >> 2:
3855 *out_reg = MC_SEQ_PMG_DVS_CTL_LP >> 2;
3856 break;
3857 case MC_SEQ_RD_CTL_D0 >> 2:
3858 *out_reg = MC_SEQ_RD_CTL_D0_LP >> 2;
3859 break;
3860 case MC_SEQ_RD_CTL_D1 >> 2:
3861 *out_reg = MC_SEQ_RD_CTL_D1_LP >> 2;
3862 break;
3863 case MC_SEQ_WR_CTL_D0 >> 2:
3864 *out_reg = MC_SEQ_WR_CTL_D0_LP >> 2;
3865 break;
3866 case MC_SEQ_WR_CTL_D1 >> 2:
3867 *out_reg = MC_SEQ_WR_CTL_D1_LP >> 2;
3868 break;
3869 case MC_PMG_CMD_EMRS >> 2:
3870 *out_reg = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
3871 break;
3872 case MC_PMG_CMD_MRS >> 2:
3873 *out_reg = MC_SEQ_PMG_CMD_MRS_LP >> 2;
3874 break;
3875 case MC_PMG_CMD_MRS1 >> 2:
3876 *out_reg = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
3877 break;
3878 case MC_SEQ_PMG_TIMING >> 2:
3879 *out_reg = MC_SEQ_PMG_TIMING_LP >> 2;
3880 break;
3881 case MC_PMG_CMD_MRS2 >> 2:
3882 *out_reg = MC_SEQ_PMG_CMD_MRS2_LP >> 2;
3883 break;
3884 case MC_SEQ_WR_CTL_2 >> 2:
3885 *out_reg = MC_SEQ_WR_CTL_2_LP >> 2;
3886 break;
3887 default:
3888 result = false;
3889 break;
3890 }
3891
3892 return result;
3893}
3894
3895static void ci_set_valid_flag(struct ci_mc_reg_table *table)
3896{
3897 u8 i, j;
3898
3899 for (i = 0; i < table->last; i++) {
3900 for (j = 1; j < table->num_entries; j++) {
3901 if (table->mc_reg_table_entry[j-1].mc_data[i] !=
3902 table->mc_reg_table_entry[j].mc_data[i]) {
3903 table->valid_flag |= 1 << i;
3904 break;
3905 }
3906 }
3907 }
3908}
3909
3910static void ci_set_s0_mc_reg_index(struct ci_mc_reg_table *table)
3911{
3912 u32 i;
3913 u16 address;
3914
3915 for (i = 0; i < table->last; i++) {
3916 table->mc_reg_address[i].s0 =
3917 ci_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ?
3918 address : table->mc_reg_address[i].s1;
3919 }
3920}
3921
3922static int ci_copy_vbios_mc_reg_table(const struct atom_mc_reg_table *table,
3923 struct ci_mc_reg_table *ci_table)
3924{
3925 u8 i, j;
3926
3927 if (table->last > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
3928 return -EINVAL;
3929 if (table->num_entries > MAX_AC_TIMING_ENTRIES)
3930 return -EINVAL;
3931
3932 for (i = 0; i < table->last; i++)
3933 ci_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
3934
3935 ci_table->last = table->last;
3936
3937 for (i = 0; i < table->num_entries; i++) {
3938 ci_table->mc_reg_table_entry[i].mclk_max =
3939 table->mc_reg_table_entry[i].mclk_max;
3940 for (j = 0; j < table->last; j++)
3941 ci_table->mc_reg_table_entry[i].mc_data[j] =
3942 table->mc_reg_table_entry[i].mc_data[j];
3943 }
3944 ci_table->num_entries = table->num_entries;
3945
3946 return 0;
3947}
3948
3949static int ci_initialize_mc_reg_table(struct radeon_device *rdev)
3950{
3951 struct ci_power_info *pi = ci_get_pi(rdev);
3952 struct atom_mc_reg_table *table;
3953 struct ci_mc_reg_table *ci_table = &pi->mc_reg_table;
3954 u8 module_index = rv770_get_memory_module_index(rdev);
3955 int ret;
3956
3957 table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL);
3958 if (!table)
3959 return -ENOMEM;
3960
3961 WREG32(MC_SEQ_RAS_TIMING_LP, RREG32(MC_SEQ_RAS_TIMING));
3962 WREG32(MC_SEQ_CAS_TIMING_LP, RREG32(MC_SEQ_CAS_TIMING));
3963 WREG32(MC_SEQ_DLL_STBY_LP, RREG32(MC_SEQ_DLL_STBY));
3964 WREG32(MC_SEQ_G5PDX_CMD0_LP, RREG32(MC_SEQ_G5PDX_CMD0));
3965 WREG32(MC_SEQ_G5PDX_CMD1_LP, RREG32(MC_SEQ_G5PDX_CMD1));
3966 WREG32(MC_SEQ_G5PDX_CTRL_LP, RREG32(MC_SEQ_G5PDX_CTRL));
3967 WREG32(MC_SEQ_PMG_DVS_CMD_LP, RREG32(MC_SEQ_PMG_DVS_CMD));
3968 WREG32(MC_SEQ_PMG_DVS_CTL_LP, RREG32(MC_SEQ_PMG_DVS_CTL));
3969 WREG32(MC_SEQ_MISC_TIMING_LP, RREG32(MC_SEQ_MISC_TIMING));
3970 WREG32(MC_SEQ_MISC_TIMING2_LP, RREG32(MC_SEQ_MISC_TIMING2));
3971 WREG32(MC_SEQ_PMG_CMD_EMRS_LP, RREG32(MC_PMG_CMD_EMRS));
3972 WREG32(MC_SEQ_PMG_CMD_MRS_LP, RREG32(MC_PMG_CMD_MRS));
3973 WREG32(MC_SEQ_PMG_CMD_MRS1_LP, RREG32(MC_PMG_CMD_MRS1));
3974 WREG32(MC_SEQ_WR_CTL_D0_LP, RREG32(MC_SEQ_WR_CTL_D0));
3975 WREG32(MC_SEQ_WR_CTL_D1_LP, RREG32(MC_SEQ_WR_CTL_D1));
3976 WREG32(MC_SEQ_RD_CTL_D0_LP, RREG32(MC_SEQ_RD_CTL_D0));
3977 WREG32(MC_SEQ_RD_CTL_D1_LP, RREG32(MC_SEQ_RD_CTL_D1));
3978 WREG32(MC_SEQ_PMG_TIMING_LP, RREG32(MC_SEQ_PMG_TIMING));
3979 WREG32(MC_SEQ_PMG_CMD_MRS2_LP, RREG32(MC_PMG_CMD_MRS2));
3980 WREG32(MC_SEQ_WR_CTL_2_LP, RREG32(MC_SEQ_WR_CTL_2));
3981
3982 ret = radeon_atom_init_mc_reg_table(rdev, module_index, table);
3983 if (ret)
3984 goto init_mc_done;
3985
3986 ret = ci_copy_vbios_mc_reg_table(table, ci_table);
3987 if (ret)
3988 goto init_mc_done;
3989
3990 ci_set_s0_mc_reg_index(ci_table);
3991
3992 ret = ci_set_mc_special_registers(rdev, ci_table);
3993 if (ret)
3994 goto init_mc_done;
3995
3996 ci_set_valid_flag(ci_table);
3997
3998init_mc_done:
3999 kfree(table);
4000
4001 return ret;
4002}
4003
4004static int ci_populate_mc_reg_addresses(struct radeon_device *rdev,
4005 SMU7_Discrete_MCRegisters *mc_reg_table)
4006{
4007 struct ci_power_info *pi = ci_get_pi(rdev);
4008 u32 i, j;
4009
4010 for (i = 0, j = 0; j < pi->mc_reg_table.last; j++) {
4011 if (pi->mc_reg_table.valid_flag & (1 << j)) {
4012 if (i >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4013 return -EINVAL;
4014 mc_reg_table->address[i].s0 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s0);
4015 mc_reg_table->address[i].s1 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s1);
4016 i++;
4017 }
4018 }
4019
4020 mc_reg_table->last = (u8)i;
4021
4022 return 0;
4023}
4024
4025static void ci_convert_mc_registers(const struct ci_mc_reg_entry *entry,
4026 SMU7_Discrete_MCRegisterSet *data,
4027 u32 num_entries, u32 valid_flag)
4028{
4029 u32 i, j;
4030
4031 for (i = 0, j = 0; j < num_entries; j++) {
4032 if (valid_flag & (1 << j)) {
4033 data->value[i] = cpu_to_be32(entry->mc_data[j]);
4034 i++;
4035 }
4036 }
4037}
4038
4039static void ci_convert_mc_reg_table_entry_to_smc(struct radeon_device *rdev,
4040 const u32 memory_clock,
4041 SMU7_Discrete_MCRegisterSet *mc_reg_table_data)
4042{
4043 struct ci_power_info *pi = ci_get_pi(rdev);
4044 u32 i = 0;
4045
4046 for(i = 0; i < pi->mc_reg_table.num_entries; i++) {
4047 if (memory_clock <= pi->mc_reg_table.mc_reg_table_entry[i].mclk_max)
4048 break;
4049 }
4050
4051 if ((i == pi->mc_reg_table.num_entries) && (i > 0))
4052 --i;
4053
4054 ci_convert_mc_registers(&pi->mc_reg_table.mc_reg_table_entry[i],
4055 mc_reg_table_data, pi->mc_reg_table.last,
4056 pi->mc_reg_table.valid_flag);
4057}
4058
4059static void ci_convert_mc_reg_table_to_smc(struct radeon_device *rdev,
4060 SMU7_Discrete_MCRegisters *mc_reg_table)
4061{
4062 struct ci_power_info *pi = ci_get_pi(rdev);
4063 u32 i;
4064
4065 for (i = 0; i < pi->dpm_table.mclk_table.count; i++)
4066 ci_convert_mc_reg_table_entry_to_smc(rdev,
4067 pi->dpm_table.mclk_table.dpm_levels[i].value,
4068 &mc_reg_table->data[i]);
4069}
4070
4071static int ci_populate_initial_mc_reg_table(struct radeon_device *rdev)
4072{
4073 struct ci_power_info *pi = ci_get_pi(rdev);
4074 int ret;
4075
4076 memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));
4077
4078 ret = ci_populate_mc_reg_addresses(rdev, &pi->smc_mc_reg_table);
4079 if (ret)
4080 return ret;
4081 ci_convert_mc_reg_table_to_smc(rdev, &pi->smc_mc_reg_table);
4082
4083 return ci_copy_bytes_to_smc(rdev,
4084 pi->mc_reg_table_start,
4085 (u8 *)&pi->smc_mc_reg_table,
4086 sizeof(SMU7_Discrete_MCRegisters),
4087 pi->sram_end);
4088}
4089
4090static int ci_update_and_upload_mc_reg_table(struct radeon_device *rdev)
4091{
4092 struct ci_power_info *pi = ci_get_pi(rdev);
4093
4094 if (!(pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
4095 return 0;
4096
4097 memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));
4098
4099 ci_convert_mc_reg_table_to_smc(rdev, &pi->smc_mc_reg_table);
4100
4101 return ci_copy_bytes_to_smc(rdev,
4102 pi->mc_reg_table_start +
4103 offsetof(SMU7_Discrete_MCRegisters, data[0]),
4104 (u8 *)&pi->smc_mc_reg_table.data[0],
4105 sizeof(SMU7_Discrete_MCRegisterSet) *
4106 pi->dpm_table.mclk_table.count,
4107 pi->sram_end);
4108}
4109
4110static void ci_enable_voltage_control(struct radeon_device *rdev)
4111{
4112 u32 tmp = RREG32_SMC(GENERAL_PWRMGT);
4113
4114 tmp |= VOLT_PWRMGT_EN;
4115 WREG32_SMC(GENERAL_PWRMGT, tmp);
4116}
4117
4118static enum radeon_pcie_gen ci_get_maximum_link_speed(struct radeon_device *rdev,
4119 struct radeon_ps *radeon_state)
4120{
4121 struct ci_ps *state = ci_get_ps(radeon_state);
4122 int i;
4123 u16 pcie_speed, max_speed = 0;
4124
4125 for (i = 0; i < state->performance_level_count; i++) {
4126 pcie_speed = state->performance_levels[i].pcie_gen;
4127 if (max_speed < pcie_speed)
4128 max_speed = pcie_speed;
4129 }
4130
4131 return max_speed;
4132}
4133
4134static u16 ci_get_current_pcie_speed(struct radeon_device *rdev)
4135{
4136 u32 speed_cntl = 0;
4137
4138 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL) & LC_CURRENT_DATA_RATE_MASK;
4139 speed_cntl >>= LC_CURRENT_DATA_RATE_SHIFT;
4140
4141 return (u16)speed_cntl;
4142}
4143
4144static int ci_get_current_pcie_lane_number(struct radeon_device *rdev)
4145{
4146 u32 link_width = 0;
4147
4148 link_width = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL) & LC_LINK_WIDTH_RD_MASK;
4149 link_width >>= LC_LINK_WIDTH_RD_SHIFT;
4150
4151 switch (link_width) {
4152 case RADEON_PCIE_LC_LINK_WIDTH_X1:
4153 return 1;
4154 case RADEON_PCIE_LC_LINK_WIDTH_X2:
4155 return 2;
4156 case RADEON_PCIE_LC_LINK_WIDTH_X4:
4157 return 4;
4158 case RADEON_PCIE_LC_LINK_WIDTH_X8:
4159 return 8;
4160 case RADEON_PCIE_LC_LINK_WIDTH_X12:
4161 /* not actually supported */
4162 return 12;
4163 case RADEON_PCIE_LC_LINK_WIDTH_X0:
4164 case RADEON_PCIE_LC_LINK_WIDTH_X16:
4165 default:
4166 return 16;
4167 }
4168}
4169
4170static void ci_request_link_speed_change_before_state_change(struct radeon_device *rdev,
4171 struct radeon_ps *radeon_new_state,
4172 struct radeon_ps *radeon_current_state)
4173{
4174 struct ci_power_info *pi = ci_get_pi(rdev);
4175 enum radeon_pcie_gen target_link_speed =
4176 ci_get_maximum_link_speed(rdev, radeon_new_state);
4177 enum radeon_pcie_gen current_link_speed;
4178
4179 if (pi->force_pcie_gen == RADEON_PCIE_GEN_INVALID)
4180 current_link_speed = ci_get_maximum_link_speed(rdev, radeon_current_state);
4181 else
4182 current_link_speed = pi->force_pcie_gen;
4183
4184 pi->force_pcie_gen = RADEON_PCIE_GEN_INVALID;
4185 pi->pspp_notify_required = false;
4186 if (target_link_speed > current_link_speed) {
4187 switch (target_link_speed) {
4188 case RADEON_PCIE_GEN3:
4189 if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN3, false) == 0)
4190 break;
4191 pi->force_pcie_gen = RADEON_PCIE_GEN2;
4192 if (current_link_speed == RADEON_PCIE_GEN2)
4193 break;
4194 case RADEON_PCIE_GEN2:
4195 if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN2, false) == 0)
4196 break;
4197 default:
4198 pi->force_pcie_gen = ci_get_current_pcie_speed(rdev);
4199 break;
4200 }
4201 } else {
4202 if (target_link_speed < current_link_speed)
4203 pi->pspp_notify_required = true;
4204 }
4205}
4206
4207static void ci_notify_link_speed_change_after_state_change(struct radeon_device *rdev,
4208 struct radeon_ps *radeon_new_state,
4209 struct radeon_ps *radeon_current_state)
4210{
4211 struct ci_power_info *pi = ci_get_pi(rdev);
4212 enum radeon_pcie_gen target_link_speed =
4213 ci_get_maximum_link_speed(rdev, radeon_new_state);
4214 u8 request;
4215
4216 if (pi->pspp_notify_required) {
4217 if (target_link_speed == RADEON_PCIE_GEN3)
4218 request = PCIE_PERF_REQ_PECI_GEN3;
4219 else if (target_link_speed == RADEON_PCIE_GEN2)
4220 request = PCIE_PERF_REQ_PECI_GEN2;
4221 else
4222 request = PCIE_PERF_REQ_PECI_GEN1;
4223
4224 if ((request == PCIE_PERF_REQ_PECI_GEN1) &&
4225 (ci_get_current_pcie_speed(rdev) > 0))
4226 return;
4227
4228 radeon_acpi_pcie_performance_request(rdev, request, false);
4229 }
4230}
4231
4232static int ci_set_private_data_variables_based_on_pptable(struct radeon_device *rdev)
4233{
4234 struct ci_power_info *pi = ci_get_pi(rdev);
4235 struct radeon_clock_voltage_dependency_table *allowed_sclk_vddc_table =
4236 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
4237 struct radeon_clock_voltage_dependency_table *allowed_mclk_vddc_table =
4238 &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
4239 struct radeon_clock_voltage_dependency_table *allowed_mclk_vddci_table =
4240 &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
4241
4242 if (allowed_sclk_vddc_table == NULL)
4243 return -EINVAL;
4244 if (allowed_sclk_vddc_table->count < 1)
4245 return -EINVAL;
4246 if (allowed_mclk_vddc_table == NULL)
4247 return -EINVAL;
4248 if (allowed_mclk_vddc_table->count < 1)
4249 return -EINVAL;
4250 if (allowed_mclk_vddci_table == NULL)
4251 return -EINVAL;
4252 if (allowed_mclk_vddci_table->count < 1)
4253 return -EINVAL;
4254
4255 pi->min_vddc_in_pp_table = allowed_sclk_vddc_table->entries[0].v;
4256 pi->max_vddc_in_pp_table =
4257 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
4258
4259 pi->min_vddci_in_pp_table = allowed_mclk_vddci_table->entries[0].v;
4260 pi->max_vddci_in_pp_table =
4261 allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
4262
4263 rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk =
4264 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
4265 rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk =
4266 allowed_mclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
4267 rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc =
4268 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
4269 rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci =
4270 allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
4271
4272 return 0;
4273}
4274
4275static void ci_patch_with_vddc_leakage(struct radeon_device *rdev, u16 *vddc)
4276{
4277 struct ci_power_info *pi = ci_get_pi(rdev);
4278 struct ci_leakage_voltage *leakage_table = &pi->vddc_leakage;
4279 u32 leakage_index;
4280
4281 for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
4282 if (leakage_table->leakage_id[leakage_index] == *vddc) {
4283 *vddc = leakage_table->actual_voltage[leakage_index];
4284 break;
4285 }
4286 }
4287}
4288
4289static void ci_patch_with_vddci_leakage(struct radeon_device *rdev, u16 *vddci)
4290{
4291 struct ci_power_info *pi = ci_get_pi(rdev);
4292 struct ci_leakage_voltage *leakage_table = &pi->vddci_leakage;
4293 u32 leakage_index;
4294
4295 for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
4296 if (leakage_table->leakage_id[leakage_index] == *vddci) {
4297 *vddci = leakage_table->actual_voltage[leakage_index];
4298 break;
4299 }
4300 }
4301}
4302
4303static void ci_patch_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev,
4304 struct radeon_clock_voltage_dependency_table *table)
4305{
4306 u32 i;
4307
4308 if (table) {
4309 for (i = 0; i < table->count; i++)
4310 ci_patch_with_vddc_leakage(rdev, &table->entries[i].v);
4311 }
4312}
4313
4314static void ci_patch_clock_voltage_dependency_table_with_vddci_leakage(struct radeon_device *rdev,
4315 struct radeon_clock_voltage_dependency_table *table)
4316{
4317 u32 i;
4318
4319 if (table) {
4320 for (i = 0; i < table->count; i++)
4321 ci_patch_with_vddci_leakage(rdev, &table->entries[i].v);
4322 }
4323}
4324
4325static void ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev,
4326 struct radeon_vce_clock_voltage_dependency_table *table)
4327{
4328 u32 i;
4329
4330 if (table) {
4331 for (i = 0; i < table->count; i++)
4332 ci_patch_with_vddc_leakage(rdev, &table->entries[i].v);
4333 }
4334}
4335
4336static void ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev,
4337 struct radeon_uvd_clock_voltage_dependency_table *table)
4338{
4339 u32 i;
4340
4341 if (table) {
4342 for (i = 0; i < table->count; i++)
4343 ci_patch_with_vddc_leakage(rdev, &table->entries[i].v);
4344 }
4345}
4346
4347static void ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(struct radeon_device *rdev,
4348 struct radeon_phase_shedding_limits_table *table)
4349{
4350 u32 i;
4351
4352 if (table) {
4353 for (i = 0; i < table->count; i++)
4354 ci_patch_with_vddc_leakage(rdev, &table->entries[i].voltage);
4355 }
4356}
4357
4358static void ci_patch_clock_voltage_limits_with_vddc_leakage(struct radeon_device *rdev,
4359 struct radeon_clock_and_voltage_limits *table)
4360{
4361 if (table) {
4362 ci_patch_with_vddc_leakage(rdev, (u16 *)&table->vddc);
4363 ci_patch_with_vddci_leakage(rdev, (u16 *)&table->vddci);
4364 }
4365}
4366
4367static void ci_patch_cac_leakage_table_with_vddc_leakage(struct radeon_device *rdev,
4368 struct radeon_cac_leakage_table *table)
4369{
4370 u32 i;
4371
4372 if (table) {
4373 for (i = 0; i < table->count; i++)
4374 ci_patch_with_vddc_leakage(rdev, &table->entries[i].vddc);
4375 }
4376}
4377
4378static void ci_patch_dependency_tables_with_leakage(struct radeon_device *rdev)
4379{
4380
4381 ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
4382 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk);
4383 ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
4384 &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk);
4385 ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
4386 &rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk);
4387 ci_patch_clock_voltage_dependency_table_with_vddci_leakage(rdev,
4388 &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk);
4389 ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(rdev,
4390 &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table);
4391 ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(rdev,
4392 &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table);
4393 ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
4394 &rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table);
4395 ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
4396 &rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table);
4397 ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(rdev,
4398 &rdev->pm.dpm.dyn_state.phase_shedding_limits_table);
4399 ci_patch_clock_voltage_limits_with_vddc_leakage(rdev,
4400 &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
4401 ci_patch_clock_voltage_limits_with_vddc_leakage(rdev,
4402 &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc);
4403 ci_patch_cac_leakage_table_with_vddc_leakage(rdev,
4404 &rdev->pm.dpm.dyn_state.cac_leakage_table);
4405
4406}
4407
4408static void ci_get_memory_type(struct radeon_device *rdev)
4409{
4410 struct ci_power_info *pi = ci_get_pi(rdev);
4411 u32 tmp;
4412
4413 tmp = RREG32(MC_SEQ_MISC0);
4414
4415 if (((tmp & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT) ==
4416 MC_SEQ_MISC0_GDDR5_VALUE)
4417 pi->mem_gddr5 = true;
4418 else
4419 pi->mem_gddr5 = false;
4420
4421}
4422
4423void ci_update_current_ps(struct radeon_device *rdev,
4424 struct radeon_ps *rps)
4425{
4426 struct ci_ps *new_ps = ci_get_ps(rps);
4427 struct ci_power_info *pi = ci_get_pi(rdev);
4428
4429 pi->current_rps = *rps;
4430 pi->current_ps = *new_ps;
4431 pi->current_rps.ps_priv = &pi->current_ps;
4432}
4433
4434void ci_update_requested_ps(struct radeon_device *rdev,
4435 struct radeon_ps *rps)
4436{
4437 struct ci_ps *new_ps = ci_get_ps(rps);
4438 struct ci_power_info *pi = ci_get_pi(rdev);
4439
4440 pi->requested_rps = *rps;
4441 pi->requested_ps = *new_ps;
4442 pi->requested_rps.ps_priv = &pi->requested_ps;
4443}
4444
4445int ci_dpm_pre_set_power_state(struct radeon_device *rdev)
4446{
4447 struct ci_power_info *pi = ci_get_pi(rdev);
4448 struct radeon_ps requested_ps = *rdev->pm.dpm.requested_ps;
4449 struct radeon_ps *new_ps = &requested_ps;
4450
4451 ci_update_requested_ps(rdev, new_ps);
4452
4453 ci_apply_state_adjust_rules(rdev, &pi->requested_rps);
4454
4455 return 0;
4456}
4457
4458void ci_dpm_post_set_power_state(struct radeon_device *rdev)
4459{
4460 struct ci_power_info *pi = ci_get_pi(rdev);
4461 struct radeon_ps *new_ps = &pi->requested_rps;
4462
4463 ci_update_current_ps(rdev, new_ps);
4464}
4465
4466
4467void ci_dpm_setup_asic(struct radeon_device *rdev)
4468{
4469 ci_read_clock_registers(rdev);
4470 ci_get_memory_type(rdev);
4471 ci_enable_acpi_power_management(rdev);
4472 ci_init_sclk_t(rdev);
4473}
4474
4475int ci_dpm_enable(struct radeon_device *rdev)
4476{
4477 struct ci_power_info *pi = ci_get_pi(rdev);
4478 struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
4479 int ret;
4480
4481 if (ci_is_smc_running(rdev))
4482 return -EINVAL;
4483 if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
4484 ci_enable_voltage_control(rdev);
4485 ret = ci_construct_voltage_tables(rdev);
4486 if (ret) {
4487 DRM_ERROR("ci_construct_voltage_tables failed\n");
4488 return ret;
4489 }
4490 }
4491 if (pi->caps_dynamic_ac_timing) {
4492 ret = ci_initialize_mc_reg_table(rdev);
4493 if (ret)
4494 pi->caps_dynamic_ac_timing = false;
4495 }
4496 if (pi->dynamic_ss)
4497 ci_enable_spread_spectrum(rdev, true);
4498 if (pi->thermal_protection)
4499 ci_enable_thermal_protection(rdev, true);
4500 ci_program_sstp(rdev);
4501 ci_enable_display_gap(rdev);
4502 ci_program_vc(rdev);
4503 ret = ci_upload_firmware(rdev);
4504 if (ret) {
4505 DRM_ERROR("ci_upload_firmware failed\n");
4506 return ret;
4507 }
4508 ret = ci_process_firmware_header(rdev);
4509 if (ret) {
4510 DRM_ERROR("ci_process_firmware_header failed\n");
4511 return ret;
4512 }
4513 ret = ci_initial_switch_from_arb_f0_to_f1(rdev);
4514 if (ret) {
4515 DRM_ERROR("ci_initial_switch_from_arb_f0_to_f1 failed\n");
4516 return ret;
4517 }
4518 ret = ci_init_smc_table(rdev);
4519 if (ret) {
4520 DRM_ERROR("ci_init_smc_table failed\n");
4521 return ret;
4522 }
4523 ret = ci_init_arb_table_index(rdev);
4524 if (ret) {
4525 DRM_ERROR("ci_init_arb_table_index failed\n");
4526 return ret;
4527 }
4528 if (pi->caps_dynamic_ac_timing) {
4529 ret = ci_populate_initial_mc_reg_table(rdev);
4530 if (ret) {
4531 DRM_ERROR("ci_populate_initial_mc_reg_table failed\n");
4532 return ret;
4533 }
4534 }
4535 ret = ci_populate_pm_base(rdev);
4536 if (ret) {
4537 DRM_ERROR("ci_populate_pm_base failed\n");
4538 return ret;
4539 }
4540 ci_dpm_start_smc(rdev);
4541 ci_enable_vr_hot_gpio_interrupt(rdev);
4542 ret = ci_notify_smc_display_change(rdev, false);
4543 if (ret) {
4544 DRM_ERROR("ci_notify_smc_display_change failed\n");
4545 return ret;
4546 }
4547 ci_enable_sclk_control(rdev, true);
4548 ret = ci_enable_ulv(rdev, true);
4549 if (ret) {
4550 DRM_ERROR("ci_enable_ulv failed\n");
4551 return ret;
4552 }
4553 ret = ci_enable_ds_master_switch(rdev, true);
4554 if (ret) {
4555 DRM_ERROR("ci_enable_ds_master_switch failed\n");
4556 return ret;
4557 }
4558 ret = ci_start_dpm(rdev);
4559 if (ret) {
4560 DRM_ERROR("ci_start_dpm failed\n");
4561 return ret;
4562 }
4563 ret = ci_enable_didt(rdev, true);
4564 if (ret) {
4565 DRM_ERROR("ci_enable_didt failed\n");
4566 return ret;
4567 }
4568 ret = ci_enable_smc_cac(rdev, true);
4569 if (ret) {
4570 DRM_ERROR("ci_enable_smc_cac failed\n");
4571 return ret;
4572 }
4573 ret = ci_enable_power_containment(rdev, true);
4574 if (ret) {
4575 DRM_ERROR("ci_enable_power_containment failed\n");
4576 return ret;
4577 }
4578 if (rdev->irq.installed &&
4579 r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
4580#if 0
4581 PPSMC_Result result;
4582#endif
4583 ret = ci_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
4584 if (ret) {
4585 DRM_ERROR("ci_set_thermal_temperature_range failed\n");
4586 return ret;
4587 }
4588 rdev->irq.dpm_thermal = true;
4589 radeon_irq_set(rdev);
4590#if 0
4591 result = ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableThermalInterrupt);
4592
4593 if (result != PPSMC_Result_OK)
4594 DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
4595#endif
4596 }
4597
4598 ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
4599
4600 ci_update_current_ps(rdev, boot_ps);
4601
4602 return 0;
4603}
4604
4605void ci_dpm_disable(struct radeon_device *rdev)
4606{
4607 struct ci_power_info *pi = ci_get_pi(rdev);
4608 struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
4609
4610 if (!ci_is_smc_running(rdev))
4611 return;
4612
4613 if (pi->thermal_protection)
4614 ci_enable_thermal_protection(rdev, false);
4615 ci_enable_power_containment(rdev, false);
4616 ci_enable_smc_cac(rdev, false);
4617 ci_enable_didt(rdev, false);
4618 ci_enable_spread_spectrum(rdev, false);
4619 ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, false);
4620 ci_stop_dpm(rdev);
4621 ci_enable_ds_master_switch(rdev, true);
4622 ci_enable_ulv(rdev, false);
4623 ci_clear_vc(rdev);
4624 ci_reset_to_default(rdev);
4625 ci_dpm_stop_smc(rdev);
4626 ci_force_switch_to_arb_f0(rdev);
4627
4628 ci_update_current_ps(rdev, boot_ps);
4629}
4630
4631int ci_dpm_set_power_state(struct radeon_device *rdev)
4632{
4633 struct ci_power_info *pi = ci_get_pi(rdev);
4634 struct radeon_ps *new_ps = &pi->requested_rps;
4635 struct radeon_ps *old_ps = &pi->current_rps;
4636 int ret;
4637
4638 ci_find_dpm_states_clocks_in_dpm_table(rdev, new_ps);
4639 if (pi->pcie_performance_request)
4640 ci_request_link_speed_change_before_state_change(rdev, new_ps, old_ps);
4641 ret = ci_freeze_sclk_mclk_dpm(rdev);
4642 if (ret) {
4643 DRM_ERROR("ci_freeze_sclk_mclk_dpm failed\n");
4644 return ret;
4645 }
4646 ret = ci_populate_and_upload_sclk_mclk_dpm_levels(rdev, new_ps);
4647 if (ret) {
4648 DRM_ERROR("ci_populate_and_upload_sclk_mclk_dpm_levels failed\n");
4649 return ret;
4650 }
4651 ret = ci_generate_dpm_level_enable_mask(rdev, new_ps);
4652 if (ret) {
4653 DRM_ERROR("ci_generate_dpm_level_enable_mask failed\n");
4654 return ret;
4655 }
4656#if 0
4657 ret = ci_update_vce_dpm(rdev, new_ps, old_ps);
4658 if (ret) {
4659 DRM_ERROR("ci_update_vce_dpm failed\n");
4660 return ret;
4661 }
4662#endif
4663 ret = ci_update_uvd_dpm(rdev, false);
4664 if (ret) {
4665 DRM_ERROR("ci_update_uvd_dpm failed\n");
4666 return ret;
4667 }
4668 ret = ci_update_sclk_t(rdev);
4669 if (ret) {
4670 DRM_ERROR("ci_update_sclk_t failed\n");
4671 return ret;
4672 }
4673 if (pi->caps_dynamic_ac_timing) {
4674 ret = ci_update_and_upload_mc_reg_table(rdev);
4675 if (ret) {
4676 DRM_ERROR("ci_update_and_upload_mc_reg_table failed\n");
4677 return ret;
4678 }
4679 }
4680 ret = ci_program_memory_timing_parameters(rdev);
4681 if (ret) {
4682 DRM_ERROR("ci_program_memory_timing_parameters failed\n");
4683 return ret;
4684 }
4685 ret = ci_unfreeze_sclk_mclk_dpm(rdev);
4686 if (ret) {
4687 DRM_ERROR("ci_unfreeze_sclk_mclk_dpm failed\n");
4688 return ret;
4689 }
4690 ret = ci_upload_dpm_level_enable_mask(rdev);
4691 if (ret) {
4692 DRM_ERROR("ci_upload_dpm_level_enable_mask failed\n");
4693 return ret;
4694 }
4695 if (pi->pcie_performance_request)
4696 ci_notify_link_speed_change_after_state_change(rdev, new_ps, old_ps);
4697
Alex Deucher89536fd2013-07-15 18:14:24 -04004698 ret = ci_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO);
4699 if (ret) {
4700 DRM_ERROR("ci_dpm_force_performance_level failed\n");
4701 return ret;
4702 }
4703
Alex Deuchercc8dbbb2013-08-14 01:03:41 -04004704 return 0;
4705}
4706
4707int ci_dpm_power_control_set_level(struct radeon_device *rdev)
4708{
4709 return ci_power_control_set_level(rdev);
4710}
4711
4712void ci_dpm_reset_asic(struct radeon_device *rdev)
4713{
4714 ci_set_boot_state(rdev);
4715}
4716
4717void ci_dpm_display_configuration_changed(struct radeon_device *rdev)
4718{
4719 ci_program_display_gap(rdev);
4720}
4721
4722union power_info {
4723 struct _ATOM_POWERPLAY_INFO info;
4724 struct _ATOM_POWERPLAY_INFO_V2 info_2;
4725 struct _ATOM_POWERPLAY_INFO_V3 info_3;
4726 struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
4727 struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
4728 struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
4729};
4730
4731union pplib_clock_info {
4732 struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
4733 struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
4734 struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
4735 struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
4736 struct _ATOM_PPLIB_SI_CLOCK_INFO si;
4737 struct _ATOM_PPLIB_CI_CLOCK_INFO ci;
4738};
4739
4740union pplib_power_state {
4741 struct _ATOM_PPLIB_STATE v1;
4742 struct _ATOM_PPLIB_STATE_V2 v2;
4743};
4744
4745static void ci_parse_pplib_non_clock_info(struct radeon_device *rdev,
4746 struct radeon_ps *rps,
4747 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
4748 u8 table_rev)
4749{
4750 rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
4751 rps->class = le16_to_cpu(non_clock_info->usClassification);
4752 rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
4753
4754 if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
4755 rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
4756 rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
4757 } else {
4758 rps->vclk = 0;
4759 rps->dclk = 0;
4760 }
4761
4762 if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
4763 rdev->pm.dpm.boot_ps = rps;
4764 if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
4765 rdev->pm.dpm.uvd_ps = rps;
4766}
4767
4768static void ci_parse_pplib_clock_info(struct radeon_device *rdev,
4769 struct radeon_ps *rps, int index,
4770 union pplib_clock_info *clock_info)
4771{
4772 struct ci_power_info *pi = ci_get_pi(rdev);
4773 struct ci_ps *ps = ci_get_ps(rps);
4774 struct ci_pl *pl = &ps->performance_levels[index];
4775
4776 ps->performance_level_count = index + 1;
4777
4778 pl->sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
4779 pl->sclk |= clock_info->ci.ucEngineClockHigh << 16;
4780 pl->mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
4781 pl->mclk |= clock_info->ci.ucMemoryClockHigh << 16;
4782
4783 pl->pcie_gen = r600_get_pcie_gen_support(rdev,
4784 pi->sys_pcie_mask,
4785 pi->vbios_boot_state.pcie_gen_bootup_value,
4786 clock_info->ci.ucPCIEGen);
4787 pl->pcie_lane = r600_get_pcie_lane_support(rdev,
4788 pi->vbios_boot_state.pcie_lane_bootup_value,
4789 le16_to_cpu(clock_info->ci.usPCIELane));
4790
4791 if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) {
4792 pi->acpi_pcie_gen = pl->pcie_gen;
4793 }
4794
4795 if (rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) {
4796 pi->ulv.supported = true;
4797 pi->ulv.pl = *pl;
4798 pi->ulv.cg_ulv_parameter = CISLANDS_CGULVPARAMETER_DFLT;
4799 }
4800
4801 /* patch up boot state */
4802 if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
4803 pl->mclk = pi->vbios_boot_state.mclk_bootup_value;
4804 pl->sclk = pi->vbios_boot_state.sclk_bootup_value;
4805 pl->pcie_gen = pi->vbios_boot_state.pcie_gen_bootup_value;
4806 pl->pcie_lane = pi->vbios_boot_state.pcie_lane_bootup_value;
4807 }
4808
4809 switch (rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
4810 case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
4811 pi->use_pcie_powersaving_levels = true;
4812 if (pi->pcie_gen_powersaving.max < pl->pcie_gen)
4813 pi->pcie_gen_powersaving.max = pl->pcie_gen;
4814 if (pi->pcie_gen_powersaving.min > pl->pcie_gen)
4815 pi->pcie_gen_powersaving.min = pl->pcie_gen;
4816 if (pi->pcie_lane_powersaving.max < pl->pcie_lane)
4817 pi->pcie_lane_powersaving.max = pl->pcie_lane;
4818 if (pi->pcie_lane_powersaving.min > pl->pcie_lane)
4819 pi->pcie_lane_powersaving.min = pl->pcie_lane;
4820 break;
4821 case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
4822 pi->use_pcie_performance_levels = true;
4823 if (pi->pcie_gen_performance.max < pl->pcie_gen)
4824 pi->pcie_gen_performance.max = pl->pcie_gen;
4825 if (pi->pcie_gen_performance.min > pl->pcie_gen)
4826 pi->pcie_gen_performance.min = pl->pcie_gen;
4827 if (pi->pcie_lane_performance.max < pl->pcie_lane)
4828 pi->pcie_lane_performance.max = pl->pcie_lane;
4829 if (pi->pcie_lane_performance.min > pl->pcie_lane)
4830 pi->pcie_lane_performance.min = pl->pcie_lane;
4831 break;
4832 default:
4833 break;
4834 }
4835}
4836
4837static int ci_parse_power_table(struct radeon_device *rdev)
4838{
4839 struct radeon_mode_info *mode_info = &rdev->mode_info;
4840 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
4841 union pplib_power_state *power_state;
4842 int i, j, k, non_clock_array_index, clock_array_index;
4843 union pplib_clock_info *clock_info;
4844 struct _StateArray *state_array;
4845 struct _ClockInfoArray *clock_info_array;
4846 struct _NonClockInfoArray *non_clock_info_array;
4847 union power_info *power_info;
4848 int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
4849 u16 data_offset;
4850 u8 frev, crev;
4851 u8 *power_state_offset;
4852 struct ci_ps *ps;
4853
4854 if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
4855 &frev, &crev, &data_offset))
4856 return -EINVAL;
4857 power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
4858
4859 state_array = (struct _StateArray *)
4860 (mode_info->atom_context->bios + data_offset +
4861 le16_to_cpu(power_info->pplib.usStateArrayOffset));
4862 clock_info_array = (struct _ClockInfoArray *)
4863 (mode_info->atom_context->bios + data_offset +
4864 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
4865 non_clock_info_array = (struct _NonClockInfoArray *)
4866 (mode_info->atom_context->bios + data_offset +
4867 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
4868
4869 rdev->pm.dpm.ps = kzalloc(sizeof(struct radeon_ps) *
4870 state_array->ucNumEntries, GFP_KERNEL);
4871 if (!rdev->pm.dpm.ps)
4872 return -ENOMEM;
4873 power_state_offset = (u8 *)state_array->states;
4874 rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
4875 rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
4876 rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);
4877 for (i = 0; i < state_array->ucNumEntries; i++) {
4878 power_state = (union pplib_power_state *)power_state_offset;
4879 non_clock_array_index = power_state->v2.nonClockInfoIndex;
4880 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
4881 &non_clock_info_array->nonClockInfo[non_clock_array_index];
4882 if (!rdev->pm.power_state[i].clock_info)
4883 return -EINVAL;
4884 ps = kzalloc(sizeof(struct ci_ps), GFP_KERNEL);
4885 if (ps == NULL) {
4886 kfree(rdev->pm.dpm.ps);
4887 return -ENOMEM;
4888 }
4889 rdev->pm.dpm.ps[i].ps_priv = ps;
4890 ci_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
4891 non_clock_info,
4892 non_clock_info_array->ucEntrySize);
4893 k = 0;
4894 for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
4895 clock_array_index = power_state->v2.clockInfoIndex[j];
4896 if (clock_array_index >= clock_info_array->ucNumEntries)
4897 continue;
4898 if (k >= CISLANDS_MAX_HARDWARE_POWERLEVELS)
4899 break;
4900 clock_info = (union pplib_clock_info *)
4901 &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
4902 ci_parse_pplib_clock_info(rdev,
4903 &rdev->pm.dpm.ps[i], k,
4904 clock_info);
4905 k++;
4906 }
4907 power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
4908 }
4909 rdev->pm.dpm.num_ps = state_array->ucNumEntries;
4910 return 0;
4911}
4912
4913int ci_get_vbios_boot_values(struct radeon_device *rdev,
4914 struct ci_vbios_boot_state *boot_state)
4915{
4916 struct radeon_mode_info *mode_info = &rdev->mode_info;
4917 int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
4918 ATOM_FIRMWARE_INFO_V2_2 *firmware_info;
4919 u8 frev, crev;
4920 u16 data_offset;
4921
4922 if (atom_parse_data_header(mode_info->atom_context, index, NULL,
4923 &frev, &crev, &data_offset)) {
4924 firmware_info =
4925 (ATOM_FIRMWARE_INFO_V2_2 *)(mode_info->atom_context->bios +
4926 data_offset);
4927 boot_state->mvdd_bootup_value = le16_to_cpu(firmware_info->usBootUpMVDDCVoltage);
4928 boot_state->vddc_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCVoltage);
4929 boot_state->vddci_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCIVoltage);
4930 boot_state->pcie_gen_bootup_value = ci_get_current_pcie_speed(rdev);
4931 boot_state->pcie_lane_bootup_value = ci_get_current_pcie_lane_number(rdev);
4932 boot_state->sclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultEngineClock);
4933 boot_state->mclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultMemoryClock);
4934
4935 return 0;
4936 }
4937 return -EINVAL;
4938}
4939
4940void ci_dpm_fini(struct radeon_device *rdev)
4941{
4942 int i;
4943
4944 for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
4945 kfree(rdev->pm.dpm.ps[i].ps_priv);
4946 }
4947 kfree(rdev->pm.dpm.ps);
4948 kfree(rdev->pm.dpm.priv);
4949 kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries);
4950 r600_free_extended_power_table(rdev);
4951}
4952
4953int ci_dpm_init(struct radeon_device *rdev)
4954{
4955 int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
4956 u16 data_offset, size;
4957 u8 frev, crev;
4958 struct ci_power_info *pi;
4959 int ret;
4960 u32 mask;
4961
4962 pi = kzalloc(sizeof(struct ci_power_info), GFP_KERNEL);
4963 if (pi == NULL)
4964 return -ENOMEM;
4965 rdev->pm.dpm.priv = pi;
4966
4967 ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
4968 if (ret)
4969 pi->sys_pcie_mask = 0;
4970 else
4971 pi->sys_pcie_mask = mask;
4972 pi->force_pcie_gen = RADEON_PCIE_GEN_INVALID;
4973
4974 pi->pcie_gen_performance.max = RADEON_PCIE_GEN1;
4975 pi->pcie_gen_performance.min = RADEON_PCIE_GEN3;
4976 pi->pcie_gen_powersaving.max = RADEON_PCIE_GEN1;
4977 pi->pcie_gen_powersaving.min = RADEON_PCIE_GEN3;
4978
4979 pi->pcie_lane_performance.max = 0;
4980 pi->pcie_lane_performance.min = 16;
4981 pi->pcie_lane_powersaving.max = 0;
4982 pi->pcie_lane_powersaving.min = 16;
4983
4984 ret = ci_get_vbios_boot_values(rdev, &pi->vbios_boot_state);
4985 if (ret) {
4986 ci_dpm_fini(rdev);
4987 return ret;
4988 }
4989 ret = ci_parse_power_table(rdev);
4990 if (ret) {
4991 ci_dpm_fini(rdev);
4992 return ret;
4993 }
4994 ret = r600_parse_extended_power_table(rdev);
4995 if (ret) {
4996 ci_dpm_fini(rdev);
4997 return ret;
4998 }
4999
5000 pi->dll_default_on = false;
5001 pi->sram_end = SMC_RAM_END;
5002
5003 pi->activity_target[0] = CISLAND_TARGETACTIVITY_DFLT;
5004 pi->activity_target[1] = CISLAND_TARGETACTIVITY_DFLT;
5005 pi->activity_target[2] = CISLAND_TARGETACTIVITY_DFLT;
5006 pi->activity_target[3] = CISLAND_TARGETACTIVITY_DFLT;
5007 pi->activity_target[4] = CISLAND_TARGETACTIVITY_DFLT;
5008 pi->activity_target[5] = CISLAND_TARGETACTIVITY_DFLT;
5009 pi->activity_target[6] = CISLAND_TARGETACTIVITY_DFLT;
5010 pi->activity_target[7] = CISLAND_TARGETACTIVITY_DFLT;
5011
5012 pi->mclk_activity_target = CISLAND_MCLK_TARGETACTIVITY_DFLT;
5013
5014 pi->sclk_dpm_key_disabled = 0;
5015 pi->mclk_dpm_key_disabled = 0;
5016 pi->pcie_dpm_key_disabled = 0;
5017
5018 pi->caps_sclk_ds = true;
5019
5020 pi->mclk_strobe_mode_threshold = 40000;
5021 pi->mclk_stutter_mode_threshold = 40000;
5022 pi->mclk_edc_enable_threshold = 40000;
5023 pi->mclk_edc_wr_enable_threshold = 40000;
5024
5025 ci_initialize_powertune_defaults(rdev);
5026
5027 pi->caps_fps = false;
5028
5029 pi->caps_sclk_throttle_low_notification = false;
5030
5031 ci_get_leakage_voltages(rdev);
5032 ci_patch_dependency_tables_with_leakage(rdev);
5033 ci_set_private_data_variables_based_on_pptable(rdev);
5034
5035 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
5036 kzalloc(4 * sizeof(struct radeon_clock_voltage_dependency_entry), GFP_KERNEL);
5037 if (!rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
5038 ci_dpm_fini(rdev);
5039 return -ENOMEM;
5040 }
5041 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4;
5042 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
5043 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;
5044 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000;
5045 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 720;
5046 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000;
5047 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 810;
5048 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000;
5049 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 900;
5050
5051 rdev->pm.dpm.dyn_state.mclk_sclk_ratio = 4;
5052 rdev->pm.dpm.dyn_state.sclk_mclk_delta = 15000;
5053 rdev->pm.dpm.dyn_state.vddc_vddci_delta = 200;
5054
5055 rdev->pm.dpm.dyn_state.valid_sclk_values.count = 0;
5056 rdev->pm.dpm.dyn_state.valid_sclk_values.values = NULL;
5057 rdev->pm.dpm.dyn_state.valid_mclk_values.count = 0;
5058 rdev->pm.dpm.dyn_state.valid_mclk_values.values = NULL;
5059
5060 pi->thermal_temp_setting.temperature_low = 99500;
5061 pi->thermal_temp_setting.temperature_high = 100000;
5062 pi->thermal_temp_setting.temperature_shutdown = 104000;
5063
5064 pi->uvd_enabled = false;
5065
5066 pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_NONE;
5067 pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_NONE;
5068 pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_NONE;
5069 if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT))
5070 pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
5071 else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
5072 pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
5073
5074 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL) {
5075 if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
5076 pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
5077 else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
5078 pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
5079 else
5080 rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL;
5081 }
5082
5083 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_MVDDCONTROL) {
5084 if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
5085 pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
5086 else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2))
5087 pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
5088 else
5089 rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_MVDDCONTROL;
5090 }
5091
5092 pi->vddc_phase_shed_control = true;
5093
5094#if defined(CONFIG_ACPI)
5095 pi->pcie_performance_request =
5096 radeon_acpi_is_pcie_performance_request_supported(rdev);
5097#else
5098 pi->pcie_performance_request = false;
5099#endif
5100
5101 if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
5102 &frev, &crev, &data_offset)) {
5103 pi->caps_sclk_ss_support = true;
5104 pi->caps_mclk_ss_support = true;
5105 pi->dynamic_ss = true;
5106 } else {
5107 pi->caps_sclk_ss_support = false;
5108 pi->caps_mclk_ss_support = false;
5109 pi->dynamic_ss = true;
5110 }
5111
5112 if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)
5113 pi->thermal_protection = true;
5114 else
5115 pi->thermal_protection = false;
5116
5117 pi->caps_dynamic_ac_timing = true;
5118
5119 return 0;
5120}
5121
Alex Deucher94b4adc2013-07-15 17:34:33 -04005122void ci_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
5123 struct seq_file *m)
5124{
5125 u32 sclk = ci_get_average_sclk_freq(rdev);
5126 u32 mclk = ci_get_average_mclk_freq(rdev);
5127
5128 seq_printf(m, "power level avg sclk: %u mclk: %u\n",
5129 sclk, mclk);
5130}
5131
Alex Deuchercc8dbbb2013-08-14 01:03:41 -04005132void ci_dpm_print_power_state(struct radeon_device *rdev,
5133 struct radeon_ps *rps)
5134{
5135 struct ci_ps *ps = ci_get_ps(rps);
5136 struct ci_pl *pl;
5137 int i;
5138
5139 r600_dpm_print_class_info(rps->class, rps->class2);
5140 r600_dpm_print_cap_info(rps->caps);
5141 printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
5142 for (i = 0; i < ps->performance_level_count; i++) {
5143 pl = &ps->performance_levels[i];
5144 printk("\t\tpower level %d sclk: %u mclk: %u pcie gen: %u pcie lanes: %u\n",
5145 i, pl->sclk, pl->mclk, pl->pcie_gen + 1, pl->pcie_lane);
5146 }
5147 r600_dpm_print_ps_status(rdev, rps);
5148}
5149
5150u32 ci_dpm_get_sclk(struct radeon_device *rdev, bool low)
5151{
5152 struct ci_power_info *pi = ci_get_pi(rdev);
5153 struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);
5154
5155 if (low)
5156 return requested_state->performance_levels[0].sclk;
5157 else
5158 return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk;
5159}
5160
5161u32 ci_dpm_get_mclk(struct radeon_device *rdev, bool low)
5162{
5163 struct ci_power_info *pi = ci_get_pi(rdev);
5164 struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);
5165
5166 if (low)
5167 return requested_state->performance_levels[0].mclk;
5168 else
5169 return requested_state->performance_levels[requested_state->performance_level_count - 1].mclk;
5170}