/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_ucode.h"
#include "cikd.h"
#include "amdgpu_dpm.h"
#include "ci_dpm.h"
#include "gfx_v7_0.h"
#include "atom.h"
#include "amd_pcie.h"
#include <linux/seq_file.h>

#include "smu/smu_7_0_1_d.h"
#include "smu/smu_7_0_1_sh_mask.h"

#include "dce/dce_8_0_d.h"
#include "dce/dce_8_0_sh_mask.h"

#include "bif/bif_4_1_d.h"
#include "bif/bif_4_1_sh_mask.h"

#include "gca/gfx_7_2_d.h"
#include "gca/gfx_7_2_sh_mask.h"

#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"

MODULE_FIRMWARE("radeon/bonaire_smc.bin");
MODULE_FIRMWARE("radeon/bonaire_k_smc.bin");
MODULE_FIRMWARE("radeon/hawaii_smc.bin");
MODULE_FIRMWARE("radeon/hawaii_k_smc.bin");

#define MC_CG_ARB_FREQ_F0           0x0a
#define MC_CG_ARB_FREQ_F1           0x0b
#define MC_CG_ARB_FREQ_F2           0x0c
#define MC_CG_ARB_FREQ_F3           0x0d

#define SMC_RAM_END 0x40000

#define VOLTAGE_SCALE               4
#define VOLTAGE_VID_OFFSET_SCALE1    625
#define VOLTAGE_VID_OFFSET_SCALE2    100

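/*
 * Per-ASIC PowerTune defaults: SVI load line setup, TDC throttling
 * parameters, DTE ambient temperature base and the BAPM temperature
 * gradient/coefficient tables.  The matching entry for the detected
 * device is selected in ci_initialize_powertune_defaults().
 */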
static const struct ci_pt_defaults defaults_hawaii_xt =
{
	1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000,
	{ 0x2E, 0x00, 0x00, 0x88, 0x00, 0x00, 0x72, 0x60, 0x51, 0xA7, 0x79, 0x6B, 0x90, 0xBD, 0x79 },
	{ 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
};

static const struct ci_pt_defaults defaults_hawaii_pro =
{
	1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0x65062,
	{ 0x2E, 0x00, 0x00, 0x88, 0x00, 0x00, 0x72, 0x60, 0x51, 0xA7, 0x79, 0x6B, 0x90, 0xBD, 0x79 },
	{ 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
};

static const struct ci_pt_defaults defaults_bonaire_xt =
{
	1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
	{ 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61 },
	{ 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
};

#if 0
static const struct ci_pt_defaults defaults_bonaire_pro =
{
	1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x65062,
	{ 0x8C, 0x23F, 0x244, 0xA6, 0x83, 0x85, 0x86, 0x86, 0x83, 0xDB, 0xDB, 0xDA, 0x67, 0x60, 0x5F },
	{ 0x187, 0x193, 0x193, 0x1C7, 0x1D1, 0x1D1, 0x210, 0x219, 0x219, 0x266, 0x26C, 0x26C, 0x2C9, 0x2CB, 0x2CB }
};
#endif

static const struct ci_pt_defaults defaults_saturn_xt =
{
	1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x70000,
	{ 0x8C, 0x247, 0x249, 0xA6, 0x80, 0x81, 0x8B, 0x89, 0x86, 0xC9, 0xCA, 0xC9, 0x4D, 0x4D, 0x4D },
	{ 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 }
};

#if 0
static const struct ci_pt_defaults defaults_saturn_pro =
{
	1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x30000,
	{ 0x96, 0x21D, 0x23B, 0xA1, 0x85, 0x87, 0x83, 0x84, 0x81, 0xE6, 0xE6, 0xE6, 0x71, 0x6A, 0x6A },
	{ 0x193, 0x19E, 0x19E, 0x1D2, 0x1DC, 0x1DC, 0x21A, 0x223, 0x223, 0x26E, 0x27E, 0x274, 0x2CF, 0x2D2, 0x2D2 }
};
#endif

static const struct ci_pt_config_reg didt_config_ci[] =
{
	{ 0x10, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x10, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x10, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x10, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x2, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x2, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x2, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x1, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x1, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x0, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x22, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x22, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x22, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x21, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x21, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x20, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x42, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x42, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x42, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x41, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x41, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x40, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x62, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x62, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x62, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x61, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x61, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x60, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0xFFFFFFFF }
};

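/* The memory module index is cached in bits 23:16 of BIOS_SCRATCH_4,
 * presumably populated there by the VBIOS at POST.
 */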
static u8 ci_get_memory_module_index(struct amdgpu_device *adev)
{
	return (u8) ((RREG32(mmBIOS_SCRATCH_4) >> 16) & 0xff);
}

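/*
 * Copy the MC arbitration settings (DRAM timing registers and the per-state
 * burst time) from one arb frequency state slot (F0/F1) to another, then
 * request that the MC switch to the destination set.
 */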
static int ci_copy_and_switch_arb_sets(struct amdgpu_device *adev,
				       u32 arb_freq_src, u32 arb_freq_dest)
{
	u32 mc_arb_dram_timing;
	u32 mc_arb_dram_timing2;
	u32 burst_time;
	u32 mc_cg_config;

	switch (arb_freq_src) {
	case MC_CG_ARB_FREQ_F0:
		mc_arb_dram_timing  = RREG32(mmMC_ARB_DRAM_TIMING);
		mc_arb_dram_timing2 = RREG32(mmMC_ARB_DRAM_TIMING2);
		burst_time = (RREG32(mmMC_ARB_BURST_TIME) & MC_ARB_BURST_TIME__STATE0_MASK) >>
			 MC_ARB_BURST_TIME__STATE0__SHIFT;
		break;
	case MC_CG_ARB_FREQ_F1:
		mc_arb_dram_timing  = RREG32(mmMC_ARB_DRAM_TIMING_1);
		mc_arb_dram_timing2 = RREG32(mmMC_ARB_DRAM_TIMING2_1);
		burst_time = (RREG32(mmMC_ARB_BURST_TIME) & MC_ARB_BURST_TIME__STATE1_MASK) >>
			 MC_ARB_BURST_TIME__STATE1__SHIFT;
		break;
	default:
		return -EINVAL;
	}

	switch (arb_freq_dest) {
	case MC_CG_ARB_FREQ_F0:
		WREG32(mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing);
		WREG32(mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
		WREG32_P(mmMC_ARB_BURST_TIME, (burst_time << MC_ARB_BURST_TIME__STATE0__SHIFT),
			~MC_ARB_BURST_TIME__STATE0_MASK);
		break;
	case MC_CG_ARB_FREQ_F1:
		WREG32(mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
		WREG32(mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
		WREG32_P(mmMC_ARB_BURST_TIME, (burst_time << MC_ARB_BURST_TIME__STATE1__SHIFT),
			~MC_ARB_BURST_TIME__STATE1_MASK);
		break;
	default:
		return -EINVAL;
	}

	mc_cg_config = RREG32(mmMC_CG_CONFIG) | 0x0000000F;
	WREG32(mmMC_CG_CONFIG, mc_cg_config);
	WREG32_P(mmMC_ARB_CG, (arb_freq_dest) << MC_ARB_CG__CG_ARB_REQ__SHIFT,
		~MC_ARB_CG__CG_ARB_REQ_MASK);

	return 0;
}

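/*
 * Map a memory clock (in 10 kHz units) onto the 4-bit MC parameter index
 * used by the MC firmware; one mapping for DDR3, and strobe/non-strobe
 * variants in the generic helper below.
 */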
static u8 ci_get_ddr3_mclk_frequency_ratio(u32 memory_clock)
{
	u8 mc_para_index;

	if (memory_clock < 10000)
		mc_para_index = 0;
	else if (memory_clock >= 80000)
		mc_para_index = 0x0f;
	else
		mc_para_index = (u8)((memory_clock - 10000) / 5000 + 1);
	return mc_para_index;
}

static u8 ci_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode)
{
	u8 mc_para_index;

	if (strobe_mode) {
		if (memory_clock < 12500)
			mc_para_index = 0x00;
		else if (memory_clock > 47500)
			mc_para_index = 0x0f;
		else
			mc_para_index = (u8)((memory_clock - 10000) / 2500);
	} else {
		if (memory_clock < 65000)
			mc_para_index = 0x00;
		else if (memory_clock > 135000)
			mc_para_index = 0x0f;
		else
			mc_para_index = (u8)((memory_clock - 60000) / 5000);
	}
	return mc_para_index;
}

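/*
 * The SMC state table only has room for max_voltage_steps entries, so an
 * oversized voltage table is trimmed by dropping its lowest entries and
 * keeping the top ones (the table is assumed to be sorted low to high).
 */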
static void ci_trim_voltage_table_to_fit_state_table(struct amdgpu_device *adev,
						     u32 max_voltage_steps,
						     struct atom_voltage_table *voltage_table)
{
	unsigned int i, diff;

	if (voltage_table->count <= max_voltage_steps)
		return;

	diff = voltage_table->count - max_voltage_steps;

	for (i = 0; i < max_voltage_steps; i++)
		voltage_table->entries[i] = voltage_table->entries[i + diff];

	voltage_table->count = max_voltage_steps;
}

static int ci_get_std_voltage_value_sidd(struct amdgpu_device *adev,
					 struct atom_voltage_table_entry *voltage_table,
					 u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd);
static int ci_set_power_limit(struct amdgpu_device *adev, u32 n);
static int ci_set_overdrive_target_tdp(struct amdgpu_device *adev,
				       u32 target_tdp);
static int ci_update_uvd_dpm(struct amdgpu_device *adev, bool gate);
static void ci_dpm_set_dpm_funcs(struct amdgpu_device *adev);
static void ci_dpm_set_irq_funcs(struct amdgpu_device *adev);

static PPSMC_Result amdgpu_ci_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
							     PPSMC_Msg msg, u32 parameter);
static void ci_thermal_start_smc_fan_control(struct amdgpu_device *adev);
static void ci_fan_ctrl_set_default_mode(struct amdgpu_device *adev);

static struct ci_power_info *ci_get_pi(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = adev->pm.dpm.priv;

	return pi;
}

static struct ci_ps *ci_get_ps(struct amdgpu_ps *rps)
{
	struct ci_ps *ps = rps->ps_priv;

	return ps;
}

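/*
 * Pick the PowerTune defaults that match the PCI device ID (Bonaire,
 * Saturn and Hawaii variants) and decide which containment features
 * (CAC, BAPM, TDC limit, package power tracking) will be enabled.
 */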
static void ci_initialize_powertune_defaults(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);

	switch (adev->pdev->device) {
	case 0x6649:
	case 0x6650:
	case 0x6651:
	case 0x6658:
	case 0x665C:
	case 0x665D:
	default:
		pi->powertune_defaults = &defaults_bonaire_xt;
		break;
	case 0x6640:
	case 0x6641:
	case 0x6646:
	case 0x6647:
		pi->powertune_defaults = &defaults_saturn_xt;
		break;
	case 0x67B8:
	case 0x67B0:
		pi->powertune_defaults = &defaults_hawaii_xt;
		break;
	case 0x67BA:
	case 0x67B1:
		pi->powertune_defaults = &defaults_hawaii_pro;
		break;
	case 0x67A0:
	case 0x67A1:
	case 0x67A2:
	case 0x67A8:
	case 0x67A9:
	case 0x67AA:
	case 0x67B9:
	case 0x67BE:
		pi->powertune_defaults = &defaults_bonaire_xt;
		break;
	}

	pi->dte_tj_offset = 0;

	pi->caps_power_containment = true;
	pi->caps_cac = false;
	pi->caps_sq_ramping = false;
	pi->caps_db_ramping = false;
	pi->caps_td_ramping = false;
	pi->caps_tcp_ramping = false;

	if (pi->caps_power_containment) {
		pi->caps_cac = true;
		if (adev->asic_type == CHIP_HAWAII)
			pi->enable_bapm_feature = false;
		else
			pi->enable_bapm_feature = true;
		pi->enable_tdc_limit_feature = true;
		pi->enable_pkg_pwr_tracking_feature = true;
	}
}

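/*
 * Convert a VDDC value in mV to an SVI2 VID code; VID = (1.55 V - vddc)
 * / 6.25 mV, computed in 0.25 mV units to stay in integer math
 * (6200 = 1550 mV * VOLTAGE_SCALE, 25 = 6.25 mV * VOLTAGE_SCALE).
 */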
static u8 ci_convert_to_vid(u16 vddc)
{
	return (6200 - (vddc * VOLTAGE_SCALE)) / 25;
}

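/*
 * Translate the CAC leakage table into the hi/lo SIDD VID arrays of the
 * SMC PowerTune fuse table.  With EVV the table carries three explicit
 * voltages per entry; otherwise vddc and the leakage value are used.
 */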
static int ci_populate_bapm_vddc_vid_sidd(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
	u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
	u8 *hi2_vid = pi->smc_powertune_table.BapmVddCVidHiSidd2;
	u32 i;

	if (adev->pm.dpm.dyn_state.cac_leakage_table.entries == NULL)
		return -EINVAL;
	if (adev->pm.dpm.dyn_state.cac_leakage_table.count > 8)
		return -EINVAL;
	if (adev->pm.dpm.dyn_state.cac_leakage_table.count !=
	    adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count)
		return -EINVAL;

	for (i = 0; i < adev->pm.dpm.dyn_state.cac_leakage_table.count; i++) {
		if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
			lo_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1);
			hi_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2);
			hi2_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3);
		} else {
			lo_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc);
			hi_vid[i] = ci_convert_to_vid((u16)adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage);
		}
	}
	return 0;
}

static int ci_populate_vddc_vid(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u8 *vid = pi->smc_powertune_table.VddCVid;
	u32 i;

	if (pi->vddc_voltage_table.count > 8)
		return -EINVAL;

	for (i = 0; i < pi->vddc_voltage_table.count; i++)
		vid[i] = ci_convert_to_vid(pi->vddc_voltage_table.entries[i].value);

	return 0;
}

static int ci_populate_svi_load_line(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;

	pi->smc_powertune_table.SviLoadLineEn = pt_defaults->svi_load_line_en;
	pi->smc_powertune_table.SviLoadLineVddC = pt_defaults->svi_load_line_vddc;
	pi->smc_powertune_table.SviLoadLineTrimVddC = 3;
	pi->smc_powertune_table.SviLoadLineOffsetVddC = 0;

	return 0;
}

static int ci_populate_tdc_limit(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
	u16 tdc_limit;

	tdc_limit = adev->pm.dpm.dyn_state.cac_tdp_table->tdc * 256;
	pi->smc_powertune_table.TDC_VDDC_PkgLimit = cpu_to_be16(tdc_limit);
	pi->smc_powertune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
		pt_defaults->tdc_vddc_throttle_release_limit_perc;
	pi->smc_powertune_table.TDC_MAWt = pt_defaults->tdc_mawt;

	return 0;
}

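/*
 * Note: TdcWaterfallCtl is read back from the SMC PM fuse table, but on
 * success the value is still overwritten with the driver default from the
 * PowerTune table; the read appears to serve only as a check that the
 * SRAM access works.
 */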
static int ci_populate_dw8(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
	int ret;

	ret = amdgpu_ci_read_smc_sram_dword(adev,
					    SMU7_FIRMWARE_HEADER_LOCATION +
					    offsetof(SMU7_Firmware_Header, PmFuseTable) +
					    offsetof(SMU7_Discrete_PmFuses, TdcWaterfallCtl),
					    (u32 *)&pi->smc_powertune_table.TdcWaterfallCtl,
					    pi->sram_end);
	if (ret)
		return -EINVAL;
	else
		pi->smc_powertune_table.TdcWaterfallCtl = pt_defaults->tdc_waterfall_ctl;

	return 0;
}

static int ci_populate_fuzzy_fan(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);

	if ((adev->pm.dpm.fan.fan_output_sensitivity & (1 << 15)) ||
	    (adev->pm.dpm.fan.fan_output_sensitivity == 0))
		adev->pm.dpm.fan.fan_output_sensitivity =
			adev->pm.dpm.fan.default_fan_output_sensitivity;

	pi->smc_powertune_table.FuzzyFan_PwmSetDelta =
		cpu_to_be16(adev->pm.dpm.fan.fan_output_sensitivity);

	return 0;
}

static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
	u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
	int i, min, max;

	min = max = hi_vid[0];
	for (i = 0; i < 8; i++) {
		if (0 != hi_vid[i]) {
			if (min > hi_vid[i])
				min = hi_vid[i];
			if (max < hi_vid[i])
				max = hi_vid[i];
		}

		if (0 != lo_vid[i]) {
			if (min > lo_vid[i])
				min = lo_vid[i];
			if (max < lo_vid[i])
				max = lo_vid[i];
		}
	}

	if ((min == 0) || (max == 0))
		return -EINVAL;
	pi->smc_powertune_table.GnbLPMLMaxVid = (u8)max;
	pi->smc_powertune_table.GnbLPMLMinVid = (u8)min;

	return 0;
}

static int ci_populate_bapm_vddc_base_leakage_sidd(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u16 hi_sidd, lo_sidd;
	struct amdgpu_cac_tdp_table *cac_tdp_table =
		adev->pm.dpm.dyn_state.cac_tdp_table;

	hi_sidd = cac_tdp_table->high_cac_leakage / 100 * 256;
	lo_sidd = cac_tdp_table->low_cac_leakage / 100 * 256;

	pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd = cpu_to_be16(hi_sidd);
	pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd = cpu_to_be16(lo_sidd);

	return 0;
}

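/*
 * Fill the BAPM section of the SMC DPM table: default/target TDP (scaled
 * by 256 into the SMC's 8.8 fixed-point format), thermal limits, and the
 * DTE R/RC coefficient matrices copied from the per-ASIC defaults.
 */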
static int ci_populate_bapm_parameters_in_dpm_table(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
	SMU7_Discrete_DpmTable *dpm_table = &pi->smc_state_table;
	struct amdgpu_cac_tdp_table *cac_tdp_table =
		adev->pm.dpm.dyn_state.cac_tdp_table;
	struct amdgpu_ppm_table *ppm = adev->pm.dpm.dyn_state.ppm_table;
	int i, j, k;
	const u16 *def1;
	const u16 *def2;

	dpm_table->DefaultTdp = cac_tdp_table->tdp * 256;
	dpm_table->TargetTdp = cac_tdp_table->configurable_tdp * 256;

	dpm_table->DTETjOffset = (u8)pi->dte_tj_offset;
	dpm_table->GpuTjMax =
		(u8)(pi->thermal_temp_setting.temperature_high / 1000);
	dpm_table->GpuTjHyst = 8;

	dpm_table->DTEAmbientTempBase = pt_defaults->dte_ambient_temp_base;

	if (ppm) {
		dpm_table->PPM_PkgPwrLimit = cpu_to_be16((u16)ppm->dgpu_tdp * 256 / 1000);
		dpm_table->PPM_TemperatureLimit = cpu_to_be16((u16)ppm->tj_max * 256);
	} else {
		dpm_table->PPM_PkgPwrLimit = cpu_to_be16(0);
		dpm_table->PPM_TemperatureLimit = cpu_to_be16(0);
	}

	dpm_table->BAPM_TEMP_GRADIENT = cpu_to_be32(pt_defaults->bapm_temp_gradient);
	def1 = pt_defaults->bapmti_r;
	def2 = pt_defaults->bapmti_rc;

	for (i = 0; i < SMU7_DTE_ITERATIONS; i++) {
		for (j = 0; j < SMU7_DTE_SOURCES; j++) {
			for (k = 0; k < SMU7_DTE_SINKS; k++) {
				dpm_table->BAPMTI_R[i][j][k] = cpu_to_be16(*def1);
				dpm_table->BAPMTI_RC[i][j][k] = cpu_to_be16(*def2);
				def1++;
				def2++;
			}
		}
	}

	return 0;
}

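/*
 * Assemble and upload the PM fuse table.  The helpers above only fill in
 * pi->smc_powertune_table; this routine locates PmFuseTable via the SMC
 * firmware header and copies the whole structure into SMC SRAM.
 */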
static int ci_populate_pm_base(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u32 pm_fuse_table_offset;
	int ret;

	if (pi->caps_power_containment) {
		ret = amdgpu_ci_read_smc_sram_dword(adev,
						    SMU7_FIRMWARE_HEADER_LOCATION +
						    offsetof(SMU7_Firmware_Header, PmFuseTable),
						    &pm_fuse_table_offset, pi->sram_end);
		if (ret)
			return ret;
		ret = ci_populate_bapm_vddc_vid_sidd(adev);
		if (ret)
			return ret;
		ret = ci_populate_vddc_vid(adev);
		if (ret)
			return ret;
		ret = ci_populate_svi_load_line(adev);
		if (ret)
			return ret;
		ret = ci_populate_tdc_limit(adev);
		if (ret)
			return ret;
		ret = ci_populate_dw8(adev);
		if (ret)
			return ret;
		ret = ci_populate_fuzzy_fan(adev);
		if (ret)
			return ret;
		ret = ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(adev);
		if (ret)
			return ret;
		ret = ci_populate_bapm_vddc_base_leakage_sidd(adev);
		if (ret)
			return ret;
		ret = amdgpu_ci_copy_bytes_to_smc(adev, pm_fuse_table_offset,
						  (u8 *)&pi->smc_powertune_table,
						  sizeof(SMU7_Discrete_PmFuses), pi->sram_end);
		if (ret)
			return ret;
	}

	return 0;
}

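/*
 * Toggle the DIDT (di/dt current-slew limiting) control-enable bit for
 * each block (SQ/DB/TD/TCP) whose ramping capability is enabled.
 */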
static void ci_do_enable_didt(struct amdgpu_device *adev, const bool enable)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u32 data;

	if (pi->caps_sq_ramping) {
		data = RREG32_DIDT(ixDIDT_SQ_CTRL0);
		if (enable)
			data |= DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
		else
			data &= ~DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
		WREG32_DIDT(ixDIDT_SQ_CTRL0, data);
	}

	if (pi->caps_db_ramping) {
		data = RREG32_DIDT(ixDIDT_DB_CTRL0);
		if (enable)
			data |= DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
		else
			data &= ~DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
		WREG32_DIDT(ixDIDT_DB_CTRL0, data);
	}

	if (pi->caps_td_ramping) {
		data = RREG32_DIDT(ixDIDT_TD_CTRL0);
		if (enable)
			data |= DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
		else
			data &= ~DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
		WREG32_DIDT(ixDIDT_TD_CTRL0, data);
	}

	if (pi->caps_tcp_ramping) {
		data = RREG32_DIDT(ixDIDT_TCP_CTRL0);
		if (enable)
			data |= DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
		else
			data &= ~DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
		WREG32_DIDT(ixDIDT_TCP_CTRL0, data);
	}
}

static int ci_program_pt_config_registers(struct amdgpu_device *adev,
					  const struct ci_pt_config_reg *cac_config_regs)
{
	const struct ci_pt_config_reg *config_regs = cac_config_regs;
	u32 data;
	u32 cache = 0;

	if (config_regs == NULL)
		return -EINVAL;

	while (config_regs->offset != 0xFFFFFFFF) {
		if (config_regs->type == CISLANDS_CONFIGREG_CACHE) {
			cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
		} else {
			switch (config_regs->type) {
			case CISLANDS_CONFIGREG_SMC_IND:
				data = RREG32_SMC(config_regs->offset);
				break;
			case CISLANDS_CONFIGREG_DIDT_IND:
				data = RREG32_DIDT(config_regs->offset);
				break;
			default:
				data = RREG32(config_regs->offset);
				break;
			}

			data &= ~config_regs->mask;
			data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
			data |= cache;

			switch (config_regs->type) {
			case CISLANDS_CONFIGREG_SMC_IND:
				WREG32_SMC(config_regs->offset, data);
				break;
			case CISLANDS_CONFIGREG_DIDT_IND:
				WREG32_DIDT(config_regs->offset, data);
				break;
			default:
				WREG32(config_regs->offset, data);
				break;
			}
			cache = 0;
		}
		config_regs++;
	}
	return 0;
}

static int ci_enable_didt(struct amdgpu_device *adev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	int ret;

	if (pi->caps_sq_ramping || pi->caps_db_ramping ||
	    pi->caps_td_ramping || pi->caps_tcp_ramping) {
		adev->gfx.rlc.funcs->enter_safe_mode(adev);

		if (enable) {
			ret = ci_program_pt_config_registers(adev, didt_config_ci);
			if (ret) {
				adev->gfx.rlc.funcs->exit_safe_mode(adev);
				return ret;
			}
		}

		ci_do_enable_didt(adev, enable);

		adev->gfx.rlc.funcs->exit_safe_mode(adev);
	}

	return 0;
}

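/*
 * Enable/disable the SMC power-containment features (DTE/BAPM, TDC limit,
 * package power limit) and remember which ones the SMC actually accepted
 * so they can be torn down symmetrically on disable.
 */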
static int ci_enable_power_containment(struct amdgpu_device *adev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	PPSMC_Result smc_result;
	int ret = 0;

	if (enable) {
		pi->power_containment_features = 0;
		if (pi->caps_power_containment) {
			if (pi->enable_bapm_feature) {
				smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableDTE);
				if (smc_result != PPSMC_Result_OK)
					ret = -EINVAL;
				else
					pi->power_containment_features |= POWERCONTAINMENT_FEATURE_BAPM;
			}

			if (pi->enable_tdc_limit_feature) {
				smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_TDCLimitEnable);
				if (smc_result != PPSMC_Result_OK)
					ret = -EINVAL;
				else
					pi->power_containment_features |= POWERCONTAINMENT_FEATURE_TDCLimit;
			}

			if (pi->enable_pkg_pwr_tracking_feature) {
				smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PkgPwrLimitEnable);
				if (smc_result != PPSMC_Result_OK) {
					ret = -EINVAL;
				} else {
					struct amdgpu_cac_tdp_table *cac_tdp_table =
						adev->pm.dpm.dyn_state.cac_tdp_table;
					u32 default_pwr_limit =
						(u32)(cac_tdp_table->maximum_power_delivery_limit * 256);

					pi->power_containment_features |= POWERCONTAINMENT_FEATURE_PkgPwrLimit;

					ci_set_power_limit(adev, default_pwr_limit);
				}
			}
		}
	} else {
		if (pi->caps_power_containment && pi->power_containment_features) {
			if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_TDCLimit)
				amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_TDCLimitDisable);

			if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_BAPM)
				amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DisableDTE);

			if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit)
				amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PkgPwrLimitDisable);
			pi->power_containment_features = 0;
		}
	}

	return ret;
}

static int ci_enable_smc_cac(struct amdgpu_device *adev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	PPSMC_Result smc_result;
	int ret = 0;

	if (pi->caps_cac) {
		if (enable) {
			smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableCac);
			if (smc_result != PPSMC_Result_OK) {
				ret = -EINVAL;
				pi->cac_enabled = false;
			} else {
				pi->cac_enabled = true;
			}
		} else if (pi->cac_enabled) {
			amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DisableCac);
			pi->cac_enabled = false;
		}
	}

	return ret;
}

static int ci_enable_thermal_based_sclk_dpm(struct amdgpu_device *adev,
					    bool enable)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	PPSMC_Result smc_result = PPSMC_Result_OK;

	if (pi->thermal_sclk_dpm_enabled) {
		if (enable)
			smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_ENABLE_THERMAL_DPM);
		else
			smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DISABLE_THERMAL_DPM);
	}

	if (smc_result == PPSMC_Result_OK)
		return 0;
	else
		return -EINVAL;
}

static int ci_power_control_set_level(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct amdgpu_cac_tdp_table *cac_tdp_table =
		adev->pm.dpm.dyn_state.cac_tdp_table;
	s32 adjust_percent;
	s32 target_tdp;
	int ret = 0;
	bool adjust_polarity = false; /* ??? */

	if (pi->caps_power_containment) {
		adjust_percent = adjust_polarity ?
			adev->pm.dpm.tdp_adjustment : (-1 * adev->pm.dpm.tdp_adjustment);
		target_tdp = ((100 + adjust_percent) *
			      (s32)cac_tdp_table->configurable_tdp) / 100;

		ret = ci_set_overdrive_target_tdp(adev, (u32)target_tdp);
	}

	return ret;
}

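/*
 * UVD power gating: gate or ungate the UVD IP block through the common
 * powergating interface and update the UVD DPM state to match.
 */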
static void ci_dpm_powergate_uvd(void *handle, bool gate)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ci_power_info *pi = ci_get_pi(adev);

	pi->uvd_power_gated = gate;

	if (gate) {
		/* stop the UVD block */
		amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
					     AMD_PG_STATE_GATE);
		ci_update_uvd_dpm(adev, gate);
	} else {
		amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
					     AMD_PG_STATE_UNGATE);
		ci_update_uvd_dpm(adev, gate);
	}
}

static bool ci_dpm_vblank_too_short(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
	u32 switch_limit = adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 300;

	/* disable mclk switching if the refresh is >120Hz, even if the
	 * blanking period would allow it
	 */
	if (amdgpu_dpm_get_vrefresh(adev) > 120)
		return true;

	if (vblank_time < switch_limit)
		return true;
	else
		return false;
}

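/*
 * Clamp the requested power state to current constraints: disable MCLK
 * switching when multiple displays are active or vblank is too short,
 * apply AC/DC clock ceilings, and honor display minimums and VCE clock
 * floors.
 */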
static void ci_apply_state_adjust_rules(struct amdgpu_device *adev,
					struct amdgpu_ps *rps)
{
	struct ci_ps *ps = ci_get_ps(rps);
	struct ci_power_info *pi = ci_get_pi(adev);
	struct amdgpu_clock_and_voltage_limits *max_limits;
	bool disable_mclk_switching;
	u32 sclk, mclk;
	int i;

	if (rps->vce_active) {
		rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk;
		rps->ecclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].ecclk;
	} else {
		rps->evclk = 0;
		rps->ecclk = 0;
	}

	if ((adev->pm.dpm.new_active_crtc_count > 1) ||
	    ci_dpm_vblank_too_short(adev))
		disable_mclk_switching = true;
	else
		disable_mclk_switching = false;

	if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
		pi->battery_state = true;
	else
		pi->battery_state = false;

	if (adev->pm.dpm.ac_power)
		max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
	else
		max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

	if (adev->pm.dpm.ac_power == false) {
		for (i = 0; i < ps->performance_level_count; i++) {
			if (ps->performance_levels[i].mclk > max_limits->mclk)
				ps->performance_levels[i].mclk = max_limits->mclk;
			if (ps->performance_levels[i].sclk > max_limits->sclk)
				ps->performance_levels[i].sclk = max_limits->sclk;
		}
	}

	/* XXX validate the min clocks required for display */

	if (disable_mclk_switching) {
		mclk = ps->performance_levels[ps->performance_level_count - 1].mclk;
		sclk = ps->performance_levels[0].sclk;
	} else {
		mclk = ps->performance_levels[0].mclk;
		sclk = ps->performance_levels[0].sclk;
	}

	if (adev->pm.pm_display_cfg.min_core_set_clock > sclk)
		sclk = adev->pm.pm_display_cfg.min_core_set_clock;

	if (adev->pm.pm_display_cfg.min_mem_set_clock > mclk)
		mclk = adev->pm.pm_display_cfg.min_mem_set_clock;

	if (rps->vce_active) {
		if (sclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk)
			sclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk;
		if (mclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].mclk)
			mclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].mclk;
	}

	ps->performance_levels[0].sclk = sclk;
	ps->performance_levels[0].mclk = mclk;

	if (ps->performance_levels[1].sclk < ps->performance_levels[0].sclk)
		ps->performance_levels[1].sclk = ps->performance_levels[0].sclk;

	if (disable_mclk_switching) {
		if (ps->performance_levels[0].mclk < ps->performance_levels[1].mclk)
			ps->performance_levels[0].mclk = ps->performance_levels[1].mclk;
	} else {
		if (ps->performance_levels[1].mclk < ps->performance_levels[0].mclk)
			ps->performance_levels[1].mclk = ps->performance_levels[0].mclk;
	}
}

static int ci_thermal_set_temperature_range(struct amdgpu_device *adev,
					    int min_temp, int max_temp)
{
	int low_temp = 0 * 1000;
	int high_temp = 255 * 1000;
	u32 tmp;

	if (low_temp < min_temp)
		low_temp = min_temp;
	if (high_temp > max_temp)
		high_temp = max_temp;
	if (high_temp < low_temp) {
		DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
		return -EINVAL;
	}

	tmp = RREG32_SMC(ixCG_THERMAL_INT);
	tmp &= ~(CG_THERMAL_INT__DIG_THERM_INTH_MASK | CG_THERMAL_INT__DIG_THERM_INTL_MASK);
	tmp |= ((high_temp / 1000) << CG_THERMAL_INT__DIG_THERM_INTH__SHIFT) |
		((low_temp / 1000)) << CG_THERMAL_INT__DIG_THERM_INTL__SHIFT;
	WREG32_SMC(ixCG_THERMAL_INT, tmp);

#if 0
	/* XXX: need to figure out how to handle this properly */
	tmp = RREG32_SMC(ixCG_THERMAL_CTRL);
	tmp &= DIG_THERM_DPM_MASK;
	tmp |= DIG_THERM_DPM(high_temp / 1000);
	WREG32_SMC(ixCG_THERMAL_CTRL, tmp);
#endif

	adev->pm.dpm.thermal.min_temp = low_temp;
	adev->pm.dpm.thermal.max_temp = high_temp;
	return 0;
}

static int ci_thermal_enable_alert(struct amdgpu_device *adev,
				   bool enable)
{
	u32 thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
	PPSMC_Result result;

	if (enable) {
		thermal_int &= ~(CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK |
				 CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK);
		WREG32_SMC(ixCG_THERMAL_INT, thermal_int);
		result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Thermal_Cntl_Enable);
		if (result != PPSMC_Result_OK) {
			DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
			return -EINVAL;
		}
	} else {
		thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK |
			CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
		WREG32_SMC(ixCG_THERMAL_INT, thermal_int);
		result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Thermal_Cntl_Disable);
		if (result != PPSMC_Result_OK) {
			DRM_DEBUG_KMS("Could not disable thermal interrupts.\n");
			return -EINVAL;
		}
	}

	return 0;
}

static void ci_fan_ctrl_set_static_mode(struct amdgpu_device *adev, u32 mode)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u32 tmp;

	if (pi->fan_ctrl_is_in_default_mode) {
		tmp = (RREG32_SMC(ixCG_FDO_CTRL2) & CG_FDO_CTRL2__FDO_PWM_MODE_MASK)
			>> CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
		pi->fan_ctrl_default_mode = tmp;
		tmp = (RREG32_SMC(ixCG_FDO_CTRL2) & CG_FDO_CTRL2__TMIN_MASK)
			>> CG_FDO_CTRL2__TMIN__SHIFT;
		pi->t_min = tmp;
		pi->fan_ctrl_is_in_default_mode = false;
	}

	tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TMIN_MASK;
	tmp |= 0 << CG_FDO_CTRL2__TMIN__SHIFT;
	WREG32_SMC(ixCG_FDO_CTRL2, tmp);

	tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__FDO_PWM_MODE_MASK;
	tmp |= mode << CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
	WREG32_SMC(ixCG_FDO_CTRL2, tmp);
}

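/*
 * Build the SMC fan table from the fan curve in the DPM fan settings
 * (t_min/t_med/t_high and matching PWM points), convert everything to the
 * SMC's fixed-point units and upload it; on any failure fall back to
 * non-ucode fan control.
 */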
static int ci_thermal_setup_fan_table(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	SMU7_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
	u32 duty100;
	u32 t_diff1, t_diff2, pwm_diff1, pwm_diff2;
	u16 fdo_min, slope1, slope2;
	u32 reference_clock, tmp;
	int ret;
	u64 tmp64;

	if (!pi->fan_table_start) {
		adev->pm.dpm.fan.ucode_fan_control = false;
		return 0;
	}

	duty100 = (RREG32_SMC(ixCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK)
		>> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;

	if (duty100 == 0) {
		adev->pm.dpm.fan.ucode_fan_control = false;
		return 0;
	}

	tmp64 = (u64)adev->pm.dpm.fan.pwm_min * duty100;
	do_div(tmp64, 10000);
	fdo_min = (u16)tmp64;

	t_diff1 = adev->pm.dpm.fan.t_med - adev->pm.dpm.fan.t_min;
	t_diff2 = adev->pm.dpm.fan.t_high - adev->pm.dpm.fan.t_med;

	pwm_diff1 = adev->pm.dpm.fan.pwm_med - adev->pm.dpm.fan.pwm_min;
	pwm_diff2 = adev->pm.dpm.fan.pwm_high - adev->pm.dpm.fan.pwm_med;

	slope1 = (u16)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
	slope2 = (u16)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);

	fan_table.TempMin = cpu_to_be16((50 + adev->pm.dpm.fan.t_min) / 100);
	fan_table.TempMed = cpu_to_be16((50 + adev->pm.dpm.fan.t_med) / 100);
	fan_table.TempMax = cpu_to_be16((50 + adev->pm.dpm.fan.t_max) / 100);

	fan_table.Slope1 = cpu_to_be16(slope1);
	fan_table.Slope2 = cpu_to_be16(slope2);

	fan_table.FdoMin = cpu_to_be16(fdo_min);

	fan_table.HystDown = cpu_to_be16(adev->pm.dpm.fan.t_hyst);

	fan_table.HystUp = cpu_to_be16(1);

	fan_table.HystSlope = cpu_to_be16(1);

	fan_table.TempRespLim = cpu_to_be16(5);

	reference_clock = amdgpu_asic_get_xclk(adev);

	fan_table.RefreshPeriod = cpu_to_be32((adev->pm.dpm.fan.cycle_delay *
					       reference_clock) / 1600);

	fan_table.FdoMax = cpu_to_be16((u16)duty100);

	tmp = (RREG32_SMC(ixCG_MULT_THERMAL_CTRL) & CG_MULT_THERMAL_CTRL__TEMP_SEL_MASK)
		>> CG_MULT_THERMAL_CTRL__TEMP_SEL__SHIFT;
	fan_table.TempSrc = (uint8_t)tmp;

	ret = amdgpu_ci_copy_bytes_to_smc(adev,
					  pi->fan_table_start,
					  (u8 *)(&fan_table),
					  sizeof(fan_table),
					  pi->sram_end);

	if (ret) {
		DRM_ERROR("Failed to load fan table to the SMC.");
		adev->pm.dpm.fan.ucode_fan_control = false;
	}

	return 0;
}

static int ci_fan_ctrl_start_smc_fan_control(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	PPSMC_Result ret;

	if (pi->caps_od_fuzzy_fan_control_support) {
		ret = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
							       PPSMC_StartFanControl,
							       FAN_CONTROL_FUZZY);
		if (ret != PPSMC_Result_OK)
			return -EINVAL;
		ret = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
							       PPSMC_MSG_SetFanPwmMax,
							       adev->pm.dpm.fan.default_max_fan_pwm);
		if (ret != PPSMC_Result_OK)
			return -EINVAL;
	} else {
		ret = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
							       PPSMC_StartFanControl,
							       FAN_CONTROL_TABLE);
		if (ret != PPSMC_Result_OK)
			return -EINVAL;
	}

	pi->fan_is_controlled_by_smc = true;
	return 0;
}


static int ci_fan_ctrl_stop_smc_fan_control(struct amdgpu_device *adev)
{
	PPSMC_Result ret;
	struct ci_power_info *pi = ci_get_pi(adev);

	ret = amdgpu_ci_send_msg_to_smc(adev, PPSMC_StopFanControl);
	if (ret == PPSMC_Result_OK) {
		pi->fan_is_controlled_by_smc = false;
		return 0;
	} else {
		return -EINVAL;
	}
}

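/*
 * Manual fan control: speed is expressed as a percentage of the
 * FMAX_DUTY100 duty cycle.  Reads and writes go through the FDO
 * registers; writes are refused while the SMC owns the fan.
 */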
static int ci_dpm_get_fan_speed_percent(void *handle,
					u32 *speed)
{
	u32 duty, duty100;
	u64 tmp64;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->pm.no_fan)
		return -ENOENT;

	duty100 = (RREG32_SMC(ixCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK)
		>> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;
	duty = (RREG32_SMC(ixCG_THERMAL_STATUS) & CG_THERMAL_STATUS__FDO_PWM_DUTY_MASK)
		>> CG_THERMAL_STATUS__FDO_PWM_DUTY__SHIFT;

	if (duty100 == 0)
		return -EINVAL;

	tmp64 = (u64)duty * 100;
	do_div(tmp64, duty100);
	*speed = (u32)tmp64;

	if (*speed > 100)
		*speed = 100;

	return 0;
}

static int ci_dpm_set_fan_speed_percent(void *handle,
					u32 speed)
{
	u32 tmp;
	u32 duty, duty100;
	u64 tmp64;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ci_power_info *pi = ci_get_pi(adev);

	if (adev->pm.no_fan)
		return -ENOENT;

	if (pi->fan_is_controlled_by_smc)
		return -EINVAL;

	if (speed > 100)
		return -EINVAL;

	duty100 = (RREG32_SMC(ixCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK)
		>> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;

	if (duty100 == 0)
		return -EINVAL;

	tmp64 = (u64)speed * duty100;
	do_div(tmp64, 100);
	duty = (u32)tmp64;

	tmp = RREG32_SMC(ixCG_FDO_CTRL0) & ~CG_FDO_CTRL0__FDO_STATIC_DUTY_MASK;
	tmp |= duty << CG_FDO_CTRL0__FDO_STATIC_DUTY__SHIFT;
	WREG32_SMC(ixCG_FDO_CTRL0, tmp);

	return 0;
}

static void ci_dpm_set_fan_control_mode(void *handle, u32 mode)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (mode) {
	case AMD_FAN_CTRL_NONE:
		if (adev->pm.dpm.fan.ucode_fan_control)
			ci_fan_ctrl_stop_smc_fan_control(adev);
		ci_dpm_set_fan_speed_percent(adev, 100);
		break;
	case AMD_FAN_CTRL_MANUAL:
		if (adev->pm.dpm.fan.ucode_fan_control)
			ci_fan_ctrl_stop_smc_fan_control(adev);
		break;
	case AMD_FAN_CTRL_AUTO:
		if (adev->pm.dpm.fan.ucode_fan_control)
			ci_thermal_start_smc_fan_control(adev);
		break;
	default:
		break;
	}
}

static u32 ci_dpm_get_fan_control_mode(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ci_power_info *pi = ci_get_pi(adev);

	if (pi->fan_is_controlled_by_smc)
		return AMD_FAN_CTRL_AUTO;
	else
		return AMD_FAN_CTRL_MANUAL;
}

#if 0
static int ci_fan_ctrl_get_fan_speed_rpm(struct amdgpu_device *adev,
					 u32 *speed)
{
	u32 tach_period;
	u32 xclk = amdgpu_asic_get_xclk(adev);

	if (adev->pm.no_fan)
		return -ENOENT;

	if (adev->pm.fan_pulses_per_revolution == 0)
		return -ENOENT;

	tach_period = (RREG32_SMC(ixCG_TACH_STATUS) & CG_TACH_STATUS__TACH_PERIOD_MASK)
		>> CG_TACH_STATUS__TACH_PERIOD__SHIFT;
	if (tach_period == 0)
		return -ENOENT;

	*speed = 60 * xclk * 10000 / tach_period;

	return 0;
}

static int ci_fan_ctrl_set_fan_speed_rpm(struct amdgpu_device *adev,
					 u32 speed)
{
	u32 tach_period, tmp;
	u32 xclk = amdgpu_asic_get_xclk(adev);

	if (adev->pm.no_fan)
		return -ENOENT;

	if (adev->pm.fan_pulses_per_revolution == 0)
		return -ENOENT;

	if ((speed < adev->pm.fan_min_rpm) ||
	    (speed > adev->pm.fan_max_rpm))
		return -EINVAL;

	if (adev->pm.dpm.fan.ucode_fan_control)
		ci_fan_ctrl_stop_smc_fan_control(adev);

	tach_period = 60 * xclk * 10000 / (8 * speed);
	tmp = RREG32_SMC(ixCG_TACH_CTRL) & ~CG_TACH_CTRL__TARGET_PERIOD_MASK;
	tmp |= tach_period << CG_TACH_CTRL__TARGET_PERIOD__SHIFT;
	WREG32_SMC(ixCG_TACH_CTRL, tmp);

	ci_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC_RPM);

	return 0;
}
#endif

static void ci_fan_ctrl_set_default_mode(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u32 tmp;

	if (!pi->fan_ctrl_is_in_default_mode) {
		tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__FDO_PWM_MODE_MASK;
		tmp |= pi->fan_ctrl_default_mode << CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
		WREG32_SMC(ixCG_FDO_CTRL2, tmp);

		tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TMIN_MASK;
		tmp |= pi->t_min << CG_FDO_CTRL2__TMIN__SHIFT;
		WREG32_SMC(ixCG_FDO_CTRL2, tmp);
		pi->fan_ctrl_is_in_default_mode = true;
	}
}

static void ci_thermal_start_smc_fan_control(struct amdgpu_device *adev)
{
	if (adev->pm.dpm.fan.ucode_fan_control) {
		ci_fan_ctrl_start_smc_fan_control(adev);
		ci_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC);
	}
}

static void ci_thermal_initialize(struct amdgpu_device *adev)
{
	u32 tmp;

	if (adev->pm.fan_pulses_per_revolution) {
		tmp = RREG32_SMC(ixCG_TACH_CTRL) & ~CG_TACH_CTRL__EDGE_PER_REV_MASK;
		tmp |= (adev->pm.fan_pulses_per_revolution - 1)
			<< CG_TACH_CTRL__EDGE_PER_REV__SHIFT;
		WREG32_SMC(ixCG_TACH_CTRL, tmp);
	}

	tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TACH_PWM_RESP_RATE_MASK;
	tmp |= 0x28 << CG_FDO_CTRL2__TACH_PWM_RESP_RATE__SHIFT;
	WREG32_SMC(ixCG_FDO_CTRL2, tmp);
}

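/*
 * Thermal controller bring-up: program the tach/PWM response rate, set the
 * temperature interrupt range, unmask the thermal alert and, if the VBIOS
 * provided a fan table, hand fan control over to the SMC.
 */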
static int ci_thermal_start_thermal_controller(struct amdgpu_device *adev)
{
	int ret;

	ci_thermal_initialize(adev);
	ret = ci_thermal_set_temperature_range(adev, CISLANDS_TEMP_RANGE_MIN, CISLANDS_TEMP_RANGE_MAX);
	if (ret)
		return ret;
	ret = ci_thermal_enable_alert(adev, true);
	if (ret)
		return ret;
	if (adev->pm.dpm.fan.ucode_fan_control) {
		ret = ci_thermal_setup_fan_table(adev);
		if (ret)
			return ret;
		ci_thermal_start_smc_fan_control(adev);
	}

	return 0;
}

static void ci_thermal_stop_thermal_controller(struct amdgpu_device *adev)
{
	if (!adev->pm.no_fan)
		ci_fan_ctrl_set_default_mode(adev);
}

static int ci_read_smc_soft_register(struct amdgpu_device *adev,
				     u16 reg_offset, u32 *value)
{
	struct ci_power_info *pi = ci_get_pi(adev);

	return amdgpu_ci_read_smc_sram_dword(adev,
					     pi->soft_regs_start + reg_offset,
					     value, pi->sram_end);
}

static int ci_write_smc_soft_register(struct amdgpu_device *adev,
				      u16 reg_offset, u32 value)
{
	struct ci_power_info *pi = ci_get_pi(adev);

	return amdgpu_ci_write_smc_sram_dword(adev,
					      pi->soft_regs_start + reg_offset,
					      value, pi->sram_end);
}

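/*
 * FPS limits handed to the SMC when FPS-based throttling is supported;
 * presumably the SMC throttles above FpsHighT (45) and releases below
 * FpsLowT (30).
 */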
static void ci_init_fps_limits(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	SMU7_Discrete_DpmTable *table = &pi->smc_state_table;

	if (pi->caps_fps) {
		u16 tmp;

		tmp = 45;
		table->FpsHighT = cpu_to_be16(tmp);

		tmp = 30;
		table->FpsLowT = cpu_to_be16(tmp);
	}
}

static int ci_update_sclk_t(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	int ret = 0;
	u32 low_sclk_interrupt_t = 0;

	if (pi->caps_sclk_throttle_low_notification) {
		low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t);

		ret = amdgpu_ci_copy_bytes_to_smc(adev,
						  pi->dpm_table_start +
						  offsetof(SMU7_Discrete_DpmTable, LowSclkInterruptT),
						  (u8 *)&low_sclk_interrupt_t,
						  sizeof(u32), pi->sram_end);

	}

	return ret;
}

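/*
 * Gather the leakage voltage pairs for the virtual voltage IDs, either via
 * EVV or from the VBIOS leakage parameters, so that real voltages can later
 * be patched in wherever a dependency table references a leakage ID.
 */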
static void ci_get_leakage_voltages(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u16 leakage_id, virtual_voltage_id;
	u16 vddc, vddci;
	int i;

	pi->vddc_leakage.count = 0;
	pi->vddci_leakage.count = 0;

	if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
		for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
			virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
			if (amdgpu_atombios_get_voltage_evv(adev, virtual_voltage_id, &vddc) != 0)
				continue;
			if (vddc != 0 && vddc != virtual_voltage_id) {
				pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
				pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
				pi->vddc_leakage.count++;
			}
		}
	} else if (amdgpu_atombios_get_leakage_id_from_vbios(adev, &leakage_id) == 0) {
		for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
			virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
			if (amdgpu_atombios_get_leakage_vddc_based_on_leakage_params(adev, &vddc, &vddci,
										     virtual_voltage_id,
										     leakage_id) == 0) {
				if (vddc != 0 && vddc != virtual_voltage_id) {
					pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
					pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
					pi->vddc_leakage.count++;
				}
				if (vddci != 0 && vddci != virtual_voltage_id) {
					pi->vddci_leakage.actual_voltage[pi->vddci_leakage.count] = vddci;
					pi->vddci_leakage.leakage_id[pi->vddci_leakage.count] = virtual_voltage_id;
					pi->vddci_leakage.count++;
				}
			}
		}
	}
}

static void ci_set_dpm_event_sources(struct amdgpu_device *adev, u32 sources)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	bool want_thermal_protection;
	enum amdgpu_dpm_event_src dpm_event_src;
	u32 tmp;

	switch (sources) {
	case 0:
	default:
		want_thermal_protection = false;
		break;
	case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL):
		want_thermal_protection = true;
		dpm_event_src = AMDGPU_DPM_EVENT_SRC_DIGITAL;
		break;
	case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
		want_thermal_protection = true;
		dpm_event_src = AMDGPU_DPM_EVENT_SRC_EXTERNAL;
		break;
	case ((1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
	      (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL)):
		want_thermal_protection = true;
		dpm_event_src = AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL;
		break;
	}

	if (want_thermal_protection) {
#if 0
		/* XXX: need to figure out how to handle this properly */
		tmp = RREG32_SMC(ixCG_THERMAL_CTRL);
		tmp &= DPM_EVENT_SRC_MASK;
		tmp |= DPM_EVENT_SRC(dpm_event_src);
		WREG32_SMC(ixCG_THERMAL_CTRL, tmp);
#endif

		tmp = RREG32_SMC(ixGENERAL_PWRMGT);
		if (pi->thermal_protection)
			tmp &= ~GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
		else
			tmp |= GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
		WREG32_SMC(ixGENERAL_PWRMGT, tmp);
	} else {
		tmp = RREG32_SMC(ixGENERAL_PWRMGT);
		tmp |= GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
		WREG32_SMC(ixGENERAL_PWRMGT, tmp);
	}
}

static void ci_enable_auto_throttle_source(struct amdgpu_device *adev,
					   enum amdgpu_dpm_auto_throttle_src source,
					   bool enable)
{
	struct ci_power_info *pi = ci_get_pi(adev);

	if (enable) {
		if (!(pi->active_auto_throttle_sources & (1 << source))) {
			pi->active_auto_throttle_sources |= 1 << source;
			ci_set_dpm_event_sources(adev, pi->active_auto_throttle_sources);
		}
	} else {
		if (pi->active_auto_throttle_sources & (1 << source)) {
			pi->active_auto_throttle_sources &= ~(1 << source);
			ci_set_dpm_event_sources(adev, pi->active_auto_throttle_sources);
		}
	}
}

static void ci_enable_vr_hot_gpio_interrupt(struct amdgpu_device *adev)
{
	if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT)
		amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableVRHotGPIOInterrupt);
}

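/*
 * SCLK/MCLK DPM levels are frozen around state changes that rewrite the
 * dpm tables; unfreezing re-enables level transitions once the updated
 * tables have been uploaded.
 */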
static int ci_unfreeze_sclk_mclk_dpm(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	PPSMC_Result smc_result;

	if (!pi->need_update_smu7_dpm_table)
		return 0;

	if ((!pi->sclk_dpm_key_disabled) &&
	    (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
		smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_SCLKDPM_UnfreezeLevel);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	if ((!pi->mclk_dpm_key_disabled) &&
	    (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
		smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_UnfreezeLevel);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	pi->need_update_smu7_dpm_table = 0;
	return 0;
}

1631static int ci_enable_sclk_mclk_dpm(struct amdgpu_device *adev, bool enable)
1632{
1633 struct ci_power_info *pi = ci_get_pi(adev);
1634 PPSMC_Result smc_result;
1635
1636 if (enable) {
1637 if (!pi->sclk_dpm_key_disabled) {
1638 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DPM_Enable);
1639 if (smc_result != PPSMC_Result_OK)
1640 return -EINVAL;
1641 }
1642
1643 if (!pi->mclk_dpm_key_disabled) {
1644 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_Enable);
1645 if (smc_result != PPSMC_Result_OK)
1646 return -EINVAL;
1647
1648 WREG32_P(mmMC_SEQ_CNTL_3, MC_SEQ_CNTL_3__CAC_EN_MASK,
1649 ~MC_SEQ_CNTL_3__CAC_EN_MASK);
1650
1651 WREG32_SMC(ixLCAC_MC0_CNTL, 0x05);
1652 WREG32_SMC(ixLCAC_MC1_CNTL, 0x05);
1653 WREG32_SMC(ixLCAC_CPL_CNTL, 0x100005);
1654
1655 udelay(10);
1656
1657 WREG32_SMC(ixLCAC_MC0_CNTL, 0x400005);
1658 WREG32_SMC(ixLCAC_MC1_CNTL, 0x400005);
1659 WREG32_SMC(ixLCAC_CPL_CNTL, 0x500005);
1660 }
1661 } else {
1662 if (!pi->sclk_dpm_key_disabled) {
1663 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DPM_Disable);
1664 if (smc_result != PPSMC_Result_OK)
1665 return -EINVAL;
1666 }
1667
1668 if (!pi->mclk_dpm_key_disabled) {
1669 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_Disable);
1670 if (smc_result != PPSMC_Result_OK)
1671 return -EINVAL;
1672 }
1673 }
1674
1675 return 0;
1676}
1677
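/*
 * Master DPM enable: turn on global power management and dynamic
 * engine clocking, enable voltage control in the SMC, then bring up
 * SCLK/MCLK DPM and, unless disabled, PCIe DPM.
 */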
1678static int ci_start_dpm(struct amdgpu_device *adev)
1679{
1680 struct ci_power_info *pi = ci_get_pi(adev);
1681 PPSMC_Result smc_result;
1682 int ret;
1683 u32 tmp;
1684
1685 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
1686 tmp |= GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK;
1687 WREG32_SMC(ixGENERAL_PWRMGT, tmp);
1688
1689 tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
1690 tmp |= SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN_MASK;
1691 WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
1692
1693 ci_write_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, VoltageChangeTimeout), 0x1000);
1694
1695 WREG32_P(mmBIF_LNCNT_RESET, 0, ~BIF_LNCNT_RESET__RESET_LNCNT_EN_MASK);
1696
1697 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Voltage_Cntl_Enable);
1698 if (smc_result != PPSMC_Result_OK)
1699 return -EINVAL;
1700
1701 ret = ci_enable_sclk_mclk_dpm(adev, true);
1702 if (ret)
1703 return ret;
1704
1705 if (!pi->pcie_dpm_key_disabled) {
1706 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PCIeDPM_Enable);
1707 if (smc_result != PPSMC_Result_OK)
1708 return -EINVAL;
1709 }
1710
1711 return 0;
1712}
1713
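/*
 * Ask the SMC to hold the current SCLK/MCLK levels while the dpm
 * tables are being rewritten; paired with ci_unfreeze_sclk_mclk_dpm().
 */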
1714static int ci_freeze_sclk_mclk_dpm(struct amdgpu_device *adev)
1715{
1716 struct ci_power_info *pi = ci_get_pi(adev);
1717 PPSMC_Result smc_result;
1718
1719 if (!pi->need_update_smu7_dpm_table)
1720 return 0;
1721
1722 if ((!pi->sclk_dpm_key_disabled) &&
1723 (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
1724 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_SCLKDPM_FreezeLevel);
1725 if (smc_result != PPSMC_Result_OK)
1726 return -EINVAL;
1727 }
1728
1729 if ((!pi->mclk_dpm_key_disabled) &&
1730 (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
1731 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_FreezeLevel);
1732 if (smc_result != PPSMC_Result_OK)
1733 return -EINVAL;
1734 }
1735
1736 return 0;
1737}
1738
1739static int ci_stop_dpm(struct amdgpu_device *adev)
1740{
1741 struct ci_power_info *pi = ci_get_pi(adev);
1742 PPSMC_Result smc_result;
1743 int ret;
1744 u32 tmp;
1745
1746 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
1747 tmp &= ~GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK;
1748 WREG32_SMC(ixGENERAL_PWRMGT, tmp);
1749
1750 tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
1751 tmp &= ~SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN_MASK;
1752 WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
1753
1754 if (!pi->pcie_dpm_key_disabled) {
1755 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PCIeDPM_Disable);
1756 if (smc_result != PPSMC_Result_OK)
1757 return -EINVAL;
1758 }
1759
1760 ret = ci_enable_sclk_mclk_dpm(adev, false);
1761 if (ret)
1762 return ret;
1763
1764 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Voltage_Cntl_Disable);
1765 if (smc_result != PPSMC_Result_OK)
1766 return -EINVAL;
1767
1768 return 0;
1769}
1770
1771static void ci_enable_sclk_control(struct amdgpu_device *adev, bool enable)
1772{
1773 u32 tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
1774
1775 if (enable)
1776 tmp &= ~SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF_MASK;
1777 else
1778 tmp |= SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF_MASK;
1779 WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
1780}
1781
1782#if 0
1783static int ci_notify_hw_of_power_source(struct amdgpu_device *adev,
1784 bool ac_power)
1785{
1786 struct ci_power_info *pi = ci_get_pi(adev);
1787 struct amdgpu_cac_tdp_table *cac_tdp_table =
1788 adev->pm.dpm.dyn_state.cac_tdp_table;
1789 u32 power_limit;
1790
1791 if (ac_power)
1792 power_limit = (u32)(cac_tdp_table->maximum_power_delivery_limit * 256);
1793 else
1794 power_limit = (u32)(cac_tdp_table->battery_power_limit * 256);
1795
1796 ci_set_power_limit(adev, power_limit);
1797
1798 if (pi->caps_automatic_dc_transition) {
1799 if (ac_power)
1800 amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_RunningOnAC);
1801 else
1802 amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Remove_DC_Clamp);
1803 }
1804
1805 return 0;
1806}
1807#endif
1808
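/* The message argument is passed via the SMC_MSG_ARG_0 mailbox register. */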
1809static PPSMC_Result amdgpu_ci_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
1810 PPSMC_Msg msg, u32 parameter)
1811{
1812 WREG32(mmSMC_MSG_ARG_0, parameter);
1813 return amdgpu_ci_send_msg_to_smc(adev, msg);
1814}
1815
1816static PPSMC_Result amdgpu_ci_send_msg_to_smc_return_parameter(struct amdgpu_device *adev,
1817 PPSMC_Msg msg, u32 *parameter)
1818{
1819 PPSMC_Result smc_result;
1820
1821 smc_result = amdgpu_ci_send_msg_to_smc(adev, msg);
1822
1823 if ((smc_result == PPSMC_Result_OK) && parameter)
1824 *parameter = RREG32(mmSMC_MSG_ARG_0);
1825
1826 return smc_result;
1827}
1828
1829static int ci_dpm_force_state_sclk(struct amdgpu_device *adev, u32 n)
1830{
1831 struct ci_power_info *pi = ci_get_pi(adev);
1832
1833 if (!pi->sclk_dpm_key_disabled) {
1834 PPSMC_Result smc_result =
1835 amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SCLKDPM_SetEnabledMask, 1 << n);
1836 if (smc_result != PPSMC_Result_OK)
1837 return -EINVAL;
1838 }
1839
1840 return 0;
1841}
1842
1843static int ci_dpm_force_state_mclk(struct amdgpu_device *adev, u32 n)
1844{
1845 struct ci_power_info *pi = ci_get_pi(adev);
1846
1847 if (!pi->mclk_dpm_key_disabled) {
1848 PPSMC_Result smc_result =
1849 amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_MCLKDPM_SetEnabledMask, 1 << n);
1850 if (smc_result != PPSMC_Result_OK)
1851 return -EINVAL;
1852 }
1853
1854 return 0;
1855}
1856
1857static int ci_dpm_force_state_pcie(struct amdgpu_device *adev, u32 n)
1858{
1859 struct ci_power_info *pi = ci_get_pi(adev);
1860
1861 if (!pi->pcie_dpm_key_disabled) {
1862 PPSMC_Result smc_result =
1863 amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_PCIeDPM_ForceLevel, n);
1864 if (smc_result != PPSMC_Result_OK)
1865 return -EINVAL;
1866 }
1867
1868 return 0;
1869}
1870
1871static int ci_set_power_limit(struct amdgpu_device *adev, u32 n)
1872{
1873 struct ci_power_info *pi = ci_get_pi(adev);
1874
1875 if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit) {
1876 PPSMC_Result smc_result =
1877 amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_PkgPwrSetLimit, n);
1878 if (smc_result != PPSMC_Result_OK)
1879 return -EINVAL;
1880 }
1881
1882 return 0;
1883}
1884
1885static int ci_set_overdrive_target_tdp(struct amdgpu_device *adev,
1886 u32 target_tdp)
1887{
1888 PPSMC_Result smc_result =
1889 amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_OverDriveSetTargetTdp, target_tdp);
1890 if (smc_result != PPSMC_Result_OK)
1891 return -EINVAL;
1892 return 0;
1893}
1894
1895#if 0
1896static int ci_set_boot_state(struct amdgpu_device *adev)
1897{
1898 return ci_enable_sclk_mclk_dpm(adev, false);
1899}
1900#endif
1901
1902static u32 ci_get_average_sclk_freq(struct amdgpu_device *adev)
1903{
1904 u32 sclk_freq;
1905 PPSMC_Result smc_result =
1906 amdgpu_ci_send_msg_to_smc_return_parameter(adev,
1907 PPSMC_MSG_API_GetSclkFrequency,
1908 &sclk_freq);
1909 if (smc_result != PPSMC_Result_OK)
1910 sclk_freq = 0;
1911
1912 return sclk_freq;
1913}
1914
1915static u32 ci_get_average_mclk_freq(struct amdgpu_device *adev)
1916{
1917 u32 mclk_freq;
1918 PPSMC_Result smc_result =
1919 amdgpu_ci_send_msg_to_smc_return_parameter(adev,
1920 PPSMC_MSG_API_GetMclkFrequency,
1921 &mclk_freq);
1922 if (smc_result != PPSMC_Result_OK)
1923 mclk_freq = 0;
1924
1925 return mclk_freq;
1926}
1927
1928static void ci_dpm_start_smc(struct amdgpu_device *adev)
1929{
1930 int i;
1931
1932 amdgpu_ci_program_jump_on_start(adev);
1933 amdgpu_ci_start_smc_clock(adev);
1934 amdgpu_ci_start_smc(adev);
1935 for (i = 0; i < adev->usec_timeout; i++) {
1936 if (RREG32_SMC(ixFIRMWARE_FLAGS) & FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK)
1937 break;
1938 }
1939}
1940
1941static void ci_dpm_stop_smc(struct amdgpu_device *adev)
1942{
1943 amdgpu_ci_reset_smc(adev);
1944 amdgpu_ci_stop_smc_clock(adev);
1945}
1946
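/*
 * Read the SRAM offsets of the dpm table, soft registers, MC register
 * table, fan table and MC arb table out of the SMU7 firmware header.
 */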
1947static int ci_process_firmware_header(struct amdgpu_device *adev)
1948{
1949 struct ci_power_info *pi = ci_get_pi(adev);
1950 u32 tmp;
1951 int ret;
1952
1953 ret = amdgpu_ci_read_smc_sram_dword(adev,
1954 SMU7_FIRMWARE_HEADER_LOCATION +
1955 offsetof(SMU7_Firmware_Header, DpmTable),
1956 &tmp, pi->sram_end);
1957 if (ret)
1958 return ret;
1959
1960 pi->dpm_table_start = tmp;
1961
1962 ret = amdgpu_ci_read_smc_sram_dword(adev,
1963 SMU7_FIRMWARE_HEADER_LOCATION +
1964 offsetof(SMU7_Firmware_Header, SoftRegisters),
1965 &tmp, pi->sram_end);
1966 if (ret)
1967 return ret;
1968
1969 pi->soft_regs_start = tmp;
1970
1971 ret = amdgpu_ci_read_smc_sram_dword(adev,
1972 SMU7_FIRMWARE_HEADER_LOCATION +
1973 offsetof(SMU7_Firmware_Header, mcRegisterTable),
1974 &tmp, pi->sram_end);
1975 if (ret)
1976 return ret;
1977
1978 pi->mc_reg_table_start = tmp;
1979
1980 ret = amdgpu_ci_read_smc_sram_dword(adev,
1981 SMU7_FIRMWARE_HEADER_LOCATION +
1982 offsetof(SMU7_Firmware_Header, FanTable),
1983 &tmp, pi->sram_end);
1984 if (ret)
1985 return ret;
1986
1987 pi->fan_table_start = tmp;
1988
1989 ret = amdgpu_ci_read_smc_sram_dword(adev,
1990 SMU7_FIRMWARE_HEADER_LOCATION +
1991 offsetof(SMU7_Firmware_Header, mcArbDramTimingTable),
1992 &tmp, pi->sram_end);
1993 if (ret)
1994 return ret;
1995
1996 pi->arb_table_start = tmp;
1997
1998 return 0;
1999}
2000
2001static void ci_read_clock_registers(struct amdgpu_device *adev)
2002{
2003 struct ci_power_info *pi = ci_get_pi(adev);
2004
2005 pi->clock_registers.cg_spll_func_cntl =
2006 RREG32_SMC(ixCG_SPLL_FUNC_CNTL);
2007 pi->clock_registers.cg_spll_func_cntl_2 =
2008 RREG32_SMC(ixCG_SPLL_FUNC_CNTL_2);
2009 pi->clock_registers.cg_spll_func_cntl_3 =
2010 RREG32_SMC(ixCG_SPLL_FUNC_CNTL_3);
2011 pi->clock_registers.cg_spll_func_cntl_4 =
2012 RREG32_SMC(ixCG_SPLL_FUNC_CNTL_4);
2013 pi->clock_registers.cg_spll_spread_spectrum =
2014 RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM);
2015 pi->clock_registers.cg_spll_spread_spectrum_2 =
2016 RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM_2);
2017 pi->clock_registers.dll_cntl = RREG32(mmDLL_CNTL);
2018 pi->clock_registers.mclk_pwrmgt_cntl = RREG32(mmMCLK_PWRMGT_CNTL);
2019 pi->clock_registers.mpll_ad_func_cntl = RREG32(mmMPLL_AD_FUNC_CNTL);
2020 pi->clock_registers.mpll_dq_func_cntl = RREG32(mmMPLL_DQ_FUNC_CNTL);
2021 pi->clock_registers.mpll_func_cntl = RREG32(mmMPLL_FUNC_CNTL);
2022 pi->clock_registers.mpll_func_cntl_1 = RREG32(mmMPLL_FUNC_CNTL_1);
2023 pi->clock_registers.mpll_func_cntl_2 = RREG32(mmMPLL_FUNC_CNTL_2);
2024 pi->clock_registers.mpll_ss1 = RREG32(mmMPLL_SS1);
2025 pi->clock_registers.mpll_ss2 = RREG32(mmMPLL_SS2);
2026}
2027
2028static void ci_init_sclk_t(struct amdgpu_device *adev)
2029{
2030 struct ci_power_info *pi = ci_get_pi(adev);
2031
2032 pi->low_sclk_interrupt_t = 0;
2033}
2034
2035static void ci_enable_thermal_protection(struct amdgpu_device *adev,
2036 bool enable)
2037{
2038 u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
2039
2040 if (enable)
2041 tmp &= ~GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
2042 else
2043 tmp |= GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
2044 WREG32_SMC(ixGENERAL_PWRMGT, tmp);
2045}
2046
2047static void ci_enable_acpi_power_management(struct amdgpu_device *adev)
2048{
2049 u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
2050
2051 tmp |= GENERAL_PWRMGT__STATIC_PM_EN_MASK;
2052
2053 WREG32_SMC(ixGENERAL_PWRMGT, tmp);
2054}
2055
2056#if 0
2057static int ci_enter_ulp_state(struct amdgpu_device *adev)
2058{
2059
2060 WREG32(mmSMC_MESSAGE_0, PPSMC_MSG_SwitchToMinimumPower);
2061
2062 udelay(25000);
2063
2064 return 0;
2065}
2066
2067static int ci_exit_ulp_state(struct amdgpu_device *adev)
2068{
2069 int i;
2070
2071 WREG32(mmSMC_MESSAGE_0, PPSMC_MSG_ResumeFromMinimumPower);
2072
2073 udelay(7000);
2074
2075 for (i = 0; i < adev->usec_timeout; i++) {
2076 if (RREG32(mmSMC_RESP_0) == 1)
2077 break;
2078 udelay(1000);
2079 }
2080
2081 return 0;
2082}
2083#endif
2084
2085static int ci_notify_smc_display_change(struct amdgpu_device *adev,
2086 bool has_display)
2087{
2088 PPSMC_Msg msg = has_display ? PPSMC_MSG_HasDisplay : PPSMC_MSG_NoDisplay;
2089
2090 return (amdgpu_ci_send_msg_to_smc(adev, msg) == PPSMC_Result_OK) ? 0 : -EINVAL;
2091}
2092
2093static int ci_enable_ds_master_switch(struct amdgpu_device *adev,
2094 bool enable)
2095{
2096 struct ci_power_info *pi = ci_get_pi(adev);
2097
2098 if (enable) {
2099 if (pi->caps_sclk_ds) {
2100 if (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MASTER_DeepSleep_ON) != PPSMC_Result_OK)
2101 return -EINVAL;
2102 } else {
2103 if (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
2104 return -EINVAL;
2105 }
2106 } else {
2107 if (pi->caps_sclk_ds) {
2108 if (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
2109 return -EINVAL;
2110 }
2111 }
2112
2113 return 0;
2114}
2115
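/*
 * Program the display gap parameters: derive the frame time from the
 * refresh rate (60 Hz if unknown) and the pre-VBI window from the
 * vblank time (500 if unreported), then tell the SMC whether a single
 * display is active.
 */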
2116static void ci_program_display_gap(struct amdgpu_device *adev)
2117{
2118 u32 tmp = RREG32_SMC(ixCG_DISPLAY_GAP_CNTL);
2119 u32 pre_vbi_time_in_us;
2120 u32 frame_time_in_us;
2121 u32 ref_clock = adev->clock.spll.reference_freq;
2122 u32 refresh_rate = amdgpu_dpm_get_vrefresh(adev);
2123 u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
2124
2125 tmp &= ~CG_DISPLAY_GAP_CNTL__DISP_GAP_MASK;
2126 if (adev->pm.dpm.new_active_crtc_count > 0)
2127 tmp |= (AMDGPU_PM_DISPLAY_GAP_VBLANK_OR_WM << CG_DISPLAY_GAP_CNTL__DISP_GAP__SHIFT);
2128 else
2129 tmp |= (AMDGPU_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP_GAP__SHIFT);
2130 WREG32_SMC(ixCG_DISPLAY_GAP_CNTL, tmp);
2131
2132 if (refresh_rate == 0)
2133 refresh_rate = 60;
2134 if (vblank_time == 0xffffffff)
2135 vblank_time = 500;
2136 frame_time_in_us = 1000000 / refresh_rate;
2137 pre_vbi_time_in_us =
2138 frame_time_in_us - 200 - vblank_time;
2139 tmp = pre_vbi_time_in_us * (ref_clock / 100);
2140
2141 WREG32_SMC(ixCG_DISPLAY_GAP_CNTL2, tmp);
2142 ci_write_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, PreVBlankGap), 0x64);
2143 ci_write_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us));
2144
2145
2146 ci_notify_smc_display_change(adev, (adev->pm.dpm.new_active_crtc_count == 1));
2147
2148}
2149
2150static void ci_enable_spread_spectrum(struct amdgpu_device *adev, bool enable)
2151{
2152 struct ci_power_info *pi = ci_get_pi(adev);
2153 u32 tmp;
2154
2155 if (enable) {
2156 if (pi->caps_sclk_ss_support) {
2157 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
2158 tmp |= GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN_MASK;
2159 WREG32_SMC(ixGENERAL_PWRMGT, tmp);
2160 }
2161 } else {
2162 tmp = RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM);
2163 tmp &= ~CG_SPLL_SPREAD_SPECTRUM__SSEN_MASK;
2164 WREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM, tmp);
2165
2166 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
2167 tmp &= ~GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN_MASK;
2168 WREG32_SMC(ixGENERAL_PWRMGT, tmp);
2169 }
2170}
2171
2172static void ci_program_sstp(struct amdgpu_device *adev)
2173{
2174 WREG32_SMC(ixCG_STATIC_SCREEN_PARAMETER,
2175 ((CISLANDS_SSTU_DFLT << CG_STATIC_SCREEN_PARAMETER__STATIC_SCREEN_THRESHOLD_UNIT__SHIFT) |
2176 (CISLANDS_SST_DFLT << CG_STATIC_SCREEN_PARAMETER__STATIC_SCREEN_THRESHOLD__SHIFT)));
2177}
2178
2179static void ci_enable_display_gap(struct amdgpu_device *adev)
2180{
2181 u32 tmp = RREG32_SMC(ixCG_DISPLAY_GAP_CNTL);
2182
2183 tmp &= ~(CG_DISPLAY_GAP_CNTL__DISP_GAP_MASK |
2184 CG_DISPLAY_GAP_CNTL__DISP_GAP_MCHG_MASK);
2185 tmp |= ((AMDGPU_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP_GAP__SHIFT) |
2186 (AMDGPU_PM_DISPLAY_GAP_VBLANK << CG_DISPLAY_GAP_CNTL__DISP_GAP_MCHG__SHIFT));
2187
2188 WREG32_SMC(ixCG_DISPLAY_GAP_CNTL, tmp);
2189}
2190
2191static void ci_program_vc(struct amdgpu_device *adev)
2192{
2193 u32 tmp;
2194
2195 tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
2196 tmp &= ~(SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK | SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK);
2197 WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
2198
2199 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, CISLANDS_VRC_DFLT0);
2200 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_1, CISLANDS_VRC_DFLT1);
2201 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_2, CISLANDS_VRC_DFLT2);
2202 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_3, CISLANDS_VRC_DFLT3);
2203 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_4, CISLANDS_VRC_DFLT4);
2204 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_5, CISLANDS_VRC_DFLT5);
2205 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_6, CISLANDS_VRC_DFLT6);
2206 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_7, CISLANDS_VRC_DFLT7);
2207}
2208
2209static void ci_clear_vc(struct amdgpu_device *adev)
2210{
2211 u32 tmp;
2212
2213 tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
2214 tmp |= (SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK | SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK);
2215 WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
2216
2217 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0);
2218 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_1, 0);
2219 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_2, 0);
2220 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_3, 0);
2221 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_4, 0);
2222 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_5, 0);
2223 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_6, 0);
2224 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_7, 0);
2225}
2226
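/*
 * Load the SMC ucode: wait for the boot sequence to finish, halt the
 * SMC, then copy the firmware in.  Skipped if the SMC is already running.
 */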
2227static int ci_upload_firmware(struct amdgpu_device *adev)
2228{
2229 int i, ret;
2230
2231 if (amdgpu_ci_is_smc_running(adev)) {
2232 DRM_INFO("smc is running, no need to load smc firmware\n");
2233 return 0;
2234 }
2235
2236 for (i = 0; i < adev->usec_timeout; i++) {
2237 if (RREG32_SMC(ixRCU_UC_EVENTS) & RCU_UC_EVENTS__boot_seq_done_MASK)
2238 break;
2239 }
2240 WREG32_SMC(ixSMC_SYSCON_MISC_CNTL, 1);
2241
2242 amdgpu_ci_stop_smc_clock(adev);
2243 amdgpu_ci_reset_smc(adev);
2244
2245 ret = amdgpu_ci_load_smc_ucode(adev, SMC_RAM_END);
2246
2247 return ret;
2248
2249}
2250
2251static int ci_get_svi2_voltage_table(struct amdgpu_device *adev,
2252 struct amdgpu_clock_voltage_dependency_table *voltage_dependency_table,
2253 struct atom_voltage_table *voltage_table)
2254{
2255 u32 i;
2256
2257 if (voltage_dependency_table == NULL)
2258 return -EINVAL;
2259
2260 voltage_table->mask_low = 0;
2261 voltage_table->phase_delay = 0;
2262
2263 voltage_table->count = voltage_dependency_table->count;
2264 for (i = 0; i < voltage_table->count; i++) {
2265 voltage_table->entries[i].value = voltage_dependency_table->entries[i].v;
2266 voltage_table->entries[i].smio_low = 0;
2267 }
2268
2269 return 0;
2270}
2271
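/*
 * Build the VDDC/VDDCI/MVDD voltage tables, either from the vbios GPIO
 * LUT or from the SVI2 clock-voltage dependency tables, and trim each
 * one to the number of levels the SMU7 state table can hold.
 */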
2272static int ci_construct_voltage_tables(struct amdgpu_device *adev)
2273{
2274 struct ci_power_info *pi = ci_get_pi(adev);
2275 int ret;
2276
2277 if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
2278 ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDC,
2279 VOLTAGE_OBJ_GPIO_LUT,
2280 &pi->vddc_voltage_table);
2281 if (ret)
2282 return ret;
2283 } else if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
2284 ret = ci_get_svi2_voltage_table(adev,
2285 &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
2286 &pi->vddc_voltage_table);
2287 if (ret)
2288 return ret;
2289 }
2290
2291 if (pi->vddc_voltage_table.count > SMU7_MAX_LEVELS_VDDC)
2292 ci_trim_voltage_table_to_fit_state_table(adev, SMU7_MAX_LEVELS_VDDC,
2293 &pi->vddc_voltage_table);
2294
2295 if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
2296 ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDCI,
2297 VOLTAGE_OBJ_GPIO_LUT,
2298 &pi->vddci_voltage_table);
2299 if (ret)
2300 return ret;
2301 } else if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
2302 ret = ci_get_svi2_voltage_table(adev,
2303 &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
2304 &pi->vddci_voltage_table);
2305 if (ret)
2306 return ret;
2307 }
2308
2309 if (pi->vddci_voltage_table.count > SMU7_MAX_LEVELS_VDDCI)
2310 ci_trim_voltage_table_to_fit_state_table(adev, SMU7_MAX_LEVELS_VDDCI,
2311 &pi->vddci_voltage_table);
2312
2313 if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
2314 ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_MVDDC,
2315 VOLTAGE_OBJ_GPIO_LUT,
2316 &pi->mvdd_voltage_table);
2317 if (ret)
2318 return ret;
2319 } else if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
2320 ret = ci_get_svi2_voltage_table(adev,
2321 &adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
2322 &pi->mvdd_voltage_table);
2323 if (ret)
2324 return ret;
2325 }
2326
2327 if (pi->mvdd_voltage_table.count > SMU7_MAX_LEVELS_MVDD)
2328 ci_trim_voltage_table_to_fit_state_table(adev, SMU7_MAX_LEVELS_MVDD,
2329 &pi->mvdd_voltage_table);
2330
2331 return 0;
2332}
2333
2334static void ci_populate_smc_voltage_table(struct amdgpu_device *adev,
2335 struct atom_voltage_table_entry *voltage_table,
2336 SMU7_Discrete_VoltageLevel *smc_voltage_table)
2337{
2338 int ret;
2339
2340 ret = ci_get_std_voltage_value_sidd(adev, voltage_table,
2341 &smc_voltage_table->StdVoltageHiSidd,
2342 &smc_voltage_table->StdVoltageLoSidd);
2343
2344 if (ret) {
2345 smc_voltage_table->StdVoltageHiSidd = voltage_table->value * VOLTAGE_SCALE;
2346 smc_voltage_table->StdVoltageLoSidd = voltage_table->value * VOLTAGE_SCALE;
2347 }
2348
2349 smc_voltage_table->Voltage = cpu_to_be16(voltage_table->value * VOLTAGE_SCALE);
2350 smc_voltage_table->StdVoltageHiSidd =
2351 cpu_to_be16(smc_voltage_table->StdVoltageHiSidd);
2352 smc_voltage_table->StdVoltageLoSidd =
2353 cpu_to_be16(smc_voltage_table->StdVoltageLoSidd);
2354}
2355
2356static int ci_populate_smc_vddc_table(struct amdgpu_device *adev,
2357 SMU7_Discrete_DpmTable *table)
2358{
2359 struct ci_power_info *pi = ci_get_pi(adev);
2360 unsigned int count;
2361
2362 table->VddcLevelCount = pi->vddc_voltage_table.count;
2363 for (count = 0; count < table->VddcLevelCount; count++) {
2364 ci_populate_smc_voltage_table(adev,
2365 &pi->vddc_voltage_table.entries[count],
2366 &table->VddcLevel[count]);
2367
2368 if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
2369 table->VddcLevel[count].Smio |=
2370 pi->vddc_voltage_table.entries[count].smio_low;
2371 else
2372 table->VddcLevel[count].Smio = 0;
2373 }
2374 table->VddcLevelCount = cpu_to_be32(table->VddcLevelCount);
2375
2376 return 0;
2377}
2378
2379static int ci_populate_smc_vddci_table(struct amdgpu_device *adev,
2380 SMU7_Discrete_DpmTable *table)
2381{
2382 unsigned int count;
2383 struct ci_power_info *pi = ci_get_pi(adev);
2384
2385 table->VddciLevelCount = pi->vddci_voltage_table.count;
2386 for (count = 0; count < table->VddciLevelCount; count++) {
2387 ci_populate_smc_voltage_table(adev,
2388 &pi->vddci_voltage_table.entries[count],
2389 &table->VddciLevel[count]);
2390
2391 if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
2392 table->VddciLevel[count].Smio |=
2393 pi->vddci_voltage_table.entries[count].smio_low;
2394 else
2395 table->VddciLevel[count].Smio = 0;
2396 }
2397 table->VddciLevelCount = cpu_to_be32(table->VddciLevelCount);
2398
2399 return 0;
2400}
2401
2402static int ci_populate_smc_mvdd_table(struct amdgpu_device *adev,
2403 SMU7_Discrete_DpmTable *table)
2404{
2405 struct ci_power_info *pi = ci_get_pi(adev);
2406 unsigned int count;
2407
2408 table->MvddLevelCount = pi->mvdd_voltage_table.count;
2409 for (count = 0; count < table->MvddLevelCount; count++) {
2410 ci_populate_smc_voltage_table(adev,
2411 &pi->mvdd_voltage_table.entries[count],
2412 &table->MvddLevel[count]);
2413
2414 if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
2415 table->MvddLevel[count].Smio |=
2416 pi->mvdd_voltage_table.entries[count].smio_low;
2417 else
2418 table->MvddLevel[count].Smio = 0;
2419 }
2420 table->MvddLevelCount = cpu_to_be32(table->MvddLevelCount);
2421
2422 return 0;
2423}
2424
2425static int ci_populate_smc_voltage_tables(struct amdgpu_device *adev,
2426 SMU7_Discrete_DpmTable *table)
2427{
2428 int ret;
2429
2430 ret = ci_populate_smc_vddc_table(adev, table);
2431 if (ret)
2432 return ret;
2433
2434 ret = ci_populate_smc_vddci_table(adev, table);
2435 if (ret)
2436 return ret;
2437
2438 ret = ci_populate_smc_mvdd_table(adev, table);
2439 if (ret)
2440 return ret;
2441
2442 return 0;
2443}
2444
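/*
 * Pick the MVDD level for the first dependency entry whose clock is
 * >= mclk.  Fails if MVDD is not controlled or no entry matches.
 */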
2445static int ci_populate_mvdd_value(struct amdgpu_device *adev, u32 mclk,
2446 SMU7_Discrete_VoltageLevel *voltage)
2447{
2448 struct ci_power_info *pi = ci_get_pi(adev);
2449 u32 i = 0;
2450
2451 if (pi->mvdd_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
2452 for (i = 0; i < adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count; i++) {
2453 if (mclk <= adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries[i].clk) {
2454 voltage->Voltage = pi->mvdd_voltage_table.entries[i].value;
2455 return 0;
2456 }
2457 }
2458
2459 if (i >= adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count)
2460 return -EINVAL;
2461 }
2462
2463 return -EINVAL;
2464}
2465
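/*
 * Look up the leakage-adjusted (SIDD) high/low standard voltages for a
 * VDDC value in the CAC leakage table, preferring an exact match in the
 * sclk dependency table; falls back to the raw voltage if no table exists.
 */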
2466static int ci_get_std_voltage_value_sidd(struct amdgpu_device *adev,
2467 struct atom_voltage_table_entry *voltage_table,
2468 u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd)
2469{
2470 u16 v_index, idx;
2471 bool voltage_found = false;
2472 *std_voltage_hi_sidd = voltage_table->value * VOLTAGE_SCALE;
2473 *std_voltage_lo_sidd = voltage_table->value * VOLTAGE_SCALE;
2474
2475 if (adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries == NULL)
2476 return -EINVAL;
2477
2478 if (adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
2479 for (v_index = 0; (u32)v_index < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
2480 if (voltage_table->value ==
2481 adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
2482 voltage_found = true;
2483 if ((u32)v_index < adev->pm.dpm.dyn_state.cac_leakage_table.count)
2484 idx = v_index;
2485 else
2486 idx = adev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
2487 *std_voltage_lo_sidd =
2488 adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
2489 *std_voltage_hi_sidd =
2490 adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
2491 break;
2492 }
2493 }
2494
2495 if (!voltage_found) {
2496 for (v_index = 0; (u32)v_index < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
2497 if (voltage_table->value <=
2498 adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
2499 voltage_found = true;
2500 if ((u32)v_index < adev->pm.dpm.dyn_state.cac_leakage_table.count)
2501 idx = v_index;
2502 else
2503 idx = adev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
2504 *std_voltage_lo_sidd =
2505 adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
2506 *std_voltage_hi_sidd =
2507 adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
2508 break;
2509 }
2510 }
2511 }
2512 }
2513
2514 return 0;
2515}
2516
2517static void ci_populate_phase_value_based_on_sclk(struct amdgpu_device *adev,
2518 const struct amdgpu_phase_shedding_limits_table *limits,
2519 u32 sclk,
2520 u32 *phase_shedding)
2521{
2522 unsigned int i;
2523
2524 *phase_shedding = 1;
2525
2526 for (i = 0; i < limits->count; i++) {
2527 if (sclk < limits->entries[i].sclk) {
2528 *phase_shedding = i;
2529 break;
2530 }
2531 }
2532}
2533
2534static void ci_populate_phase_value_based_on_mclk(struct amdgpu_device *adev,
2535 const struct amdgpu_phase_shedding_limits_table *limits,
2536 u32 mclk,
2537 u32 *phase_shedding)
2538{
2539 unsigned int i;
2540
2541 *phase_shedding = 1;
2542
2543 for (i = 0; i < limits->count; i++) {
2544 if (mclk < limits->entries[i].mclk) {
2545 *phase_shedding = i;
2546 break;
2547 }
2548 }
2549}
2550
2551static int ci_init_arb_table_index(struct amdgpu_device *adev)
2552{
2553 struct ci_power_info *pi = ci_get_pi(adev);
2554 u32 tmp;
2555 int ret;
2556
2557 ret = amdgpu_ci_read_smc_sram_dword(adev, pi->arb_table_start,
2558 &tmp, pi->sram_end);
2559 if (ret)
2560 return ret;
2561
2562 tmp &= 0x00FFFFFF;
2563 tmp |= MC_CG_ARB_FREQ_F1 << 24;
2564
2565 return amdgpu_ci_write_smc_sram_dword(adev, pi->arb_table_start,
2566 tmp, pi->sram_end);
2567}
2568
2569static int ci_get_dependency_volt_by_clk(struct amdgpu_device *adev,
2570 struct amdgpu_clock_voltage_dependency_table *allowed_clock_voltage_table,
2571 u32 clock, u32 *voltage)
2572{
2573 u32 i = 0;
2574
2575 if (allowed_clock_voltage_table->count == 0)
2576 return -EINVAL;
2577
2578 for (i = 0; i < allowed_clock_voltage_table->count; i++) {
2579 if (allowed_clock_voltage_table->entries[i].clk >= clock) {
2580 *voltage = allowed_clock_voltage_table->entries[i].v;
2581 return 0;
2582 }
2583 }
2584
2585 *voltage = allowed_clock_voltage_table->entries[i-1].v;
2586
2587 return 0;
2588}
2589
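/*
 * Return the largest deep-sleep divider id i such that sclk >> i is
 * still at or above the minimum engine clock.
 */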
2590static u8 ci_get_sleep_divider_id_from_clock(u32 sclk, u32 min_sclk_in_sr)
2591{
2592 u32 i;
2593 u32 tmp;
2594 u32 min = max(min_sclk_in_sr, (u32)CISLAND_MINIMUM_ENGINE_CLOCK);
2595
2596 if (sclk < min)
2597 return 0;
2598
2599 for (i = CISLAND_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) {
2600 tmp = sclk >> i;
2601 if (tmp >= min || i == 0)
2602 break;
2603 }
2604
2605 return (u8)i;
2606}
2607
2608static int ci_initial_switch_from_arb_f0_to_f1(struct amdgpu_device *adev)
2609{
2610 return ci_copy_and_switch_arb_sets(adev, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
2611}
2612
2613static int ci_reset_to_default(struct amdgpu_device *adev)
2614{
2615 return (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_ResetToDefaults) == PPSMC_Result_OK) ?
2616 0 : -EINVAL;
2617}
2618
2619static int ci_force_switch_to_arb_f0(struct amdgpu_device *adev)
2620{
2621 u32 tmp;
2622
2623 tmp = (RREG32_SMC(ixSMC_SCRATCH9) & 0x0000ff00) >> 8;
2624
2625 if (tmp == MC_CG_ARB_FREQ_F0)
2626 return 0;
2627
2628 return ci_copy_and_switch_arb_sets(adev, tmp, MC_CG_ARB_FREQ_F0);
2629}
2630
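/*
 * Workaround for certain Hawaii boards (0x67B0/0x67B1): patch the DRAM
 * timing2 value for two specific memory clock ranges, keyed off the
 * memory revision in MC_SEQ_MISC0.
 */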
2631static void ci_register_patching_mc_arb(struct amdgpu_device *adev,
2632 const u32 engine_clock,
2633 const u32 memory_clock,
2634 u32 *dram_timimg2)
2635{
2636 bool patch;
2637 u32 tmp, tmp2;
2638
2639 tmp = RREG32(mmMC_SEQ_MISC0);
2640 patch = ((tmp & 0x0000f00) == 0x300);
2641
2642 if (patch &&
2643 ((adev->pdev->device == 0x67B0) ||
2644 (adev->pdev->device == 0x67B1))) {
2645 if ((memory_clock > 100000) && (memory_clock <= 125000)) {
2646 tmp2 = (((0x31 * engine_clock) / 125000) - 1) & 0xff;
2647 *dram_timimg2 &= ~0x00ff0000;
2648 *dram_timimg2 |= tmp2 << 16;
2649 } else if ((memory_clock > 125000) && (memory_clock <= 137500)) {
2650 tmp2 = (((0x36 * engine_clock) / 137500) - 1) & 0xff;
2651 *dram_timimg2 &= ~0x00ff0000;
2652 *dram_timimg2 |= tmp2 << 16;
2653 }
2654 }
2655}
2656
2657static int ci_populate_memory_timing_parameters(struct amdgpu_device *adev,
2658 u32 sclk,
2659 u32 mclk,
2660 SMU7_Discrete_MCArbDramTimingTableEntry *arb_regs)
2661{
2662 u32 dram_timing;
2663 u32 dram_timing2;
2664 u32 burst_time;
2665
2666 amdgpu_atombios_set_engine_dram_timings(adev, sclk, mclk);
2667
2668 dram_timing = RREG32(mmMC_ARB_DRAM_TIMING);
2669 dram_timing2 = RREG32(mmMC_ARB_DRAM_TIMING2);
2670 burst_time = RREG32(mmMC_ARB_BURST_TIME) & MC_ARB_BURST_TIME__STATE0_MASK;
2671
2672 ci_register_patching_mc_arb(adev, sclk, mclk, &dram_timing2);
2673
2674 arb_regs->McArbDramTiming = cpu_to_be32(dram_timing);
2675 arb_regs->McArbDramTiming2 = cpu_to_be32(dram_timing2);
2676 arb_regs->McArbBurstTime = (u8)burst_time;
2677
2678 return 0;
2679}
2680
2681static int ci_do_program_memory_timing_parameters(struct amdgpu_device *adev)
2682{
2683 struct ci_power_info *pi = ci_get_pi(adev);
2684 SMU7_Discrete_MCArbDramTimingTable arb_regs;
2685 u32 i, j;
2686 int ret = 0;
2687
2688 memset(&arb_regs, 0, sizeof(SMU7_Discrete_MCArbDramTimingTable));
2689
2690 for (i = 0; i < pi->dpm_table.sclk_table.count; i++) {
2691 for (j = 0; j < pi->dpm_table.mclk_table.count; j++) {
2692 ret = ci_populate_memory_timing_parameters(adev,
2693 pi->dpm_table.sclk_table.dpm_levels[i].value,
2694 pi->dpm_table.mclk_table.dpm_levels[j].value,
2695 &arb_regs.entries[i][j]);
2696 if (ret)
2697 break;
2698 }
2699 }
2700
2701 if (ret == 0)
2702 ret = amdgpu_ci_copy_bytes_to_smc(adev,
2703 pi->arb_table_start,
2704 (u8 *)&arb_regs,
2705 sizeof(SMU7_Discrete_MCArbDramTimingTable),
2706 pi->sram_end);
2707
2708 return ret;
2709}
2710
2711static int ci_program_memory_timing_parameters(struct amdgpu_device *adev)
2712{
2713 struct ci_power_info *pi = ci_get_pi(adev);
2714
2715 if (pi->need_update_smu7_dpm_table == 0)
2716 return 0;
2717
2718 return ci_do_program_memory_timing_parameters(adev);
2719}
2720
2721static void ci_populate_smc_initial_state(struct amdgpu_device *adev,
2722 struct amdgpu_ps *amdgpu_boot_state)
2723{
2724 struct ci_ps *boot_state = ci_get_ps(amdgpu_boot_state);
2725 struct ci_power_info *pi = ci_get_pi(adev);
2726 u32 level = 0;
2727
2728 for (level = 0; level < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; level++) {
2729 if (adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[level].clk >=
2730 boot_state->performance_levels[0].sclk) {
2731 pi->smc_state_table.GraphicsBootLevel = level;
2732 break;
2733 }
2734 }
2735
2736 for (level = 0; level < adev->pm.dpm.dyn_state.vddc_dependency_on_mclk.count; level++) {
2737 if (adev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries[level].clk >=
2738 boot_state->performance_levels[0].mclk) {
2739 pi->smc_state_table.MemoryBootLevel = level;
2740 break;
2741 }
2742 }
2743}
2744
2745static u32 ci_get_dpm_level_enable_mask_value(struct ci_single_dpm_table *dpm_table)
2746{
2747 u32 i;
2748 u32 mask_value = 0;
2749
2750 for (i = dpm_table->count; i > 0; i--) {
2751 mask_value = mask_value << 1;
2752 if (dpm_table->dpm_levels[i-1].enabled)
2753 mask_value |= 0x1;
2754 else
2755 mask_value &= 0xFFFFFFFE; /* no-op: bit 0 is already clear after the shift */
2756 }
2757
2758 return mask_value;
2759}
2760
2761static void ci_populate_smc_link_level(struct amdgpu_device *adev,
2762 SMU7_Discrete_DpmTable *table)
2763{
2764 struct ci_power_info *pi = ci_get_pi(adev);
2765 struct ci_dpm_table *dpm_table = &pi->dpm_table;
2766 u32 i;
2767
2768 for (i = 0; i < dpm_table->pcie_speed_table.count; i++) {
2769 table->LinkLevel[i].PcieGenSpeed =
2770 (u8)dpm_table->pcie_speed_table.dpm_levels[i].value;
2771 table->LinkLevel[i].PcieLaneCount =
2772 amdgpu_encode_pci_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
2773 table->LinkLevel[i].EnabledForActivity = 1;
2774 table->LinkLevel[i].DownT = cpu_to_be32(5);
2775 table->LinkLevel[i].UpT = cpu_to_be32(30);
2776 }
2777
2778 pi->smc_state_table.LinkLevelCount = (u8)dpm_table->pcie_speed_table.count;
2779 pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
2780 ci_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
2781}
2782
2783static int ci_populate_smc_uvd_level(struct amdgpu_device *adev,
2784 SMU7_Discrete_DpmTable *table)
2785{
2786 u32 count;
2787 struct atom_clock_dividers dividers;
2788 int ret = -EINVAL;
2789
2790 table->UvdLevelCount =
2791 adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count;
2792
2793 for (count = 0; count < table->UvdLevelCount; count++) {
2794 table->UvdLevel[count].VclkFrequency =
2795 adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].vclk;
2796 table->UvdLevel[count].DclkFrequency =
2797 adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].dclk;
2798 table->UvdLevel[count].MinVddc =
2799 adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2800 table->UvdLevel[count].MinVddcPhases = 1;
2801
2802 ret = amdgpu_atombios_get_clock_dividers(adev,
2803 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2804 table->UvdLevel[count].VclkFrequency, false, &dividers);
2805 if (ret)
2806 return ret;
2807
2808 table->UvdLevel[count].VclkDivider = (u8)dividers.post_divider;
2809
2810 ret = amdgpu_atombios_get_clock_dividers(adev,
2811 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2812 table->UvdLevel[count].DclkFrequency, false, &dividers);
2813 if (ret)
2814 return ret;
2815
2816 table->UvdLevel[count].DclkDivider = (u8)dividers.post_divider;
2817
2818 table->UvdLevel[count].VclkFrequency = cpu_to_be32(table->UvdLevel[count].VclkFrequency);
2819 table->UvdLevel[count].DclkFrequency = cpu_to_be32(table->UvdLevel[count].DclkFrequency);
2820 table->UvdLevel[count].MinVddc = cpu_to_be16(table->UvdLevel[count].MinVddc);
2821 }
2822
2823 return ret;
2824}
2825
2826static int ci_populate_smc_vce_level(struct amdgpu_device *adev,
2827 SMU7_Discrete_DpmTable *table)
2828{
2829 u32 count;
2830 struct atom_clock_dividers dividers;
2831 int ret = -EINVAL;
2832
2833 table->VceLevelCount =
2834 adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count;
2835
2836 for (count = 0; count < table->VceLevelCount; count++) {
2837 table->VceLevel[count].Frequency =
2838 adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].evclk;
2839 table->VceLevel[count].MinVoltage =
2840 (u16)adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2841 table->VceLevel[count].MinPhases = 1;
2842
2843 ret = amdgpu_atombios_get_clock_dividers(adev,
2844 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2845 table->VceLevel[count].Frequency, false, &dividers);
2846 if (ret)
2847 return ret;
2848
2849 table->VceLevel[count].Divider = (u8)dividers.post_divider;
2850
2851 table->VceLevel[count].Frequency = cpu_to_be32(table->VceLevel[count].Frequency);
2852 table->VceLevel[count].MinVoltage = cpu_to_be16(table->VceLevel[count].MinVoltage);
2853 }
2854
2855 return ret;
2856
2857}
2858
2859static int ci_populate_smc_acp_level(struct amdgpu_device *adev,
2860 SMU7_Discrete_DpmTable *table)
2861{
2862 u32 count;
2863 struct atom_clock_dividers dividers;
2864 int ret = -EINVAL;
2865
2866 table->AcpLevelCount = (u8)
2867 (adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count);
2868
2869 for (count = 0; count < table->AcpLevelCount; count++) {
2870 table->AcpLevel[count].Frequency =
2871 adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].clk;
2872 table->AcpLevel[count].MinVoltage =
2873 adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].v;
2874 table->AcpLevel[count].MinPhases = 1;
2875
2876 ret = amdgpu_atombios_get_clock_dividers(adev,
2877 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2878 table->AcpLevel[count].Frequency, false, &dividers);
2879 if (ret)
2880 return ret;
2881
2882 table->AcpLevel[count].Divider = (u8)dividers.post_divider;
2883
2884 table->AcpLevel[count].Frequency = cpu_to_be32(table->AcpLevel[count].Frequency);
2885 table->AcpLevel[count].MinVoltage = cpu_to_be16(table->AcpLevel[count].MinVoltage);
2886 }
2887
2888 return ret;
2889}
2890
2891static int ci_populate_smc_samu_level(struct amdgpu_device *adev,
2892 SMU7_Discrete_DpmTable *table)
2893{
2894 u32 count;
2895 struct atom_clock_dividers dividers;
2896 int ret = -EINVAL;
2897
2898 table->SamuLevelCount =
2899 adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count;
2900
2901 for (count = 0; count < table->SamuLevelCount; count++) {
2902 table->SamuLevel[count].Frequency =
2903 adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].clk;
2904 table->SamuLevel[count].MinVoltage =
2905 adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2906 table->SamuLevel[count].MinPhases = 1;
2907
2908 ret = amdgpu_atombios_get_clock_dividers(adev,
2909 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2910 table->SamuLevel[count].Frequency, false, &dividers);
2911 if (ret)
2912 return ret;
2913
2914 table->SamuLevel[count].Divider = (u8)dividers.post_divider;
2915
2916 table->SamuLevel[count].Frequency = cpu_to_be32(table->SamuLevel[count].Frequency);
2917 table->SamuLevel[count].MinVoltage = cpu_to_be16(table->SamuLevel[count].MinVoltage);
2918 }
2919
2920 return ret;
2921}
2922
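/*
 * Compute the MPLL register settings (feedback/post dividers, DLL
 * speed, optional memory spread spectrum) for the requested memory clock.
 */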
2923static int ci_calculate_mclk_params(struct amdgpu_device *adev,
2924 u32 memory_clock,
2925 SMU7_Discrete_MemoryLevel *mclk,
2926 bool strobe_mode,
2927 bool dll_state_on)
2928{
2929 struct ci_power_info *pi = ci_get_pi(adev);
2930 u32 dll_cntl = pi->clock_registers.dll_cntl;
2931 u32 mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
2932 u32 mpll_ad_func_cntl = pi->clock_registers.mpll_ad_func_cntl;
2933 u32 mpll_dq_func_cntl = pi->clock_registers.mpll_dq_func_cntl;
2934 u32 mpll_func_cntl = pi->clock_registers.mpll_func_cntl;
2935 u32 mpll_func_cntl_1 = pi->clock_registers.mpll_func_cntl_1;
2936 u32 mpll_func_cntl_2 = pi->clock_registers.mpll_func_cntl_2;
2937 u32 mpll_ss1 = pi->clock_registers.mpll_ss1;
2938 u32 mpll_ss2 = pi->clock_registers.mpll_ss2;
2939 struct atom_mpll_param mpll_param;
2940 int ret;
2941
2942 ret = amdgpu_atombios_get_memory_pll_dividers(adev, memory_clock, strobe_mode, &mpll_param);
2943 if (ret)
2944 return ret;
2945
2946 mpll_func_cntl &= ~MPLL_FUNC_CNTL__BWCTRL_MASK;
2947 mpll_func_cntl |= (mpll_param.bwcntl << MPLL_FUNC_CNTL__BWCTRL__SHIFT);
2948
2949 mpll_func_cntl_1 &= ~(MPLL_FUNC_CNTL_1__CLKF_MASK | MPLL_FUNC_CNTL_1__CLKFRAC_MASK |
2950 MPLL_FUNC_CNTL_1__VCO_MODE_MASK);
2951 mpll_func_cntl_1 |= (mpll_param.clkf) << MPLL_FUNC_CNTL_1__CLKF__SHIFT |
2952 (mpll_param.clkfrac << MPLL_FUNC_CNTL_1__CLKFRAC__SHIFT) |
2953 (mpll_param.vco_mode << MPLL_FUNC_CNTL_1__VCO_MODE__SHIFT);
2954
2955 mpll_ad_func_cntl &= ~MPLL_AD_FUNC_CNTL__YCLK_POST_DIV_MASK;
2956 mpll_ad_func_cntl |= (mpll_param.post_div << MPLL_AD_FUNC_CNTL__YCLK_POST_DIV__SHIFT);
2957
2958 if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
2959 mpll_dq_func_cntl &= ~(MPLL_DQ_FUNC_CNTL__YCLK_SEL_MASK |
2960 MPLL_AD_FUNC_CNTL__YCLK_POST_DIV_MASK);
2961 mpll_dq_func_cntl |= (mpll_param.yclk_sel << MPLL_DQ_FUNC_CNTL__YCLK_SEL__SHIFT) |
2962 (mpll_param.post_div << MPLL_AD_FUNC_CNTL__YCLK_POST_DIV__SHIFT);
2963 }
2964
2965 if (pi->caps_mclk_ss_support) {
2966 struct amdgpu_atom_ss ss;
2967 u32 freq_nom;
2968 u32 tmp;
2969 u32 reference_clock = adev->clock.mpll.reference_freq;
2970
2971 if (mpll_param.qdr == 1)
2972 freq_nom = memory_clock * 4 * (1 << mpll_param.post_div);
2973 else
2974 freq_nom = memory_clock * 2 * (1 << mpll_param.post_div);
2975
2976 tmp = (freq_nom / reference_clock);
2977 tmp = tmp * tmp;
2978 if (amdgpu_atombios_get_asic_ss_info(adev, &ss,
2979 ASIC_INTERNAL_MEMORY_SS, freq_nom)) {
2980 u32 clks = reference_clock * 5 / ss.rate;
2981 u32 clkv = (u32)((((131 * ss.percentage * ss.rate) / 100) * tmp) / freq_nom);
2982
2983 mpll_ss1 &= ~MPLL_SS1__CLKV_MASK;
2984 mpll_ss1 |= (clkv << MPLL_SS1__CLKV__SHIFT);
2985
2986 mpll_ss2 &= ~MPLL_SS2__CLKS_MASK;
2987 mpll_ss2 |= (clks << MPLL_SS2__CLKS__SHIFT);
2988 }
2989 }
2990
2991 mclk_pwrmgt_cntl &= ~MCLK_PWRMGT_CNTL__DLL_SPEED_MASK;
2992 mclk_pwrmgt_cntl |= (mpll_param.dll_speed << MCLK_PWRMGT_CNTL__DLL_SPEED__SHIFT);
2993
2994 if (dll_state_on)
2995 mclk_pwrmgt_cntl |= MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK |
2996 MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK;
2997 else
2998 mclk_pwrmgt_cntl &= ~(MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK |
2999 MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK);
3000
3001 mclk->MclkFrequency = memory_clock;
3002 mclk->MpllFuncCntl = mpll_func_cntl;
3003 mclk->MpllFuncCntl_1 = mpll_func_cntl_1;
3004 mclk->MpllFuncCntl_2 = mpll_func_cntl_2;
3005 mclk->MpllAdFuncCntl = mpll_ad_func_cntl;
3006 mclk->MpllDqFuncCntl = mpll_dq_func_cntl;
3007 mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl;
3008 mclk->DllCntl = dll_cntl;
3009 mclk->MpllSs1 = mpll_ss1;
3010 mclk->MpllSs2 = mpll_ss2;
3011
3012 return 0;
3013}
3014
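/*
 * Fill in one SMU7 memory level: minimum voltages from the dependency
 * tables, stutter/strobe/EDC enables based on the clock thresholds,
 * MPLL parameters, and finally byte-swap everything for the SMC.
 */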
3015static int ci_populate_single_memory_level(struct amdgpu_device *adev,
3016 u32 memory_clock,
3017 SMU7_Discrete_MemoryLevel *memory_level)
3018{
3019 struct ci_power_info *pi = ci_get_pi(adev);
3020 int ret;
3021 bool dll_state_on;
3022
3023 if (adev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries) {
3024 ret = ci_get_dependency_volt_by_clk(adev,
3025 &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
3026 memory_clock, &memory_level->MinVddc);
3027 if (ret)
3028 return ret;
3029 }
3030
3031 if (adev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries) {
3032 ret = ci_get_dependency_volt_by_clk(adev,
3033 &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
3034 memory_clock, &memory_level->MinVddci);
3035 if (ret)
3036 return ret;
3037 }
3038
3039 if (adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries) {
3040 ret = ci_get_dependency_volt_by_clk(adev,
3041 &adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
3042 memory_clock, &memory_level->MinMvdd);
3043 if (ret)
3044 return ret;
3045 }
3046
3047 memory_level->MinVddcPhases = 1;
3048
3049 if (pi->vddc_phase_shed_control)
3050 ci_populate_phase_value_based_on_mclk(adev,
3051 &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
3052 memory_clock,
3053 &memory_level->MinVddcPhases);
3054
3055 memory_level->EnabledForActivity = 1;
3056 memory_level->EnabledForThrottle = 1;
3057 memory_level->UpH = 0;
3058 memory_level->DownH = 100;
3059 memory_level->VoltageDownH = 0;
3060 memory_level->ActivityLevel = (u16)pi->mclk_activity_target;
3061
3062 memory_level->StutterEnable = false;
3063 memory_level->StrobeEnable = false;
3064 memory_level->EdcReadEnable = false;
3065 memory_level->EdcWriteEnable = false;
3066 memory_level->RttEnable = false;
3067
3068 memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
3069
3070 if (pi->mclk_stutter_mode_threshold &&
3071 (memory_clock <= pi->mclk_stutter_mode_threshold) &&
3072 (!pi->uvd_enabled) &&
3073 (RREG32(mmDPG_PIPE_STUTTER_CONTROL) & DPG_PIPE_STUTTER_CONTROL__STUTTER_ENABLE_MASK) &&
3074 (adev->pm.dpm.new_active_crtc_count <= 2))
3075 memory_level->StutterEnable = true;
3076
3077 if (pi->mclk_strobe_mode_threshold &&
3078 (memory_clock <= pi->mclk_strobe_mode_threshold))
3079 memory_level->StrobeEnable = 1;
3080
3081 if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
3082 memory_level->StrobeRatio =
3083 ci_get_mclk_frequency_ratio(memory_clock, memory_level->StrobeEnable);
3084 if (pi->mclk_edc_enable_threshold &&
3085 (memory_clock > pi->mclk_edc_enable_threshold))
3086 memory_level->EdcReadEnable = true;
3087
3088 if (pi->mclk_edc_wr_enable_threshold &&
3089 (memory_clock > pi->mclk_edc_wr_enable_threshold))
3090 memory_level->EdcWriteEnable = true;
3091
3092 if (memory_level->StrobeEnable) {
3093 if (ci_get_mclk_frequency_ratio(memory_clock, true) >=
3094 ((RREG32(mmMC_SEQ_MISC7) >> 16) & 0xf))
3095 dll_state_on = (RREG32(mmMC_SEQ_MISC5) >> 1) & 0x1;
3096 else
3097 dll_state_on = (RREG32(mmMC_SEQ_MISC6) >> 1) & 0x1;
3098 } else {
3099 dll_state_on = pi->dll_default_on;
3100 }
3101 } else {
3102 memory_level->StrobeRatio = ci_get_ddr3_mclk_frequency_ratio(memory_clock);
3103 dll_state_on = (RREG32(mmMC_SEQ_MISC5) >> 1) & 0x1;
3104 }
3105
3106 ret = ci_calculate_mclk_params(adev, memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);
3107 if (ret)
3108 return ret;
3109
3110 memory_level->MinVddc = cpu_to_be32(memory_level->MinVddc * VOLTAGE_SCALE);
3111 memory_level->MinVddcPhases = cpu_to_be32(memory_level->MinVddcPhases);
3112 memory_level->MinVddci = cpu_to_be32(memory_level->MinVddci * VOLTAGE_SCALE);
3113 memory_level->MinMvdd = cpu_to_be32(memory_level->MinMvdd * VOLTAGE_SCALE);
3114
3115 memory_level->MclkFrequency = cpu_to_be32(memory_level->MclkFrequency);
3116 memory_level->ActivityLevel = cpu_to_be16(memory_level->ActivityLevel);
3117 memory_level->MpllFuncCntl = cpu_to_be32(memory_level->MpllFuncCntl);
3118 memory_level->MpllFuncCntl_1 = cpu_to_be32(memory_level->MpllFuncCntl_1);
3119 memory_level->MpllFuncCntl_2 = cpu_to_be32(memory_level->MpllFuncCntl_2);
3120 memory_level->MpllAdFuncCntl = cpu_to_be32(memory_level->MpllAdFuncCntl);
3121 memory_level->MpllDqFuncCntl = cpu_to_be32(memory_level->MpllDqFuncCntl);
3122 memory_level->MclkPwrmgtCntl = cpu_to_be32(memory_level->MclkPwrmgtCntl);
3123 memory_level->DllCntl = cpu_to_be32(memory_level->DllCntl);
3124 memory_level->MpllSs1 = cpu_to_be32(memory_level->MpllSs1);
3125 memory_level->MpllSs2 = cpu_to_be32(memory_level->MpllSs2);
3126
3127 return 0;
3128}
3129
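/*
 * Fill in the ACPI (lowest power) levels: the engine clock falls back
 * to the SPLL reference clock with the PLL powered down, and the
 * memory DLLs are reset and powered down.
 */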
3130static int ci_populate_smc_acpi_level(struct amdgpu_device *adev,
3131 SMU7_Discrete_DpmTable *table)
3132{
3133 struct ci_power_info *pi = ci_get_pi(adev);
3134 struct atom_clock_dividers dividers;
3135 SMU7_Discrete_VoltageLevel voltage_level;
3136 u32 spll_func_cntl = pi->clock_registers.cg_spll_func_cntl;
3137 u32 spll_func_cntl_2 = pi->clock_registers.cg_spll_func_cntl_2;
3138 u32 dll_cntl = pi->clock_registers.dll_cntl;
3139 u32 mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
3140 int ret;
3141
3142 table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
3143
3144 if (pi->acpi_vddc)
3145 table->ACPILevel.MinVddc = cpu_to_be32(pi->acpi_vddc * VOLTAGE_SCALE);
3146 else
3147 table->ACPILevel.MinVddc = cpu_to_be32(pi->min_vddc_in_pp_table * VOLTAGE_SCALE);
3148
3149 table->ACPILevel.MinVddcPhases = pi->vddc_phase_shed_control ? 0 : 1;
3150
3151 table->ACPILevel.SclkFrequency = adev->clock.spll.reference_freq;
3152
3153 ret = amdgpu_atombios_get_clock_dividers(adev,
3154 COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
3155 table->ACPILevel.SclkFrequency, false, &dividers);
3156 if (ret)
3157 return ret;
3158
3159 table->ACPILevel.SclkDid = (u8)dividers.post_divider;
3160 table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
3161 table->ACPILevel.DeepSleepDivId = 0;
3162
3163 spll_func_cntl &= ~CG_SPLL_FUNC_CNTL__SPLL_PWRON_MASK;
3164 spll_func_cntl |= CG_SPLL_FUNC_CNTL__SPLL_RESET_MASK;
3165
3166 spll_func_cntl_2 &= ~CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL_MASK;
3167 spll_func_cntl_2 |= (4 << CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL__SHIFT);
3168
3169 table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
3170 table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
3171 table->ACPILevel.CgSpllFuncCntl3 = pi->clock_registers.cg_spll_func_cntl_3;
3172 table->ACPILevel.CgSpllFuncCntl4 = pi->clock_registers.cg_spll_func_cntl_4;
3173 table->ACPILevel.SpllSpreadSpectrum = pi->clock_registers.cg_spll_spread_spectrum;
3174 table->ACPILevel.SpllSpreadSpectrum2 = pi->clock_registers.cg_spll_spread_spectrum_2;
3175 table->ACPILevel.CcPwrDynRm = 0;
3176 table->ACPILevel.CcPwrDynRm1 = 0;
3177
3178 table->ACPILevel.Flags = cpu_to_be32(table->ACPILevel.Flags);
3179 table->ACPILevel.MinVddcPhases = cpu_to_be32(table->ACPILevel.MinVddcPhases);
3180 table->ACPILevel.SclkFrequency = cpu_to_be32(table->ACPILevel.SclkFrequency);
3181 table->ACPILevel.CgSpllFuncCntl = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl);
3182 table->ACPILevel.CgSpllFuncCntl2 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl2);
3183 table->ACPILevel.CgSpllFuncCntl3 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl3);
3184 table->ACPILevel.CgSpllFuncCntl4 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl4);
3185 table->ACPILevel.SpllSpreadSpectrum = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum);
3186 table->ACPILevel.SpllSpreadSpectrum2 = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum2);
3187 table->ACPILevel.CcPwrDynRm = cpu_to_be32(table->ACPILevel.CcPwrDynRm);
3188 table->ACPILevel.CcPwrDynRm1 = cpu_to_be32(table->ACPILevel.CcPwrDynRm1);
3189
3190 table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc;
3191 table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;
3192
3193 if (pi->vddci_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
3194 if (pi->acpi_vddci)
3195 table->MemoryACPILevel.MinVddci =
3196 cpu_to_be32(pi->acpi_vddci * VOLTAGE_SCALE);
3197 else
3198 table->MemoryACPILevel.MinVddci =
3199 cpu_to_be32(pi->min_vddci_in_pp_table * VOLTAGE_SCALE);
3200 }
3201
3202 if (ci_populate_mvdd_value(adev, 0, &voltage_level))
3203 table->MemoryACPILevel.MinMvdd = 0;
3204 else
3205 table->MemoryACPILevel.MinMvdd =
3206 cpu_to_be32(voltage_level.Voltage * VOLTAGE_SCALE);
3207
3208 mclk_pwrmgt_cntl |= MCLK_PWRMGT_CNTL__MRDCK0_RESET_MASK |
3209 MCLK_PWRMGT_CNTL__MRDCK1_RESET_MASK;
3210 mclk_pwrmgt_cntl &= ~(MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK |
3211 MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK);
3212
3213 dll_cntl &= ~(DLL_CNTL__MRDCK0_BYPASS_MASK | DLL_CNTL__MRDCK1_BYPASS_MASK);
3214
3215 table->MemoryACPILevel.DllCntl = cpu_to_be32(dll_cntl);
3216 table->MemoryACPILevel.MclkPwrmgtCntl = cpu_to_be32(mclk_pwrmgt_cntl);
3217 table->MemoryACPILevel.MpllAdFuncCntl =
3218 cpu_to_be32(pi->clock_registers.mpll_ad_func_cntl);
3219 table->MemoryACPILevel.MpllDqFuncCntl =
3220 cpu_to_be32(pi->clock_registers.mpll_dq_func_cntl);
3221 table->MemoryACPILevel.MpllFuncCntl =
3222 cpu_to_be32(pi->clock_registers.mpll_func_cntl);
3223 table->MemoryACPILevel.MpllFuncCntl_1 =
3224 cpu_to_be32(pi->clock_registers.mpll_func_cntl_1);
3225 table->MemoryACPILevel.MpllFuncCntl_2 =
3226 cpu_to_be32(pi->clock_registers.mpll_func_cntl_2);
3227 table->MemoryACPILevel.MpllSs1 = cpu_to_be32(pi->clock_registers.mpll_ss1);
3228 table->MemoryACPILevel.MpllSs2 = cpu_to_be32(pi->clock_registers.mpll_ss2);
3229
3230 table->MemoryACPILevel.EnabledForThrottle = 0;
3231 table->MemoryACPILevel.EnabledForActivity = 0;
3232 table->MemoryACPILevel.UpH = 0;
3233 table->MemoryACPILevel.DownH = 100;
3234 table->MemoryACPILevel.VoltageDownH = 0;
3235 table->MemoryACPILevel.ActivityLevel =
3236 cpu_to_be16((u16)pi->mclk_activity_target);
3237
3238 table->MemoryACPILevel.StutterEnable = false;
3239 table->MemoryACPILevel.StrobeEnable = false;
3240 table->MemoryACPILevel.EdcReadEnable = false;
3241 table->MemoryACPILevel.EdcWriteEnable = false;
3242 table->MemoryACPILevel.RttEnable = false;
3243
3244 return 0;
3245}
3246
3247
3248static int ci_enable_ulv(struct amdgpu_device *adev, bool enable)
3249{
3250 struct ci_power_info *pi = ci_get_pi(adev);
3251 struct ci_ulv_parm *ulv = &pi->ulv;
3252
3253 if (ulv->supported) {
3254 if (enable)
3255 return (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableULV) == PPSMC_Result_OK) ?
3256 0 : -EINVAL;
3257 else
3258 return (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DisableULV) == PPSMC_Result_OK) ?
3259 0 : -EINVAL;
3260 }
3261
3262 return 0;
3263}
3264
3265static int ci_populate_ulv_level(struct amdgpu_device *adev,
3266 SMU7_Discrete_Ulv *state)
3267{
3268 struct ci_power_info *pi = ci_get_pi(adev);
3269 u16 ulv_voltage = adev->pm.dpm.backbias_response_time;
3270
3271 state->CcPwrDynRm = 0;
3272 state->CcPwrDynRm1 = 0;
3273
3274 if (ulv_voltage == 0) {
3275 pi->ulv.supported = false;
3276 return 0;
3277 }
3278
3279 if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
3280 if (ulv_voltage > adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
3281 state->VddcOffset = 0;
3282 else
3283 state->VddcOffset =
3284 adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage;
3285 } else {
3286 if (ulv_voltage > adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
3287 state->VddcOffsetVid = 0;
3288 else
3289 state->VddcOffsetVid = (u8)
3290 ((adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage) *
3291 VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
3292 }
3293 state->VddcPhase = pi->vddc_phase_shed_control ? 0 : 1;
3294
3295 state->CcPwrDynRm = cpu_to_be32(state->CcPwrDynRm);
3296 state->CcPwrDynRm1 = cpu_to_be32(state->CcPwrDynRm1);
3297 state->VddcOffset = cpu_to_be16(state->VddcOffset);
3298
3299 return 0;
3300}
3301
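/*
 * Compute the SPLL register settings (feedback divider, optional engine
 * spread spectrum) for the requested engine clock.
 */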
3302static int ci_calculate_sclk_params(struct amdgpu_device *adev,
3303 u32 engine_clock,
3304 SMU7_Discrete_GraphicsLevel *sclk)
3305{
3306 struct ci_power_info *pi = ci_get_pi(adev);
3307 struct atom_clock_dividers dividers;
3308 u32 spll_func_cntl_3 = pi->clock_registers.cg_spll_func_cntl_3;
3309 u32 spll_func_cntl_4 = pi->clock_registers.cg_spll_func_cntl_4;
3310 u32 cg_spll_spread_spectrum = pi->clock_registers.cg_spll_spread_spectrum;
3311 u32 cg_spll_spread_spectrum_2 = pi->clock_registers.cg_spll_spread_spectrum_2;
3312 u32 reference_clock = adev->clock.spll.reference_freq;
3313 u32 reference_divider;
3314 u32 fbdiv;
3315 int ret;
3316
3317 ret = amdgpu_atombios_get_clock_dividers(adev,
3318 COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
3319 engine_clock, false, &dividers);
3320 if (ret)
3321 return ret;
3322
3323 reference_divider = 1 + dividers.ref_div;
3324 fbdiv = dividers.fb_div & 0x3FFFFFF;
3325
3326 spll_func_cntl_3 &= ~CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV_MASK;
3327 spll_func_cntl_3 |= (fbdiv << CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV__SHIFT);
3328 spll_func_cntl_3 |= CG_SPLL_FUNC_CNTL_3__SPLL_DITHEN_MASK;
3329
3330 if (pi->caps_sclk_ss_support) {
3331 struct amdgpu_atom_ss ss;
3332 u32 vco_freq = engine_clock * dividers.post_div;
3333
3334 if (amdgpu_atombios_get_asic_ss_info(adev, &ss,
3335 ASIC_INTERNAL_ENGINE_SS, vco_freq)) {
3336 u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
3337 u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000);
3338
3339 cg_spll_spread_spectrum &= ~(CG_SPLL_SPREAD_SPECTRUM__CLKS_MASK | CG_SPLL_SPREAD_SPECTRUM__SSEN_MASK);
3340 cg_spll_spread_spectrum |= (clk_s << CG_SPLL_SPREAD_SPECTRUM__CLKS__SHIFT);
3341 cg_spll_spread_spectrum |= (1 << CG_SPLL_SPREAD_SPECTRUM__SSEN__SHIFT);
3342
3343 cg_spll_spread_spectrum_2 &= ~CG_SPLL_SPREAD_SPECTRUM_2__CLKV_MASK;
3344 cg_spll_spread_spectrum_2 |= (clk_v << CG_SPLL_SPREAD_SPECTRUM_2__CLKV__SHIFT);
3345 }
3346 }
3347
3348 sclk->SclkFrequency = engine_clock;
3349 sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
3350 sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
3351 sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
3352 sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
3353 sclk->SclkDid = (u8)dividers.post_divider;
3354
3355 return 0;
3356}
3357
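/*
 * Build one SMU7 graphics DPM level: SPLL parameters for the requested
 * sclk, minimum VDDC from the vddc-vs-sclk dependency table, activity
 * target and deep-sleep divider.  Multi-byte fields are byte-swapped at
 * the end because the SMC expects big-endian data.
 */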
3358static int ci_populate_single_graphic_level(struct amdgpu_device *adev,
3359 u32 engine_clock,
3360 u16 sclk_activity_level_t,
3361 SMU7_Discrete_GraphicsLevel *graphic_level)
3362{
3363 struct ci_power_info *pi = ci_get_pi(adev);
3364 int ret;
3365
3366 ret = ci_calculate_sclk_params(adev, engine_clock, graphic_level);
3367 if (ret)
3368 return ret;
3369
3370 ret = ci_get_dependency_volt_by_clk(adev,
3371 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
3372 engine_clock, &graphic_level->MinVddc);
3373 if (ret)
3374 return ret;
3375
3376 graphic_level->SclkFrequency = engine_clock;
3377
3378 graphic_level->Flags = 0;
3379 graphic_level->MinVddcPhases = 1;
3380
3381 if (pi->vddc_phase_shed_control)
3382 ci_populate_phase_value_based_on_sclk(adev,
3383 &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
3384 engine_clock,
3385 &graphic_level->MinVddcPhases);
3386
3387 graphic_level->ActivityLevel = sclk_activity_level_t;
3388
3389 graphic_level->CcPwrDynRm = 0;
3390 graphic_level->CcPwrDynRm1 = 0;
3391 graphic_level->EnabledForThrottle = 1;
3392 graphic_level->UpH = 0;
3393 graphic_level->DownH = 0;
3394 graphic_level->VoltageDownH = 0;
3395 graphic_level->PowerThrottle = 0;
3396
3397 if (pi->caps_sclk_ds)
3398		graphic_level->DeepSleepDivId = ci_get_sleep_divider_id_from_clock(engine_clock,
3399								    CISLAND_MINIMUM_ENGINE_CLOCK);
3400
3401 graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
3402
3403 graphic_level->Flags = cpu_to_be32(graphic_level->Flags);
3404 graphic_level->MinVddc = cpu_to_be32(graphic_level->MinVddc * VOLTAGE_SCALE);
3405 graphic_level->MinVddcPhases = cpu_to_be32(graphic_level->MinVddcPhases);
3406 graphic_level->SclkFrequency = cpu_to_be32(graphic_level->SclkFrequency);
3407 graphic_level->ActivityLevel = cpu_to_be16(graphic_level->ActivityLevel);
3408 graphic_level->CgSpllFuncCntl3 = cpu_to_be32(graphic_level->CgSpllFuncCntl3);
3409 graphic_level->CgSpllFuncCntl4 = cpu_to_be32(graphic_level->CgSpllFuncCntl4);
3410 graphic_level->SpllSpreadSpectrum = cpu_to_be32(graphic_level->SpllSpreadSpectrum);
3411 graphic_level->SpllSpreadSpectrum2 = cpu_to_be32(graphic_level->SpllSpreadSpectrum2);
3412 graphic_level->CcPwrDynRm = cpu_to_be32(graphic_level->CcPwrDynRm);
3413 graphic_level->CcPwrDynRm1 = cpu_to_be32(graphic_level->CcPwrDynRm1);
3414
3415 return 0;
3416}
3417
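/*
 * Populate a level for every entry in the sclk DPM table and copy the
 * whole GraphicsLevel array into SMC SRAM at the offset recorded in the
 * firmware's DPM table.
 */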
3418static int ci_populate_all_graphic_levels(struct amdgpu_device *adev)
3419{
3420 struct ci_power_info *pi = ci_get_pi(adev);
3421 struct ci_dpm_table *dpm_table = &pi->dpm_table;
3422 u32 level_array_address = pi->dpm_table_start +
3423 offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
3424 u32 level_array_size = sizeof(SMU7_Discrete_GraphicsLevel) *
3425 SMU7_MAX_LEVELS_GRAPHICS;
3426 SMU7_Discrete_GraphicsLevel *levels = pi->smc_state_table.GraphicsLevel;
3427 u32 i, ret;
3428
3429 memset(levels, 0, level_array_size);
3430
3431 for (i = 0; i < dpm_table->sclk_table.count; i++) {
3432 ret = ci_populate_single_graphic_level(adev,
3433 dpm_table->sclk_table.dpm_levels[i].value,
3434 (u16)pi->activity_target[i],
3435 &pi->smc_state_table.GraphicsLevel[i]);
3436 if (ret)
3437 return ret;
3438 if (i > 1)
3439 pi->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
3440 if (i == (dpm_table->sclk_table.count - 1))
3441 pi->smc_state_table.GraphicsLevel[i].DisplayWatermark =
3442 PPSMC_DISPLAY_WATERMARK_HIGH;
3443 }
3444	pi->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
3445
3446 pi->smc_state_table.GraphicsDpmLevelCount = (u8)dpm_table->sclk_table.count;
3447 pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
3448 ci_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
3449
3450 ret = amdgpu_ci_copy_bytes_to_smc(adev, level_array_address,
3451 (u8 *)levels, level_array_size,
3452 pi->sram_end);
3453 if (ret)
3454 return ret;
3455
3456 return 0;
3457}
3458
3459static int ci_populate_ulv_state(struct amdgpu_device *adev,
3460 SMU7_Discrete_Ulv *ulv_level)
3461{
3462 return ci_populate_ulv_level(adev, ulv_level);
3463}
3464
3465static int ci_populate_all_memory_levels(struct amdgpu_device *adev)
3466{
3467 struct ci_power_info *pi = ci_get_pi(adev);
3468 struct ci_dpm_table *dpm_table = &pi->dpm_table;
3469 u32 level_array_address = pi->dpm_table_start +
3470 offsetof(SMU7_Discrete_DpmTable, MemoryLevel);
3471 u32 level_array_size = sizeof(SMU7_Discrete_MemoryLevel) *
3472 SMU7_MAX_LEVELS_MEMORY;
3473 SMU7_Discrete_MemoryLevel *levels = pi->smc_state_table.MemoryLevel;
3474 u32 i, ret;
3475
3476 memset(levels, 0, level_array_size);
3477
3478 for (i = 0; i < dpm_table->mclk_table.count; i++) {
3479 if (dpm_table->mclk_table.dpm_levels[i].value == 0)
3480 return -EINVAL;
3481 ret = ci_populate_single_memory_level(adev,
3482 dpm_table->mclk_table.dpm_levels[i].value,
3483 &pi->smc_state_table.MemoryLevel[i]);
3484 if (ret)
3485 return ret;
3486 }
3487
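	/* 0x67B0/0x67B1 are Hawaii device IDs; keep level 1's VDDC tied to level 0 */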
3488 if ((dpm_table->mclk_table.count >= 2) &&
3489 ((adev->pdev->device == 0x67B0) || (adev->pdev->device == 0x67B1))) {
3490 pi->smc_state_table.MemoryLevel[1].MinVddc =
3491 pi->smc_state_table.MemoryLevel[0].MinVddc;
3492 pi->smc_state_table.MemoryLevel[1].MinVddcPhases =
3493 pi->smc_state_table.MemoryLevel[0].MinVddcPhases;
3494 }
3495
3496 pi->smc_state_table.MemoryLevel[0].ActivityLevel = cpu_to_be16(0x1F);
3497
3498 pi->smc_state_table.MemoryDpmLevelCount = (u8)dpm_table->mclk_table.count;
3499 pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
3500 ci_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
3501
3502 pi->smc_state_table.MemoryLevel[dpm_table->mclk_table.count - 1].DisplayWatermark =
3503 PPSMC_DISPLAY_WATERMARK_HIGH;
3504
3505 ret = amdgpu_ci_copy_bytes_to_smc(adev, level_array_address,
3506 (u8 *)levels, level_array_size,
3507 pi->sram_end);
3508 if (ret)
3509 return ret;
3510
3511 return 0;
3512}
3513
3514static void ci_reset_single_dpm_table(struct amdgpu_device *adev,
3515 struct ci_single_dpm_table* dpm_table,
3516 u32 count)
3517{
3518 u32 i;
3519
3520 dpm_table->count = count;
3521 for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++)
3522 dpm_table->dpm_levels[i].enabled = false;
3523}
3524
3525static void ci_setup_pcie_table_entry(struct ci_single_dpm_table* dpm_table,
3526 u32 index, u32 pcie_gen, u32 pcie_lanes)
3527{
3528 dpm_table->dpm_levels[index].value = pcie_gen;
3529 dpm_table->dpm_levels[index].param1 = pcie_lanes;
3530 dpm_table->dpm_levels[index].enabled = true;
3531}
3532
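/*
 * Build the fixed six-entry PCIe DPM table, stepping from the minimum
 * powersaving gen/lane point up to the maximum performance point.  If
 * only one of the performance/powersaving range sets is valid, it is
 * cloned into the other.
 */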
3533static int ci_setup_default_pcie_tables(struct amdgpu_device *adev)
3534{
3535 struct ci_power_info *pi = ci_get_pi(adev);
3536
3537 if (!pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels)
3538 return -EINVAL;
3539
3540 if (pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels) {
3541 pi->pcie_gen_powersaving = pi->pcie_gen_performance;
3542 pi->pcie_lane_powersaving = pi->pcie_lane_performance;
3543 } else if (!pi->use_pcie_performance_levels && pi->use_pcie_powersaving_levels) {
3544 pi->pcie_gen_performance = pi->pcie_gen_powersaving;
3545 pi->pcie_lane_performance = pi->pcie_lane_powersaving;
3546 }
3547
3548 ci_reset_single_dpm_table(adev,
3549 &pi->dpm_table.pcie_speed_table,
3550 SMU7_MAX_LEVELS_LINK);
3551
3552 if (adev->asic_type == CHIP_BONAIRE)
3553 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
3554 pi->pcie_gen_powersaving.min,
3555 pi->pcie_lane_powersaving.max);
3556 else
3557 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
3558 pi->pcie_gen_powersaving.min,
3559 pi->pcie_lane_powersaving.min);
3560 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 1,
3561 pi->pcie_gen_performance.min,
3562 pi->pcie_lane_performance.min);
3563 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 2,
3564 pi->pcie_gen_powersaving.min,
3565 pi->pcie_lane_powersaving.max);
3566 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 3,
3567 pi->pcie_gen_performance.min,
3568 pi->pcie_lane_performance.max);
3569 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 4,
3570 pi->pcie_gen_powersaving.max,
3571 pi->pcie_lane_powersaving.max);
3572 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 5,
3573 pi->pcie_gen_performance.max,
3574 pi->pcie_lane_performance.max);
3575
3576 pi->dpm_table.pcie_speed_table.count = 6;
3577
3578 return 0;
3579}
3580
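/*
 * Seed the driver-side DPM tables from the VBIOS dependency tables,
 * collapsing consecutive duplicate clocks so every sclk/mclk level is
 * unique.  A copy of the result is kept as the "golden" table so the
 * defaults can be restored after overdrive changes.
 */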
3581static int ci_setup_default_dpm_tables(struct amdgpu_device *adev)
3582{
3583 struct ci_power_info *pi = ci_get_pi(adev);
3584 struct amdgpu_clock_voltage_dependency_table *allowed_sclk_vddc_table =
3585 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
3586 struct amdgpu_clock_voltage_dependency_table *allowed_mclk_table =
3587 &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
3588 struct amdgpu_cac_leakage_table *std_voltage_table =
3589 &adev->pm.dpm.dyn_state.cac_leakage_table;
3590 u32 i;
3591
3592 if (allowed_sclk_vddc_table == NULL)
3593 return -EINVAL;
3594 if (allowed_sclk_vddc_table->count < 1)
3595 return -EINVAL;
3596 if (allowed_mclk_table == NULL)
3597 return -EINVAL;
3598 if (allowed_mclk_table->count < 1)
3599 return -EINVAL;
3600
3601 memset(&pi->dpm_table, 0, sizeof(struct ci_dpm_table));
3602
3603 ci_reset_single_dpm_table(adev,
3604 &pi->dpm_table.sclk_table,
3605 SMU7_MAX_LEVELS_GRAPHICS);
3606 ci_reset_single_dpm_table(adev,
3607 &pi->dpm_table.mclk_table,
3608 SMU7_MAX_LEVELS_MEMORY);
3609 ci_reset_single_dpm_table(adev,
3610 &pi->dpm_table.vddc_table,
3611 SMU7_MAX_LEVELS_VDDC);
3612 ci_reset_single_dpm_table(adev,
3613 &pi->dpm_table.vddci_table,
3614 SMU7_MAX_LEVELS_VDDCI);
3615 ci_reset_single_dpm_table(adev,
3616 &pi->dpm_table.mvdd_table,
3617 SMU7_MAX_LEVELS_MVDD);
3618
3619 pi->dpm_table.sclk_table.count = 0;
3620 for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
3621 if ((i == 0) ||
3622 (pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count-1].value !=
3623 allowed_sclk_vddc_table->entries[i].clk)) {
3624 pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].value =
3625 allowed_sclk_vddc_table->entries[i].clk;
3626 pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].enabled =
3627				(i == 0);
3628 pi->dpm_table.sclk_table.count++;
3629 }
3630 }
3631
3632 pi->dpm_table.mclk_table.count = 0;
3633 for (i = 0; i < allowed_mclk_table->count; i++) {
3634 if ((i == 0) ||
3635 (pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count-1].value !=
3636 allowed_mclk_table->entries[i].clk)) {
3637 pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].value =
3638 allowed_mclk_table->entries[i].clk;
3639 pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].enabled =
3640				(i == 0);
3641 pi->dpm_table.mclk_table.count++;
3642 }
3643 }
3644
3645 for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
3646 pi->dpm_table.vddc_table.dpm_levels[i].value =
3647 allowed_sclk_vddc_table->entries[i].v;
3648 pi->dpm_table.vddc_table.dpm_levels[i].param1 =
3649 std_voltage_table->entries[i].leakage;
3650 pi->dpm_table.vddc_table.dpm_levels[i].enabled = true;
3651 }
3652 pi->dpm_table.vddc_table.count = allowed_sclk_vddc_table->count;
3653
3654 allowed_mclk_table = &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
3655 if (allowed_mclk_table) {
3656 for (i = 0; i < allowed_mclk_table->count; i++) {
3657 pi->dpm_table.vddci_table.dpm_levels[i].value =
3658 allowed_mclk_table->entries[i].v;
3659 pi->dpm_table.vddci_table.dpm_levels[i].enabled = true;
3660 }
3661 pi->dpm_table.vddci_table.count = allowed_mclk_table->count;
3662 }
3663
3664 allowed_mclk_table = &adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk;
3665 if (allowed_mclk_table) {
3666 for (i = 0; i < allowed_mclk_table->count; i++) {
3667 pi->dpm_table.mvdd_table.dpm_levels[i].value =
3668 allowed_mclk_table->entries[i].v;
3669 pi->dpm_table.mvdd_table.dpm_levels[i].enabled = true;
3670 }
3671 pi->dpm_table.mvdd_table.count = allowed_mclk_table->count;
3672 }
3673
3674 ci_setup_default_pcie_tables(adev);
3675
3676	/* save a copy of the default DPM table */
3677 memcpy(&(pi->golden_dpm_table), &(pi->dpm_table),
3678 sizeof(struct ci_dpm_table));
3679
3680	return 0;
3681}
3682
3683static int ci_find_boot_level(struct ci_single_dpm_table *table,
3684 u32 value, u32 *boot_level)
3685{
3686 u32 i;
3687 int ret = -EINVAL;
3688
3689 for(i = 0; i < table->count; i++) {
3690 if (value == table->dpm_levels[i].value) {
3691 *boot_level = i;
3692 ret = 0;
3693 }
3694 }
3695
3696 return ret;
3697}
3698
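/*
 * Capture the boot-time graphics level settings as the default gfx and
 * compute power profiles; the compute profile is then biased toward the
 * two highest sclk levels with 0 up / 5 down hysteresis.
 */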
3699static void ci_save_default_power_profile(struct amdgpu_device *adev)
3700{
3701 struct ci_power_info *pi = ci_get_pi(adev);
3702 struct SMU7_Discrete_GraphicsLevel *levels =
3703 pi->smc_state_table.GraphicsLevel;
3704 uint32_t min_level = 0;
3705
3706 pi->default_gfx_power_profile.activity_threshold =
3707 be16_to_cpu(levels[0].ActivityLevel);
3708 pi->default_gfx_power_profile.up_hyst = levels[0].UpH;
3709 pi->default_gfx_power_profile.down_hyst = levels[0].DownH;
3710 pi->default_gfx_power_profile.type = AMD_PP_GFX_PROFILE;
3711
3712 pi->default_compute_power_profile = pi->default_gfx_power_profile;
3713 pi->default_compute_power_profile.type = AMD_PP_COMPUTE_PROFILE;
3714
3715 /* Optimize compute power profile: Use only highest
3716 * 2 power levels (if more than 2 are available), Hysteresis:
3717 * 0ms up, 5ms down
3718 */
3719 if (pi->smc_state_table.GraphicsDpmLevelCount > 2)
3720 min_level = pi->smc_state_table.GraphicsDpmLevelCount - 2;
3721 else if (pi->smc_state_table.GraphicsDpmLevelCount == 2)
3722 min_level = 1;
3723 pi->default_compute_power_profile.min_sclk =
3724 be32_to_cpu(levels[min_level].SclkFrequency);
3725
3726 pi->default_compute_power_profile.up_hyst = 0;
3727 pi->default_compute_power_profile.down_hyst = 5;
3728
3729 pi->gfx_power_profile = pi->default_gfx_power_profile;
3730 pi->compute_power_profile = pi->default_compute_power_profile;
3731}
3732
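/*
 * Assemble the complete SMU7_Discrete_DpmTable: voltage tables, graphics,
 * memory, link, ACPI and UVD/VCE/ACP/SAMU levels, boot state and thermal
 * limits.  The table is byte-swapped and uploaded to SMC SRAM, minus the
 * three trailing PID controller blocks.
 */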
3733static int ci_init_smc_table(struct amdgpu_device *adev)
3734{
3735 struct ci_power_info *pi = ci_get_pi(adev);
3736 struct ci_ulv_parm *ulv = &pi->ulv;
3737 struct amdgpu_ps *amdgpu_boot_state = adev->pm.dpm.boot_ps;
3738 SMU7_Discrete_DpmTable *table = &pi->smc_state_table;
3739 int ret;
3740
3741 ret = ci_setup_default_dpm_tables(adev);
3742 if (ret)
3743 return ret;
3744
3745 if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE)
3746 ci_populate_smc_voltage_tables(adev, table);
3747
3748 ci_init_fps_limits(adev);
3749
3750 if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC)
3751 table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
3752
3753 if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
3754 table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
3755
3756	if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
3757		table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
3758
3759 if (ulv->supported) {
3760 ret = ci_populate_ulv_state(adev, &pi->smc_state_table.Ulv);
3761 if (ret)
3762 return ret;
3763 WREG32_SMC(ixCG_ULV_PARAMETER, ulv->cg_ulv_parameter);
3764 }
3765
3766 ret = ci_populate_all_graphic_levels(adev);
3767 if (ret)
3768 return ret;
3769
3770 ret = ci_populate_all_memory_levels(adev);
3771 if (ret)
3772 return ret;
3773
3774 ci_populate_smc_link_level(adev, table);
3775
3776 ret = ci_populate_smc_acpi_level(adev, table);
3777 if (ret)
3778 return ret;
3779
3780 ret = ci_populate_smc_vce_level(adev, table);
3781 if (ret)
3782 return ret;
3783
3784 ret = ci_populate_smc_acp_level(adev, table);
3785 if (ret)
3786 return ret;
3787
3788 ret = ci_populate_smc_samu_level(adev, table);
3789 if (ret)
3790 return ret;
3791
3792 ret = ci_do_program_memory_timing_parameters(adev);
3793 if (ret)
3794 return ret;
3795
3796 ret = ci_populate_smc_uvd_level(adev, table);
3797 if (ret)
3798 return ret;
3799
3800 table->UvdBootLevel = 0;
3801 table->VceBootLevel = 0;
3802 table->AcpBootLevel = 0;
3803 table->SamuBootLevel = 0;
3804 table->GraphicsBootLevel = 0;
3805 table->MemoryBootLevel = 0;
3806
3807 ret = ci_find_boot_level(&pi->dpm_table.sclk_table,
3808 pi->vbios_boot_state.sclk_bootup_value,
3809 (u32 *)&pi->smc_state_table.GraphicsBootLevel);
3810
3811 ret = ci_find_boot_level(&pi->dpm_table.mclk_table,
3812 pi->vbios_boot_state.mclk_bootup_value,
3813 (u32 *)&pi->smc_state_table.MemoryBootLevel);
3814
3815 table->BootVddc = pi->vbios_boot_state.vddc_bootup_value;
3816 table->BootVddci = pi->vbios_boot_state.vddci_bootup_value;
3817 table->BootMVdd = pi->vbios_boot_state.mvdd_bootup_value;
3818
3819 ci_populate_smc_initial_state(adev, amdgpu_boot_state);
3820
3821 ret = ci_populate_bapm_parameters_in_dpm_table(adev);
3822 if (ret)
3823 return ret;
3824
3825 table->UVDInterval = 1;
3826 table->VCEInterval = 1;
3827 table->ACPInterval = 1;
3828 table->SAMUInterval = 1;
3829 table->GraphicsVoltageChangeEnable = 1;
3830 table->GraphicsThermThrottleEnable = 1;
3831 table->GraphicsInterval = 1;
3832 table->VoltageInterval = 1;
3833 table->ThermalInterval = 1;
3834 table->TemperatureLimitHigh = (u16)((pi->thermal_temp_setting.temperature_high *
3835 CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
3836 table->TemperatureLimitLow = (u16)((pi->thermal_temp_setting.temperature_low *
3837 CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
3838 table->MemoryVoltageChangeEnable = 1;
3839 table->MemoryInterval = 1;
3840 table->VoltageResponseTime = 0;
3841 table->VddcVddciDelta = 4000;
3842 table->PhaseResponseTime = 0;
3843 table->MemoryThermThrottleEnable = 1;
3844 table->PCIeBootLinkLevel = pi->dpm_table.pcie_speed_table.count - 1;
3845 table->PCIeGenInterval = 1;
3846 if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2)
3847 table->SVI2Enable = 1;
3848 else
3849 table->SVI2Enable = 0;
3850
3851 table->ThermGpio = 17;
3852 table->SclkStepSize = 0x4000;
3853
3854 table->SystemFlags = cpu_to_be32(table->SystemFlags);
3855 table->SmioMaskVddcVid = cpu_to_be32(table->SmioMaskVddcVid);
3856 table->SmioMaskVddcPhase = cpu_to_be32(table->SmioMaskVddcPhase);
3857 table->SmioMaskVddciVid = cpu_to_be32(table->SmioMaskVddciVid);
3858 table->SmioMaskMvddVid = cpu_to_be32(table->SmioMaskMvddVid);
3859 table->SclkStepSize = cpu_to_be32(table->SclkStepSize);
3860 table->TemperatureLimitHigh = cpu_to_be16(table->TemperatureLimitHigh);
3861 table->TemperatureLimitLow = cpu_to_be16(table->TemperatureLimitLow);
3862 table->VddcVddciDelta = cpu_to_be16(table->VddcVddciDelta);
3863 table->VoltageResponseTime = cpu_to_be16(table->VoltageResponseTime);
3864 table->PhaseResponseTime = cpu_to_be16(table->PhaseResponseTime);
3865 table->BootVddc = cpu_to_be16(table->BootVddc * VOLTAGE_SCALE);
3866 table->BootVddci = cpu_to_be16(table->BootVddci * VOLTAGE_SCALE);
3867 table->BootMVdd = cpu_to_be16(table->BootMVdd * VOLTAGE_SCALE);
3868
3869 ret = amdgpu_ci_copy_bytes_to_smc(adev,
3870 pi->dpm_table_start +
3871 offsetof(SMU7_Discrete_DpmTable, SystemFlags),
3872 (u8 *)&table->SystemFlags,
3873 sizeof(SMU7_Discrete_DpmTable) - 3 * sizeof(SMU7_PIDController),
3874 pi->sram_end);
3875 if (ret)
3876 return ret;
3877
3878	ci_save_default_power_profile(adev);
3879
3880	return 0;
3881}
3882
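/*
 * Trimming keeps only the DPM levels inside the requested power state's
 * [low, high] clock window; the PCIe variant additionally drops duplicate
 * gen/lane combinations.
 */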
3883static void ci_trim_single_dpm_states(struct amdgpu_device *adev,
3884 struct ci_single_dpm_table *dpm_table,
3885 u32 low_limit, u32 high_limit)
3886{
3887 u32 i;
3888
3889 for (i = 0; i < dpm_table->count; i++) {
3890 if ((dpm_table->dpm_levels[i].value < low_limit) ||
3891 (dpm_table->dpm_levels[i].value > high_limit))
3892 dpm_table->dpm_levels[i].enabled = false;
3893 else
3894 dpm_table->dpm_levels[i].enabled = true;
3895 }
3896}
3897
3898static void ci_trim_pcie_dpm_states(struct amdgpu_device *adev,
3899 u32 speed_low, u32 lanes_low,
3900 u32 speed_high, u32 lanes_high)
3901{
3902 struct ci_power_info *pi = ci_get_pi(adev);
3903 struct ci_single_dpm_table *pcie_table = &pi->dpm_table.pcie_speed_table;
3904 u32 i, j;
3905
3906 for (i = 0; i < pcie_table->count; i++) {
3907 if ((pcie_table->dpm_levels[i].value < speed_low) ||
3908 (pcie_table->dpm_levels[i].param1 < lanes_low) ||
3909 (pcie_table->dpm_levels[i].value > speed_high) ||
3910 (pcie_table->dpm_levels[i].param1 > lanes_high))
3911 pcie_table->dpm_levels[i].enabled = false;
3912 else
3913 pcie_table->dpm_levels[i].enabled = true;
3914 }
3915
3916 for (i = 0; i < pcie_table->count; i++) {
3917 if (pcie_table->dpm_levels[i].enabled) {
3918 for (j = i + 1; j < pcie_table->count; j++) {
3919 if (pcie_table->dpm_levels[j].enabled) {
3920 if ((pcie_table->dpm_levels[i].value == pcie_table->dpm_levels[j].value) &&
3921 (pcie_table->dpm_levels[i].param1 == pcie_table->dpm_levels[j].param1))
3922 pcie_table->dpm_levels[j].enabled = false;
3923 }
3924 }
3925 }
3926 }
3927}
3928
3929static int ci_trim_dpm_states(struct amdgpu_device *adev,
3930 struct amdgpu_ps *amdgpu_state)
3931{
3932 struct ci_ps *state = ci_get_ps(amdgpu_state);
3933 struct ci_power_info *pi = ci_get_pi(adev);
3934 u32 high_limit_count;
3935
3936 if (state->performance_level_count < 1)
3937 return -EINVAL;
3938
3939 if (state->performance_level_count == 1)
3940 high_limit_count = 0;
3941 else
3942 high_limit_count = 1;
3943
3944 ci_trim_single_dpm_states(adev,
3945 &pi->dpm_table.sclk_table,
3946 state->performance_levels[0].sclk,
3947 state->performance_levels[high_limit_count].sclk);
3948
3949 ci_trim_single_dpm_states(adev,
3950 &pi->dpm_table.mclk_table,
3951 state->performance_levels[0].mclk,
3952 state->performance_levels[high_limit_count].mclk);
3953
3954 ci_trim_pcie_dpm_states(adev,
3955 state->performance_levels[0].pcie_gen,
3956 state->performance_levels[0].pcie_lane,
3957 state->performance_levels[high_limit_count].pcie_gen,
3958 state->performance_levels[high_limit_count].pcie_lane);
3959
3960 return 0;
3961}
3962
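/*
 * Look up the VDDC needed for the current display clock and ask the SMC
 * for the smallest vddc-vs-sclk entry that covers it.  The parameter is
 * in VOLTAGE_SCALE units, so e.g. 950 mV is sent as 3800.
 */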
3963static int ci_apply_disp_minimum_voltage_request(struct amdgpu_device *adev)
3964{
3965 struct amdgpu_clock_voltage_dependency_table *disp_voltage_table =
3966 &adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk;
3967 struct amdgpu_clock_voltage_dependency_table *vddc_table =
3968 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
3969 u32 requested_voltage = 0;
3970 u32 i;
3971
3972 if (disp_voltage_table == NULL)
3973 return -EINVAL;
3974 if (!disp_voltage_table->count)
3975 return -EINVAL;
3976
3977 for (i = 0; i < disp_voltage_table->count; i++) {
3978 if (adev->clock.current_dispclk == disp_voltage_table->entries[i].clk)
3979 requested_voltage = disp_voltage_table->entries[i].v;
3980 }
3981
3982 for (i = 0; i < vddc_table->count; i++) {
3983 if (requested_voltage <= vddc_table->entries[i].v) {
3984 requested_voltage = vddc_table->entries[i].v;
3985 return (amdgpu_ci_send_msg_to_smc_with_parameter(adev,
3986 PPSMC_MSG_VddC_Request,
3987 requested_voltage * VOLTAGE_SCALE) == PPSMC_Result_OK) ?
3988 0 : -EINVAL;
3989 }
3990 }
3991
3992 return -EINVAL;
3993}
3994
3995static int ci_upload_dpm_level_enable_mask(struct amdgpu_device *adev)
3996{
3997 struct ci_power_info *pi = ci_get_pi(adev);
3998 PPSMC_Result result;
3999
4000 ci_apply_disp_minimum_voltage_request(adev);
4001
4002 if (!pi->sclk_dpm_key_disabled) {
4003 if (pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
4004 result = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
4005 PPSMC_MSG_SCLKDPM_SetEnabledMask,
4006 pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
4007 if (result != PPSMC_Result_OK)
4008 return -EINVAL;
4009 }
4010 }
4011
4012 if (!pi->mclk_dpm_key_disabled) {
4013 if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
4014 result = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
4015 PPSMC_MSG_MCLKDPM_SetEnabledMask,
4016 pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
4017 if (result != PPSMC_Result_OK)
4018 return -EINVAL;
4019 }
4020 }
4021
4022#if 0
4023 if (!pi->pcie_dpm_key_disabled) {
4024 if (pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
4025 result = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
4026 PPSMC_MSG_PCIeDPM_SetEnabledMask,
4027 pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
4028 if (result != PPSMC_Result_OK)
4029 return -EINVAL;
4030 }
4031 }
4032#endif
4033
4034 return 0;
4035}
4036
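/*
 * Decide which SMC tables need regenerating for the new state: the OD
 * flags are set when the requested sclk/mclk is missing from the current
 * tables, and an MCLK update is forced when the active CRTC count changes.
 */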
4037static void ci_find_dpm_states_clocks_in_dpm_table(struct amdgpu_device *adev,
4038 struct amdgpu_ps *amdgpu_state)
4039{
4040 struct ci_power_info *pi = ci_get_pi(adev);
4041 struct ci_ps *state = ci_get_ps(amdgpu_state);
4042 struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table;
4043 u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
4044 struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table;
4045 u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
4046 u32 i;
4047
4048 pi->need_update_smu7_dpm_table = 0;
4049
4050 for (i = 0; i < sclk_table->count; i++) {
4051 if (sclk == sclk_table->dpm_levels[i].value)
4052 break;
4053 }
4054
4055 if (i >= sclk_table->count) {
4056 pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
4057 } else {
4058		/* XXX check display min clock requirements; the self-comparison
		 * below is intentionally always false, so DPMTABLE_UPDATE_SCLK
		 * is never set on this path
		 */
4059 if (CISLAND_MINIMUM_ENGINE_CLOCK != CISLAND_MINIMUM_ENGINE_CLOCK)
4060 pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
4061 }
4062
4063 for (i = 0; i < mclk_table->count; i++) {
4064 if (mclk == mclk_table->dpm_levels[i].value)
4065 break;
4066 }
4067
4068 if (i >= mclk_table->count)
4069 pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
4070
4071 if (adev->pm.dpm.current_active_crtc_count !=
4072 adev->pm.dpm.new_active_crtc_count)
4073 pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
4074}
4075
4076static int ci_populate_and_upload_sclk_mclk_dpm_levels(struct amdgpu_device *adev,
4077 struct amdgpu_ps *amdgpu_state)
4078{
4079 struct ci_power_info *pi = ci_get_pi(adev);
4080 struct ci_ps *state = ci_get_ps(amdgpu_state);
4081 u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
4082 u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
4083 struct ci_dpm_table *dpm_table = &pi->dpm_table;
4084 int ret;
4085
4086 if (!pi->need_update_smu7_dpm_table)
4087 return 0;
4088
4089 if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK)
4090 dpm_table->sclk_table.dpm_levels[dpm_table->sclk_table.count-1].value = sclk;
4091
4092 if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)
4093 dpm_table->mclk_table.dpm_levels[dpm_table->mclk_table.count-1].value = mclk;
4094
4095 if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK)) {
4096 ret = ci_populate_all_graphic_levels(adev);
4097 if (ret)
4098 return ret;
4099 }
4100
4101 if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_MCLK | DPMTABLE_UPDATE_MCLK)) {
4102 ret = ci_populate_all_memory_levels(adev);
4103 if (ret)
4104 return ret;
4105 }
4106
4107 return 0;
4108}
4109
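/*
 * UVD DPM levels are enabled from the top of the dependency table down,
 * bounded by the AC or DC VDDC ceiling.  While UVD is active, MCLK level
 * 0 is masked out of the memory enable mask.
 */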
4110static int ci_enable_uvd_dpm(struct amdgpu_device *adev, bool enable)
4111{
4112 struct ci_power_info *pi = ci_get_pi(adev);
4113 const struct amdgpu_clock_and_voltage_limits *max_limits;
4114 int i;
4115
4116 if (adev->pm.dpm.ac_power)
4117 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
4118 else
4119 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
4120
4121 if (enable) {
4122 pi->dpm_level_enable_mask.uvd_dpm_enable_mask = 0;
4123
4124 for (i = adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
4125 if (adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
4126 pi->dpm_level_enable_mask.uvd_dpm_enable_mask |= 1 << i;
4127
4128 if (!pi->caps_uvd_dpm)
4129 break;
4130 }
4131 }
4132
4133 amdgpu_ci_send_msg_to_smc_with_parameter(adev,
4134 PPSMC_MSG_UVDDPM_SetEnabledMask,
4135 pi->dpm_level_enable_mask.uvd_dpm_enable_mask);
4136
4137 if (pi->last_mclk_dpm_enable_mask & 0x1) {
4138 pi->uvd_enabled = true;
4139 pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
4140 amdgpu_ci_send_msg_to_smc_with_parameter(adev,
4141 PPSMC_MSG_MCLKDPM_SetEnabledMask,
4142 pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
4143 }
4144 } else {
4145		if (pi->uvd_enabled) {
4146			pi->uvd_enabled = false;
4147 pi->dpm_level_enable_mask.mclk_dpm_enable_mask |= 1;
4148 amdgpu_ci_send_msg_to_smc_with_parameter(adev,
4149 PPSMC_MSG_MCLKDPM_SetEnabledMask,
4150 pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
4151 }
4152 }
4153
4154 return (amdgpu_ci_send_msg_to_smc(adev, enable ?
4155 PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable) == PPSMC_Result_OK) ?
4156 0 : -EINVAL;
4157}
4158
4159static int ci_enable_vce_dpm(struct amdgpu_device *adev, bool enable)
4160{
4161 struct ci_power_info *pi = ci_get_pi(adev);
4162 const struct amdgpu_clock_and_voltage_limits *max_limits;
4163 int i;
4164
4165 if (adev->pm.dpm.ac_power)
4166 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
4167 else
4168 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
4169
4170 if (enable) {
4171 pi->dpm_level_enable_mask.vce_dpm_enable_mask = 0;
4172 for (i = adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
4173 if (adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
4174 pi->dpm_level_enable_mask.vce_dpm_enable_mask |= 1 << i;
4175
4176 if (!pi->caps_vce_dpm)
4177 break;
4178 }
4179 }
4180
4181 amdgpu_ci_send_msg_to_smc_with_parameter(adev,
4182 PPSMC_MSG_VCEDPM_SetEnabledMask,
4183 pi->dpm_level_enable_mask.vce_dpm_enable_mask);
4184 }
4185
4186 return (amdgpu_ci_send_msg_to_smc(adev, enable ?
4187 PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable) == PPSMC_Result_OK) ?
4188 0 : -EINVAL;
4189}
4190
4191#if 0
4192static int ci_enable_samu_dpm(struct amdgpu_device *adev, bool enable)
4193{
4194 struct ci_power_info *pi = ci_get_pi(adev);
4195 const struct amdgpu_clock_and_voltage_limits *max_limits;
4196 int i;
4197
4198 if (adev->pm.dpm.ac_power)
4199 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
4200 else
4201 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
4202
4203 if (enable) {
4204 pi->dpm_level_enable_mask.samu_dpm_enable_mask = 0;
4205 for (i = adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
4206 if (adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
4207 pi->dpm_level_enable_mask.samu_dpm_enable_mask |= 1 << i;
4208
4209 if (!pi->caps_samu_dpm)
4210 break;
4211 }
4212 }
4213
4214 amdgpu_ci_send_msg_to_smc_with_parameter(adev,
4215 PPSMC_MSG_SAMUDPM_SetEnabledMask,
4216 pi->dpm_level_enable_mask.samu_dpm_enable_mask);
4217 }
4218 return (amdgpu_ci_send_msg_to_smc(adev, enable ?
4219 PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable) == PPSMC_Result_OK) ?
4220 0 : -EINVAL;
4221}
4222
4223static int ci_enable_acp_dpm(struct amdgpu_device *adev, bool enable)
4224{
4225 struct ci_power_info *pi = ci_get_pi(adev);
4226 const struct amdgpu_clock_and_voltage_limits *max_limits;
4227 int i;
4228
4229 if (adev->pm.dpm.ac_power)
4230 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
4231 else
4232 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
4233
4234 if (enable) {
4235 pi->dpm_level_enable_mask.acp_dpm_enable_mask = 0;
4236 for (i = adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
4237 if (adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
4238 pi->dpm_level_enable_mask.acp_dpm_enable_mask |= 1 << i;
4239
4240 if (!pi->caps_acp_dpm)
4241 break;
4242 }
4243 }
4244
4245 amdgpu_ci_send_msg_to_smc_with_parameter(adev,
4246 PPSMC_MSG_ACPDPM_SetEnabledMask,
4247 pi->dpm_level_enable_mask.acp_dpm_enable_mask);
4248 }
4249
4250 return (amdgpu_ci_send_msg_to_smc(adev, enable ?
4251 PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable) == PPSMC_Result_OK) ?
4252 0 : -EINVAL;
4253}
4254#endif
4255
4256static int ci_update_uvd_dpm(struct amdgpu_device *adev, bool gate)
4257{
4258 struct ci_power_info *pi = ci_get_pi(adev);
4259 u32 tmp;
4260	int ret = 0;
4261
4262 if (!gate) {
4263		/* turn the clocks on when decoding */
4264		if (pi->caps_uvd_dpm ||
4265 (adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count <= 0))
4266 pi->smc_state_table.UvdBootLevel = 0;
4267 else
4268 pi->smc_state_table.UvdBootLevel =
4269 adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1;
4270
4271 tmp = RREG32_SMC(ixDPM_TABLE_475);
4272 tmp &= ~DPM_TABLE_475__UvdBootLevel_MASK;
4273 tmp |= (pi->smc_state_table.UvdBootLevel << DPM_TABLE_475__UvdBootLevel__SHIFT);
4274 WREG32_SMC(ixDPM_TABLE_475, tmp);
4275		ret = ci_enable_uvd_dpm(adev, true);
4276 } else {
4277 ret = ci_enable_uvd_dpm(adev, false);
4278 if (ret)
4279 return ret;
4280	}
4281
4282	return ret;
4283}
4284
4285static u8 ci_get_vce_boot_level(struct amdgpu_device *adev)
4286{
4287 u8 i;
4288 u32 min_evclk = 30000; /* ??? */
4289 struct amdgpu_vce_clock_voltage_dependency_table *table =
4290 &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
4291
4292 for (i = 0; i < table->count; i++) {
4293 if (table->entries[i].evclk >= min_evclk)
4294 return i;
4295 }
4296
4297 return table->count - 1;
4298}
4299
4300static int ci_update_vce_dpm(struct amdgpu_device *adev,
4301 struct amdgpu_ps *amdgpu_new_state,
4302 struct amdgpu_ps *amdgpu_current_state)
4303{
4304 struct ci_power_info *pi = ci_get_pi(adev);
4305 int ret = 0;
4306 u32 tmp;
4307
4308 if (amdgpu_current_state->evclk != amdgpu_new_state->evclk) {
4309 if (amdgpu_new_state->evclk) {
4310			pi->smc_state_table.VceBootLevel = ci_get_vce_boot_level(adev);
4311 tmp = RREG32_SMC(ixDPM_TABLE_475);
4312 tmp &= ~DPM_TABLE_475__VceBootLevel_MASK;
4313 tmp |= (pi->smc_state_table.VceBootLevel << DPM_TABLE_475__VceBootLevel__SHIFT);
4314 WREG32_SMC(ixDPM_TABLE_475, tmp);
4315
4316 ret = ci_enable_vce_dpm(adev, true);
4317 } else {
4318			ret = ci_enable_vce_dpm(adev, false);
4319 if (ret)
4320 return ret;
4321		}
4322 }
4323 return ret;
4324}
4325
4326#if 0
4327static int ci_update_samu_dpm(struct amdgpu_device *adev, bool gate)
4328{
4329 return ci_enable_samu_dpm(adev, gate);
4330}
4331
4332static int ci_update_acp_dpm(struct amdgpu_device *adev, bool gate)
4333{
4334 struct ci_power_info *pi = ci_get_pi(adev);
4335 u32 tmp;
4336
4337 if (!gate) {
4338 pi->smc_state_table.AcpBootLevel = 0;
4339
4340 tmp = RREG32_SMC(ixDPM_TABLE_475);
4341 tmp &= ~AcpBootLevel_MASK;
4342 tmp |= AcpBootLevel(pi->smc_state_table.AcpBootLevel);
4343 WREG32_SMC(ixDPM_TABLE_475, tmp);
4344 }
4345
4346 return ci_enable_acp_dpm(adev, !gate);
4347}
4348#endif
4349
4350static int ci_generate_dpm_level_enable_mask(struct amdgpu_device *adev,
4351 struct amdgpu_ps *amdgpu_state)
4352{
4353 struct ci_power_info *pi = ci_get_pi(adev);
4354 int ret;
4355
4356 ret = ci_trim_dpm_states(adev, amdgpu_state);
4357 if (ret)
4358 return ret;
4359
4360 pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
4361 ci_get_dpm_level_enable_mask_value(&pi->dpm_table.sclk_table);
4362 pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
4363 ci_get_dpm_level_enable_mask_value(&pi->dpm_table.mclk_table);
4364 pi->last_mclk_dpm_enable_mask =
4365 pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
4366 if (pi->uvd_enabled) {
4367 if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask & 1)
4368 pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
4369 }
4370 pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
4371 ci_get_dpm_level_enable_mask_value(&pi->dpm_table.pcie_speed_table);
4372
4373 return 0;
4374}
4375
4376static u32 ci_get_lowest_enabled_level(struct amdgpu_device *adev,
4377 u32 level_mask)
4378{
4379 u32 level = 0;
4380
4381 while ((level_mask & (1 << level)) == 0)
4382 level++;
4383
4384 return level;
4385}
4386
4387
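/*
 * Force the DPM state: HIGH pins pcie/sclk/mclk to their highest enabled
 * level and polls TARGET_AND_CURRENT_PROFILE_INDEX until the hardware
 * reports it, LOW pins them to the lowest enabled level, and AUTO
 * unforces PCIe and re-uploads the level enable masks.
 */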
4388static int ci_dpm_force_performance_level(void *handle,
4389					  enum amd_dpm_forced_level level)
4390{
4391	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4392	struct ci_power_info *pi = ci_get_pi(adev);
4393 u32 tmp, levels, i;
4394 int ret;
4395
4396	if (level == AMD_DPM_FORCED_LEVEL_HIGH) {
4397		if ((!pi->pcie_dpm_key_disabled) &&
4398 pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
4399 levels = 0;
4400 tmp = pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
4401 while (tmp >>= 1)
4402 levels++;
4403 if (levels) {
4404				ret = ci_dpm_force_state_pcie(adev, levels);
4405 if (ret)
4406 return ret;
4407 for (i = 0; i < adev->usec_timeout; i++) {
4408 tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_1) &
4409 TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX_MASK) >>
4410 TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX__SHIFT;
4411 if (tmp == levels)
4412 break;
4413 udelay(1);
4414 }
4415 }
4416 }
4417 if ((!pi->sclk_dpm_key_disabled) &&
4418 pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
4419 levels = 0;
4420 tmp = pi->dpm_level_enable_mask.sclk_dpm_enable_mask;
4421 while (tmp >>= 1)
4422 levels++;
4423 if (levels) {
4424 ret = ci_dpm_force_state_sclk(adev, levels);
4425 if (ret)
4426 return ret;
4427 for (i = 0; i < adev->usec_timeout; i++) {
4428 tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
4429 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >>
4430 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT;
4431 if (tmp == levels)
4432 break;
4433 udelay(1);
4434 }
4435 }
4436 }
4437 if ((!pi->mclk_dpm_key_disabled) &&
4438 pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
4439 levels = 0;
4440 tmp = pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
4441 while (tmp >>= 1)
4442 levels++;
4443 if (levels) {
4444 ret = ci_dpm_force_state_mclk(adev, levels);
4445 if (ret)
4446 return ret;
4447 for (i = 0; i < adev->usec_timeout; i++) {
4448 tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
4449 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX_MASK) >>
4450 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX__SHIFT;
4451 if (tmp == levels)
4452 break;
4453 udelay(1);
4454 }
4455 }
4456 }
4457	} else if (level == AMD_DPM_FORCED_LEVEL_LOW) {
4458		if ((!pi->sclk_dpm_key_disabled) &&
4459 pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
4460 levels = ci_get_lowest_enabled_level(adev,
4461 pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
4462 ret = ci_dpm_force_state_sclk(adev, levels);
4463 if (ret)
4464 return ret;
4465 for (i = 0; i < adev->usec_timeout; i++) {
4466 tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
4467 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >>
4468 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT;
4469 if (tmp == levels)
4470 break;
4471 udelay(1);
4472 }
4473 }
4474 if ((!pi->mclk_dpm_key_disabled) &&
4475 pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
4476 levels = ci_get_lowest_enabled_level(adev,
4477 pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
4478 ret = ci_dpm_force_state_mclk(adev, levels);
4479 if (ret)
4480 return ret;
4481 for (i = 0; i < adev->usec_timeout; i++) {
4482 tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
4483 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX_MASK) >>
4484 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX__SHIFT;
4485 if (tmp == levels)
4486 break;
4487 udelay(1);
4488 }
4489 }
4490 if ((!pi->pcie_dpm_key_disabled) &&
4491 pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
4492 levels = ci_get_lowest_enabled_level(adev,
4493 pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
4494 ret = ci_dpm_force_state_pcie(adev, levels);
4495 if (ret)
4496 return ret;
4497 for (i = 0; i < adev->usec_timeout; i++) {
4498 tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_1) &
4499 TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX_MASK) >>
4500 TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX__SHIFT;
4501 if (tmp == levels)
4502 break;
4503 udelay(1);
4504 }
4505 }
4506	} else if (level == AMD_DPM_FORCED_LEVEL_AUTO) {
4507		if (!pi->pcie_dpm_key_disabled) {
4508 PPSMC_Result smc_result;
4509
4510 smc_result = amdgpu_ci_send_msg_to_smc(adev,
4511 PPSMC_MSG_PCIeDPM_UnForceLevel);
4512 if (smc_result != PPSMC_Result_OK)
4513 return -EINVAL;
4514 }
4515 ret = ci_upload_dpm_level_enable_mask(adev);
4516 if (ret)
4517 return ret;
4518 }
4519
4520 adev->pm.dpm.forced_level = level;
4521
4522 return 0;
4523}
4524
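/*
 * Expand the MC register table: MC_SEQ_MISC1 and MC_SEQ_RESERVE_M entries
 * are split out into the EMRS/MRS/MRS1 command registers (plus
 * MC_PMG_AUTO_CMD on non-GDDR5 parts), merging live register contents
 * with the per-mclk-range data from the VBIOS table.
 */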
4525static int ci_set_mc_special_registers(struct amdgpu_device *adev,
4526 struct ci_mc_reg_table *table)
4527{
4528 u8 i, j, k;
4529 u32 temp_reg;
4530
4531 for (i = 0, j = table->last; i < table->last; i++) {
4532 if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4533 return -EINVAL;
4534 switch(table->mc_reg_address[i].s1) {
4535 case mmMC_SEQ_MISC1:
4536 temp_reg = RREG32(mmMC_PMG_CMD_EMRS);
4537 table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS;
4538 table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP;
4539 for (k = 0; k < table->num_entries; k++) {
4540 table->mc_reg_table_entry[k].mc_data[j] =
4541 ((temp_reg & 0xffff0000)) | ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
4542 }
4543 j++;
4544 if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4545 return -EINVAL;
4546
4547 temp_reg = RREG32(mmMC_PMG_CMD_MRS);
4548 table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS;
4549 table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP;
4550 for (k = 0; k < table->num_entries; k++) {
4551 table->mc_reg_table_entry[k].mc_data[j] =
4552 (temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
4553				if (adev->mc.vram_type != AMDGPU_VRAM_TYPE_GDDR5)
4554					table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
4555 }
4556 j++;
4557			if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4558 return -EINVAL;
4559
4560			if (adev->mc.vram_type != AMDGPU_VRAM_TYPE_GDDR5) {
4561				table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
4562 table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
4563 for (k = 0; k < table->num_entries; k++) {
4564 table->mc_reg_table_entry[k].mc_data[j] =
4565 (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
4566 }
4567 j++;
4568				if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4569 return -EINVAL;
4570 }
4571 break;
4572 case mmMC_SEQ_RESERVE_M:
4573 temp_reg = RREG32(mmMC_PMG_CMD_MRS1);
4574 table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1;
4575 table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP;
4576 for (k = 0; k < table->num_entries; k++) {
4577 table->mc_reg_table_entry[k].mc_data[j] =
4578 (temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
4579 }
4580 j++;
4581			if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4582 return -EINVAL;
4583 break;
4584 default:
4585 break;
4586 }
4587
4588 }
4589
4590 table->last = j;
4591
4592 return 0;
4593}
4594
4595static bool ci_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
4596{
4597 bool result = true;
4598
4599 switch(in_reg) {
4600 case mmMC_SEQ_RAS_TIMING:
4601 *out_reg = mmMC_SEQ_RAS_TIMING_LP;
4602 break;
4603 case mmMC_SEQ_DLL_STBY:
4604 *out_reg = mmMC_SEQ_DLL_STBY_LP;
4605 break;
4606 case mmMC_SEQ_G5PDX_CMD0:
4607 *out_reg = mmMC_SEQ_G5PDX_CMD0_LP;
4608 break;
4609 case mmMC_SEQ_G5PDX_CMD1:
4610 *out_reg = mmMC_SEQ_G5PDX_CMD1_LP;
4611 break;
4612 case mmMC_SEQ_G5PDX_CTRL:
4613 *out_reg = mmMC_SEQ_G5PDX_CTRL_LP;
4614 break;
4615 case mmMC_SEQ_CAS_TIMING:
4616 *out_reg = mmMC_SEQ_CAS_TIMING_LP;
4617 break;
4618 case mmMC_SEQ_MISC_TIMING:
4619 *out_reg = mmMC_SEQ_MISC_TIMING_LP;
4620 break;
4621 case mmMC_SEQ_MISC_TIMING2:
4622 *out_reg = mmMC_SEQ_MISC_TIMING2_LP;
4623 break;
4624 case mmMC_SEQ_PMG_DVS_CMD:
4625 *out_reg = mmMC_SEQ_PMG_DVS_CMD_LP;
4626 break;
4627 case mmMC_SEQ_PMG_DVS_CTL:
4628 *out_reg = mmMC_SEQ_PMG_DVS_CTL_LP;
4629 break;
4630 case mmMC_SEQ_RD_CTL_D0:
4631 *out_reg = mmMC_SEQ_RD_CTL_D0_LP;
4632 break;
4633 case mmMC_SEQ_RD_CTL_D1:
4634 *out_reg = mmMC_SEQ_RD_CTL_D1_LP;
4635 break;
4636 case mmMC_SEQ_WR_CTL_D0:
4637 *out_reg = mmMC_SEQ_WR_CTL_D0_LP;
4638 break;
4639 case mmMC_SEQ_WR_CTL_D1:
4640 *out_reg = mmMC_SEQ_WR_CTL_D1_LP;
4641 break;
4642 case mmMC_PMG_CMD_EMRS:
4643 *out_reg = mmMC_SEQ_PMG_CMD_EMRS_LP;
4644 break;
4645 case mmMC_PMG_CMD_MRS:
4646 *out_reg = mmMC_SEQ_PMG_CMD_MRS_LP;
4647 break;
4648 case mmMC_PMG_CMD_MRS1:
4649 *out_reg = mmMC_SEQ_PMG_CMD_MRS1_LP;
4650 break;
4651 case mmMC_SEQ_PMG_TIMING:
4652 *out_reg = mmMC_SEQ_PMG_TIMING_LP;
4653 break;
4654 case mmMC_PMG_CMD_MRS2:
4655 *out_reg = mmMC_SEQ_PMG_CMD_MRS2_LP;
4656 break;
4657 case mmMC_SEQ_WR_CTL_2:
4658 *out_reg = mmMC_SEQ_WR_CTL_2_LP;
4659 break;
4660 default:
4661 result = false;
4662 break;
4663 }
4664
4665 return result;
4666}
4667
4668static void ci_set_valid_flag(struct ci_mc_reg_table *table)
4669{
4670 u8 i, j;
4671
4672 for (i = 0; i < table->last; i++) {
4673 for (j = 1; j < table->num_entries; j++) {
4674 if (table->mc_reg_table_entry[j-1].mc_data[i] !=
4675 table->mc_reg_table_entry[j].mc_data[i]) {
4676 table->valid_flag |= 1 << i;
4677 break;
4678 }
4679 }
4680 }
4681}
4682
4683static void ci_set_s0_mc_reg_index(struct ci_mc_reg_table *table)
4684{
4685 u32 i;
4686 u16 address;
4687
4688 for (i = 0; i < table->last; i++) {
4689 table->mc_reg_address[i].s0 =
4690 ci_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ?
4691 address : table->mc_reg_address[i].s1;
4692 }
4693}
4694
4695static int ci_copy_vbios_mc_reg_table(const struct atom_mc_reg_table *table,
4696 struct ci_mc_reg_table *ci_table)
4697{
4698 u8 i, j;
4699
4700 if (table->last > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4701 return -EINVAL;
4702 if (table->num_entries > MAX_AC_TIMING_ENTRIES)
4703 return -EINVAL;
4704
4705 for (i = 0; i < table->last; i++)
4706 ci_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
4707
4708 ci_table->last = table->last;
4709
4710 for (i = 0; i < table->num_entries; i++) {
4711 ci_table->mc_reg_table_entry[i].mclk_max =
4712 table->mc_reg_table_entry[i].mclk_max;
4713 for (j = 0; j < table->last; j++)
4714 ci_table->mc_reg_table_entry[i].mc_data[j] =
4715 table->mc_reg_table_entry[i].mc_data[j];
4716 }
4717 ci_table->num_entries = table->num_entries;
4718
4719 return 0;
4720}
4721
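/*
 * Board-specific MC sequencer fixups for Hawaii (0x67B0/0x67B1): when
 * MC_SEQ_MISC0 reports the 0x300 memory configuration, the timing values
 * for the 125000 and 137500 mclk ranges (10 kHz units, i.e. 1.25/1.375
 * GHz) are patched.
 */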
4722static int ci_register_patching_mc_seq(struct amdgpu_device *adev,
4723 struct ci_mc_reg_table *table)
4724{
4725 u8 i, k;
4726 u32 tmp;
4727 bool patch;
4728
4729 tmp = RREG32(mmMC_SEQ_MISC0);
4730	patch = ((tmp & 0x0000f00) == 0x300);
4731
4732 if (patch &&
4733 ((adev->pdev->device == 0x67B0) ||
4734 (adev->pdev->device == 0x67B1))) {
4735 for (i = 0; i < table->last; i++) {
4736 if (table->last >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4737 return -EINVAL;
4738 switch (table->mc_reg_address[i].s1) {
4739 case mmMC_SEQ_MISC1:
4740 for (k = 0; k < table->num_entries; k++) {
4741 if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
4742 (table->mc_reg_table_entry[k].mclk_max == 137500))
4743 table->mc_reg_table_entry[k].mc_data[i] =
4744 (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFF8) |
4745 0x00000007;
4746 }
4747 break;
4748 case mmMC_SEQ_WR_CTL_D0:
4749 for (k = 0; k < table->num_entries; k++) {
4750 if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
4751 (table->mc_reg_table_entry[k].mclk_max == 137500))
4752 table->mc_reg_table_entry[k].mc_data[i] =
4753 (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) |
4754 0x0000D0DD;
4755 }
4756 break;
4757 case mmMC_SEQ_WR_CTL_D1:
4758 for (k = 0; k < table->num_entries; k++) {
4759 if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
4760 (table->mc_reg_table_entry[k].mclk_max == 137500))
4761 table->mc_reg_table_entry[k].mc_data[i] =
4762 (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) |
4763 0x0000D0DD;
4764 }
4765 break;
4766 case mmMC_SEQ_WR_CTL_2:
4767 for (k = 0; k < table->num_entries; k++) {
4768 if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
4769 (table->mc_reg_table_entry[k].mclk_max == 137500))
4770 table->mc_reg_table_entry[k].mc_data[i] = 0;
4771 }
4772 break;
4773 case mmMC_SEQ_CAS_TIMING:
4774 for (k = 0; k < table->num_entries; k++) {
4775 if (table->mc_reg_table_entry[k].mclk_max == 125000)
4776 table->mc_reg_table_entry[k].mc_data[i] =
4777 (table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) |
4778 0x000C0140;
4779 else if (table->mc_reg_table_entry[k].mclk_max == 137500)
4780 table->mc_reg_table_entry[k].mc_data[i] =
4781 (table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) |
4782 0x000C0150;
4783 }
4784 break;
4785 case mmMC_SEQ_MISC_TIMING:
4786 for (k = 0; k < table->num_entries; k++) {
4787 if (table->mc_reg_table_entry[k].mclk_max == 125000)
4788 table->mc_reg_table_entry[k].mc_data[i] =
4789 (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) |
4790 0x00000030;
4791 else if (table->mc_reg_table_entry[k].mclk_max == 137500)
4792 table->mc_reg_table_entry[k].mc_data[i] =
4793 (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) |
4794 0x00000035;
4795 }
4796 break;
4797 default:
4798 break;
4799 }
4800 }
4801
4802 WREG32(mmMC_SEQ_IO_DEBUG_INDEX, 3);
4803 tmp = RREG32(mmMC_SEQ_IO_DEBUG_DATA);
4804 tmp = (tmp & 0xFFF8FFFF) | (1 << 16);
4805 WREG32(mmMC_SEQ_IO_DEBUG_INDEX, 3);
4806 WREG32(mmMC_SEQ_IO_DEBUG_DATA, tmp);
4807 }
4808
4809 return 0;
4810}
4811
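/*
 * Snapshot the live MC_SEQ registers into their _LP shadows, read the
 * VBIOS MC register table for this memory module, then patch, index and
 * filter it into the driver's ci_mc_reg_table.
 */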
4812static int ci_initialize_mc_reg_table(struct amdgpu_device *adev)
4813{
4814 struct ci_power_info *pi = ci_get_pi(adev);
4815 struct atom_mc_reg_table *table;
4816 struct ci_mc_reg_table *ci_table = &pi->mc_reg_table;
4817 u8 module_index = ci_get_memory_module_index(adev);
4818 int ret;
4819
4820 table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL);
4821 if (!table)
4822 return -ENOMEM;
4823
4824 WREG32(mmMC_SEQ_RAS_TIMING_LP, RREG32(mmMC_SEQ_RAS_TIMING));
4825 WREG32(mmMC_SEQ_CAS_TIMING_LP, RREG32(mmMC_SEQ_CAS_TIMING));
4826 WREG32(mmMC_SEQ_DLL_STBY_LP, RREG32(mmMC_SEQ_DLL_STBY));
4827 WREG32(mmMC_SEQ_G5PDX_CMD0_LP, RREG32(mmMC_SEQ_G5PDX_CMD0));
4828 WREG32(mmMC_SEQ_G5PDX_CMD1_LP, RREG32(mmMC_SEQ_G5PDX_CMD1));
4829 WREG32(mmMC_SEQ_G5PDX_CTRL_LP, RREG32(mmMC_SEQ_G5PDX_CTRL));
4830 WREG32(mmMC_SEQ_PMG_DVS_CMD_LP, RREG32(mmMC_SEQ_PMG_DVS_CMD));
4831 WREG32(mmMC_SEQ_PMG_DVS_CTL_LP, RREG32(mmMC_SEQ_PMG_DVS_CTL));
4832 WREG32(mmMC_SEQ_MISC_TIMING_LP, RREG32(mmMC_SEQ_MISC_TIMING));
4833 WREG32(mmMC_SEQ_MISC_TIMING2_LP, RREG32(mmMC_SEQ_MISC_TIMING2));
4834 WREG32(mmMC_SEQ_PMG_CMD_EMRS_LP, RREG32(mmMC_PMG_CMD_EMRS));
4835 WREG32(mmMC_SEQ_PMG_CMD_MRS_LP, RREG32(mmMC_PMG_CMD_MRS));
4836 WREG32(mmMC_SEQ_PMG_CMD_MRS1_LP, RREG32(mmMC_PMG_CMD_MRS1));
4837 WREG32(mmMC_SEQ_WR_CTL_D0_LP, RREG32(mmMC_SEQ_WR_CTL_D0));
4838 WREG32(mmMC_SEQ_WR_CTL_D1_LP, RREG32(mmMC_SEQ_WR_CTL_D1));
4839 WREG32(mmMC_SEQ_RD_CTL_D0_LP, RREG32(mmMC_SEQ_RD_CTL_D0));
4840 WREG32(mmMC_SEQ_RD_CTL_D1_LP, RREG32(mmMC_SEQ_RD_CTL_D1));
4841 WREG32(mmMC_SEQ_PMG_TIMING_LP, RREG32(mmMC_SEQ_PMG_TIMING));
4842 WREG32(mmMC_SEQ_PMG_CMD_MRS2_LP, RREG32(mmMC_PMG_CMD_MRS2));
4843 WREG32(mmMC_SEQ_WR_CTL_2_LP, RREG32(mmMC_SEQ_WR_CTL_2));
4844
4845 ret = amdgpu_atombios_init_mc_reg_table(adev, module_index, table);
4846 if (ret)
4847 goto init_mc_done;
4848
4849 ret = ci_copy_vbios_mc_reg_table(table, ci_table);
4850 if (ret)
4851 goto init_mc_done;
4852
4853 ci_set_s0_mc_reg_index(ci_table);
4854
4855 ret = ci_register_patching_mc_seq(adev, ci_table);
4856 if (ret)
4857 goto init_mc_done;
4858
4859 ret = ci_set_mc_special_registers(adev, ci_table);
4860 if (ret)
4861 goto init_mc_done;
4862
4863 ci_set_valid_flag(ci_table);
4864
4865init_mc_done:
4866 kfree(table);
4867
4868 return ret;
4869}
4870
4871static int ci_populate_mc_reg_addresses(struct amdgpu_device *adev,
4872 SMU7_Discrete_MCRegisters *mc_reg_table)
4873{
4874 struct ci_power_info *pi = ci_get_pi(adev);
4875 u32 i, j;
4876
4877 for (i = 0, j = 0; j < pi->mc_reg_table.last; j++) {
4878 if (pi->mc_reg_table.valid_flag & (1 << j)) {
4879 if (i >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4880 return -EINVAL;
4881 mc_reg_table->address[i].s0 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s0);
4882 mc_reg_table->address[i].s1 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s1);
4883 i++;
4884 }
4885 }
4886
4887 mc_reg_table->last = (u8)i;
4888
4889 return 0;
4890}
4891
4892static void ci_convert_mc_registers(const struct ci_mc_reg_entry *entry,
4893 SMU7_Discrete_MCRegisterSet *data,
4894 u32 num_entries, u32 valid_flag)
4895{
4896 u32 i, j;
4897
4898 for (i = 0, j = 0; j < num_entries; j++) {
4899 if (valid_flag & (1 << j)) {
4900 data->value[i] = cpu_to_be32(entry->mc_data[j]);
4901 i++;
4902 }
4903 }
4904}
4905
4906static void ci_convert_mc_reg_table_entry_to_smc(struct amdgpu_device *adev,
4907 const u32 memory_clock,
4908 SMU7_Discrete_MCRegisterSet *mc_reg_table_data)
4909{
4910 struct ci_power_info *pi = ci_get_pi(adev);
4911 u32 i = 0;
4912
4913 for(i = 0; i < pi->mc_reg_table.num_entries; i++) {
4914 if (memory_clock <= pi->mc_reg_table.mc_reg_table_entry[i].mclk_max)
4915 break;
4916 }
4917
4918 if ((i == pi->mc_reg_table.num_entries) && (i > 0))
4919 --i;
4920
4921 ci_convert_mc_registers(&pi->mc_reg_table.mc_reg_table_entry[i],
4922 mc_reg_table_data, pi->mc_reg_table.last,
4923 pi->mc_reg_table.valid_flag);
4924}
4925
4926static void ci_convert_mc_reg_table_to_smc(struct amdgpu_device *adev,
4927 SMU7_Discrete_MCRegisters *mc_reg_table)
4928{
4929 struct ci_power_info *pi = ci_get_pi(adev);
4930 u32 i;
4931
4932 for (i = 0; i < pi->dpm_table.mclk_table.count; i++)
4933 ci_convert_mc_reg_table_entry_to_smc(adev,
4934 pi->dpm_table.mclk_table.dpm_levels[i].value,
4935 &mc_reg_table->data[i]);
4936}
4937
4938static int ci_populate_initial_mc_reg_table(struct amdgpu_device *adev)
4939{
4940 struct ci_power_info *pi = ci_get_pi(adev);
4941 int ret;
4942
4943 memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));
4944
4945 ret = ci_populate_mc_reg_addresses(adev, &pi->smc_mc_reg_table);
4946 if (ret)
4947 return ret;
4948 ci_convert_mc_reg_table_to_smc(adev, &pi->smc_mc_reg_table);
4949
4950 return amdgpu_ci_copy_bytes_to_smc(adev,
4951 pi->mc_reg_table_start,
4952 (u8 *)&pi->smc_mc_reg_table,
4953 sizeof(SMU7_Discrete_MCRegisters),
4954 pi->sram_end);
4955}
4956
4957static int ci_update_and_upload_mc_reg_table(struct amdgpu_device *adev)
4958{
4959 struct ci_power_info *pi = ci_get_pi(adev);
4960
4961 if (!(pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
4962 return 0;
4963
4964 memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));
4965
4966 ci_convert_mc_reg_table_to_smc(adev, &pi->smc_mc_reg_table);
4967
4968 return amdgpu_ci_copy_bytes_to_smc(adev,
4969 pi->mc_reg_table_start +
4970 offsetof(SMU7_Discrete_MCRegisters, data[0]),
4971 (u8 *)&pi->smc_mc_reg_table.data[0],
4972 sizeof(SMU7_Discrete_MCRegisterSet) *
4973 pi->dpm_table.mclk_table.count,
4974 pi->sram_end);
4975}
4976
4977static void ci_enable_voltage_control(struct amdgpu_device *adev)
4978{
4979 u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
4980
4981 tmp |= GENERAL_PWRMGT__VOLT_PWRMGT_EN_MASK;
4982 WREG32_SMC(ixGENERAL_PWRMGT, tmp);
4983}
4984
4985static enum amdgpu_pcie_gen ci_get_maximum_link_speed(struct amdgpu_device *adev,
4986 struct amdgpu_ps *amdgpu_state)
4987{
4988 struct ci_ps *state = ci_get_ps(amdgpu_state);
4989 int i;
4990 u16 pcie_speed, max_speed = 0;
4991
4992 for (i = 0; i < state->performance_level_count; i++) {
4993 pcie_speed = state->performance_levels[i].pcie_gen;
4994 if (max_speed < pcie_speed)
4995 max_speed = pcie_speed;
4996 }
4997
4998 return max_speed;
4999}
5000
5001static u16 ci_get_current_pcie_speed(struct amdgpu_device *adev)
5002{
5003 u32 speed_cntl = 0;
5004
5005 speed_cntl = RREG32_PCIE(ixPCIE_LC_SPEED_CNTL) &
5006 PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK;
5007 speed_cntl >>= PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
5008
5009 return (u16)speed_cntl;
5010}
5011
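/**
 * ci_get_current_pcie_lane_number - query the current PCIe link width
 *
 * @adev: amdgpu_device pointer
 *
 * Decodes the LC_LINK_WIDTH_RD field of PCIE_LC_LINK_WIDTH_CNTL into a
 * lane count; unknown encodings are treated as x16.
 */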
5012static int ci_get_current_pcie_lane_number(struct amdgpu_device *adev)
5013{
5014 u32 link_width = 0;
5015
5016 link_width = RREG32_PCIE(ixPCIE_LC_LINK_WIDTH_CNTL) &
5017 PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK;
5018 link_width >>= PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
5019
5020 switch (link_width) {
5021 case 1:
5022 return 1;
5023 case 2:
5024 return 2;
5025 case 3:
5026 return 4;
5027 case 4:
5028 return 8;
5029 case 0:
5030 case 6:
5031 default:
5032 return 16;
5033 }
5034}
5035
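/**
 * ci_request_link_speed_change_before_state_change - pre-switch PCIe request
 *
 * @adev: amdgpu_device pointer
 * @amdgpu_new_state: requested power state
 * @amdgpu_current_state: current power state
 *
 * If the new state needs a faster PCIe link, ask the platform (via ACPI)
 * to raise the link speed before the state switch. Downgrades are only
 * flagged here (pspp_notify_required) and requested after the switch by
 * ci_notify_link_speed_change_after_state_change().
 */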
5036static void ci_request_link_speed_change_before_state_change(struct amdgpu_device *adev,
5037 struct amdgpu_ps *amdgpu_new_state,
5038 struct amdgpu_ps *amdgpu_current_state)
5039{
5040 struct ci_power_info *pi = ci_get_pi(adev);
5041 enum amdgpu_pcie_gen target_link_speed =
5042 ci_get_maximum_link_speed(adev, amdgpu_new_state);
5043 enum amdgpu_pcie_gen current_link_speed;
5044
5045 if (pi->force_pcie_gen == AMDGPU_PCIE_GEN_INVALID)
5046 current_link_speed = ci_get_maximum_link_speed(adev, amdgpu_current_state);
5047 else
5048 current_link_speed = pi->force_pcie_gen;
5049
5050 pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
5051 pi->pspp_notify_required = false;
5052 if (target_link_speed > current_link_speed) {
5053 switch (target_link_speed) {
5054#ifdef CONFIG_ACPI
5055 case AMDGPU_PCIE_GEN3:
5056 if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN3, false) == 0)
5057 break;
5058 pi->force_pcie_gen = AMDGPU_PCIE_GEN2;
5059 if (current_link_speed == AMDGPU_PCIE_GEN2)
5060 break;
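		/* fall through */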
5061 case AMDGPU_PCIE_GEN2:
5062 if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN2, false) == 0)
5063 break;
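		/* fall through */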
5064#endif
5065 default:
5066 pi->force_pcie_gen = ci_get_current_pcie_speed(adev);
5067 break;
5068 }
5069 } else {
5070 if (target_link_speed < current_link_speed)
5071 pi->pspp_notify_required = true;
5072 }
5073}
5074
5075static void ci_notify_link_speed_change_after_state_change(struct amdgpu_device *adev,
5076 struct amdgpu_ps *amdgpu_new_state,
5077 struct amdgpu_ps *amdgpu_current_state)
5078{
5079 struct ci_power_info *pi = ci_get_pi(adev);
5080 enum amdgpu_pcie_gen target_link_speed =
5081 ci_get_maximum_link_speed(adev, amdgpu_new_state);
5082 u8 request;
5083
5084 if (pi->pspp_notify_required) {
5085 if (target_link_speed == AMDGPU_PCIE_GEN3)
5086 request = PCIE_PERF_REQ_PECI_GEN3;
5087 else if (target_link_speed == AMDGPU_PCIE_GEN2)
5088 request = PCIE_PERF_REQ_PECI_GEN2;
5089 else
5090 request = PCIE_PERF_REQ_PECI_GEN1;
5091
5092 if ((request == PCIE_PERF_REQ_PECI_GEN1) &&
5093 (ci_get_current_pcie_speed(adev) > 0))
5094 return;
5095
5096#ifdef CONFIG_ACPI
5097 amdgpu_acpi_pcie_performance_request(adev, request, false);
5098#endif
5099 }
5100}
5101
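/**
 * ci_set_private_data_variables_based_on_pptable - cache pptable limits
 *
 * @adev: amdgpu_device pointer
 *
 * Validates the sclk/mclk voltage dependency tables and caches the
 * min/max vddc and vddci values plus the maximum AC clock and voltage
 * limits derived from the last table entries.
 * Returns 0 on success, -EINVAL if a table is missing or empty.
 */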
5102static int ci_set_private_data_variables_based_on_pptable(struct amdgpu_device *adev)
5103{
5104 struct ci_power_info *pi = ci_get_pi(adev);
5105 struct amdgpu_clock_voltage_dependency_table *allowed_sclk_vddc_table =
5106 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
5107 struct amdgpu_clock_voltage_dependency_table *allowed_mclk_vddc_table =
5108 &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
5109 struct amdgpu_clock_voltage_dependency_table *allowed_mclk_vddci_table =
5110 &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
5111
5112 if (allowed_sclk_vddc_table == NULL)
5113 return -EINVAL;
5114 if (allowed_sclk_vddc_table->count < 1)
5115 return -EINVAL;
5116 if (allowed_mclk_vddc_table == NULL)
5117 return -EINVAL;
5118 if (allowed_mclk_vddc_table->count < 1)
5119 return -EINVAL;
5120 if (allowed_mclk_vddci_table == NULL)
5121 return -EINVAL;
5122 if (allowed_mclk_vddci_table->count < 1)
5123 return -EINVAL;
5124
5125 pi->min_vddc_in_pp_table = allowed_sclk_vddc_table->entries[0].v;
5126 pi->max_vddc_in_pp_table =
5127 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
5128
5129 pi->min_vddci_in_pp_table = allowed_mclk_vddci_table->entries[0].v;
5130 pi->max_vddci_in_pp_table =
5131 allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
5132
5133 adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk =
5134 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
5135 adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk =
5136		allowed_mclk_vddc_table->entries[allowed_mclk_vddc_table->count - 1].clk;
5137 adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc =
5138 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
5139 adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci =
5140 allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
5141
5142 return 0;
5143}
5144
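/**
 * ci_patch_with_vddc_leakage - resolve a virtual vddc leakage voltage
 *
 * @adev: amdgpu_device pointer
 * @vddc: voltage to patch in place
 *
 * If *vddc is a virtual leakage voltage ID, replace it with the actual
 * voltage recorded for this board; otherwise leave it untouched.
 */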
5145static void ci_patch_with_vddc_leakage(struct amdgpu_device *adev, u16 *vddc)
5146{
5147 struct ci_power_info *pi = ci_get_pi(adev);
5148 struct ci_leakage_voltage *leakage_table = &pi->vddc_leakage;
5149 u32 leakage_index;
5150
5151 for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
5152 if (leakage_table->leakage_id[leakage_index] == *vddc) {
5153 *vddc = leakage_table->actual_voltage[leakage_index];
5154 break;
5155 }
5156 }
5157}
5158
5159static void ci_patch_with_vddci_leakage(struct amdgpu_device *adev, u16 *vddci)
5160{
5161 struct ci_power_info *pi = ci_get_pi(adev);
5162 struct ci_leakage_voltage *leakage_table = &pi->vddci_leakage;
5163 u32 leakage_index;
5164
5165 for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
5166 if (leakage_table->leakage_id[leakage_index] == *vddci) {
5167 *vddci = leakage_table->actual_voltage[leakage_index];
5168 break;
5169 }
5170 }
5171}
5172
5173static void ci_patch_clock_voltage_dependency_table_with_vddc_leakage(struct amdgpu_device *adev,
5174 struct amdgpu_clock_voltage_dependency_table *table)
5175{
5176 u32 i;
5177
5178 if (table) {
5179 for (i = 0; i < table->count; i++)
5180 ci_patch_with_vddc_leakage(adev, &table->entries[i].v);
5181 }
5182}
5183
5184static void ci_patch_clock_voltage_dependency_table_with_vddci_leakage(struct amdgpu_device *adev,
5185 struct amdgpu_clock_voltage_dependency_table *table)
5186{
5187 u32 i;
5188
5189 if (table) {
5190 for (i = 0; i < table->count; i++)
5191 ci_patch_with_vddci_leakage(adev, &table->entries[i].v);
5192 }
5193}
5194
5195static void ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(struct amdgpu_device *adev,
5196 struct amdgpu_vce_clock_voltage_dependency_table *table)
5197{
5198 u32 i;
5199
5200 if (table) {
5201 for (i = 0; i < table->count; i++)
5202 ci_patch_with_vddc_leakage(adev, &table->entries[i].v);
5203 }
5204}
5205
5206static void ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(struct amdgpu_device *adev,
5207 struct amdgpu_uvd_clock_voltage_dependency_table *table)
5208{
5209 u32 i;
5210
5211 if (table) {
5212 for (i = 0; i < table->count; i++)
5213 ci_patch_with_vddc_leakage(adev, &table->entries[i].v);
5214 }
5215}
5216
5217static void ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(struct amdgpu_device *adev,
5218 struct amdgpu_phase_shedding_limits_table *table)
5219{
5220 u32 i;
5221
5222 if (table) {
5223 for (i = 0; i < table->count; i++)
5224 ci_patch_with_vddc_leakage(adev, &table->entries[i].voltage);
5225 }
5226}
5227
5228static void ci_patch_clock_voltage_limits_with_vddc_leakage(struct amdgpu_device *adev,
5229 struct amdgpu_clock_and_voltage_limits *table)
5230{
5231 if (table) {
5232 ci_patch_with_vddc_leakage(adev, (u16 *)&table->vddc);
5233 ci_patch_with_vddci_leakage(adev, (u16 *)&table->vddci);
5234 }
5235}
5236
5237static void ci_patch_cac_leakage_table_with_vddc_leakage(struct amdgpu_device *adev,
5238 struct amdgpu_cac_leakage_table *table)
5239{
5240 u32 i;
5241
5242 if (table) {
5243 for (i = 0; i < table->count; i++)
5244 ci_patch_with_vddc_leakage(adev, &table->entries[i].vddc);
5245 }
5246}
5247
5248static void ci_patch_dependency_tables_with_leakage(struct amdgpu_device *adev)
5249{
5251 ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
5252 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk);
5253 ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
5254 &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk);
5255 ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
5256 &adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk);
5257 ci_patch_clock_voltage_dependency_table_with_vddci_leakage(adev,
5258 &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk);
5259 ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(adev,
5260 &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table);
5261 ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(adev,
5262 &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table);
5263 ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
5264 &adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table);
5265 ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
5266 &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table);
5267 ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(adev,
5268 &adev->pm.dpm.dyn_state.phase_shedding_limits_table);
5269 ci_patch_clock_voltage_limits_with_vddc_leakage(adev,
5270 &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
5271 ci_patch_clock_voltage_limits_with_vddc_leakage(adev,
5272 &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc);
5273 ci_patch_cac_leakage_table_with_vddc_leakage(adev,
5274 &adev->pm.dpm.dyn_state.cac_leakage_table);
5276}
5277
5278static void ci_update_current_ps(struct amdgpu_device *adev,
5279 struct amdgpu_ps *rps)
5280{
5281 struct ci_ps *new_ps = ci_get_ps(rps);
5282 struct ci_power_info *pi = ci_get_pi(adev);
5283
5284 pi->current_rps = *rps;
5285 pi->current_ps = *new_ps;
5286 pi->current_rps.ps_priv = &pi->current_ps;
Rex Zhu8c8e2c32016-10-14 19:29:02 +08005287 adev->pm.dpm.current_ps = &pi->current_rps;
Alex Deuchera2e73f52015-04-20 17:09:27 -04005288}
5289
5290static void ci_update_requested_ps(struct amdgpu_device *adev,
5291 struct amdgpu_ps *rps)
5292{
5293 struct ci_ps *new_ps = ci_get_ps(rps);
5294 struct ci_power_info *pi = ci_get_pi(adev);
5295
5296 pi->requested_rps = *rps;
5297 pi->requested_ps = *new_ps;
5298 pi->requested_rps.ps_priv = &pi->requested_ps;
Rex Zhu8c8e2c32016-10-14 19:29:02 +08005299 adev->pm.dpm.requested_ps = &pi->requested_rps;
Alex Deuchera2e73f52015-04-20 17:09:27 -04005300}
5301
Rex Zhucfa289f2017-09-06 15:27:59 +08005302static int ci_dpm_pre_set_power_state(void *handle)
Alex Deuchera2e73f52015-04-20 17:09:27 -04005303{
Rex Zhucfa289f2017-09-06 15:27:59 +08005304 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
Alex Deuchera2e73f52015-04-20 17:09:27 -04005305 struct ci_power_info *pi = ci_get_pi(adev);
5306 struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps;
5307 struct amdgpu_ps *new_ps = &requested_ps;
5308
5309 ci_update_requested_ps(adev, new_ps);
5310
5311 ci_apply_state_adjust_rules(adev, &pi->requested_rps);
5312
5313 return 0;
5314}
5315
Rex Zhucfa289f2017-09-06 15:27:59 +08005316static void ci_dpm_post_set_power_state(void *handle)
Alex Deuchera2e73f52015-04-20 17:09:27 -04005317{
Rex Zhucfa289f2017-09-06 15:27:59 +08005318 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
Alex Deuchera2e73f52015-04-20 17:09:27 -04005319 struct ci_power_info *pi = ci_get_pi(adev);
5320 struct amdgpu_ps *new_ps = &pi->requested_rps;
5321
5322 ci_update_current_ps(adev, new_ps);
5323}
5324
5325
5326static void ci_dpm_setup_asic(struct amdgpu_device *adev)
5327{
5328 ci_read_clock_registers(adev);
5329 ci_enable_acpi_power_management(adev);
5330 ci_init_sclk_t(adev);
5331}
5332
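/**
 * ci_dpm_enable - bring up dynamic power management on the asic
 *
 * @adev: amdgpu_device pointer
 *
 * Performs the full DPM bring-up sequence: voltage control and voltage
 * tables, spread spectrum and thermal protection, firmware upload, SMC
 * table initialization, SMC start, then ULV, deep sleep, DIDT, CAC and
 * power containment enablement, and finally the thermal controller.
 * Any failing step aborts the sequence and returns its error code.
 */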
5333static int ci_dpm_enable(struct amdgpu_device *adev)
5334{
5335 struct ci_power_info *pi = ci_get_pi(adev);
5336 struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps;
5337 int ret;
5338
Alex Deuchera2e73f52015-04-20 17:09:27 -04005339 if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
5340 ci_enable_voltage_control(adev);
5341 ret = ci_construct_voltage_tables(adev);
5342 if (ret) {
5343 DRM_ERROR("ci_construct_voltage_tables failed\n");
5344 return ret;
5345 }
5346 }
5347 if (pi->caps_dynamic_ac_timing) {
5348 ret = ci_initialize_mc_reg_table(adev);
5349 if (ret)
5350 pi->caps_dynamic_ac_timing = false;
5351 }
5352 if (pi->dynamic_ss)
5353 ci_enable_spread_spectrum(adev, true);
5354 if (pi->thermal_protection)
5355 ci_enable_thermal_protection(adev, true);
5356 ci_program_sstp(adev);
5357 ci_enable_display_gap(adev);
5358 ci_program_vc(adev);
5359 ret = ci_upload_firmware(adev);
5360 if (ret) {
5361 DRM_ERROR("ci_upload_firmware failed\n");
5362 return ret;
5363 }
5364 ret = ci_process_firmware_header(adev);
5365 if (ret) {
5366 DRM_ERROR("ci_process_firmware_header failed\n");
5367 return ret;
5368 }
5369 ret = ci_initial_switch_from_arb_f0_to_f1(adev);
5370 if (ret) {
5371 DRM_ERROR("ci_initial_switch_from_arb_f0_to_f1 failed\n");
5372 return ret;
5373 }
5374 ret = ci_init_smc_table(adev);
5375 if (ret) {
5376 DRM_ERROR("ci_init_smc_table failed\n");
5377 return ret;
5378 }
5379 ret = ci_init_arb_table_index(adev);
5380 if (ret) {
5381 DRM_ERROR("ci_init_arb_table_index failed\n");
5382 return ret;
5383 }
5384 if (pi->caps_dynamic_ac_timing) {
5385 ret = ci_populate_initial_mc_reg_table(adev);
5386 if (ret) {
5387 DRM_ERROR("ci_populate_initial_mc_reg_table failed\n");
5388 return ret;
5389 }
5390 }
5391 ret = ci_populate_pm_base(adev);
5392 if (ret) {
5393 DRM_ERROR("ci_populate_pm_base failed\n");
5394 return ret;
5395 }
5396 ci_dpm_start_smc(adev);
5397 ci_enable_vr_hot_gpio_interrupt(adev);
5398 ret = ci_notify_smc_display_change(adev, false);
5399 if (ret) {
5400 DRM_ERROR("ci_notify_smc_display_change failed\n");
5401 return ret;
5402 }
5403 ci_enable_sclk_control(adev, true);
5404 ret = ci_enable_ulv(adev, true);
5405 if (ret) {
5406 DRM_ERROR("ci_enable_ulv failed\n");
5407 return ret;
5408 }
5409 ret = ci_enable_ds_master_switch(adev, true);
5410 if (ret) {
5411 DRM_ERROR("ci_enable_ds_master_switch failed\n");
5412 return ret;
5413 }
5414 ret = ci_start_dpm(adev);
5415 if (ret) {
5416 DRM_ERROR("ci_start_dpm failed\n");
5417 return ret;
5418 }
5419 ret = ci_enable_didt(adev, true);
5420 if (ret) {
5421 DRM_ERROR("ci_enable_didt failed\n");
5422 return ret;
5423 }
5424 ret = ci_enable_smc_cac(adev, true);
5425 if (ret) {
5426 DRM_ERROR("ci_enable_smc_cac failed\n");
5427 return ret;
5428 }
5429 ret = ci_enable_power_containment(adev, true);
5430 if (ret) {
5431 DRM_ERROR("ci_enable_power_containment failed\n");
5432 return ret;
5433 }
5434
5435 ret = ci_power_control_set_level(adev);
5436 if (ret) {
5437 DRM_ERROR("ci_power_control_set_level failed\n");
5438 return ret;
5439 }
5440
5441 ci_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
5442
5443 ret = ci_enable_thermal_based_sclk_dpm(adev, true);
5444 if (ret) {
5445 DRM_ERROR("ci_enable_thermal_based_sclk_dpm failed\n");
5446 return ret;
5447 }
5448
5449 ci_thermal_start_thermal_controller(adev);
5450
5451 ci_update_current_ps(adev, boot_ps);
5452
Alex Deuchera2e73f52015-04-20 17:09:27 -04005453 return 0;
5454}
5455
5456static void ci_dpm_disable(struct amdgpu_device *adev)
5457{
5458 struct ci_power_info *pi = ci_get_pi(adev);
5459 struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps;
5460
5461 amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
5462 AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
5463 amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
5464 AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);
5465
Rex Zhuc08770e2016-08-24 19:39:06 +08005466 ci_dpm_powergate_uvd(adev, true);
Alex Deuchera2e73f52015-04-20 17:09:27 -04005467
5468 if (!amdgpu_ci_is_smc_running(adev))
5469 return;
5470
5471 ci_thermal_stop_thermal_controller(adev);
5472
5473 if (pi->thermal_protection)
5474 ci_enable_thermal_protection(adev, false);
5475 ci_enable_power_containment(adev, false);
5476 ci_enable_smc_cac(adev, false);
5477 ci_enable_didt(adev, false);
5478 ci_enable_spread_spectrum(adev, false);
5479 ci_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, false);
5480 ci_stop_dpm(adev);
5481 ci_enable_ds_master_switch(adev, false);
5482 ci_enable_ulv(adev, false);
5483 ci_clear_vc(adev);
5484 ci_reset_to_default(adev);
5485 ci_dpm_stop_smc(adev);
5486 ci_force_switch_to_arb_f0(adev);
5487 ci_enable_thermal_based_sclk_dpm(adev, false);
5488
5489 ci_update_current_ps(adev, boot_ps);
5490}
5491
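/**
 * ci_dpm_set_power_state - program the requested power state
 *
 * @handle: amdgpu_device pointer
 *
 * Reprograms the sclk/mclk DPM tables for the requested state: freeze
 * the DPM levels, upload the new levels and enable mask, update VCE DPM
 * and memory timings, then unfreeze. PCIe link speed changes are
 * requested before and signalled after the switch when supported.
 * Returns 0 on success, error on failure.
 */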
Rex Zhucfa289f2017-09-06 15:27:59 +08005492static int ci_dpm_set_power_state(void *handle)
Alex Deuchera2e73f52015-04-20 17:09:27 -04005493{
Rex Zhucfa289f2017-09-06 15:27:59 +08005494 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
Alex Deuchera2e73f52015-04-20 17:09:27 -04005495 struct ci_power_info *pi = ci_get_pi(adev);
5496 struct amdgpu_ps *new_ps = &pi->requested_rps;
5497 struct amdgpu_ps *old_ps = &pi->current_rps;
5498 int ret;
5499
5500 ci_find_dpm_states_clocks_in_dpm_table(adev, new_ps);
5501 if (pi->pcie_performance_request)
5502 ci_request_link_speed_change_before_state_change(adev, new_ps, old_ps);
5503 ret = ci_freeze_sclk_mclk_dpm(adev);
5504 if (ret) {
5505 DRM_ERROR("ci_freeze_sclk_mclk_dpm failed\n");
5506 return ret;
5507 }
5508 ret = ci_populate_and_upload_sclk_mclk_dpm_levels(adev, new_ps);
5509 if (ret) {
5510 DRM_ERROR("ci_populate_and_upload_sclk_mclk_dpm_levels failed\n");
5511 return ret;
5512 }
5513 ret = ci_generate_dpm_level_enable_mask(adev, new_ps);
5514 if (ret) {
5515 DRM_ERROR("ci_generate_dpm_level_enable_mask failed\n");
5516 return ret;
5517 }
5518
5519 ret = ci_update_vce_dpm(adev, new_ps, old_ps);
5520 if (ret) {
5521 DRM_ERROR("ci_update_vce_dpm failed\n");
5522 return ret;
5523 }
5524
5525 ret = ci_update_sclk_t(adev);
5526 if (ret) {
5527 DRM_ERROR("ci_update_sclk_t failed\n");
5528 return ret;
5529 }
5530 if (pi->caps_dynamic_ac_timing) {
5531 ret = ci_update_and_upload_mc_reg_table(adev);
5532 if (ret) {
5533 DRM_ERROR("ci_update_and_upload_mc_reg_table failed\n");
5534 return ret;
5535 }
5536 }
5537 ret = ci_program_memory_timing_parameters(adev);
5538 if (ret) {
5539 DRM_ERROR("ci_program_memory_timing_parameters failed\n");
5540 return ret;
5541 }
5542 ret = ci_unfreeze_sclk_mclk_dpm(adev);
5543 if (ret) {
5544 DRM_ERROR("ci_unfreeze_sclk_mclk_dpm failed\n");
5545 return ret;
5546 }
5547 ret = ci_upload_dpm_level_enable_mask(adev);
5548 if (ret) {
5549 DRM_ERROR("ci_upload_dpm_level_enable_mask failed\n");
5550 return ret;
5551 }
5552 if (pi->pcie_performance_request)
5553 ci_notify_link_speed_change_after_state_change(adev, new_ps, old_ps);
5554
5555 return 0;
5556}
5557
5558#if 0
5559static void ci_dpm_reset_asic(struct amdgpu_device *adev)
5560{
5561 ci_set_boot_state(adev);
5562}
5563#endif
5564
Rex Zhucfa289f2017-09-06 15:27:59 +08005565static void ci_dpm_display_configuration_changed(void *handle)
Alex Deuchera2e73f52015-04-20 17:09:27 -04005566{
Rex Zhucfa289f2017-09-06 15:27:59 +08005567 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5568
Alex Deuchera2e73f52015-04-20 17:09:27 -04005569 ci_program_display_gap(adev);
5570}
5571
5572union power_info {
5573 struct _ATOM_POWERPLAY_INFO info;
5574 struct _ATOM_POWERPLAY_INFO_V2 info_2;
5575 struct _ATOM_POWERPLAY_INFO_V3 info_3;
5576 struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
5577 struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
5578 struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
5579};
5580
5581union pplib_clock_info {
5582 struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
5583 struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
5584 struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
5585 struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
5586 struct _ATOM_PPLIB_SI_CLOCK_INFO si;
5587 struct _ATOM_PPLIB_CI_CLOCK_INFO ci;
5588};
5589
5590union pplib_power_state {
5591 struct _ATOM_PPLIB_STATE v1;
5592 struct _ATOM_PPLIB_STATE_V2 v2;
5593};
5594
5595static void ci_parse_pplib_non_clock_info(struct amdgpu_device *adev,
5596 struct amdgpu_ps *rps,
5597 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
5598 u8 table_rev)
5599{
5600 rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
5601 rps->class = le16_to_cpu(non_clock_info->usClassification);
5602 rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
5603
5604 if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
5605 rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
5606 rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
5607 } else {
5608 rps->vclk = 0;
5609 rps->dclk = 0;
5610 }
5611
5612 if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
5613 adev->pm.dpm.boot_ps = rps;
5614 if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
5615 adev->pm.dpm.uvd_ps = rps;
5616}
5617
5618static void ci_parse_pplib_clock_info(struct amdgpu_device *adev,
5619 struct amdgpu_ps *rps, int index,
5620 union pplib_clock_info *clock_info)
5621{
5622 struct ci_power_info *pi = ci_get_pi(adev);
5623 struct ci_ps *ps = ci_get_ps(rps);
5624 struct ci_pl *pl = &ps->performance_levels[index];
5625
5626 ps->performance_level_count = index + 1;
5627
5628 pl->sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
5629 pl->sclk |= clock_info->ci.ucEngineClockHigh << 16;
5630 pl->mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
5631 pl->mclk |= clock_info->ci.ucMemoryClockHigh << 16;
5632
5633 pl->pcie_gen = amdgpu_get_pcie_gen_support(adev,
5634 pi->sys_pcie_mask,
5635 pi->vbios_boot_state.pcie_gen_bootup_value,
5636 clock_info->ci.ucPCIEGen);
5637 pl->pcie_lane = amdgpu_get_pcie_lane_support(adev,
5638 pi->vbios_boot_state.pcie_lane_bootup_value,
5639 le16_to_cpu(clock_info->ci.usPCIELane));
5640
5641 if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) {
5642 pi->acpi_pcie_gen = pl->pcie_gen;
5643 }
5644
5645 if (rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) {
5646 pi->ulv.supported = true;
5647 pi->ulv.pl = *pl;
5648 pi->ulv.cg_ulv_parameter = CISLANDS_CGULVPARAMETER_DFLT;
5649 }
5650
5651 /* patch up boot state */
5652 if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
5653 pl->mclk = pi->vbios_boot_state.mclk_bootup_value;
5654 pl->sclk = pi->vbios_boot_state.sclk_bootup_value;
5655 pl->pcie_gen = pi->vbios_boot_state.pcie_gen_bootup_value;
5656 pl->pcie_lane = pi->vbios_boot_state.pcie_lane_bootup_value;
5657 }
5658
5659 switch (rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
5660 case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
5661 pi->use_pcie_powersaving_levels = true;
5662 if (pi->pcie_gen_powersaving.max < pl->pcie_gen)
5663 pi->pcie_gen_powersaving.max = pl->pcie_gen;
5664 if (pi->pcie_gen_powersaving.min > pl->pcie_gen)
5665 pi->pcie_gen_powersaving.min = pl->pcie_gen;
5666 if (pi->pcie_lane_powersaving.max < pl->pcie_lane)
5667 pi->pcie_lane_powersaving.max = pl->pcie_lane;
5668 if (pi->pcie_lane_powersaving.min > pl->pcie_lane)
5669 pi->pcie_lane_powersaving.min = pl->pcie_lane;
5670 break;
5671 case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
5672 pi->use_pcie_performance_levels = true;
5673 if (pi->pcie_gen_performance.max < pl->pcie_gen)
5674 pi->pcie_gen_performance.max = pl->pcie_gen;
5675 if (pi->pcie_gen_performance.min > pl->pcie_gen)
5676 pi->pcie_gen_performance.min = pl->pcie_gen;
5677 if (pi->pcie_lane_performance.max < pl->pcie_lane)
5678 pi->pcie_lane_performance.max = pl->pcie_lane;
5679 if (pi->pcie_lane_performance.min > pl->pcie_lane)
5680 pi->pcie_lane_performance.min = pl->pcie_lane;
5681 break;
5682 default:
5683 break;
5684 }
5685}
5686
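/**
 * ci_parse_power_table - parse the ATOM PowerPlay table
 *
 * @adev: amdgpu_device pointer
 *
 * Walks the PPLib state, clock info and non-clock info arrays from the
 * vbios and fills adev->pm.dpm.ps with ci_ps performance levels, then
 * records the sclk/mclk pairs for the VCE states.
 * Returns 0 on success, error on failure.
 */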
5687static int ci_parse_power_table(struct amdgpu_device *adev)
5688{
5689 struct amdgpu_mode_info *mode_info = &adev->mode_info;
5690 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
5691 union pplib_power_state *power_state;
5692 int i, j, k, non_clock_array_index, clock_array_index;
5693 union pplib_clock_info *clock_info;
5694 struct _StateArray *state_array;
5695 struct _ClockInfoArray *clock_info_array;
5696 struct _NonClockInfoArray *non_clock_info_array;
5697 union power_info *power_info;
5698 int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
5699 u16 data_offset;
5700 u8 frev, crev;
5701 u8 *power_state_offset;
5702 struct ci_ps *ps;
5703
5704 if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
5705 &frev, &crev, &data_offset))
5706 return -EINVAL;
5707 power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
5708
5709 amdgpu_add_thermal_controller(adev);
5710
5711 state_array = (struct _StateArray *)
5712 (mode_info->atom_context->bios + data_offset +
5713 le16_to_cpu(power_info->pplib.usStateArrayOffset));
5714 clock_info_array = (struct _ClockInfoArray *)
5715 (mode_info->atom_context->bios + data_offset +
5716 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
5717 non_clock_info_array = (struct _NonClockInfoArray *)
5718 (mode_info->atom_context->bios + data_offset +
5719 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
5720
5721	adev->pm.dpm.ps = kcalloc(state_array->ucNumEntries,
5722				  sizeof(struct amdgpu_ps), GFP_KERNEL);
5723 if (!adev->pm.dpm.ps)
5724 return -ENOMEM;
5725 power_state_offset = (u8 *)state_array->states;
5726 for (i = 0; i < state_array->ucNumEntries; i++) {
5727 u8 *idx;
5728 power_state = (union pplib_power_state *)power_state_offset;
5729 non_clock_array_index = power_state->v2.nonClockInfoIndex;
5730 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
5731 &non_clock_info_array->nonClockInfo[non_clock_array_index];
5732 ps = kzalloc(sizeof(struct ci_ps), GFP_KERNEL);
5733	if (ps == NULL) {
		for (j = 0; j < i; j++)
			kfree(adev->pm.dpm.ps[j].ps_priv);
5734		kfree(adev->pm.dpm.ps);
5735		return -ENOMEM;
5736	}
5737 adev->pm.dpm.ps[i].ps_priv = ps;
5738 ci_parse_pplib_non_clock_info(adev, &adev->pm.dpm.ps[i],
5739 non_clock_info,
5740 non_clock_info_array->ucEntrySize);
5741 k = 0;
5742 idx = (u8 *)&power_state->v2.clockInfoIndex[0];
5743 for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
5744 clock_array_index = idx[j];
5745 if (clock_array_index >= clock_info_array->ucNumEntries)
5746 continue;
5747 if (k >= CISLANDS_MAX_HARDWARE_POWERLEVELS)
5748 break;
5749 clock_info = (union pplib_clock_info *)
5750 ((u8 *)&clock_info_array->clockInfo[0] +
5751 (clock_array_index * clock_info_array->ucEntrySize));
5752 ci_parse_pplib_clock_info(adev,
5753 &adev->pm.dpm.ps[i], k,
5754 clock_info);
5755 k++;
5756 }
5757 power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
5758 }
5759 adev->pm.dpm.num_ps = state_array->ucNumEntries;
5760
5761 /* fill in the vce power states */
Rex Zhu66ba1af2016-10-12 15:38:56 +08005762 for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
Alex Deuchera2e73f52015-04-20 17:09:27 -04005763 u32 sclk, mclk;
5764 clock_array_index = adev->pm.dpm.vce_states[i].clk_idx;
5765 clock_info = (union pplib_clock_info *)
5766 &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
5767 sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
5768 sclk |= clock_info->ci.ucEngineClockHigh << 16;
5769 mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
5770 mclk |= clock_info->ci.ucMemoryClockHigh << 16;
5771 adev->pm.dpm.vce_states[i].sclk = sclk;
5772 adev->pm.dpm.vce_states[i].mclk = mclk;
5773 }
5774
5775 return 0;
5776}
5777
5778static int ci_get_vbios_boot_values(struct amdgpu_device *adev,
5779 struct ci_vbios_boot_state *boot_state)
5780{
5781 struct amdgpu_mode_info *mode_info = &adev->mode_info;
5782 int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
5783 ATOM_FIRMWARE_INFO_V2_2 *firmware_info;
5784 u8 frev, crev;
5785 u16 data_offset;
5786
5787 if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
5788 &frev, &crev, &data_offset)) {
5789 firmware_info =
5790 (ATOM_FIRMWARE_INFO_V2_2 *)(mode_info->atom_context->bios +
5791 data_offset);
5792 boot_state->mvdd_bootup_value = le16_to_cpu(firmware_info->usBootUpMVDDCVoltage);
5793 boot_state->vddc_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCVoltage);
5794 boot_state->vddci_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCIVoltage);
5795 boot_state->pcie_gen_bootup_value = ci_get_current_pcie_speed(adev);
5796 boot_state->pcie_lane_bootup_value = ci_get_current_pcie_lane_number(adev);
5797 boot_state->sclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultEngineClock);
5798 boot_state->mclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultMemoryClock);
5799
5800 return 0;
5801 }
5802 return -EINVAL;
5803}
5804
5805static void ci_dpm_fini(struct amdgpu_device *adev)
5806{
5807 int i;
5808
5809 for (i = 0; i < adev->pm.dpm.num_ps; i++) {
5810 kfree(adev->pm.dpm.ps[i].ps_priv);
5811 }
5812 kfree(adev->pm.dpm.ps);
5813 kfree(adev->pm.dpm.priv);
5814 kfree(adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries);
5815 amdgpu_free_extended_power_table(adev);
5816}
5817
5818/**
5819 * ci_dpm_init_microcode - load ucode images from disk
5820 *
5821 * @adev: amdgpu_device pointer
5822 *
5823 * Use the firmware interface to load the ucode images into
5824 * the driver (not loaded into hw).
5825 * Returns 0 on success, error on failure.
5826 */
5827static int ci_dpm_init_microcode(struct amdgpu_device *adev)
5828{
5829 const char *chip_name;
5830 char fw_name[30];
5831 int err;
5832
5833 DRM_DEBUG("\n");
5834
5835 switch (adev->asic_type) {
5836 case CHIP_BONAIRE:
Alex Deucher2254c212015-12-10 00:49:32 -05005837 if ((adev->pdev->revision == 0x80) ||
5838 (adev->pdev->revision == 0x81) ||
5839 (adev->pdev->device == 0x665f))
5840 chip_name = "bonaire_k";
5841 else
5842 chip_name = "bonaire";
Alex Deuchera2e73f52015-04-20 17:09:27 -04005843 break;
5844 case CHIP_HAWAII:
Alex Deucher2254c212015-12-10 00:49:32 -05005845 if (adev->pdev->revision == 0x80)
5846 chip_name = "hawaii_k";
5847 else
5848 chip_name = "hawaii";
Alex Deuchera2e73f52015-04-20 17:09:27 -04005849 break;
5850 case CHIP_KAVERI:
5851 case CHIP_KABINI:
Alex Deucherb9a8be92016-07-29 18:14:39 -04005852 case CHIP_MULLINS:
Alex Deuchera2e73f52015-04-20 17:09:27 -04005853 default: BUG();
5854 }
5855
5856 snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
5857 err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
5858 if (err)
5859 goto out;
5860 err = amdgpu_ucode_validate(adev->pm.fw);
5861
5862out:
5863 if (err) {
Joe Perches7ca85292017-02-28 04:55:52 -08005864 pr_err("cik_smc: Failed to load firmware \"%s\"\n", fw_name);
Alex Deuchera2e73f52015-04-20 17:09:27 -04005865 release_firmware(adev->pm.fw);
5866 adev->pm.fw = NULL;
5867 }
5868 return err;
5869}
5870
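/**
 * ci_dpm_init - set up dpm driver state
 *
 * @adev: amdgpu_device pointer
 *
 * Allocates the ci_power_info structure, parses the vbios boot values,
 * platform caps and power tables, applies leakage patches, and derives
 * voltage control modes, GPIO configuration and thermal limits.
 * Returns 0 on success, error on failure (state is freed via ci_dpm_fini).
 */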
5871static int ci_dpm_init(struct amdgpu_device *adev)
5872{
5873 int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
5874 SMU7_Discrete_DpmTable *dpm_table;
5875 struct amdgpu_gpio_rec gpio;
5876 u16 data_offset, size;
5877 u8 frev, crev;
5878 struct ci_power_info *pi;
5879 int ret;
Alex Deuchera2e73f52015-04-20 17:09:27 -04005880
Alex Deuchera2e73f52015-04-20 17:09:27 -04005881 pi = kzalloc(sizeof(struct ci_power_info), GFP_KERNEL);
5882 if (pi == NULL)
5883 return -ENOMEM;
5884 adev->pm.dpm.priv = pi;
5885
Alex Deucher50171eb2016-02-04 10:44:04 -05005886 pi->sys_pcie_mask =
5887 (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK) >>
5888 CAIL_PCIE_LINK_SPEED_SUPPORT_SHIFT;
5889
Alex Deuchera2e73f52015-04-20 17:09:27 -04005890 pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
5891
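	/* pcie gen/lane min/max are seeded inverted on purpose so the
	 * first power state parsed from the pptable initializes the range
	 */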
5892 pi->pcie_gen_performance.max = AMDGPU_PCIE_GEN1;
5893 pi->pcie_gen_performance.min = AMDGPU_PCIE_GEN3;
5894 pi->pcie_gen_powersaving.max = AMDGPU_PCIE_GEN1;
5895 pi->pcie_gen_powersaving.min = AMDGPU_PCIE_GEN3;
5896
5897 pi->pcie_lane_performance.max = 0;
5898 pi->pcie_lane_performance.min = 16;
5899 pi->pcie_lane_powersaving.max = 0;
5900 pi->pcie_lane_powersaving.min = 16;
5901
5902 ret = ci_get_vbios_boot_values(adev, &pi->vbios_boot_state);
5903 if (ret) {
5904 ci_dpm_fini(adev);
5905 return ret;
5906 }
5907
5908 ret = amdgpu_get_platform_caps(adev);
5909 if (ret) {
5910 ci_dpm_fini(adev);
5911 return ret;
5912 }
5913
5914 ret = amdgpu_parse_extended_power_table(adev);
5915 if (ret) {
5916 ci_dpm_fini(adev);
5917 return ret;
5918 }
5919
5920 ret = ci_parse_power_table(adev);
5921 if (ret) {
5922 ci_dpm_fini(adev);
5923 return ret;
5924 }
5925
5926 pi->dll_default_on = false;
5927 pi->sram_end = SMC_RAM_END;
5928
5929 pi->activity_target[0] = CISLAND_TARGETACTIVITY_DFLT;
5930 pi->activity_target[1] = CISLAND_TARGETACTIVITY_DFLT;
5931 pi->activity_target[2] = CISLAND_TARGETACTIVITY_DFLT;
5932 pi->activity_target[3] = CISLAND_TARGETACTIVITY_DFLT;
5933 pi->activity_target[4] = CISLAND_TARGETACTIVITY_DFLT;
5934 pi->activity_target[5] = CISLAND_TARGETACTIVITY_DFLT;
5935 pi->activity_target[6] = CISLAND_TARGETACTIVITY_DFLT;
5936 pi->activity_target[7] = CISLAND_TARGETACTIVITY_DFLT;
5937
5938 pi->mclk_activity_target = CISLAND_MCLK_TARGETACTIVITY_DFLT;
5939
5940 pi->sclk_dpm_key_disabled = 0;
5941 pi->mclk_dpm_key_disabled = 0;
5942 pi->pcie_dpm_key_disabled = 0;
5943 pi->thermal_sclk_dpm_enabled = 0;
5944
Rex Zhu801caaf2016-11-02 13:35:15 +08005945 if (amdgpu_pp_feature_mask & SCLK_DEEP_SLEEP_MASK)
Rex Zhu66bc3f72016-07-28 17:36:35 +08005946 pi->caps_sclk_ds = true;
5947 else
5948 pi->caps_sclk_ds = false;
Alex Deuchera2e73f52015-04-20 17:09:27 -04005949
5950 pi->mclk_strobe_mode_threshold = 40000;
5951 pi->mclk_stutter_mode_threshold = 40000;
5952 pi->mclk_edc_enable_threshold = 40000;
5953 pi->mclk_edc_wr_enable_threshold = 40000;
5954
5955 ci_initialize_powertune_defaults(adev);
5956
5957 pi->caps_fps = false;
5958
5959 pi->caps_sclk_throttle_low_notification = false;
5960
5961 pi->caps_uvd_dpm = true;
5962 pi->caps_vce_dpm = true;
5963
5964 ci_get_leakage_voltages(adev);
5965 ci_patch_dependency_tables_with_leakage(adev);
5966 ci_set_private_data_variables_based_on_pptable(adev);
5967
5968 adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
5969		kcalloc(4, sizeof(struct amdgpu_clock_voltage_dependency_entry), GFP_KERNEL);
5970 if (!adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
5971 ci_dpm_fini(adev);
5972 return -ENOMEM;
5973 }
5974 adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4;
5975 adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
5976 adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;
5977 adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000;
5978 adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 720;
5979 adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000;
5980 adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 810;
5981 adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000;
5982 adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 900;
5983
5984 adev->pm.dpm.dyn_state.mclk_sclk_ratio = 4;
5985 adev->pm.dpm.dyn_state.sclk_mclk_delta = 15000;
5986 adev->pm.dpm.dyn_state.vddc_vddci_delta = 200;
5987
5988 adev->pm.dpm.dyn_state.valid_sclk_values.count = 0;
5989 adev->pm.dpm.dyn_state.valid_sclk_values.values = NULL;
5990 adev->pm.dpm.dyn_state.valid_mclk_values.count = 0;
5991 adev->pm.dpm.dyn_state.valid_mclk_values.values = NULL;
5992
5993 if (adev->asic_type == CHIP_HAWAII) {
5994 pi->thermal_temp_setting.temperature_low = 94500;
5995 pi->thermal_temp_setting.temperature_high = 95000;
5996 pi->thermal_temp_setting.temperature_shutdown = 104000;
5997 } else {
5998 pi->thermal_temp_setting.temperature_low = 99500;
5999 pi->thermal_temp_setting.temperature_high = 100000;
6000 pi->thermal_temp_setting.temperature_shutdown = 104000;
6001 }
6002
6003 pi->uvd_enabled = false;
6004
6005 dpm_table = &pi->smc_state_table;
6006
6007 gpio = amdgpu_atombios_lookup_gpio(adev, VDDC_VRHOT_GPIO_PINID);
6008 if (gpio.valid) {
6009 dpm_table->VRHotGpio = gpio.shift;
6010 adev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_REGULATOR_HOT;
6011 } else {
6012 dpm_table->VRHotGpio = CISLANDS_UNUSED_GPIO_PIN;
6013 adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_REGULATOR_HOT;
6014 }
6015
6016 gpio = amdgpu_atombios_lookup_gpio(adev, PP_AC_DC_SWITCH_GPIO_PINID);
6017 if (gpio.valid) {
6018 dpm_table->AcDcGpio = gpio.shift;
6019 adev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_HARDWAREDC;
6020 } else {
6021 dpm_table->AcDcGpio = CISLANDS_UNUSED_GPIO_PIN;
6022 adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_HARDWAREDC;
6023 }
6024
6025 gpio = amdgpu_atombios_lookup_gpio(adev, VDDC_PCC_GPIO_PINID);
6026 if (gpio.valid) {
6027 u32 tmp = RREG32_SMC(ixCNB_PWRMGT_CNTL);
6028
6029 switch (gpio.shift) {
6030 case 0:
6031 tmp &= ~CNB_PWRMGT_CNTL__GNB_SLOW_MODE_MASK;
6032 tmp |= 1 << CNB_PWRMGT_CNTL__GNB_SLOW_MODE__SHIFT;
6033 break;
6034 case 1:
6035 tmp &= ~CNB_PWRMGT_CNTL__GNB_SLOW_MODE_MASK;
6036 tmp |= 2 << CNB_PWRMGT_CNTL__GNB_SLOW_MODE__SHIFT;
6037 break;
6038 case 2:
6039 tmp |= CNB_PWRMGT_CNTL__GNB_SLOW_MASK;
6040 break;
6041 case 3:
6042 tmp |= CNB_PWRMGT_CNTL__FORCE_NB_PS1_MASK;
6043 break;
6044 case 4:
6045 tmp |= CNB_PWRMGT_CNTL__DPM_ENABLED_MASK;
6046 break;
6047 default:
Rex Zhu58a6a7d2016-11-09 17:27:59 +08006048 DRM_INFO("Invalid PCC GPIO: %u!\n", gpio.shift);
Alex Deuchera2e73f52015-04-20 17:09:27 -04006049 break;
6050 }
6051 WREG32_SMC(ixCNB_PWRMGT_CNTL, tmp);
6052 }
6053
6054 pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_NONE;
6055 pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_NONE;
6056 pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_NONE;
6057 if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT))
6058 pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
6059 else if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
6060 pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
6061
6062 if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL) {
6063 if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
6064 pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
6065 else if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
6066 pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
6067 else
6068 adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL;
6069 }
6070
6071 if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_MVDDCONTROL) {
6072 if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
6073 pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
6074 else if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2))
6075 pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
6076 else
6077 adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_MVDDCONTROL;
6078 }
6079
6080 pi->vddc_phase_shed_control = true;
6081
6082#if defined(CONFIG_ACPI)
6083 pi->pcie_performance_request =
6084 amdgpu_acpi_is_pcie_performance_request_supported(adev);
6085#else
6086 pi->pcie_performance_request = false;
6087#endif
6088
6089 if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, &size,
6090 &frev, &crev, &data_offset)) {
6091 pi->caps_sclk_ss_support = true;
6092 pi->caps_mclk_ss_support = true;
6093 pi->dynamic_ss = true;
6094 } else {
6095 pi->caps_sclk_ss_support = false;
6096 pi->caps_mclk_ss_support = false;
6097 pi->dynamic_ss = true;
6098 }
6099
6100 if (adev->pm.int_thermal_type != THERMAL_TYPE_NONE)
6101 pi->thermal_protection = true;
6102 else
6103 pi->thermal_protection = false;
6104
6105 pi->caps_dynamic_ac_timing = true;
6106
Rex Zhuc08770e2016-08-24 19:39:06 +08006107 pi->uvd_power_gated = true;
Alex Deuchera2e73f52015-04-20 17:09:27 -04006108
6109 /* make sure dc limits are valid */
6110 if ((adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
6111 (adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0))
6112 adev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
6113 adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
6114
6115 pi->fan_ctrl_is_in_default_mode = true;
6116
6117 return 0;
6118}
6119
6120static void
Rex Zhucfa289f2017-09-06 15:27:59 +08006121ci_dpm_debugfs_print_current_performance_level(void *handle,
Alex Deuchera2e73f52015-04-20 17:09:27 -04006122 struct seq_file *m)
6123{
Rex Zhucfa289f2017-09-06 15:27:59 +08006124 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
Alex Deuchera2e73f52015-04-20 17:09:27 -04006125 struct ci_power_info *pi = ci_get_pi(adev);
6126 struct amdgpu_ps *rps = &pi->current_rps;
6127 u32 sclk = ci_get_average_sclk_freq(adev);
6128 u32 mclk = ci_get_average_mclk_freq(adev);
Rex Zhu93545732016-01-06 17:08:46 +08006129 u32 activity_percent = 50;
6130 int ret;
6131
6132 ret = ci_read_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, AverageGraphicsA),
6133 &activity_percent);
6134
6135 if (ret == 0) {
6136 activity_percent += 0x80;
6137 activity_percent >>= 8;
6138 activity_percent = activity_percent > 100 ? 100 : activity_percent;
6139 }
Alex Deuchera2e73f52015-04-20 17:09:27 -04006140
Rex Zhuddbc2592016-11-25 19:23:06 +08006141 seq_printf(m, "uvd %sabled\n", pi->uvd_power_gated ? "dis" : "en");
Alex Deuchera2e73f52015-04-20 17:09:27 -04006142 seq_printf(m, "vce %sabled\n", rps->vce_active ? "en" : "dis");
6143 seq_printf(m, "power level avg sclk: %u mclk: %u\n",
6144 sclk, mclk);
Rex Zhu93545732016-01-06 17:08:46 +08006145 seq_printf(m, "GPU load: %u %%\n", activity_percent);
Alex Deuchera2e73f52015-04-20 17:09:27 -04006146}
6147
Rex Zhucfa289f2017-09-06 15:27:59 +08006148static void ci_dpm_print_power_state(void *handle, void *current_ps)
Alex Deuchera2e73f52015-04-20 17:09:27 -04006149{
Rex Zhucfa289f2017-09-06 15:27:59 +08006150 struct amdgpu_ps *rps = (struct amdgpu_ps *)current_ps;
Alex Deuchera2e73f52015-04-20 17:09:27 -04006151 struct ci_ps *ps = ci_get_ps(rps);
6152 struct ci_pl *pl;
6153 int i;
Rex Zhucfa289f2017-09-06 15:27:59 +08006154 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
Alex Deuchera2e73f52015-04-20 17:09:27 -04006155
6156 amdgpu_dpm_print_class_info(rps->class, rps->class2);
6157 amdgpu_dpm_print_cap_info(rps->caps);
6158 printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
6159 for (i = 0; i < ps->performance_level_count; i++) {
6160 pl = &ps->performance_levels[i];
6161 printk("\t\tpower level %d sclk: %u mclk: %u pcie gen: %u pcie lanes: %u\n",
6162 i, pl->sclk, pl->mclk, pl->pcie_gen + 1, pl->pcie_lane);
6163 }
6164 amdgpu_dpm_print_ps_status(adev, rps);
6165}
6166
Rex Zhu1d516c42016-10-14 19:16:54 +08006167static inline bool ci_are_power_levels_equal(const struct ci_pl *ci_cpl1,
6168 const struct ci_pl *ci_cpl2)
6169{
6170 return ((ci_cpl1->mclk == ci_cpl2->mclk) &&
6171 (ci_cpl1->sclk == ci_cpl2->sclk) &&
6172 (ci_cpl1->pcie_gen == ci_cpl2->pcie_gen) &&
6173 (ci_cpl1->pcie_lane == ci_cpl2->pcie_lane));
6174}
6175
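/**
 * ci_check_state_equal - compare two power states
 *
 * @handle: amdgpu_device pointer
 * @current_ps: current power state
 * @request_ps: requested power state
 * @equal: result of the comparison
 *
 * Two states are considered equal when every performance level matches
 * and, as a tie breaker, the UVD/VCE clocks match as well.
 * Returns 0 on success, -EINVAL on bad input.
 */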
Rex Zhucfa289f2017-09-06 15:27:59 +08006176static int ci_check_state_equal(void *handle,
6177 void *current_ps,
6178 void *request_ps,
Rex Zhu1d516c42016-10-14 19:16:54 +08006179 bool *equal)
6180{
6181 struct ci_ps *ci_cps;
6182 struct ci_ps *ci_rps;
6183 int i;
Rex Zhucfa289f2017-09-06 15:27:59 +08006184 struct amdgpu_ps *cps = (struct amdgpu_ps *)current_ps;
6185 struct amdgpu_ps *rps = (struct amdgpu_ps *)request_ps;
6186 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
Rex Zhu1d516c42016-10-14 19:16:54 +08006187
6188 if (adev == NULL || cps == NULL || rps == NULL || equal == NULL)
6189 return -EINVAL;
6190
Rex Zhucfa289f2017-09-06 15:27:59 +08006191	ci_cps = ci_get_ps(cps);
6192	ci_rps = ci_get_ps(rps);
Rex Zhu1d516c42016-10-14 19:16:54 +08006193
6194 if (ci_cps == NULL) {
6195 *equal = false;
6196 return 0;
6197 }
6198
6199 if (ci_cps->performance_level_count != ci_rps->performance_level_count) {
6201 *equal = false;
6202 return 0;
6203 }
6204
6205 for (i = 0; i < ci_cps->performance_level_count; i++) {
6206 if (!ci_are_power_levels_equal(&(ci_cps->performance_levels[i]),
6207 &(ci_rps->performance_levels[i]))) {
6208 *equal = false;
6209 return 0;
6210 }
6211 }
6212
6213 /* If all performance levels are the same try to use the UVD clocks to break the tie.*/
6214 *equal = ((cps->vclk == rps->vclk) && (cps->dclk == rps->dclk));
6215 *equal &= ((cps->evclk == rps->evclk) && (cps->ecclk == rps->ecclk));
6216
6217 return 0;
6218}
6219
Rex Zhucfa289f2017-09-06 15:27:59 +08006220static u32 ci_dpm_get_sclk(void *handle, bool low)
Alex Deuchera2e73f52015-04-20 17:09:27 -04006221{
Rex Zhucfa289f2017-09-06 15:27:59 +08006222 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
Alex Deuchera2e73f52015-04-20 17:09:27 -04006223 struct ci_power_info *pi = ci_get_pi(adev);
6224 struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);
6225
6226 if (low)
6227 return requested_state->performance_levels[0].sclk;
6228 else
6229 return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk;
6230}
6231
Rex Zhucfa289f2017-09-06 15:27:59 +08006232static u32 ci_dpm_get_mclk(void *handle, bool low)
Alex Deuchera2e73f52015-04-20 17:09:27 -04006233{
Rex Zhucfa289f2017-09-06 15:27:59 +08006234 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
Alex Deuchera2e73f52015-04-20 17:09:27 -04006235 struct ci_power_info *pi = ci_get_pi(adev);
6236 struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);
6237
6238 if (low)
6239 return requested_state->performance_levels[0].mclk;
6240 else
6241 return requested_state->performance_levels[requested_state->performance_level_count - 1].mclk;
6242}
6243
6244/* get temperature in millidegrees */
Rex Zhucfa289f2017-09-06 15:27:59 +08006245static int ci_dpm_get_temp(void *handle)
Alex Deuchera2e73f52015-04-20 17:09:27 -04006246{
6247 u32 temp;
6248 int actual_temp = 0;
Rex Zhucfa289f2017-09-06 15:27:59 +08006249 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
Alex Deuchera2e73f52015-04-20 17:09:27 -04006250
6251 temp = (RREG32_SMC(ixCG_MULT_THERMAL_STATUS) & CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK) >>
6252 CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT;
6253
6254 if (temp & 0x200)
6255 actual_temp = 255;
6256 else
6257 actual_temp = temp & 0x1ff;
6258
6259 actual_temp = actual_temp * 1000;
6260
6261 return actual_temp;
6262}
6263
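/**
 * ci_set_temperature_range - program the thermal trip points
 *
 * @adev: amdgpu_device pointer
 *
 * Disables thermal alerts, programs the CI temperature range, then
 * re-enables the alerts.
 * Returns 0 on success, error on failure.
 */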
6264static int ci_set_temperature_range(struct amdgpu_device *adev)
6265{
6266 int ret;
6267
6268 ret = ci_thermal_enable_alert(adev, false);
6269 if (ret)
6270 return ret;
6271 ret = ci_thermal_set_temperature_range(adev, CISLANDS_TEMP_RANGE_MIN,
6272 CISLANDS_TEMP_RANGE_MAX);
6273 if (ret)
6274 return ret;
6275 ret = ci_thermal_enable_alert(adev, true);
6276 if (ret)
6277 return ret;
6278 return ret;
6279}
6280
yanyang15fc3aee2015-05-22 14:39:35 -04006281static int ci_dpm_early_init(void *handle)
Alex Deuchera2e73f52015-04-20 17:09:27 -04006282{
yanyang15fc3aee2015-05-22 14:39:35 -04006283 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6284
Alex Deuchera2e73f52015-04-20 17:09:27 -04006285 ci_dpm_set_dpm_funcs(adev);
6286 ci_dpm_set_irq_funcs(adev);
6287
6288 return 0;
6289}
6290
yanyang15fc3aee2015-05-22 14:39:35 -04006291static int ci_dpm_late_init(void *handle)
Alex Deuchera2e73f52015-04-20 17:09:27 -04006292{
6293 int ret;
yanyang15fc3aee2015-05-22 14:39:35 -04006294 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
Alex Deuchera2e73f52015-04-20 17:09:27 -04006295
6296 if (!amdgpu_dpm)
6297 return 0;
6298
Alex Deucherfa022a92015-09-30 17:05:40 -04006299 /* init the sysfs and debugfs files late */
6300 ret = amdgpu_pm_sysfs_init(adev);
6301 if (ret)
6302 return ret;
6303
Alex Deuchera2e73f52015-04-20 17:09:27 -04006304 ret = ci_set_temperature_range(adev);
6305 if (ret)
6306 return ret;
6307
Alex Deuchera2e73f52015-04-20 17:09:27 -04006308 return 0;
6309}
6310
yanyang15fc3aee2015-05-22 14:39:35 -04006311static int ci_dpm_sw_init(void *handle)
Alex Deuchera2e73f52015-04-20 17:09:27 -04006312{
6313 int ret;
yanyang15fc3aee2015-05-22 14:39:35 -04006314 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
Alex Deuchera2e73f52015-04-20 17:09:27 -04006315
Alex Deucherd766e6a2016-03-29 18:28:50 -04006316 ret = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 230,
6317 &adev->pm.dpm.thermal.irq);
Alex Deuchera2e73f52015-04-20 17:09:27 -04006318 if (ret)
6319 return ret;
6320
Alex Deucherd766e6a2016-03-29 18:28:50 -04006321 ret = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 231,
6322 &adev->pm.dpm.thermal.irq);
Alex Deuchera2e73f52015-04-20 17:09:27 -04006323 if (ret)
6324 return ret;
6325
6326 /* default to balanced state */
6327 adev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
6328 adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
Rex Zhue5d03ac2016-12-23 14:39:41 +08006329 adev->pm.dpm.forced_level = AMD_DPM_FORCED_LEVEL_AUTO;
Alex Deuchera2e73f52015-04-20 17:09:27 -04006330 adev->pm.default_sclk = adev->clock.default_sclk;
6331 adev->pm.default_mclk = adev->clock.default_mclk;
6332 adev->pm.current_sclk = adev->clock.default_sclk;
6333 adev->pm.current_mclk = adev->clock.default_mclk;
6334 adev->pm.int_thermal_type = THERMAL_TYPE_NONE;
6335
Christian Königfaad24c2015-05-28 22:02:26 +02006336 ret = ci_dpm_init_microcode(adev);
6337 if (ret)
6338 return ret;
6339
Rex Zhubac601e2017-02-03 17:33:11 +08006340 if (amdgpu_dpm == 0)
6341 return 0;
6342
Alex Deuchera2e73f52015-04-20 17:09:27 -04006343 INIT_WORK(&adev->pm.dpm.thermal.work, amdgpu_dpm_thermal_work_handler);
6344 mutex_lock(&adev->pm.mutex);
6345 ret = ci_dpm_init(adev);
6346 if (ret)
6347 goto dpm_failed;
6348 adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
6349 if (amdgpu_dpm == 1)
6350 amdgpu_pm_print_power_states(adev);
Alex Deuchera2e73f52015-04-20 17:09:27 -04006351 mutex_unlock(&adev->pm.mutex);
6352 DRM_INFO("amdgpu: dpm initialized\n");
6353
6354 return 0;
6355
6356dpm_failed:
6357 ci_dpm_fini(adev);
6358 mutex_unlock(&adev->pm.mutex);
6359 DRM_ERROR("amdgpu: dpm initialization failed\n");
6360 return ret;
6361}
6362
yanyang15fc3aee2015-05-22 14:39:35 -04006363static int ci_dpm_sw_fini(void *handle)
Alex Deuchera2e73f52015-04-20 17:09:27 -04006364{
yanyang15fc3aee2015-05-22 14:39:35 -04006365 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6366
Alex Deucher45607382016-10-21 16:30:10 -04006367 flush_work(&adev->pm.dpm.thermal.work);
6368
Alex Deuchera2e73f52015-04-20 17:09:27 -04006369 mutex_lock(&adev->pm.mutex);
6370 amdgpu_pm_sysfs_fini(adev);
6371 ci_dpm_fini(adev);
6372 mutex_unlock(&adev->pm.mutex);
6373
Alex Deucher768c95e2016-06-01 11:09:01 -04006374 release_firmware(adev->pm.fw);
6375 adev->pm.fw = NULL;
6376
Alex Deuchera2e73f52015-04-20 17:09:27 -04006377 return 0;
6378}
6379
yanyang15fc3aee2015-05-22 14:39:35 -04006380static int ci_dpm_hw_init(void *handle)
Alex Deuchera2e73f52015-04-20 17:09:27 -04006381{
6382 int ret;
6383
yanyang15fc3aee2015-05-22 14:39:35 -04006384 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6385
Rex Zhubac601e2017-02-03 17:33:11 +08006386 if (!amdgpu_dpm) {
6387 ret = ci_upload_firmware(adev);
6388 if (ret) {
6389 DRM_ERROR("ci_upload_firmware failed\n");
6390 return ret;
6391 }
6392 ci_dpm_start_smc(adev);
Alex Deuchera2e73f52015-04-20 17:09:27 -04006393 return 0;
Rex Zhubac601e2017-02-03 17:33:11 +08006394 }
Alex Deuchera2e73f52015-04-20 17:09:27 -04006395
6396 mutex_lock(&adev->pm.mutex);
6397 ci_dpm_setup_asic(adev);
6398 ret = ci_dpm_enable(adev);
6399 if (ret)
6400 adev->pm.dpm_enabled = false;
6401 else
6402 adev->pm.dpm_enabled = true;
6403 mutex_unlock(&adev->pm.mutex);
6404
6405 return ret;
6406}
6407
yanyang15fc3aee2015-05-22 14:39:35 -04006408static int ci_dpm_hw_fini(void *handle)
Alex Deuchera2e73f52015-04-20 17:09:27 -04006409{
yanyang15fc3aee2015-05-22 14:39:35 -04006410 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6411
Alex Deuchera2e73f52015-04-20 17:09:27 -04006412 if (adev->pm.dpm_enabled) {
6413 mutex_lock(&adev->pm.mutex);
6414 ci_dpm_disable(adev);
6415 mutex_unlock(&adev->pm.mutex);
Rex Zhubac601e2017-02-03 17:33:11 +08006416 } else {
6417 ci_dpm_stop_smc(adev);
Alex Deuchera2e73f52015-04-20 17:09:27 -04006418 }
6419
6420 return 0;
6421}
6422
yanyang15fc3aee2015-05-22 14:39:35 -04006423static int ci_dpm_suspend(void *handle)
Alex Deuchera2e73f52015-04-20 17:09:27 -04006424{
yanyang15fc3aee2015-05-22 14:39:35 -04006425 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6426
Alex Deuchera2e73f52015-04-20 17:09:27 -04006427 if (adev->pm.dpm_enabled) {
6428 mutex_lock(&adev->pm.mutex);
Rex Zhu86f8c592016-10-03 20:46:36 +08006429 amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
6430 AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
6431 amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
6432 AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);
6433 adev->pm.dpm.last_user_state = adev->pm.dpm.user_state;
6434 adev->pm.dpm.last_state = adev->pm.dpm.state;
6435 adev->pm.dpm.user_state = POWER_STATE_TYPE_INTERNAL_BOOT;
6436 adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_BOOT;
Alex Deuchera2e73f52015-04-20 17:09:27 -04006437 mutex_unlock(&adev->pm.mutex);
Rex Zhu86f8c592016-10-03 20:46:36 +08006438 amdgpu_pm_compute_clocks(adev);
6439
Alex Deuchera2e73f52015-04-20 17:09:27 -04006440 }
Rex Zhu86f8c592016-10-03 20:46:36 +08006441
Alex Deuchera2e73f52015-04-20 17:09:27 -04006442 return 0;
6443}
6444
yanyang15fc3aee2015-05-22 14:39:35 -04006445static int ci_dpm_resume(void *handle)
Alex Deuchera2e73f52015-04-20 17:09:27 -04006446{
6447 int ret;
yanyang15fc3aee2015-05-22 14:39:35 -04006448 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
Alex Deuchera2e73f52015-04-20 17:09:27 -04006449
6450 if (adev->pm.dpm_enabled) {
6451 /* asic init will reset to the boot state */
6452 mutex_lock(&adev->pm.mutex);
6453 ci_dpm_setup_asic(adev);
6454 ret = ci_dpm_enable(adev);
6455 if (ret)
6456 adev->pm.dpm_enabled = false;
6457 else
6458 adev->pm.dpm_enabled = true;
Rex Zhu86f8c592016-10-03 20:46:36 +08006459 adev->pm.dpm.user_state = adev->pm.dpm.last_user_state;
6460 adev->pm.dpm.state = adev->pm.dpm.last_state;
Alex Deuchera2e73f52015-04-20 17:09:27 -04006461 mutex_unlock(&adev->pm.mutex);
6462 if (adev->pm.dpm_enabled)
6463 amdgpu_pm_compute_clocks(adev);
6464 }
6465 return 0;
6466}
6467
yanyang15fc3aee2015-05-22 14:39:35 -04006468static bool ci_dpm_is_idle(void *handle)
Alex Deuchera2e73f52015-04-20 17:09:27 -04006469{
6470 /* XXX */
6471 return true;
6472}
6473
yanyang15fc3aee2015-05-22 14:39:35 -04006474static int ci_dpm_wait_for_idle(void *handle)
Alex Deuchera2e73f52015-04-20 17:09:27 -04006475{
6476 /* XXX */
6477 return 0;
6478}
6479
yanyang15fc3aee2015-05-22 14:39:35 -04006480static int ci_dpm_soft_reset(void *handle)
Alex Deuchera2e73f52015-04-20 17:09:27 -04006481{
6482 return 0;
6483}
6484
6485static int ci_dpm_set_interrupt_state(struct amdgpu_device *adev,
6486 struct amdgpu_irq_src *source,
6487 unsigned type,
6488 enum amdgpu_interrupt_state state)
6489{
6490 u32 cg_thermal_int;
6491
6492 switch (type) {
6493 case AMDGPU_THERMAL_IRQ_LOW_TO_HIGH:
6494 switch (state) {
6495 case AMDGPU_IRQ_STATE_DISABLE:
6496 cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
Rex Zhuc305fd52015-10-13 13:57:52 +08006497 cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
Alex Deuchera2e73f52015-04-20 17:09:27 -04006498 WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
6499 break;
6500 case AMDGPU_IRQ_STATE_ENABLE:
6501 cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
Rex Zhuc305fd52015-10-13 13:57:52 +08006502 cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
Alex Deuchera2e73f52015-04-20 17:09:27 -04006503 WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
6504 break;
6505 default:
6506 break;
6507 }
6508 break;
6509
6510 case AMDGPU_THERMAL_IRQ_HIGH_TO_LOW:
6511 switch (state) {
6512 case AMDGPU_IRQ_STATE_DISABLE:
6513 cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
Rex Zhuc305fd52015-10-13 13:57:52 +08006514 cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
Alex Deuchera2e73f52015-04-20 17:09:27 -04006515 WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
6516 break;
6517 case AMDGPU_IRQ_STATE_ENABLE:
6518 cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
Rex Zhuc305fd52015-10-13 13:57:52 +08006519 cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
Alex Deuchera2e73f52015-04-20 17:09:27 -04006520 WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
6521 break;
6522 default:
6523 break;
6524 }
6525 break;
6526
6527 default:
6528 break;
6529 }
6530 return 0;
6531}
6532
static int ci_dpm_process_interrupt(struct amdgpu_device *adev,
				    struct amdgpu_irq_src *source,
				    struct amdgpu_iv_entry *entry)
{
	bool queue_thermal = false;

	if (entry == NULL)
		return -EINVAL;

	switch (entry->src_id) {
	case 230: /* thermal low to high */
		DRM_DEBUG("IH: thermal low to high\n");
		adev->pm.dpm.thermal.high_to_low = false;
		queue_thermal = true;
		break;
	case 231: /* thermal high to low */
		DRM_DEBUG("IH: thermal high to low\n");
		adev->pm.dpm.thermal.high_to_low = true;
		queue_thermal = true;
		break;
	default:
		break;
	}

	if (queue_thermal)
		schedule_work(&adev->pm.dpm.thermal.work);

	return 0;
}

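/*
 * Clock- and powergating transitions are not handled here, but the
 * amd_ip_funcs interface requires the hooks, so they are no-ops.
 */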
static int ci_dpm_set_clockgating_state(void *handle,
					enum amd_clockgating_state state)
{
	return 0;
}

static int ci_dpm_set_powergating_state(void *handle,
					enum amd_powergating_state state)
{
	return 0;
}

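/*
 * Format the SCLK/MCLK/PCIe DPM tables into buf for the pp_dpm_*
 * sysfs files, marking the level currently in use with '*'. Clock
 * values are kept in 10 kHz units, hence the divide by 100 for MHz.
 */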
static int ci_dpm_print_clock_levels(void *handle,
		enum pp_clock_type type, char *buf)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table;
	struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table;
	struct ci_single_dpm_table *pcie_table = &pi->dpm_table.pcie_speed_table;

	int i, now, size = 0;
	uint32_t clock, pcie_speed;

	switch (type) {
	case PP_SCLK:
		amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_API_GetSclkFrequency);
		clock = RREG32(mmSMC_MSG_ARG_0);

		for (i = 0; i < sclk_table->count; i++) {
			if (clock > sclk_table->dpm_levels[i].value)
				continue;
			break;
		}
		now = i;

		for (i = 0; i < sclk_table->count; i++)
			size += sprintf(buf + size, "%d: %uMhz %s\n",
					i, sclk_table->dpm_levels[i].value / 100,
					(i == now) ? "*" : "");
		break;
	case PP_MCLK:
		amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_API_GetMclkFrequency);
		clock = RREG32(mmSMC_MSG_ARG_0);

		for (i = 0; i < mclk_table->count; i++) {
			if (clock > mclk_table->dpm_levels[i].value)
				continue;
			break;
		}
		now = i;

		for (i = 0; i < mclk_table->count; i++)
			size += sprintf(buf + size, "%d: %uMhz %s\n",
					i, mclk_table->dpm_levels[i].value / 100,
					(i == now) ? "*" : "");
		break;
	case PP_PCIE:
		pcie_speed = ci_get_current_pcie_speed(adev);
		for (i = 0; i < pcie_table->count; i++) {
			if (pcie_speed != pcie_table->dpm_levels[i].value)
				continue;
			break;
		}
		now = i;

		for (i = 0; i < pcie_table->count; i++)
			size += sprintf(buf + size, "%d: %s %s\n", i,
					(pcie_table->dpm_levels[i].value == 0) ? "2.5GB, x1" :
					(pcie_table->dpm_levels[i].value == 1) ? "5.0GB, x16" :
					(pcie_table->dpm_levels[i].value == 2) ? "8.0GB, x16" : "",
					(i == now) ? "*" : "");
		break;
	default:
		break;
	}

	return size;
}

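/*
 * Restrict the enabled DPM levels to the given bitmask. Only honored
 * in manual performance mode. The SMC takes a single forced level for
 * PCIe rather than a mask, so the highest set bit is sent instead.
 */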
static int ci_dpm_force_clock_level(void *handle,
		enum pp_clock_type type, uint32_t mask)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ci_power_info *pi = ci_get_pi(adev);

	if (adev->pm.dpm.forced_level & (AMD_DPM_FORCED_LEVEL_AUTO |
				AMD_DPM_FORCED_LEVEL_LOW |
				AMD_DPM_FORCED_LEVEL_HIGH))
		return -EINVAL;

	switch (type) {
	case PP_SCLK:
		if (!pi->sclk_dpm_key_disabled)
			amdgpu_ci_send_msg_to_smc_with_parameter(adev,
					PPSMC_MSG_SCLKDPM_SetEnabledMask,
					pi->dpm_level_enable_mask.sclk_dpm_enable_mask & mask);
		break;

	case PP_MCLK:
		if (!pi->mclk_dpm_key_disabled)
			amdgpu_ci_send_msg_to_smc_with_parameter(adev,
					PPSMC_MSG_MCLKDPM_SetEnabledMask,
					pi->dpm_level_enable_mask.mclk_dpm_enable_mask & mask);
		break;

	case PP_PCIE:
	{
		uint32_t tmp = mask & pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
		uint32_t level = 0;

		while (tmp >>= 1)
			level++;

		if (!pi->pcie_dpm_key_disabled)
			amdgpu_ci_send_msg_to_smc_with_parameter(adev,
					PPSMC_MSG_PCIeDPM_ForceLevel,
					level);
		break;
	}
	default:
		break;
	}

	return 0;
}

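/*
 * Report the current sclk overdrive as a percentage above the golden
 * (default) top-level engine clock.
 */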
static int ci_dpm_get_sclk_od(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_single_dpm_table *sclk_table = &(pi->dpm_table.sclk_table);
	struct ci_single_dpm_table *golden_sclk_table =
			&(pi->golden_dpm_table.sclk_table);
	int value;

	value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
			golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) *
			100 /
			golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;

	return value;
}

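/*
 * Apply an sclk overdrive percentage (clamped to 20%) by scaling the
 * highest performance level of the requested state against the golden
 * table.
 */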
static int ci_dpm_set_sclk_od(void *handle, uint32_t value)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_ps *ps = ci_get_ps(adev->pm.dpm.requested_ps);
	struct ci_single_dpm_table *golden_sclk_table =
			&(pi->golden_dpm_table.sclk_table);

	if (value > 20)
		value = 20;

	ps->performance_levels[ps->performance_level_count - 1].sclk =
			golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value *
			value / 100 +
			golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;

	return 0;
}

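/* Memory-clock overdrive: same scheme as the sclk helpers above. */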
static int ci_dpm_get_mclk_od(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_single_dpm_table *mclk_table = &(pi->dpm_table.mclk_table);
	struct ci_single_dpm_table *golden_mclk_table =
			&(pi->golden_dpm_table.mclk_table);
	int value;

	value = (mclk_table->dpm_levels[mclk_table->count - 1].value -
			golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) *
			100 /
			golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;

	return value;
}

static int ci_dpm_set_mclk_od(void *handle, uint32_t value)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_ps *ps = ci_get_ps(adev->pm.dpm.requested_ps);
	struct ci_single_dpm_table *golden_mclk_table =
			&(pi->golden_dpm_table.mclk_table);

	if (value > 20)
		value = 20;

	ps->performance_levels[ps->performance_level_count - 1].mclk =
			golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value *
			value / 100 +
			golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;

	return 0;
}

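/*
 * Return a copy of the cached gfx or compute power profile (activity
 * threshold, hysteresis, minimum clocks) selected by query->type.
 */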
static int ci_dpm_get_power_profile_state(void *handle,
		struct amd_pp_profile *query)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ci_power_info *pi = ci_get_pi(adev);

	if (!pi || !query)
		return -EINVAL;

	if (query->type == AMD_PP_GFX_PROFILE)
		memcpy(query, &pi->gfx_power_profile,
		       sizeof(struct amd_pp_profile));
	else if (query->type == AMD_PP_COMPUTE_PROFILE)
		memcpy(query, &pi->compute_power_profile,
		       sizeof(struct amd_pp_profile));
	else
		return -EINVAL;

	return 0;
}

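/*
 * Write the requested profile's activity threshold and up/down
 * hysteresis into every graphics DPM level and copy the array into
 * SMC SRAM. ActivityLevel is big-endian on the SMC side, hence
 * cpu_to_be16().
 */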
static int ci_populate_requested_graphic_levels(struct amdgpu_device *adev,
		struct amd_pp_profile *request)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_dpm_table *dpm_table = &(pi->dpm_table);
	struct SMU7_Discrete_GraphicsLevel *levels =
			pi->smc_state_table.GraphicsLevel;
	uint32_t array = pi->dpm_table_start +
			offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
	uint32_t array_size = sizeof(struct SMU7_Discrete_GraphicsLevel) *
			SMU7_MAX_LEVELS_GRAPHICS;
	uint32_t i;

	for (i = 0; i < dpm_table->sclk_table.count; i++) {
		levels[i].ActivityLevel =
				cpu_to_be16(request->activity_threshold);
		levels[i].EnabledForActivity = 1;
		levels[i].UpH = request->up_hyst;
		levels[i].DownH = request->down_hyst;
	}

	return amdgpu_ci_copy_bytes_to_smc(adev, array, (uint8_t *)levels,
				array_size, pi->sram_end);
}

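/*
 * Build bitmasks of the enabled sclk/mclk DPM levels that meet the
 * requested minimum clocks.
 */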
static void ci_find_min_clock_masks(struct amdgpu_device *adev,
		uint32_t *sclk_mask, uint32_t *mclk_mask,
		uint32_t min_sclk, uint32_t min_mclk)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_dpm_table *dpm_table = &(pi->dpm_table);
	uint32_t i;

	for (i = 0; i < dpm_table->sclk_table.count; i++) {
		if (dpm_table->sclk_table.dpm_levels[i].enabled &&
		    dpm_table->sclk_table.dpm_levels[i].value >= min_sclk)
			*sclk_mask |= 1 << i;
	}

	for (i = 0; i < dpm_table->mclk_table.count; i++) {
		if (dpm_table->mclk_table.dpm_levels[i].enabled &&
		    dpm_table->mclk_table.dpm_levels[i].value >= min_mclk)
			*mclk_mask |= 1 << i;
	}
}

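/*
 * Apply a power profile: freeze sclk/mclk DPM, rewrite the graphics
 * levels, unfreeze, then trim the enabled level masks so the profile's
 * minimum clocks are respected. Partial failures are reported but do
 * not abort the remaining steps.
 */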
static int ci_set_power_profile_state(struct amdgpu_device *adev,
		struct amd_pp_profile *request)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	int tmp_result, result = 0;
	uint32_t sclk_mask = 0, mclk_mask = 0;

	tmp_result = ci_freeze_sclk_mclk_dpm(adev);
	if (tmp_result) {
		DRM_ERROR("Failed to freeze SCLK MCLK DPM!");
		result = tmp_result;
	}

	tmp_result = ci_populate_requested_graphic_levels(adev, request);
	if (tmp_result) {
		DRM_ERROR("Failed to populate requested graphic levels!");
		result = tmp_result;
	}

	tmp_result = ci_unfreeze_sclk_mclk_dpm(adev);
	if (tmp_result) {
		DRM_ERROR("Failed to unfreeze SCLK MCLK DPM!");
		result = tmp_result;
	}

	ci_find_min_clock_masks(adev, &sclk_mask, &mclk_mask,
				request->min_sclk, request->min_mclk);

	if (sclk_mask) {
		if (!pi->sclk_dpm_key_disabled)
			amdgpu_ci_send_msg_to_smc_with_parameter(adev,
				PPSMC_MSG_SCLKDPM_SetEnabledMask,
				pi->dpm_level_enable_mask.sclk_dpm_enable_mask &
				sclk_mask);
	}

	if (mclk_mask) {
		if (!pi->mclk_dpm_key_disabled)
			amdgpu_ci_send_msg_to_smc_with_parameter(adev,
				PPSMC_MSG_MCLKDPM_SetEnabledMask,
				pi->dpm_level_enable_mask.mclk_dpm_enable_mask &
				mclk_mask);
	}

	return result;
}

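/*
 * powerplay entry point: cache the requested profile parameters and,
 * if the request targets the currently active profile type, apply
 * them immediately. Only allowed while the forced performance level
 * is AUTO. An empty request re-applies the cached profile instead.
 */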
static int ci_dpm_set_power_profile_state(void *handle,
		struct amd_pp_profile *request)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ci_power_info *pi = ci_get_pi(adev);
	int ret = -1;

	if (!pi || !request)
		return -EINVAL;

	if (adev->pm.dpm.forced_level !=
			AMD_DPM_FORCED_LEVEL_AUTO)
		return -EINVAL;

	if (request->min_sclk ||
	    request->min_mclk ||
	    request->activity_threshold ||
	    request->up_hyst ||
	    request->down_hyst) {
		if (request->type == AMD_PP_GFX_PROFILE)
			memcpy(&pi->gfx_power_profile, request,
			       sizeof(struct amd_pp_profile));
		else if (request->type == AMD_PP_COMPUTE_PROFILE)
			memcpy(&pi->compute_power_profile, request,
			       sizeof(struct amd_pp_profile));
		else
			return -EINVAL;

		if (request->type == pi->current_power_profile)
			ret = ci_set_power_profile_state(adev, request);
	} else {
		/* set power profile if it exists */
		switch (request->type) {
		case AMD_PP_GFX_PROFILE:
			ret = ci_set_power_profile_state(adev,
					&pi->gfx_power_profile);
			break;
		case AMD_PP_COMPUTE_PROFILE:
			ret = ci_set_power_profile_state(adev,
					&pi->compute_power_profile);
			break;
		default:
			return -EINVAL;
		}
	}

	if (!ret)
		pi->current_power_profile = request->type;

	return 0;
}

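/*
 * Restore the default gfx or compute profile and re-apply it.
 */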
static int ci_dpm_reset_power_profile_state(void *handle,
		struct amd_pp_profile *request)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ci_power_info *pi = ci_get_pi(adev);

	if (!pi || !request)
		return -EINVAL;

	if (request->type == AMD_PP_GFX_PROFILE) {
		pi->gfx_power_profile = pi->default_gfx_power_profile;
		return ci_dpm_set_power_profile_state(adev,
				&pi->gfx_power_profile);
	} else if (request->type == AMD_PP_COMPUTE_PROFILE) {
		pi->compute_power_profile =
				pi->default_compute_power_profile;
		return ci_dpm_set_power_profile_state(adev,
				&pi->compute_power_profile);
	} else
		return -EINVAL;
}

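/* Activate the cached profile of @type if it is not already current. */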
static int ci_dpm_switch_power_profile(void *handle,
		enum amd_pp_profile_type type)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ci_power_info *pi = ci_get_pi(adev);
	struct amd_pp_profile request = {0};

	if (!pi)
		return -EINVAL;

	if (pi->current_power_profile != type) {
		request.type = type;
		return ci_dpm_set_power_profile_state(adev, &request);
	}

	return 0;
}

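/*
 * Generic sensor readback (sclk, mclk, temperature, GPU load). The SMC
 * reports average graphics activity in 8.8 fixed point; it is rounded
 * and clamped to an integer percentage here.
 */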
static int ci_dpm_read_sensor(void *handle, int idx,
			      void *value, int *size)
{
	u32 activity_percent = 50;
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* size must be at least 4 bytes for all sensors */
	if (*size < 4)
		return -EINVAL;

	switch (idx) {
	case AMDGPU_PP_SENSOR_GFX_SCLK:
		*((uint32_t *)value) = ci_get_average_sclk_freq(adev);
		*size = 4;
		return 0;
	case AMDGPU_PP_SENSOR_GFX_MCLK:
		*((uint32_t *)value) = ci_get_average_mclk_freq(adev);
		*size = 4;
		return 0;
	case AMDGPU_PP_SENSOR_GPU_TEMP:
		*((uint32_t *)value) = ci_dpm_get_temp(adev);
		*size = 4;
		return 0;
	case AMDGPU_PP_SENSOR_GPU_LOAD:
		ret = ci_read_smc_soft_register(adev,
						offsetof(SMU7_SoftRegisters,
							 AverageGraphicsA),
						&activity_percent);
		if (ret == 0) {
			activity_percent += 0x80;
			activity_percent >>= 8;
			activity_percent =
				activity_percent > 100 ? 100 : activity_percent;
		}
		*((uint32_t *)value) = activity_percent;
		*size = 4;
		return 0;
	default:
		return -EINVAL;
	}
}

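/* amd_ip_funcs hooks for the CI DPM IP block. */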
const struct amd_ip_funcs ci_dpm_ip_funcs = {
	.name = "ci_dpm",
	.early_init = ci_dpm_early_init,
	.late_init = ci_dpm_late_init,
	.sw_init = ci_dpm_sw_init,
	.sw_fini = ci_dpm_sw_fini,
	.hw_init = ci_dpm_hw_init,
	.hw_fini = ci_dpm_hw_fini,
	.suspend = ci_dpm_suspend,
	.resume = ci_dpm_resume,
	.is_idle = ci_dpm_is_idle,
	.wait_for_idle = ci_dpm_wait_for_idle,
	.soft_reset = ci_dpm_soft_reset,
	.set_clockgating_state = ci_dpm_set_clockgating_state,
	.set_powergating_state = ci_dpm_set_powergating_state,
};

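/* dpm callback table, installed into adev->pm.funcs below. */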
static const struct amd_pm_funcs ci_dpm_funcs = {
	.get_temperature = &ci_dpm_get_temp,
	.pre_set_power_state = &ci_dpm_pre_set_power_state,
	.set_power_state = &ci_dpm_set_power_state,
	.post_set_power_state = &ci_dpm_post_set_power_state,
	.display_configuration_changed = &ci_dpm_display_configuration_changed,
	.get_sclk = &ci_dpm_get_sclk,
	.get_mclk = &ci_dpm_get_mclk,
	.print_power_state = &ci_dpm_print_power_state,
	.debugfs_print_current_performance_level = &ci_dpm_debugfs_print_current_performance_level,
	.force_performance_level = &ci_dpm_force_performance_level,
	.vblank_too_short = &ci_dpm_vblank_too_short,
	.powergate_uvd = &ci_dpm_powergate_uvd,
	.set_fan_control_mode = &ci_dpm_set_fan_control_mode,
	.get_fan_control_mode = &ci_dpm_get_fan_control_mode,
	.set_fan_speed_percent = &ci_dpm_set_fan_speed_percent,
	.get_fan_speed_percent = &ci_dpm_get_fan_speed_percent,
	.print_clock_levels = ci_dpm_print_clock_levels,
	.force_clock_level = ci_dpm_force_clock_level,
	.get_sclk_od = ci_dpm_get_sclk_od,
	.set_sclk_od = ci_dpm_set_sclk_od,
	.get_mclk_od = ci_dpm_get_mclk_od,
	.set_mclk_od = ci_dpm_set_mclk_od,
	.check_state_equal = ci_check_state_equal,
	.get_vce_clock_state = amdgpu_get_vce_clock_state,
	.get_power_profile_state = ci_dpm_get_power_profile_state,
	.set_power_profile_state = ci_dpm_set_power_profile_state,
	.reset_power_profile_state = ci_dpm_reset_power_profile_state,
	.switch_power_profile = ci_dpm_switch_power_profile,
	.read_sensor = ci_dpm_read_sensor,
};

static void ci_dpm_set_dpm_funcs(struct amdgpu_device *adev)
{
	if (adev->pm.funcs == NULL)
		adev->pm.funcs = &ci_dpm_funcs;
}

static const struct amdgpu_irq_src_funcs ci_dpm_irq_funcs = {
	.set = ci_dpm_set_interrupt_state,
	.process = ci_dpm_process_interrupt,
};

static void ci_dpm_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST;
	adev->pm.dpm.thermal.irq.funcs = &ci_dpm_irq_funcs;
}