/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_ucode.h"
#include "cikd.h"
#include "amdgpu_dpm.h"
#include "ci_dpm.h"
#include "gfx_v7_0.h"
#include "atom.h"
#include "amd_pcie.h"
#include <linux/seq_file.h>

#include "smu/smu_7_0_1_d.h"
#include "smu/smu_7_0_1_sh_mask.h"

#include "dce/dce_8_0_d.h"
#include "dce/dce_8_0_sh_mask.h"

#include "bif/bif_4_1_d.h"
#include "bif/bif_4_1_sh_mask.h"

#include "gca/gfx_7_2_d.h"
#include "gca/gfx_7_2_sh_mask.h"

#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"

MODULE_FIRMWARE("radeon/bonaire_smc.bin");
MODULE_FIRMWARE("radeon/bonaire_k_smc.bin");
MODULE_FIRMWARE("radeon/hawaii_smc.bin");
MODULE_FIRMWARE("radeon/hawaii_k_smc.bin");

#define MC_CG_ARB_FREQ_F0 0x0a
#define MC_CG_ARB_FREQ_F1 0x0b
#define MC_CG_ARB_FREQ_F2 0x0c
#define MC_CG_ARB_FREQ_F3 0x0d

#define SMC_RAM_END 0x40000

#define VOLTAGE_SCALE 4
#define VOLTAGE_VID_OFFSET_SCALE1 625
#define VOLTAGE_VID_OFFSET_SCALE2 100

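/*
 * Per-ASIC PowerTune defaults.  Judging from how the consumers below use
 * them (ci_populate_svi_load_line(), ci_populate_tdc_limit(),
 * ci_populate_dw8(), ci_populate_bapm_parameters_in_dpm_table()), the
 * scalar fields cover the SVI load line setup, the TDC throttle/release
 * limits, the waterfall control default, the DTE ambient temperature base
 * and the BAPM temperature gradient; the two arrays seed the SMU7
 * BAPMTI_R and BAPMTI_RC thermal-coupling matrices.
 */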
static const struct ci_pt_defaults defaults_hawaii_xt =
{
	1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000,
	{ 0x2E, 0x00, 0x00, 0x88, 0x00, 0x00, 0x72, 0x60, 0x51, 0xA7, 0x79, 0x6B, 0x90, 0xBD, 0x79 },
	{ 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
};

static const struct ci_pt_defaults defaults_hawaii_pro =
{
	1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0x65062,
	{ 0x2E, 0x00, 0x00, 0x88, 0x00, 0x00, 0x72, 0x60, 0x51, 0xA7, 0x79, 0x6B, 0x90, 0xBD, 0x79 },
	{ 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
};

static const struct ci_pt_defaults defaults_bonaire_xt =
{
	1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
	{ 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61 },
	{ 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
};

#if 0
static const struct ci_pt_defaults defaults_bonaire_pro =
{
	1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x65062,
	{ 0x8C, 0x23F, 0x244, 0xA6, 0x83, 0x85, 0x86, 0x86, 0x83, 0xDB, 0xDB, 0xDA, 0x67, 0x60, 0x5F },
	{ 0x187, 0x193, 0x193, 0x1C7, 0x1D1, 0x1D1, 0x210, 0x219, 0x219, 0x266, 0x26C, 0x26C, 0x2C9, 0x2CB, 0x2CB }
};
#endif

static const struct ci_pt_defaults defaults_saturn_xt =
{
	1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x70000,
	{ 0x8C, 0x247, 0x249, 0xA6, 0x80, 0x81, 0x8B, 0x89, 0x86, 0xC9, 0xCA, 0xC9, 0x4D, 0x4D, 0x4D },
	{ 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 }
};

#if 0
static const struct ci_pt_defaults defaults_saturn_pro =
{
	1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x30000,
	{ 0x96, 0x21D, 0x23B, 0xA1, 0x85, 0x87, 0x83, 0x84, 0x81, 0xE6, 0xE6, 0xE6, 0x71, 0x6A, 0x6A },
	{ 0x193, 0x19E, 0x19E, 0x1D2, 0x1DC, 0x1DC, 0x21A, 0x223, 0x223, 0x26E, 0x27E, 0x274, 0x2CF, 0x2D2, 0x2D2 }
};
#endif

static const struct ci_pt_config_reg didt_config_ci[] =
{
	{ 0x10, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x10, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x10, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x10, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x2, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x2, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x2, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x1, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x1, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x0, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x22, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x22, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x22, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x21, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x21, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x20, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x42, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x42, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x42, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x41, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x41, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x40, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x62, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x62, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x62, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x61, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x61, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x60, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0xFFFFFFFF }
};

static u8 ci_get_memory_module_index(struct amdgpu_device *adev)
{
	return (u8) ((RREG32(mmBIOS_SCRATCH_4) >> 16) & 0xff);
}

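/*
 * Copy the MC arbiter DRAM timing registers and the per-state burst time
 * from one MC_CG_ARB frequency set (F0/F1) to another, then request that
 * the memory controller switch to the destination set via MC_ARB_CG.
 */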
static int ci_copy_and_switch_arb_sets(struct amdgpu_device *adev,
				       u32 arb_freq_src, u32 arb_freq_dest)
{
	u32 mc_arb_dram_timing;
	u32 mc_arb_dram_timing2;
	u32 burst_time;
	u32 mc_cg_config;

	switch (arb_freq_src) {
	case MC_CG_ARB_FREQ_F0:
		mc_arb_dram_timing  = RREG32(mmMC_ARB_DRAM_TIMING);
		mc_arb_dram_timing2 = RREG32(mmMC_ARB_DRAM_TIMING2);
		burst_time = (RREG32(mmMC_ARB_BURST_TIME) & MC_ARB_BURST_TIME__STATE0_MASK) >>
			MC_ARB_BURST_TIME__STATE0__SHIFT;
		break;
	case MC_CG_ARB_FREQ_F1:
		mc_arb_dram_timing  = RREG32(mmMC_ARB_DRAM_TIMING_1);
		mc_arb_dram_timing2 = RREG32(mmMC_ARB_DRAM_TIMING2_1);
		burst_time = (RREG32(mmMC_ARB_BURST_TIME) & MC_ARB_BURST_TIME__STATE1_MASK) >>
			MC_ARB_BURST_TIME__STATE1__SHIFT;
		break;
	default:
		return -EINVAL;
	}

	switch (arb_freq_dest) {
	case MC_CG_ARB_FREQ_F0:
		WREG32(mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing);
		WREG32(mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
		WREG32_P(mmMC_ARB_BURST_TIME, (burst_time << MC_ARB_BURST_TIME__STATE0__SHIFT),
			 ~MC_ARB_BURST_TIME__STATE0_MASK);
		break;
	case MC_CG_ARB_FREQ_F1:
		WREG32(mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
		WREG32(mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
		WREG32_P(mmMC_ARB_BURST_TIME, (burst_time << MC_ARB_BURST_TIME__STATE1__SHIFT),
			 ~MC_ARB_BURST_TIME__STATE1_MASK);
		break;
	default:
		return -EINVAL;
	}

	mc_cg_config = RREG32(mmMC_CG_CONFIG) | 0x0000000F;
	WREG32(mmMC_CG_CONFIG, mc_cg_config);
	WREG32_P(mmMC_ARB_CG, (arb_freq_dest) << MC_ARB_CG__CG_ARB_REQ__SHIFT,
		 ~MC_ARB_CG__CG_ARB_REQ_MASK);

	return 0;
}

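/*
 * Map a DDR3 memory clock (10 kHz units, as elsewhere in this driver) to
 * the 16-entry MC arbiter parameter index: index 0 below 100 MHz, 0x0f at
 * or above 800 MHz, and one step per 50 MHz in between.
 */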
static u8 ci_get_ddr3_mclk_frequency_ratio(u32 memory_clock)
{
	u8 mc_para_index;

	if (memory_clock < 10000)
		mc_para_index = 0;
	else if (memory_clock >= 80000)
		mc_para_index = 0x0f;
	else
		mc_para_index = (u8)((memory_clock - 10000) / 5000 + 1);
	return mc_para_index;
}

static u8 ci_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode)
{
	u8 mc_para_index;

	if (strobe_mode) {
		if (memory_clock < 12500)
			mc_para_index = 0x00;
		else if (memory_clock > 47500)
			mc_para_index = 0x0f;
		else
			mc_para_index = (u8)((memory_clock - 10000) / 2500);
	} else {
		if (memory_clock < 65000)
			mc_para_index = 0x00;
		else if (memory_clock > 135000)
			mc_para_index = 0x0f;
		else
			mc_para_index = (u8)((memory_clock - 60000) / 5000);
	}
	return mc_para_index;
}

static void ci_trim_voltage_table_to_fit_state_table(struct amdgpu_device *adev,
						     u32 max_voltage_steps,
						     struct atom_voltage_table *voltage_table)
{
	unsigned int i, diff;

	if (voltage_table->count <= max_voltage_steps)
		return;

	diff = voltage_table->count - max_voltage_steps;

	for (i = 0; i < max_voltage_steps; i++)
		voltage_table->entries[i] = voltage_table->entries[i + diff];

	voltage_table->count = max_voltage_steps;
}

static int ci_get_std_voltage_value_sidd(struct amdgpu_device *adev,
					 struct atom_voltage_table_entry *voltage_table,
					 u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd);
static int ci_set_power_limit(struct amdgpu_device *adev, u32 n);
static int ci_set_overdrive_target_tdp(struct amdgpu_device *adev,
				       u32 target_tdp);
static int ci_update_uvd_dpm(struct amdgpu_device *adev, bool gate);
static void ci_dpm_set_irq_funcs(struct amdgpu_device *adev);

static PPSMC_Result amdgpu_ci_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
							     PPSMC_Msg msg, u32 parameter);
static void ci_thermal_start_smc_fan_control(struct amdgpu_device *adev);
static void ci_fan_ctrl_set_default_mode(struct amdgpu_device *adev);

static struct ci_power_info *ci_get_pi(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = adev->pm.dpm.priv;

	return pi;
}

static struct ci_ps *ci_get_ps(struct amdgpu_ps *rps)
{
	struct ci_ps *ps = rps->ps_priv;

	return ps;
}

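/*
 * Select the PowerTune defaults table from the PCI device ID (Bonaire,
 * Saturn and Hawaii XT/PRO variants) and set the baseline power
 * containment capabilities.  CAC, TDC and package power tracking are only
 * armed when caps_power_containment is set; BAPM is kept off on Hawaii.
 */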
static void ci_initialize_powertune_defaults(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);

	switch (adev->pdev->device) {
	case 0x6649:
	case 0x6650:
	case 0x6651:
	case 0x6658:
	case 0x665C:
	case 0x665D:
	default:
		pi->powertune_defaults = &defaults_bonaire_xt;
		break;
	case 0x6640:
	case 0x6641:
	case 0x6646:
	case 0x6647:
		pi->powertune_defaults = &defaults_saturn_xt;
		break;
	case 0x67B8:
	case 0x67B0:
		pi->powertune_defaults = &defaults_hawaii_xt;
		break;
	case 0x67BA:
	case 0x67B1:
		pi->powertune_defaults = &defaults_hawaii_pro;
		break;
	case 0x67A0:
	case 0x67A1:
	case 0x67A2:
	case 0x67A8:
	case 0x67A9:
	case 0x67AA:
	case 0x67B9:
	case 0x67BE:
		pi->powertune_defaults = &defaults_bonaire_xt;
		break;
	}

	pi->dte_tj_offset = 0;

	pi->caps_power_containment = true;
	pi->caps_cac = false;
	pi->caps_sq_ramping = false;
	pi->caps_db_ramping = false;
	pi->caps_td_ramping = false;
	pi->caps_tcp_ramping = false;

	if (pi->caps_power_containment) {
		pi->caps_cac = true;
		if (adev->asic_type == CHIP_HAWAII)
			pi->enable_bapm_feature = false;
		else
			pi->enable_bapm_feature = true;
		pi->enable_tdc_limit_feature = true;
		pi->enable_pkg_pwr_tracking_feature = true;
	}
}

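/*
 * Convert a VDDC value in mV into an SVI2 VID code.  With
 * VOLTAGE_SCALE = 4 this is integer math for (1550 mV - vddc) / 6.25 mV.
 */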
static u8 ci_convert_to_vid(u16 vddc)
{
	return (6200 - (vddc * VOLTAGE_SCALE)) / 25;
}

static int ci_populate_bapm_vddc_vid_sidd(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
	u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
	u8 *hi2_vid = pi->smc_powertune_table.BapmVddCVidHiSidd2;
	u32 i;

	if (adev->pm.dpm.dyn_state.cac_leakage_table.entries == NULL)
		return -EINVAL;
	if (adev->pm.dpm.dyn_state.cac_leakage_table.count > 8)
		return -EINVAL;
	if (adev->pm.dpm.dyn_state.cac_leakage_table.count !=
	    adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count)
		return -EINVAL;

	for (i = 0; i < adev->pm.dpm.dyn_state.cac_leakage_table.count; i++) {
		if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
			lo_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1);
			hi_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2);
			hi2_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3);
		} else {
			lo_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc);
			hi_vid[i] = ci_convert_to_vid((u16)adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage);
		}
	}
	return 0;
}

static int ci_populate_vddc_vid(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u8 *vid = pi->smc_powertune_table.VddCVid;
	u32 i;

	if (pi->vddc_voltage_table.count > 8)
		return -EINVAL;

	for (i = 0; i < pi->vddc_voltage_table.count; i++)
		vid[i] = ci_convert_to_vid(pi->vddc_voltage_table.entries[i].value);

	return 0;
}

static int ci_populate_svi_load_line(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;

	pi->smc_powertune_table.SviLoadLineEn = pt_defaults->svi_load_line_en;
	pi->smc_powertune_table.SviLoadLineVddC = pt_defaults->svi_load_line_vddc;
	pi->smc_powertune_table.SviLoadLineTrimVddC = 3;
	pi->smc_powertune_table.SviLoadLineOffsetVddC = 0;

	return 0;
}

static int ci_populate_tdc_limit(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
	u16 tdc_limit;

	tdc_limit = adev->pm.dpm.dyn_state.cac_tdp_table->tdc * 256;
	pi->smc_powertune_table.TDC_VDDC_PkgLimit = cpu_to_be16(tdc_limit);
	pi->smc_powertune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
		pt_defaults->tdc_vddc_throttle_release_limit_perc;
	pi->smc_powertune_table.TDC_MAWt = pt_defaults->tdc_mawt;

	return 0;
}

static int ci_populate_dw8(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
	int ret;

	ret = amdgpu_ci_read_smc_sram_dword(adev,
					    SMU7_FIRMWARE_HEADER_LOCATION +
					    offsetof(SMU7_Firmware_Header, PmFuseTable) +
					    offsetof(SMU7_Discrete_PmFuses, TdcWaterfallCtl),
					    (u32 *)&pi->smc_powertune_table.TdcWaterfallCtl,
					    pi->sram_end);
	if (ret)
		return -EINVAL;
	else
		pi->smc_powertune_table.TdcWaterfallCtl = pt_defaults->tdc_waterfall_ctl;

	return 0;
}

static int ci_populate_fuzzy_fan(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);

	if ((adev->pm.dpm.fan.fan_output_sensitivity & (1 << 15)) ||
	    (adev->pm.dpm.fan.fan_output_sensitivity == 0))
		adev->pm.dpm.fan.fan_output_sensitivity =
			adev->pm.dpm.fan.default_fan_output_sensitivity;

	pi->smc_powertune_table.FuzzyFan_PwmSetDelta =
		cpu_to_be16(adev->pm.dpm.fan.fan_output_sensitivity);

	return 0;
}

static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
	u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
	int i, min, max;

	min = max = hi_vid[0];
	for (i = 0; i < 8; i++) {
		if (0 != hi_vid[i]) {
			if (min > hi_vid[i])
				min = hi_vid[i];
			if (max < hi_vid[i])
				max = hi_vid[i];
		}

		if (0 != lo_vid[i]) {
			if (min > lo_vid[i])
				min = lo_vid[i];
			if (max < lo_vid[i])
				max = lo_vid[i];
		}
	}

	if ((min == 0) || (max == 0))
		return -EINVAL;
	pi->smc_powertune_table.GnbLPMLMaxVid = (u8)max;
	pi->smc_powertune_table.GnbLPMLMinVid = (u8)min;

	return 0;
}

static int ci_populate_bapm_vddc_base_leakage_sidd(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u16 hi_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd;
	u16 lo_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd;
	struct amdgpu_cac_tdp_table *cac_tdp_table =
		adev->pm.dpm.dyn_state.cac_tdp_table;

	hi_sidd = cac_tdp_table->high_cac_leakage / 100 * 256;
	lo_sidd = cac_tdp_table->low_cac_leakage / 100 * 256;

	pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd = cpu_to_be16(hi_sidd);
	pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd = cpu_to_be16(lo_sidd);

	return 0;
}

static int ci_populate_bapm_parameters_in_dpm_table(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
	SMU7_Discrete_DpmTable *dpm_table = &pi->smc_state_table;
	struct amdgpu_cac_tdp_table *cac_tdp_table =
		adev->pm.dpm.dyn_state.cac_tdp_table;
	struct amdgpu_ppm_table *ppm = adev->pm.dpm.dyn_state.ppm_table;
	int i, j, k;
	const u16 *def1;
	const u16 *def2;

	dpm_table->DefaultTdp = cac_tdp_table->tdp * 256;
	dpm_table->TargetTdp = cac_tdp_table->configurable_tdp * 256;

	dpm_table->DTETjOffset = (u8)pi->dte_tj_offset;
	dpm_table->GpuTjMax =
		(u8)(pi->thermal_temp_setting.temperature_high / 1000);
	dpm_table->GpuTjHyst = 8;

	dpm_table->DTEAmbientTempBase = pt_defaults->dte_ambient_temp_base;

	if (ppm) {
		dpm_table->PPM_PkgPwrLimit = cpu_to_be16((u16)ppm->dgpu_tdp * 256 / 1000);
		dpm_table->PPM_TemperatureLimit = cpu_to_be16((u16)ppm->tj_max * 256);
	} else {
		dpm_table->PPM_PkgPwrLimit = cpu_to_be16(0);
		dpm_table->PPM_TemperatureLimit = cpu_to_be16(0);
	}

	dpm_table->BAPM_TEMP_GRADIENT = cpu_to_be32(pt_defaults->bapm_temp_gradient);
	def1 = pt_defaults->bapmti_r;
	def2 = pt_defaults->bapmti_rc;

	for (i = 0; i < SMU7_DTE_ITERATIONS; i++) {
		for (j = 0; j < SMU7_DTE_SOURCES; j++) {
			for (k = 0; k < SMU7_DTE_SINKS; k++) {
				dpm_table->BAPMTI_R[i][j][k] = cpu_to_be16(*def1);
				dpm_table->BAPMTI_RC[i][j][k] = cpu_to_be16(*def2);
				def1++;
				def2++;
			}
		}
	}

	return 0;
}

static int ci_populate_pm_base(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u32 pm_fuse_table_offset;
	int ret;

	if (pi->caps_power_containment) {
		ret = amdgpu_ci_read_smc_sram_dword(adev,
						    SMU7_FIRMWARE_HEADER_LOCATION +
						    offsetof(SMU7_Firmware_Header, PmFuseTable),
						    &pm_fuse_table_offset, pi->sram_end);
		if (ret)
			return ret;
		ret = ci_populate_bapm_vddc_vid_sidd(adev);
		if (ret)
			return ret;
		ret = ci_populate_vddc_vid(adev);
		if (ret)
			return ret;
		ret = ci_populate_svi_load_line(adev);
		if (ret)
			return ret;
		ret = ci_populate_tdc_limit(adev);
		if (ret)
			return ret;
		ret = ci_populate_dw8(adev);
		if (ret)
			return ret;
		ret = ci_populate_fuzzy_fan(adev);
		if (ret)
			return ret;
		ret = ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(adev);
		if (ret)
			return ret;
		ret = ci_populate_bapm_vddc_base_leakage_sidd(adev);
		if (ret)
			return ret;
		ret = amdgpu_ci_copy_bytes_to_smc(adev, pm_fuse_table_offset,
						  (u8 *)&pi->smc_powertune_table,
						  sizeof(SMU7_Discrete_PmFuses), pi->sram_end);
		if (ret)
			return ret;
	}

	return 0;
}

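/*
 * Toggle DIDT_CTRL_EN for each DIdT block (SQ/DB/TD/TCP) this ASIC has
 * enabled.  The caller is expected to hold the RLC in safe mode while
 * these indirect registers are modified (see ci_enable_didt()).
 */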
static void ci_do_enable_didt(struct amdgpu_device *adev, const bool enable)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u32 data;

	if (pi->caps_sq_ramping) {
		data = RREG32_DIDT(ixDIDT_SQ_CTRL0);
		if (enable)
			data |= DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
		else
			data &= ~DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
		WREG32_DIDT(ixDIDT_SQ_CTRL0, data);
	}

	if (pi->caps_db_ramping) {
		data = RREG32_DIDT(ixDIDT_DB_CTRL0);
		if (enable)
			data |= DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
		else
			data &= ~DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
		WREG32_DIDT(ixDIDT_DB_CTRL0, data);
	}

	if (pi->caps_td_ramping) {
		data = RREG32_DIDT(ixDIDT_TD_CTRL0);
		if (enable)
			data |= DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
		else
			data &= ~DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
		WREG32_DIDT(ixDIDT_TD_CTRL0, data);
	}

	if (pi->caps_tcp_ramping) {
		data = RREG32_DIDT(ixDIDT_TCP_CTRL0);
		if (enable)
			data |= DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
		else
			data &= ~DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
		WREG32_DIDT(ixDIDT_TCP_CTRL0, data);
	}
}

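/*
 * Program a 0xFFFFFFFF-terminated table of config registers.  Entries of
 * type CISLANDS_CONFIGREG_CACHE only accumulate shifted field values; the
 * next non-cache entry ORs the accumulated cache into its
 * read-modify-write and resets it.  The register space (SMC, DIDT or
 * plain MMIO) is selected per entry.
 */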
static int ci_program_pt_config_registers(struct amdgpu_device *adev,
					  const struct ci_pt_config_reg *cac_config_regs)
{
	const struct ci_pt_config_reg *config_regs = cac_config_regs;
	u32 data;
	u32 cache = 0;

	if (config_regs == NULL)
		return -EINVAL;

	while (config_regs->offset != 0xFFFFFFFF) {
		if (config_regs->type == CISLANDS_CONFIGREG_CACHE) {
			cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
		} else {
			switch (config_regs->type) {
			case CISLANDS_CONFIGREG_SMC_IND:
				data = RREG32_SMC(config_regs->offset);
				break;
			case CISLANDS_CONFIGREG_DIDT_IND:
				data = RREG32_DIDT(config_regs->offset);
				break;
			default:
				data = RREG32(config_regs->offset);
				break;
			}

			data &= ~config_regs->mask;
			data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
			data |= cache;

			switch (config_regs->type) {
			case CISLANDS_CONFIGREG_SMC_IND:
				WREG32_SMC(config_regs->offset, data);
				break;
			case CISLANDS_CONFIGREG_DIDT_IND:
				WREG32_DIDT(config_regs->offset, data);
				break;
			default:
				WREG32(config_regs->offset, data);
				break;
			}
			cache = 0;
		}
		config_regs++;
	}
	return 0;
}

static int ci_enable_didt(struct amdgpu_device *adev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	int ret;

	if (pi->caps_sq_ramping || pi->caps_db_ramping ||
	    pi->caps_td_ramping || pi->caps_tcp_ramping) {
		adev->gfx.rlc.funcs->enter_safe_mode(adev);

		if (enable) {
			ret = ci_program_pt_config_registers(adev, didt_config_ci);
			if (ret) {
				adev->gfx.rlc.funcs->exit_safe_mode(adev);
				return ret;
			}
		}

		ci_do_enable_didt(adev, enable);

		adev->gfx.rlc.funcs->exit_safe_mode(adev);
	}

	return 0;
}

static int ci_enable_power_containment(struct amdgpu_device *adev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	PPSMC_Result smc_result;
	int ret = 0;

	if (enable) {
		pi->power_containment_features = 0;
		if (pi->caps_power_containment) {
			if (pi->enable_bapm_feature) {
				smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableDTE);
				if (smc_result != PPSMC_Result_OK)
					ret = -EINVAL;
				else
					pi->power_containment_features |= POWERCONTAINMENT_FEATURE_BAPM;
			}

			if (pi->enable_tdc_limit_feature) {
				smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_TDCLimitEnable);
				if (smc_result != PPSMC_Result_OK)
					ret = -EINVAL;
				else
					pi->power_containment_features |= POWERCONTAINMENT_FEATURE_TDCLimit;
			}

			if (pi->enable_pkg_pwr_tracking_feature) {
				smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PkgPwrLimitEnable);
				if (smc_result != PPSMC_Result_OK) {
					ret = -EINVAL;
				} else {
					struct amdgpu_cac_tdp_table *cac_tdp_table =
						adev->pm.dpm.dyn_state.cac_tdp_table;
					u32 default_pwr_limit =
						(u32)(cac_tdp_table->maximum_power_delivery_limit * 256);

					pi->power_containment_features |= POWERCONTAINMENT_FEATURE_PkgPwrLimit;

					ci_set_power_limit(adev, default_pwr_limit);
				}
			}
		}
	} else {
		if (pi->caps_power_containment && pi->power_containment_features) {
			if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_TDCLimit)
				amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_TDCLimitDisable);

			if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_BAPM)
				amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DisableDTE);

			if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit)
				amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PkgPwrLimitDisable);
			pi->power_containment_features = 0;
		}
	}

	return ret;
}

static int ci_enable_smc_cac(struct amdgpu_device *adev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	PPSMC_Result smc_result;
	int ret = 0;

	if (pi->caps_cac) {
		if (enable) {
			smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableCac);
			if (smc_result != PPSMC_Result_OK) {
				ret = -EINVAL;
				pi->cac_enabled = false;
			} else {
				pi->cac_enabled = true;
			}
		} else if (pi->cac_enabled) {
			amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DisableCac);
			pi->cac_enabled = false;
		}
	}

	return ret;
}

static int ci_enable_thermal_based_sclk_dpm(struct amdgpu_device *adev,
					    bool enable)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	PPSMC_Result smc_result = PPSMC_Result_OK;

	if (pi->thermal_sclk_dpm_enabled) {
		if (enable)
			smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_ENABLE_THERMAL_DPM);
		else
			smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DISABLE_THERMAL_DPM);
	}

	if (smc_result == PPSMC_Result_OK)
		return 0;
	else
		return -EINVAL;
}

static int ci_power_control_set_level(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct amdgpu_cac_tdp_table *cac_tdp_table =
		adev->pm.dpm.dyn_state.cac_tdp_table;
	s32 adjust_percent;
	s32 target_tdp;
	int ret = 0;
	bool adjust_polarity = false; /* ??? */

	if (pi->caps_power_containment) {
		adjust_percent = adjust_polarity ?
			adev->pm.dpm.tdp_adjustment : (-1 * adev->pm.dpm.tdp_adjustment);
		target_tdp = ((100 + adjust_percent) *
			      (s32)cac_tdp_table->configurable_tdp) / 100;

		ret = ci_set_overdrive_target_tdp(adev, (u32)target_tdp);
	}

	return ret;
}

static void ci_dpm_powergate_uvd(void *handle, bool gate)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ci_power_info *pi = ci_get_pi(adev);

	pi->uvd_power_gated = gate;

	if (gate) {
		/* stop the UVD block */
		amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
					     AMD_PG_STATE_GATE);
		ci_update_uvd_dpm(adev, gate);
	} else {
		amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
					     AMD_PG_STATE_UNGATE);
		ci_update_uvd_dpm(adev, gate);
	}
}

static bool ci_dpm_vblank_too_short(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
	u32 switch_limit = adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 300;

	/* disable mclk switching if the refresh is >120Hz, even if the
	 * blanking period would allow it
	 */
	if (amdgpu_dpm_get_vrefresh(adev) > 120)
		return true;

	if (vblank_time < switch_limit)
		return true;
	else
		return false;
}

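/*
 * Clamp the requested power state against current constraints: apply the
 * AC/DC clock ceilings, raise sclk/mclk to the VCE state clocks when VCE
 * is active, honour the display configuration minimums, and pin mclk at
 * the highest level when mclk switching has to be disabled (more than one
 * active CRTC, or a vblank period too short to hide the switch).
 */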
static void ci_apply_state_adjust_rules(struct amdgpu_device *adev,
					struct amdgpu_ps *rps)
{
	struct ci_ps *ps = ci_get_ps(rps);
	struct ci_power_info *pi = ci_get_pi(adev);
	struct amdgpu_clock_and_voltage_limits *max_limits;
	bool disable_mclk_switching;
	u32 sclk, mclk;
	int i;

	if (rps->vce_active) {
		rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk;
		rps->ecclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].ecclk;
	} else {
		rps->evclk = 0;
		rps->ecclk = 0;
	}

	if ((adev->pm.dpm.new_active_crtc_count > 1) ||
	    ci_dpm_vblank_too_short(adev))
		disable_mclk_switching = true;
	else
		disable_mclk_switching = false;

	if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
		pi->battery_state = true;
	else
		pi->battery_state = false;

	if (adev->pm.dpm.ac_power)
		max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
	else
		max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

	if (adev->pm.dpm.ac_power == false) {
		for (i = 0; i < ps->performance_level_count; i++) {
			if (ps->performance_levels[i].mclk > max_limits->mclk)
				ps->performance_levels[i].mclk = max_limits->mclk;
			if (ps->performance_levels[i].sclk > max_limits->sclk)
				ps->performance_levels[i].sclk = max_limits->sclk;
		}
	}

	/* XXX validate the min clocks required for display */

	if (disable_mclk_switching) {
		mclk = ps->performance_levels[ps->performance_level_count - 1].mclk;
		sclk = ps->performance_levels[0].sclk;
	} else {
		mclk = ps->performance_levels[0].mclk;
		sclk = ps->performance_levels[0].sclk;
	}

	if (adev->pm.pm_display_cfg.min_core_set_clock > sclk)
		sclk = adev->pm.pm_display_cfg.min_core_set_clock;

	if (adev->pm.pm_display_cfg.min_mem_set_clock > mclk)
		mclk = adev->pm.pm_display_cfg.min_mem_set_clock;

	if (rps->vce_active) {
		if (sclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk)
			sclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk;
		if (mclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].mclk)
			mclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].mclk;
	}

	ps->performance_levels[0].sclk = sclk;
	ps->performance_levels[0].mclk = mclk;

	if (ps->performance_levels[1].sclk < ps->performance_levels[0].sclk)
		ps->performance_levels[1].sclk = ps->performance_levels[0].sclk;

	if (disable_mclk_switching) {
		if (ps->performance_levels[0].mclk < ps->performance_levels[1].mclk)
			ps->performance_levels[0].mclk = ps->performance_levels[1].mclk;
	} else {
		if (ps->performance_levels[1].mclk < ps->performance_levels[0].mclk)
			ps->performance_levels[1].mclk = ps->performance_levels[0].mclk;
	}
}

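/*
 * Program the thermal interrupt thresholds (in degrees C) into
 * CG_THERMAL_INT and record the accepted range for the thermal framework.
 */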
static int ci_thermal_set_temperature_range(struct amdgpu_device *adev,
					    int min_temp, int max_temp)
{
	int low_temp = 0 * 1000;
	int high_temp = 255 * 1000;
	u32 tmp;

	if (low_temp < min_temp)
		low_temp = min_temp;
	if (high_temp > max_temp)
		high_temp = max_temp;
	if (high_temp < low_temp) {
		DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
		return -EINVAL;
	}

	tmp = RREG32_SMC(ixCG_THERMAL_INT);
	tmp &= ~(CG_THERMAL_INT__DIG_THERM_INTH_MASK | CG_THERMAL_INT__DIG_THERM_INTL_MASK);
	tmp |= ((high_temp / 1000) << CG_THERMAL_INT__DIG_THERM_INTH__SHIFT) |
		((low_temp / 1000)) << CG_THERMAL_INT__DIG_THERM_INTL__SHIFT;
	WREG32_SMC(ixCG_THERMAL_INT, tmp);

#if 0
	/* XXX: need to figure out how to handle this properly */
	tmp = RREG32_SMC(ixCG_THERMAL_CTRL);
	tmp &= DIG_THERM_DPM_MASK;
	tmp |= DIG_THERM_DPM(high_temp / 1000);
	WREG32_SMC(ixCG_THERMAL_CTRL, tmp);
#endif

	adev->pm.dpm.thermal.min_temp = low_temp;
	adev->pm.dpm.thermal.max_temp = high_temp;
	return 0;
}

static int ci_thermal_enable_alert(struct amdgpu_device *adev,
				   bool enable)
{
	u32 thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
	PPSMC_Result result;

	if (enable) {
		thermal_int &= ~(CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK |
				 CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK);
		WREG32_SMC(ixCG_THERMAL_INT, thermal_int);
		result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Thermal_Cntl_Enable);
		if (result != PPSMC_Result_OK) {
			DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
			return -EINVAL;
		}
	} else {
		thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK |
			CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
		WREG32_SMC(ixCG_THERMAL_INT, thermal_int);
		result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Thermal_Cntl_Disable);
		if (result != PPSMC_Result_OK) {
			DRM_DEBUG_KMS("Could not disable thermal interrupts.\n");
			return -EINVAL;
		}
	}

	return 0;
}

static void ci_fan_ctrl_set_static_mode(struct amdgpu_device *adev, u32 mode)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u32 tmp;

	if (pi->fan_ctrl_is_in_default_mode) {
		tmp = (RREG32_SMC(ixCG_FDO_CTRL2) & CG_FDO_CTRL2__FDO_PWM_MODE_MASK)
			>> CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
		pi->fan_ctrl_default_mode = tmp;
		tmp = (RREG32_SMC(ixCG_FDO_CTRL2) & CG_FDO_CTRL2__TMIN_MASK)
			>> CG_FDO_CTRL2__TMIN__SHIFT;
		pi->t_min = tmp;
		pi->fan_ctrl_is_in_default_mode = false;
	}

	tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TMIN_MASK;
	tmp |= 0 << CG_FDO_CTRL2__TMIN__SHIFT;
	WREG32_SMC(ixCG_FDO_CTRL2, tmp);

	tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__FDO_PWM_MODE_MASK;
	tmp |= mode << CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
	WREG32_SMC(ixCG_FDO_CTRL2, tmp);
}

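/*
 * Build the SMU7 fan table from the VBIOS fan profile: the min/med/high
 * temperature and PWM points are converted into the two slopes the SMC
 * fan controller interpolates along, and the result is uploaded into SMC
 * RAM at fan_table_start.  On failure the driver falls back to manual
 * (non-ucode) fan control.
 */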
static int ci_thermal_setup_fan_table(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	SMU7_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
	u32 duty100;
	u32 t_diff1, t_diff2, pwm_diff1, pwm_diff2;
	u16 fdo_min, slope1, slope2;
	u32 reference_clock, tmp;
	int ret;
	u64 tmp64;

	if (!pi->fan_table_start) {
		adev->pm.dpm.fan.ucode_fan_control = false;
		return 0;
	}

	duty100 = (RREG32_SMC(ixCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK)
		>> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;

	if (duty100 == 0) {
		adev->pm.dpm.fan.ucode_fan_control = false;
		return 0;
	}

	tmp64 = (u64)adev->pm.dpm.fan.pwm_min * duty100;
	do_div(tmp64, 10000);
	fdo_min = (u16)tmp64;

	t_diff1 = adev->pm.dpm.fan.t_med - adev->pm.dpm.fan.t_min;
	t_diff2 = adev->pm.dpm.fan.t_high - adev->pm.dpm.fan.t_med;

	pwm_diff1 = adev->pm.dpm.fan.pwm_med - adev->pm.dpm.fan.pwm_min;
	pwm_diff2 = adev->pm.dpm.fan.pwm_high - adev->pm.dpm.fan.pwm_med;

	slope1 = (u16)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
	slope2 = (u16)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);

	fan_table.TempMin = cpu_to_be16((50 + adev->pm.dpm.fan.t_min) / 100);
	fan_table.TempMed = cpu_to_be16((50 + adev->pm.dpm.fan.t_med) / 100);
	fan_table.TempMax = cpu_to_be16((50 + adev->pm.dpm.fan.t_max) / 100);

	fan_table.Slope1 = cpu_to_be16(slope1);
	fan_table.Slope2 = cpu_to_be16(slope2);

	fan_table.FdoMin = cpu_to_be16(fdo_min);

	fan_table.HystDown = cpu_to_be16(adev->pm.dpm.fan.t_hyst);

	fan_table.HystUp = cpu_to_be16(1);

	fan_table.HystSlope = cpu_to_be16(1);

	fan_table.TempRespLim = cpu_to_be16(5);

	reference_clock = amdgpu_asic_get_xclk(adev);

	fan_table.RefreshPeriod = cpu_to_be32((adev->pm.dpm.fan.cycle_delay *
					       reference_clock) / 1600);

	fan_table.FdoMax = cpu_to_be16((u16)duty100);

	tmp = (RREG32_SMC(ixCG_MULT_THERMAL_CTRL) & CG_MULT_THERMAL_CTRL__TEMP_SEL_MASK)
		>> CG_MULT_THERMAL_CTRL__TEMP_SEL__SHIFT;
	fan_table.TempSrc = (uint8_t)tmp;

	ret = amdgpu_ci_copy_bytes_to_smc(adev,
					  pi->fan_table_start,
					  (u8 *)(&fan_table),
					  sizeof(fan_table),
					  pi->sram_end);

	if (ret) {
		DRM_ERROR("Failed to load fan table to the SMC.");
		adev->pm.dpm.fan.ucode_fan_control = false;
	}

	return 0;
}

static int ci_fan_ctrl_start_smc_fan_control(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	PPSMC_Result ret;

	if (pi->caps_od_fuzzy_fan_control_support) {
		ret = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
							       PPSMC_StartFanControl,
							       FAN_CONTROL_FUZZY);
		if (ret != PPSMC_Result_OK)
			return -EINVAL;
		ret = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
							       PPSMC_MSG_SetFanPwmMax,
							       adev->pm.dpm.fan.default_max_fan_pwm);
		if (ret != PPSMC_Result_OK)
			return -EINVAL;
	} else {
		ret = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
							       PPSMC_StartFanControl,
							       FAN_CONTROL_TABLE);
		if (ret != PPSMC_Result_OK)
			return -EINVAL;
	}

	pi->fan_is_controlled_by_smc = true;
	return 0;
}

static int ci_fan_ctrl_stop_smc_fan_control(struct amdgpu_device *adev)
{
	PPSMC_Result ret;
	struct ci_power_info *pi = ci_get_pi(adev);

	ret = amdgpu_ci_send_msg_to_smc(adev, PPSMC_StopFanControl);
	if (ret == PPSMC_Result_OK) {
		pi->fan_is_controlled_by_smc = false;
		return 0;
	} else {
		return -EINVAL;
	}
}

static int ci_dpm_get_fan_speed_percent(void *handle,
					u32 *speed)
{
	u32 duty, duty100;
	u64 tmp64;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->pm.no_fan)
		return -ENOENT;

	duty100 = (RREG32_SMC(ixCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK)
		>> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;
	duty = (RREG32_SMC(ixCG_THERMAL_STATUS) & CG_THERMAL_STATUS__FDO_PWM_DUTY_MASK)
		>> CG_THERMAL_STATUS__FDO_PWM_DUTY__SHIFT;

	if (duty100 == 0)
		return -EINVAL;

	tmp64 = (u64)duty * 100;
	do_div(tmp64, duty100);
	*speed = (u32)tmp64;

	if (*speed > 100)
		*speed = 100;

	return 0;
}

static int ci_dpm_set_fan_speed_percent(void *handle,
					u32 speed)
{
	u32 tmp;
	u32 duty, duty100;
	u64 tmp64;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ci_power_info *pi = ci_get_pi(adev);

	if (adev->pm.no_fan)
		return -ENOENT;

	if (pi->fan_is_controlled_by_smc)
		return -EINVAL;

	if (speed > 100)
		return -EINVAL;

	duty100 = (RREG32_SMC(ixCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK)
		>> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;

	if (duty100 == 0)
		return -EINVAL;

	tmp64 = (u64)speed * duty100;
	do_div(tmp64, 100);
	duty = (u32)tmp64;

	tmp = RREG32_SMC(ixCG_FDO_CTRL0) & ~CG_FDO_CTRL0__FDO_STATIC_DUTY_MASK;
	tmp |= duty << CG_FDO_CTRL0__FDO_STATIC_DUTY__SHIFT;
	WREG32_SMC(ixCG_FDO_CTRL0, tmp);

	return 0;
}

static void ci_dpm_set_fan_control_mode(void *handle, u32 mode)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (mode) {
	case AMD_FAN_CTRL_NONE:
		if (adev->pm.dpm.fan.ucode_fan_control)
			ci_fan_ctrl_stop_smc_fan_control(adev);
		ci_dpm_set_fan_speed_percent(adev, 100);
		break;
	case AMD_FAN_CTRL_MANUAL:
		if (adev->pm.dpm.fan.ucode_fan_control)
			ci_fan_ctrl_stop_smc_fan_control(adev);
		break;
	case AMD_FAN_CTRL_AUTO:
		if (adev->pm.dpm.fan.ucode_fan_control)
			ci_thermal_start_smc_fan_control(adev);
		break;
	default:
		break;
	}
}

static u32 ci_dpm_get_fan_control_mode(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ci_power_info *pi = ci_get_pi(adev);

	if (pi->fan_is_controlled_by_smc)
		return AMD_FAN_CTRL_AUTO;
	else
		return AMD_FAN_CTRL_MANUAL;
}

#if 0
static int ci_fan_ctrl_get_fan_speed_rpm(struct amdgpu_device *adev,
					 u32 *speed)
{
	u32 tach_period;
	u32 xclk = amdgpu_asic_get_xclk(adev);

	if (adev->pm.no_fan)
		return -ENOENT;

	if (adev->pm.fan_pulses_per_revolution == 0)
		return -ENOENT;

	tach_period = (RREG32_SMC(ixCG_TACH_STATUS) & CG_TACH_STATUS__TACH_PERIOD_MASK)
		>> CG_TACH_STATUS__TACH_PERIOD__SHIFT;
	if (tach_period == 0)
		return -ENOENT;

	*speed = 60 * xclk * 10000 / tach_period;

	return 0;
}

static int ci_fan_ctrl_set_fan_speed_rpm(struct amdgpu_device *adev,
					 u32 speed)
{
	u32 tach_period, tmp;
	u32 xclk = amdgpu_asic_get_xclk(adev);

	if (adev->pm.no_fan)
		return -ENOENT;

	if (adev->pm.fan_pulses_per_revolution == 0)
		return -ENOENT;

	if ((speed < adev->pm.fan_min_rpm) ||
	    (speed > adev->pm.fan_max_rpm))
		return -EINVAL;

	if (adev->pm.dpm.fan.ucode_fan_control)
		ci_fan_ctrl_stop_smc_fan_control(adev);

	tach_period = 60 * xclk * 10000 / (8 * speed);
	tmp = RREG32_SMC(ixCG_TACH_CTRL) & ~CG_TACH_CTRL__TARGET_PERIOD_MASK;
	tmp |= tach_period << CG_TACH_CTRL__TARGET_PERIOD__SHIFT;
	WREG32_SMC(ixCG_TACH_CTRL, tmp);

	ci_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC_RPM);

	return 0;
}
#endif

static void ci_fan_ctrl_set_default_mode(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u32 tmp;

	if (!pi->fan_ctrl_is_in_default_mode) {
		tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__FDO_PWM_MODE_MASK;
		tmp |= pi->fan_ctrl_default_mode << CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
		WREG32_SMC(ixCG_FDO_CTRL2, tmp);

		tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TMIN_MASK;
		tmp |= pi->t_min << CG_FDO_CTRL2__TMIN__SHIFT;
		WREG32_SMC(ixCG_FDO_CTRL2, tmp);
		pi->fan_ctrl_is_in_default_mode = true;
	}
}

static void ci_thermal_start_smc_fan_control(struct amdgpu_device *adev)
{
	if (adev->pm.dpm.fan.ucode_fan_control) {
		ci_fan_ctrl_start_smc_fan_control(adev);
		ci_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC);
	}
}

static void ci_thermal_initialize(struct amdgpu_device *adev)
{
	u32 tmp;

	if (adev->pm.fan_pulses_per_revolution) {
		tmp = RREG32_SMC(ixCG_TACH_CTRL) & ~CG_TACH_CTRL__EDGE_PER_REV_MASK;
		tmp |= (adev->pm.fan_pulses_per_revolution - 1)
			<< CG_TACH_CTRL__EDGE_PER_REV__SHIFT;
		WREG32_SMC(ixCG_TACH_CTRL, tmp);
	}

	tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TACH_PWM_RESP_RATE_MASK;
	tmp |= 0x28 << CG_FDO_CTRL2__TACH_PWM_RESP_RATE__SHIFT;
	WREG32_SMC(ixCG_FDO_CTRL2, tmp);
}

static int ci_thermal_start_thermal_controller(struct amdgpu_device *adev)
{
	int ret;

	ci_thermal_initialize(adev);
	ret = ci_thermal_set_temperature_range(adev, CISLANDS_TEMP_RANGE_MIN, CISLANDS_TEMP_RANGE_MAX);
	if (ret)
		return ret;
	ret = ci_thermal_enable_alert(adev, true);
	if (ret)
		return ret;
	if (adev->pm.dpm.fan.ucode_fan_control) {
		ret = ci_thermal_setup_fan_table(adev);
		if (ret)
			return ret;
		ci_thermal_start_smc_fan_control(adev);
	}

	return 0;
}

static void ci_thermal_stop_thermal_controller(struct amdgpu_device *adev)
{
	if (!adev->pm.no_fan)
		ci_fan_ctrl_set_default_mode(adev);
}

static int ci_read_smc_soft_register(struct amdgpu_device *adev,
				     u16 reg_offset, u32 *value)
{
	struct ci_power_info *pi = ci_get_pi(adev);

	return amdgpu_ci_read_smc_sram_dword(adev,
					     pi->soft_regs_start + reg_offset,
					     value, pi->sram_end);
}

static int ci_write_smc_soft_register(struct amdgpu_device *adev,
				      u16 reg_offset, u32 value)
{
	struct ci_power_info *pi = ci_get_pi(adev);

	return amdgpu_ci_write_smc_sram_dword(adev,
					      pi->soft_regs_start + reg_offset,
					      value, pi->sram_end);
}

static void ci_init_fps_limits(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	SMU7_Discrete_DpmTable *table = &pi->smc_state_table;

	if (pi->caps_fps) {
		u16 tmp;

		tmp = 45;
		table->FpsHighT = cpu_to_be16(tmp);

		tmp = 30;
		table->FpsLowT = cpu_to_be16(tmp);
	}
}

static int ci_update_sclk_t(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	int ret = 0;
	u32 low_sclk_interrupt_t = 0;

	if (pi->caps_sclk_throttle_low_notification) {
		low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t);

		ret = amdgpu_ci_copy_bytes_to_smc(adev,
						  pi->dpm_table_start +
						  offsetof(SMU7_Discrete_DpmTable, LowSclkInterruptT),
						  (u8 *)&low_sclk_interrupt_t,
						  sizeof(u32), pi->sram_end);
	}

	return ret;
}

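/*
 * Query the VBIOS for leakage voltage (EVV) overrides: record, for each
 * virtual voltage ID, the real VDDC/VDDCI value it stands for, so that
 * leakage placeholders in the clock dependency tables can be patched to
 * actual voltages later.
 */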
static void ci_get_leakage_voltages(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u16 leakage_id, virtual_voltage_id;
	u16 vddc, vddci;
	int i;

	pi->vddc_leakage.count = 0;
	pi->vddci_leakage.count = 0;

	if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
		for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
			virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
			if (amdgpu_atombios_get_voltage_evv(adev, virtual_voltage_id, &vddc) != 0)
				continue;
			if (vddc != 0 && vddc != virtual_voltage_id) {
				pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
				pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
				pi->vddc_leakage.count++;
			}
		}
	} else if (amdgpu_atombios_get_leakage_id_from_vbios(adev, &leakage_id) == 0) {
		for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
			virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
			if (amdgpu_atombios_get_leakage_vddc_based_on_leakage_params(adev, &vddc, &vddci,
										     virtual_voltage_id,
										     leakage_id) == 0) {
				if (vddc != 0 && vddc != virtual_voltage_id) {
					pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
					pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
					pi->vddc_leakage.count++;
				}
				if (vddci != 0 && vddci != virtual_voltage_id) {
					pi->vddci_leakage.actual_voltage[pi->vddci_leakage.count] = vddci;
					pi->vddci_leakage.leakage_id[pi->vddci_leakage.count] = virtual_voltage_id;
					pi->vddci_leakage.count++;
				}
			}
		}
	}
}

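/*
 * Arm or disarm thermal protection according to the set of active
 * auto-throttle sources: choose the DPM event source (internal digital
 * sensor, external GPIO, or either) and gate the THERMAL_PROTECTION_DIS
 * bit in GENERAL_PWRMGT accordingly.
 */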
1530static void ci_set_dpm_event_sources(struct amdgpu_device *adev, u32 sources)
1531{
1532 struct ci_power_info *pi = ci_get_pi(adev);
1533 bool want_thermal_protection;
1534 enum amdgpu_dpm_event_src dpm_event_src;
1535 u32 tmp;
1536
1537 switch (sources) {
1538 case 0:
1539 default:
1540 want_thermal_protection = false;
1541 break;
1542 case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL):
1543 want_thermal_protection = true;
1544 dpm_event_src = AMDGPU_DPM_EVENT_SRC_DIGITAL;
1545 break;
1546 case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
1547 want_thermal_protection = true;
1548 dpm_event_src = AMDGPU_DPM_EVENT_SRC_EXTERNAL;
1549 break;
1550 case ((1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
1551 (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL)):
1552 want_thermal_protection = true;
1553 dpm_event_src = AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL;
1554 break;
1555 }
1556
1557 if (want_thermal_protection) {
1558#if 0
1559 /* XXX: need to figure out how to handle this properly */
1560 tmp = RREG32_SMC(ixCG_THERMAL_CTRL);
1561 tmp &= DPM_EVENT_SRC_MASK;
1562 tmp |= DPM_EVENT_SRC(dpm_event_src);
1563 WREG32_SMC(ixCG_THERMAL_CTRL, tmp);
1564#endif
1565
1566 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
1567 if (pi->thermal_protection)
1568 tmp &= ~GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
1569 else
1570 tmp |= GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
1571 WREG32_SMC(ixGENERAL_PWRMGT, tmp);
1572 } else {
1573 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
1574 tmp |= GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
1575 WREG32_SMC(ixGENERAL_PWRMGT, tmp);
1576 }
1577}
1578
1579static void ci_enable_auto_throttle_source(struct amdgpu_device *adev,
1580 enum amdgpu_dpm_auto_throttle_src source,
1581 bool enable)
1582{
1583 struct ci_power_info *pi = ci_get_pi(adev);
1584
1585 if (enable) {
1586 if (!(pi->active_auto_throttle_sources & (1 << source))) {
1587 pi->active_auto_throttle_sources |= 1 << source;
1588 ci_set_dpm_event_sources(adev, pi->active_auto_throttle_sources);
1589 }
1590 } else {
1591 if (pi->active_auto_throttle_sources & (1 << source)) {
1592 pi->active_auto_throttle_sources &= ~(1 << source);
1593 ci_set_dpm_event_sources(adev, pi->active_auto_throttle_sources);
1594 }
1595 }
1596}
1597
1598static void ci_enable_vr_hot_gpio_interrupt(struct amdgpu_device *adev)
1599{
1600 if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT)
1601 amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableVRHotGPIOInterrupt);
1602}
1603
1604static int ci_unfreeze_sclk_mclk_dpm(struct amdgpu_device *adev)
1605{
1606 struct ci_power_info *pi = ci_get_pi(adev);
1607 PPSMC_Result smc_result;
1608
1609 if (!pi->need_update_smu7_dpm_table)
1610 return 0;
1611
1612 if ((!pi->sclk_dpm_key_disabled) &&
1613 (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
1614 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_SCLKDPM_UnfreezeLevel);
1615 if (smc_result != PPSMC_Result_OK)
1616 return -EINVAL;
1617 }
1618
1619 if ((!pi->mclk_dpm_key_disabled) &&
1620 (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
1621 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_UnfreezeLevel);
1622 if (smc_result != PPSMC_Result_OK)
1623 return -EINVAL;
1624 }
1625
1626 pi->need_update_smu7_dpm_table = 0;
1627 return 0;
1628}
1629
1630static int ci_enable_sclk_mclk_dpm(struct amdgpu_device *adev, bool enable)
1631{
1632 struct ci_power_info *pi = ci_get_pi(adev);
1633 PPSMC_Result smc_result;
1634
1635 if (enable) {
1636 if (!pi->sclk_dpm_key_disabled) {
1637 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DPM_Enable);
1638 if (smc_result != PPSMC_Result_OK)
1639 return -EINVAL;
1640 }
1641
1642 if (!pi->mclk_dpm_key_disabled) {
1643 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_Enable);
1644 if (smc_result != PPSMC_Result_OK)
1645 return -EINVAL;
1646
1647 WREG32_P(mmMC_SEQ_CNTL_3, MC_SEQ_CNTL_3__CAC_EN_MASK,
1648 ~MC_SEQ_CNTL_3__CAC_EN_MASK);
1649
1650 WREG32_SMC(ixLCAC_MC0_CNTL, 0x05);
1651 WREG32_SMC(ixLCAC_MC1_CNTL, 0x05);
1652 WREG32_SMC(ixLCAC_CPL_CNTL, 0x100005);
1653
1654 udelay(10);
1655
1656 WREG32_SMC(ixLCAC_MC0_CNTL, 0x400005);
1657 WREG32_SMC(ixLCAC_MC1_CNTL, 0x400005);
1658 WREG32_SMC(ixLCAC_CPL_CNTL, 0x500005);
1659 }
1660 } else {
1661 if (!pi->sclk_dpm_key_disabled) {
1662 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DPM_Disable);
1663 if (smc_result != PPSMC_Result_OK)
1664 return -EINVAL;
1665 }
1666
1667 if (!pi->mclk_dpm_key_disabled) {
1668 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_Disable);
1669 if (smc_result != PPSMC_Result_OK)
1670 return -EINVAL;
1671 }
1672 }
1673
1674 return 0;
1675}
1676
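/*
 * Bring up dynamic power management: enable global power management
 * and dynamic engine clock scaling, give the SMC a voltage change
 * timeout, enable voltage control, then enable SCLK/MCLK DPM and,
 * unless disabled, PCIe DPM.
 */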
1677static int ci_start_dpm(struct amdgpu_device *adev)
1678{
1679 struct ci_power_info *pi = ci_get_pi(adev);
1680 PPSMC_Result smc_result;
1681 int ret;
1682 u32 tmp;
1683
1684 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
1685 tmp |= GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK;
1686 WREG32_SMC(ixGENERAL_PWRMGT, tmp);
1687
1688 tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
1689 tmp |= SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN_MASK;
1690 WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
1691
1692 ci_write_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, VoltageChangeTimeout), 0x1000);
1693
1694 WREG32_P(mmBIF_LNCNT_RESET, 0, ~BIF_LNCNT_RESET__RESET_LNCNT_EN_MASK);
1695
1696 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Voltage_Cntl_Enable);
1697 if (smc_result != PPSMC_Result_OK)
1698 return -EINVAL;
1699
1700 ret = ci_enable_sclk_mclk_dpm(adev, true);
1701 if (ret)
1702 return ret;
1703
1704 if (!pi->pcie_dpm_key_disabled) {
1705 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PCIeDPM_Enable);
1706 if (smc_result != PPSMC_Result_OK)
1707 return -EINVAL;
1708 }
1709
1710 return 0;
1711}
1712
1713static int ci_freeze_sclk_mclk_dpm(struct amdgpu_device *adev)
1714{
1715 struct ci_power_info *pi = ci_get_pi(adev);
1716 PPSMC_Result smc_result;
1717
1718 if (!pi->need_update_smu7_dpm_table)
1719 return 0;
1720
1721 if ((!pi->sclk_dpm_key_disabled) &&
1722 (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
1723 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_SCLKDPM_FreezeLevel);
1724 if (smc_result != PPSMC_Result_OK)
1725 return -EINVAL;
1726 }
1727
1728 if ((!pi->mclk_dpm_key_disabled) &&
1729 (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
1730 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_FreezeLevel);
1731 if (smc_result != PPSMC_Result_OK)
1732 return -EINVAL;
1733 }
1734
1735 return 0;
1736}
1737
1738static int ci_stop_dpm(struct amdgpu_device *adev)
1739{
1740 struct ci_power_info *pi = ci_get_pi(adev);
1741 PPSMC_Result smc_result;
1742 int ret;
1743 u32 tmp;
1744
1745 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
1746 tmp &= ~GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK;
1747 WREG32_SMC(ixGENERAL_PWRMGT, tmp);
1748
1749 tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
1750 tmp &= ~SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN_MASK;
1751 WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
1752
1753 if (!pi->pcie_dpm_key_disabled) {
1754 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PCIeDPM_Disable);
1755 if (smc_result != PPSMC_Result_OK)
1756 return -EINVAL;
1757 }
1758
1759 ret = ci_enable_sclk_mclk_dpm(adev, false);
1760 if (ret)
1761 return ret;
1762
1763 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Voltage_Cntl_Disable);
1764 if (smc_result != PPSMC_Result_OK)
1765 return -EINVAL;
1766
1767 return 0;
1768}
1769
1770static void ci_enable_sclk_control(struct amdgpu_device *adev, bool enable)
1771{
1772 u32 tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
1773
1774 if (enable)
1775 tmp &= ~SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF_MASK;
1776 else
1777 tmp |= SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF_MASK;
1778 WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
1779}
1780
1781#if 0
1782static int ci_notify_hw_of_power_source(struct amdgpu_device *adev,
1783 bool ac_power)
1784{
1785 struct ci_power_info *pi = ci_get_pi(adev);
1786 struct amdgpu_cac_tdp_table *cac_tdp_table =
1787 adev->pm.dpm.dyn_state.cac_tdp_table;
1788 u32 power_limit;
1789
1790 if (ac_power)
1791 power_limit = (u32)(cac_tdp_table->maximum_power_delivery_limit * 256);
1792 else
1793 power_limit = (u32)(cac_tdp_table->battery_power_limit * 256);
1794
1795 ci_set_power_limit(adev, power_limit);
1796
1797 if (pi->caps_automatic_dc_transition) {
1798 if (ac_power)
1799 amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_RunningOnAC);
1800 else
1801 amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Remove_DC_Clamp);
1802 }
1803
1804 return 0;
1805}
1806#endif
1807
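/*
 * The SMC mailbox carries a single 32-bit argument in SMC_MSG_ARG_0:
 * write it before sending a message that takes a parameter, or read
 * it back after a message that returns one.
 */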
1808static PPSMC_Result amdgpu_ci_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
1809 PPSMC_Msg msg, u32 parameter)
1810{
1811 WREG32(mmSMC_MSG_ARG_0, parameter);
1812 return amdgpu_ci_send_msg_to_smc(adev, msg);
1813}
1814
1815static PPSMC_Result amdgpu_ci_send_msg_to_smc_return_parameter(struct amdgpu_device *adev,
1816 PPSMC_Msg msg, u32 *parameter)
1817{
1818 PPSMC_Result smc_result;
1819
1820 smc_result = amdgpu_ci_send_msg_to_smc(adev, msg);
1821
1822 if ((smc_result == PPSMC_Result_OK) && parameter)
1823 *parameter = RREG32(mmSMC_MSG_ARG_0);
1824
1825 return smc_result;
1826}
1827
1828static int ci_dpm_force_state_sclk(struct amdgpu_device *adev, u32 n)
1829{
1830 struct ci_power_info *pi = ci_get_pi(adev);
1831
1832 if (!pi->sclk_dpm_key_disabled) {
1833 PPSMC_Result smc_result =
1834 amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SCLKDPM_SetEnabledMask, 1 << n);
1835 if (smc_result != PPSMC_Result_OK)
1836 return -EINVAL;
1837 }
1838
1839 return 0;
1840}
1841
1842static int ci_dpm_force_state_mclk(struct amdgpu_device *adev, u32 n)
1843{
1844 struct ci_power_info *pi = ci_get_pi(adev);
1845
1846 if (!pi->mclk_dpm_key_disabled) {
1847 PPSMC_Result smc_result =
1848 amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_MCLKDPM_SetEnabledMask, 1 << n);
1849 if (smc_result != PPSMC_Result_OK)
1850 return -EINVAL;
1851 }
1852
1853 return 0;
1854}
1855
1856static int ci_dpm_force_state_pcie(struct amdgpu_device *adev, u32 n)
1857{
1858 struct ci_power_info *pi = ci_get_pi(adev);
1859
1860 if (!pi->pcie_dpm_key_disabled) {
1861 PPSMC_Result smc_result =
1862 amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_PCIeDPM_ForceLevel, n);
1863 if (smc_result != PPSMC_Result_OK)
1864 return -EINVAL;
1865 }
1866
1867 return 0;
1868}
1869
1870static int ci_set_power_limit(struct amdgpu_device *adev, u32 n)
1871{
1872 struct ci_power_info *pi = ci_get_pi(adev);
1873
1874 if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit) {
1875 PPSMC_Result smc_result =
1876 amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_PkgPwrSetLimit, n);
1877 if (smc_result != PPSMC_Result_OK)
1878 return -EINVAL;
1879 }
1880
1881 return 0;
1882}
1883
1884static int ci_set_overdrive_target_tdp(struct amdgpu_device *adev,
1885 u32 target_tdp)
1886{
1887 PPSMC_Result smc_result =
1888 amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_OverDriveSetTargetTdp, target_tdp);
1889 if (smc_result != PPSMC_Result_OK)
1890 return -EINVAL;
1891 return 0;
1892}
1893
1894#if 0
1895static int ci_set_boot_state(struct amdgpu_device *adev)
1896{
1897 return ci_enable_sclk_mclk_dpm(adev, false);
1898}
1899#endif
1900
1901static u32 ci_get_average_sclk_freq(struct amdgpu_device *adev)
1902{
1903 u32 sclk_freq;
1904 PPSMC_Result smc_result =
1905 amdgpu_ci_send_msg_to_smc_return_parameter(adev,
1906 PPSMC_MSG_API_GetSclkFrequency,
1907 &sclk_freq);
1908 if (smc_result != PPSMC_Result_OK)
1909 sclk_freq = 0;
1910
1911 return sclk_freq;
1912}
1913
1914static u32 ci_get_average_mclk_freq(struct amdgpu_device *adev)
1915{
1916 u32 mclk_freq;
1917 PPSMC_Result smc_result =
1918 amdgpu_ci_send_msg_to_smc_return_parameter(adev,
1919 PPSMC_MSG_API_GetMclkFrequency,
1920 &mclk_freq);
1921 if (smc_result != PPSMC_Result_OK)
1922 mclk_freq = 0;
1923
1924 return mclk_freq;
1925}
1926
1927static void ci_dpm_start_smc(struct amdgpu_device *adev)
1928{
1929 int i;
1930
1931 amdgpu_ci_program_jump_on_start(adev);
1932 amdgpu_ci_start_smc_clock(adev);
1933 amdgpu_ci_start_smc(adev);
1934 for (i = 0; i < adev->usec_timeout; i++) {
1935 if (RREG32_SMC(ixFIRMWARE_FLAGS) & FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK)
1936 break;
1937 }
1938}
1939
1940static void ci_dpm_stop_smc(struct amdgpu_device *adev)
1941{
1942 amdgpu_ci_reset_smc(adev);
1943 amdgpu_ci_stop_smc_clock(adev);
1944}
1945
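/*
 * The SMC firmware header holds the SRAM offsets of the tables the
 * driver patches at runtime (DPM table, soft registers, MC register
 * table, fan table, MC arbiter timing table).  Cache them here so
 * later uploads can address the tables directly.
 */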
1946static int ci_process_firmware_header(struct amdgpu_device *adev)
1947{
1948 struct ci_power_info *pi = ci_get_pi(adev);
1949 u32 tmp;
1950 int ret;
1951
1952 ret = amdgpu_ci_read_smc_sram_dword(adev,
1953 SMU7_FIRMWARE_HEADER_LOCATION +
1954 offsetof(SMU7_Firmware_Header, DpmTable),
1955 &tmp, pi->sram_end);
1956 if (ret)
1957 return ret;
1958
1959 pi->dpm_table_start = tmp;
1960
1961 ret = amdgpu_ci_read_smc_sram_dword(adev,
1962 SMU7_FIRMWARE_HEADER_LOCATION +
1963 offsetof(SMU7_Firmware_Header, SoftRegisters),
1964 &tmp, pi->sram_end);
1965 if (ret)
1966 return ret;
1967
1968 pi->soft_regs_start = tmp;
1969
1970 ret = amdgpu_ci_read_smc_sram_dword(adev,
1971 SMU7_FIRMWARE_HEADER_LOCATION +
1972 offsetof(SMU7_Firmware_Header, mcRegisterTable),
1973 &tmp, pi->sram_end);
1974 if (ret)
1975 return ret;
1976
1977 pi->mc_reg_table_start = tmp;
1978
1979 ret = amdgpu_ci_read_smc_sram_dword(adev,
1980 SMU7_FIRMWARE_HEADER_LOCATION +
1981 offsetof(SMU7_Firmware_Header, FanTable),
1982 &tmp, pi->sram_end);
1983 if (ret)
1984 return ret;
1985
1986 pi->fan_table_start = tmp;
1987
1988 ret = amdgpu_ci_read_smc_sram_dword(adev,
1989 SMU7_FIRMWARE_HEADER_LOCATION +
1990 offsetof(SMU7_Firmware_Header, mcArbDramTimingTable),
1991 &tmp, pi->sram_end);
1992 if (ret)
1993 return ret;
1994
1995 pi->arb_table_start = tmp;
1996
1997 return 0;
1998}
1999
2000static void ci_read_clock_registers(struct amdgpu_device *adev)
2001{
2002 struct ci_power_info *pi = ci_get_pi(adev);
2003
2004 pi->clock_registers.cg_spll_func_cntl =
2005 RREG32_SMC(ixCG_SPLL_FUNC_CNTL);
2006 pi->clock_registers.cg_spll_func_cntl_2 =
2007 RREG32_SMC(ixCG_SPLL_FUNC_CNTL_2);
2008 pi->clock_registers.cg_spll_func_cntl_3 =
2009 RREG32_SMC(ixCG_SPLL_FUNC_CNTL_3);
2010 pi->clock_registers.cg_spll_func_cntl_4 =
2011 RREG32_SMC(ixCG_SPLL_FUNC_CNTL_4);
2012 pi->clock_registers.cg_spll_spread_spectrum =
2013 RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM);
2014 pi->clock_registers.cg_spll_spread_spectrum_2 =
2015 RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM_2);
2016 pi->clock_registers.dll_cntl = RREG32(mmDLL_CNTL);
2017 pi->clock_registers.mclk_pwrmgt_cntl = RREG32(mmMCLK_PWRMGT_CNTL);
2018 pi->clock_registers.mpll_ad_func_cntl = RREG32(mmMPLL_AD_FUNC_CNTL);
2019 pi->clock_registers.mpll_dq_func_cntl = RREG32(mmMPLL_DQ_FUNC_CNTL);
2020 pi->clock_registers.mpll_func_cntl = RREG32(mmMPLL_FUNC_CNTL);
2021 pi->clock_registers.mpll_func_cntl_1 = RREG32(mmMPLL_FUNC_CNTL_1);
2022 pi->clock_registers.mpll_func_cntl_2 = RREG32(mmMPLL_FUNC_CNTL_2);
2023 pi->clock_registers.mpll_ss1 = RREG32(mmMPLL_SS1);
2024 pi->clock_registers.mpll_ss2 = RREG32(mmMPLL_SS2);
2025}
2026
2027static void ci_init_sclk_t(struct amdgpu_device *adev)
2028{
2029 struct ci_power_info *pi = ci_get_pi(adev);
2030
2031 pi->low_sclk_interrupt_t = 0;
2032}
2033
2034static void ci_enable_thermal_protection(struct amdgpu_device *adev,
2035 bool enable)
2036{
2037 u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
2038
2039 if (enable)
2040 tmp &= ~GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
2041 else
2042 tmp |= GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
2043 WREG32_SMC(ixGENERAL_PWRMGT, tmp);
2044}
2045
2046static void ci_enable_acpi_power_management(struct amdgpu_device *adev)
2047{
2048 u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
2049
2050 tmp |= GENERAL_PWRMGT__STATIC_PM_EN_MASK;
2051
2052 WREG32_SMC(ixGENERAL_PWRMGT, tmp);
2053}
2054
2055#if 0
2056static int ci_enter_ulp_state(struct amdgpu_device *adev)
2057{
2058
2059 WREG32(mmSMC_MESSAGE_0, PPSMC_MSG_SwitchToMinimumPower);
2060
2061 udelay(25000);
2062
2063 return 0;
2064}
2065
2066static int ci_exit_ulp_state(struct amdgpu_device *adev)
2067{
2068 int i;
2069
2070 WREG32(mmSMC_MESSAGE_0, PPSMC_MSG_ResumeFromMinimumPower);
2071
2072 udelay(7000);
2073
2074 for (i = 0; i < adev->usec_timeout; i++) {
2075 if (RREG32(mmSMC_RESP_0) == 1)
2076 break;
2077 udelay(1000);
2078 }
2079
2080 return 0;
2081}
2082#endif
2083
2084static int ci_notify_smc_display_change(struct amdgpu_device *adev,
2085 bool has_display)
2086{
2087 PPSMC_Msg msg = has_display ? PPSMC_MSG_HasDisplay : PPSMC_MSG_NoDisplay;
2088
2089 return (amdgpu_ci_send_msg_to_smc(adev, msg) == PPSMC_Result_OK) ? 0 : -EINVAL;
2090}
2091
2092static int ci_enable_ds_master_switch(struct amdgpu_device *adev,
2093 bool enable)
2094{
2095 struct ci_power_info *pi = ci_get_pi(adev);
2096
2097 if (enable) {
2098 if (pi->caps_sclk_ds) {
2099 if (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MASTER_DeepSleep_ON) != PPSMC_Result_OK)
2100 return -EINVAL;
2101 } else {
2102 if (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
2103 return -EINVAL;
2104 }
2105 } else {
2106 if (pi->caps_sclk_ds) {
2107 if (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
2108 return -EINVAL;
2109 }
2110 }
2111
2112 return 0;
2113}
2114
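/*
 * Program the display gap: with active displays, restrict reclocking
 * to vblank (or watermark) windows and tell the SMC how close to the
 * next vblank it may still start a switch.  Falls back to 60 Hz and
 * a 500 us vblank time when either value is unavailable.
 */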
2115static void ci_program_display_gap(struct amdgpu_device *adev)
2116{
2117 u32 tmp = RREG32_SMC(ixCG_DISPLAY_GAP_CNTL);
2118 u32 pre_vbi_time_in_us;
2119 u32 frame_time_in_us;
2120 u32 ref_clock = adev->clock.spll.reference_freq;
2121 u32 refresh_rate = amdgpu_dpm_get_vrefresh(adev);
2122 u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
2123
2124 tmp &= ~CG_DISPLAY_GAP_CNTL__DISP_GAP_MASK;
2125 if (adev->pm.dpm.new_active_crtc_count > 0)
2126 tmp |= (AMDGPU_PM_DISPLAY_GAP_VBLANK_OR_WM << CG_DISPLAY_GAP_CNTL__DISP_GAP__SHIFT);
2127 else
2128 tmp |= (AMDGPU_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP_GAP__SHIFT);
2129 WREG32_SMC(ixCG_DISPLAY_GAP_CNTL, tmp);
2130
2131 if (refresh_rate == 0)
2132 refresh_rate = 60;
2133 if (vblank_time == 0xffffffff)
2134 vblank_time = 500;
2135 frame_time_in_us = 1000000 / refresh_rate;
2136 pre_vbi_time_in_us =
2137 frame_time_in_us - 200 - vblank_time;
2138 tmp = pre_vbi_time_in_us * (ref_clock / 100);
2139
2140 WREG32_SMC(ixCG_DISPLAY_GAP_CNTL2, tmp);
2141 ci_write_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, PreVBlankGap), 0x64);
2142 ci_write_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us));
2143
2144
2145 ci_notify_smc_display_change(adev, (adev->pm.dpm.new_active_crtc_count == 1));
2146
2147}
2148
2149static void ci_enable_spread_spectrum(struct amdgpu_device *adev, bool enable)
2150{
2151 struct ci_power_info *pi = ci_get_pi(adev);
2152 u32 tmp;
2153
2154 if (enable) {
2155 if (pi->caps_sclk_ss_support) {
2156 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
2157 tmp |= GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN_MASK;
2158 WREG32_SMC(ixGENERAL_PWRMGT, tmp);
2159 }
2160 } else {
2161 tmp = RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM);
2162 tmp &= ~CG_SPLL_SPREAD_SPECTRUM__SSEN_MASK;
2163 WREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM, tmp);
2164
2165 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
2166 tmp &= ~GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN_MASK;
2167 WREG32_SMC(ixGENERAL_PWRMGT, tmp);
2168 }
2169}
2170
2171static void ci_program_sstp(struct amdgpu_device *adev)
2172{
2173 WREG32_SMC(ixCG_STATIC_SCREEN_PARAMETER,
2174 ((CISLANDS_SSTU_DFLT << CG_STATIC_SCREEN_PARAMETER__STATIC_SCREEN_THRESHOLD_UNIT__SHIFT) |
2175 (CISLANDS_SST_DFLT << CG_STATIC_SCREEN_PARAMETER__STATIC_SCREEN_THRESHOLD__SHIFT)));
2176}
2177
2178static void ci_enable_display_gap(struct amdgpu_device *adev)
2179{
2180 u32 tmp = RREG32_SMC(ixCG_DISPLAY_GAP_CNTL);
2181
2182 tmp &= ~(CG_DISPLAY_GAP_CNTL__DISP_GAP_MASK |
2183 CG_DISPLAY_GAP_CNTL__DISP_GAP_MCHG_MASK);
2184 tmp |= ((AMDGPU_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP_GAP__SHIFT) |
2185 (AMDGPU_PM_DISPLAY_GAP_VBLANK << CG_DISPLAY_GAP_CNTL__DISP_GAP_MCHG__SHIFT));
2186
2187 WREG32_SMC(ixCG_DISPLAY_GAP_CNTL, tmp);
2188}
2189
2190static void ci_program_vc(struct amdgpu_device *adev)
2191{
2192 u32 tmp;
2193
2194 tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
2195 tmp &= ~(SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK | SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK);
2196 WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
2197
2198 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, CISLANDS_VRC_DFLT0);
2199 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_1, CISLANDS_VRC_DFLT1);
2200 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_2, CISLANDS_VRC_DFLT2);
2201 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_3, CISLANDS_VRC_DFLT3);
2202 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_4, CISLANDS_VRC_DFLT4);
2203 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_5, CISLANDS_VRC_DFLT5);
2204 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_6, CISLANDS_VRC_DFLT6);
2205 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_7, CISLANDS_VRC_DFLT7);
2206}
2207
2208static void ci_clear_vc(struct amdgpu_device *adev)
2209{
2210 u32 tmp;
2211
2212 tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
2213 tmp |= (SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK | SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK);
2214 WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
2215
2216 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0);
2217 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_1, 0);
2218 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_2, 0);
2219 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_3, 0);
2220 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_4, 0);
2221 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_5, 0);
2222 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_6, 0);
2223 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_7, 0);
2224}
2225
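/*
 * Load the SMC microcode: skip the upload when the SMC is already
 * running, otherwise wait for the boot sequence to complete, stop
 * and reset the SMC, then copy the firmware image into SMC RAM.
 */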
2226static int ci_upload_firmware(struct amdgpu_device *adev)
2227{
2228 int i, ret;
2229
2230 if (amdgpu_ci_is_smc_running(adev)) {
2231 DRM_INFO("smc is running, no need to load smc firmware\n");
2232 return 0;
2233 }
2234
2235 for (i = 0; i < adev->usec_timeout; i++) {
2236 if (RREG32_SMC(ixRCU_UC_EVENTS) & RCU_UC_EVENTS__boot_seq_done_MASK)
2237 break;
2238 }
2239 WREG32_SMC(ixSMC_SYSCON_MISC_CNTL, 1);
2240
2241 amdgpu_ci_stop_smc_clock(adev);
2242 amdgpu_ci_reset_smc(adev);
2243
2244 ret = amdgpu_ci_load_smc_ucode(adev, SMC_RAM_END);
2245
2246 return ret;
2247
2248}
2249
2250static int ci_get_svi2_voltage_table(struct amdgpu_device *adev,
2251 struct amdgpu_clock_voltage_dependency_table *voltage_dependency_table,
2252 struct atom_voltage_table *voltage_table)
2253{
2254 u32 i;
2255
2256 if (voltage_dependency_table == NULL)
2257 return -EINVAL;
2258
2259 voltage_table->mask_low = 0;
2260 voltage_table->phase_delay = 0;
2261
2262 voltage_table->count = voltage_dependency_table->count;
2263 for (i = 0; i < voltage_table->count; i++) {
2264 voltage_table->entries[i].value = voltage_dependency_table->entries[i].v;
2265 voltage_table->entries[i].smio_low = 0;
2266 }
2267
2268 return 0;
2269}
2270
2271static int ci_construct_voltage_tables(struct amdgpu_device *adev)
2272{
2273 struct ci_power_info *pi = ci_get_pi(adev);
2274 int ret;
2275
2276 if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
2277 ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDC,
2278 VOLTAGE_OBJ_GPIO_LUT,
2279 &pi->vddc_voltage_table);
2280 if (ret)
2281 return ret;
2282 } else if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
2283 ret = ci_get_svi2_voltage_table(adev,
2284 &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
2285 &pi->vddc_voltage_table);
2286 if (ret)
2287 return ret;
2288 }
2289
2290 if (pi->vddc_voltage_table.count > SMU7_MAX_LEVELS_VDDC)
2291 ci_trim_voltage_table_to_fit_state_table(adev, SMU7_MAX_LEVELS_VDDC,
2292 &pi->vddc_voltage_table);
2293
2294 if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
2295 ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDCI,
2296 VOLTAGE_OBJ_GPIO_LUT,
2297 &pi->vddci_voltage_table);
2298 if (ret)
2299 return ret;
2300 } else if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
2301 ret = ci_get_svi2_voltage_table(adev,
2302 &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
2303 &pi->vddci_voltage_table);
2304 if (ret)
2305 return ret;
2306 }
2307
2308 if (pi->vddci_voltage_table.count > SMU7_MAX_LEVELS_VDDCI)
2309 ci_trim_voltage_table_to_fit_state_table(adev, SMU7_MAX_LEVELS_VDDCI,
2310 &pi->vddci_voltage_table);
2311
2312 if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
2313 ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_MVDDC,
2314 VOLTAGE_OBJ_GPIO_LUT,
2315 &pi->mvdd_voltage_table);
2316 if (ret)
2317 return ret;
2318 } else if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
2319 ret = ci_get_svi2_voltage_table(adev,
2320 &adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
2321 &pi->mvdd_voltage_table);
2322 if (ret)
2323 return ret;
2324 }
2325
2326 if (pi->mvdd_voltage_table.count > SMU7_MAX_LEVELS_MVDD)
2327 ci_trim_voltage_table_to_fit_state_table(adev, SMU7_MAX_LEVELS_MVDD,
2328 &pi->mvdd_voltage_table);
2329
2330 return 0;
2331}
2332
2333static void ci_populate_smc_voltage_table(struct amdgpu_device *adev,
2334 struct atom_voltage_table_entry *voltage_table,
2335 SMU7_Discrete_VoltageLevel *smc_voltage_table)
2336{
2337 int ret;
2338
2339 ret = ci_get_std_voltage_value_sidd(adev, voltage_table,
2340 &smc_voltage_table->StdVoltageHiSidd,
2341 &smc_voltage_table->StdVoltageLoSidd);
2342
2343 if (ret) {
2344 smc_voltage_table->StdVoltageHiSidd = voltage_table->value * VOLTAGE_SCALE;
2345 smc_voltage_table->StdVoltageLoSidd = voltage_table->value * VOLTAGE_SCALE;
2346 }
2347
2348 smc_voltage_table->Voltage = cpu_to_be16(voltage_table->value * VOLTAGE_SCALE);
2349 smc_voltage_table->StdVoltageHiSidd =
2350 cpu_to_be16(smc_voltage_table->StdVoltageHiSidd);
2351 smc_voltage_table->StdVoltageLoSidd =
2352 cpu_to_be16(smc_voltage_table->StdVoltageLoSidd);
2353}
2354
2355static int ci_populate_smc_vddc_table(struct amdgpu_device *adev,
2356 SMU7_Discrete_DpmTable *table)
2357{
2358 struct ci_power_info *pi = ci_get_pi(adev);
2359 unsigned int count;
2360
2361 table->VddcLevelCount = pi->vddc_voltage_table.count;
2362 for (count = 0; count < table->VddcLevelCount; count++) {
2363 ci_populate_smc_voltage_table(adev,
2364 &pi->vddc_voltage_table.entries[count],
2365 &table->VddcLevel[count]);
2366
2367 if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
2368 table->VddcLevel[count].Smio |=
2369 pi->vddc_voltage_table.entries[count].smio_low;
2370 else
2371 table->VddcLevel[count].Smio = 0;
2372 }
2373 table->VddcLevelCount = cpu_to_be32(table->VddcLevelCount);
2374
2375 return 0;
2376}
2377
2378static int ci_populate_smc_vddci_table(struct amdgpu_device *adev,
2379 SMU7_Discrete_DpmTable *table)
2380{
2381 unsigned int count;
2382 struct ci_power_info *pi = ci_get_pi(adev);
2383
2384 table->VddciLevelCount = pi->vddci_voltage_table.count;
2385 for (count = 0; count < table->VddciLevelCount; count++) {
2386 ci_populate_smc_voltage_table(adev,
2387 &pi->vddci_voltage_table.entries[count],
2388 &table->VddciLevel[count]);
2389
2390 if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
2391 table->VddciLevel[count].Smio |=
2392 pi->vddci_voltage_table.entries[count].smio_low;
2393 else
2394 table->VddciLevel[count].Smio = 0;
2395 }
2396 table->VddciLevelCount = cpu_to_be32(table->VddciLevelCount);
2397
2398 return 0;
2399}
2400
2401static int ci_populate_smc_mvdd_table(struct amdgpu_device *adev,
2402 SMU7_Discrete_DpmTable *table)
2403{
2404 struct ci_power_info *pi = ci_get_pi(adev);
2405 unsigned int count;
2406
2407 table->MvddLevelCount = pi->mvdd_voltage_table.count;
2408 for (count = 0; count < table->MvddLevelCount; count++) {
2409 ci_populate_smc_voltage_table(adev,
2410 &pi->mvdd_voltage_table.entries[count],
2411 &table->MvddLevel[count]);
2412
2413 if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
2414 table->MvddLevel[count].Smio |=
2415 pi->mvdd_voltage_table.entries[count].smio_low;
2416 else
2417 table->MvddLevel[count].Smio = 0;
2418 }
2419 table->MvddLevelCount = cpu_to_be32(table->MvddLevelCount);
2420
2421 return 0;
2422}
2423
2424static int ci_populate_smc_voltage_tables(struct amdgpu_device *adev,
2425 SMU7_Discrete_DpmTable *table)
2426{
2427 int ret;
2428
2429 ret = ci_populate_smc_vddc_table(adev, table);
2430 if (ret)
2431 return ret;
2432
2433 ret = ci_populate_smc_vddci_table(adev, table);
2434 if (ret)
2435 return ret;
2436
2437 ret = ci_populate_smc_mvdd_table(adev, table);
2438 if (ret)
2439 return ret;
2440
2441 return 0;
2442}
2443
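/*
 * Look up the MVDD level for a given memory clock.  Note that, as
 * written, the function returns -EINVAL even when a matching entry
 * is found; its caller treats any nonzero return as "no MVDD value"
 * and falls back to 0, so the success path appears to be unused.
 */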
2444static int ci_populate_mvdd_value(struct amdgpu_device *adev, u32 mclk,
2445 SMU7_Discrete_VoltageLevel *voltage)
2446{
2447 struct ci_power_info *pi = ci_get_pi(adev);
2448 u32 i = 0;
2449
2450 if (pi->mvdd_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
2451 for (i = 0; i < adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count; i++) {
2452 if (mclk <= adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries[i].clk) {
2453 voltage->Voltage = pi->mvdd_voltage_table.entries[i].value;
2454 break;
2455 }
2456 }
2457
2458 if (i >= adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count)
2459 return -EINVAL;
2460 }
2461
2462 return -EINVAL;
2463}
2464
2465static int ci_get_std_voltage_value_sidd(struct amdgpu_device *adev,
2466 struct atom_voltage_table_entry *voltage_table,
2467 u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd)
2468{
2469 u16 v_index, idx;
2470 bool voltage_found = false;
2471 *std_voltage_hi_sidd = voltage_table->value * VOLTAGE_SCALE;
2472 *std_voltage_lo_sidd = voltage_table->value * VOLTAGE_SCALE;
2473
2474 if (adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries == NULL)
2475 return -EINVAL;
2476
2477 if (adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
2478 for (v_index = 0; (u32)v_index < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
2479 if (voltage_table->value ==
2480 adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
2481 voltage_found = true;
2482 if ((u32)v_index < adev->pm.dpm.dyn_state.cac_leakage_table.count)
2483 idx = v_index;
2484 else
2485 idx = adev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
2486 *std_voltage_lo_sidd =
2487 adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
2488 *std_voltage_hi_sidd =
2489 adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
2490 break;
2491 }
2492 }
2493
2494 if (!voltage_found) {
2495 for (v_index = 0; (u32)v_index < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
2496 if (voltage_table->value <=
2497 adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
2498 voltage_found = true;
2499 if ((u32)v_index < adev->pm.dpm.dyn_state.cac_leakage_table.count)
2500 idx = v_index;
2501 else
2502 idx = adev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
2503 *std_voltage_lo_sidd =
2504 adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
2505 *std_voltage_hi_sidd =
2506 adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
2507 break;
2508 }
2509 }
2510 }
2511 }
2512
2513 return 0;
2514}
2515
2516static void ci_populate_phase_value_based_on_sclk(struct amdgpu_device *adev,
2517 const struct amdgpu_phase_shedding_limits_table *limits,
2518 u32 sclk,
2519 u32 *phase_shedding)
2520{
2521 unsigned int i;
2522
2523 *phase_shedding = 1;
2524
2525 for (i = 0; i < limits->count; i++) {
2526 if (sclk < limits->entries[i].sclk) {
2527 *phase_shedding = i;
2528 break;
2529 }
2530 }
2531}
2532
2533static void ci_populate_phase_value_based_on_mclk(struct amdgpu_device *adev,
2534 const struct amdgpu_phase_shedding_limits_table *limits,
2535 u32 mclk,
2536 u32 *phase_shedding)
2537{
2538 unsigned int i;
2539
2540 *phase_shedding = 1;
2541
2542 for (i = 0; i < limits->count; i++) {
2543 if (mclk < limits->entries[i].mclk) {
2544 *phase_shedding = i;
2545 break;
2546 }
2547 }
2548}
2549
2550static int ci_init_arb_table_index(struct amdgpu_device *adev)
2551{
2552 struct ci_power_info *pi = ci_get_pi(adev);
2553 u32 tmp;
2554 int ret;
2555
2556 ret = amdgpu_ci_read_smc_sram_dword(adev, pi->arb_table_start,
2557 &tmp, pi->sram_end);
2558 if (ret)
2559 return ret;
2560
2561 tmp &= 0x00FFFFFF;
2562 tmp |= MC_CG_ARB_FREQ_F1 << 24;
2563
2564 return amdgpu_ci_write_smc_sram_dword(adev, pi->arb_table_start,
2565 tmp, pi->sram_end);
2566}
2567
2568static int ci_get_dependency_volt_by_clk(struct amdgpu_device *adev,
2569 struct amdgpu_clock_voltage_dependency_table *allowed_clock_voltage_table,
2570 u32 clock, u32 *voltage)
2571{
2572 u32 i = 0;
2573
2574 if (allowed_clock_voltage_table->count == 0)
2575 return -EINVAL;
2576
2577 for (i = 0; i < allowed_clock_voltage_table->count; i++) {
2578 if (allowed_clock_voltage_table->entries[i].clk >= clock) {
2579 *voltage = allowed_clock_voltage_table->entries[i].v;
2580 return 0;
2581 }
2582 }
2583
2584 *voltage = allowed_clock_voltage_table->entries[i-1].v;
2585
2586 return 0;
2587}
2588
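/*
 * Pick the largest deep sleep divider ID such that sclk >> id stays
 * at or above the minimum engine clock, scanning down from
 * CISLAND_MAX_DEEPSLEEP_DIVIDER_ID.
 */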
2589static u8 ci_get_sleep_divider_id_from_clock(u32 sclk, u32 min_sclk_in_sr)
2590{
2591 u32 i;
2592 u32 tmp;
2593 u32 min = max(min_sclk_in_sr, (u32)CISLAND_MINIMUM_ENGINE_CLOCK);
2594
2595 if (sclk < min)
2596 return 0;
2597
2598 for (i = CISLAND_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) {
2599 tmp = sclk >> i;
2600 if (tmp >= min || i == 0)
2601 break;
2602 }
2603
2604 return (u8)i;
2605}
2606
2607static int ci_initial_switch_from_arb_f0_to_f1(struct amdgpu_device *adev)
2608{
2609 return ci_copy_and_switch_arb_sets(adev, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
2610}
2611
2612static int ci_reset_to_default(struct amdgpu_device *adev)
2613{
2614 return (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_ResetToDefaults) == PPSMC_Result_OK) ?
2615 0 : -EINVAL;
2616}
2617
2618static int ci_force_switch_to_arb_f0(struct amdgpu_device *adev)
2619{
2620 u32 tmp;
2621
2622 tmp = (RREG32_SMC(ixSMC_SCRATCH9) & 0x0000ff00) >> 8;
2623
2624 if (tmp == MC_CG_ARB_FREQ_F0)
2625 return 0;
2626
2627 return ci_copy_and_switch_arb_sets(adev, tmp, MC_CG_ARB_FREQ_F0);
2628}
2629
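/*
 * What looks like a board-specific MC arbiter workaround: on device
 * IDs 0x67B0/0x67B1 (further qualified by MC_SEQ_MISC0), bits 23:16
 * of DRAM_TIMING2 are recomputed from the engine clock for the two
 * memory clock ranges checked below.
 */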
2630static void ci_register_patching_mc_arb(struct amdgpu_device *adev,
2631 const u32 engine_clock,
2632 const u32 memory_clock,
2633 u32 *dram_timing2)
2634{
2635 bool patch;
2636 u32 tmp, tmp2;
2637
2638 tmp = RREG32(mmMC_SEQ_MISC0);
2639 patch = ((tmp & 0x0000f00) == 0x300) ? true : false;
2640
2641 if (patch &&
2642 ((adev->pdev->device == 0x67B0) ||
2643 (adev->pdev->device == 0x67B1))) {
2644 if ((memory_clock > 100000) && (memory_clock <= 125000)) {
2645 tmp2 = (((0x31 * engine_clock) / 125000) - 1) & 0xff;
2646 *dram_timing2 &= ~0x00ff0000;
2647 *dram_timing2 |= tmp2 << 16;
2648 } else if ((memory_clock > 125000) && (memory_clock <= 137500)) {
2649 tmp2 = (((0x36 * engine_clock) / 137500) - 1) & 0xff;
2650 *dram_timing2 &= ~0x00ff0000;
2651 *dram_timing2 |= tmp2 << 16;
2652 }
2653 }
2654}
2655
2656static int ci_populate_memory_timing_parameters(struct amdgpu_device *adev,
2657 u32 sclk,
2658 u32 mclk,
2659 SMU7_Discrete_MCArbDramTimingTableEntry *arb_regs)
2660{
2661 u32 dram_timing;
2662 u32 dram_timing2;
2663 u32 burst_time;
2664
2665 amdgpu_atombios_set_engine_dram_timings(adev, sclk, mclk);
2666
2667 dram_timing = RREG32(mmMC_ARB_DRAM_TIMING);
2668 dram_timing2 = RREG32(mmMC_ARB_DRAM_TIMING2);
2669 burst_time = RREG32(mmMC_ARB_BURST_TIME) & MC_ARB_BURST_TIME__STATE0_MASK;
2670
2671 ci_register_patching_mc_arb(adev, sclk, mclk, &dram_timing2);
2672
2673 arb_regs->McArbDramTiming = cpu_to_be32(dram_timing);
2674 arb_regs->McArbDramTiming2 = cpu_to_be32(dram_timing2);
2675 arb_regs->McArbBurstTime = (u8)burst_time;
2676
2677 return 0;
2678}
2679
2680static int ci_do_program_memory_timing_parameters(struct amdgpu_device *adev)
2681{
2682 struct ci_power_info *pi = ci_get_pi(adev);
2683 SMU7_Discrete_MCArbDramTimingTable arb_regs;
2684 u32 i, j;
2685 int ret = 0;
2686
2687 memset(&arb_regs, 0, sizeof(SMU7_Discrete_MCArbDramTimingTable));
2688
2689 for (i = 0; i < pi->dpm_table.sclk_table.count; i++) {
2690 for (j = 0; j < pi->dpm_table.mclk_table.count; j++) {
2691 ret = ci_populate_memory_timing_parameters(adev,
2692 pi->dpm_table.sclk_table.dpm_levels[i].value,
2693 pi->dpm_table.mclk_table.dpm_levels[j].value,
2694 &arb_regs.entries[i][j]);
2695 if (ret)
2696 break;
2697 }
2698 }
2699
2700 if (ret == 0)
2701 ret = amdgpu_ci_copy_bytes_to_smc(adev,
2702 pi->arb_table_start,
2703 (u8 *)&arb_regs,
2704 sizeof(SMU7_Discrete_MCArbDramTimingTable),
2705 pi->sram_end);
2706
2707 return ret;
2708}
2709
2710static int ci_program_memory_timing_parameters(struct amdgpu_device *adev)
2711{
2712 struct ci_power_info *pi = ci_get_pi(adev);
2713
2714 if (pi->need_update_smu7_dpm_table == 0)
2715 return 0;
2716
2717 return ci_do_program_memory_timing_parameters(adev);
2718}
2719
2720static void ci_populate_smc_initial_state(struct amdgpu_device *adev,
2721 struct amdgpu_ps *amdgpu_boot_state)
2722{
2723 struct ci_ps *boot_state = ci_get_ps(amdgpu_boot_state);
2724 struct ci_power_info *pi = ci_get_pi(adev);
2725 u32 level = 0;
2726
2727 for (level = 0; level < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; level++) {
2728 if (adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[level].clk >=
2729 boot_state->performance_levels[0].sclk) {
2730 pi->smc_state_table.GraphicsBootLevel = level;
2731 break;
2732 }
2733 }
2734
2735 for (level = 0; level < adev->pm.dpm.dyn_state.vddc_dependency_on_mclk.count; level++) {
2736 if (adev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries[level].clk >=
2737 boot_state->performance_levels[0].mclk) {
2738 pi->smc_state_table.MemoryBootLevel = level;
2739 break;
2740 }
2741 }
2742}
2743
2744static u32 ci_get_dpm_level_enable_mask_value(struct ci_single_dpm_table *dpm_table)
2745{
2746 u32 i;
2747 u32 mask_value = 0;
2748
2749 for (i = dpm_table->count; i > 0; i--) {
2750 mask_value = mask_value << 1;
2751 if (dpm_table->dpm_levels[i-1].enabled)
2752 mask_value |= 0x1;
2753 else
2754 mask_value &= 0xFFFFFFFE;
2755 }
2756
2757 return mask_value;
2758}
2759
2760static void ci_populate_smc_link_level(struct amdgpu_device *adev,
2761 SMU7_Discrete_DpmTable *table)
2762{
2763 struct ci_power_info *pi = ci_get_pi(adev);
2764 struct ci_dpm_table *dpm_table = &pi->dpm_table;
2765 u32 i;
2766
2767 for (i = 0; i < dpm_table->pcie_speed_table.count; i++) {
2768 table->LinkLevel[i].PcieGenSpeed =
2769 (u8)dpm_table->pcie_speed_table.dpm_levels[i].value;
2770 table->LinkLevel[i].PcieLaneCount =
2771 amdgpu_encode_pci_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
2772 table->LinkLevel[i].EnabledForActivity = 1;
2773 table->LinkLevel[i].DownT = cpu_to_be32(5);
2774 table->LinkLevel[i].UpT = cpu_to_be32(30);
2775 }
2776
2777 pi->smc_state_table.LinkLevelCount = (u8)dpm_table->pcie_speed_table.count;
2778 pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
2779 ci_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
2780}
2781
2782static int ci_populate_smc_uvd_level(struct amdgpu_device *adev,
2783 SMU7_Discrete_DpmTable *table)
2784{
2785 u32 count;
2786 struct atom_clock_dividers dividers;
2787 int ret = -EINVAL;
2788
2789 table->UvdLevelCount =
2790 adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count;
2791
2792 for (count = 0; count < table->UvdLevelCount; count++) {
2793 table->UvdLevel[count].VclkFrequency =
2794 adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].vclk;
2795 table->UvdLevel[count].DclkFrequency =
2796 adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].dclk;
2797 table->UvdLevel[count].MinVddc =
2798 adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2799 table->UvdLevel[count].MinVddcPhases = 1;
2800
2801 ret = amdgpu_atombios_get_clock_dividers(adev,
2802 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2803 table->UvdLevel[count].VclkFrequency, false, &dividers);
2804 if (ret)
2805 return ret;
2806
2807 table->UvdLevel[count].VclkDivider = (u8)dividers.post_divider;
2808
2809 ret = amdgpu_atombios_get_clock_dividers(adev,
2810 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2811 table->UvdLevel[count].DclkFrequency, false, &dividers);
2812 if (ret)
2813 return ret;
2814
2815 table->UvdLevel[count].DclkDivider = (u8)dividers.post_divider;
2816
2817 table->UvdLevel[count].VclkFrequency = cpu_to_be32(table->UvdLevel[count].VclkFrequency);
2818 table->UvdLevel[count].DclkFrequency = cpu_to_be32(table->UvdLevel[count].DclkFrequency);
2819 table->UvdLevel[count].MinVddc = cpu_to_be16(table->UvdLevel[count].MinVddc);
2820 }
2821
2822 return ret;
2823}
2824
2825static int ci_populate_smc_vce_level(struct amdgpu_device *adev,
2826 SMU7_Discrete_DpmTable *table)
2827{
2828 u32 count;
2829 struct atom_clock_dividers dividers;
2830 int ret = -EINVAL;
2831
2832 table->VceLevelCount =
2833 adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count;
2834
2835 for (count = 0; count < table->VceLevelCount; count++) {
2836 table->VceLevel[count].Frequency =
2837 adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].evclk;
2838 table->VceLevel[count].MinVoltage =
2839 (u16)adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2840 table->VceLevel[count].MinPhases = 1;
2841
2842 ret = amdgpu_atombios_get_clock_dividers(adev,
2843 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2844 table->VceLevel[count].Frequency, false, &dividers);
2845 if (ret)
2846 return ret;
2847
2848 table->VceLevel[count].Divider = (u8)dividers.post_divider;
2849
2850 table->VceLevel[count].Frequency = cpu_to_be32(table->VceLevel[count].Frequency);
2851 table->VceLevel[count].MinVoltage = cpu_to_be16(table->VceLevel[count].MinVoltage);
2852 }
2853
2854 return ret;
2855
2856}
2857
2858static int ci_populate_smc_acp_level(struct amdgpu_device *adev,
2859 SMU7_Discrete_DpmTable *table)
2860{
2861 u32 count;
2862 struct atom_clock_dividers dividers;
2863 int ret = -EINVAL;
2864
2865 table->AcpLevelCount = (u8)
2866 (adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count);
2867
2868 for (count = 0; count < table->AcpLevelCount; count++) {
2869 table->AcpLevel[count].Frequency =
2870 adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].clk;
2871 table->AcpLevel[count].MinVoltage =
2872 adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].v;
2873 table->AcpLevel[count].MinPhases = 1;
2874
2875 ret = amdgpu_atombios_get_clock_dividers(adev,
2876 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2877 table->AcpLevel[count].Frequency, false, &dividers);
2878 if (ret)
2879 return ret;
2880
2881 table->AcpLevel[count].Divider = (u8)dividers.post_divider;
2882
2883 table->AcpLevel[count].Frequency = cpu_to_be32(table->AcpLevel[count].Frequency);
2884 table->AcpLevel[count].MinVoltage = cpu_to_be16(table->AcpLevel[count].MinVoltage);
2885 }
2886
2887 return ret;
2888}
2889
2890static int ci_populate_smc_samu_level(struct amdgpu_device *adev,
2891 SMU7_Discrete_DpmTable *table)
2892{
2893 u32 count;
2894 struct atom_clock_dividers dividers;
2895 int ret = -EINVAL;
2896
2897 table->SamuLevelCount =
2898 adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count;
2899
2900 for (count = 0; count < table->SamuLevelCount; count++) {
2901 table->SamuLevel[count].Frequency =
2902 adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].clk;
2903 table->SamuLevel[count].MinVoltage =
2904 adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2905 table->SamuLevel[count].MinPhases = 1;
2906
2907 ret = amdgpu_atombios_get_clock_dividers(adev,
2908 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2909 table->SamuLevel[count].Frequency, false, &dividers);
2910 if (ret)
2911 return ret;
2912
2913 table->SamuLevel[count].Divider = (u8)dividers.post_divider;
2914
2915 table->SamuLevel[count].Frequency = cpu_to_be32(table->SamuLevel[count].Frequency);
2916 table->SamuLevel[count].MinVoltage = cpu_to_be16(table->SamuLevel[count].MinVoltage);
2917 }
2918
2919 return ret;
2920}
2921
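/*
 * Compute the MPLL register set for a target memory clock from the
 * AtomBIOS divider parameters, adding spread spectrum values when SS
 * data exists for the nominal frequency, plus DLL speed and
 * power-down control.
 */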
2922static int ci_calculate_mclk_params(struct amdgpu_device *adev,
2923 u32 memory_clock,
2924 SMU7_Discrete_MemoryLevel *mclk,
2925 bool strobe_mode,
2926 bool dll_state_on)
2927{
2928 struct ci_power_info *pi = ci_get_pi(adev);
2929 u32 dll_cntl = pi->clock_registers.dll_cntl;
2930 u32 mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
2931 u32 mpll_ad_func_cntl = pi->clock_registers.mpll_ad_func_cntl;
2932 u32 mpll_dq_func_cntl = pi->clock_registers.mpll_dq_func_cntl;
2933 u32 mpll_func_cntl = pi->clock_registers.mpll_func_cntl;
2934 u32 mpll_func_cntl_1 = pi->clock_registers.mpll_func_cntl_1;
2935 u32 mpll_func_cntl_2 = pi->clock_registers.mpll_func_cntl_2;
2936 u32 mpll_ss1 = pi->clock_registers.mpll_ss1;
2937 u32 mpll_ss2 = pi->clock_registers.mpll_ss2;
2938 struct atom_mpll_param mpll_param;
2939 int ret;
2940
2941 ret = amdgpu_atombios_get_memory_pll_dividers(adev, memory_clock, strobe_mode, &mpll_param);
2942 if (ret)
2943 return ret;
2944
2945 mpll_func_cntl &= ~MPLL_FUNC_CNTL__BWCTRL_MASK;
2946 mpll_func_cntl |= (mpll_param.bwcntl << MPLL_FUNC_CNTL__BWCTRL__SHIFT);
2947
2948 mpll_func_cntl_1 &= ~(MPLL_FUNC_CNTL_1__CLKF_MASK | MPLL_FUNC_CNTL_1__CLKFRAC_MASK |
2949 MPLL_FUNC_CNTL_1__VCO_MODE_MASK);
2950 mpll_func_cntl_1 |= (mpll_param.clkf) << MPLL_FUNC_CNTL_1__CLKF__SHIFT |
2951 (mpll_param.clkfrac << MPLL_FUNC_CNTL_1__CLKFRAC__SHIFT) |
2952 (mpll_param.vco_mode << MPLL_FUNC_CNTL_1__VCO_MODE__SHIFT);
2953
2954 mpll_ad_func_cntl &= ~MPLL_AD_FUNC_CNTL__YCLK_POST_DIV_MASK;
2955 mpll_ad_func_cntl |= (mpll_param.post_div << MPLL_AD_FUNC_CNTL__YCLK_POST_DIV__SHIFT);
2956
2957 if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
2958 mpll_dq_func_cntl &= ~(MPLL_DQ_FUNC_CNTL__YCLK_SEL_MASK |
2959 MPLL_AD_FUNC_CNTL__YCLK_POST_DIV_MASK);
2960 mpll_dq_func_cntl |= (mpll_param.yclk_sel << MPLL_DQ_FUNC_CNTL__YCLK_SEL__SHIFT) |
2961 (mpll_param.post_div << MPLL_AD_FUNC_CNTL__YCLK_POST_DIV__SHIFT);
2962 }
2963
2964 if (pi->caps_mclk_ss_support) {
2965 struct amdgpu_atom_ss ss;
2966 u32 freq_nom;
2967 u32 tmp;
2968 u32 reference_clock = adev->clock.mpll.reference_freq;
2969
2970 if (mpll_param.qdr == 1)
2971 freq_nom = memory_clock * 4 * (1 << mpll_param.post_div);
2972 else
2973 freq_nom = memory_clock * 2 * (1 << mpll_param.post_div);
2974
2975 tmp = (freq_nom / reference_clock);
2976 tmp = tmp * tmp;
2977 if (amdgpu_atombios_get_asic_ss_info(adev, &ss,
2978 ASIC_INTERNAL_MEMORY_SS, freq_nom)) {
2979 u32 clks = reference_clock * 5 / ss.rate;
2980 u32 clkv = (u32)((((131 * ss.percentage * ss.rate) / 100) * tmp) / freq_nom);
2981
2982 mpll_ss1 &= ~MPLL_SS1__CLKV_MASK;
2983 mpll_ss1 |= (clkv << MPLL_SS1__CLKV__SHIFT);
2984
2985 mpll_ss2 &= ~MPLL_SS2__CLKS_MASK;
2986 mpll_ss2 |= (clks << MPLL_SS2__CLKS__SHIFT);
2987 }
2988 }
2989
2990 mclk_pwrmgt_cntl &= ~MCLK_PWRMGT_CNTL__DLL_SPEED_MASK;
2991 mclk_pwrmgt_cntl |= (mpll_param.dll_speed << MCLK_PWRMGT_CNTL__DLL_SPEED__SHIFT);
2992
2993 if (dll_state_on)
2994 mclk_pwrmgt_cntl |= MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK |
2995 MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK;
2996 else
2997 mclk_pwrmgt_cntl &= ~(MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK |
2998 MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK);
2999
3000 mclk->MclkFrequency = memory_clock;
3001 mclk->MpllFuncCntl = mpll_func_cntl;
3002 mclk->MpllFuncCntl_1 = mpll_func_cntl_1;
3003 mclk->MpllFuncCntl_2 = mpll_func_cntl_2;
3004 mclk->MpllAdFuncCntl = mpll_ad_func_cntl;
3005 mclk->MpllDqFuncCntl = mpll_dq_func_cntl;
3006 mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl;
3007 mclk->DllCntl = dll_cntl;
3008 mclk->MpllSs1 = mpll_ss1;
3009 mclk->MpllSs2 = mpll_ss2;
3010
3011 return 0;
3012}
3013
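/*
 * Fill one SMC memory DPM level: minimum voltages from the dependency
 * tables, phase shedding, stutter/strobe/EDC enables based on the
 * configured clock thresholds, then the MPLL parameters, converting
 * multi-byte fields to the SMC's big-endian layout at the end.
 */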
3014static int ci_populate_single_memory_level(struct amdgpu_device *adev,
3015 u32 memory_clock,
3016 SMU7_Discrete_MemoryLevel *memory_level)
3017{
3018 struct ci_power_info *pi = ci_get_pi(adev);
3019 int ret;
3020 bool dll_state_on;
3021
3022 if (adev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries) {
3023 ret = ci_get_dependency_volt_by_clk(adev,
3024 &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
3025 memory_clock, &memory_level->MinVddc);
3026 if (ret)
3027 return ret;
3028 }
3029
3030 if (adev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries) {
3031 ret = ci_get_dependency_volt_by_clk(adev,
3032 &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
3033 memory_clock, &memory_level->MinVddci);
3034 if (ret)
3035 return ret;
3036 }
3037
3038 if (adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries) {
3039 ret = ci_get_dependency_volt_by_clk(adev,
3040 &adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
3041 memory_clock, &memory_level->MinMvdd);
3042 if (ret)
3043 return ret;
3044 }
3045
3046 memory_level->MinVddcPhases = 1;
3047
3048 if (pi->vddc_phase_shed_control)
3049 ci_populate_phase_value_based_on_mclk(adev,
3050 &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
3051 memory_clock,
3052 &memory_level->MinVddcPhases);
3053
3054 memory_level->EnabledForActivity = 1;
3055 memory_level->EnabledForThrottle = 1;
3056 memory_level->UpH = 0;
3057 memory_level->DownH = 100;
3058 memory_level->VoltageDownH = 0;
3059 memory_level->ActivityLevel = (u16)pi->mclk_activity_target;
3060
3061 memory_level->StutterEnable = false;
3062 memory_level->StrobeEnable = false;
3063 memory_level->EdcReadEnable = false;
3064 memory_level->EdcWriteEnable = false;
3065 memory_level->RttEnable = false;
3066
3067 memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
3068
3069 if (pi->mclk_stutter_mode_threshold &&
3070 (memory_clock <= pi->mclk_stutter_mode_threshold) &&
3071 (!pi->uvd_enabled) &&
3072 (RREG32(mmDPG_PIPE_STUTTER_CONTROL) & DPG_PIPE_STUTTER_CONTROL__STUTTER_ENABLE_MASK) &&
3073 (adev->pm.dpm.new_active_crtc_count <= 2))
3074 memory_level->StutterEnable = true;
3075
3076 if (pi->mclk_strobe_mode_threshold &&
3077 (memory_clock <= pi->mclk_strobe_mode_threshold))
3078 memory_level->StrobeEnable = 1;
3079
3080 if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
3081 memory_level->StrobeRatio =
3082 ci_get_mclk_frequency_ratio(memory_clock, memory_level->StrobeEnable);
3083 if (pi->mclk_edc_enable_threshold &&
3084 (memory_clock > pi->mclk_edc_enable_threshold))
3085 memory_level->EdcReadEnable = true;
3086
3087 if (pi->mclk_edc_wr_enable_threshold &&
3088 (memory_clock > pi->mclk_edc_wr_enable_threshold))
3089 memory_level->EdcWriteEnable = true;
3090
3091 if (memory_level->StrobeEnable) {
3092 if (ci_get_mclk_frequency_ratio(memory_clock, true) >=
3093 ((RREG32(mmMC_SEQ_MISC7) >> 16) & 0xf))
3094 dll_state_on = ((RREG32(mmMC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
3095 else
3096 dll_state_on = ((RREG32(mmMC_SEQ_MISC6) >> 1) & 0x1) ? true : false;
3097 } else {
3098 dll_state_on = pi->dll_default_on;
3099 }
3100 } else {
3101 memory_level->StrobeRatio = ci_get_ddr3_mclk_frequency_ratio(memory_clock);
3102 dll_state_on = ((RREG32(mmMC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
3103 }
3104
3105 ret = ci_calculate_mclk_params(adev, memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);
3106 if (ret)
3107 return ret;
3108
3109 memory_level->MinVddc = cpu_to_be32(memory_level->MinVddc * VOLTAGE_SCALE);
3110 memory_level->MinVddcPhases = cpu_to_be32(memory_level->MinVddcPhases);
3111 memory_level->MinVddci = cpu_to_be32(memory_level->MinVddci * VOLTAGE_SCALE);
3112 memory_level->MinMvdd = cpu_to_be32(memory_level->MinMvdd * VOLTAGE_SCALE);
3113
3114 memory_level->MclkFrequency = cpu_to_be32(memory_level->MclkFrequency);
3115 memory_level->ActivityLevel = cpu_to_be16(memory_level->ActivityLevel);
3116 memory_level->MpllFuncCntl = cpu_to_be32(memory_level->MpllFuncCntl);
3117 memory_level->MpllFuncCntl_1 = cpu_to_be32(memory_level->MpllFuncCntl_1);
3118 memory_level->MpllFuncCntl_2 = cpu_to_be32(memory_level->MpllFuncCntl_2);
3119 memory_level->MpllAdFuncCntl = cpu_to_be32(memory_level->MpllAdFuncCntl);
3120 memory_level->MpllDqFuncCntl = cpu_to_be32(memory_level->MpllDqFuncCntl);
3121 memory_level->MclkPwrmgtCntl = cpu_to_be32(memory_level->MclkPwrmgtCntl);
3122 memory_level->DllCntl = cpu_to_be32(memory_level->DllCntl);
3123 memory_level->MpllSs1 = cpu_to_be32(memory_level->MpllSs1);
3124 memory_level->MpllSs2 = cpu_to_be32(memory_level->MpllSs2);
3125
3126 return 0;
3127}
3128
3129static int ci_populate_smc_acpi_level(struct amdgpu_device *adev,
3130 SMU7_Discrete_DpmTable *table)
3131{
3132 struct ci_power_info *pi = ci_get_pi(adev);
3133 struct atom_clock_dividers dividers;
3134 SMU7_Discrete_VoltageLevel voltage_level;
3135 u32 spll_func_cntl = pi->clock_registers.cg_spll_func_cntl;
3136 u32 spll_func_cntl_2 = pi->clock_registers.cg_spll_func_cntl_2;
3137 u32 dll_cntl = pi->clock_registers.dll_cntl;
3138 u32 mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
3139 int ret;
3140
3141 table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
3142
3143 if (pi->acpi_vddc)
3144 table->ACPILevel.MinVddc = cpu_to_be32(pi->acpi_vddc * VOLTAGE_SCALE);
3145 else
3146 table->ACPILevel.MinVddc = cpu_to_be32(pi->min_vddc_in_pp_table * VOLTAGE_SCALE);
3147
3148 table->ACPILevel.MinVddcPhases = pi->vddc_phase_shed_control ? 0 : 1;
3149
3150 table->ACPILevel.SclkFrequency = adev->clock.spll.reference_freq;
3151
3152 ret = amdgpu_atombios_get_clock_dividers(adev,
3153 COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
3154 table->ACPILevel.SclkFrequency, false, &dividers);
3155 if (ret)
3156 return ret;
3157
3158 table->ACPILevel.SclkDid = (u8)dividers.post_divider;
3159 table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
3160 table->ACPILevel.DeepSleepDivId = 0;
3161
3162 spll_func_cntl &= ~CG_SPLL_FUNC_CNTL__SPLL_PWRON_MASK;
3163 spll_func_cntl |= CG_SPLL_FUNC_CNTL__SPLL_RESET_MASK;
3164
3165 spll_func_cntl_2 &= ~CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL_MASK;
3166 spll_func_cntl_2 |= (4 << CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL__SHIFT);
3167
3168 table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
3169 table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
3170 table->ACPILevel.CgSpllFuncCntl3 = pi->clock_registers.cg_spll_func_cntl_3;
3171 table->ACPILevel.CgSpllFuncCntl4 = pi->clock_registers.cg_spll_func_cntl_4;
3172 table->ACPILevel.SpllSpreadSpectrum = pi->clock_registers.cg_spll_spread_spectrum;
3173 table->ACPILevel.SpllSpreadSpectrum2 = pi->clock_registers.cg_spll_spread_spectrum_2;
3174 table->ACPILevel.CcPwrDynRm = 0;
3175 table->ACPILevel.CcPwrDynRm1 = 0;
3176
3177 table->ACPILevel.Flags = cpu_to_be32(table->ACPILevel.Flags);
3178 table->ACPILevel.MinVddcPhases = cpu_to_be32(table->ACPILevel.MinVddcPhases);
3179 table->ACPILevel.SclkFrequency = cpu_to_be32(table->ACPILevel.SclkFrequency);
3180 table->ACPILevel.CgSpllFuncCntl = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl);
3181 table->ACPILevel.CgSpllFuncCntl2 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl2);
3182 table->ACPILevel.CgSpllFuncCntl3 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl3);
3183 table->ACPILevel.CgSpllFuncCntl4 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl4);
3184 table->ACPILevel.SpllSpreadSpectrum = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum);
3185 table->ACPILevel.SpllSpreadSpectrum2 = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum2);
3186 table->ACPILevel.CcPwrDynRm = cpu_to_be32(table->ACPILevel.CcPwrDynRm);
3187 table->ACPILevel.CcPwrDynRm1 = cpu_to_be32(table->ACPILevel.CcPwrDynRm1);
3188
3189 table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc;
3190 table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;
3191
3192 if (pi->vddci_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
3193 if (pi->acpi_vddci)
3194 table->MemoryACPILevel.MinVddci =
3195 cpu_to_be32(pi->acpi_vddci * VOLTAGE_SCALE);
3196 else
3197 table->MemoryACPILevel.MinVddci =
3198 cpu_to_be32(pi->min_vddci_in_pp_table * VOLTAGE_SCALE);
3199 }
3200
3201 if (ci_populate_mvdd_value(adev, 0, &voltage_level))
3202 table->MemoryACPILevel.MinMvdd = 0;
3203 else
3204 table->MemoryACPILevel.MinMvdd =
3205 cpu_to_be32(voltage_level.Voltage * VOLTAGE_SCALE);
3206
3207 mclk_pwrmgt_cntl |= MCLK_PWRMGT_CNTL__MRDCK0_RESET_MASK |
3208 MCLK_PWRMGT_CNTL__MRDCK1_RESET_MASK;
3209 mclk_pwrmgt_cntl &= ~(MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK |
3210 MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK);
3211
3212 dll_cntl &= ~(DLL_CNTL__MRDCK0_BYPASS_MASK | DLL_CNTL__MRDCK1_BYPASS_MASK);
3213
3214 table->MemoryACPILevel.DllCntl = cpu_to_be32(dll_cntl);
3215 table->MemoryACPILevel.MclkPwrmgtCntl = cpu_to_be32(mclk_pwrmgt_cntl);
3216 table->MemoryACPILevel.MpllAdFuncCntl =
3217 cpu_to_be32(pi->clock_registers.mpll_ad_func_cntl);
3218 table->MemoryACPILevel.MpllDqFuncCntl =
3219 cpu_to_be32(pi->clock_registers.mpll_dq_func_cntl);
3220 table->MemoryACPILevel.MpllFuncCntl =
3221 cpu_to_be32(pi->clock_registers.mpll_func_cntl);
3222 table->MemoryACPILevel.MpllFuncCntl_1 =
3223 cpu_to_be32(pi->clock_registers.mpll_func_cntl_1);
3224 table->MemoryACPILevel.MpllFuncCntl_2 =
3225 cpu_to_be32(pi->clock_registers.mpll_func_cntl_2);
3226 table->MemoryACPILevel.MpllSs1 = cpu_to_be32(pi->clock_registers.mpll_ss1);
3227 table->MemoryACPILevel.MpllSs2 = cpu_to_be32(pi->clock_registers.mpll_ss2);
3228
3229 table->MemoryACPILevel.EnabledForThrottle = 0;
3230 table->MemoryACPILevel.EnabledForActivity = 0;
3231 table->MemoryACPILevel.UpH = 0;
3232 table->MemoryACPILevel.DownH = 100;
3233 table->MemoryACPILevel.VoltageDownH = 0;
3234 table->MemoryACPILevel.ActivityLevel =
3235 cpu_to_be16((u16)pi->mclk_activity_target);
3236
3237 table->MemoryACPILevel.StutterEnable = false;
3238 table->MemoryACPILevel.StrobeEnable = false;
3239 table->MemoryACPILevel.EdcReadEnable = false;
3240 table->MemoryACPILevel.EdcWriteEnable = false;
3241 table->MemoryACPILevel.RttEnable = false;
3242
3243 return 0;
3244}
3245
3246
3247static int ci_enable_ulv(struct amdgpu_device *adev, bool enable)
3248{
3249 struct ci_power_info *pi = ci_get_pi(adev);
3250 struct ci_ulv_parm *ulv = &pi->ulv;
3251
3252 if (ulv->supported) {
3253 if (enable)
3254 return (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableULV) == PPSMC_Result_OK) ?
3255 0 : -EINVAL;
3256 else
3257 return (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DisableULV) == PPSMC_Result_OK) ?
3258 0 : -EINVAL;
3259 }
3260
3261 return 0;
3262}
3263
3264static int ci_populate_ulv_level(struct amdgpu_device *adev,
3265 SMU7_Discrete_Ulv *state)
3266{
3267 struct ci_power_info *pi = ci_get_pi(adev);
3268 u16 ulv_voltage = adev->pm.dpm.backbias_response_time;
3269
3270 state->CcPwrDynRm = 0;
3271 state->CcPwrDynRm1 = 0;
3272
3273 if (ulv_voltage == 0) {
3274 pi->ulv.supported = false;
3275 return 0;
3276 }
3277
3278 if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
3279 if (ulv_voltage > adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
3280 state->VddcOffset = 0;
3281 else
3282 state->VddcOffset =
3283 adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage;
3284 } else {
3285 if (ulv_voltage > adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
3286 state->VddcOffsetVid = 0;
3287 else
3288 state->VddcOffsetVid = (u8)
3289 ((adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage) *
3290 VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
3291 }
3292 state->VddcPhase = pi->vddc_phase_shed_control ? 0 : 1;
3293
3294 state->CcPwrDynRm = cpu_to_be32(state->CcPwrDynRm);
3295 state->CcPwrDynRm1 = cpu_to_be32(state->CcPwrDynRm1);
3296 state->VddcOffset = cpu_to_be16(state->VddcOffset);
3297
3298 return 0;
3299}
3300
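/*
 * Compute the SPLL register set for a target engine clock: feedback
 * divider from AtomBIOS, plus CLK_S/CLK_V spread spectrum values
 * when SS is supported and defined for the resulting VCO frequency.
 */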
static int ci_calculate_sclk_params(struct amdgpu_device *adev,
				    u32 engine_clock,
				    SMU7_Discrete_GraphicsLevel *sclk)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct atom_clock_dividers dividers;
	u32 spll_func_cntl_3 = pi->clock_registers.cg_spll_func_cntl_3;
	u32 spll_func_cntl_4 = pi->clock_registers.cg_spll_func_cntl_4;
	u32 cg_spll_spread_spectrum = pi->clock_registers.cg_spll_spread_spectrum;
	u32 cg_spll_spread_spectrum_2 = pi->clock_registers.cg_spll_spread_spectrum_2;
	u32 reference_clock = adev->clock.spll.reference_freq;
	u32 reference_divider;
	u32 fbdiv;
	int ret;

	ret = amdgpu_atombios_get_clock_dividers(adev,
						 COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
						 engine_clock, false, &dividers);
	if (ret)
		return ret;

	reference_divider = 1 + dividers.ref_div;
	fbdiv = dividers.fb_div & 0x3FFFFFF;

	spll_func_cntl_3 &= ~CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV_MASK;
	spll_func_cntl_3 |= (fbdiv << CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV__SHIFT);
	spll_func_cntl_3 |= CG_SPLL_FUNC_CNTL_3__SPLL_DITHEN_MASK;

	if (pi->caps_sclk_ss_support) {
		struct amdgpu_atom_ss ss;
		u32 vco_freq = engine_clock * dividers.post_div;

		if (amdgpu_atombios_get_asic_ss_info(adev, &ss,
						     ASIC_INTERNAL_ENGINE_SS, vco_freq)) {
			u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
			u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000);

			cg_spll_spread_spectrum &= ~(CG_SPLL_SPREAD_SPECTRUM__CLKS_MASK | CG_SPLL_SPREAD_SPECTRUM__SSEN_MASK);
			cg_spll_spread_spectrum |= (clk_s << CG_SPLL_SPREAD_SPECTRUM__CLKS__SHIFT);
			cg_spll_spread_spectrum |= (1 << CG_SPLL_SPREAD_SPECTRUM__SSEN__SHIFT);

			cg_spll_spread_spectrum_2 &= ~CG_SPLL_SPREAD_SPECTRUM_2__CLKV_MASK;
			cg_spll_spread_spectrum_2 |= (clk_v << CG_SPLL_SPREAD_SPECTRUM_2__CLKV__SHIFT);
		}
	}

	sclk->SclkFrequency = engine_clock;
	sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
	sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
	sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
	sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
	sclk->SclkDid = (u8)dividers.post_divider;

	return 0;
}

static int ci_populate_single_graphic_level(struct amdgpu_device *adev,
					    u32 engine_clock,
					    u16 sclk_activity_level_t,
					    SMU7_Discrete_GraphicsLevel *graphic_level)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	int ret;

	ret = ci_calculate_sclk_params(adev, engine_clock, graphic_level);
	if (ret)
		return ret;

	ret = ci_get_dependency_volt_by_clk(adev,
					    &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
					    engine_clock, &graphic_level->MinVddc);
	if (ret)
		return ret;

	graphic_level->SclkFrequency = engine_clock;

	graphic_level->Flags = 0;
	graphic_level->MinVddcPhases = 1;

	if (pi->vddc_phase_shed_control)
		ci_populate_phase_value_based_on_sclk(adev,
						      &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
						      engine_clock,
						      &graphic_level->MinVddcPhases);

	graphic_level->ActivityLevel = sclk_activity_level_t;

	graphic_level->CcPwrDynRm = 0;
	graphic_level->CcPwrDynRm1 = 0;
	graphic_level->EnabledForThrottle = 1;
	graphic_level->UpH = 0;
	graphic_level->DownH = 0;
	graphic_level->VoltageDownH = 0;
	graphic_level->PowerThrottle = 0;

	if (pi->caps_sclk_ds)
		graphic_level->DeepSleepDivId = ci_get_sleep_divider_id_from_clock(engine_clock,
										   CISLAND_MINIMUM_ENGINE_CLOCK);

	graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;

	graphic_level->Flags = cpu_to_be32(graphic_level->Flags);
	graphic_level->MinVddc = cpu_to_be32(graphic_level->MinVddc * VOLTAGE_SCALE);
	graphic_level->MinVddcPhases = cpu_to_be32(graphic_level->MinVddcPhases);
	graphic_level->SclkFrequency = cpu_to_be32(graphic_level->SclkFrequency);
	graphic_level->ActivityLevel = cpu_to_be16(graphic_level->ActivityLevel);
	graphic_level->CgSpllFuncCntl3 = cpu_to_be32(graphic_level->CgSpllFuncCntl3);
	graphic_level->CgSpllFuncCntl4 = cpu_to_be32(graphic_level->CgSpllFuncCntl4);
	graphic_level->SpllSpreadSpectrum = cpu_to_be32(graphic_level->SpllSpreadSpectrum);
	graphic_level->SpllSpreadSpectrum2 = cpu_to_be32(graphic_level->SpllSpreadSpectrum2);
	graphic_level->CcPwrDynRm = cpu_to_be32(graphic_level->CcPwrDynRm);
	graphic_level->CcPwrDynRm1 = cpu_to_be32(graphic_level->CcPwrDynRm1);

	return 0;
}

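/* Build the full graphics (sclk) DPM level array and upload it to SMC
 * SRAM.  Deep sleep dividers are only honoured on the two lowest levels,
 * and the top level carries the high display watermark.
 */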
static int ci_populate_all_graphic_levels(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_dpm_table *dpm_table = &pi->dpm_table;
	u32 level_array_address = pi->dpm_table_start +
		offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
	u32 level_array_size = sizeof(SMU7_Discrete_GraphicsLevel) *
		SMU7_MAX_LEVELS_GRAPHICS;
	SMU7_Discrete_GraphicsLevel *levels = pi->smc_state_table.GraphicsLevel;
	u32 i, ret;

	memset(levels, 0, level_array_size);

	for (i = 0; i < dpm_table->sclk_table.count; i++) {
		ret = ci_populate_single_graphic_level(adev,
						       dpm_table->sclk_table.dpm_levels[i].value,
						       (u16)pi->activity_target[i],
						       &pi->smc_state_table.GraphicsLevel[i]);
		if (ret)
			return ret;
		if (i > 1)
			pi->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
		if (i == (dpm_table->sclk_table.count - 1))
			pi->smc_state_table.GraphicsLevel[i].DisplayWatermark =
				PPSMC_DISPLAY_WATERMARK_HIGH;
	}
	pi->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;

	pi->smc_state_table.GraphicsDpmLevelCount = (u8)dpm_table->sclk_table.count;
	pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
		ci_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);

	ret = amdgpu_ci_copy_bytes_to_smc(adev, level_array_address,
					  (u8 *)levels, level_array_size,
					  pi->sram_end);
	if (ret)
		return ret;

	return 0;
}

static int ci_populate_ulv_state(struct amdgpu_device *adev,
				 SMU7_Discrete_Ulv *ulv_level)
{
	return ci_populate_ulv_level(adev, ulv_level);
}

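/* Build the memory (mclk) DPM level array and upload it to SMC SRAM.
 * On Hawaii (device ids 0x67B0/0x67B1) with at least two levels, level 1
 * inherits the level 0 voltage, apparently as a board-specific workaround.
 */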
static int ci_populate_all_memory_levels(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_dpm_table *dpm_table = &pi->dpm_table;
	u32 level_array_address = pi->dpm_table_start +
		offsetof(SMU7_Discrete_DpmTable, MemoryLevel);
	u32 level_array_size = sizeof(SMU7_Discrete_MemoryLevel) *
		SMU7_MAX_LEVELS_MEMORY;
	SMU7_Discrete_MemoryLevel *levels = pi->smc_state_table.MemoryLevel;
	u32 i, ret;

	memset(levels, 0, level_array_size);

	for (i = 0; i < dpm_table->mclk_table.count; i++) {
		if (dpm_table->mclk_table.dpm_levels[i].value == 0)
			return -EINVAL;
		ret = ci_populate_single_memory_level(adev,
						      dpm_table->mclk_table.dpm_levels[i].value,
						      &pi->smc_state_table.MemoryLevel[i]);
		if (ret)
			return ret;
	}

	if ((dpm_table->mclk_table.count >= 2) &&
	    ((adev->pdev->device == 0x67B0) || (adev->pdev->device == 0x67B1))) {
		pi->smc_state_table.MemoryLevel[1].MinVddc =
			pi->smc_state_table.MemoryLevel[0].MinVddc;
		pi->smc_state_table.MemoryLevel[1].MinVddcPhases =
			pi->smc_state_table.MemoryLevel[0].MinVddcPhases;
	}

	pi->smc_state_table.MemoryLevel[0].ActivityLevel = cpu_to_be16(0x1F);

	pi->smc_state_table.MemoryDpmLevelCount = (u8)dpm_table->mclk_table.count;
	pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
		ci_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);

	pi->smc_state_table.MemoryLevel[dpm_table->mclk_table.count - 1].DisplayWatermark =
		PPSMC_DISPLAY_WATERMARK_HIGH;

	ret = amdgpu_ci_copy_bytes_to_smc(adev, level_array_address,
					  (u8 *)levels, level_array_size,
					  pi->sram_end);
	if (ret)
		return ret;

	return 0;
}

static void ci_reset_single_dpm_table(struct amdgpu_device *adev,
				      struct ci_single_dpm_table *dpm_table,
				      u32 count)
{
	u32 i;

	dpm_table->count = count;
	for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++)
		dpm_table->dpm_levels[i].enabled = false;
}

static void ci_setup_pcie_table_entry(struct ci_single_dpm_table *dpm_table,
				      u32 index, u32 pcie_gen, u32 pcie_lanes)
{
	dpm_table->dpm_levels[index].value = pcie_gen;
	dpm_table->dpm_levels[index].param1 = pcie_lanes;
	dpm_table->dpm_levels[index].enabled = true;
}

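/* Populate the fixed six-entry default PCIe DPM table from the
 * power-saving and performance gen/lane limits.  If only one set of
 * limits is valid it is mirrored into the other; Bonaire uses the
 * maximum lane width for the first (bootup) entry.
 */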
static int ci_setup_default_pcie_tables(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);

	if (!pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels)
		return -EINVAL;

	if (pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels) {
		pi->pcie_gen_powersaving = pi->pcie_gen_performance;
		pi->pcie_lane_powersaving = pi->pcie_lane_performance;
	} else if (!pi->use_pcie_performance_levels && pi->use_pcie_powersaving_levels) {
		pi->pcie_gen_performance = pi->pcie_gen_powersaving;
		pi->pcie_lane_performance = pi->pcie_lane_powersaving;
	}

	ci_reset_single_dpm_table(adev,
				  &pi->dpm_table.pcie_speed_table,
				  SMU7_MAX_LEVELS_LINK);

	if (adev->asic_type == CHIP_BONAIRE)
		ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
					  pi->pcie_gen_powersaving.min,
					  pi->pcie_lane_powersaving.max);
	else
		ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
					  pi->pcie_gen_powersaving.min,
					  pi->pcie_lane_powersaving.min);
	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 1,
				  pi->pcie_gen_performance.min,
				  pi->pcie_lane_performance.min);
	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 2,
				  pi->pcie_gen_powersaving.min,
				  pi->pcie_lane_powersaving.max);
	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 3,
				  pi->pcie_gen_performance.min,
				  pi->pcie_lane_performance.max);
	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 4,
				  pi->pcie_gen_powersaving.max,
				  pi->pcie_lane_powersaving.max);
	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 5,
				  pi->pcie_gen_performance.max,
				  pi->pcie_lane_performance.max);

	pi->dpm_table.pcie_speed_table.count = 6;

	return 0;
}

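/* Derive the default DPM tables (sclk, mclk, vddc, vddci, mvdd, PCIe)
 * from the VBIOS clock/voltage dependency tables, de-duplicating
 * consecutive identical clocks, and keep a "golden" copy that can be
 * used to restore defaults later.
 */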
static int ci_setup_default_dpm_tables(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct amdgpu_clock_voltage_dependency_table *allowed_sclk_vddc_table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
	struct amdgpu_clock_voltage_dependency_table *allowed_mclk_table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
	struct amdgpu_cac_leakage_table *std_voltage_table =
		&adev->pm.dpm.dyn_state.cac_leakage_table;
	u32 i;

	if (allowed_sclk_vddc_table == NULL)
		return -EINVAL;
	if (allowed_sclk_vddc_table->count < 1)
		return -EINVAL;
	if (allowed_mclk_table == NULL)
		return -EINVAL;
	if (allowed_mclk_table->count < 1)
		return -EINVAL;

	memset(&pi->dpm_table, 0, sizeof(struct ci_dpm_table));

	ci_reset_single_dpm_table(adev,
				  &pi->dpm_table.sclk_table,
				  SMU7_MAX_LEVELS_GRAPHICS);
	ci_reset_single_dpm_table(adev,
				  &pi->dpm_table.mclk_table,
				  SMU7_MAX_LEVELS_MEMORY);
	ci_reset_single_dpm_table(adev,
				  &pi->dpm_table.vddc_table,
				  SMU7_MAX_LEVELS_VDDC);
	ci_reset_single_dpm_table(adev,
				  &pi->dpm_table.vddci_table,
				  SMU7_MAX_LEVELS_VDDCI);
	ci_reset_single_dpm_table(adev,
				  &pi->dpm_table.mvdd_table,
				  SMU7_MAX_LEVELS_MVDD);

	pi->dpm_table.sclk_table.count = 0;
	for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
		if ((i == 0) ||
		    (pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count-1].value !=
		     allowed_sclk_vddc_table->entries[i].clk)) {
			pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].value =
				allowed_sclk_vddc_table->entries[i].clk;
			pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].enabled =
				(i == 0);
			pi->dpm_table.sclk_table.count++;
		}
	}

	pi->dpm_table.mclk_table.count = 0;
	for (i = 0; i < allowed_mclk_table->count; i++) {
		if ((i == 0) ||
		    (pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count-1].value !=
		     allowed_mclk_table->entries[i].clk)) {
			pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].value =
				allowed_mclk_table->entries[i].clk;
			pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].enabled =
				(i == 0);
			pi->dpm_table.mclk_table.count++;
		}
	}

	for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
		pi->dpm_table.vddc_table.dpm_levels[i].value =
			allowed_sclk_vddc_table->entries[i].v;
		pi->dpm_table.vddc_table.dpm_levels[i].param1 =
			std_voltage_table->entries[i].leakage;
		pi->dpm_table.vddc_table.dpm_levels[i].enabled = true;
	}
	pi->dpm_table.vddc_table.count = allowed_sclk_vddc_table->count;

	allowed_mclk_table = &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
	if (allowed_mclk_table) {
		for (i = 0; i < allowed_mclk_table->count; i++) {
			pi->dpm_table.vddci_table.dpm_levels[i].value =
				allowed_mclk_table->entries[i].v;
			pi->dpm_table.vddci_table.dpm_levels[i].enabled = true;
		}
		pi->dpm_table.vddci_table.count = allowed_mclk_table->count;
	}

	allowed_mclk_table = &adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk;
	if (allowed_mclk_table) {
		for (i = 0; i < allowed_mclk_table->count; i++) {
			pi->dpm_table.mvdd_table.dpm_levels[i].value =
				allowed_mclk_table->entries[i].v;
			pi->dpm_table.mvdd_table.dpm_levels[i].enabled = true;
		}
		pi->dpm_table.mvdd_table.count = allowed_mclk_table->count;
	}

	ci_setup_default_pcie_tables(adev);

	/* save a copy of the default DPM table */
	memcpy(&(pi->golden_dpm_table), &(pi->dpm_table),
	       sizeof(struct ci_dpm_table));

	return 0;
}

static int ci_find_boot_level(struct ci_single_dpm_table *table,
			      u32 value, u32 *boot_level)
{
	u32 i;
	int ret = -EINVAL;

	for (i = 0; i < table->count; i++) {
		if (value == table->dpm_levels[i].value) {
			*boot_level = i;
			ret = 0;
		}
	}

	return ret;
}

static void ci_save_default_power_profile(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct SMU7_Discrete_GraphicsLevel *levels =
		pi->smc_state_table.GraphicsLevel;
	uint32_t min_level = 0;

	pi->default_gfx_power_profile.activity_threshold =
		be16_to_cpu(levels[0].ActivityLevel);
	pi->default_gfx_power_profile.up_hyst = levels[0].UpH;
	pi->default_gfx_power_profile.down_hyst = levels[0].DownH;
	pi->default_gfx_power_profile.type = AMD_PP_GFX_PROFILE;

	pi->default_compute_power_profile = pi->default_gfx_power_profile;
	pi->default_compute_power_profile.type = AMD_PP_COMPUTE_PROFILE;

	/* Optimize compute power profile: Use only highest
	 * 2 power levels (if more than 2 are available), Hysteresis:
	 * 0ms up, 5ms down
	 */
	if (pi->smc_state_table.GraphicsDpmLevelCount > 2)
		min_level = pi->smc_state_table.GraphicsDpmLevelCount - 2;
	else if (pi->smc_state_table.GraphicsDpmLevelCount == 2)
		min_level = 1;
	pi->default_compute_power_profile.min_sclk =
		be32_to_cpu(levels[min_level].SclkFrequency);

	pi->default_compute_power_profile.up_hyst = 0;
	pi->default_compute_power_profile.down_hyst = 5;

	pi->gfx_power_profile = pi->default_gfx_power_profile;
	pi->compute_power_profile = pi->default_compute_power_profile;
}

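/* Top-level SMC state-table initialisation: builds the default DPM
 * tables, populates the voltage/graphics/memory/link/ACPI and boot
 * levels, fixes up endianness, and writes the table into SMC SRAM
 * (the copy presumably stops short of the trailing PID controller
 * fields, hence the size adjustment below).
 */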
static int ci_init_smc_table(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_ulv_parm *ulv = &pi->ulv;
	struct amdgpu_ps *amdgpu_boot_state = adev->pm.dpm.boot_ps;
	SMU7_Discrete_DpmTable *table = &pi->smc_state_table;
	int ret;

	ret = ci_setup_default_dpm_tables(adev);
	if (ret)
		return ret;

	if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE)
		ci_populate_smc_voltage_tables(adev, table);

	ci_init_fps_limits(adev);

	if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC)
		table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;

	if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
		table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;

	if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
		table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;

	if (ulv->supported) {
		ret = ci_populate_ulv_state(adev, &pi->smc_state_table.Ulv);
		if (ret)
			return ret;
		WREG32_SMC(ixCG_ULV_PARAMETER, ulv->cg_ulv_parameter);
	}

	ret = ci_populate_all_graphic_levels(adev);
	if (ret)
		return ret;

	ret = ci_populate_all_memory_levels(adev);
	if (ret)
		return ret;

	ci_populate_smc_link_level(adev, table);

	ret = ci_populate_smc_acpi_level(adev, table);
	if (ret)
		return ret;

	ret = ci_populate_smc_vce_level(adev, table);
	if (ret)
		return ret;

	ret = ci_populate_smc_acp_level(adev, table);
	if (ret)
		return ret;

	ret = ci_populate_smc_samu_level(adev, table);
	if (ret)
		return ret;

	ret = ci_do_program_memory_timing_parameters(adev);
	if (ret)
		return ret;

	ret = ci_populate_smc_uvd_level(adev, table);
	if (ret)
		return ret;

	table->UvdBootLevel = 0;
	table->VceBootLevel = 0;
	table->AcpBootLevel = 0;
	table->SamuBootLevel = 0;
	table->GraphicsBootLevel = 0;
	table->MemoryBootLevel = 0;

	ret = ci_find_boot_level(&pi->dpm_table.sclk_table,
				 pi->vbios_boot_state.sclk_bootup_value,
				 (u32 *)&pi->smc_state_table.GraphicsBootLevel);

	ret = ci_find_boot_level(&pi->dpm_table.mclk_table,
				 pi->vbios_boot_state.mclk_bootup_value,
				 (u32 *)&pi->smc_state_table.MemoryBootLevel);

	table->BootVddc = pi->vbios_boot_state.vddc_bootup_value;
	table->BootVddci = pi->vbios_boot_state.vddci_bootup_value;
	table->BootMVdd = pi->vbios_boot_state.mvdd_bootup_value;

	ci_populate_smc_initial_state(adev, amdgpu_boot_state);

	ret = ci_populate_bapm_parameters_in_dpm_table(adev);
	if (ret)
		return ret;

	table->UVDInterval = 1;
	table->VCEInterval = 1;
	table->ACPInterval = 1;
	table->SAMUInterval = 1;
	table->GraphicsVoltageChangeEnable = 1;
	table->GraphicsThermThrottleEnable = 1;
	table->GraphicsInterval = 1;
	table->VoltageInterval = 1;
	table->ThermalInterval = 1;
	table->TemperatureLimitHigh = (u16)((pi->thermal_temp_setting.temperature_high *
					     CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
	table->TemperatureLimitLow = (u16)((pi->thermal_temp_setting.temperature_low *
					    CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
	table->MemoryVoltageChangeEnable = 1;
	table->MemoryInterval = 1;
	table->VoltageResponseTime = 0;
	table->VddcVddciDelta = 4000;
	table->PhaseResponseTime = 0;
	table->MemoryThermThrottleEnable = 1;
	table->PCIeBootLinkLevel = pi->dpm_table.pcie_speed_table.count - 1;
	table->PCIeGenInterval = 1;
	if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2)
		table->SVI2Enable = 1;
	else
		table->SVI2Enable = 0;

	table->ThermGpio = 17;
	table->SclkStepSize = 0x4000;

	table->SystemFlags = cpu_to_be32(table->SystemFlags);
	table->SmioMaskVddcVid = cpu_to_be32(table->SmioMaskVddcVid);
	table->SmioMaskVddcPhase = cpu_to_be32(table->SmioMaskVddcPhase);
	table->SmioMaskVddciVid = cpu_to_be32(table->SmioMaskVddciVid);
	table->SmioMaskMvddVid = cpu_to_be32(table->SmioMaskMvddVid);
	table->SclkStepSize = cpu_to_be32(table->SclkStepSize);
	table->TemperatureLimitHigh = cpu_to_be16(table->TemperatureLimitHigh);
	table->TemperatureLimitLow = cpu_to_be16(table->TemperatureLimitLow);
	table->VddcVddciDelta = cpu_to_be16(table->VddcVddciDelta);
	table->VoltageResponseTime = cpu_to_be16(table->VoltageResponseTime);
	table->PhaseResponseTime = cpu_to_be16(table->PhaseResponseTime);
	table->BootVddc = cpu_to_be16(table->BootVddc * VOLTAGE_SCALE);
	table->BootVddci = cpu_to_be16(table->BootVddci * VOLTAGE_SCALE);
	table->BootMVdd = cpu_to_be16(table->BootMVdd * VOLTAGE_SCALE);

	ret = amdgpu_ci_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Discrete_DpmTable, SystemFlags),
					  (u8 *)&table->SystemFlags,
					  sizeof(SMU7_Discrete_DpmTable) - 3 * sizeof(SMU7_PIDController),
					  pi->sram_end);
	if (ret)
		return ret;

	ci_save_default_power_profile(adev);

	return 0;
}

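/* State trimming: restrict the enabled DPM levels to the clock and PCIe
 * gen/lane ranges spanned by the requested power state, and drop
 * duplicate PCIe entries.
 */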
static void ci_trim_single_dpm_states(struct amdgpu_device *adev,
				      struct ci_single_dpm_table *dpm_table,
				      u32 low_limit, u32 high_limit)
{
	u32 i;

	for (i = 0; i < dpm_table->count; i++) {
		if ((dpm_table->dpm_levels[i].value < low_limit) ||
		    (dpm_table->dpm_levels[i].value > high_limit))
			dpm_table->dpm_levels[i].enabled = false;
		else
			dpm_table->dpm_levels[i].enabled = true;
	}
}

static void ci_trim_pcie_dpm_states(struct amdgpu_device *adev,
				    u32 speed_low, u32 lanes_low,
				    u32 speed_high, u32 lanes_high)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_single_dpm_table *pcie_table = &pi->dpm_table.pcie_speed_table;
	u32 i, j;

	for (i = 0; i < pcie_table->count; i++) {
		if ((pcie_table->dpm_levels[i].value < speed_low) ||
		    (pcie_table->dpm_levels[i].param1 < lanes_low) ||
		    (pcie_table->dpm_levels[i].value > speed_high) ||
		    (pcie_table->dpm_levels[i].param1 > lanes_high))
			pcie_table->dpm_levels[i].enabled = false;
		else
			pcie_table->dpm_levels[i].enabled = true;
	}

	for (i = 0; i < pcie_table->count; i++) {
		if (pcie_table->dpm_levels[i].enabled) {
			for (j = i + 1; j < pcie_table->count; j++) {
				if (pcie_table->dpm_levels[j].enabled) {
					if ((pcie_table->dpm_levels[i].value == pcie_table->dpm_levels[j].value) &&
					    (pcie_table->dpm_levels[i].param1 == pcie_table->dpm_levels[j].param1))
						pcie_table->dpm_levels[j].enabled = false;
				}
			}
		}
	}
}

static int ci_trim_dpm_states(struct amdgpu_device *adev,
			      struct amdgpu_ps *amdgpu_state)
{
	struct ci_ps *state = ci_get_ps(amdgpu_state);
	struct ci_power_info *pi = ci_get_pi(adev);
	u32 high_limit_count;

	if (state->performance_level_count < 1)
		return -EINVAL;

	if (state->performance_level_count == 1)
		high_limit_count = 0;
	else
		high_limit_count = 1;

	ci_trim_single_dpm_states(adev,
				  &pi->dpm_table.sclk_table,
				  state->performance_levels[0].sclk,
				  state->performance_levels[high_limit_count].sclk);

	ci_trim_single_dpm_states(adev,
				  &pi->dpm_table.mclk_table,
				  state->performance_levels[0].mclk,
				  state->performance_levels[high_limit_count].mclk);

	ci_trim_pcie_dpm_states(adev,
				state->performance_levels[0].pcie_gen,
				state->performance_levels[0].pcie_lane,
				state->performance_levels[high_limit_count].pcie_gen,
				state->performance_levels[high_limit_count].pcie_lane);

	return 0;
}

static int ci_apply_disp_minimum_voltage_request(struct amdgpu_device *adev)
{
	struct amdgpu_clock_voltage_dependency_table *disp_voltage_table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk;
	struct amdgpu_clock_voltage_dependency_table *vddc_table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
	u32 requested_voltage = 0;
	u32 i;

	if (disp_voltage_table == NULL)
		return -EINVAL;
	if (!disp_voltage_table->count)
		return -EINVAL;

	for (i = 0; i < disp_voltage_table->count; i++) {
		if (adev->clock.current_dispclk == disp_voltage_table->entries[i].clk)
			requested_voltage = disp_voltage_table->entries[i].v;
	}

	for (i = 0; i < vddc_table->count; i++) {
		if (requested_voltage <= vddc_table->entries[i].v) {
			requested_voltage = vddc_table->entries[i].v;
			return (amdgpu_ci_send_msg_to_smc_with_parameter(adev,
									 PPSMC_MSG_VddC_Request,
									 requested_voltage * VOLTAGE_SCALE) == PPSMC_Result_OK) ?
				0 : -EINVAL;
		}
	}

	return -EINVAL;
}

static int ci_upload_dpm_level_enable_mask(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	PPSMC_Result result;

	ci_apply_disp_minimum_voltage_request(adev);

	if (!pi->sclk_dpm_key_disabled) {
		if (pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
			result = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
									  PPSMC_MSG_SCLKDPM_SetEnabledMask,
									  pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
			if (result != PPSMC_Result_OK)
				return -EINVAL;
		}
	}

	if (!pi->mclk_dpm_key_disabled) {
		if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
			result = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
									  PPSMC_MSG_MCLKDPM_SetEnabledMask,
									  pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
			if (result != PPSMC_Result_OK)
				return -EINVAL;
		}
	}

#if 0
	if (!pi->pcie_dpm_key_disabled) {
		if (pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
			result = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
									  PPSMC_MSG_PCIeDPM_SetEnabledMask,
									  pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
			if (result != PPSMC_Result_OK)
				return -EINVAL;
		}
	}
#endif

	return 0;
}

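/* Compare the requested state's top sclk/mclk against the current DPM
 * tables and record which tables must be rebuilt (overdrive changed the
 * top clock) or re-uploaded (display configuration changed).
 */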
static void ci_find_dpm_states_clocks_in_dpm_table(struct amdgpu_device *adev,
						   struct amdgpu_ps *amdgpu_state)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_ps *state = ci_get_ps(amdgpu_state);
	struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table;
	u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
	struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table;
	u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
	u32 i;

	pi->need_update_smu7_dpm_table = 0;

	for (i = 0; i < sclk_table->count; i++) {
		if (sclk == sclk_table->dpm_levels[i].value)
			break;
	}

	if (i >= sclk_table->count) {
		pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
	} else {
		/* XXX check display min clock requirements; the self-comparison
		 * below is a placeholder that never fires until a real display
		 * minimum clock is compared against CISLAND_MINIMUM_ENGINE_CLOCK.
		 */
		if (CISLAND_MINIMUM_ENGINE_CLOCK != CISLAND_MINIMUM_ENGINE_CLOCK)
			pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
	}

	for (i = 0; i < mclk_table->count; i++) {
		if (mclk == mclk_table->dpm_levels[i].value)
			break;
	}

	if (i >= mclk_table->count)
		pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;

	if (adev->pm.dpm.current_active_crtc_count !=
	    adev->pm.dpm.new_active_crtc_count)
		pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
}

static int ci_populate_and_upload_sclk_mclk_dpm_levels(struct amdgpu_device *adev,
						       struct amdgpu_ps *amdgpu_state)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_ps *state = ci_get_ps(amdgpu_state);
	u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
	u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
	struct ci_dpm_table *dpm_table = &pi->dpm_table;
	int ret;

	if (!pi->need_update_smu7_dpm_table)
		return 0;

	if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK)
		dpm_table->sclk_table.dpm_levels[dpm_table->sclk_table.count-1].value = sclk;

	if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)
		dpm_table->mclk_table.dpm_levels[dpm_table->mclk_table.count-1].value = mclk;

	if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK)) {
		ret = ci_populate_all_graphic_levels(adev);
		if (ret)
			return ret;
	}

	if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_MCLK | DPMTABLE_UPDATE_MCLK)) {
		ret = ci_populate_all_memory_levels(adev);
		if (ret)
			return ret;
	}

	return 0;
}

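/* Enable/disable UVD DPM.  The enable mask is built from the highest
 * voltage levels that fit under the current (AC or DC) limit; while UVD
 * is active, memory DPM level 0 is masked off, apparently to avoid mclk
 * switches during decode.
 */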
static int ci_enable_uvd_dpm(struct amdgpu_device *adev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	const struct amdgpu_clock_and_voltage_limits *max_limits;
	int i;

	if (adev->pm.dpm.ac_power)
		max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
	else
		max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

	if (enable) {
		pi->dpm_level_enable_mask.uvd_dpm_enable_mask = 0;

		for (i = adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
			if (adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
				pi->dpm_level_enable_mask.uvd_dpm_enable_mask |= 1 << i;

				if (!pi->caps_uvd_dpm)
					break;
			}
		}

		amdgpu_ci_send_msg_to_smc_with_parameter(adev,
							 PPSMC_MSG_UVDDPM_SetEnabledMask,
							 pi->dpm_level_enable_mask.uvd_dpm_enable_mask);

		if (pi->last_mclk_dpm_enable_mask & 0x1) {
			pi->uvd_enabled = true;
			pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
			amdgpu_ci_send_msg_to_smc_with_parameter(adev,
								 PPSMC_MSG_MCLKDPM_SetEnabledMask,
								 pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
		}
	} else {
		if (pi->uvd_enabled) {
			pi->uvd_enabled = false;
			pi->dpm_level_enable_mask.mclk_dpm_enable_mask |= 1;
			amdgpu_ci_send_msg_to_smc_with_parameter(adev,
								 PPSMC_MSG_MCLKDPM_SetEnabledMask,
								 pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
		}
	}

	return (amdgpu_ci_send_msg_to_smc(adev, enable ?
					  PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable) == PPSMC_Result_OK) ?
		0 : -EINVAL;
}

static int ci_enable_vce_dpm(struct amdgpu_device *adev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	const struct amdgpu_clock_and_voltage_limits *max_limits;
	int i;

	if (adev->pm.dpm.ac_power)
		max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
	else
		max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

	if (enable) {
		pi->dpm_level_enable_mask.vce_dpm_enable_mask = 0;
		for (i = adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
			if (adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
				pi->dpm_level_enable_mask.vce_dpm_enable_mask |= 1 << i;

				if (!pi->caps_vce_dpm)
					break;
			}
		}

		amdgpu_ci_send_msg_to_smc_with_parameter(adev,
							 PPSMC_MSG_VCEDPM_SetEnabledMask,
							 pi->dpm_level_enable_mask.vce_dpm_enable_mask);
	}

	return (amdgpu_ci_send_msg_to_smc(adev, enable ?
					  PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable) == PPSMC_Result_OK) ?
		0 : -EINVAL;
}

#if 0
static int ci_enable_samu_dpm(struct amdgpu_device *adev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	const struct amdgpu_clock_and_voltage_limits *max_limits;
	int i;

	if (adev->pm.dpm.ac_power)
		max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
	else
		max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

	if (enable) {
		pi->dpm_level_enable_mask.samu_dpm_enable_mask = 0;
		for (i = adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
			if (adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
				pi->dpm_level_enable_mask.samu_dpm_enable_mask |= 1 << i;

				if (!pi->caps_samu_dpm)
					break;
			}
		}

		amdgpu_ci_send_msg_to_smc_with_parameter(adev,
							 PPSMC_MSG_SAMUDPM_SetEnabledMask,
							 pi->dpm_level_enable_mask.samu_dpm_enable_mask);
	}
	return (amdgpu_ci_send_msg_to_smc(adev, enable ?
					  PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable) == PPSMC_Result_OK) ?
		0 : -EINVAL;
}

static int ci_enable_acp_dpm(struct amdgpu_device *adev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	const struct amdgpu_clock_and_voltage_limits *max_limits;
	int i;

	if (adev->pm.dpm.ac_power)
		max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
	else
		max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

	if (enable) {
		pi->dpm_level_enable_mask.acp_dpm_enable_mask = 0;
		for (i = adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
			if (adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
				pi->dpm_level_enable_mask.acp_dpm_enable_mask |= 1 << i;

				if (!pi->caps_acp_dpm)
					break;
			}
		}

		amdgpu_ci_send_msg_to_smc_with_parameter(adev,
							 PPSMC_MSG_ACPDPM_SetEnabledMask,
							 pi->dpm_level_enable_mask.acp_dpm_enable_mask);
	}

	return (amdgpu_ci_send_msg_to_smc(adev, enable ?
					  PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable) == PPSMC_Result_OK) ?
		0 : -EINVAL;
}
#endif

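/* UVD power-gating hook: when ungating, pick the UVD boot level, mirror
 * it into DPM_TABLE_475 and enable UVD DPM; when gating, just disable
 * UVD DPM.
 */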
static int ci_update_uvd_dpm(struct amdgpu_device *adev, bool gate)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u32 tmp;
	int ret = 0;

	if (!gate) {
		/* turn the clocks on when decoding */
		if (pi->caps_uvd_dpm ||
		    (adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count <= 0))
			pi->smc_state_table.UvdBootLevel = 0;
		else
			pi->smc_state_table.UvdBootLevel =
				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1;

		tmp = RREG32_SMC(ixDPM_TABLE_475);
		tmp &= ~DPM_TABLE_475__UvdBootLevel_MASK;
		tmp |= (pi->smc_state_table.UvdBootLevel << DPM_TABLE_475__UvdBootLevel__SHIFT);
		WREG32_SMC(ixDPM_TABLE_475, tmp);
		ret = ci_enable_uvd_dpm(adev, true);
	} else {
		ret = ci_enable_uvd_dpm(adev, false);
		if (ret)
			return ret;
	}

	return ret;
}

static u8 ci_get_vce_boot_level(struct amdgpu_device *adev)
{
	u8 i;
	u32 min_evclk = 30000; /* ??? */
	struct amdgpu_vce_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;

	for (i = 0; i < table->count; i++) {
		if (table->entries[i].evclk >= min_evclk)
			return i;
	}

	return table->count - 1;
}

static int ci_update_vce_dpm(struct amdgpu_device *adev,
			     struct amdgpu_ps *amdgpu_new_state,
			     struct amdgpu_ps *amdgpu_current_state)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	int ret = 0;
	u32 tmp;

	if (amdgpu_current_state->evclk != amdgpu_new_state->evclk) {
		if (amdgpu_new_state->evclk) {
			pi->smc_state_table.VceBootLevel = ci_get_vce_boot_level(adev);
			tmp = RREG32_SMC(ixDPM_TABLE_475);
			tmp &= ~DPM_TABLE_475__VceBootLevel_MASK;
			tmp |= (pi->smc_state_table.VceBootLevel << DPM_TABLE_475__VceBootLevel__SHIFT);
			WREG32_SMC(ixDPM_TABLE_475, tmp);

			ret = ci_enable_vce_dpm(adev, true);
		} else {
			ret = ci_enable_vce_dpm(adev, false);
			if (ret)
				return ret;
		}
	}
	return ret;
}

#if 0
static int ci_update_samu_dpm(struct amdgpu_device *adev, bool gate)
{
	return ci_enable_samu_dpm(adev, gate);
}

static int ci_update_acp_dpm(struct amdgpu_device *adev, bool gate)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u32 tmp;

	if (!gate) {
		pi->smc_state_table.AcpBootLevel = 0;

		tmp = RREG32_SMC(ixDPM_TABLE_475);
		tmp &= ~AcpBootLevel_MASK;
		tmp |= AcpBootLevel(pi->smc_state_table.AcpBootLevel);
		WREG32_SMC(ixDPM_TABLE_475, tmp);
	}

	return ci_enable_acp_dpm(adev, !gate);
}
#endif

static int ci_generate_dpm_level_enable_mask(struct amdgpu_device *adev,
					     struct amdgpu_ps *amdgpu_state)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	int ret;

	ret = ci_trim_dpm_states(adev, amdgpu_state);
	if (ret)
		return ret;

	pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
		ci_get_dpm_level_enable_mask_value(&pi->dpm_table.sclk_table);
	pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
		ci_get_dpm_level_enable_mask_value(&pi->dpm_table.mclk_table);
	pi->last_mclk_dpm_enable_mask =
		pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
	if (pi->uvd_enabled) {
		if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask & 1)
			pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
	}
	pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
		ci_get_dpm_level_enable_mask_value(&pi->dpm_table.pcie_speed_table);

	return 0;
}

static u32 ci_get_lowest_enabled_level(struct amdgpu_device *adev,
				       u32 level_mask)
{
	u32 level = 0;

	while ((level_mask & (1 << level)) == 0)
		level++;

	return level;
}

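/* powerplay force_performance_level hook: force the highest or lowest
 * enabled sclk/mclk/PCIe level, or return to automatic selection by
 * re-uploading the full enable masks.  Each force is polled back from
 * TARGET_AND_CURRENT_PROFILE_INDEX(_1) until the hardware reports the
 * requested level or the timeout expires.
 */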
static int ci_dpm_force_performance_level(void *handle,
					  enum amd_dpm_forced_level level)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ci_power_info *pi = ci_get_pi(adev);
	u32 tmp, levels, i;
	int ret;

	if (level == AMD_DPM_FORCED_LEVEL_HIGH) {
		if ((!pi->pcie_dpm_key_disabled) &&
		    pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
			levels = 0;
			tmp = pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
			while (tmp >>= 1)
				levels++;
			if (levels) {
				/* XXX the sclk/mclk paths below force the computed
				 * top index ("levels"); this passes the forced-level
				 * enum instead.
				 */
				ret = ci_dpm_force_state_pcie(adev, level);
				if (ret)
					return ret;
				for (i = 0; i < adev->usec_timeout; i++) {
					tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_1) &
					       TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX_MASK) >>
						TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX__SHIFT;
					if (tmp == levels)
						break;
					udelay(1);
				}
			}
		}
		if ((!pi->sclk_dpm_key_disabled) &&
		    pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
			levels = 0;
			tmp = pi->dpm_level_enable_mask.sclk_dpm_enable_mask;
			while (tmp >>= 1)
				levels++;
			if (levels) {
				ret = ci_dpm_force_state_sclk(adev, levels);
				if (ret)
					return ret;
				for (i = 0; i < adev->usec_timeout; i++) {
					tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
					       TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >>
						TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT;
					if (tmp == levels)
						break;
					udelay(1);
				}
			}
		}
		if ((!pi->mclk_dpm_key_disabled) &&
		    pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
			levels = 0;
			tmp = pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
			while (tmp >>= 1)
				levels++;
			if (levels) {
				ret = ci_dpm_force_state_mclk(adev, levels);
				if (ret)
					return ret;
				for (i = 0; i < adev->usec_timeout; i++) {
					tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
					       TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX_MASK) >>
						TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX__SHIFT;
					if (tmp == levels)
						break;
					udelay(1);
				}
			}
		}
	} else if (level == AMD_DPM_FORCED_LEVEL_LOW) {
		if ((!pi->sclk_dpm_key_disabled) &&
		    pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
			levels = ci_get_lowest_enabled_level(adev,
							     pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
			ret = ci_dpm_force_state_sclk(adev, levels);
			if (ret)
				return ret;
			for (i = 0; i < adev->usec_timeout; i++) {
				tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
				       TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >>
					TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT;
				if (tmp == levels)
					break;
				udelay(1);
			}
		}
		if ((!pi->mclk_dpm_key_disabled) &&
		    pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
			levels = ci_get_lowest_enabled_level(adev,
							     pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
			ret = ci_dpm_force_state_mclk(adev, levels);
			if (ret)
				return ret;
			for (i = 0; i < adev->usec_timeout; i++) {
				tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
				       TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX_MASK) >>
					TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX__SHIFT;
				if (tmp == levels)
					break;
				udelay(1);
			}
		}
		if ((!pi->pcie_dpm_key_disabled) &&
		    pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
			levels = ci_get_lowest_enabled_level(adev,
							     pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
			ret = ci_dpm_force_state_pcie(adev, levels);
			if (ret)
				return ret;
			for (i = 0; i < adev->usec_timeout; i++) {
				tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_1) &
				       TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX_MASK) >>
					TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX__SHIFT;
				if (tmp == levels)
					break;
				udelay(1);
			}
		}
	} else if (level == AMD_DPM_FORCED_LEVEL_AUTO) {
		if (!pi->pcie_dpm_key_disabled) {
			PPSMC_Result smc_result;

			smc_result = amdgpu_ci_send_msg_to_smc(adev,
							       PPSMC_MSG_PCIeDPM_UnForceLevel);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;
		}
		ret = ci_upload_dpm_level_enable_mask(adev);
		if (ret)
			return ret;
	}

	adev->pm.dpm.forced_level = level;

	return 0;
}

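/* Expand MC_SEQ_MISC1/MC_SEQ_RESERVE_M entries into the EMRS/MRS(1)
 * shadow registers the SMC actually programs, merging the live register
 * contents with the per-entry timing data.
 */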
static int ci_set_mc_special_registers(struct amdgpu_device *adev,
				       struct ci_mc_reg_table *table)
{
	u8 i, j, k;
	u32 temp_reg;

	for (i = 0, j = table->last; i < table->last; i++) {
		if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
			return -EINVAL;
		switch (table->mc_reg_address[i].s1) {
		case mmMC_SEQ_MISC1:
			temp_reg = RREG32(mmMC_PMG_CMD_EMRS);
			table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS;
			table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP;
			for (k = 0; k < table->num_entries; k++) {
				table->mc_reg_table_entry[k].mc_data[j] =
					((temp_reg & 0xffff0000)) | ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
			}
			j++;
			if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
				return -EINVAL;

			temp_reg = RREG32(mmMC_PMG_CMD_MRS);
			table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS;
			table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP;
			for (k = 0; k < table->num_entries; k++) {
				table->mc_reg_table_entry[k].mc_data[j] =
					(temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
				if (adev->mc.vram_type != AMDGPU_VRAM_TYPE_GDDR5)
					table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
			}
			j++;
			if (j > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
				return -EINVAL;

			if (adev->mc.vram_type != AMDGPU_VRAM_TYPE_GDDR5) {
				table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
				table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
				for (k = 0; k < table->num_entries; k++) {
					table->mc_reg_table_entry[k].mc_data[j] =
						(table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
				}
				j++;
				if (j > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
					return -EINVAL;
			}
			break;
		case mmMC_SEQ_RESERVE_M:
			temp_reg = RREG32(mmMC_PMG_CMD_MRS1);
			table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1;
			table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP;
			for (k = 0; k < table->num_entries; k++) {
				table->mc_reg_table_entry[k].mc_data[j] =
					(temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
			}
			j++;
			if (j > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
				return -EINVAL;
			break;
		default:
			break;
		}

	}

	table->last = j;

	return 0;
}

static bool ci_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
{
	bool result = true;

	switch (in_reg) {
	case mmMC_SEQ_RAS_TIMING:
		*out_reg = mmMC_SEQ_RAS_TIMING_LP;
		break;
	case mmMC_SEQ_DLL_STBY:
		*out_reg = mmMC_SEQ_DLL_STBY_LP;
		break;
	case mmMC_SEQ_G5PDX_CMD0:
		*out_reg = mmMC_SEQ_G5PDX_CMD0_LP;
		break;
	case mmMC_SEQ_G5PDX_CMD1:
		*out_reg = mmMC_SEQ_G5PDX_CMD1_LP;
		break;
	case mmMC_SEQ_G5PDX_CTRL:
		*out_reg = mmMC_SEQ_G5PDX_CTRL_LP;
		break;
	case mmMC_SEQ_CAS_TIMING:
		*out_reg = mmMC_SEQ_CAS_TIMING_LP;
		break;
	case mmMC_SEQ_MISC_TIMING:
		*out_reg = mmMC_SEQ_MISC_TIMING_LP;
		break;
	case mmMC_SEQ_MISC_TIMING2:
		*out_reg = mmMC_SEQ_MISC_TIMING2_LP;
		break;
	case mmMC_SEQ_PMG_DVS_CMD:
		*out_reg = mmMC_SEQ_PMG_DVS_CMD_LP;
		break;
	case mmMC_SEQ_PMG_DVS_CTL:
		*out_reg = mmMC_SEQ_PMG_DVS_CTL_LP;
		break;
	case mmMC_SEQ_RD_CTL_D0:
		*out_reg = mmMC_SEQ_RD_CTL_D0_LP;
		break;
	case mmMC_SEQ_RD_CTL_D1:
		*out_reg = mmMC_SEQ_RD_CTL_D1_LP;
		break;
	case mmMC_SEQ_WR_CTL_D0:
		*out_reg = mmMC_SEQ_WR_CTL_D0_LP;
		break;
	case mmMC_SEQ_WR_CTL_D1:
		*out_reg = mmMC_SEQ_WR_CTL_D1_LP;
		break;
	case mmMC_PMG_CMD_EMRS:
		*out_reg = mmMC_SEQ_PMG_CMD_EMRS_LP;
		break;
	case mmMC_PMG_CMD_MRS:
		*out_reg = mmMC_SEQ_PMG_CMD_MRS_LP;
		break;
	case mmMC_PMG_CMD_MRS1:
		*out_reg = mmMC_SEQ_PMG_CMD_MRS1_LP;
		break;
	case mmMC_SEQ_PMG_TIMING:
		*out_reg = mmMC_SEQ_PMG_TIMING_LP;
		break;
	case mmMC_PMG_CMD_MRS2:
		*out_reg = mmMC_SEQ_PMG_CMD_MRS2_LP;
		break;
	case mmMC_SEQ_WR_CTL_2:
		*out_reg = mmMC_SEQ_WR_CTL_2_LP;
		break;
	default:
		result = false;
		break;
	}

	return result;
}

static void ci_set_valid_flag(struct ci_mc_reg_table *table)
{
	u8 i, j;

	for (i = 0; i < table->last; i++) {
		for (j = 1; j < table->num_entries; j++) {
			if (table->mc_reg_table_entry[j-1].mc_data[i] !=
			    table->mc_reg_table_entry[j].mc_data[i]) {
				table->valid_flag |= 1 << i;
				break;
			}
		}
	}
}

static void ci_set_s0_mc_reg_index(struct ci_mc_reg_table *table)
{
	u32 i;
	u16 address;

	for (i = 0; i < table->last; i++) {
		table->mc_reg_address[i].s0 =
			ci_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ?
			address : table->mc_reg_address[i].s1;
	}
}

static int ci_copy_vbios_mc_reg_table(const struct atom_mc_reg_table *table,
				      struct ci_mc_reg_table *ci_table)
{
	u8 i, j;

	if (table->last > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
		return -EINVAL;
	if (table->num_entries > MAX_AC_TIMING_ENTRIES)
		return -EINVAL;

	for (i = 0; i < table->last; i++)
		ci_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;

	ci_table->last = table->last;

	for (i = 0; i < table->num_entries; i++) {
		ci_table->mc_reg_table_entry[i].mclk_max =
			table->mc_reg_table_entry[i].mclk_max;
		for (j = 0; j < table->last; j++)
			ci_table->mc_reg_table_entry[i].mc_data[j] =
				table->mc_reg_table_entry[i].mc_data[j];
	}
	ci_table->num_entries = table->num_entries;

	return 0;
}

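/* Board-specific MC sequence patching for Hawaii (0x67B0/0x67B1),
 * apparently keyed off a memory vendor field in MC_SEQ_MISC0: overrides
 * selected timing values for the 1250 MHz and 1375 MHz memory clock
 * entries (mclk_max is in 10 kHz units) and pokes a debug register.
 */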
static int ci_register_patching_mc_seq(struct amdgpu_device *adev,
				       struct ci_mc_reg_table *table)
{
	u8 i, k;
	u32 tmp;
	bool patch;

	tmp = RREG32(mmMC_SEQ_MISC0);
	patch = ((tmp & 0x0000f00) == 0x300);

	if (patch &&
	    ((adev->pdev->device == 0x67B0) ||
	     (adev->pdev->device == 0x67B1))) {
		for (i = 0; i < table->last; i++) {
			if (table->last >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
				return -EINVAL;
			switch (table->mc_reg_address[i].s1) {
			case mmMC_SEQ_MISC1:
				for (k = 0; k < table->num_entries; k++) {
					if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
					    (table->mc_reg_table_entry[k].mclk_max == 137500))
						table->mc_reg_table_entry[k].mc_data[i] =
							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFF8) |
							0x00000007;
				}
				break;
			case mmMC_SEQ_WR_CTL_D0:
				for (k = 0; k < table->num_entries; k++) {
					if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
					    (table->mc_reg_table_entry[k].mclk_max == 137500))
						table->mc_reg_table_entry[k].mc_data[i] =
							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) |
							0x0000D0DD;
				}
				break;
			case mmMC_SEQ_WR_CTL_D1:
				for (k = 0; k < table->num_entries; k++) {
					if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
					    (table->mc_reg_table_entry[k].mclk_max == 137500))
						table->mc_reg_table_entry[k].mc_data[i] =
							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) |
							0x0000D0DD;
				}
				break;
			case mmMC_SEQ_WR_CTL_2:
				for (k = 0; k < table->num_entries; k++) {
					if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
					    (table->mc_reg_table_entry[k].mclk_max == 137500))
						table->mc_reg_table_entry[k].mc_data[i] = 0;
				}
				break;
			case mmMC_SEQ_CAS_TIMING:
				for (k = 0; k < table->num_entries; k++) {
					if (table->mc_reg_table_entry[k].mclk_max == 125000)
						table->mc_reg_table_entry[k].mc_data[i] =
							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) |
							0x000C0140;
					else if (table->mc_reg_table_entry[k].mclk_max == 137500)
						table->mc_reg_table_entry[k].mc_data[i] =
							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) |
							0x000C0150;
				}
				break;
			case mmMC_SEQ_MISC_TIMING:
				for (k = 0; k < table->num_entries; k++) {
					if (table->mc_reg_table_entry[k].mclk_max == 125000)
						table->mc_reg_table_entry[k].mc_data[i] =
							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) |
							0x00000030;
					else if (table->mc_reg_table_entry[k].mclk_max == 137500)
						table->mc_reg_table_entry[k].mc_data[i] =
							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) |
							0x00000035;
				}
				break;
			default:
				break;
			}
		}

		WREG32(mmMC_SEQ_IO_DEBUG_INDEX, 3);
		tmp = RREG32(mmMC_SEQ_IO_DEBUG_DATA);
		tmp = (tmp & 0xFFF8FFFF) | (1 << 16);
		WREG32(mmMC_SEQ_IO_DEBUG_INDEX, 3);
		WREG32(mmMC_SEQ_IO_DEBUG_DATA, tmp);
	}

	return 0;
}

static int ci_initialize_mc_reg_table(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct atom_mc_reg_table *table;
	struct ci_mc_reg_table *ci_table = &pi->mc_reg_table;
	u8 module_index = ci_get_memory_module_index(adev);
	int ret;

	table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	WREG32(mmMC_SEQ_RAS_TIMING_LP, RREG32(mmMC_SEQ_RAS_TIMING));
	WREG32(mmMC_SEQ_CAS_TIMING_LP, RREG32(mmMC_SEQ_CAS_TIMING));
	WREG32(mmMC_SEQ_DLL_STBY_LP, RREG32(mmMC_SEQ_DLL_STBY));
	WREG32(mmMC_SEQ_G5PDX_CMD0_LP, RREG32(mmMC_SEQ_G5PDX_CMD0));
	WREG32(mmMC_SEQ_G5PDX_CMD1_LP, RREG32(mmMC_SEQ_G5PDX_CMD1));
	WREG32(mmMC_SEQ_G5PDX_CTRL_LP, RREG32(mmMC_SEQ_G5PDX_CTRL));
	WREG32(mmMC_SEQ_PMG_DVS_CMD_LP, RREG32(mmMC_SEQ_PMG_DVS_CMD));
	WREG32(mmMC_SEQ_PMG_DVS_CTL_LP, RREG32(mmMC_SEQ_PMG_DVS_CTL));
	WREG32(mmMC_SEQ_MISC_TIMING_LP, RREG32(mmMC_SEQ_MISC_TIMING));
	WREG32(mmMC_SEQ_MISC_TIMING2_LP, RREG32(mmMC_SEQ_MISC_TIMING2));
	WREG32(mmMC_SEQ_PMG_CMD_EMRS_LP, RREG32(mmMC_PMG_CMD_EMRS));
	WREG32(mmMC_SEQ_PMG_CMD_MRS_LP, RREG32(mmMC_PMG_CMD_MRS));
	WREG32(mmMC_SEQ_PMG_CMD_MRS1_LP, RREG32(mmMC_PMG_CMD_MRS1));
	WREG32(mmMC_SEQ_WR_CTL_D0_LP, RREG32(mmMC_SEQ_WR_CTL_D0));
	WREG32(mmMC_SEQ_WR_CTL_D1_LP, RREG32(mmMC_SEQ_WR_CTL_D1));
	WREG32(mmMC_SEQ_RD_CTL_D0_LP, RREG32(mmMC_SEQ_RD_CTL_D0));
	WREG32(mmMC_SEQ_RD_CTL_D1_LP, RREG32(mmMC_SEQ_RD_CTL_D1));
	WREG32(mmMC_SEQ_PMG_TIMING_LP, RREG32(mmMC_SEQ_PMG_TIMING));
	WREG32(mmMC_SEQ_PMG_CMD_MRS2_LP, RREG32(mmMC_PMG_CMD_MRS2));
	WREG32(mmMC_SEQ_WR_CTL_2_LP, RREG32(mmMC_SEQ_WR_CTL_2));

	ret = amdgpu_atombios_init_mc_reg_table(adev, module_index, table);
	if (ret)
		goto init_mc_done;

	ret = ci_copy_vbios_mc_reg_table(table, ci_table);
	if (ret)
		goto init_mc_done;

	ci_set_s0_mc_reg_index(ci_table);

	ret = ci_register_patching_mc_seq(adev, ci_table);
	if (ret)
		goto init_mc_done;

	ret = ci_set_mc_special_registers(adev, ci_table);
	if (ret)
		goto init_mc_done;

	ci_set_valid_flag(ci_table);

init_mc_done:
	kfree(table);

	return ret;
}

static int ci_populate_mc_reg_addresses(struct amdgpu_device *adev,
					SMU7_Discrete_MCRegisters *mc_reg_table)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u32 i, j;

	for (i = 0, j = 0; j < pi->mc_reg_table.last; j++) {
		if (pi->mc_reg_table.valid_flag & (1 << j)) {
			if (i >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
				return -EINVAL;
			mc_reg_table->address[i].s0 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s0);
			mc_reg_table->address[i].s1 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s1);
			i++;
		}
	}

	mc_reg_table->last = (u8)i;

	return 0;
}

static void ci_convert_mc_registers(const struct ci_mc_reg_entry *entry,
				    SMU7_Discrete_MCRegisterSet *data,
				    u32 num_entries, u32 valid_flag)
{
	u32 i, j;

	for (i = 0, j = 0; j < num_entries; j++) {
		if (valid_flag & (1 << j)) {
			data->value[i] = cpu_to_be32(entry->mc_data[j]);
			i++;
		}
	}
}

static void ci_convert_mc_reg_table_entry_to_smc(struct amdgpu_device *adev,
						 const u32 memory_clock,
						 SMU7_Discrete_MCRegisterSet *mc_reg_table_data)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u32 i = 0;

	for (i = 0; i < pi->mc_reg_table.num_entries; i++) {
		if (memory_clock <= pi->mc_reg_table.mc_reg_table_entry[i].mclk_max)
			break;
	}

	if ((i == pi->mc_reg_table.num_entries) && (i > 0))
		--i;

	ci_convert_mc_registers(&pi->mc_reg_table.mc_reg_table_entry[i],
				mc_reg_table_data, pi->mc_reg_table.last,
				pi->mc_reg_table.valid_flag);
}

static void ci_convert_mc_reg_table_to_smc(struct amdgpu_device *adev,
					   SMU7_Discrete_MCRegisters *mc_reg_table)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u32 i;

	for (i = 0; i < pi->dpm_table.mclk_table.count; i++)
		ci_convert_mc_reg_table_entry_to_smc(adev,
						     pi->dpm_table.mclk_table.dpm_levels[i].value,
						     &mc_reg_table->data[i]);
}

static int ci_populate_initial_mc_reg_table(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	int ret;

	memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));

	ret = ci_populate_mc_reg_addresses(adev, &pi->smc_mc_reg_table);
	if (ret)
		return ret;
	ci_convert_mc_reg_table_to_smc(adev, &pi->smc_mc_reg_table);

	return amdgpu_ci_copy_bytes_to_smc(adev,
					   pi->mc_reg_table_start,
					   (u8 *)&pi->smc_mc_reg_table,
					   sizeof(SMU7_Discrete_MCRegisters),
					   pi->sram_end);
}

static int ci_update_and_upload_mc_reg_table(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);

	if (!(pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
		return 0;

	memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));

	ci_convert_mc_reg_table_to_smc(adev, &pi->smc_mc_reg_table);

	return amdgpu_ci_copy_bytes_to_smc(adev,
					   pi->mc_reg_table_start +
					   offsetof(SMU7_Discrete_MCRegisters, data[0]),
					   (u8 *)&pi->smc_mc_reg_table.data[0],
					   sizeof(SMU7_Discrete_MCRegisterSet) *
					   pi->dpm_table.mclk_table.count,
					   pi->sram_end);
}

static void ci_enable_voltage_control(struct amdgpu_device *adev)
{
	u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT);

	tmp |= GENERAL_PWRMGT__VOLT_PWRMGT_EN_MASK;
	WREG32_SMC(ixGENERAL_PWRMGT, tmp);
}

static enum amdgpu_pcie_gen ci_get_maximum_link_speed(struct amdgpu_device *adev,
						      struct amdgpu_ps *amdgpu_state)
{
	struct ci_ps *state = ci_get_ps(amdgpu_state);
	int i;
	u16 pcie_speed, max_speed = 0;

	for (i = 0; i < state->performance_level_count; i++) {
		pcie_speed = state->performance_levels[i].pcie_gen;
		if (max_speed < pcie_speed)
			max_speed = pcie_speed;
	}

	return max_speed;
}

5000static u16 ci_get_current_pcie_speed(struct amdgpu_device *adev)
5001{
5002 u32 speed_cntl = 0;
5003
5004 speed_cntl = RREG32_PCIE(ixPCIE_LC_SPEED_CNTL) &
5005 PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK;
5006 speed_cntl >>= PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
5007
5008 return (u16)speed_cntl;
5009}
5010
5011static int ci_get_current_pcie_lane_number(struct amdgpu_device *adev)
5012{
5013 u32 link_width = 0;
5014
5015 link_width = RREG32_PCIE(ixPCIE_LC_LINK_WIDTH_CNTL) &
5016 PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK;
5017 link_width >>= PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
5018
5019 switch (link_width) {
5020 case 1:
5021 return 1;
5022 case 2:
5023 return 2;
5024 case 3:
5025 return 4;
5026 case 4:
5027 return 8;
5028 case 0:
5029 case 6:
5030 default:
5031 return 16;
5032 }
5033}
5034
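/*
 * If the new state wants a faster PCIe link, try to raise it via the
 * ACPI PSPP interface before the state switch; note the intentional
 * fall-through from the GEN3 case to GEN2 when the GEN3 request is
 * rejected. Downshifts are only flagged here and requested after the
 * switch completes.
 */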
static void ci_request_link_speed_change_before_state_change(struct amdgpu_device *adev,
							     struct amdgpu_ps *amdgpu_new_state,
							     struct amdgpu_ps *amdgpu_current_state)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	enum amdgpu_pcie_gen target_link_speed =
		ci_get_maximum_link_speed(adev, amdgpu_new_state);
	enum amdgpu_pcie_gen current_link_speed;

	if (pi->force_pcie_gen == AMDGPU_PCIE_GEN_INVALID)
		current_link_speed = ci_get_maximum_link_speed(adev, amdgpu_current_state);
	else
		current_link_speed = pi->force_pcie_gen;

	pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
	pi->pspp_notify_required = false;
	if (target_link_speed > current_link_speed) {
		switch (target_link_speed) {
#ifdef CONFIG_ACPI
		case AMDGPU_PCIE_GEN3:
			if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN3, false) == 0)
				break;
			pi->force_pcie_gen = AMDGPU_PCIE_GEN2;
			if (current_link_speed == AMDGPU_PCIE_GEN2)
				break;
		case AMDGPU_PCIE_GEN2:
			if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN2, false) == 0)
				break;
#endif
		default:
			pi->force_pcie_gen = ci_get_current_pcie_speed(adev);
			break;
		}
	} else {
		if (target_link_speed < current_link_speed)
			pi->pspp_notify_required = true;
	}
}

static void ci_notify_link_speed_change_after_state_change(struct amdgpu_device *adev,
							   struct amdgpu_ps *amdgpu_new_state,
							   struct amdgpu_ps *amdgpu_current_state)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	enum amdgpu_pcie_gen target_link_speed =
		ci_get_maximum_link_speed(adev, amdgpu_new_state);
	u8 request;

	if (pi->pspp_notify_required) {
		if (target_link_speed == AMDGPU_PCIE_GEN3)
			request = PCIE_PERF_REQ_PECI_GEN3;
		else if (target_link_speed == AMDGPU_PCIE_GEN2)
			request = PCIE_PERF_REQ_PECI_GEN2;
		else
			request = PCIE_PERF_REQ_PECI_GEN1;

		if ((request == PCIE_PERF_REQ_PECI_GEN1) &&
		    (ci_get_current_pcie_speed(adev) > 0))
			return;

#ifdef CONFIG_ACPI
		amdgpu_acpi_pcie_performance_request(adev, request, false);
#endif
	}
}

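/*
 * Cache the VDDC/VDDCI ranges and the maximum AC clock/voltage limits
 * from the dependency tables parsed out of the power-play table; all
 * three tables must be present and non-empty.
 */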
static int ci_set_private_data_variables_based_on_pptable(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct amdgpu_clock_voltage_dependency_table *allowed_sclk_vddc_table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
	struct amdgpu_clock_voltage_dependency_table *allowed_mclk_vddc_table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
	struct amdgpu_clock_voltage_dependency_table *allowed_mclk_vddci_table =
		&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk;

	if (allowed_sclk_vddc_table == NULL)
		return -EINVAL;
	if (allowed_sclk_vddc_table->count < 1)
		return -EINVAL;
	if (allowed_mclk_vddc_table == NULL)
		return -EINVAL;
	if (allowed_mclk_vddc_table->count < 1)
		return -EINVAL;
	if (allowed_mclk_vddci_table == NULL)
		return -EINVAL;
	if (allowed_mclk_vddci_table->count < 1)
		return -EINVAL;

	pi->min_vddc_in_pp_table = allowed_sclk_vddc_table->entries[0].v;
	pi->max_vddc_in_pp_table =
		allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;

	pi->min_vddci_in_pp_table = allowed_mclk_vddci_table->entries[0].v;
	pi->max_vddci_in_pp_table =
		allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;

	adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk =
		allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
	adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk =
		allowed_mclk_vddc_table->entries[allowed_mclk_vddc_table->count - 1].clk;
	adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc =
		allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
	adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci =
		allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;

	return 0;
}

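/*
 * The VBIOS encodes leakage as virtual voltage IDs; the helpers below
 * replace any such ID found in a table entry with the actual voltage
 * measured for this part.
 */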
static void ci_patch_with_vddc_leakage(struct amdgpu_device *adev, u16 *vddc)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_leakage_voltage *leakage_table = &pi->vddc_leakage;
	u32 leakage_index;

	for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
		if (leakage_table->leakage_id[leakage_index] == *vddc) {
			*vddc = leakage_table->actual_voltage[leakage_index];
			break;
		}
	}
}

static void ci_patch_with_vddci_leakage(struct amdgpu_device *adev, u16 *vddci)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_leakage_voltage *leakage_table = &pi->vddci_leakage;
	u32 leakage_index;

	for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
		if (leakage_table->leakage_id[leakage_index] == *vddci) {
			*vddci = leakage_table->actual_voltage[leakage_index];
			break;
		}
	}
}

static void ci_patch_clock_voltage_dependency_table_with_vddc_leakage(struct amdgpu_device *adev,
								      struct amdgpu_clock_voltage_dependency_table *table)
{
	u32 i;

	if (table) {
		for (i = 0; i < table->count; i++)
			ci_patch_with_vddc_leakage(adev, &table->entries[i].v);
	}
}

static void ci_patch_clock_voltage_dependency_table_with_vddci_leakage(struct amdgpu_device *adev,
								       struct amdgpu_clock_voltage_dependency_table *table)
{
	u32 i;

	if (table) {
		for (i = 0; i < table->count; i++)
			ci_patch_with_vddci_leakage(adev, &table->entries[i].v);
	}
}

static void ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(struct amdgpu_device *adev,
									  struct amdgpu_vce_clock_voltage_dependency_table *table)
{
	u32 i;

	if (table) {
		for (i = 0; i < table->count; i++)
			ci_patch_with_vddc_leakage(adev, &table->entries[i].v);
	}
}

static void ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(struct amdgpu_device *adev,
									  struct amdgpu_uvd_clock_voltage_dependency_table *table)
{
	u32 i;

	if (table) {
		for (i = 0; i < table->count; i++)
			ci_patch_with_vddc_leakage(adev, &table->entries[i].v);
	}
}

static void ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(struct amdgpu_device *adev,
								   struct amdgpu_phase_shedding_limits_table *table)
{
	u32 i;

	if (table) {
		for (i = 0; i < table->count; i++)
			ci_patch_with_vddc_leakage(adev, &table->entries[i].voltage);
	}
}

static void ci_patch_clock_voltage_limits_with_vddc_leakage(struct amdgpu_device *adev,
							    struct amdgpu_clock_and_voltage_limits *table)
{
	if (table) {
		ci_patch_with_vddc_leakage(adev, (u16 *)&table->vddc);
		ci_patch_with_vddci_leakage(adev, (u16 *)&table->vddci);
	}
}

static void ci_patch_cac_leakage_table_with_vddc_leakage(struct amdgpu_device *adev,
							 struct amdgpu_cac_leakage_table *table)
{
	u32 i;

	if (table) {
		for (i = 0; i < table->count; i++)
			ci_patch_with_vddc_leakage(adev, &table->entries[i].vddc);
	}
}

static void ci_patch_dependency_tables_with_leakage(struct amdgpu_device *adev)
{
	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
								  &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk);
	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
								  &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk);
	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
								  &adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk);
	ci_patch_clock_voltage_dependency_table_with_vddci_leakage(adev,
								   &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk);
	ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(adev,
								      &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table);
	ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(adev,
								      &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table);
	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
								  &adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table);
	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
								  &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table);
	ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(adev,
							       &adev->pm.dpm.dyn_state.phase_shedding_limits_table);
	ci_patch_clock_voltage_limits_with_vddc_leakage(adev,
							&adev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
	ci_patch_clock_voltage_limits_with_vddc_leakage(adev,
							&adev->pm.dpm.dyn_state.max_clock_voltage_on_dc);
	ci_patch_cac_leakage_table_with_vddc_leakage(adev,
						     &adev->pm.dpm.dyn_state.cac_leakage_table);
}

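/*
 * Both the generic amdgpu_ps and the CI-specific state are copied into
 * driver-private storage, and ps_priv is re-pointed at that copy so the
 * cached state stays valid independently of the table it came from.
 */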
static void ci_update_current_ps(struct amdgpu_device *adev,
				 struct amdgpu_ps *rps)
{
	struct ci_ps *new_ps = ci_get_ps(rps);
	struct ci_power_info *pi = ci_get_pi(adev);

	pi->current_rps = *rps;
	pi->current_ps = *new_ps;
	pi->current_rps.ps_priv = &pi->current_ps;
	adev->pm.dpm.current_ps = &pi->current_rps;
}

static void ci_update_requested_ps(struct amdgpu_device *adev,
				   struct amdgpu_ps *rps)
{
	struct ci_ps *new_ps = ci_get_ps(rps);
	struct ci_power_info *pi = ci_get_pi(adev);

	pi->requested_rps = *rps;
	pi->requested_ps = *new_ps;
	pi->requested_rps.ps_priv = &pi->requested_ps;
	adev->pm.dpm.requested_ps = &pi->requested_rps;
}

static int ci_dpm_pre_set_power_state(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ci_power_info *pi = ci_get_pi(adev);
	struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps;
	struct amdgpu_ps *new_ps = &requested_ps;

	ci_update_requested_ps(adev, new_ps);

	ci_apply_state_adjust_rules(adev, &pi->requested_rps);

	return 0;
}

static void ci_dpm_post_set_power_state(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ci_power_info *pi = ci_get_pi(adev);
	struct amdgpu_ps *new_ps = &pi->requested_rps;

	ci_update_current_ps(adev, new_ps);
}

static void ci_dpm_setup_asic(struct amdgpu_device *adev)
{
	ci_read_clock_registers(adev);
	ci_enable_acpi_power_management(adev);
	ci_init_sclk_t(adev);
}

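/*
 * Full DPM bring-up: construct the voltage and MC register tables,
 * upload firmware and the SMC state table, start the SMC, then enable
 * the individual features (ULV, deep sleep, DIDT, CAC, power
 * containment, thermal-based SCLK DPM) in dependency order. The first
 * failure aborts the sequence with an error in the log.
 */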
static int ci_dpm_enable(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps;
	int ret;

	if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
		ci_enable_voltage_control(adev);
		ret = ci_construct_voltage_tables(adev);
		if (ret) {
			DRM_ERROR("ci_construct_voltage_tables failed\n");
			return ret;
		}
	}
	if (pi->caps_dynamic_ac_timing) {
		ret = ci_initialize_mc_reg_table(adev);
		if (ret)
			pi->caps_dynamic_ac_timing = false;
	}
	if (pi->dynamic_ss)
		ci_enable_spread_spectrum(adev, true);
	if (pi->thermal_protection)
		ci_enable_thermal_protection(adev, true);
	ci_program_sstp(adev);
	ci_enable_display_gap(adev);
	ci_program_vc(adev);
	ret = ci_upload_firmware(adev);
	if (ret) {
		DRM_ERROR("ci_upload_firmware failed\n");
		return ret;
	}
	ret = ci_process_firmware_header(adev);
	if (ret) {
		DRM_ERROR("ci_process_firmware_header failed\n");
		return ret;
	}
	ret = ci_initial_switch_from_arb_f0_to_f1(adev);
	if (ret) {
		DRM_ERROR("ci_initial_switch_from_arb_f0_to_f1 failed\n");
		return ret;
	}
	ret = ci_init_smc_table(adev);
	if (ret) {
		DRM_ERROR("ci_init_smc_table failed\n");
		return ret;
	}
	ret = ci_init_arb_table_index(adev);
	if (ret) {
		DRM_ERROR("ci_init_arb_table_index failed\n");
		return ret;
	}
	if (pi->caps_dynamic_ac_timing) {
		ret = ci_populate_initial_mc_reg_table(adev);
		if (ret) {
			DRM_ERROR("ci_populate_initial_mc_reg_table failed\n");
			return ret;
		}
	}
	ret = ci_populate_pm_base(adev);
	if (ret) {
		DRM_ERROR("ci_populate_pm_base failed\n");
		return ret;
	}
	ci_dpm_start_smc(adev);
	ci_enable_vr_hot_gpio_interrupt(adev);
	ret = ci_notify_smc_display_change(adev, false);
	if (ret) {
		DRM_ERROR("ci_notify_smc_display_change failed\n");
		return ret;
	}
	ci_enable_sclk_control(adev, true);
	ret = ci_enable_ulv(adev, true);
	if (ret) {
		DRM_ERROR("ci_enable_ulv failed\n");
		return ret;
	}
	ret = ci_enable_ds_master_switch(adev, true);
	if (ret) {
		DRM_ERROR("ci_enable_ds_master_switch failed\n");
		return ret;
	}
	ret = ci_start_dpm(adev);
	if (ret) {
		DRM_ERROR("ci_start_dpm failed\n");
		return ret;
	}
	ret = ci_enable_didt(adev, true);
	if (ret) {
		DRM_ERROR("ci_enable_didt failed\n");
		return ret;
	}
	ret = ci_enable_smc_cac(adev, true);
	if (ret) {
		DRM_ERROR("ci_enable_smc_cac failed\n");
		return ret;
	}
	ret = ci_enable_power_containment(adev, true);
	if (ret) {
		DRM_ERROR("ci_enable_power_containment failed\n");
		return ret;
	}

	ret = ci_power_control_set_level(adev);
	if (ret) {
		DRM_ERROR("ci_power_control_set_level failed\n");
		return ret;
	}

	ci_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, true);

	ret = ci_enable_thermal_based_sclk_dpm(adev, true);
	if (ret) {
		DRM_ERROR("ci_enable_thermal_based_sclk_dpm failed\n");
		return ret;
	}

	ci_thermal_start_thermal_controller(adev);

	ci_update_current_ps(adev, boot_ps);

	return 0;
}

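/*
 * Tear-down mirrors the enable path: features are disabled in roughly
 * reverse order, the SMC is reset to defaults and stopped, and the boot
 * state is restored as the current power state.
 */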
static void ci_dpm_disable(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps;

	amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
		       AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
	amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
		       AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);

	ci_dpm_powergate_uvd(adev, true);

	if (!amdgpu_ci_is_smc_running(adev))
		return;

	ci_thermal_stop_thermal_controller(adev);

	if (pi->thermal_protection)
		ci_enable_thermal_protection(adev, false);
	ci_enable_power_containment(adev, false);
	ci_enable_smc_cac(adev, false);
	ci_enable_didt(adev, false);
	ci_enable_spread_spectrum(adev, false);
	ci_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, false);
	ci_stop_dpm(adev);
	ci_enable_ds_master_switch(adev, false);
	ci_enable_ulv(adev, false);
	ci_clear_vc(adev);
	ci_reset_to_default(adev);
	ci_dpm_stop_smc(adev);
	ci_force_switch_to_arb_f0(adev);
	ci_enable_thermal_based_sclk_dpm(adev, false);

	ci_update_current_ps(adev, boot_ps);
}

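/*
 * The state switch proper: freeze SCLK/MCLK DPM, upload the new DPM
 * levels and enable masks, update VCE DPM and memory timings, then
 * unfreeze. PCIe link-speed requests bracket the sequence when the
 * platform supports them.
 */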
static int ci_dpm_set_power_state(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ci_power_info *pi = ci_get_pi(adev);
	struct amdgpu_ps *new_ps = &pi->requested_rps;
	struct amdgpu_ps *old_ps = &pi->current_rps;
	int ret;

	ci_find_dpm_states_clocks_in_dpm_table(adev, new_ps);
	if (pi->pcie_performance_request)
		ci_request_link_speed_change_before_state_change(adev, new_ps, old_ps);
	ret = ci_freeze_sclk_mclk_dpm(adev);
	if (ret) {
		DRM_ERROR("ci_freeze_sclk_mclk_dpm failed\n");
		return ret;
	}
	ret = ci_populate_and_upload_sclk_mclk_dpm_levels(adev, new_ps);
	if (ret) {
		DRM_ERROR("ci_populate_and_upload_sclk_mclk_dpm_levels failed\n");
		return ret;
	}
	ret = ci_generate_dpm_level_enable_mask(adev, new_ps);
	if (ret) {
		DRM_ERROR("ci_generate_dpm_level_enable_mask failed\n");
		return ret;
	}

	ret = ci_update_vce_dpm(adev, new_ps, old_ps);
	if (ret) {
		DRM_ERROR("ci_update_vce_dpm failed\n");
		return ret;
	}

	ret = ci_update_sclk_t(adev);
	if (ret) {
		DRM_ERROR("ci_update_sclk_t failed\n");
		return ret;
	}
	if (pi->caps_dynamic_ac_timing) {
		ret = ci_update_and_upload_mc_reg_table(adev);
		if (ret) {
			DRM_ERROR("ci_update_and_upload_mc_reg_table failed\n");
			return ret;
		}
	}
	ret = ci_program_memory_timing_parameters(adev);
	if (ret) {
		DRM_ERROR("ci_program_memory_timing_parameters failed\n");
		return ret;
	}
	ret = ci_unfreeze_sclk_mclk_dpm(adev);
	if (ret) {
		DRM_ERROR("ci_unfreeze_sclk_mclk_dpm failed\n");
		return ret;
	}
	ret = ci_upload_dpm_level_enable_mask(adev);
	if (ret) {
		DRM_ERROR("ci_upload_dpm_level_enable_mask failed\n");
		return ret;
	}
	if (pi->pcie_performance_request)
		ci_notify_link_speed_change_after_state_change(adev, new_ps, old_ps);

	return 0;
}

#if 0
static void ci_dpm_reset_asic(struct amdgpu_device *adev)
{
	ci_set_boot_state(adev);
}
#endif

static void ci_dpm_display_configuration_changed(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	ci_program_display_gap(adev);
}

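/*
 * These unions overlay the ATOM BIOS table layouts so a single pointer
 * can address whichever table revision the VBIOS actually provides.
 */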
union power_info {
	struct _ATOM_POWERPLAY_INFO info;
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
};

union pplib_clock_info {
	struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
	struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
	struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
	struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
	struct _ATOM_PPLIB_SI_CLOCK_INFO si;
	struct _ATOM_PPLIB_CI_CLOCK_INFO ci;
};

union pplib_power_state {
	struct _ATOM_PPLIB_STATE v1;
	struct _ATOM_PPLIB_STATE_V2 v2;
};

static void ci_parse_pplib_non_clock_info(struct amdgpu_device *adev,
					  struct amdgpu_ps *rps,
					  struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
					  u8 table_rev)
{
	rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
	rps->class = le16_to_cpu(non_clock_info->usClassification);
	rps->class2 = le16_to_cpu(non_clock_info->usClassification2);

	if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
		rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
		rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
	} else {
		rps->vclk = 0;
		rps->dclk = 0;
	}

	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
		adev->pm.dpm.boot_ps = rps;
	if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
		adev->pm.dpm.uvd_ps = rps;
}

static void ci_parse_pplib_clock_info(struct amdgpu_device *adev,
				      struct amdgpu_ps *rps, int index,
				      union pplib_clock_info *clock_info)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_ps *ps = ci_get_ps(rps);
	struct ci_pl *pl = &ps->performance_levels[index];

	ps->performance_level_count = index + 1;

	pl->sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
	pl->sclk |= clock_info->ci.ucEngineClockHigh << 16;
	pl->mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
	pl->mclk |= clock_info->ci.ucMemoryClockHigh << 16;

	pl->pcie_gen = amdgpu_get_pcie_gen_support(adev,
						   pi->sys_pcie_mask,
						   pi->vbios_boot_state.pcie_gen_bootup_value,
						   clock_info->ci.ucPCIEGen);
	pl->pcie_lane = amdgpu_get_pcie_lane_support(adev,
						     pi->vbios_boot_state.pcie_lane_bootup_value,
						     le16_to_cpu(clock_info->ci.usPCIELane));

	if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
		pi->acpi_pcie_gen = pl->pcie_gen;

	if (rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) {
		pi->ulv.supported = true;
		pi->ulv.pl = *pl;
		pi->ulv.cg_ulv_parameter = CISLANDS_CGULVPARAMETER_DFLT;
	}

	/* patch up boot state */
	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
		pl->mclk = pi->vbios_boot_state.mclk_bootup_value;
		pl->sclk = pi->vbios_boot_state.sclk_bootup_value;
		pl->pcie_gen = pi->vbios_boot_state.pcie_gen_bootup_value;
		pl->pcie_lane = pi->vbios_boot_state.pcie_lane_bootup_value;
	}

	switch (rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
	case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
		pi->use_pcie_powersaving_levels = true;
		if (pi->pcie_gen_powersaving.max < pl->pcie_gen)
			pi->pcie_gen_powersaving.max = pl->pcie_gen;
		if (pi->pcie_gen_powersaving.min > pl->pcie_gen)
			pi->pcie_gen_powersaving.min = pl->pcie_gen;
		if (pi->pcie_lane_powersaving.max < pl->pcie_lane)
			pi->pcie_lane_powersaving.max = pl->pcie_lane;
		if (pi->pcie_lane_powersaving.min > pl->pcie_lane)
			pi->pcie_lane_powersaving.min = pl->pcie_lane;
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
		pi->use_pcie_performance_levels = true;
		if (pi->pcie_gen_performance.max < pl->pcie_gen)
			pi->pcie_gen_performance.max = pl->pcie_gen;
		if (pi->pcie_gen_performance.min > pl->pcie_gen)
			pi->pcie_gen_performance.min = pl->pcie_gen;
		if (pi->pcie_lane_performance.max < pl->pcie_lane)
			pi->pcie_lane_performance.max = pl->pcie_lane;
		if (pi->pcie_lane_performance.min > pl->pcie_lane)
			pi->pcie_lane_performance.min = pl->pcie_lane;
		break;
	default:
		break;
	}
}

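/*
 * Walk the VBIOS power-play state array, allocating a ci_ps for each
 * state and filling in its performance levels from the shared clock
 * info array, then record the clocks for the VCE states.
 */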
static int ci_parse_power_table(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
	union pplib_power_state *power_state;
	int i, j, k, non_clock_array_index, clock_array_index;
	union pplib_clock_info *clock_info;
	struct _StateArray *state_array;
	struct _ClockInfoArray *clock_info_array;
	struct _NonClockInfoArray *non_clock_info_array;
	union power_info *power_info;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;
	u8 *power_state_offset;
	struct ci_ps *ps;

	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					   &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	amdgpu_add_thermal_controller(adev);

	state_array = (struct _StateArray *)
		(mode_info->atom_context->bios + data_offset +
		 le16_to_cpu(power_info->pplib.usStateArrayOffset));
	clock_info_array = (struct _ClockInfoArray *)
		(mode_info->atom_context->bios + data_offset +
		 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
	non_clock_info_array = (struct _NonClockInfoArray *)
		(mode_info->atom_context->bios + data_offset +
		 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));

	adev->pm.dpm.ps = kzalloc(sizeof(struct amdgpu_ps) *
				  state_array->ucNumEntries, GFP_KERNEL);
	if (!adev->pm.dpm.ps)
		return -ENOMEM;
	power_state_offset = (u8 *)state_array->states;
	for (i = 0; i < state_array->ucNumEntries; i++) {
		u8 *idx;
		power_state = (union pplib_power_state *)power_state_offset;
		non_clock_array_index = power_state->v2.nonClockInfoIndex;
		non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
			&non_clock_info_array->nonClockInfo[non_clock_array_index];
		ps = kzalloc(sizeof(struct ci_ps), GFP_KERNEL);
		if (ps == NULL) {
			/* free the private states already allocated */
			for (j = 0; j < i; j++)
				kfree(adev->pm.dpm.ps[j].ps_priv);
			kfree(adev->pm.dpm.ps);
			return -ENOMEM;
		}
		adev->pm.dpm.ps[i].ps_priv = ps;
		ci_parse_pplib_non_clock_info(adev, &adev->pm.dpm.ps[i],
					      non_clock_info,
					      non_clock_info_array->ucEntrySize);
		k = 0;
		idx = (u8 *)&power_state->v2.clockInfoIndex[0];
		for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
			clock_array_index = idx[j];
			if (clock_array_index >= clock_info_array->ucNumEntries)
				continue;
			if (k >= CISLANDS_MAX_HARDWARE_POWERLEVELS)
				break;
			clock_info = (union pplib_clock_info *)
				((u8 *)&clock_info_array->clockInfo[0] +
				 (clock_array_index * clock_info_array->ucEntrySize));
			ci_parse_pplib_clock_info(adev,
						  &adev->pm.dpm.ps[i], k,
						  clock_info);
			k++;
		}
		power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
	}
	adev->pm.dpm.num_ps = state_array->ucNumEntries;

	/* fill in the vce power states */
	for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
		u32 sclk, mclk;
		clock_array_index = adev->pm.dpm.vce_states[i].clk_idx;
		clock_info = (union pplib_clock_info *)
			&clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
		sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
		sclk |= clock_info->ci.ucEngineClockHigh << 16;
		mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
		mclk |= clock_info->ci.ucMemoryClockHigh << 16;
		adev->pm.dpm.vce_states[i].sclk = sclk;
		adev->pm.dpm.vce_states[i].mclk = mclk;
	}

	return 0;
}

static int ci_get_vbios_boot_values(struct amdgpu_device *adev,
				    struct ci_vbios_boot_state *boot_state)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
	ATOM_FIRMWARE_INFO_V2_2 *firmware_info;
	u8 frev, crev;
	u16 data_offset;

	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					  &frev, &crev, &data_offset)) {
		firmware_info =
			(ATOM_FIRMWARE_INFO_V2_2 *)(mode_info->atom_context->bios +
						    data_offset);
		boot_state->mvdd_bootup_value = le16_to_cpu(firmware_info->usBootUpMVDDCVoltage);
		boot_state->vddc_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCVoltage);
		boot_state->vddci_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCIVoltage);
		boot_state->pcie_gen_bootup_value = ci_get_current_pcie_speed(adev);
		boot_state->pcie_lane_bootup_value = ci_get_current_pcie_lane_number(adev);
		boot_state->sclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultEngineClock);
		boot_state->mclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultMemoryClock);

		return 0;
	}
	return -EINVAL;
}

static void ci_dpm_fini(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->pm.dpm.num_ps; i++) {
		kfree(adev->pm.dpm.ps[i].ps_priv);
	}
	kfree(adev->pm.dpm.ps);
	kfree(adev->pm.dpm.priv);
	kfree(adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries);
	amdgpu_free_extended_power_table(adev);
}

/**
 * ci_dpm_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */
static int ci_dpm_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_BONAIRE:
		if ((adev->pdev->revision == 0x80) ||
		    (adev->pdev->revision == 0x81) ||
		    (adev->pdev->device == 0x665f))
			chip_name = "bonaire_k";
		else
			chip_name = "bonaire";
		break;
	case CHIP_HAWAII:
		if (adev->pdev->revision == 0x80)
			chip_name = "hawaii_k";
		else
			chip_name = "hawaii";
		break;
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	default: BUG();
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
	err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->pm.fw);

out:
	if (err) {
		pr_err("cik_smc: Failed to load firmware \"%s\"\n", fw_name);
		release_firmware(adev->pm.fw);
		adev->pm.fw = NULL;
	}
	return err;
}

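/*
 * One-time software initialization: cache the VBIOS boot values, parse
 * the power-play tables, and set the many defaults (activity targets,
 * thermal trip points, GPIO-based VRHot/AC-DC signalling, voltage
 * control methods) the rest of the driver relies on. The PCIe gen/lane
 * min/max fields are deliberately seeded inverted so that the first
 * parsed performance level overwrites them.
 */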
static int ci_dpm_init(struct amdgpu_device *adev)
{
	int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
	SMU7_Discrete_DpmTable *dpm_table;
	struct amdgpu_gpio_rec gpio;
	u16 data_offset, size;
	u8 frev, crev;
	struct ci_power_info *pi;
	int ret;

	pi = kzalloc(sizeof(struct ci_power_info), GFP_KERNEL);
	if (pi == NULL)
		return -ENOMEM;
	adev->pm.dpm.priv = pi;

	pi->sys_pcie_mask =
		(adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK) >>
		CAIL_PCIE_LINK_SPEED_SUPPORT_SHIFT;

	pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;

	pi->pcie_gen_performance.max = AMDGPU_PCIE_GEN1;
	pi->pcie_gen_performance.min = AMDGPU_PCIE_GEN3;
	pi->pcie_gen_powersaving.max = AMDGPU_PCIE_GEN1;
	pi->pcie_gen_powersaving.min = AMDGPU_PCIE_GEN3;

	pi->pcie_lane_performance.max = 0;
	pi->pcie_lane_performance.min = 16;
	pi->pcie_lane_powersaving.max = 0;
	pi->pcie_lane_powersaving.min = 16;

	ret = ci_get_vbios_boot_values(adev, &pi->vbios_boot_state);
	if (ret) {
		ci_dpm_fini(adev);
		return ret;
	}

	ret = amdgpu_get_platform_caps(adev);
	if (ret) {
		ci_dpm_fini(adev);
		return ret;
	}

	ret = amdgpu_parse_extended_power_table(adev);
	if (ret) {
		ci_dpm_fini(adev);
		return ret;
	}

	ret = ci_parse_power_table(adev);
	if (ret) {
		ci_dpm_fini(adev);
		return ret;
	}

	pi->dll_default_on = false;
	pi->sram_end = SMC_RAM_END;

	pi->activity_target[0] = CISLAND_TARGETACTIVITY_DFLT;
	pi->activity_target[1] = CISLAND_TARGETACTIVITY_DFLT;
	pi->activity_target[2] = CISLAND_TARGETACTIVITY_DFLT;
	pi->activity_target[3] = CISLAND_TARGETACTIVITY_DFLT;
	pi->activity_target[4] = CISLAND_TARGETACTIVITY_DFLT;
	pi->activity_target[5] = CISLAND_TARGETACTIVITY_DFLT;
	pi->activity_target[6] = CISLAND_TARGETACTIVITY_DFLT;
	pi->activity_target[7] = CISLAND_TARGETACTIVITY_DFLT;

	pi->mclk_activity_target = CISLAND_MCLK_TARGETACTIVITY_DFLT;

	pi->sclk_dpm_key_disabled = 0;
	pi->mclk_dpm_key_disabled = 0;
	pi->pcie_dpm_key_disabled = 0;
	pi->thermal_sclk_dpm_enabled = 0;

	if (amdgpu_pp_feature_mask & SCLK_DEEP_SLEEP_MASK)
		pi->caps_sclk_ds = true;
	else
		pi->caps_sclk_ds = false;

	pi->mclk_strobe_mode_threshold = 40000;
	pi->mclk_stutter_mode_threshold = 40000;
	pi->mclk_edc_enable_threshold = 40000;
	pi->mclk_edc_wr_enable_threshold = 40000;

	ci_initialize_powertune_defaults(adev);

	pi->caps_fps = false;

	pi->caps_sclk_throttle_low_notification = false;

	pi->caps_uvd_dpm = true;
	pi->caps_vce_dpm = true;

	ci_get_leakage_voltages(adev);
	ci_patch_dependency_tables_with_leakage(adev);
	ci_set_private_data_variables_based_on_pptable(adev);

	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
		kzalloc(4 * sizeof(struct amdgpu_clock_voltage_dependency_entry), GFP_KERNEL);
	if (!adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
		ci_dpm_fini(adev);
		return -ENOMEM;
	}
	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4;
	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;
	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000;
	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 720;
	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000;
	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 810;
	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000;
	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 900;

	adev->pm.dpm.dyn_state.mclk_sclk_ratio = 4;
	adev->pm.dpm.dyn_state.sclk_mclk_delta = 15000;
	adev->pm.dpm.dyn_state.vddc_vddci_delta = 200;

	adev->pm.dpm.dyn_state.valid_sclk_values.count = 0;
	adev->pm.dpm.dyn_state.valid_sclk_values.values = NULL;
	adev->pm.dpm.dyn_state.valid_mclk_values.count = 0;
	adev->pm.dpm.dyn_state.valid_mclk_values.values = NULL;

	if (adev->asic_type == CHIP_HAWAII) {
		pi->thermal_temp_setting.temperature_low = 94500;
		pi->thermal_temp_setting.temperature_high = 95000;
		pi->thermal_temp_setting.temperature_shutdown = 104000;
	} else {
		pi->thermal_temp_setting.temperature_low = 99500;
		pi->thermal_temp_setting.temperature_high = 100000;
		pi->thermal_temp_setting.temperature_shutdown = 104000;
	}

	pi->uvd_enabled = false;

	dpm_table = &pi->smc_state_table;

	gpio = amdgpu_atombios_lookup_gpio(adev, VDDC_VRHOT_GPIO_PINID);
	if (gpio.valid) {
		dpm_table->VRHotGpio = gpio.shift;
		adev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_REGULATOR_HOT;
	} else {
		dpm_table->VRHotGpio = CISLANDS_UNUSED_GPIO_PIN;
		adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_REGULATOR_HOT;
	}

	gpio = amdgpu_atombios_lookup_gpio(adev, PP_AC_DC_SWITCH_GPIO_PINID);
	if (gpio.valid) {
		dpm_table->AcDcGpio = gpio.shift;
		adev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_HARDWAREDC;
	} else {
		dpm_table->AcDcGpio = CISLANDS_UNUSED_GPIO_PIN;
		adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_HARDWAREDC;
	}

	gpio = amdgpu_atombios_lookup_gpio(adev, VDDC_PCC_GPIO_PINID);
	if (gpio.valid) {
		u32 tmp = RREG32_SMC(ixCNB_PWRMGT_CNTL);

		switch (gpio.shift) {
		case 0:
			tmp &= ~CNB_PWRMGT_CNTL__GNB_SLOW_MODE_MASK;
			tmp |= 1 << CNB_PWRMGT_CNTL__GNB_SLOW_MODE__SHIFT;
			break;
		case 1:
			tmp &= ~CNB_PWRMGT_CNTL__GNB_SLOW_MODE_MASK;
			tmp |= 2 << CNB_PWRMGT_CNTL__GNB_SLOW_MODE__SHIFT;
			break;
		case 2:
			tmp |= CNB_PWRMGT_CNTL__GNB_SLOW_MASK;
			break;
		case 3:
			tmp |= CNB_PWRMGT_CNTL__FORCE_NB_PS1_MASK;
			break;
		case 4:
			tmp |= CNB_PWRMGT_CNTL__DPM_ENABLED_MASK;
			break;
		default:
			DRM_INFO("Invalid PCC GPIO: %u!\n", gpio.shift);
			break;
		}
		WREG32_SMC(ixCNB_PWRMGT_CNTL, tmp);
	}

	pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_NONE;
	pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_NONE;
	pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_NONE;
	if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT))
		pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
	else if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
		pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;

	if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL) {
		if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
			pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
		else if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
			pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
		else
			adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL;
	}

	if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_MVDDCONTROL) {
		if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
			pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
		else if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2))
			pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
		else
			adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_MVDDCONTROL;
	}

	pi->vddc_phase_shed_control = true;

#if defined(CONFIG_ACPI)
	pi->pcie_performance_request =
		amdgpu_acpi_is_pcie_performance_request_supported(adev);
#else
	pi->pcie_performance_request = false;
#endif

	if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, &size,
					  &frev, &crev, &data_offset)) {
		pi->caps_sclk_ss_support = true;
		pi->caps_mclk_ss_support = true;
		pi->dynamic_ss = true;
	} else {
		pi->caps_sclk_ss_support = false;
		pi->caps_mclk_ss_support = false;
		pi->dynamic_ss = true;
	}

	if (adev->pm.int_thermal_type != THERMAL_TYPE_NONE)
		pi->thermal_protection = true;
	else
		pi->thermal_protection = false;

	pi->caps_dynamic_ac_timing = true;

	pi->uvd_power_gated = true;

	/* make sure dc limits are valid */
	if ((adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
	    (adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0))
		adev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
			adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;

	pi->fan_ctrl_is_in_default_mode = true;

	return 0;
}

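/*
 * AverageGraphicsA is read back in 1/256ths of a percent; adding 0x80
 * before the shift rounds to the nearest whole percent, and the result
 * is clamped to 100. If the SMC read fails, a placeholder of 50% is
 * reported.
 */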
static void
ci_dpm_debugfs_print_current_performance_level(void *handle,
					       struct seq_file *m)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ci_power_info *pi = ci_get_pi(adev);
	struct amdgpu_ps *rps = &pi->current_rps;
	u32 sclk = ci_get_average_sclk_freq(adev);
	u32 mclk = ci_get_average_mclk_freq(adev);
	u32 activity_percent = 50;
	int ret;

	ret = ci_read_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, AverageGraphicsA),
					&activity_percent);

	if (ret == 0) {
		activity_percent += 0x80;
		activity_percent >>= 8;
		activity_percent = activity_percent > 100 ? 100 : activity_percent;
	}

	seq_printf(m, "uvd %sabled\n", pi->uvd_power_gated ? "dis" : "en");
	seq_printf(m, "vce %sabled\n", rps->vce_active ? "en" : "dis");
	seq_printf(m, "power level avg sclk: %u mclk: %u\n",
		   sclk, mclk);
	seq_printf(m, "GPU load: %u %%\n", activity_percent);
}

static void ci_dpm_print_power_state(void *handle, void *current_ps)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ps *rps = (struct amdgpu_ps *)current_ps;
	struct ci_ps *ps = ci_get_ps(rps);
	struct ci_pl *pl;
	int i;

	amdgpu_dpm_print_class_info(rps->class, rps->class2);
	amdgpu_dpm_print_cap_info(rps->caps);
	printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
	for (i = 0; i < ps->performance_level_count; i++) {
		pl = &ps->performance_levels[i];
		printk("\t\tpower level %d sclk: %u mclk: %u pcie gen: %u pcie lanes: %u\n",
		       i, pl->sclk, pl->mclk, pl->pcie_gen + 1, pl->pcie_lane);
	}
	amdgpu_dpm_print_ps_status(adev, rps);
}

static inline bool ci_are_power_levels_equal(const struct ci_pl *ci_cpl1,
					     const struct ci_pl *ci_cpl2)
{
	return ((ci_cpl1->mclk == ci_cpl2->mclk) &&
		(ci_cpl1->sclk == ci_cpl2->sclk) &&
		(ci_cpl1->pcie_gen == ci_cpl2->pcie_gen) &&
		(ci_cpl1->pcie_lane == ci_cpl2->pcie_lane));
}

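/*
 * Two states are equal only if every performance level matches; the
 * UVD/VCE clocks then act as a tie-breaker so a switch still happens
 * when only the encode/decode clocks differ.
 */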
static int ci_check_state_equal(void *handle,
				void *current_ps,
				void *request_ps,
				bool *equal)
{
	struct ci_ps *ci_cps;
	struct ci_ps *ci_rps;
	int i;
	struct amdgpu_ps *cps = (struct amdgpu_ps *)current_ps;
	struct amdgpu_ps *rps = (struct amdgpu_ps *)request_ps;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev == NULL || cps == NULL || rps == NULL || equal == NULL)
		return -EINVAL;

	ci_cps = ci_get_ps(cps);
	ci_rps = ci_get_ps(rps);

	if (ci_cps == NULL) {
		*equal = false;
		return 0;
	}

	if (ci_cps->performance_level_count != ci_rps->performance_level_count) {
		*equal = false;
		return 0;
	}

	for (i = 0; i < ci_cps->performance_level_count; i++) {
		if (!ci_are_power_levels_equal(&(ci_cps->performance_levels[i]),
					       &(ci_rps->performance_levels[i]))) {
			*equal = false;
			return 0;
		}
	}

	/* If all performance levels are the same try to use the UVD clocks to break the tie.*/
	*equal = ((cps->vclk == rps->vclk) && (cps->dclk == rps->dclk));
	*equal &= ((cps->evclk == rps->evclk) && (cps->ecclk == rps->ecclk));

	return 0;
}

static u32 ci_dpm_get_sclk(void *handle, bool low)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);

	if (low)
		return requested_state->performance_levels[0].sclk;
	else
		return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk;
}

static u32 ci_dpm_get_mclk(void *handle, bool low)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);

	if (low)
		return requested_state->performance_levels[0].mclk;
	else
		return requested_state->performance_levels[requested_state->performance_level_count - 1].mclk;
}

/* get temperature in millidegrees */
static int ci_dpm_get_temp(void *handle)
{
	u32 temp;
	int actual_temp = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	temp = (RREG32_SMC(ixCG_MULT_THERMAL_STATUS) & CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK) >>
		CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT;

	if (temp & 0x200)
		actual_temp = 255;
	else
		actual_temp = temp & 0x1ff;

	actual_temp = actual_temp * 1000;

	return actual_temp;
}

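/*
 * The thermal alert is disabled while the supported trip range is
 * reprogrammed and re-enabled afterwards, presumably to avoid spurious
 * interrupts while the range registers are in flux.
 */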
static int ci_set_temperature_range(struct amdgpu_device *adev)
{
	int ret;

	ret = ci_thermal_enable_alert(adev, false);
	if (ret)
		return ret;
	ret = ci_thermal_set_temperature_range(adev, CISLANDS_TEMP_RANGE_MIN,
					       CISLANDS_TEMP_RANGE_MAX);
	if (ret)
		return ret;
	return ci_thermal_enable_alert(adev, true);
}

static int ci_dpm_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	ci_dpm_set_irq_funcs(adev);

	return 0;
}

static int ci_dpm_late_init(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!amdgpu_dpm)
		return 0;

	/* init the sysfs and debugfs files late */
	ret = amdgpu_pm_sysfs_init(adev);
	if (ret)
		return ret;

	ret = ci_set_temperature_range(adev);
	if (ret)
		return ret;

	return 0;
}

static int ci_dpm_sw_init(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	ret = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 230,
				&adev->pm.dpm.thermal.irq);
	if (ret)
		return ret;

	ret = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 231,
				&adev->pm.dpm.thermal.irq);
	if (ret)
		return ret;

	/* default to balanced state */
	adev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
	adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
	adev->pm.dpm.forced_level = AMD_DPM_FORCED_LEVEL_AUTO;
	adev->pm.default_sclk = adev->clock.default_sclk;
	adev->pm.default_mclk = adev->clock.default_mclk;
	adev->pm.current_sclk = adev->clock.default_sclk;
	adev->pm.current_mclk = adev->clock.default_mclk;
	adev->pm.int_thermal_type = THERMAL_TYPE_NONE;

	ret = ci_dpm_init_microcode(adev);
	if (ret)
		return ret;

	if (amdgpu_dpm == 0)
		return 0;

	INIT_WORK(&adev->pm.dpm.thermal.work, amdgpu_dpm_thermal_work_handler);
	mutex_lock(&adev->pm.mutex);
	ret = ci_dpm_init(adev);
	if (ret)
		goto dpm_failed;
	adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
	if (amdgpu_dpm == 1)
		amdgpu_pm_print_power_states(adev);
	mutex_unlock(&adev->pm.mutex);
	DRM_INFO("amdgpu: dpm initialized\n");

	return 0;

dpm_failed:
	ci_dpm_fini(adev);
	mutex_unlock(&adev->pm.mutex);
	DRM_ERROR("amdgpu: dpm initialization failed\n");
	return ret;
}

static int ci_dpm_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	flush_work(&adev->pm.dpm.thermal.work);

	mutex_lock(&adev->pm.mutex);
	ci_dpm_fini(adev);
	mutex_unlock(&adev->pm.mutex);

	release_firmware(adev->pm.fw);
	adev->pm.fw = NULL;

	return 0;
}

static int ci_dpm_hw_init(void *handle)
{
	int ret;

	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!amdgpu_dpm) {
		ret = ci_upload_firmware(adev);
		if (ret) {
			DRM_ERROR("ci_upload_firmware failed\n");
			return ret;
		}
		ci_dpm_start_smc(adev);
		return 0;
	}

	mutex_lock(&adev->pm.mutex);
	ci_dpm_setup_asic(adev);
	ret = ci_dpm_enable(adev);
	if (ret)
		adev->pm.dpm_enabled = false;
	else
		adev->pm.dpm_enabled = true;
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

static int ci_dpm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->pm.dpm_enabled) {
		mutex_lock(&adev->pm.mutex);
		ci_dpm_disable(adev);
		mutex_unlock(&adev->pm.mutex);
	} else {
		ci_dpm_stop_smc(adev);
	}

	return 0;
}

static int ci_dpm_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->pm.dpm_enabled) {
		mutex_lock(&adev->pm.mutex);
		amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
			       AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
		amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
			       AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);
		adev->pm.dpm.last_user_state = adev->pm.dpm.user_state;
		adev->pm.dpm.last_state = adev->pm.dpm.state;
		adev->pm.dpm.user_state = POWER_STATE_TYPE_INTERNAL_BOOT;
		adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_BOOT;
		mutex_unlock(&adev->pm.mutex);
		amdgpu_pm_compute_clocks(adev);
	}

	return 0;
}

static int ci_dpm_resume(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->pm.dpm_enabled) {
		/* asic init will reset to the boot state */
		mutex_lock(&adev->pm.mutex);
		ci_dpm_setup_asic(adev);
		ret = ci_dpm_enable(adev);
		if (ret)
			adev->pm.dpm_enabled = false;
		else
			adev->pm.dpm_enabled = true;
		adev->pm.dpm.user_state = adev->pm.dpm.last_user_state;
		adev->pm.dpm.state = adev->pm.dpm.last_state;
		mutex_unlock(&adev->pm.mutex);
		if (adev->pm.dpm_enabled)
			amdgpu_pm_compute_clocks(adev);
	}
	return 0;
}

static bool ci_dpm_is_idle(void *handle)
{
	/* XXX */
	return true;
}

static int ci_dpm_wait_for_idle(void *handle)
{
	/* XXX */
	return 0;
}

static int ci_dpm_soft_reset(void *handle)
{
	return 0;
}

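/*
 * Thermal interrupts use two sources: src_id 230 fires when the
 * temperature rises past the high trip point and 231 when it falls
 * below the low one; each direction is masked individually in
 * CG_THERMAL_INT.
 */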
static int ci_dpm_set_interrupt_state(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      unsigned type,
				      enum amdgpu_interrupt_state state)
{
	u32 cg_thermal_int;

	switch (type) {
	case AMDGPU_THERMAL_IRQ_LOW_TO_HIGH:
		switch (state) {
		case AMDGPU_IRQ_STATE_DISABLE:
			cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
			cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
			WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
			break;
		case AMDGPU_IRQ_STATE_ENABLE:
			cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
			cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
			WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
			break;
		default:
			break;
		}
		break;

	case AMDGPU_THERMAL_IRQ_HIGH_TO_LOW:
		switch (state) {
		case AMDGPU_IRQ_STATE_DISABLE:
			cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
			cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
			WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
			break;
		case AMDGPU_IRQ_STATE_ENABLE:
			cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
			cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
			WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
			break;
		default:
			break;
		}
		break;

	default:
		break;
	}
	return 0;
}

static int ci_dpm_process_interrupt(struct amdgpu_device *adev,
				    struct amdgpu_irq_src *source,
				    struct amdgpu_iv_entry *entry)
{
	bool queue_thermal = false;

	if (entry == NULL)
		return -EINVAL;

	switch (entry->src_id) {
	case 230: /* thermal low to high */
		DRM_DEBUG("IH: thermal low to high\n");
		adev->pm.dpm.thermal.high_to_low = false;
		queue_thermal = true;
		break;
	case 231: /* thermal high to low */
		DRM_DEBUG("IH: thermal high to low\n");
		adev->pm.dpm.thermal.high_to_low = true;
		queue_thermal = true;
		break;
	default:
		break;
	}

	if (queue_thermal)
		schedule_work(&adev->pm.dpm.thermal.work);

	return 0;
}

static int ci_dpm_set_clockgating_state(void *handle,
					enum amd_clockgating_state state)
{
	return 0;
}

static int ci_dpm_set_powergating_state(void *handle,
					enum amd_powergating_state state)
{
	return 0;
}

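/*
 * The sclk/mclk DPM level values below are stored in 10 kHz units,
 * hence the divide by 100 when printing MHz.
 */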
static int ci_dpm_print_clock_levels(void *handle,
		enum pp_clock_type type, char *buf)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table;
	struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table;
	struct ci_single_dpm_table *pcie_table = &pi->dpm_table.pcie_speed_table;

	int i, now, size = 0;
	uint32_t clock, pcie_speed;

	switch (type) {
	case PP_SCLK:
		amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_API_GetSclkFrequency);
		clock = RREG32(mmSMC_MSG_ARG_0);

		for (i = 0; i < sclk_table->count; i++) {
			if (clock > sclk_table->dpm_levels[i].value)
				continue;
			break;
		}
		now = i;

		for (i = 0; i < sclk_table->count; i++)
			size += sprintf(buf + size, "%d: %uMHz %s\n",
					i, sclk_table->dpm_levels[i].value / 100,
					(i == now) ? "*" : "");
		break;
	case PP_MCLK:
		amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_API_GetMclkFrequency);
		clock = RREG32(mmSMC_MSG_ARG_0);

		for (i = 0; i < mclk_table->count; i++) {
			if (clock > mclk_table->dpm_levels[i].value)
				continue;
			break;
		}
		now = i;

		for (i = 0; i < mclk_table->count; i++)
			size += sprintf(buf + size, "%d: %uMHz %s\n",
					i, mclk_table->dpm_levels[i].value / 100,
					(i == now) ? "*" : "");
		break;
	case PP_PCIE:
		pcie_speed = ci_get_current_pcie_speed(adev);
		for (i = 0; i < pcie_table->count; i++) {
			if (pcie_speed != pcie_table->dpm_levels[i].value)
				continue;
			break;
		}
		now = i;

		for (i = 0; i < pcie_table->count; i++)
			size += sprintf(buf + size, "%d: %s %s\n", i,
					(pcie_table->dpm_levels[i].value == 0) ? "2.5GB, x1" :
					(pcie_table->dpm_levels[i].value == 1) ? "5.0GB, x16" :
					(pcie_table->dpm_levels[i].value == 2) ? "8.0GB, x16" : "",
					(i == now) ? "*" : "");
		break;
	default:
		break;
	}

	return size;
}

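/*
 * Restrict DPM to the levels set in @mask (bit N = level N), e.g. a
 * mask of 0x4 pins the clock to level 2. The PCIe message takes a
 * single level rather than a mask, so the highest set bit is forced.
 */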
static int ci_dpm_force_clock_level(void *handle,
		enum pp_clock_type type, uint32_t mask)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ci_power_info *pi = ci_get_pi(adev);

	if (adev->pm.dpm.forced_level & (AMD_DPM_FORCED_LEVEL_AUTO |
				AMD_DPM_FORCED_LEVEL_LOW |
				AMD_DPM_FORCED_LEVEL_HIGH))
		return -EINVAL;

	switch (type) {
	case PP_SCLK:
		if (!pi->sclk_dpm_key_disabled)
			amdgpu_ci_send_msg_to_smc_with_parameter(adev,
					PPSMC_MSG_SCLKDPM_SetEnabledMask,
					pi->dpm_level_enable_mask.sclk_dpm_enable_mask & mask);
		break;

	case PP_MCLK:
		if (!pi->mclk_dpm_key_disabled)
			amdgpu_ci_send_msg_to_smc_with_parameter(adev,
					PPSMC_MSG_MCLKDPM_SetEnabledMask,
					pi->dpm_level_enable_mask.mclk_dpm_enable_mask & mask);
		break;

	case PP_PCIE:
	{
		uint32_t tmp = mask & pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
		uint32_t level = 0;

		while (tmp >>= 1)
			level++;

		if (!pi->pcie_dpm_key_disabled)
			amdgpu_ci_send_msg_to_smc_with_parameter(adev,
					PPSMC_MSG_PCIeDPM_ForceLevel,
					level);
		break;
	}
	default:
		break;
	}

	return 0;
}

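/*
 * Report the sclk overdrive setting as the percentage by which the
 * current top DPM level exceeds the golden (default) top level, e.g.
 * 1050 MHz against a 1000 MHz default reads back as 5.
 */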
static int ci_dpm_get_sclk_od(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_single_dpm_table *sclk_table = &(pi->dpm_table.sclk_table);
	struct ci_single_dpm_table *golden_sclk_table =
			&(pi->golden_dpm_table.sclk_table);
	int value;

	value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
			golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) *
			100 /
			golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;

	return value;
}

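/*
 * Apply an sclk overdrive of @value percent (clamped to 20) on top of
 * the golden top level: with a 1000 MHz default, value = 10 yields a
 * 1100 MHz top performance level.
 */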
static int ci_dpm_set_sclk_od(void *handle, uint32_t value)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_ps *ps = ci_get_ps(adev->pm.dpm.requested_ps);
	struct ci_single_dpm_table *golden_sclk_table =
			&(pi->golden_dpm_table.sclk_table);

	if (value > 20)
		value = 20;

	ps->performance_levels[ps->performance_level_count - 1].sclk =
			golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value *
			value / 100 +
			golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;

	return 0;
}

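/* The mclk overdrive pair below mirrors the sclk pair above. */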
static int ci_dpm_get_mclk_od(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_single_dpm_table *mclk_table = &(pi->dpm_table.mclk_table);
	struct ci_single_dpm_table *golden_mclk_table =
			&(pi->golden_dpm_table.mclk_table);
	int value;

	value = (mclk_table->dpm_levels[mclk_table->count - 1].value -
			golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) *
			100 /
			golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;

	return value;
}

static int ci_dpm_set_mclk_od(void *handle, uint32_t value)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_ps *ps = ci_get_ps(adev->pm.dpm.requested_ps);
	struct ci_single_dpm_table *golden_mclk_table =
			&(pi->golden_dpm_table.mclk_table);

	if (value > 20)
		value = 20;

	ps->performance_levels[ps->performance_level_count - 1].mclk =
			golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value *
			value / 100 +
			golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;

	return 0;
}

static int ci_dpm_get_power_profile_state(void *handle,
		struct amd_pp_profile *query)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ci_power_info *pi = ci_get_pi(adev);

	if (!pi || !query)
		return -EINVAL;

	if (query->type == AMD_PP_GFX_PROFILE)
		memcpy(query, &pi->gfx_power_profile,
		       sizeof(struct amd_pp_profile));
	else if (query->type == AMD_PP_COMPUTE_PROFILE)
		memcpy(query, &pi->compute_power_profile,
		       sizeof(struct amd_pp_profile));
	else
		return -EINVAL;

	return 0;
}

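/*
 * Write the profile's activity threshold and up/down hysteresis into
 * every graphics DPM level, then upload the whole level array to SMC
 * SRAM. ActivityLevel is stored big-endian, hence the cpu_to_be16().
 */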
static int ci_populate_requested_graphic_levels(struct amdgpu_device *adev,
		struct amd_pp_profile *request)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_dpm_table *dpm_table = &(pi->dpm_table);
	struct SMU7_Discrete_GraphicsLevel *levels =
			pi->smc_state_table.GraphicsLevel;
	uint32_t array = pi->dpm_table_start +
			offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
	uint32_t array_size = sizeof(struct SMU7_Discrete_GraphicsLevel) *
			SMU7_MAX_LEVELS_GRAPHICS;
	uint32_t i;

	for (i = 0; i < dpm_table->sclk_table.count; i++) {
		levels[i].ActivityLevel =
				cpu_to_be16(request->activity_threshold);
		levels[i].EnabledForActivity = 1;
		levels[i].UpH = request->up_hyst;
		levels[i].DownH = request->down_hyst;
	}

	return amdgpu_ci_copy_bytes_to_smc(adev, array, (uint8_t *)levels,
				array_size, pi->sram_end);
}

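/*
 * Build bitmasks of the enabled sclk/mclk DPM levels whose clocks are
 * at or above the requested minimums.
 */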
static void ci_find_min_clock_masks(struct amdgpu_device *adev,
		uint32_t *sclk_mask, uint32_t *mclk_mask,
		uint32_t min_sclk, uint32_t min_mclk)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_dpm_table *dpm_table = &(pi->dpm_table);
	uint32_t i;

	for (i = 0; i < dpm_table->sclk_table.count; i++) {
		if (dpm_table->sclk_table.dpm_levels[i].enabled &&
			dpm_table->sclk_table.dpm_levels[i].value >= min_sclk)
			*sclk_mask |= 1 << i;
	}

	for (i = 0; i < dpm_table->mclk_table.count; i++) {
		if (dpm_table->mclk_table.dpm_levels[i].enabled &&
			dpm_table->mclk_table.dpm_levels[i].value >= min_mclk)
			*mclk_mask |= 1 << i;
	}
}

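/*
 * Apply a power profile: freeze sclk/mclk DPM, rewrite the graphics
 * levels with the profile's parameters, unfreeze, then restrict the
 * enabled-level masks to levels that satisfy the profile's minimum
 * clocks. Errors are reported but the sequence runs to completion.
 */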
static int ci_set_power_profile_state(struct amdgpu_device *adev,
		struct amd_pp_profile *request)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	int tmp_result, result = 0;
	uint32_t sclk_mask = 0, mclk_mask = 0;

	tmp_result = ci_freeze_sclk_mclk_dpm(adev);
	if (tmp_result) {
		DRM_ERROR("Failed to freeze SCLK MCLK DPM!");
		result = tmp_result;
	}

	tmp_result = ci_populate_requested_graphic_levels(adev,
			request);
	if (tmp_result) {
		DRM_ERROR("Failed to populate requested graphic levels!");
		result = tmp_result;
	}

	tmp_result = ci_unfreeze_sclk_mclk_dpm(adev);
	if (tmp_result) {
		DRM_ERROR("Failed to unfreeze SCLK MCLK DPM!");
		result = tmp_result;
	}

	ci_find_min_clock_masks(adev, &sclk_mask, &mclk_mask,
			request->min_sclk, request->min_mclk);

	if (sclk_mask) {
		if (!pi->sclk_dpm_key_disabled)
			amdgpu_ci_send_msg_to_smc_with_parameter(
				adev,
				PPSMC_MSG_SCLKDPM_SetEnabledMask,
				pi->dpm_level_enable_mask.
				sclk_dpm_enable_mask &
				sclk_mask);
	}

	if (mclk_mask) {
		if (!pi->mclk_dpm_key_disabled)
			amdgpu_ci_send_msg_to_smc_with_parameter(
				adev,
				PPSMC_MSG_MCLKDPM_SetEnabledMask,
				pi->dpm_level_enable_mask.
				mclk_dpm_enable_mask &
				mclk_mask);
	}

	return result;
}

static int ci_dpm_set_power_profile_state(void *handle,
		struct amd_pp_profile *request)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ci_power_info *pi = ci_get_pi(adev);
	int ret = -1;

	if (!pi || !request)
		return -EINVAL;

	if (adev->pm.dpm.forced_level !=
			AMD_DPM_FORCED_LEVEL_AUTO)
		return -EINVAL;

	if (request->min_sclk ||
		request->min_mclk ||
		request->activity_threshold ||
		request->up_hyst ||
		request->down_hyst) {
		if (request->type == AMD_PP_GFX_PROFILE)
			memcpy(&pi->gfx_power_profile, request,
				sizeof(struct amd_pp_profile));
		else if (request->type == AMD_PP_COMPUTE_PROFILE)
			memcpy(&pi->compute_power_profile, request,
				sizeof(struct amd_pp_profile));
		else
			return -EINVAL;

		if (request->type == pi->current_power_profile)
			ret = ci_set_power_profile_state(
				adev,
				request);
	} else {
		/* set power profile if it exists */
		switch (request->type) {
		case AMD_PP_GFX_PROFILE:
			ret = ci_set_power_profile_state(
				adev,
				&pi->gfx_power_profile);
			break;
		case AMD_PP_COMPUTE_PROFILE:
			ret = ci_set_power_profile_state(
				adev,
				&pi->compute_power_profile);
			break;
		default:
			return -EINVAL;
		}
	}

	if (!ret)
		pi->current_power_profile = request->type;

	return 0;
}

static int ci_dpm_reset_power_profile_state(void *handle,
		struct amd_pp_profile *request)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ci_power_info *pi = ci_get_pi(adev);

	if (!pi || !request)
		return -EINVAL;

	if (request->type == AMD_PP_GFX_PROFILE) {
		pi->gfx_power_profile = pi->default_gfx_power_profile;
		return ci_dpm_set_power_profile_state(adev,
				&pi->gfx_power_profile);
	} else if (request->type == AMD_PP_COMPUTE_PROFILE) {
		pi->compute_power_profile =
			pi->default_compute_power_profile;
		return ci_dpm_set_power_profile_state(adev,
				&pi->compute_power_profile);
	} else
		return -EINVAL;
}

static int ci_dpm_switch_power_profile(void *handle,
		enum amd_pp_profile_type type)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ci_power_info *pi = ci_get_pi(adev);
	struct amd_pp_profile request = {0};

	if (!pi)
		return -EINVAL;

	if (pi->current_power_profile != type) {
		request.type = type;
		return ci_dpm_set_power_profile_state(adev, &request);
	}

	return 0;
}

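/*
 * For GPU load, AverageGraphicsA is treated as an 8.8 fixed-point
 * activity value: adding 0x80 rounds, the shift by 8 converts it to
 * an integer percentage, and the result is clamped at 100.
 */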
static int ci_dpm_read_sensor(void *handle, int idx,
			      void *value, int *size)
{
	u32 activity_percent = 50;
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* size must be at least 4 bytes for all sensors */
	if (*size < 4)
		return -EINVAL;

	switch (idx) {
	case AMDGPU_PP_SENSOR_GFX_SCLK:
		*((uint32_t *)value) = ci_get_average_sclk_freq(adev);
		*size = 4;
		return 0;
	case AMDGPU_PP_SENSOR_GFX_MCLK:
		*((uint32_t *)value) = ci_get_average_mclk_freq(adev);
		*size = 4;
		return 0;
	case AMDGPU_PP_SENSOR_GPU_TEMP:
		*((uint32_t *)value) = ci_dpm_get_temp(adev);
		*size = 4;
		return 0;
	case AMDGPU_PP_SENSOR_GPU_LOAD:
		ret = ci_read_smc_soft_register(adev,
						offsetof(SMU7_SoftRegisters,
							 AverageGraphicsA),
						&activity_percent);
		if (ret == 0) {
			activity_percent += 0x80;
			activity_percent >>= 8;
			activity_percent =
				activity_percent > 100 ? 100 : activity_percent;
		}
		*((uint32_t *)value) = activity_percent;
		*size = 4;
		return 0;
	default:
		return -EINVAL;
	}
}

const struct amd_ip_funcs ci_dpm_ip_funcs = {
	.name = "ci_dpm",
	.early_init = ci_dpm_early_init,
	.late_init = ci_dpm_late_init,
	.sw_init = ci_dpm_sw_init,
	.sw_fini = ci_dpm_sw_fini,
	.hw_init = ci_dpm_hw_init,
	.hw_fini = ci_dpm_hw_fini,
	.suspend = ci_dpm_suspend,
	.resume = ci_dpm_resume,
	.is_idle = ci_dpm_is_idle,
	.wait_for_idle = ci_dpm_wait_for_idle,
	.soft_reset = ci_dpm_soft_reset,
	.set_clockgating_state = ci_dpm_set_clockgating_state,
	.set_powergating_state = ci_dpm_set_powergating_state,
};

const struct amd_pm_funcs ci_dpm_funcs = {
	.get_temperature = &ci_dpm_get_temp,
	.pre_set_power_state = &ci_dpm_pre_set_power_state,
	.set_power_state = &ci_dpm_set_power_state,
	.post_set_power_state = &ci_dpm_post_set_power_state,
	.display_configuration_changed = &ci_dpm_display_configuration_changed,
	.get_sclk = &ci_dpm_get_sclk,
	.get_mclk = &ci_dpm_get_mclk,
	.print_power_state = &ci_dpm_print_power_state,
	.debugfs_print_current_performance_level = &ci_dpm_debugfs_print_current_performance_level,
	.force_performance_level = &ci_dpm_force_performance_level,
	.vblank_too_short = &ci_dpm_vblank_too_short,
	.powergate_uvd = &ci_dpm_powergate_uvd,
	.set_fan_control_mode = &ci_dpm_set_fan_control_mode,
	.get_fan_control_mode = &ci_dpm_get_fan_control_mode,
	.set_fan_speed_percent = &ci_dpm_set_fan_speed_percent,
	.get_fan_speed_percent = &ci_dpm_get_fan_speed_percent,
	.print_clock_levels = ci_dpm_print_clock_levels,
	.force_clock_level = ci_dpm_force_clock_level,
	.get_sclk_od = ci_dpm_get_sclk_od,
	.set_sclk_od = ci_dpm_set_sclk_od,
	.get_mclk_od = ci_dpm_get_mclk_od,
	.set_mclk_od = ci_dpm_set_mclk_od,
	.check_state_equal = ci_check_state_equal,
	.get_vce_clock_state = amdgpu_get_vce_clock_state,
	.get_power_profile_state = ci_dpm_get_power_profile_state,
	.set_power_profile_state = ci_dpm_set_power_profile_state,
	.reset_power_profile_state = ci_dpm_reset_power_profile_state,
	.switch_power_profile = ci_dpm_switch_power_profile,
	.read_sensor = ci_dpm_read_sensor,
};

static const struct amdgpu_irq_src_funcs ci_dpm_irq_funcs = {
	.set = ci_dpm_set_interrupt_state,
	.process = ci_dpm_process_interrupt,
};

static void ci_dpm_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST;
	adev->pm.dpm.thermal.irq.funcs = &ci_dpm_irq_funcs;
}