/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_ucode.h"
#include "cikd.h"
#include "amdgpu_dpm.h"
#include "ci_dpm.h"
#include "gfx_v7_0.h"
#include "atom.h"
#include "amd_pcie.h"
#include <linux/seq_file.h>

#include "smu/smu_7_0_1_d.h"
#include "smu/smu_7_0_1_sh_mask.h"

#include "dce/dce_8_0_d.h"
#include "dce/dce_8_0_sh_mask.h"

#include "bif/bif_4_1_d.h"
#include "bif/bif_4_1_sh_mask.h"

#include "gca/gfx_7_2_d.h"
#include "gca/gfx_7_2_sh_mask.h"

#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"

MODULE_FIRMWARE("radeon/bonaire_smc.bin");
MODULE_FIRMWARE("radeon/bonaire_k_smc.bin");
MODULE_FIRMWARE("radeon/hawaii_smc.bin");
MODULE_FIRMWARE("radeon/hawaii_k_smc.bin");

#define MC_CG_ARB_FREQ_F0	0x0a
#define MC_CG_ARB_FREQ_F1	0x0b
#define MC_CG_ARB_FREQ_F2	0x0c
#define MC_CG_ARB_FREQ_F3	0x0d

#define SMC_RAM_END	0x40000

#define VOLTAGE_SCALE	4
#define VOLTAGE_VID_OFFSET_SCALE1	625
#define VOLTAGE_VID_OFFSET_SCALE2	100

static const struct ci_pt_defaults defaults_hawaii_xt =
{
	1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000,
	{ 0x2E, 0x00, 0x00, 0x88, 0x00, 0x00, 0x72, 0x60, 0x51, 0xA7, 0x79, 0x6B, 0x90, 0xBD, 0x79 },
	{ 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
};

static const struct ci_pt_defaults defaults_hawaii_pro =
{
	1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0x65062,
	{ 0x2E, 0x00, 0x00, 0x88, 0x00, 0x00, 0x72, 0x60, 0x51, 0xA7, 0x79, 0x6B, 0x90, 0xBD, 0x79 },
	{ 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
};

static const struct ci_pt_defaults defaults_bonaire_xt =
{
	1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
	{ 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61 },
	{ 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
};

#if 0
static const struct ci_pt_defaults defaults_bonaire_pro =
{
	1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x65062,
	{ 0x8C, 0x23F, 0x244, 0xA6, 0x83, 0x85, 0x86, 0x86, 0x83, 0xDB, 0xDB, 0xDA, 0x67, 0x60, 0x5F },
	{ 0x187, 0x193, 0x193, 0x1C7, 0x1D1, 0x1D1, 0x210, 0x219, 0x219, 0x266, 0x26C, 0x26C, 0x2C9, 0x2CB, 0x2CB }
};
#endif

static const struct ci_pt_defaults defaults_saturn_xt =
{
	1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x70000,
	{ 0x8C, 0x247, 0x249, 0xA6, 0x80, 0x81, 0x8B, 0x89, 0x86, 0xC9, 0xCA, 0xC9, 0x4D, 0x4D, 0x4D },
	{ 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 }
};

#if 0
static const struct ci_pt_defaults defaults_saturn_pro =
{
	1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x30000,
	{ 0x96, 0x21D, 0x23B, 0xA1, 0x85, 0x87, 0x83, 0x84, 0x81, 0xE6, 0xE6, 0xE6, 0x71, 0x6A, 0x6A },
	{ 0x193, 0x19E, 0x19E, 0x1D2, 0x1DC, 0x1DC, 0x21A, 0x223, 0x223, 0x26E, 0x27E, 0x274, 0x2CF, 0x2D2, 0x2D2 }
};
#endif

static const struct ci_pt_config_reg didt_config_ci[] =
{
	{ 0x10, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x10, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x10, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x10, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x2, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x2, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x2, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x1, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x1, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x0, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x22, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x22, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x22, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x21, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x21, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x20, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x42, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x42, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x42, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x41, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x41, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x40, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x62, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x62, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x62, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x61, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x61, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x60, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0xFFFFFFFF }
};

static u8 ci_get_memory_module_index(struct amdgpu_device *adev)
{
	return (u8) ((RREG32(mmBIOS_SCRATCH_4) >> 16) & 0xff);
}

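/*
 * Copy one MC arbiter register set (DRAM timings plus the per-state
 * burst time) to another and request a switch to the destination set.
 * Only the F0 and F1 sets are handled here.
 */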
static int ci_copy_and_switch_arb_sets(struct amdgpu_device *adev,
				       u32 arb_freq_src, u32 arb_freq_dest)
{
	u32 mc_arb_dram_timing;
	u32 mc_arb_dram_timing2;
	u32 burst_time;
	u32 mc_cg_config;

	switch (arb_freq_src) {
	case MC_CG_ARB_FREQ_F0:
		mc_arb_dram_timing = RREG32(mmMC_ARB_DRAM_TIMING);
		mc_arb_dram_timing2 = RREG32(mmMC_ARB_DRAM_TIMING2);
		burst_time = (RREG32(mmMC_ARB_BURST_TIME) & MC_ARB_BURST_TIME__STATE0_MASK) >>
			MC_ARB_BURST_TIME__STATE0__SHIFT;
		break;
	case MC_CG_ARB_FREQ_F1:
		mc_arb_dram_timing = RREG32(mmMC_ARB_DRAM_TIMING_1);
		mc_arb_dram_timing2 = RREG32(mmMC_ARB_DRAM_TIMING2_1);
		burst_time = (RREG32(mmMC_ARB_BURST_TIME) & MC_ARB_BURST_TIME__STATE1_MASK) >>
			MC_ARB_BURST_TIME__STATE1__SHIFT;
		break;
	default:
		return -EINVAL;
	}

	switch (arb_freq_dest) {
	case MC_CG_ARB_FREQ_F0:
		WREG32(mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing);
		WREG32(mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
		WREG32_P(mmMC_ARB_BURST_TIME, (burst_time << MC_ARB_BURST_TIME__STATE0__SHIFT),
			 ~MC_ARB_BURST_TIME__STATE0_MASK);
		break;
	case MC_CG_ARB_FREQ_F1:
		WREG32(mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
		WREG32(mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
		WREG32_P(mmMC_ARB_BURST_TIME, (burst_time << MC_ARB_BURST_TIME__STATE1__SHIFT),
			 ~MC_ARB_BURST_TIME__STATE1_MASK);
		break;
	default:
		return -EINVAL;
	}

	mc_cg_config = RREG32(mmMC_CG_CONFIG) | 0x0000000F;
	WREG32(mmMC_CG_CONFIG, mc_cg_config);
	WREG32_P(mmMC_ARB_CG, (arb_freq_dest) << MC_ARB_CG__CG_ARB_REQ__SHIFT,
		 ~MC_ARB_CG__CG_ARB_REQ_MASK);

	return 0;
}

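/*
 * Helpers converting a memory clock into the 4-bit frequency-ratio
 * index used by the MC timing tables; one flavor for DDR3, one for
 * the other memory types (clocks are presumably in 10 kHz units, so
 * 80000 == 800 MHz).
 */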
static u8 ci_get_ddr3_mclk_frequency_ratio(u32 memory_clock)
{
	u8 mc_para_index;

	if (memory_clock < 10000)
		mc_para_index = 0;
	else if (memory_clock >= 80000)
		mc_para_index = 0x0f;
	else
		mc_para_index = (u8)((memory_clock - 10000) / 5000 + 1);
	return mc_para_index;
}

static u8 ci_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode)
{
	u8 mc_para_index;

	if (strobe_mode) {
		if (memory_clock < 12500)
			mc_para_index = 0x00;
		else if (memory_clock > 47500)
			mc_para_index = 0x0f;
		else
			mc_para_index = (u8)((memory_clock - 10000) / 2500);
	} else {
		if (memory_clock < 65000)
			mc_para_index = 0x00;
		else if (memory_clock > 135000)
			mc_para_index = 0x0f;
		else
			mc_para_index = (u8)((memory_clock - 60000) / 5000);
	}
	return mc_para_index;
}

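/*
 * If the voltage table holds more entries than the SMC state table
 * can take, drop the lowest entries so only the top max_voltage_steps
 * voltages remain.
 */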
static void ci_trim_voltage_table_to_fit_state_table(struct amdgpu_device *adev,
						     u32 max_voltage_steps,
						     struct atom_voltage_table *voltage_table)
{
	unsigned int i, diff;

	if (voltage_table->count <= max_voltage_steps)
		return;

	diff = voltage_table->count - max_voltage_steps;

	for (i = 0; i < max_voltage_steps; i++)
		voltage_table->entries[i] = voltage_table->entries[i + diff];

	voltage_table->count = max_voltage_steps;
}

static int ci_get_std_voltage_value_sidd(struct amdgpu_device *adev,
					 struct atom_voltage_table_entry *voltage_table,
					 u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd);
static int ci_set_power_limit(struct amdgpu_device *adev, u32 n);
static int ci_set_overdrive_target_tdp(struct amdgpu_device *adev,
				       u32 target_tdp);
static int ci_update_uvd_dpm(struct amdgpu_device *adev, bool gate);
static void ci_dpm_set_irq_funcs(struct amdgpu_device *adev);

static PPSMC_Result amdgpu_ci_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
							     PPSMC_Msg msg, u32 parameter);
static void ci_thermal_start_smc_fan_control(struct amdgpu_device *adev);
static void ci_fan_ctrl_set_default_mode(struct amdgpu_device *adev);

static struct ci_power_info *ci_get_pi(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = adev->pm.dpm.priv;

	return pi;
}

static struct ci_ps *ci_get_ps(struct amdgpu_ps *rps)
{
	struct ci_ps *ps = rps->ps_priv;

	return ps;
}

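/*
 * Select the PowerTune defaults for this SKU by PCI device ID
 * (Bonaire, Saturn and Hawaii XT/PRO variants) and set the baseline
 * power-containment capabilities.
 */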
static void ci_initialize_powertune_defaults(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);

	switch (adev->pdev->device) {
	case 0x6649:
	case 0x6650:
	case 0x6651:
	case 0x6658:
	case 0x665C:
	case 0x665D:
	default:
		pi->powertune_defaults = &defaults_bonaire_xt;
		break;
	case 0x6640:
	case 0x6641:
	case 0x6646:
	case 0x6647:
		pi->powertune_defaults = &defaults_saturn_xt;
		break;
	case 0x67B8:
	case 0x67B0:
		pi->powertune_defaults = &defaults_hawaii_xt;
		break;
	case 0x67BA:
	case 0x67B1:
		pi->powertune_defaults = &defaults_hawaii_pro;
		break;
	case 0x67A0:
	case 0x67A1:
	case 0x67A2:
	case 0x67A8:
	case 0x67A9:
	case 0x67AA:
	case 0x67B9:
	case 0x67BE:
		pi->powertune_defaults = &defaults_bonaire_xt;
		break;
	}

	pi->dte_tj_offset = 0;

	pi->caps_power_containment = true;
	pi->caps_cac = false;
	pi->caps_sq_ramping = false;
	pi->caps_db_ramping = false;
	pi->caps_td_ramping = false;
	pi->caps_tcp_ramping = false;

	if (pi->caps_power_containment) {
		pi->caps_cac = true;
		if (adev->asic_type == CHIP_HAWAII)
			pi->enable_bapm_feature = false;
		else
			pi->enable_bapm_feature = true;
		pi->enable_tdc_limit_feature = true;
		pi->enable_pkg_pwr_tracking_feature = true;
	}
}

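/*
 * Convert a VDDC value in mV into a VID code.  The formula matches
 * the SVI2 encoding (voltage = 1.55 V - VID * 6.25 mV): e.g.
 * 1100 mV -> (6200 - 4 * 1100) / 25 = 72.
 */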
static u8 ci_convert_to_vid(u16 vddc)
{
	return (6200 - (vddc * VOLTAGE_SCALE)) / 25;
}

static int ci_populate_bapm_vddc_vid_sidd(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
	u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
	u8 *hi2_vid = pi->smc_powertune_table.BapmVddCVidHiSidd2;
	u32 i;

	if (adev->pm.dpm.dyn_state.cac_leakage_table.entries == NULL)
		return -EINVAL;
	if (adev->pm.dpm.dyn_state.cac_leakage_table.count > 8)
		return -EINVAL;
	if (adev->pm.dpm.dyn_state.cac_leakage_table.count !=
	    adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count)
		return -EINVAL;

	for (i = 0; i < adev->pm.dpm.dyn_state.cac_leakage_table.count; i++) {
		if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
			lo_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1);
			hi_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2);
			hi2_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3);
		} else {
			lo_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc);
			hi_vid[i] = ci_convert_to_vid((u16)adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage);
		}
	}
	return 0;
}

static int ci_populate_vddc_vid(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u8 *vid = pi->smc_powertune_table.VddCVid;
	u32 i;

	if (pi->vddc_voltage_table.count > 8)
		return -EINVAL;

	for (i = 0; i < pi->vddc_voltage_table.count; i++)
		vid[i] = ci_convert_to_vid(pi->vddc_voltage_table.entries[i].value);

	return 0;
}

static int ci_populate_svi_load_line(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;

	pi->smc_powertune_table.SviLoadLineEn = pt_defaults->svi_load_line_en;
	pi->smc_powertune_table.SviLoadLineVddC = pt_defaults->svi_load_line_vddc;
	pi->smc_powertune_table.SviLoadLineTrimVddC = 3;
	pi->smc_powertune_table.SviLoadLineOffsetVddC = 0;

	return 0;
}

static int ci_populate_tdc_limit(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
	u16 tdc_limit;

	tdc_limit = adev->pm.dpm.dyn_state.cac_tdp_table->tdc * 256;
	pi->smc_powertune_table.TDC_VDDC_PkgLimit = cpu_to_be16(tdc_limit);
	pi->smc_powertune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
		pt_defaults->tdc_vddc_throttle_release_limit_perc;
	pi->smc_powertune_table.TDC_MAWt = pt_defaults->tdc_mawt;

	return 0;
}

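/*
 * "dw8" is the TdcWaterfallCtl dword of the PM fuse table.  The SRAM
 * read below mainly confirms the fuse table location is reachable;
 * whenever it succeeds the value is overwritten with the per-SKU
 * default anyway.
 */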
static int ci_populate_dw8(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
	int ret;

	ret = amdgpu_ci_read_smc_sram_dword(adev,
					    SMU7_FIRMWARE_HEADER_LOCATION +
					    offsetof(SMU7_Firmware_Header, PmFuseTable) +
					    offsetof(SMU7_Discrete_PmFuses, TdcWaterfallCtl),
					    (u32 *)&pi->smc_powertune_table.TdcWaterfallCtl,
					    pi->sram_end);
	if (ret)
		return -EINVAL;
	else
		pi->smc_powertune_table.TdcWaterfallCtl = pt_defaults->tdc_waterfall_ctl;

	return 0;
}

static int ci_populate_fuzzy_fan(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);

	if ((adev->pm.dpm.fan.fan_output_sensitivity & (1 << 15)) ||
	    (adev->pm.dpm.fan.fan_output_sensitivity == 0))
		adev->pm.dpm.fan.fan_output_sensitivity =
			adev->pm.dpm.fan.default_fan_output_sensitivity;

	pi->smc_powertune_table.FuzzyFan_PwmSetDelta =
		cpu_to_be16(adev->pm.dpm.fan.fan_output_sensitivity);

	return 0;
}

static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
	u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
	int i, min, max;

	min = max = hi_vid[0];
	for (i = 0; i < 8; i++) {
		if (0 != hi_vid[i]) {
			if (min > hi_vid[i])
				min = hi_vid[i];
			if (max < hi_vid[i])
				max = hi_vid[i];
		}

		if (0 != lo_vid[i]) {
			if (min > lo_vid[i])
				min = lo_vid[i];
			if (max < lo_vid[i])
				max = lo_vid[i];
		}
	}

	if ((min == 0) || (max == 0))
		return -EINVAL;
	pi->smc_powertune_table.GnbLPMLMaxVid = (u8)max;
	pi->smc_powertune_table.GnbLPMLMinVid = (u8)min;

	return 0;
}

static int ci_populate_bapm_vddc_base_leakage_sidd(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u16 hi_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd;
	u16 lo_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd;
	struct amdgpu_cac_tdp_table *cac_tdp_table =
		adev->pm.dpm.dyn_state.cac_tdp_table;

	hi_sidd = cac_tdp_table->high_cac_leakage / 100 * 256;
	lo_sidd = cac_tdp_table->low_cac_leakage / 100 * 256;

	pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd = cpu_to_be16(hi_sidd);
	pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd = cpu_to_be16(lo_sidd);

	return 0;
}

static int ci_populate_bapm_parameters_in_dpm_table(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
	SMU7_Discrete_DpmTable *dpm_table = &pi->smc_state_table;
	struct amdgpu_cac_tdp_table *cac_tdp_table =
		adev->pm.dpm.dyn_state.cac_tdp_table;
	struct amdgpu_ppm_table *ppm = adev->pm.dpm.dyn_state.ppm_table;
	int i, j, k;
	const u16 *def1;
	const u16 *def2;

	dpm_table->DefaultTdp = cac_tdp_table->tdp * 256;
	dpm_table->TargetTdp = cac_tdp_table->configurable_tdp * 256;

	dpm_table->DTETjOffset = (u8)pi->dte_tj_offset;
	dpm_table->GpuTjMax =
		(u8)(pi->thermal_temp_setting.temperature_high / 1000);
	dpm_table->GpuTjHyst = 8;

	dpm_table->DTEAmbientTempBase = pt_defaults->dte_ambient_temp_base;

	if (ppm) {
		dpm_table->PPM_PkgPwrLimit = cpu_to_be16((u16)ppm->dgpu_tdp * 256 / 1000);
		dpm_table->PPM_TemperatureLimit = cpu_to_be16((u16)ppm->tj_max * 256);
	} else {
		dpm_table->PPM_PkgPwrLimit = cpu_to_be16(0);
		dpm_table->PPM_TemperatureLimit = cpu_to_be16(0);
	}

	dpm_table->BAPM_TEMP_GRADIENT = cpu_to_be32(pt_defaults->bapm_temp_gradient);
	def1 = pt_defaults->bapmti_r;
	def2 = pt_defaults->bapmti_rc;

	for (i = 0; i < SMU7_DTE_ITERATIONS; i++) {
		for (j = 0; j < SMU7_DTE_SOURCES; j++) {
			for (k = 0; k < SMU7_DTE_SINKS; k++) {
				dpm_table->BAPMTI_R[i][j][k] = cpu_to_be16(*def1);
				dpm_table->BAPMTI_RC[i][j][k] = cpu_to_be16(*def2);
				def1++;
				def2++;
			}
		}
	}

	return 0;
}

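/*
 * Fill in the SMU7_Discrete_PmFuses image (VID tables, SVI load line,
 * TDC limit, fuzzy-fan sensitivity, base leakage) and copy it to the
 * PM fuse table offset advertised in the SMC firmware header.
 */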
static int ci_populate_pm_base(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u32 pm_fuse_table_offset;
	int ret;

	if (pi->caps_power_containment) {
		ret = amdgpu_ci_read_smc_sram_dword(adev,
						    SMU7_FIRMWARE_HEADER_LOCATION +
						    offsetof(SMU7_Firmware_Header, PmFuseTable),
						    &pm_fuse_table_offset, pi->sram_end);
		if (ret)
			return ret;
		ret = ci_populate_bapm_vddc_vid_sidd(adev);
		if (ret)
			return ret;
		ret = ci_populate_vddc_vid(adev);
		if (ret)
			return ret;
		ret = ci_populate_svi_load_line(adev);
		if (ret)
			return ret;
		ret = ci_populate_tdc_limit(adev);
		if (ret)
			return ret;
		ret = ci_populate_dw8(adev);
		if (ret)
			return ret;
		ret = ci_populate_fuzzy_fan(adev);
		if (ret)
			return ret;
		ret = ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(adev);
		if (ret)
			return ret;
		ret = ci_populate_bapm_vddc_base_leakage_sidd(adev);
		if (ret)
			return ret;
		ret = amdgpu_ci_copy_bytes_to_smc(adev, pm_fuse_table_offset,
						  (u8 *)&pi->smc_powertune_table,
						  sizeof(SMU7_Discrete_PmFuses), pi->sram_end);
		if (ret)
			return ret;
	}

	return 0;
}

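/*
 * Toggle DIDT_CTRL_EN for each DIDT block (SQ, DB, TD, TCP) that is
 * enabled in the powertune caps.  The RLC must be in safe mode while
 * these indirect registers are touched; ci_enable_didt() below takes
 * care of that.
 */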
static void ci_do_enable_didt(struct amdgpu_device *adev, const bool enable)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u32 data;

	if (pi->caps_sq_ramping) {
		data = RREG32_DIDT(ixDIDT_SQ_CTRL0);
		if (enable)
			data |= DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
		else
			data &= ~DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
		WREG32_DIDT(ixDIDT_SQ_CTRL0, data);
	}

	if (pi->caps_db_ramping) {
		data = RREG32_DIDT(ixDIDT_DB_CTRL0);
		if (enable)
			data |= DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
		else
			data &= ~DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
		WREG32_DIDT(ixDIDT_DB_CTRL0, data);
	}

	if (pi->caps_td_ramping) {
		data = RREG32_DIDT(ixDIDT_TD_CTRL0);
		if (enable)
			data |= DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
		else
			data &= ~DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
		WREG32_DIDT(ixDIDT_TD_CTRL0, data);
	}

	if (pi->caps_tcp_ramping) {
		data = RREG32_DIDT(ixDIDT_TCP_CTRL0);
		if (enable)
			data |= DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
		else
			data &= ~DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
		WREG32_DIDT(ixDIDT_TCP_CTRL0, data);
	}
}

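/*
 * Walk a ci_pt_config_reg table and read-modify-write each entry into
 * the SMC indirect, DIDT indirect or plain MMIO register space.
 * CACHE-type entries accumulate field values that are OR'ed into the
 * next non-cache write.
 */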
static int ci_program_pt_config_registers(struct amdgpu_device *adev,
					  const struct ci_pt_config_reg *cac_config_regs)
{
	const struct ci_pt_config_reg *config_regs = cac_config_regs;
	u32 data;
	u32 cache = 0;

	if (config_regs == NULL)
		return -EINVAL;

	while (config_regs->offset != 0xFFFFFFFF) {
		if (config_regs->type == CISLANDS_CONFIGREG_CACHE) {
			cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
		} else {
			switch (config_regs->type) {
			case CISLANDS_CONFIGREG_SMC_IND:
				data = RREG32_SMC(config_regs->offset);
				break;
			case CISLANDS_CONFIGREG_DIDT_IND:
				data = RREG32_DIDT(config_regs->offset);
				break;
			default:
				data = RREG32(config_regs->offset);
				break;
			}

			data &= ~config_regs->mask;
			data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
			data |= cache;

			switch (config_regs->type) {
			case CISLANDS_CONFIGREG_SMC_IND:
				WREG32_SMC(config_regs->offset, data);
				break;
			case CISLANDS_CONFIGREG_DIDT_IND:
				WREG32_DIDT(config_regs->offset, data);
				break;
			default:
				WREG32(config_regs->offset, data);
				break;
			}
			cache = 0;
		}
		config_regs++;
	}
	return 0;
}

static int ci_enable_didt(struct amdgpu_device *adev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	int ret;

	if (pi->caps_sq_ramping || pi->caps_db_ramping ||
	    pi->caps_td_ramping || pi->caps_tcp_ramping) {
		adev->gfx.rlc.funcs->enter_safe_mode(adev);

		if (enable) {
			ret = ci_program_pt_config_registers(adev, didt_config_ci);
			if (ret) {
				adev->gfx.rlc.funcs->exit_safe_mode(adev);
				return ret;
			}
		}

		ci_do_enable_didt(adev, enable);

		adev->gfx.rlc.funcs->exit_safe_mode(adev);
	}

	return 0;
}

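/*
 * Enable or disable the SMC power-containment features: BAPM/DTE, the
 * TDC limit and the package power limit.  On enable, the default
 * package power limit comes from the VBIOS cac_tdp_table, scaled by
 * 256 (the SMC takes 8.8 fixed-point watts).
 */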
static int ci_enable_power_containment(struct amdgpu_device *adev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	PPSMC_Result smc_result;
	int ret = 0;

	if (enable) {
		pi->power_containment_features = 0;
		if (pi->caps_power_containment) {
			if (pi->enable_bapm_feature) {
				smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableDTE);
				if (smc_result != PPSMC_Result_OK)
					ret = -EINVAL;
				else
					pi->power_containment_features |= POWERCONTAINMENT_FEATURE_BAPM;
			}

			if (pi->enable_tdc_limit_feature) {
				smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_TDCLimitEnable);
				if (smc_result != PPSMC_Result_OK)
					ret = -EINVAL;
				else
					pi->power_containment_features |= POWERCONTAINMENT_FEATURE_TDCLimit;
			}

			if (pi->enable_pkg_pwr_tracking_feature) {
				smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PkgPwrLimitEnable);
				if (smc_result != PPSMC_Result_OK) {
					ret = -EINVAL;
				} else {
					struct amdgpu_cac_tdp_table *cac_tdp_table =
						adev->pm.dpm.dyn_state.cac_tdp_table;
					u32 default_pwr_limit =
						(u32)(cac_tdp_table->maximum_power_delivery_limit * 256);

					pi->power_containment_features |= POWERCONTAINMENT_FEATURE_PkgPwrLimit;

					ci_set_power_limit(adev, default_pwr_limit);
				}
			}
		}
	} else {
		if (pi->caps_power_containment && pi->power_containment_features) {
			if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_TDCLimit)
				amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_TDCLimitDisable);

			if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_BAPM)
				amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DisableDTE);

			if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit)
				amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PkgPwrLimitDisable);
			pi->power_containment_features = 0;
		}
	}

	return ret;
}

static int ci_enable_smc_cac(struct amdgpu_device *adev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	PPSMC_Result smc_result;
	int ret = 0;

	if (pi->caps_cac) {
		if (enable) {
			smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableCac);
			if (smc_result != PPSMC_Result_OK) {
				ret = -EINVAL;
				pi->cac_enabled = false;
			} else {
				pi->cac_enabled = true;
			}
		} else if (pi->cac_enabled) {
			amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DisableCac);
			pi->cac_enabled = false;
		}
	}

	return ret;
}

static int ci_enable_thermal_based_sclk_dpm(struct amdgpu_device *adev,
					    bool enable)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	PPSMC_Result smc_result = PPSMC_Result_OK;

	if (pi->thermal_sclk_dpm_enabled) {
		if (enable)
			smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_ENABLE_THERMAL_DPM);
		else
			smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DISABLE_THERMAL_DPM);
	}

	if (smc_result == PPSMC_Result_OK)
		return 0;
	else
		return -EINVAL;
}

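/*
 * Apply the user's TDP adjustment (a +/- percentage held in
 * adev->pm.dpm.tdp_adjustment) to the configurable TDP and send the
 * result to the SMC as the new overdrive target TDP.
 */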
static int ci_power_control_set_level(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct amdgpu_cac_tdp_table *cac_tdp_table =
		adev->pm.dpm.dyn_state.cac_tdp_table;
	s32 adjust_percent;
	s32 target_tdp;
	int ret = 0;
	bool adjust_polarity = false; /* ??? */

	if (pi->caps_power_containment) {
		adjust_percent = adjust_polarity ?
			adev->pm.dpm.tdp_adjustment : (-1 * adev->pm.dpm.tdp_adjustment);
		target_tdp = ((100 + adjust_percent) *
			      (s32)cac_tdp_table->configurable_tdp) / 100;

		ret = ci_set_overdrive_target_tdp(adev, (u32)target_tdp);
	}

	return ret;
}

static void ci_dpm_powergate_uvd(void *handle, bool gate)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ci_power_info *pi = ci_get_pi(adev);

	pi->uvd_power_gated = gate;

	if (gate) {
		/* stop the UVD block */
		amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
						       AMD_PG_STATE_GATE);
		ci_update_uvd_dpm(adev, gate);
	} else {
		amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
						       AMD_PG_STATE_UNGATE);
		ci_update_uvd_dpm(adev, gate);
	}
}

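/*
 * MCLK switching must complete within the display blanking period or
 * the screen will glitch.  Treat the vblank as too short when it is
 * under 450 us on GDDR5 (300 us otherwise), or when the refresh rate
 * exceeds 120 Hz.
 */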
static bool ci_dpm_vblank_too_short(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
	u32 switch_limit = adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 300;

	/* disable mclk switching if the refresh is >120Hz, even if the
	 * blanking period would allow it
	 */
	if (amdgpu_dpm_get_vrefresh(adev) > 120)
		return true;

	if (vblank_time < switch_limit)
		return true;
	else
		return false;
}

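/*
 * Clamp the requested power state to the AC/DC limits, the VCE state
 * requirements and the display configuration (mclk switching is
 * disabled when more than one CRTC is active or the vblank is too
 * short), and keep the two performance levels monotonic.
 */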
static void ci_apply_state_adjust_rules(struct amdgpu_device *adev,
					struct amdgpu_ps *rps)
{
	struct ci_ps *ps = ci_get_ps(rps);
	struct ci_power_info *pi = ci_get_pi(adev);
	struct amdgpu_clock_and_voltage_limits *max_limits;
	bool disable_mclk_switching;
	u32 sclk, mclk;
	int i;

	if (rps->vce_active) {
		rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk;
		rps->ecclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].ecclk;
	} else {
		rps->evclk = 0;
		rps->ecclk = 0;
	}

	if ((adev->pm.dpm.new_active_crtc_count > 1) ||
	    ci_dpm_vblank_too_short(adev))
		disable_mclk_switching = true;
	else
		disable_mclk_switching = false;

	if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
		pi->battery_state = true;
	else
		pi->battery_state = false;

	if (adev->pm.dpm.ac_power)
		max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
	else
		max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

	if (adev->pm.dpm.ac_power == false) {
		for (i = 0; i < ps->performance_level_count; i++) {
			if (ps->performance_levels[i].mclk > max_limits->mclk)
				ps->performance_levels[i].mclk = max_limits->mclk;
			if (ps->performance_levels[i].sclk > max_limits->sclk)
				ps->performance_levels[i].sclk = max_limits->sclk;
		}
	}

	/* XXX validate the min clocks required for display */

	if (disable_mclk_switching) {
		mclk = ps->performance_levels[ps->performance_level_count - 1].mclk;
		sclk = ps->performance_levels[0].sclk;
	} else {
		mclk = ps->performance_levels[0].mclk;
		sclk = ps->performance_levels[0].sclk;
	}

	if (adev->pm.pm_display_cfg.min_core_set_clock > sclk)
		sclk = adev->pm.pm_display_cfg.min_core_set_clock;

	if (adev->pm.pm_display_cfg.min_mem_set_clock > mclk)
		mclk = adev->pm.pm_display_cfg.min_mem_set_clock;

	if (rps->vce_active) {
		if (sclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk)
			sclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk;
		if (mclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].mclk)
			mclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].mclk;
	}

	ps->performance_levels[0].sclk = sclk;
	ps->performance_levels[0].mclk = mclk;

	if (ps->performance_levels[1].sclk < ps->performance_levels[0].sclk)
		ps->performance_levels[1].sclk = ps->performance_levels[0].sclk;

	if (disable_mclk_switching) {
		if (ps->performance_levels[0].mclk < ps->performance_levels[1].mclk)
			ps->performance_levels[0].mclk = ps->performance_levels[1].mclk;
	} else {
		if (ps->performance_levels[1].mclk < ps->performance_levels[0].mclk)
			ps->performance_levels[1].mclk = ps->performance_levels[0].mclk;
	}
}

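/*
 * Program the high/low DIG thermal interrupt thresholds.  The caller
 * passes millidegrees Celsius; the CG_THERMAL_INT fields take whole
 * degrees, hence the divide by 1000.
 */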
static int ci_thermal_set_temperature_range(struct amdgpu_device *adev,
					    int min_temp, int max_temp)
{
	int low_temp = 0 * 1000;
	int high_temp = 255 * 1000;
	u32 tmp;

	if (low_temp < min_temp)
		low_temp = min_temp;
	if (high_temp > max_temp)
		high_temp = max_temp;
	if (high_temp < low_temp) {
		DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
		return -EINVAL;
	}

	tmp = RREG32_SMC(ixCG_THERMAL_INT);
	tmp &= ~(CG_THERMAL_INT__DIG_THERM_INTH_MASK | CG_THERMAL_INT__DIG_THERM_INTL_MASK);
	tmp |= ((high_temp / 1000) << CG_THERMAL_INT__DIG_THERM_INTH__SHIFT) |
		((low_temp / 1000)) << CG_THERMAL_INT__DIG_THERM_INTL__SHIFT;
	WREG32_SMC(ixCG_THERMAL_INT, tmp);

#if 0
	/* XXX: need to figure out how to handle this properly */
	tmp = RREG32_SMC(ixCG_THERMAL_CTRL);
	tmp &= DIG_THERM_DPM_MASK;
	tmp |= DIG_THERM_DPM(high_temp / 1000);
	WREG32_SMC(ixCG_THERMAL_CTRL, tmp);
#endif

	adev->pm.dpm.thermal.min_temp = low_temp;
	adev->pm.dpm.thermal.max_temp = high_temp;
	return 0;
}

static int ci_thermal_enable_alert(struct amdgpu_device *adev,
				   bool enable)
{
	u32 thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
	PPSMC_Result result;

	if (enable) {
		thermal_int &= ~(CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK |
				 CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK);
		WREG32_SMC(ixCG_THERMAL_INT, thermal_int);
		result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Thermal_Cntl_Enable);
		if (result != PPSMC_Result_OK) {
			DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
			return -EINVAL;
		}
	} else {
		thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK |
			CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
		WREG32_SMC(ixCG_THERMAL_INT, thermal_int);
		result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Thermal_Cntl_Disable);
		if (result != PPSMC_Result_OK) {
			DRM_DEBUG_KMS("Could not disable thermal interrupts.\n");
			return -EINVAL;
		}
	}

	return 0;
}

static void ci_fan_ctrl_set_static_mode(struct amdgpu_device *adev, u32 mode)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u32 tmp;

	if (pi->fan_ctrl_is_in_default_mode) {
		tmp = (RREG32_SMC(ixCG_FDO_CTRL2) & CG_FDO_CTRL2__FDO_PWM_MODE_MASK)
			>> CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
		pi->fan_ctrl_default_mode = tmp;
		tmp = (RREG32_SMC(ixCG_FDO_CTRL2) & CG_FDO_CTRL2__TMIN_MASK)
			>> CG_FDO_CTRL2__TMIN__SHIFT;
		pi->t_min = tmp;
		pi->fan_ctrl_is_in_default_mode = false;
	}

	tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TMIN_MASK;
	tmp |= 0 << CG_FDO_CTRL2__TMIN__SHIFT;
	WREG32_SMC(ixCG_FDO_CTRL2, tmp);

	tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__FDO_PWM_MODE_MASK;
	tmp |= mode << CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
	WREG32_SMC(ixCG_FDO_CTRL2, tmp);
}

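/*
 * Build the SMU7_Discrete_FanTable from the VBIOS fan profile
 * (temperature/PWM breakpoints, hysteresis, a refresh period derived
 * from the crystal clock) and upload it to SMC SRAM.  On any failure
 * the driver falls back to disabling ucode fan control rather than
 * returning an error.
 */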
static int ci_thermal_setup_fan_table(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	SMU7_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
	u32 duty100;
	u32 t_diff1, t_diff2, pwm_diff1, pwm_diff2;
	u16 fdo_min, slope1, slope2;
	u32 reference_clock, tmp;
	int ret;
	u64 tmp64;

	if (!pi->fan_table_start) {
		adev->pm.dpm.fan.ucode_fan_control = false;
		return 0;
	}

	duty100 = (RREG32_SMC(ixCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK)
		>> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;

	if (duty100 == 0) {
		adev->pm.dpm.fan.ucode_fan_control = false;
		return 0;
	}

	tmp64 = (u64)adev->pm.dpm.fan.pwm_min * duty100;
	do_div(tmp64, 10000);
	fdo_min = (u16)tmp64;

	t_diff1 = adev->pm.dpm.fan.t_med - adev->pm.dpm.fan.t_min;
	t_diff2 = adev->pm.dpm.fan.t_high - adev->pm.dpm.fan.t_med;

	pwm_diff1 = adev->pm.dpm.fan.pwm_med - adev->pm.dpm.fan.pwm_min;
	pwm_diff2 = adev->pm.dpm.fan.pwm_high - adev->pm.dpm.fan.pwm_med;

	slope1 = (u16)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
	slope2 = (u16)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);

	fan_table.TempMin = cpu_to_be16((50 + adev->pm.dpm.fan.t_min) / 100);
	fan_table.TempMed = cpu_to_be16((50 + adev->pm.dpm.fan.t_med) / 100);
	fan_table.TempMax = cpu_to_be16((50 + adev->pm.dpm.fan.t_max) / 100);

	fan_table.Slope1 = cpu_to_be16(slope1);
	fan_table.Slope2 = cpu_to_be16(slope2);

	fan_table.FdoMin = cpu_to_be16(fdo_min);

	fan_table.HystDown = cpu_to_be16(adev->pm.dpm.fan.t_hyst);

	fan_table.HystUp = cpu_to_be16(1);

	fan_table.HystSlope = cpu_to_be16(1);

	fan_table.TempRespLim = cpu_to_be16(5);

	reference_clock = amdgpu_asic_get_xclk(adev);

	fan_table.RefreshPeriod = cpu_to_be32((adev->pm.dpm.fan.cycle_delay *
					       reference_clock) / 1600);

	fan_table.FdoMax = cpu_to_be16((u16)duty100);

	tmp = (RREG32_SMC(ixCG_MULT_THERMAL_CTRL) & CG_MULT_THERMAL_CTRL__TEMP_SEL_MASK)
		>> CG_MULT_THERMAL_CTRL__TEMP_SEL__SHIFT;
	fan_table.TempSrc = (uint8_t)tmp;

	ret = amdgpu_ci_copy_bytes_to_smc(adev,
					  pi->fan_table_start,
					  (u8 *)(&fan_table),
					  sizeof(fan_table),
					  pi->sram_end);

	if (ret) {
		DRM_ERROR("Failed to load fan table to the SMC.");
		adev->pm.dpm.fan.ucode_fan_control = false;
	}

	return 0;
}

static int ci_fan_ctrl_start_smc_fan_control(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	PPSMC_Result ret;

	if (pi->caps_od_fuzzy_fan_control_support) {
		ret = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
							       PPSMC_StartFanControl,
							       FAN_CONTROL_FUZZY);
		if (ret != PPSMC_Result_OK)
			return -EINVAL;
		ret = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
							       PPSMC_MSG_SetFanPwmMax,
							       adev->pm.dpm.fan.default_max_fan_pwm);
		if (ret != PPSMC_Result_OK)
			return -EINVAL;
	} else {
		ret = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
							       PPSMC_StartFanControl,
							       FAN_CONTROL_TABLE);
		if (ret != PPSMC_Result_OK)
			return -EINVAL;
	}

	pi->fan_is_controlled_by_smc = true;
	return 0;
}

static int ci_fan_ctrl_stop_smc_fan_control(struct amdgpu_device *adev)
{
	PPSMC_Result ret;
	struct ci_power_info *pi = ci_get_pi(adev);

	ret = amdgpu_ci_send_msg_to_smc(adev, PPSMC_StopFanControl);
	if (ret == PPSMC_Result_OK) {
		pi->fan_is_controlled_by_smc = false;
		return 0;
	} else {
		return -EINVAL;
	}
}

static int ci_dpm_get_fan_speed_percent(void *handle,
					u32 *speed)
{
	u32 duty, duty100;
	u64 tmp64;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->pm.no_fan)
		return -ENOENT;

	duty100 = (RREG32_SMC(ixCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK)
		>> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;
	duty = (RREG32_SMC(ixCG_THERMAL_STATUS) & CG_THERMAL_STATUS__FDO_PWM_DUTY_MASK)
		>> CG_THERMAL_STATUS__FDO_PWM_DUTY__SHIFT;

	if (duty100 == 0)
		return -EINVAL;

	tmp64 = (u64)duty * 100;
	do_div(tmp64, duty100);
	*speed = (u32)tmp64;

	if (*speed > 100)
		*speed = 100;

	return 0;
}

static int ci_dpm_set_fan_speed_percent(void *handle,
					u32 speed)
{
	u32 tmp;
	u32 duty, duty100;
	u64 tmp64;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ci_power_info *pi = ci_get_pi(adev);

	if (adev->pm.no_fan)
		return -ENOENT;

	if (pi->fan_is_controlled_by_smc)
		return -EINVAL;

	if (speed > 100)
		return -EINVAL;

	duty100 = (RREG32_SMC(ixCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK)
		>> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;

	if (duty100 == 0)
		return -EINVAL;

	tmp64 = (u64)speed * duty100;
	do_div(tmp64, 100);
	duty = (u32)tmp64;

	tmp = RREG32_SMC(ixCG_FDO_CTRL0) & ~CG_FDO_CTRL0__FDO_STATIC_DUTY_MASK;
	tmp |= duty << CG_FDO_CTRL0__FDO_STATIC_DUTY__SHIFT;
	WREG32_SMC(ixCG_FDO_CTRL0, tmp);

	return 0;
}

static void ci_dpm_set_fan_control_mode(void *handle, u32 mode)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (mode) {
	case AMD_FAN_CTRL_NONE:
		if (adev->pm.dpm.fan.ucode_fan_control)
			ci_fan_ctrl_stop_smc_fan_control(adev);
		ci_dpm_set_fan_speed_percent(adev, 100);
		break;
	case AMD_FAN_CTRL_MANUAL:
		if (adev->pm.dpm.fan.ucode_fan_control)
			ci_fan_ctrl_stop_smc_fan_control(adev);
		break;
	case AMD_FAN_CTRL_AUTO:
		if (adev->pm.dpm.fan.ucode_fan_control)
			ci_thermal_start_smc_fan_control(adev);
		break;
	default:
		break;
	}
}

static u32 ci_dpm_get_fan_control_mode(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ci_power_info *pi = ci_get_pi(adev);

	if (pi->fan_is_controlled_by_smc)
		return AMD_FAN_CTRL_AUTO;
	else
		return AMD_FAN_CTRL_MANUAL;
}

#if 0
static int ci_fan_ctrl_get_fan_speed_rpm(struct amdgpu_device *adev,
					 u32 *speed)
{
	u32 tach_period;
	u32 xclk = amdgpu_asic_get_xclk(adev);

	if (adev->pm.no_fan)
		return -ENOENT;

	if (adev->pm.fan_pulses_per_revolution == 0)
		return -ENOENT;

	tach_period = (RREG32_SMC(ixCG_TACH_STATUS) & CG_TACH_STATUS__TACH_PERIOD_MASK)
		>> CG_TACH_STATUS__TACH_PERIOD__SHIFT;
	if (tach_period == 0)
		return -ENOENT;

	*speed = 60 * xclk * 10000 / tach_period;

	return 0;
}

static int ci_fan_ctrl_set_fan_speed_rpm(struct amdgpu_device *adev,
					 u32 speed)
{
	u32 tach_period, tmp;
	u32 xclk = amdgpu_asic_get_xclk(adev);

	if (adev->pm.no_fan)
		return -ENOENT;

	if (adev->pm.fan_pulses_per_revolution == 0)
		return -ENOENT;

	if ((speed < adev->pm.fan_min_rpm) ||
	    (speed > adev->pm.fan_max_rpm))
		return -EINVAL;

	if (adev->pm.dpm.fan.ucode_fan_control)
		ci_fan_ctrl_stop_smc_fan_control(adev);

	tach_period = 60 * xclk * 10000 / (8 * speed);
	tmp = RREG32_SMC(ixCG_TACH_CTRL) & ~CG_TACH_CTRL__TARGET_PERIOD_MASK;
	tmp |= tach_period << CG_TACH_CTRL__TARGET_PERIOD__SHIFT;
	WREG32_SMC(ixCG_TACH_CTRL, tmp);

	ci_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC_RPM);

	return 0;
}
#endif

static void ci_fan_ctrl_set_default_mode(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u32 tmp;

	if (!pi->fan_ctrl_is_in_default_mode) {
		tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__FDO_PWM_MODE_MASK;
		tmp |= pi->fan_ctrl_default_mode << CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
		WREG32_SMC(ixCG_FDO_CTRL2, tmp);

		tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TMIN_MASK;
		tmp |= pi->t_min << CG_FDO_CTRL2__TMIN__SHIFT;
		WREG32_SMC(ixCG_FDO_CTRL2, tmp);
		pi->fan_ctrl_is_in_default_mode = true;
	}
}

static void ci_thermal_start_smc_fan_control(struct amdgpu_device *adev)
{
	if (adev->pm.dpm.fan.ucode_fan_control) {
		ci_fan_ctrl_start_smc_fan_control(adev);
		ci_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC);
	}
}

static void ci_thermal_initialize(struct amdgpu_device *adev)
{
	u32 tmp;

	if (adev->pm.fan_pulses_per_revolution) {
		tmp = RREG32_SMC(ixCG_TACH_CTRL) & ~CG_TACH_CTRL__EDGE_PER_REV_MASK;
		tmp |= (adev->pm.fan_pulses_per_revolution - 1)
			<< CG_TACH_CTRL__EDGE_PER_REV__SHIFT;
		WREG32_SMC(ixCG_TACH_CTRL, tmp);
	}

	tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TACH_PWM_RESP_RATE_MASK;
	tmp |= 0x28 << CG_FDO_CTRL2__TACH_PWM_RESP_RATE__SHIFT;
	WREG32_SMC(ixCG_FDO_CTRL2, tmp);
}

static int ci_thermal_start_thermal_controller(struct amdgpu_device *adev)
{
	int ret;

	ci_thermal_initialize(adev);
	ret = ci_thermal_set_temperature_range(adev, CISLANDS_TEMP_RANGE_MIN, CISLANDS_TEMP_RANGE_MAX);
	if (ret)
		return ret;
	ret = ci_thermal_enable_alert(adev, true);
	if (ret)
		return ret;
	if (adev->pm.dpm.fan.ucode_fan_control) {
		ret = ci_thermal_setup_fan_table(adev);
		if (ret)
			return ret;
		ci_thermal_start_smc_fan_control(adev);
	}

	return 0;
}

static void ci_thermal_stop_thermal_controller(struct amdgpu_device *adev)
{
	if (!adev->pm.no_fan)
		ci_fan_ctrl_set_default_mode(adev);
}

static int ci_read_smc_soft_register(struct amdgpu_device *adev,
				     u16 reg_offset, u32 *value)
{
	struct ci_power_info *pi = ci_get_pi(adev);

	return amdgpu_ci_read_smc_sram_dword(adev,
					     pi->soft_regs_start + reg_offset,
					     value, pi->sram_end);
}

static int ci_write_smc_soft_register(struct amdgpu_device *adev,
				      u16 reg_offset, u32 value)
{
	struct ci_power_info *pi = ci_get_pi(adev);

	return amdgpu_ci_write_smc_sram_dword(adev,
					      pi->soft_regs_start + reg_offset,
					      value, pi->sram_end);
}

static void ci_init_fps_limits(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	SMU7_Discrete_DpmTable *table = &pi->smc_state_table;

	if (pi->caps_fps) {
		u16 tmp;

		tmp = 45;
		table->FpsHighT = cpu_to_be16(tmp);

		tmp = 30;
		table->FpsLowT = cpu_to_be16(tmp);
	}
}

static int ci_update_sclk_t(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	int ret = 0;
	u32 low_sclk_interrupt_t = 0;

	if (pi->caps_sclk_throttle_low_notification) {
		low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t);

		ret = amdgpu_ci_copy_bytes_to_smc(adev,
						  pi->dpm_table_start +
						  offsetof(SMU7_Discrete_DpmTable, LowSclkInterruptT),
						  (u8 *)&low_sclk_interrupt_t,
						  sizeof(u32), pi->sram_end);
	}

	return ret;
}

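/*
 * Ask the VBIOS for the real voltages behind the virtual leakage
 * voltage IDs (ATOM_VIRTUAL_VOLTAGE_ID0 + i), via EVV or the leakage
 * parameter tables, and cache them so the clock dependency tables can
 * be patched later.
 */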
static void ci_get_leakage_voltages(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u16 leakage_id, virtual_voltage_id;
	u16 vddc, vddci;
	int i;

	pi->vddc_leakage.count = 0;
	pi->vddci_leakage.count = 0;

	if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
		for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
			virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
			if (amdgpu_atombios_get_voltage_evv(adev, virtual_voltage_id, &vddc) != 0)
				continue;
			if (vddc != 0 && vddc != virtual_voltage_id) {
				pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
				pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
				pi->vddc_leakage.count++;
			}
		}
	} else if (amdgpu_atombios_get_leakage_id_from_vbios(adev, &leakage_id) == 0) {
		for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
			virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
			if (amdgpu_atombios_get_leakage_vddc_based_on_leakage_params(adev, &vddc, &vddci,
										     virtual_voltage_id,
										     leakage_id) == 0) {
				if (vddc != 0 && vddc != virtual_voltage_id) {
					pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
					pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
					pi->vddc_leakage.count++;
				}
				if (vddci != 0 && vddci != virtual_voltage_id) {
					pi->vddci_leakage.actual_voltage[pi->vddci_leakage.count] = vddci;
					pi->vddci_leakage.leakage_id[pi->vddci_leakage.count] = virtual_voltage_id;
					pi->vddci_leakage.count++;
				}
			}
		}
	}
}

static void ci_set_dpm_event_sources(struct amdgpu_device *adev, u32 sources)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	bool want_thermal_protection;
	enum amdgpu_dpm_event_src dpm_event_src;
	u32 tmp;

	switch (sources) {
	case 0:
	default:
		want_thermal_protection = false;
		break;
	case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL):
		want_thermal_protection = true;
		dpm_event_src = AMDGPU_DPM_EVENT_SRC_DIGITAL;
		break;
	case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
		want_thermal_protection = true;
		dpm_event_src = AMDGPU_DPM_EVENT_SRC_EXTERNAL;
		break;
	case ((1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
	      (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL)):
		want_thermal_protection = true;
		dpm_event_src = AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL;
		break;
	}

	if (want_thermal_protection) {
#if 0
		/* XXX: need to figure out how to handle this properly */
		tmp = RREG32_SMC(ixCG_THERMAL_CTRL);
		tmp &= DPM_EVENT_SRC_MASK;
		tmp |= DPM_EVENT_SRC(dpm_event_src);
		WREG32_SMC(ixCG_THERMAL_CTRL, tmp);
#endif

		tmp = RREG32_SMC(ixGENERAL_PWRMGT);
		if (pi->thermal_protection)
			tmp &= ~GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
		else
			tmp |= GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
		WREG32_SMC(ixGENERAL_PWRMGT, tmp);
	} else {
		tmp = RREG32_SMC(ixGENERAL_PWRMGT);
		tmp |= GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
		WREG32_SMC(ixGENERAL_PWRMGT, tmp);
	}
}

static void ci_enable_auto_throttle_source(struct amdgpu_device *adev,
					   enum amdgpu_dpm_auto_throttle_src source,
					   bool enable)
{
	struct ci_power_info *pi = ci_get_pi(adev);

	if (enable) {
		if (!(pi->active_auto_throttle_sources & (1 << source))) {
			pi->active_auto_throttle_sources |= 1 << source;
			ci_set_dpm_event_sources(adev, pi->active_auto_throttle_sources);
		}
	} else {
		if (pi->active_auto_throttle_sources & (1 << source)) {
			pi->active_auto_throttle_sources &= ~(1 << source);
			ci_set_dpm_event_sources(adev, pi->active_auto_throttle_sources);
		}
	}
}

static void ci_enable_vr_hot_gpio_interrupt(struct amdgpu_device *adev)
{
	if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT)
		amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableVRHotGPIOInterrupt);
}

static int ci_unfreeze_sclk_mclk_dpm(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	PPSMC_Result smc_result;

	if (!pi->need_update_smu7_dpm_table)
		return 0;

	if ((!pi->sclk_dpm_key_disabled) &&
	    (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
		smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_SCLKDPM_UnfreezeLevel);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	if ((!pi->mclk_dpm_key_disabled) &&
	    (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
		smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_UnfreezeLevel);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	pi->need_update_smu7_dpm_table = 0;
	return 0;
}

1630static int ci_enable_sclk_mclk_dpm(struct amdgpu_device *adev, bool enable)
1631{
1632 struct ci_power_info *pi = ci_get_pi(adev);
1633 PPSMC_Result smc_result;
1634
1635 if (enable) {
1636 if (!pi->sclk_dpm_key_disabled) {
1637 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DPM_Enable);
1638 if (smc_result != PPSMC_Result_OK)
1639 return -EINVAL;
1640 }
1641
1642 if (!pi->mclk_dpm_key_disabled) {
1643 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_Enable);
1644 if (smc_result != PPSMC_Result_OK)
1645 return -EINVAL;
1646
1647 WREG32_P(mmMC_SEQ_CNTL_3, MC_SEQ_CNTL_3__CAC_EN_MASK,
1648 ~MC_SEQ_CNTL_3__CAC_EN_MASK);
1649
1650 WREG32_SMC(ixLCAC_MC0_CNTL, 0x05);
1651 WREG32_SMC(ixLCAC_MC1_CNTL, 0x05);
1652 WREG32_SMC(ixLCAC_CPL_CNTL, 0x100005);
1653
1654 udelay(10);
1655
1656 WREG32_SMC(ixLCAC_MC0_CNTL, 0x400005);
1657 WREG32_SMC(ixLCAC_MC1_CNTL, 0x400005);
1658 WREG32_SMC(ixLCAC_CPL_CNTL, 0x500005);
1659 }
1660 } else {
1661 if (!pi->sclk_dpm_key_disabled) {
1662 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DPM_Disable);
1663 if (smc_result != PPSMC_Result_OK)
1664 return -EINVAL;
1665 }
1666
1667 if (!pi->mclk_dpm_key_disabled) {
1668 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_Disable);
1669 if (smc_result != PPSMC_Result_OK)
1670 return -EINVAL;
1671 }
1672 }
1673
1674 return 0;
1675}
1676
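/*
 * Bring up dynamic power management: enable global power management and
 * dynamic engine-clock scaling, enable voltage control in the SMC, then
 * turn on SCLK/MCLK and (optionally) PCIe DPM.
 */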
static int ci_start_dpm(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	PPSMC_Result smc_result;
	int ret;
	u32 tmp;

	tmp = RREG32_SMC(ixGENERAL_PWRMGT);
	tmp |= GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK;
	WREG32_SMC(ixGENERAL_PWRMGT, tmp);

	tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
	tmp |= SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN_MASK;
	WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);

	ci_write_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, VoltageChangeTimeout), 0x1000);

	WREG32_P(mmBIF_LNCNT_RESET, 0, ~BIF_LNCNT_RESET__RESET_LNCNT_EN_MASK);

	smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Voltage_Cntl_Enable);
	if (smc_result != PPSMC_Result_OK)
		return -EINVAL;

	ret = ci_enable_sclk_mclk_dpm(adev, true);
	if (ret)
		return ret;

	if (!pi->pcie_dpm_key_disabled) {
		smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PCIeDPM_Enable);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	return 0;
}

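/*
 * Freeze the SCLK/MCLK DPM levels while their tables are being rewritten
 * so the SMC does not switch levels mid-update.
 */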
static int ci_freeze_sclk_mclk_dpm(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	PPSMC_Result smc_result;

	if (!pi->need_update_smu7_dpm_table)
		return 0;

	if ((!pi->sclk_dpm_key_disabled) &&
	    (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
		smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_SCLKDPM_FreezeLevel);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	if ((!pi->mclk_dpm_key_disabled) &&
	    (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
		smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_FreezeLevel);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	return 0;
}

static int ci_stop_dpm(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	PPSMC_Result smc_result;
	int ret;
	u32 tmp;

	tmp = RREG32_SMC(ixGENERAL_PWRMGT);
	tmp &= ~GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK;
	WREG32_SMC(ixGENERAL_PWRMGT, tmp);

	tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
	tmp &= ~SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN_MASK;
	WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);

	if (!pi->pcie_dpm_key_disabled) {
		smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PCIeDPM_Disable);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	ret = ci_enable_sclk_mclk_dpm(adev, false);
	if (ret)
		return ret;

	smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Voltage_Cntl_Disable);
	if (smc_result != PPSMC_Result_OK)
		return -EINVAL;

	return 0;
}

static void ci_enable_sclk_control(struct amdgpu_device *adev, bool enable)
{
	u32 tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);

	if (enable)
		tmp &= ~SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF_MASK;
	else
		tmp |= SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF_MASK;
	WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
}

#if 0
static int ci_notify_hw_of_power_source(struct amdgpu_device *adev,
					bool ac_power)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct amdgpu_cac_tdp_table *cac_tdp_table =
		adev->pm.dpm.dyn_state.cac_tdp_table;
	u32 power_limit;

	if (ac_power)
		power_limit = (u32)(cac_tdp_table->maximum_power_delivery_limit * 256);
	else
		power_limit = (u32)(cac_tdp_table->battery_power_limit * 256);

	ci_set_power_limit(adev, power_limit);

	if (pi->caps_automatic_dc_transition) {
		if (ac_power)
			amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_RunningOnAC);
		else
			amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Remove_DC_Clamp);
	}

	return 0;
}
#endif

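/*
 * The SMC mailbox takes a single u32 argument in SMC_MSG_ARG_0; the same
 * register carries the return value for messages that produce one.
 */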
static PPSMC_Result amdgpu_ci_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
							     PPSMC_Msg msg, u32 parameter)
{
	WREG32(mmSMC_MSG_ARG_0, parameter);
	return amdgpu_ci_send_msg_to_smc(adev, msg);
}

static PPSMC_Result amdgpu_ci_send_msg_to_smc_return_parameter(struct amdgpu_device *adev,
							       PPSMC_Msg msg, u32 *parameter)
{
	PPSMC_Result smc_result;

	smc_result = amdgpu_ci_send_msg_to_smc(adev, msg);

	if ((smc_result == PPSMC_Result_OK) && parameter)
		*parameter = RREG32(mmSMC_MSG_ARG_0);

	return smc_result;
}

static int ci_dpm_force_state_sclk(struct amdgpu_device *adev, u32 n)
{
	struct ci_power_info *pi = ci_get_pi(adev);

	if (!pi->sclk_dpm_key_disabled) {
		PPSMC_Result smc_result =
			amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SCLKDPM_SetEnabledMask, 1 << n);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	return 0;
}

static int ci_dpm_force_state_mclk(struct amdgpu_device *adev, u32 n)
{
	struct ci_power_info *pi = ci_get_pi(adev);

	if (!pi->mclk_dpm_key_disabled) {
		PPSMC_Result smc_result =
			amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_MCLKDPM_SetEnabledMask, 1 << n);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	return 0;
}

static int ci_dpm_force_state_pcie(struct amdgpu_device *adev, u32 n)
{
	struct ci_power_info *pi = ci_get_pi(adev);

	if (!pi->pcie_dpm_key_disabled) {
		PPSMC_Result smc_result =
			amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_PCIeDPM_ForceLevel, n);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	return 0;
}

static int ci_set_power_limit(struct amdgpu_device *adev, u32 n)
{
	struct ci_power_info *pi = ci_get_pi(adev);

	if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit) {
		PPSMC_Result smc_result =
			amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_PkgPwrSetLimit, n);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	return 0;
}

static int ci_set_overdrive_target_tdp(struct amdgpu_device *adev,
				       u32 target_tdp)
{
	PPSMC_Result smc_result =
		amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_OverDriveSetTargetTdp, target_tdp);
	if (smc_result != PPSMC_Result_OK)
		return -EINVAL;
	return 0;
}

#if 0
static int ci_set_boot_state(struct amdgpu_device *adev)
{
	return ci_enable_sclk_mclk_dpm(adev, false);
}
#endif

static u32 ci_get_average_sclk_freq(struct amdgpu_device *adev)
{
	u32 sclk_freq;
	PPSMC_Result smc_result =
		amdgpu_ci_send_msg_to_smc_return_parameter(adev,
							   PPSMC_MSG_API_GetSclkFrequency,
							   &sclk_freq);
	if (smc_result != PPSMC_Result_OK)
		sclk_freq = 0;

	return sclk_freq;
}

static u32 ci_get_average_mclk_freq(struct amdgpu_device *adev)
{
	u32 mclk_freq;
	PPSMC_Result smc_result =
		amdgpu_ci_send_msg_to_smc_return_parameter(adev,
							   PPSMC_MSG_API_GetMclkFrequency,
							   &mclk_freq);
	if (smc_result != PPSMC_Result_OK)
		mclk_freq = 0;

	return mclk_freq;
}

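/*
 * Start the SMC microcontroller and wait (bounded by adev->usec_timeout)
 * for its firmware to signal that interrupts are enabled.
 */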
static void ci_dpm_start_smc(struct amdgpu_device *adev)
{
	int i;

	amdgpu_ci_program_jump_on_start(adev);
	amdgpu_ci_start_smc_clock(adev);
	amdgpu_ci_start_smc(adev);
	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32_SMC(ixFIRMWARE_FLAGS) & FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK)
			break;
	}
}

static void ci_dpm_stop_smc(struct amdgpu_device *adev)
{
	amdgpu_ci_reset_smc(adev);
	amdgpu_ci_stop_smc_clock(adev);
}

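/*
 * Pull the SMU7 firmware header out of SMC SRAM and cache the offsets of
 * the DPM, soft-register, MC-register, fan and arbiter tables.
 */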
static int ci_process_firmware_header(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u32 tmp;
	int ret;

	ret = amdgpu_ci_read_smc_sram_dword(adev,
					    SMU7_FIRMWARE_HEADER_LOCATION +
					    offsetof(SMU7_Firmware_Header, DpmTable),
					    &tmp, pi->sram_end);
	if (ret)
		return ret;

	pi->dpm_table_start = tmp;

	ret = amdgpu_ci_read_smc_sram_dword(adev,
					    SMU7_FIRMWARE_HEADER_LOCATION +
					    offsetof(SMU7_Firmware_Header, SoftRegisters),
					    &tmp, pi->sram_end);
	if (ret)
		return ret;

	pi->soft_regs_start = tmp;

	ret = amdgpu_ci_read_smc_sram_dword(adev,
					    SMU7_FIRMWARE_HEADER_LOCATION +
					    offsetof(SMU7_Firmware_Header, mcRegisterTable),
					    &tmp, pi->sram_end);
	if (ret)
		return ret;

	pi->mc_reg_table_start = tmp;

	ret = amdgpu_ci_read_smc_sram_dword(adev,
					    SMU7_FIRMWARE_HEADER_LOCATION +
					    offsetof(SMU7_Firmware_Header, FanTable),
					    &tmp, pi->sram_end);
	if (ret)
		return ret;

	pi->fan_table_start = tmp;

	ret = amdgpu_ci_read_smc_sram_dword(adev,
					    SMU7_FIRMWARE_HEADER_LOCATION +
					    offsetof(SMU7_Firmware_Header, mcArbDramTimingTable),
					    &tmp, pi->sram_end);
	if (ret)
		return ret;

	pi->arb_table_start = tmp;

	return 0;
}

static void ci_read_clock_registers(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);

	pi->clock_registers.cg_spll_func_cntl =
		RREG32_SMC(ixCG_SPLL_FUNC_CNTL);
	pi->clock_registers.cg_spll_func_cntl_2 =
		RREG32_SMC(ixCG_SPLL_FUNC_CNTL_2);
	pi->clock_registers.cg_spll_func_cntl_3 =
		RREG32_SMC(ixCG_SPLL_FUNC_CNTL_3);
	pi->clock_registers.cg_spll_func_cntl_4 =
		RREG32_SMC(ixCG_SPLL_FUNC_CNTL_4);
	pi->clock_registers.cg_spll_spread_spectrum =
		RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM);
	pi->clock_registers.cg_spll_spread_spectrum_2 =
		RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM_2);
	pi->clock_registers.dll_cntl = RREG32(mmDLL_CNTL);
	pi->clock_registers.mclk_pwrmgt_cntl = RREG32(mmMCLK_PWRMGT_CNTL);
	pi->clock_registers.mpll_ad_func_cntl = RREG32(mmMPLL_AD_FUNC_CNTL);
	pi->clock_registers.mpll_dq_func_cntl = RREG32(mmMPLL_DQ_FUNC_CNTL);
	pi->clock_registers.mpll_func_cntl = RREG32(mmMPLL_FUNC_CNTL);
	pi->clock_registers.mpll_func_cntl_1 = RREG32(mmMPLL_FUNC_CNTL_1);
	pi->clock_registers.mpll_func_cntl_2 = RREG32(mmMPLL_FUNC_CNTL_2);
	pi->clock_registers.mpll_ss1 = RREG32(mmMPLL_SS1);
	pi->clock_registers.mpll_ss2 = RREG32(mmMPLL_SS2);
}

static void ci_init_sclk_t(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);

	pi->low_sclk_interrupt_t = 0;
}

static void ci_enable_thermal_protection(struct amdgpu_device *adev,
					 bool enable)
{
	u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT);

	if (enable)
		tmp &= ~GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
	else
		tmp |= GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
	WREG32_SMC(ixGENERAL_PWRMGT, tmp);
}

static void ci_enable_acpi_power_management(struct amdgpu_device *adev)
{
	u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT);

	tmp |= GENERAL_PWRMGT__STATIC_PM_EN_MASK;

	WREG32_SMC(ixGENERAL_PWRMGT, tmp);
}

#if 0
static int ci_enter_ulp_state(struct amdgpu_device *adev)
{
	WREG32(mmSMC_MESSAGE_0, PPSMC_MSG_SwitchToMinimumPower);

	udelay(25000);

	return 0;
}

static int ci_exit_ulp_state(struct amdgpu_device *adev)
{
	int i;

	WREG32(mmSMC_MESSAGE_0, PPSMC_MSG_ResumeFromMinimumPower);

	udelay(7000);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32(mmSMC_RESP_0) == 1)
			break;
		udelay(1000);
	}

	return 0;
}
#endif

static int ci_notify_smc_display_change(struct amdgpu_device *adev,
					bool has_display)
{
	PPSMC_Msg msg = has_display ? PPSMC_MSG_HasDisplay : PPSMC_MSG_NoDisplay;

	return (amdgpu_ci_send_msg_to_smc(adev, msg) == PPSMC_Result_OK) ? 0 : -EINVAL;
}

static int ci_enable_ds_master_switch(struct amdgpu_device *adev,
				      bool enable)
{
	struct ci_power_info *pi = ci_get_pi(adev);

	if (enable) {
		if (pi->caps_sclk_ds) {
			if (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MASTER_DeepSleep_ON) != PPSMC_Result_OK)
				return -EINVAL;
		} else {
			if (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
				return -EINVAL;
		}
	} else {
		if (pi->caps_sclk_ds) {
			if (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
				return -EINVAL;
		}
	}

	return 0;
}

static void ci_program_display_gap(struct amdgpu_device *adev)
{
	u32 tmp = RREG32_SMC(ixCG_DISPLAY_GAP_CNTL);
	u32 pre_vbi_time_in_us;
	u32 frame_time_in_us;
	u32 ref_clock = adev->clock.spll.reference_freq;
	u32 refresh_rate = amdgpu_dpm_get_vrefresh(adev);
	u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);

	tmp &= ~CG_DISPLAY_GAP_CNTL__DISP_GAP_MASK;
	if (adev->pm.dpm.new_active_crtc_count > 0)
		tmp |= (AMDGPU_PM_DISPLAY_GAP_VBLANK_OR_WM << CG_DISPLAY_GAP_CNTL__DISP_GAP__SHIFT);
	else
		tmp |= (AMDGPU_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP_GAP__SHIFT);
	WREG32_SMC(ixCG_DISPLAY_GAP_CNTL, tmp);

	if (refresh_rate == 0)
		refresh_rate = 60;
	if (vblank_time == 0xffffffff)
		vblank_time = 500;
	frame_time_in_us = 1000000 / refresh_rate;
	pre_vbi_time_in_us = frame_time_in_us - 200 - vblank_time;
	tmp = pre_vbi_time_in_us * (ref_clock / 100);

	WREG32_SMC(ixCG_DISPLAY_GAP_CNTL2, tmp);
	ci_write_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, PreVBlankGap), 0x64);
	ci_write_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us));

	ci_notify_smc_display_change(adev, (adev->pm.dpm.new_active_crtc_count == 1));
}

static void ci_enable_spread_spectrum(struct amdgpu_device *adev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u32 tmp;

	if (enable) {
		if (pi->caps_sclk_ss_support) {
			tmp = RREG32_SMC(ixGENERAL_PWRMGT);
			tmp |= GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN_MASK;
			WREG32_SMC(ixGENERAL_PWRMGT, tmp);
		}
	} else {
		tmp = RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM);
		tmp &= ~CG_SPLL_SPREAD_SPECTRUM__SSEN_MASK;
		WREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM, tmp);

		tmp = RREG32_SMC(ixGENERAL_PWRMGT);
		tmp &= ~GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN_MASK;
		WREG32_SMC(ixGENERAL_PWRMGT, tmp);
	}
}

static void ci_program_sstp(struct amdgpu_device *adev)
{
	WREG32_SMC(ixCG_STATIC_SCREEN_PARAMETER,
		   ((CISLANDS_SSTU_DFLT << CG_STATIC_SCREEN_PARAMETER__STATIC_SCREEN_THRESHOLD_UNIT__SHIFT) |
		    (CISLANDS_SST_DFLT << CG_STATIC_SCREEN_PARAMETER__STATIC_SCREEN_THRESHOLD__SHIFT)));
}

static void ci_enable_display_gap(struct amdgpu_device *adev)
{
	u32 tmp = RREG32_SMC(ixCG_DISPLAY_GAP_CNTL);

	tmp &= ~(CG_DISPLAY_GAP_CNTL__DISP_GAP_MASK |
		 CG_DISPLAY_GAP_CNTL__DISP_GAP_MCHG_MASK);
	tmp |= ((AMDGPU_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP_GAP__SHIFT) |
		(AMDGPU_PM_DISPLAY_GAP_VBLANK << CG_DISPLAY_GAP_CNTL__DISP_GAP_MCHG__SHIFT));

	WREG32_SMC(ixCG_DISPLAY_GAP_CNTL, tmp);
}

static void ci_program_vc(struct amdgpu_device *adev)
{
	u32 tmp;

	tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
	tmp &= ~(SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK | SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK);
	WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);

	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, CISLANDS_VRC_DFLT0);
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_1, CISLANDS_VRC_DFLT1);
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_2, CISLANDS_VRC_DFLT2);
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_3, CISLANDS_VRC_DFLT3);
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_4, CISLANDS_VRC_DFLT4);
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_5, CISLANDS_VRC_DFLT5);
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_6, CISLANDS_VRC_DFLT6);
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_7, CISLANDS_VRC_DFLT7);
}

static void ci_clear_vc(struct amdgpu_device *adev)
{
	u32 tmp;

	tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
	tmp |= (SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK | SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK);
	WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);

	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0);
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_1, 0);
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_2, 0);
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_3, 0);
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_4, 0);
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_5, 0);
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_6, 0);
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_7, 0);
}

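/*
 * Load the SMC ucode into SMC RAM. If the SMC is already running (e.g.
 * started by the VBIOS or a previous init), leave it alone.
 */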
static int ci_upload_firmware(struct amdgpu_device *adev)
{
	int i, ret;

	if (amdgpu_ci_is_smc_running(adev)) {
		DRM_INFO("smc is running, no need to load smc firmware\n");
		return 0;
	}

	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32_SMC(ixRCU_UC_EVENTS) & RCU_UC_EVENTS__boot_seq_done_MASK)
			break;
	}
	WREG32_SMC(ixSMC_SYSCON_MISC_CNTL, 1);

	amdgpu_ci_stop_smc_clock(adev);
	amdgpu_ci_reset_smc(adev);

	ret = amdgpu_ci_load_smc_ucode(adev, SMC_RAM_END);

	return ret;
}

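/*
 * For SVI2-controlled rails there is no GPIO LUT; derive the voltage
 * table directly from the clock/voltage dependency table.
 */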
static int ci_get_svi2_voltage_table(struct amdgpu_device *adev,
				     struct amdgpu_clock_voltage_dependency_table *voltage_dependency_table,
				     struct atom_voltage_table *voltage_table)
{
	u32 i;

	if (voltage_dependency_table == NULL)
		return -EINVAL;

	voltage_table->mask_low = 0;
	voltage_table->phase_delay = 0;

	voltage_table->count = voltage_dependency_table->count;
	for (i = 0; i < voltage_table->count; i++) {
		voltage_table->entries[i].value = voltage_dependency_table->entries[i].v;
		voltage_table->entries[i].smio_low = 0;
	}

	return 0;
}

static int ci_construct_voltage_tables(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	int ret;

	if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
		ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDC,
							VOLTAGE_OBJ_GPIO_LUT,
							&pi->vddc_voltage_table);
		if (ret)
			return ret;
	} else if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
		ret = ci_get_svi2_voltage_table(adev,
						&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
						&pi->vddc_voltage_table);
		if (ret)
			return ret;
	}

	if (pi->vddc_voltage_table.count > SMU7_MAX_LEVELS_VDDC)
		ci_trim_voltage_table_to_fit_state_table(adev, SMU7_MAX_LEVELS_VDDC,
							 &pi->vddc_voltage_table);

	if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
		ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDCI,
							VOLTAGE_OBJ_GPIO_LUT,
							&pi->vddci_voltage_table);
		if (ret)
			return ret;
	} else if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
		ret = ci_get_svi2_voltage_table(adev,
						&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
						&pi->vddci_voltage_table);
		if (ret)
			return ret;
	}

	if (pi->vddci_voltage_table.count > SMU7_MAX_LEVELS_VDDCI)
		ci_trim_voltage_table_to_fit_state_table(adev, SMU7_MAX_LEVELS_VDDCI,
							 &pi->vddci_voltage_table);

	if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
		ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_MVDDC,
							VOLTAGE_OBJ_GPIO_LUT,
							&pi->mvdd_voltage_table);
		if (ret)
			return ret;
	} else if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
		ret = ci_get_svi2_voltage_table(adev,
						&adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
						&pi->mvdd_voltage_table);
		if (ret)
			return ret;
	}

	if (pi->mvdd_voltage_table.count > SMU7_MAX_LEVELS_MVDD)
		ci_trim_voltage_table_to_fit_state_table(adev, SMU7_MAX_LEVELS_MVDD,
							 &pi->mvdd_voltage_table);

	return 0;
}

static void ci_populate_smc_voltage_table(struct amdgpu_device *adev,
					  struct atom_voltage_table_entry *voltage_table,
					  SMU7_Discrete_VoltageLevel *smc_voltage_table)
{
	int ret;

	ret = ci_get_std_voltage_value_sidd(adev, voltage_table,
					    &smc_voltage_table->StdVoltageHiSidd,
					    &smc_voltage_table->StdVoltageLoSidd);

	if (ret) {
		smc_voltage_table->StdVoltageHiSidd = voltage_table->value * VOLTAGE_SCALE;
		smc_voltage_table->StdVoltageLoSidd = voltage_table->value * VOLTAGE_SCALE;
	}

	smc_voltage_table->Voltage = cpu_to_be16(voltage_table->value * VOLTAGE_SCALE);
	smc_voltage_table->StdVoltageHiSidd =
		cpu_to_be16(smc_voltage_table->StdVoltageHiSidd);
	smc_voltage_table->StdVoltageLoSidd =
		cpu_to_be16(smc_voltage_table->StdVoltageLoSidd);
}

static int ci_populate_smc_vddc_table(struct amdgpu_device *adev,
				      SMU7_Discrete_DpmTable *table)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	unsigned int count;

	table->VddcLevelCount = pi->vddc_voltage_table.count;
	for (count = 0; count < table->VddcLevelCount; count++) {
		ci_populate_smc_voltage_table(adev,
					      &pi->vddc_voltage_table.entries[count],
					      &table->VddcLevel[count]);

		if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
			table->VddcLevel[count].Smio |=
				pi->vddc_voltage_table.entries[count].smio_low;
		else
			table->VddcLevel[count].Smio = 0;
	}
	table->VddcLevelCount = cpu_to_be32(table->VddcLevelCount);

	return 0;
}

static int ci_populate_smc_vddci_table(struct amdgpu_device *adev,
				       SMU7_Discrete_DpmTable *table)
{
	unsigned int count;
	struct ci_power_info *pi = ci_get_pi(adev);

	table->VddciLevelCount = pi->vddci_voltage_table.count;
	for (count = 0; count < table->VddciLevelCount; count++) {
		ci_populate_smc_voltage_table(adev,
					      &pi->vddci_voltage_table.entries[count],
					      &table->VddciLevel[count]);

		if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
			table->VddciLevel[count].Smio |=
				pi->vddci_voltage_table.entries[count].smio_low;
		else
			table->VddciLevel[count].Smio = 0;
	}
	table->VddciLevelCount = cpu_to_be32(table->VddciLevelCount);

	return 0;
}

static int ci_populate_smc_mvdd_table(struct amdgpu_device *adev,
				      SMU7_Discrete_DpmTable *table)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	unsigned int count;

	table->MvddLevelCount = pi->mvdd_voltage_table.count;
	for (count = 0; count < table->MvddLevelCount; count++) {
		ci_populate_smc_voltage_table(adev,
					      &pi->mvdd_voltage_table.entries[count],
					      &table->MvddLevel[count]);

		if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
			table->MvddLevel[count].Smio |=
				pi->mvdd_voltage_table.entries[count].smio_low;
		else
			table->MvddLevel[count].Smio = 0;
	}
	table->MvddLevelCount = cpu_to_be32(table->MvddLevelCount);

	return 0;
}

static int ci_populate_smc_voltage_tables(struct amdgpu_device *adev,
					  SMU7_Discrete_DpmTable *table)
{
	int ret;

	ret = ci_populate_smc_vddc_table(adev, table);
	if (ret)
		return ret;

	ret = ci_populate_smc_vddci_table(adev, table);
	if (ret)
		return ret;

	ret = ci_populate_smc_mvdd_table(adev, table);
	if (ret)
		return ret;

	return 0;
}

static int ci_populate_mvdd_value(struct amdgpu_device *adev, u32 mclk,
				  SMU7_Discrete_VoltageLevel *voltage)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u32 i = 0;

	if (pi->mvdd_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
		for (i = 0; i < adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count; i++) {
			if (mclk <= adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries[i].clk) {
				voltage->Voltage = pi->mvdd_voltage_table.entries[i].value;
				return 0;
			}
		}
	}

	/* no mvdd control, or no dependency entry covers this mclk */
	return -EINVAL;
}

static int ci_get_std_voltage_value_sidd(struct amdgpu_device *adev,
					 struct atom_voltage_table_entry *voltage_table,
					 u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd)
{
	u16 v_index, idx;
	bool voltage_found = false;
	*std_voltage_hi_sidd = voltage_table->value * VOLTAGE_SCALE;
	*std_voltage_lo_sidd = voltage_table->value * VOLTAGE_SCALE;

	if (adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries == NULL)
		return -EINVAL;

	if (adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
		for (v_index = 0; (u32)v_index < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
			if (voltage_table->value ==
			    adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
				voltage_found = true;
				if ((u32)v_index < adev->pm.dpm.dyn_state.cac_leakage_table.count)
					idx = v_index;
				else
					idx = adev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
				*std_voltage_lo_sidd =
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
				*std_voltage_hi_sidd =
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
				break;
			}
		}

		if (!voltage_found) {
			for (v_index = 0; (u32)v_index < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
				if (voltage_table->value <=
				    adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
					voltage_found = true;
					if ((u32)v_index < adev->pm.dpm.dyn_state.cac_leakage_table.count)
						idx = v_index;
					else
						idx = adev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
					*std_voltage_lo_sidd =
						adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
					*std_voltage_hi_sidd =
						adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
					break;
				}
			}
		}
	}

	return 0;
}

static void ci_populate_phase_value_based_on_sclk(struct amdgpu_device *adev,
						  const struct amdgpu_phase_shedding_limits_table *limits,
						  u32 sclk,
						  u32 *phase_shedding)
{
	unsigned int i;

	*phase_shedding = 1;

	for (i = 0; i < limits->count; i++) {
		if (sclk < limits->entries[i].sclk) {
			*phase_shedding = i;
			break;
		}
	}
}

static void ci_populate_phase_value_based_on_mclk(struct amdgpu_device *adev,
						  const struct amdgpu_phase_shedding_limits_table *limits,
						  u32 mclk,
						  u32 *phase_shedding)
{
	unsigned int i;

	*phase_shedding = 1;

	for (i = 0; i < limits->count; i++) {
		if (mclk < limits->entries[i].mclk) {
			*phase_shedding = i;
			break;
		}
	}
}

static int ci_init_arb_table_index(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u32 tmp;
	int ret;

	ret = amdgpu_ci_read_smc_sram_dword(adev, pi->arb_table_start,
					    &tmp, pi->sram_end);
	if (ret)
		return ret;

	tmp &= 0x00FFFFFF;
	tmp |= MC_CG_ARB_FREQ_F1 << 24;

	return amdgpu_ci_write_smc_sram_dword(adev, pi->arb_table_start,
					      tmp, pi->sram_end);
}

static int ci_get_dependency_volt_by_clk(struct amdgpu_device *adev,
					 struct amdgpu_clock_voltage_dependency_table *allowed_clock_voltage_table,
					 u32 clock, u32 *voltage)
{
	u32 i = 0;

	if (allowed_clock_voltage_table->count == 0)
		return -EINVAL;

	for (i = 0; i < allowed_clock_voltage_table->count; i++) {
		if (allowed_clock_voltage_table->entries[i].clk >= clock) {
			*voltage = allowed_clock_voltage_table->entries[i].v;
			return 0;
		}
	}

	*voltage = allowed_clock_voltage_table->entries[i-1].v;

	return 0;
}

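/*
 * Pick the largest deep-sleep divider id such that sclk >> id stays at
 * or above the minimum engine clock.
 */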
static u8 ci_get_sleep_divider_id_from_clock(u32 sclk, u32 min_sclk_in_sr)
{
	u32 i;
	u32 tmp;
	u32 min = max(min_sclk_in_sr, (u32)CISLAND_MINIMUM_ENGINE_CLOCK);

	if (sclk < min)
		return 0;

	for (i = CISLAND_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) {
		tmp = sclk >> i;
		if (tmp >= min || i == 0)
			break;
	}

	return (u8)i;
}

static int ci_initial_switch_from_arb_f0_to_f1(struct amdgpu_device *adev)
{
	return ci_copy_and_switch_arb_sets(adev, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
}

static int ci_reset_to_default(struct amdgpu_device *adev)
{
	return (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_ResetToDefaults) == PPSMC_Result_OK) ?
		0 : -EINVAL;
}

static int ci_force_switch_to_arb_f0(struct amdgpu_device *adev)
{
	u32 tmp;

	tmp = (RREG32_SMC(ixSMC_SCRATCH9) & 0x0000ff00) >> 8;

	if (tmp == MC_CG_ARB_FREQ_F0)
		return 0;

	return ci_copy_and_switch_arb_sets(adev, tmp, MC_CG_ARB_FREQ_F0);
}

static void ci_register_patching_mc_arb(struct amdgpu_device *adev,
					const u32 engine_clock,
					const u32 memory_clock,
					u32 *dram_timimg2)
{
	bool patch;
	u32 tmp, tmp2;

	tmp = RREG32(mmMC_SEQ_MISC0);
	patch = ((tmp & 0x0000f00) == 0x300);

	if (patch &&
	    ((adev->pdev->device == 0x67B0) ||
	     (adev->pdev->device == 0x67B1))) {
		if ((memory_clock > 100000) && (memory_clock <= 125000)) {
			tmp2 = (((0x31 * engine_clock) / 125000) - 1) & 0xff;
			*dram_timimg2 &= ~0x00ff0000;
			*dram_timimg2 |= tmp2 << 16;
		} else if ((memory_clock > 125000) && (memory_clock <= 137500)) {
			tmp2 = (((0x36 * engine_clock) / 137500) - 1) & 0xff;
			*dram_timimg2 &= ~0x00ff0000;
			*dram_timimg2 |= tmp2 << 16;
		}
	}
}

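/*
 * Compute the MC arbiter DRAM timing registers for one sclk/mclk pair;
 * ci_do_program_memory_timing_parameters() fills the whole sclk x mclk
 * matrix and copies it into SMC RAM.
 */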
static int ci_populate_memory_timing_parameters(struct amdgpu_device *adev,
						u32 sclk,
						u32 mclk,
						SMU7_Discrete_MCArbDramTimingTableEntry *arb_regs)
{
	u32 dram_timing;
	u32 dram_timing2;
	u32 burst_time;

	amdgpu_atombios_set_engine_dram_timings(adev, sclk, mclk);

	dram_timing = RREG32(mmMC_ARB_DRAM_TIMING);
	dram_timing2 = RREG32(mmMC_ARB_DRAM_TIMING2);
	burst_time = RREG32(mmMC_ARB_BURST_TIME) & MC_ARB_BURST_TIME__STATE0_MASK;

	ci_register_patching_mc_arb(adev, sclk, mclk, &dram_timing2);

	arb_regs->McArbDramTiming = cpu_to_be32(dram_timing);
	arb_regs->McArbDramTiming2 = cpu_to_be32(dram_timing2);
	arb_regs->McArbBurstTime = (u8)burst_time;

	return 0;
}

static int ci_do_program_memory_timing_parameters(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	SMU7_Discrete_MCArbDramTimingTable arb_regs;
	u32 i, j;
	int ret = 0;

	memset(&arb_regs, 0, sizeof(SMU7_Discrete_MCArbDramTimingTable));

	for (i = 0; i < pi->dpm_table.sclk_table.count; i++) {
		for (j = 0; j < pi->dpm_table.mclk_table.count; j++) {
			ret = ci_populate_memory_timing_parameters(adev,
								   pi->dpm_table.sclk_table.dpm_levels[i].value,
								   pi->dpm_table.mclk_table.dpm_levels[j].value,
								   &arb_regs.entries[i][j]);
			if (ret)
				break;
		}
	}

	if (ret == 0)
		ret = amdgpu_ci_copy_bytes_to_smc(adev,
						  pi->arb_table_start,
						  (u8 *)&arb_regs,
						  sizeof(SMU7_Discrete_MCArbDramTimingTable),
						  pi->sram_end);

	return ret;
}

static int ci_program_memory_timing_parameters(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);

	if (pi->need_update_smu7_dpm_table == 0)
		return 0;

	return ci_do_program_memory_timing_parameters(adev);
}

static void ci_populate_smc_initial_state(struct amdgpu_device *adev,
					  struct amdgpu_ps *amdgpu_boot_state)
{
	struct ci_ps *boot_state = ci_get_ps(amdgpu_boot_state);
	struct ci_power_info *pi = ci_get_pi(adev);
	u32 level = 0;

	for (level = 0; level < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; level++) {
		if (adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[level].clk >=
		    boot_state->performance_levels[0].sclk) {
			pi->smc_state_table.GraphicsBootLevel = level;
			break;
		}
	}

	for (level = 0; level < adev->pm.dpm.dyn_state.vddc_dependency_on_mclk.count; level++) {
		if (adev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries[level].clk >=
		    boot_state->performance_levels[0].mclk) {
			pi->smc_state_table.MemoryBootLevel = level;
			break;
		}
	}
}

static u32 ci_get_dpm_level_enable_mask_value(struct ci_single_dpm_table *dpm_table)
{
	u32 i;
	u32 mask_value = 0;

	for (i = dpm_table->count; i > 0; i--) {
		mask_value = mask_value << 1;
		if (dpm_table->dpm_levels[i-1].enabled)
			mask_value |= 0x1;
		else
			mask_value &= 0xFFFFFFFE;
	}

	return mask_value;
}

static void ci_populate_smc_link_level(struct amdgpu_device *adev,
				       SMU7_Discrete_DpmTable *table)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_dpm_table *dpm_table = &pi->dpm_table;
	u32 i;

	for (i = 0; i < dpm_table->pcie_speed_table.count; i++) {
		table->LinkLevel[i].PcieGenSpeed =
			(u8)dpm_table->pcie_speed_table.dpm_levels[i].value;
		table->LinkLevel[i].PcieLaneCount =
			amdgpu_encode_pci_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
		table->LinkLevel[i].EnabledForActivity = 1;
		table->LinkLevel[i].DownT = cpu_to_be32(5);
		table->LinkLevel[i].UpT = cpu_to_be32(30);
	}

	pi->smc_state_table.LinkLevelCount = (u8)dpm_table->pcie_speed_table.count;
	pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
		ci_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
}

static int ci_populate_smc_uvd_level(struct amdgpu_device *adev,
				     SMU7_Discrete_DpmTable *table)
{
	u32 count;
	struct atom_clock_dividers dividers;
	int ret = -EINVAL;

	table->UvdLevelCount =
		adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count;

	for (count = 0; count < table->UvdLevelCount; count++) {
		table->UvdLevel[count].VclkFrequency =
			adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].vclk;
		table->UvdLevel[count].DclkFrequency =
			adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].dclk;
		table->UvdLevel[count].MinVddc =
			adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
		table->UvdLevel[count].MinVddcPhases = 1;

		ret = amdgpu_atombios_get_clock_dividers(adev,
							 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
							 table->UvdLevel[count].VclkFrequency, false, &dividers);
		if (ret)
			return ret;

		table->UvdLevel[count].VclkDivider = (u8)dividers.post_divider;

		ret = amdgpu_atombios_get_clock_dividers(adev,
							 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
							 table->UvdLevel[count].DclkFrequency, false, &dividers);
		if (ret)
			return ret;

		table->UvdLevel[count].DclkDivider = (u8)dividers.post_divider;

		table->UvdLevel[count].VclkFrequency = cpu_to_be32(table->UvdLevel[count].VclkFrequency);
		table->UvdLevel[count].DclkFrequency = cpu_to_be32(table->UvdLevel[count].DclkFrequency);
		table->UvdLevel[count].MinVddc = cpu_to_be16(table->UvdLevel[count].MinVddc);
	}

	return ret;
}

static int ci_populate_smc_vce_level(struct amdgpu_device *adev,
				     SMU7_Discrete_DpmTable *table)
{
	u32 count;
	struct atom_clock_dividers dividers;
	int ret = -EINVAL;

	table->VceLevelCount =
		adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count;

	for (count = 0; count < table->VceLevelCount; count++) {
		table->VceLevel[count].Frequency =
			adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].evclk;
		table->VceLevel[count].MinVoltage =
			(u16)adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
		table->VceLevel[count].MinPhases = 1;

		ret = amdgpu_atombios_get_clock_dividers(adev,
							 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
							 table->VceLevel[count].Frequency, false, &dividers);
		if (ret)
			return ret;

		table->VceLevel[count].Divider = (u8)dividers.post_divider;

		table->VceLevel[count].Frequency = cpu_to_be32(table->VceLevel[count].Frequency);
		table->VceLevel[count].MinVoltage = cpu_to_be16(table->VceLevel[count].MinVoltage);
	}

	return ret;
}

static int ci_populate_smc_acp_level(struct amdgpu_device *adev,
				     SMU7_Discrete_DpmTable *table)
{
	u32 count;
	struct atom_clock_dividers dividers;
	int ret = -EINVAL;

	table->AcpLevelCount = (u8)
		(adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count);

	for (count = 0; count < table->AcpLevelCount; count++) {
		table->AcpLevel[count].Frequency =
			adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].clk;
		table->AcpLevel[count].MinVoltage =
			adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].v;
		table->AcpLevel[count].MinPhases = 1;

		ret = amdgpu_atombios_get_clock_dividers(adev,
							 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
							 table->AcpLevel[count].Frequency, false, &dividers);
		if (ret)
			return ret;

		table->AcpLevel[count].Divider = (u8)dividers.post_divider;

		table->AcpLevel[count].Frequency = cpu_to_be32(table->AcpLevel[count].Frequency);
		table->AcpLevel[count].MinVoltage = cpu_to_be16(table->AcpLevel[count].MinVoltage);
	}

	return ret;
}

static int ci_populate_smc_samu_level(struct amdgpu_device *adev,
				      SMU7_Discrete_DpmTable *table)
{
	u32 count;
	struct atom_clock_dividers dividers;
	int ret = -EINVAL;

	table->SamuLevelCount =
		adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count;

	for (count = 0; count < table->SamuLevelCount; count++) {
		table->SamuLevel[count].Frequency =
			adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].clk;
		table->SamuLevel[count].MinVoltage =
			adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
		table->SamuLevel[count].MinPhases = 1;

		ret = amdgpu_atombios_get_clock_dividers(adev,
							 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
							 table->SamuLevel[count].Frequency, false, &dividers);
		if (ret)
			return ret;

		table->SamuLevel[count].Divider = (u8)dividers.post_divider;

		table->SamuLevel[count].Frequency = cpu_to_be32(table->SamuLevel[count].Frequency);
		table->SamuLevel[count].MinVoltage = cpu_to_be16(table->SamuLevel[count].MinVoltage);
	}

	return ret;
}

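/*
 * Derive the memory PLL register set (dividers, DLL state and optional
 * spread spectrum) for a target memory clock from the ATOM PLL dividers.
 */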
static int ci_calculate_mclk_params(struct amdgpu_device *adev,
				    u32 memory_clock,
				    SMU7_Discrete_MemoryLevel *mclk,
				    bool strobe_mode,
				    bool dll_state_on)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u32 dll_cntl = pi->clock_registers.dll_cntl;
	u32 mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
	u32 mpll_ad_func_cntl = pi->clock_registers.mpll_ad_func_cntl;
	u32 mpll_dq_func_cntl = pi->clock_registers.mpll_dq_func_cntl;
	u32 mpll_func_cntl = pi->clock_registers.mpll_func_cntl;
	u32 mpll_func_cntl_1 = pi->clock_registers.mpll_func_cntl_1;
	u32 mpll_func_cntl_2 = pi->clock_registers.mpll_func_cntl_2;
	u32 mpll_ss1 = pi->clock_registers.mpll_ss1;
	u32 mpll_ss2 = pi->clock_registers.mpll_ss2;
	struct atom_mpll_param mpll_param;
	int ret;

	ret = amdgpu_atombios_get_memory_pll_dividers(adev, memory_clock, strobe_mode, &mpll_param);
	if (ret)
		return ret;

	mpll_func_cntl &= ~MPLL_FUNC_CNTL__BWCTRL_MASK;
	mpll_func_cntl |= (mpll_param.bwcntl << MPLL_FUNC_CNTL__BWCTRL__SHIFT);

	mpll_func_cntl_1 &= ~(MPLL_FUNC_CNTL_1__CLKF_MASK | MPLL_FUNC_CNTL_1__CLKFRAC_MASK |
			      MPLL_FUNC_CNTL_1__VCO_MODE_MASK);
	mpll_func_cntl_1 |= (mpll_param.clkf << MPLL_FUNC_CNTL_1__CLKF__SHIFT) |
		(mpll_param.clkfrac << MPLL_FUNC_CNTL_1__CLKFRAC__SHIFT) |
		(mpll_param.vco_mode << MPLL_FUNC_CNTL_1__VCO_MODE__SHIFT);

	mpll_ad_func_cntl &= ~MPLL_AD_FUNC_CNTL__YCLK_POST_DIV_MASK;
	mpll_ad_func_cntl |= (mpll_param.post_div << MPLL_AD_FUNC_CNTL__YCLK_POST_DIV__SHIFT);

	if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
		mpll_dq_func_cntl &= ~(MPLL_DQ_FUNC_CNTL__YCLK_SEL_MASK |
				       MPLL_AD_FUNC_CNTL__YCLK_POST_DIV_MASK);
		mpll_dq_func_cntl |= (mpll_param.yclk_sel << MPLL_DQ_FUNC_CNTL__YCLK_SEL__SHIFT) |
			(mpll_param.post_div << MPLL_AD_FUNC_CNTL__YCLK_POST_DIV__SHIFT);
	}

	if (pi->caps_mclk_ss_support) {
		struct amdgpu_atom_ss ss;
		u32 freq_nom;
		u32 tmp;
		u32 reference_clock = adev->clock.mpll.reference_freq;

		if (mpll_param.qdr == 1)
			freq_nom = memory_clock * 4 * (1 << mpll_param.post_div);
		else
			freq_nom = memory_clock * 2 * (1 << mpll_param.post_div);

		tmp = (freq_nom / reference_clock);
		tmp = tmp * tmp;
		if (amdgpu_atombios_get_asic_ss_info(adev, &ss,
						     ASIC_INTERNAL_MEMORY_SS, freq_nom)) {
			u32 clks = reference_clock * 5 / ss.rate;
			u32 clkv = (u32)((((131 * ss.percentage * ss.rate) / 100) * tmp) / freq_nom);

			mpll_ss1 &= ~MPLL_SS1__CLKV_MASK;
			mpll_ss1 |= (clkv << MPLL_SS1__CLKV__SHIFT);

			mpll_ss2 &= ~MPLL_SS2__CLKS_MASK;
			mpll_ss2 |= (clks << MPLL_SS2__CLKS__SHIFT);
		}
	}

	mclk_pwrmgt_cntl &= ~MCLK_PWRMGT_CNTL__DLL_SPEED_MASK;
	mclk_pwrmgt_cntl |= (mpll_param.dll_speed << MCLK_PWRMGT_CNTL__DLL_SPEED__SHIFT);

	if (dll_state_on)
		mclk_pwrmgt_cntl |= MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK |
			MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK;
	else
		mclk_pwrmgt_cntl &= ~(MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK |
				      MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK);

	mclk->MclkFrequency = memory_clock;
	mclk->MpllFuncCntl = mpll_func_cntl;
	mclk->MpllFuncCntl_1 = mpll_func_cntl_1;
	mclk->MpllFuncCntl_2 = mpll_func_cntl_2;
	mclk->MpllAdFuncCntl = mpll_ad_func_cntl;
	mclk->MpllDqFuncCntl = mpll_dq_func_cntl;
	mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl;
	mclk->DllCntl = dll_cntl;
	mclk->MpllSs1 = mpll_ss1;
	mclk->MpllSs2 = mpll_ss2;

	return 0;
}

static int ci_populate_single_memory_level(struct amdgpu_device *adev,
					   u32 memory_clock,
					   SMU7_Discrete_MemoryLevel *memory_level)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	int ret;
	bool dll_state_on;

	if (adev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries) {
		ret = ci_get_dependency_volt_by_clk(adev,
						    &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
						    memory_clock, &memory_level->MinVddc);
		if (ret)
			return ret;
	}

	if (adev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries) {
		ret = ci_get_dependency_volt_by_clk(adev,
						    &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
						    memory_clock, &memory_level->MinVddci);
		if (ret)
			return ret;
	}

	if (adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries) {
		ret = ci_get_dependency_volt_by_clk(adev,
						    &adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
						    memory_clock, &memory_level->MinMvdd);
		if (ret)
			return ret;
	}

	memory_level->MinVddcPhases = 1;

	if (pi->vddc_phase_shed_control)
		ci_populate_phase_value_based_on_mclk(adev,
						      &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
						      memory_clock,
						      &memory_level->MinVddcPhases);

	memory_level->EnabledForActivity = 1;
	memory_level->EnabledForThrottle = 1;
	memory_level->UpH = 0;
	memory_level->DownH = 100;
	memory_level->VoltageDownH = 0;
	memory_level->ActivityLevel = (u16)pi->mclk_activity_target;

	memory_level->StutterEnable = false;
	memory_level->StrobeEnable = false;
	memory_level->EdcReadEnable = false;
	memory_level->EdcWriteEnable = false;
	memory_level->RttEnable = false;

	memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;

	if (pi->mclk_stutter_mode_threshold &&
	    (memory_clock <= pi->mclk_stutter_mode_threshold) &&
	    (!pi->uvd_enabled) &&
	    (RREG32(mmDPG_PIPE_STUTTER_CONTROL) & DPG_PIPE_STUTTER_CONTROL__STUTTER_ENABLE_MASK) &&
	    (adev->pm.dpm.new_active_crtc_count <= 2))
		memory_level->StutterEnable = true;

	if (pi->mclk_strobe_mode_threshold &&
	    (memory_clock <= pi->mclk_strobe_mode_threshold))
		memory_level->StrobeEnable = true;

	if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
		memory_level->StrobeRatio =
			ci_get_mclk_frequency_ratio(memory_clock, memory_level->StrobeEnable);
		if (pi->mclk_edc_enable_threshold &&
		    (memory_clock > pi->mclk_edc_enable_threshold))
			memory_level->EdcReadEnable = true;

		if (pi->mclk_edc_wr_enable_threshold &&
		    (memory_clock > pi->mclk_edc_wr_enable_threshold))
			memory_level->EdcWriteEnable = true;

		if (memory_level->StrobeEnable) {
			if (ci_get_mclk_frequency_ratio(memory_clock, true) >=
			    ((RREG32(mmMC_SEQ_MISC7) >> 16) & 0xf))
				dll_state_on = (RREG32(mmMC_SEQ_MISC5) >> 1) & 0x1;
			else
				dll_state_on = (RREG32(mmMC_SEQ_MISC6) >> 1) & 0x1;
		} else {
			dll_state_on = pi->dll_default_on;
		}
	} else {
		memory_level->StrobeRatio = ci_get_ddr3_mclk_frequency_ratio(memory_clock);
		dll_state_on = (RREG32(mmMC_SEQ_MISC5) >> 1) & 0x1;
	}

	ret = ci_calculate_mclk_params(adev, memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);
	if (ret)
		return ret;

	memory_level->MinVddc = cpu_to_be32(memory_level->MinVddc * VOLTAGE_SCALE);
	memory_level->MinVddcPhases = cpu_to_be32(memory_level->MinVddcPhases);
	memory_level->MinVddci = cpu_to_be32(memory_level->MinVddci * VOLTAGE_SCALE);
	memory_level->MinMvdd = cpu_to_be32(memory_level->MinMvdd * VOLTAGE_SCALE);

	memory_level->MclkFrequency = cpu_to_be32(memory_level->MclkFrequency);
	memory_level->ActivityLevel = cpu_to_be16(memory_level->ActivityLevel);
	memory_level->MpllFuncCntl = cpu_to_be32(memory_level->MpllFuncCntl);
	memory_level->MpllFuncCntl_1 = cpu_to_be32(memory_level->MpllFuncCntl_1);
	memory_level->MpllFuncCntl_2 = cpu_to_be32(memory_level->MpllFuncCntl_2);
	memory_level->MpllAdFuncCntl = cpu_to_be32(memory_level->MpllAdFuncCntl);
	memory_level->MpllDqFuncCntl = cpu_to_be32(memory_level->MpllDqFuncCntl);
	memory_level->MclkPwrmgtCntl = cpu_to_be32(memory_level->MclkPwrmgtCntl);
	memory_level->DllCntl = cpu_to_be32(memory_level->DllCntl);
	memory_level->MpllSs1 = cpu_to_be32(memory_level->MpllSs1);
	memory_level->MpllSs2 = cpu_to_be32(memory_level->MpllSs2);

	return 0;
}

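/*
 * Build the ACPI (lowest-power) levels: the engine clock parked on the
 * SPLL reference clock with the SPLL held in reset, and memory with the
 * DLLs and MCLK power management forced off.
 */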
static int ci_populate_smc_acpi_level(struct amdgpu_device *adev,
				      SMU7_Discrete_DpmTable *table)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct atom_clock_dividers dividers;
	SMU7_Discrete_VoltageLevel voltage_level;
	u32 spll_func_cntl = pi->clock_registers.cg_spll_func_cntl;
	u32 spll_func_cntl_2 = pi->clock_registers.cg_spll_func_cntl_2;
	u32 dll_cntl = pi->clock_registers.dll_cntl;
	u32 mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
	int ret;

	table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;

	if (pi->acpi_vddc)
		table->ACPILevel.MinVddc = cpu_to_be32(pi->acpi_vddc * VOLTAGE_SCALE);
	else
		table->ACPILevel.MinVddc = cpu_to_be32(pi->min_vddc_in_pp_table * VOLTAGE_SCALE);

	table->ACPILevel.MinVddcPhases = pi->vddc_phase_shed_control ? 0 : 1;

	table->ACPILevel.SclkFrequency = adev->clock.spll.reference_freq;

	ret = amdgpu_atombios_get_clock_dividers(adev,
						 COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
						 table->ACPILevel.SclkFrequency, false, &dividers);
	if (ret)
		return ret;

	table->ACPILevel.SclkDid = (u8)dividers.post_divider;
	table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
	table->ACPILevel.DeepSleepDivId = 0;

	spll_func_cntl &= ~CG_SPLL_FUNC_CNTL__SPLL_PWRON_MASK;
	spll_func_cntl |= CG_SPLL_FUNC_CNTL__SPLL_RESET_MASK;

	spll_func_cntl_2 &= ~CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL_MASK;
	spll_func_cntl_2 |= (4 << CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL__SHIFT);

	table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
	table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
	table->ACPILevel.CgSpllFuncCntl3 = pi->clock_registers.cg_spll_func_cntl_3;
	table->ACPILevel.CgSpllFuncCntl4 = pi->clock_registers.cg_spll_func_cntl_4;
	table->ACPILevel.SpllSpreadSpectrum = pi->clock_registers.cg_spll_spread_spectrum;
	table->ACPILevel.SpllSpreadSpectrum2 = pi->clock_registers.cg_spll_spread_spectrum_2;
	table->ACPILevel.CcPwrDynRm = 0;
	table->ACPILevel.CcPwrDynRm1 = 0;

	table->ACPILevel.Flags = cpu_to_be32(table->ACPILevel.Flags);
	table->ACPILevel.MinVddcPhases = cpu_to_be32(table->ACPILevel.MinVddcPhases);
	table->ACPILevel.SclkFrequency = cpu_to_be32(table->ACPILevel.SclkFrequency);
	table->ACPILevel.CgSpllFuncCntl = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl);
	table->ACPILevel.CgSpllFuncCntl2 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl2);
	table->ACPILevel.CgSpllFuncCntl3 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl3);
	table->ACPILevel.CgSpllFuncCntl4 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl4);
	table->ACPILevel.SpllSpreadSpectrum = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum);
	table->ACPILevel.SpllSpreadSpectrum2 = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum2);
	table->ACPILevel.CcPwrDynRm = cpu_to_be32(table->ACPILevel.CcPwrDynRm);
	table->ACPILevel.CcPwrDynRm1 = cpu_to_be32(table->ACPILevel.CcPwrDynRm1);

	table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc;
	table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;

	if (pi->vddci_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
		if (pi->acpi_vddci)
			table->MemoryACPILevel.MinVddci =
				cpu_to_be32(pi->acpi_vddci * VOLTAGE_SCALE);
		else
			table->MemoryACPILevel.MinVddci =
				cpu_to_be32(pi->min_vddci_in_pp_table * VOLTAGE_SCALE);
	}

	if (ci_populate_mvdd_value(adev, 0, &voltage_level))
		table->MemoryACPILevel.MinMvdd = 0;
	else
		table->MemoryACPILevel.MinMvdd =
			cpu_to_be32(voltage_level.Voltage * VOLTAGE_SCALE);

	mclk_pwrmgt_cntl |= MCLK_PWRMGT_CNTL__MRDCK0_RESET_MASK |
		MCLK_PWRMGT_CNTL__MRDCK1_RESET_MASK;
	mclk_pwrmgt_cntl &= ~(MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK |
			      MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK);

	dll_cntl &= ~(DLL_CNTL__MRDCK0_BYPASS_MASK | DLL_CNTL__MRDCK1_BYPASS_MASK);

	table->MemoryACPILevel.DllCntl = cpu_to_be32(dll_cntl);
	table->MemoryACPILevel.MclkPwrmgtCntl = cpu_to_be32(mclk_pwrmgt_cntl);
	table->MemoryACPILevel.MpllAdFuncCntl =
		cpu_to_be32(pi->clock_registers.mpll_ad_func_cntl);
	table->MemoryACPILevel.MpllDqFuncCntl =
		cpu_to_be32(pi->clock_registers.mpll_dq_func_cntl);
	table->MemoryACPILevel.MpllFuncCntl =
		cpu_to_be32(pi->clock_registers.mpll_func_cntl);
	table->MemoryACPILevel.MpllFuncCntl_1 =
		cpu_to_be32(pi->clock_registers.mpll_func_cntl_1);
	table->MemoryACPILevel.MpllFuncCntl_2 =
		cpu_to_be32(pi->clock_registers.mpll_func_cntl_2);
	table->MemoryACPILevel.MpllSs1 = cpu_to_be32(pi->clock_registers.mpll_ss1);
	table->MemoryACPILevel.MpllSs2 = cpu_to_be32(pi->clock_registers.mpll_ss2);

	table->MemoryACPILevel.EnabledForThrottle = 0;
	table->MemoryACPILevel.EnabledForActivity = 0;
	table->MemoryACPILevel.UpH = 0;
	table->MemoryACPILevel.DownH = 100;
	table->MemoryACPILevel.VoltageDownH = 0;
	table->MemoryACPILevel.ActivityLevel =
		cpu_to_be16((u16)pi->mclk_activity_target);

	table->MemoryACPILevel.StutterEnable = false;
	table->MemoryACPILevel.StrobeEnable = false;
	table->MemoryACPILevel.EdcReadEnable = false;
	table->MemoryACPILevel.EdcWriteEnable = false;
	table->MemoryACPILevel.RttEnable = false;

	return 0;
}

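/*
 * ULV (ultra-low voltage) state: toggled via SMC messages; the VDDC
 * offset is computed from adev->pm.dpm.backbias_response_time, which
 * appears to be repurposed to carry the ULV target voltage here.
 */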
static int ci_enable_ulv(struct amdgpu_device *adev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_ulv_parm *ulv = &pi->ulv;

	if (ulv->supported) {
		if (enable)
			return (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableULV) == PPSMC_Result_OK) ?
				0 : -EINVAL;
		else
			return (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DisableULV) == PPSMC_Result_OK) ?
				0 : -EINVAL;
	}

	return 0;
}

static int ci_populate_ulv_level(struct amdgpu_device *adev,
				 SMU7_Discrete_Ulv *state)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u16 ulv_voltage = adev->pm.dpm.backbias_response_time;

	state->CcPwrDynRm = 0;
	state->CcPwrDynRm1 = 0;

	if (ulv_voltage == 0) {
		pi->ulv.supported = false;
		return 0;
	}

	if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
		if (ulv_voltage > adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
			state->VddcOffset = 0;
		else
			state->VddcOffset =
				adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage;
	} else {
		if (ulv_voltage > adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
			state->VddcOffsetVid = 0;
		else
			state->VddcOffsetVid = (u8)
				((adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage) *
				 VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
	}
	state->VddcPhase = pi->vddc_phase_shed_control ? 0 : 1;

	state->CcPwrDynRm = cpu_to_be32(state->CcPwrDynRm);
	state->CcPwrDynRm1 = cpu_to_be32(state->CcPwrDynRm1);
	state->VddcOffset = cpu_to_be16(state->VddcOffset);

	return 0;
}

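/*
 * Derive the engine PLL (SPLL) register set, including the fractional
 * feedback divider and optional spread spectrum, for a target sclk.
 */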
3301static int ci_calculate_sclk_params(struct amdgpu_device *adev,
3302 u32 engine_clock,
3303 SMU7_Discrete_GraphicsLevel *sclk)
3304{
3305 struct ci_power_info *pi = ci_get_pi(adev);
3306 struct atom_clock_dividers dividers;
3307 u32 spll_func_cntl_3 = pi->clock_registers.cg_spll_func_cntl_3;
3308 u32 spll_func_cntl_4 = pi->clock_registers.cg_spll_func_cntl_4;
3309 u32 cg_spll_spread_spectrum = pi->clock_registers.cg_spll_spread_spectrum;
3310 u32 cg_spll_spread_spectrum_2 = pi->clock_registers.cg_spll_spread_spectrum_2;
3311 u32 reference_clock = adev->clock.spll.reference_freq;
3312 u32 reference_divider;
3313 u32 fbdiv;
3314 int ret;
3315
3316 ret = amdgpu_atombios_get_clock_dividers(adev,
3317 COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
3318 engine_clock, false, &dividers);
3319 if (ret)
3320 return ret;
3321
3322 reference_divider = 1 + dividers.ref_div;
3323 fbdiv = dividers.fb_div & 0x3FFFFFF;
3324
3325 spll_func_cntl_3 &= ~CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV_MASK;
3326 spll_func_cntl_3 |= (fbdiv << CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV__SHIFT);
3327 spll_func_cntl_3 |= CG_SPLL_FUNC_CNTL_3__SPLL_DITHEN_MASK;
3328
3329 if (pi->caps_sclk_ss_support) {
3330 struct amdgpu_atom_ss ss;
3331 u32 vco_freq = engine_clock * dividers.post_div;
3332
3333 if (amdgpu_atombios_get_asic_ss_info(adev, &ss,
3334 ASIC_INTERNAL_ENGINE_SS, vco_freq)) {
3335 u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
3336 u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000);
3337
3338 cg_spll_spread_spectrum &= ~(CG_SPLL_SPREAD_SPECTRUM__CLKS_MASK | CG_SPLL_SPREAD_SPECTRUM__SSEN_MASK);
3339 cg_spll_spread_spectrum |= (clk_s << CG_SPLL_SPREAD_SPECTRUM__CLKS__SHIFT);
3340 cg_spll_spread_spectrum |= (1 << CG_SPLL_SPREAD_SPECTRUM__SSEN__SHIFT);
3341
3342 cg_spll_spread_spectrum_2 &= ~CG_SPLL_SPREAD_SPECTRUM_2__CLKV_MASK;
3343 cg_spll_spread_spectrum_2 |= (clk_v << CG_SPLL_SPREAD_SPECTRUM_2__CLKV__SHIFT);
3344 }
3345 }
3346
3347 sclk->SclkFrequency = engine_clock;
3348 sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
3349 sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
3350 sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
3351 sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
3352 sclk->SclkDid = (u8)dividers.post_divider;
3353
3354 return 0;
3355}
3356
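/*
 * Editorial note: fills one SCLK DPM entry: clock/PLL parameters, the
 * minimum VDDC for this clock from the vbios dependency table, optional
 * phase shedding and the deep sleep divider.  All multi-byte fields are
 * converted to the SMC's big-endian layout before upload.
 */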
3357static int ci_populate_single_graphic_level(struct amdgpu_device *adev,
3358 u32 engine_clock,
3359 u16 sclk_activity_level_t,
3360 SMU7_Discrete_GraphicsLevel *graphic_level)
3361{
3362 struct ci_power_info *pi = ci_get_pi(adev);
3363 int ret;
3364
3365 ret = ci_calculate_sclk_params(adev, engine_clock, graphic_level);
3366 if (ret)
3367 return ret;
3368
3369 ret = ci_get_dependency_volt_by_clk(adev,
3370 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
3371 engine_clock, &graphic_level->MinVddc);
3372 if (ret)
3373 return ret;
3374
3375 graphic_level->SclkFrequency = engine_clock;
3376
3377 graphic_level->Flags = 0;
3378 graphic_level->MinVddcPhases = 1;
3379
3380 if (pi->vddc_phase_shed_control)
3381 ci_populate_phase_value_based_on_sclk(adev,
3382 &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
3383 engine_clock,
3384 &graphic_level->MinVddcPhases);
3385
3386 graphic_level->ActivityLevel = sclk_activity_level_t;
3387
3388 graphic_level->CcPwrDynRm = 0;
3389 graphic_level->CcPwrDynRm1 = 0;
3390 graphic_level->EnabledForThrottle = 1;
3391 graphic_level->UpH = 0;
3392 graphic_level->DownH = 0;
3393 graphic_level->VoltageDownH = 0;
3394 graphic_level->PowerThrottle = 0;
3395
3396 if (pi->caps_sclk_ds)
Nils Wallménius438498a2016-05-05 09:07:48 +02003397 graphic_level->DeepSleepDivId = ci_get_sleep_divider_id_from_clock(engine_clock,
Alex Deuchera2e73f52015-04-20 17:09:27 -04003398 CISLAND_MINIMUM_ENGINE_CLOCK);
3399
3400 graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
3401
3402 graphic_level->Flags = cpu_to_be32(graphic_level->Flags);
3403 graphic_level->MinVddc = cpu_to_be32(graphic_level->MinVddc * VOLTAGE_SCALE);
3404 graphic_level->MinVddcPhases = cpu_to_be32(graphic_level->MinVddcPhases);
3405 graphic_level->SclkFrequency = cpu_to_be32(graphic_level->SclkFrequency);
3406 graphic_level->ActivityLevel = cpu_to_be16(graphic_level->ActivityLevel);
3407 graphic_level->CgSpllFuncCntl3 = cpu_to_be32(graphic_level->CgSpllFuncCntl3);
3408 graphic_level->CgSpllFuncCntl4 = cpu_to_be32(graphic_level->CgSpllFuncCntl4);
3409 graphic_level->SpllSpreadSpectrum = cpu_to_be32(graphic_level->SpllSpreadSpectrum);
3410 graphic_level->SpllSpreadSpectrum2 = cpu_to_be32(graphic_level->SpllSpreadSpectrum2);
3411 graphic_level->CcPwrDynRm = cpu_to_be32(graphic_level->CcPwrDynRm);
3412 graphic_level->CcPwrDynRm1 = cpu_to_be32(graphic_level->CcPwrDynRm1);
Alex Deuchera2e73f52015-04-20 17:09:27 -04003413
3414 return 0;
3415}
3416
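/*
 * Editorial note: populates every SCLK level and copies the array into
 * SMC SRAM.  Level 0 is always flagged active, the deep sleep divider
 * is only kept on the two lowest levels, and the top level carries the
 * high display watermark.
 */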
3417static int ci_populate_all_graphic_levels(struct amdgpu_device *adev)
3418{
3419 struct ci_power_info *pi = ci_get_pi(adev);
3420 struct ci_dpm_table *dpm_table = &pi->dpm_table;
3421 u32 level_array_address = pi->dpm_table_start +
3422 offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
3423 u32 level_array_size = sizeof(SMU7_Discrete_GraphicsLevel) *
3424 SMU7_MAX_LEVELS_GRAPHICS;
3425 SMU7_Discrete_GraphicsLevel *levels = pi->smc_state_table.GraphicsLevel;
3426	u32 i;
	int ret;
3427
3428 memset(levels, 0, level_array_size);
3429
3430 for (i = 0; i < dpm_table->sclk_table.count; i++) {
3431 ret = ci_populate_single_graphic_level(adev,
3432 dpm_table->sclk_table.dpm_levels[i].value,
3433 (u16)pi->activity_target[i],
3434 &pi->smc_state_table.GraphicsLevel[i]);
3435 if (ret)
3436 return ret;
3437 if (i > 1)
3438 pi->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
3439 if (i == (dpm_table->sclk_table.count - 1))
3440 pi->smc_state_table.GraphicsLevel[i].DisplayWatermark =
3441 PPSMC_DISPLAY_WATERMARK_HIGH;
3442 }
Alex Deucher4223cc3d2016-03-03 12:27:46 -05003443 pi->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
Alex Deuchera2e73f52015-04-20 17:09:27 -04003444
3445 pi->smc_state_table.GraphicsDpmLevelCount = (u8)dpm_table->sclk_table.count;
3446 pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
3447 ci_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
3448
3449 ret = amdgpu_ci_copy_bytes_to_smc(adev, level_array_address,
3450 (u8 *)levels, level_array_size,
3451 pi->sram_end);
3452 if (ret)
3453 return ret;
3454
3455 return 0;
3456}
3457
3458static int ci_populate_ulv_state(struct amdgpu_device *adev,
3459 SMU7_Discrete_Ulv *ulv_level)
3460{
3461 return ci_populate_ulv_level(adev, ulv_level);
3462}
3463
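/*
 * Editorial note: memory-level counterpart of the graphics populate
 * path.  The extra device-ID check copies the level 0 voltage settings
 * to level 1 on certain Hawaii boards (0x67B0/0x67B1), presumably a
 * board-specific stability workaround.
 */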
3464static int ci_populate_all_memory_levels(struct amdgpu_device *adev)
3465{
3466 struct ci_power_info *pi = ci_get_pi(adev);
3467 struct ci_dpm_table *dpm_table = &pi->dpm_table;
3468 u32 level_array_address = pi->dpm_table_start +
3469 offsetof(SMU7_Discrete_DpmTable, MemoryLevel);
3470 u32 level_array_size = sizeof(SMU7_Discrete_MemoryLevel) *
3471 SMU7_MAX_LEVELS_MEMORY;
3472 SMU7_Discrete_MemoryLevel *levels = pi->smc_state_table.MemoryLevel;
3473	u32 i;
	int ret;
3474
3475 memset(levels, 0, level_array_size);
3476
3477 for (i = 0; i < dpm_table->mclk_table.count; i++) {
3478 if (dpm_table->mclk_table.dpm_levels[i].value == 0)
3479 return -EINVAL;
3480 ret = ci_populate_single_memory_level(adev,
3481 dpm_table->mclk_table.dpm_levels[i].value,
3482 &pi->smc_state_table.MemoryLevel[i]);
3483 if (ret)
3484 return ret;
3485 }
3486
3487 if ((dpm_table->mclk_table.count >= 2) &&
3488 ((adev->pdev->device == 0x67B0) || (adev->pdev->device == 0x67B1))) {
3489 pi->smc_state_table.MemoryLevel[1].MinVddc =
3490 pi->smc_state_table.MemoryLevel[0].MinVddc;
3491 pi->smc_state_table.MemoryLevel[1].MinVddcPhases =
3492 pi->smc_state_table.MemoryLevel[0].MinVddcPhases;
3493 }
3494
3495 pi->smc_state_table.MemoryLevel[0].ActivityLevel = cpu_to_be16(0x1F);
3496
3497 pi->smc_state_table.MemoryDpmLevelCount = (u8)dpm_table->mclk_table.count;
3498 pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
3499 ci_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
3500
3501 pi->smc_state_table.MemoryLevel[dpm_table->mclk_table.count - 1].DisplayWatermark =
3502 PPSMC_DISPLAY_WATERMARK_HIGH;
3503
3504 ret = amdgpu_ci_copy_bytes_to_smc(adev, level_array_address,
3505 (u8 *)levels, level_array_size,
3506 pi->sram_end);
3507 if (ret)
3508 return ret;
3509
3510 return 0;
3511}
3512
3513static void ci_reset_single_dpm_table(struct amdgpu_device *adev,
3514 struct ci_single_dpm_table* dpm_table,
3515 u32 count)
3516{
3517 u32 i;
3518
3519 dpm_table->count = count;
3520 for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++)
3521 dpm_table->dpm_levels[i].enabled = false;
3522}
3523
3524static void ci_setup_pcie_table_entry(struct ci_single_dpm_table* dpm_table,
3525 u32 index, u32 pcie_gen, u32 pcie_lanes)
3526{
3527 dpm_table->dpm_levels[index].value = pcie_gen;
3528 dpm_table->dpm_levels[index].param1 = pcie_lanes;
3529 dpm_table->dpm_levels[index].enabled = true;
3530}
3531
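/*
 * Editorial note: lays out the fixed six-entry PCIe DPM table,
 * alternating power-saving and performance gen/lane combinations from
 * minimum to maximum.  Bonaire deviates on entry 0, which pairs the
 * power-saving gen with the maximum lane count.
 */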
3532static int ci_setup_default_pcie_tables(struct amdgpu_device *adev)
3533{
3534 struct ci_power_info *pi = ci_get_pi(adev);
3535
3536 if (!pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels)
3537 return -EINVAL;
3538
3539 if (pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels) {
3540 pi->pcie_gen_powersaving = pi->pcie_gen_performance;
3541 pi->pcie_lane_powersaving = pi->pcie_lane_performance;
3542 } else if (!pi->use_pcie_performance_levels && pi->use_pcie_powersaving_levels) {
3543 pi->pcie_gen_performance = pi->pcie_gen_powersaving;
3544 pi->pcie_lane_performance = pi->pcie_lane_powersaving;
3545 }
3546
3547 ci_reset_single_dpm_table(adev,
3548 &pi->dpm_table.pcie_speed_table,
3549 SMU7_MAX_LEVELS_LINK);
3550
3551 if (adev->asic_type == CHIP_BONAIRE)
3552 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
3553 pi->pcie_gen_powersaving.min,
3554 pi->pcie_lane_powersaving.max);
3555 else
3556 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
3557 pi->pcie_gen_powersaving.min,
3558 pi->pcie_lane_powersaving.min);
3559 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 1,
3560 pi->pcie_gen_performance.min,
3561 pi->pcie_lane_performance.min);
3562 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 2,
3563 pi->pcie_gen_powersaving.min,
3564 pi->pcie_lane_powersaving.max);
3565 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 3,
3566 pi->pcie_gen_performance.min,
3567 pi->pcie_lane_performance.max);
3568 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 4,
3569 pi->pcie_gen_powersaving.max,
3570 pi->pcie_lane_powersaving.max);
3571 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 5,
3572 pi->pcie_gen_performance.max,
3573 pi->pcie_lane_performance.max);
3574
3575 pi->dpm_table.pcie_speed_table.count = 6;
3576
3577 return 0;
3578}
3579
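/*
 * Editorial note: rebuilds the driver's dpm_table from the vbios
 * dependency tables.  Consecutive duplicate clocks are collapsed, the
 * voltage tables are copied verbatim, and the PCIe table is filled in
 * by ci_setup_default_pcie_tables().  A pristine copy is kept in
 * golden_dpm_table, e.g. so overdrive changes can later be reverted.
 */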
3580static int ci_setup_default_dpm_tables(struct amdgpu_device *adev)
3581{
3582 struct ci_power_info *pi = ci_get_pi(adev);
3583 struct amdgpu_clock_voltage_dependency_table *allowed_sclk_vddc_table =
3584 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
3585 struct amdgpu_clock_voltage_dependency_table *allowed_mclk_table =
3586 &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
3587 struct amdgpu_cac_leakage_table *std_voltage_table =
3588 &adev->pm.dpm.dyn_state.cac_leakage_table;
3589 u32 i;
3590
3591 if (allowed_sclk_vddc_table == NULL)
3592 return -EINVAL;
3593 if (allowed_sclk_vddc_table->count < 1)
3594 return -EINVAL;
3595 if (allowed_mclk_table == NULL)
3596 return -EINVAL;
3597 if (allowed_mclk_table->count < 1)
3598 return -EINVAL;
3599
3600 memset(&pi->dpm_table, 0, sizeof(struct ci_dpm_table));
3601
3602 ci_reset_single_dpm_table(adev,
3603 &pi->dpm_table.sclk_table,
3604 SMU7_MAX_LEVELS_GRAPHICS);
3605 ci_reset_single_dpm_table(adev,
3606 &pi->dpm_table.mclk_table,
3607 SMU7_MAX_LEVELS_MEMORY);
3608 ci_reset_single_dpm_table(adev,
3609 &pi->dpm_table.vddc_table,
3610 SMU7_MAX_LEVELS_VDDC);
3611 ci_reset_single_dpm_table(adev,
3612 &pi->dpm_table.vddci_table,
3613 SMU7_MAX_LEVELS_VDDCI);
3614 ci_reset_single_dpm_table(adev,
3615 &pi->dpm_table.mvdd_table,
3616 SMU7_MAX_LEVELS_MVDD);
3617
3618 pi->dpm_table.sclk_table.count = 0;
3619 for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
3620 if ((i == 0) ||
3621 (pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count-1].value !=
3622 allowed_sclk_vddc_table->entries[i].clk)) {
3623 pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].value =
3624 allowed_sclk_vddc_table->entries[i].clk;
3625 pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].enabled =
3626				(i == 0);
3627 pi->dpm_table.sclk_table.count++;
3628 }
3629 }
3630
3631 pi->dpm_table.mclk_table.count = 0;
3632 for (i = 0; i < allowed_mclk_table->count; i++) {
3633 if ((i == 0) ||
3634 (pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count-1].value !=
3635 allowed_mclk_table->entries[i].clk)) {
3636 pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].value =
3637 allowed_mclk_table->entries[i].clk;
3638 pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].enabled =
3639				(i == 0);
3640 pi->dpm_table.mclk_table.count++;
3641 }
3642 }
3643
3644 for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
3645 pi->dpm_table.vddc_table.dpm_levels[i].value =
3646 allowed_sclk_vddc_table->entries[i].v;
3647 pi->dpm_table.vddc_table.dpm_levels[i].param1 =
3648 std_voltage_table->entries[i].leakage;
3649 pi->dpm_table.vddc_table.dpm_levels[i].enabled = true;
3650 }
3651 pi->dpm_table.vddc_table.count = allowed_sclk_vddc_table->count;
3652
3653 allowed_mclk_table = &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
3654 if (allowed_mclk_table) {
3655 for (i = 0; i < allowed_mclk_table->count; i++) {
3656 pi->dpm_table.vddci_table.dpm_levels[i].value =
3657 allowed_mclk_table->entries[i].v;
3658 pi->dpm_table.vddci_table.dpm_levels[i].enabled = true;
3659 }
3660 pi->dpm_table.vddci_table.count = allowed_mclk_table->count;
3661 }
3662
3663 allowed_mclk_table = &adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk;
3664 if (allowed_mclk_table) {
3665 for (i = 0; i < allowed_mclk_table->count; i++) {
3666 pi->dpm_table.mvdd_table.dpm_levels[i].value =
3667 allowed_mclk_table->entries[i].v;
3668 pi->dpm_table.mvdd_table.dpm_levels[i].enabled = true;
3669 }
3670 pi->dpm_table.mvdd_table.count = allowed_mclk_table->count;
3671 }
3672
3673 ci_setup_default_pcie_tables(adev);
3674
Eric Huang3cc25912016-05-19 15:54:35 -04003675 /* save a copy of the default DPM table */
3676 memcpy(&(pi->golden_dpm_table), &(pi->dpm_table),
3677 sizeof(struct ci_dpm_table));
3678
Alex Deuchera2e73f52015-04-20 17:09:27 -04003679 return 0;
3680}
3681
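/*
 * Editorial note: linear search for an exact clock match; returns the
 * index of the last matching level.  Callers ignore the -EINVAL case,
 * leaving the boot level at 0.
 */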
3682static int ci_find_boot_level(struct ci_single_dpm_table *table,
3683 u32 value, u32 *boot_level)
3684{
3685 u32 i;
3686 int ret = -EINVAL;
3687
3688	for (i = 0; i < table->count; i++) {
3689 if (value == table->dpm_levels[i].value) {
3690 *boot_level = i;
3691 ret = 0;
3692 }
3693 }
3694
3695 return ret;
3696}
3697
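/*
 * Editorial note: master SMC table initialisation.  Builds the default
 * DPM tables, populates the graphics/memory/ACPI/UVD/VCE/ACP/SAMU
 * levels and the boot state, then uploads the whole structure minus the
 * trailing three PID controller blocks (hence the
 * "- 3 * sizeof(SMU7_PIDController)" in the copy size).
 */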
3698static int ci_init_smc_table(struct amdgpu_device *adev)
3699{
3700 struct ci_power_info *pi = ci_get_pi(adev);
3701 struct ci_ulv_parm *ulv = &pi->ulv;
3702 struct amdgpu_ps *amdgpu_boot_state = adev->pm.dpm.boot_ps;
3703 SMU7_Discrete_DpmTable *table = &pi->smc_state_table;
3704 int ret;
3705
3706 ret = ci_setup_default_dpm_tables(adev);
3707 if (ret)
3708 return ret;
3709
3710 if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE)
3711 ci_populate_smc_voltage_tables(adev, table);
3712
3713 ci_init_fps_limits(adev);
3714
3715 if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC)
3716 table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
3717
3718 if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
3719 table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
3720
Christian König770d13b2018-01-12 14:52:22 +01003721 if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
Alex Deuchera2e73f52015-04-20 17:09:27 -04003722 table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
3723
3724 if (ulv->supported) {
3725 ret = ci_populate_ulv_state(adev, &pi->smc_state_table.Ulv);
3726 if (ret)
3727 return ret;
3728 WREG32_SMC(ixCG_ULV_PARAMETER, ulv->cg_ulv_parameter);
3729 }
3730
3731 ret = ci_populate_all_graphic_levels(adev);
3732 if (ret)
3733 return ret;
3734
3735 ret = ci_populate_all_memory_levels(adev);
3736 if (ret)
3737 return ret;
3738
3739 ci_populate_smc_link_level(adev, table);
3740
3741 ret = ci_populate_smc_acpi_level(adev, table);
3742 if (ret)
3743 return ret;
3744
3745 ret = ci_populate_smc_vce_level(adev, table);
3746 if (ret)
3747 return ret;
3748
3749 ret = ci_populate_smc_acp_level(adev, table);
3750 if (ret)
3751 return ret;
3752
3753 ret = ci_populate_smc_samu_level(adev, table);
3754 if (ret)
3755 return ret;
3756
3757 ret = ci_do_program_memory_timing_parameters(adev);
3758 if (ret)
3759 return ret;
3760
3761 ret = ci_populate_smc_uvd_level(adev, table);
3762 if (ret)
3763 return ret;
3764
3765 table->UvdBootLevel = 0;
3766 table->VceBootLevel = 0;
3767 table->AcpBootLevel = 0;
3768 table->SamuBootLevel = 0;
3769 table->GraphicsBootLevel = 0;
3770 table->MemoryBootLevel = 0;
3771
3772 ret = ci_find_boot_level(&pi->dpm_table.sclk_table,
3773 pi->vbios_boot_state.sclk_bootup_value,
3774 (u32 *)&pi->smc_state_table.GraphicsBootLevel);
3775
3776 ret = ci_find_boot_level(&pi->dpm_table.mclk_table,
3777 pi->vbios_boot_state.mclk_bootup_value,
3778 (u32 *)&pi->smc_state_table.MemoryBootLevel);
3779
3780 table->BootVddc = pi->vbios_boot_state.vddc_bootup_value;
3781 table->BootVddci = pi->vbios_boot_state.vddci_bootup_value;
3782 table->BootMVdd = pi->vbios_boot_state.mvdd_bootup_value;
3783
3784 ci_populate_smc_initial_state(adev, amdgpu_boot_state);
3785
3786 ret = ci_populate_bapm_parameters_in_dpm_table(adev);
3787 if (ret)
3788 return ret;
3789
3790 table->UVDInterval = 1;
3791 table->VCEInterval = 1;
3792 table->ACPInterval = 1;
3793 table->SAMUInterval = 1;
3794 table->GraphicsVoltageChangeEnable = 1;
3795 table->GraphicsThermThrottleEnable = 1;
3796 table->GraphicsInterval = 1;
3797 table->VoltageInterval = 1;
3798 table->ThermalInterval = 1;
3799 table->TemperatureLimitHigh = (u16)((pi->thermal_temp_setting.temperature_high *
3800 CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
3801 table->TemperatureLimitLow = (u16)((pi->thermal_temp_setting.temperature_low *
3802 CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
3803 table->MemoryVoltageChangeEnable = 1;
3804 table->MemoryInterval = 1;
3805 table->VoltageResponseTime = 0;
3806 table->VddcVddciDelta = 4000;
3807 table->PhaseResponseTime = 0;
3808 table->MemoryThermThrottleEnable = 1;
3809 table->PCIeBootLinkLevel = pi->dpm_table.pcie_speed_table.count - 1;
3810 table->PCIeGenInterval = 1;
3811 if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2)
3812 table->SVI2Enable = 1;
3813 else
3814 table->SVI2Enable = 0;
3815
3816 table->ThermGpio = 17;
3817 table->SclkStepSize = 0x4000;
3818
3819 table->SystemFlags = cpu_to_be32(table->SystemFlags);
3820 table->SmioMaskVddcVid = cpu_to_be32(table->SmioMaskVddcVid);
3821 table->SmioMaskVddcPhase = cpu_to_be32(table->SmioMaskVddcPhase);
3822 table->SmioMaskVddciVid = cpu_to_be32(table->SmioMaskVddciVid);
3823 table->SmioMaskMvddVid = cpu_to_be32(table->SmioMaskMvddVid);
3824 table->SclkStepSize = cpu_to_be32(table->SclkStepSize);
3825 table->TemperatureLimitHigh = cpu_to_be16(table->TemperatureLimitHigh);
3826 table->TemperatureLimitLow = cpu_to_be16(table->TemperatureLimitLow);
3827 table->VddcVddciDelta = cpu_to_be16(table->VddcVddciDelta);
3828 table->VoltageResponseTime = cpu_to_be16(table->VoltageResponseTime);
3829 table->PhaseResponseTime = cpu_to_be16(table->PhaseResponseTime);
3830 table->BootVddc = cpu_to_be16(table->BootVddc * VOLTAGE_SCALE);
3831 table->BootVddci = cpu_to_be16(table->BootVddci * VOLTAGE_SCALE);
3832 table->BootMVdd = cpu_to_be16(table->BootMVdd * VOLTAGE_SCALE);
3833
3834 ret = amdgpu_ci_copy_bytes_to_smc(adev,
3835 pi->dpm_table_start +
3836 offsetof(SMU7_Discrete_DpmTable, SystemFlags),
3837 (u8 *)&table->SystemFlags,
3838 sizeof(SMU7_Discrete_DpmTable) - 3 * sizeof(SMU7_PIDController),
3839 pi->sram_end);
3840 if (ret)
3841 return ret;
3842
3843 return 0;
3844}
3845
3846static void ci_trim_single_dpm_states(struct amdgpu_device *adev,
3847 struct ci_single_dpm_table *dpm_table,
3848 u32 low_limit, u32 high_limit)
3849{
3850 u32 i;
3851
3852 for (i = 0; i < dpm_table->count; i++) {
3853 if ((dpm_table->dpm_levels[i].value < low_limit) ||
3854 (dpm_table->dpm_levels[i].value > high_limit))
3855 dpm_table->dpm_levels[i].enabled = false;
3856 else
3857 dpm_table->dpm_levels[i].enabled = true;
3858 }
3859}
3860
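/*
 * Editorial note: disables PCIe levels outside the
 * [speed_low/lanes_low, speed_high/lanes_high] window, then
 * de-duplicates: of any two enabled levels with identical gen and lane
 * count, only the first survives.
 */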
3861static void ci_trim_pcie_dpm_states(struct amdgpu_device *adev,
3862 u32 speed_low, u32 lanes_low,
3863 u32 speed_high, u32 lanes_high)
3864{
3865 struct ci_power_info *pi = ci_get_pi(adev);
3866 struct ci_single_dpm_table *pcie_table = &pi->dpm_table.pcie_speed_table;
3867 u32 i, j;
3868
3869 for (i = 0; i < pcie_table->count; i++) {
3870 if ((pcie_table->dpm_levels[i].value < speed_low) ||
3871 (pcie_table->dpm_levels[i].param1 < lanes_low) ||
3872 (pcie_table->dpm_levels[i].value > speed_high) ||
3873 (pcie_table->dpm_levels[i].param1 > lanes_high))
3874 pcie_table->dpm_levels[i].enabled = false;
3875 else
3876 pcie_table->dpm_levels[i].enabled = true;
3877 }
3878
3879 for (i = 0; i < pcie_table->count; i++) {
3880 if (pcie_table->dpm_levels[i].enabled) {
3881 for (j = i + 1; j < pcie_table->count; j++) {
3882 if (pcie_table->dpm_levels[j].enabled) {
3883 if ((pcie_table->dpm_levels[i].value == pcie_table->dpm_levels[j].value) &&
3884 (pcie_table->dpm_levels[i].param1 == pcie_table->dpm_levels[j].param1))
3885 pcie_table->dpm_levels[j].enabled = false;
3886 }
3887 }
3888 }
3889 }
3890}
3891
3892static int ci_trim_dpm_states(struct amdgpu_device *adev,
3893 struct amdgpu_ps *amdgpu_state)
3894{
3895 struct ci_ps *state = ci_get_ps(amdgpu_state);
3896 struct ci_power_info *pi = ci_get_pi(adev);
3897 u32 high_limit_count;
3898
3899 if (state->performance_level_count < 1)
3900 return -EINVAL;
3901
3902 if (state->performance_level_count == 1)
3903 high_limit_count = 0;
3904 else
3905 high_limit_count = 1;
3906
3907 ci_trim_single_dpm_states(adev,
3908 &pi->dpm_table.sclk_table,
3909 state->performance_levels[0].sclk,
3910 state->performance_levels[high_limit_count].sclk);
3911
3912 ci_trim_single_dpm_states(adev,
3913 &pi->dpm_table.mclk_table,
3914 state->performance_levels[0].mclk,
3915 state->performance_levels[high_limit_count].mclk);
3916
3917 ci_trim_pcie_dpm_states(adev,
3918 state->performance_levels[0].pcie_gen,
3919 state->performance_levels[0].pcie_lane,
3920 state->performance_levels[high_limit_count].pcie_gen,
3921 state->performance_levels[high_limit_count].pcie_lane);
3922
3923 return 0;
3924}
3925
3926static int ci_apply_disp_minimum_voltage_request(struct amdgpu_device *adev)
3927{
3928 struct amdgpu_clock_voltage_dependency_table *disp_voltage_table =
3929 &adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk;
3930 struct amdgpu_clock_voltage_dependency_table *vddc_table =
3931 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
3932 u32 requested_voltage = 0;
3933 u32 i;
3934
3935 if (disp_voltage_table == NULL)
3936 return -EINVAL;
3937 if (!disp_voltage_table->count)
3938 return -EINVAL;
3939
3940 for (i = 0; i < disp_voltage_table->count; i++) {
3941 if (adev->clock.current_dispclk == disp_voltage_table->entries[i].clk)
3942 requested_voltage = disp_voltage_table->entries[i].v;
3943 }
3944
3945 for (i = 0; i < vddc_table->count; i++) {
3946 if (requested_voltage <= vddc_table->entries[i].v) {
3947 requested_voltage = vddc_table->entries[i].v;
3948 return (amdgpu_ci_send_msg_to_smc_with_parameter(adev,
3949 PPSMC_MSG_VddC_Request,
3950 requested_voltage * VOLTAGE_SCALE) == PPSMC_Result_OK) ?
3951 0 : -EINVAL;
3952 }
3953 }
3954
3955 return -EINVAL;
3956}
3957
3958static int ci_upload_dpm_level_enable_mask(struct amdgpu_device *adev)
3959{
3960 struct ci_power_info *pi = ci_get_pi(adev);
3961 PPSMC_Result result;
3962
3963 ci_apply_disp_minimum_voltage_request(adev);
3964
3965 if (!pi->sclk_dpm_key_disabled) {
3966 if (pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
3967 result = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
3968 PPSMC_MSG_SCLKDPM_SetEnabledMask,
3969 pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
3970 if (result != PPSMC_Result_OK)
3971 return -EINVAL;
3972 }
3973 }
3974
3975 if (!pi->mclk_dpm_key_disabled) {
3976 if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
3977 result = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
3978 PPSMC_MSG_MCLKDPM_SetEnabledMask,
3979 pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
3980 if (result != PPSMC_Result_OK)
3981 return -EINVAL;
3982 }
3983 }
3984
3985#if 0
3986 if (!pi->pcie_dpm_key_disabled) {
3987 if (pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
3988 result = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
3989 PPSMC_MSG_PCIeDPM_SetEnabledMask,
3990 pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
3991 if (result != PPSMC_Result_OK)
3992 return -EINVAL;
3993 }
3994 }
3995#endif
3996
3997 return 0;
3998}
3999
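/*
 * Editorial note: decides which SMU tables need re-uploading for the
 * new state.  A top sclk/mclk not present in the table marks an
 * overdrive (OD) update, and a change in the number of active displays
 * forces an MCLK table refresh.
 */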
4000static void ci_find_dpm_states_clocks_in_dpm_table(struct amdgpu_device *adev,
4001 struct amdgpu_ps *amdgpu_state)
4002{
4003 struct ci_power_info *pi = ci_get_pi(adev);
4004 struct ci_ps *state = ci_get_ps(amdgpu_state);
4005 struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table;
4006 u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
4007 struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table;
4008 u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
4009 u32 i;
4010
4011 pi->need_update_smu7_dpm_table = 0;
4012
4013 for (i = 0; i < sclk_table->count; i++) {
4014 if (sclk == sclk_table->dpm_levels[i].value)
4015 break;
4016 }
4017
4018 if (i >= sclk_table->count) {
4019 pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
4020 } else {
4021		/* XXX check display min clock requirements; the self-comparison
		 * below can never be true, so DPMTABLE_UPDATE_SCLK stays clear
		 * until a real display minimum engine clock is plumbed through.
		 */
4022 if (CISLAND_MINIMUM_ENGINE_CLOCK != CISLAND_MINIMUM_ENGINE_CLOCK)
4023 pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
4024 }
4025
4026 for (i = 0; i < mclk_table->count; i++) {
4027 if (mclk == mclk_table->dpm_levels[i].value)
4028 break;
4029 }
4030
4031 if (i >= mclk_table->count)
4032 pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
4033
4034 if (adev->pm.dpm.current_active_crtc_count !=
4035 adev->pm.dpm.new_active_crtc_count)
4036 pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
4037}
4038
4039static int ci_populate_and_upload_sclk_mclk_dpm_levels(struct amdgpu_device *adev,
4040 struct amdgpu_ps *amdgpu_state)
4041{
4042 struct ci_power_info *pi = ci_get_pi(adev);
4043 struct ci_ps *state = ci_get_ps(amdgpu_state);
4044 u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
4045 u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
4046 struct ci_dpm_table *dpm_table = &pi->dpm_table;
4047 int ret;
4048
4049 if (!pi->need_update_smu7_dpm_table)
4050 return 0;
4051
4052 if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK)
4053 dpm_table->sclk_table.dpm_levels[dpm_table->sclk_table.count-1].value = sclk;
4054
4055 if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)
4056 dpm_table->mclk_table.dpm_levels[dpm_table->mclk_table.count-1].value = mclk;
4057
4058 if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK)) {
4059 ret = ci_populate_all_graphic_levels(adev);
4060 if (ret)
4061 return ret;
4062 }
4063
4064 if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_MCLK | DPMTABLE_UPDATE_MCLK)) {
4065 ret = ci_populate_all_memory_levels(adev);
4066 if (ret)
4067 return ret;
4068 }
4069
4070 return 0;
4071}
4072
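/*
 * Editorial note: enables/disables UVD DPM.  The enable mask keeps only
 * dependency-table entries whose voltage fits the current AC/DC limit.
 * While UVD is active, MCLK DPM level 0 is masked off, apparently to
 * avoid memory clock switching while decode is running.
 */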
4073static int ci_enable_uvd_dpm(struct amdgpu_device *adev, bool enable)
4074{
4075 struct ci_power_info *pi = ci_get_pi(adev);
4076 const struct amdgpu_clock_and_voltage_limits *max_limits;
4077 int i;
4078
4079 if (adev->pm.dpm.ac_power)
4080 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
4081 else
4082 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
4083
4084 if (enable) {
4085 pi->dpm_level_enable_mask.uvd_dpm_enable_mask = 0;
4086
4087 for (i = adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
4088 if (adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
4089 pi->dpm_level_enable_mask.uvd_dpm_enable_mask |= 1 << i;
4090
4091 if (!pi->caps_uvd_dpm)
4092 break;
4093 }
4094 }
4095
4096 amdgpu_ci_send_msg_to_smc_with_parameter(adev,
4097 PPSMC_MSG_UVDDPM_SetEnabledMask,
4098 pi->dpm_level_enable_mask.uvd_dpm_enable_mask);
4099
4100 if (pi->last_mclk_dpm_enable_mask & 0x1) {
4101 pi->uvd_enabled = true;
4102 pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
4103 amdgpu_ci_send_msg_to_smc_with_parameter(adev,
4104 PPSMC_MSG_MCLKDPM_SetEnabledMask,
4105 pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
4106 }
4107 } else {
Rex Zhu49a5d732016-10-21 16:55:02 +08004108 if (pi->uvd_enabled) {
Alex Deuchera2e73f52015-04-20 17:09:27 -04004109 pi->uvd_enabled = false;
4110 pi->dpm_level_enable_mask.mclk_dpm_enable_mask |= 1;
4111 amdgpu_ci_send_msg_to_smc_with_parameter(adev,
4112 PPSMC_MSG_MCLKDPM_SetEnabledMask,
4113 pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
4114 }
4115 }
4116
4117 return (amdgpu_ci_send_msg_to_smc(adev, enable ?
4118 PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable) == PPSMC_Result_OK) ?
4119 0 : -EINVAL;
4120}
4121
4122static int ci_enable_vce_dpm(struct amdgpu_device *adev, bool enable)
4123{
4124 struct ci_power_info *pi = ci_get_pi(adev);
4125 const struct amdgpu_clock_and_voltage_limits *max_limits;
4126 int i;
4127
4128 if (adev->pm.dpm.ac_power)
4129 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
4130 else
4131 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
4132
4133 if (enable) {
4134 pi->dpm_level_enable_mask.vce_dpm_enable_mask = 0;
4135 for (i = adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
4136 if (adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
4137 pi->dpm_level_enable_mask.vce_dpm_enable_mask |= 1 << i;
4138
4139 if (!pi->caps_vce_dpm)
4140 break;
4141 }
4142 }
4143
4144 amdgpu_ci_send_msg_to_smc_with_parameter(adev,
4145 PPSMC_MSG_VCEDPM_SetEnabledMask,
4146 pi->dpm_level_enable_mask.vce_dpm_enable_mask);
4147 }
4148
4149 return (amdgpu_ci_send_msg_to_smc(adev, enable ?
4150 PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable) == PPSMC_Result_OK) ?
4151 0 : -EINVAL;
4152}
4153
4154#if 0
4155static int ci_enable_samu_dpm(struct amdgpu_device *adev, bool enable)
4156{
4157 struct ci_power_info *pi = ci_get_pi(adev);
4158 const struct amdgpu_clock_and_voltage_limits *max_limits;
4159 int i;
4160
4161 if (adev->pm.dpm.ac_power)
4162 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
4163 else
4164 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
4165
4166 if (enable) {
4167 pi->dpm_level_enable_mask.samu_dpm_enable_mask = 0;
4168 for (i = adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
4169 if (adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
4170 pi->dpm_level_enable_mask.samu_dpm_enable_mask |= 1 << i;
4171
4172 if (!pi->caps_samu_dpm)
4173 break;
4174 }
4175 }
4176
4177 amdgpu_ci_send_msg_to_smc_with_parameter(adev,
4178 PPSMC_MSG_SAMUDPM_SetEnabledMask,
4179 pi->dpm_level_enable_mask.samu_dpm_enable_mask);
4180 }
4181 return (amdgpu_ci_send_msg_to_smc(adev, enable ?
4182 PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable) == PPSMC_Result_OK) ?
4183 0 : -EINVAL;
4184}
4185
4186static int ci_enable_acp_dpm(struct amdgpu_device *adev, bool enable)
4187{
4188 struct ci_power_info *pi = ci_get_pi(adev);
4189 const struct amdgpu_clock_and_voltage_limits *max_limits;
4190 int i;
4191
4192 if (adev->pm.dpm.ac_power)
4193 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
4194 else
4195 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
4196
4197 if (enable) {
4198 pi->dpm_level_enable_mask.acp_dpm_enable_mask = 0;
4199 for (i = adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
4200 if (adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
4201 pi->dpm_level_enable_mask.acp_dpm_enable_mask |= 1 << i;
4202
4203 if (!pi->caps_acp_dpm)
4204 break;
4205 }
4206 }
4207
4208 amdgpu_ci_send_msg_to_smc_with_parameter(adev,
4209 PPSMC_MSG_ACPDPM_SetEnabledMask,
4210 pi->dpm_level_enable_mask.acp_dpm_enable_mask);
4211 }
4212
4213 return (amdgpu_ci_send_msg_to_smc(adev, enable ?
4214 PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable) == PPSMC_Result_OK) ?
4215 0 : -EINVAL;
4216}
4217#endif
4218
4219static int ci_update_uvd_dpm(struct amdgpu_device *adev, bool gate)
4220{
4221 struct ci_power_info *pi = ci_get_pi(adev);
4222 u32 tmp;
Rex Zhu3495a102016-10-26 18:05:00 +08004223 int ret = 0;
Alex Deuchera2e73f52015-04-20 17:09:27 -04004224
4225 if (!gate) {
Rex Zhu3495a102016-10-26 18:05:00 +08004226 /* turn the clocks on when decoding */
Alex Deuchera2e73f52015-04-20 17:09:27 -04004227 if (pi->caps_uvd_dpm ||
4228 (adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count <= 0))
4229 pi->smc_state_table.UvdBootLevel = 0;
4230 else
4231 pi->smc_state_table.UvdBootLevel =
4232 adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1;
4233
4234 tmp = RREG32_SMC(ixDPM_TABLE_475);
4235 tmp &= ~DPM_TABLE_475__UvdBootLevel_MASK;
4236 tmp |= (pi->smc_state_table.UvdBootLevel << DPM_TABLE_475__UvdBootLevel__SHIFT);
4237 WREG32_SMC(ixDPM_TABLE_475, tmp);
Rex Zhu3495a102016-10-26 18:05:00 +08004238 ret = ci_enable_uvd_dpm(adev, true);
4239 } else {
4240 ret = ci_enable_uvd_dpm(adev, false);
4241 if (ret)
4242 return ret;
Alex Deuchera2e73f52015-04-20 17:09:27 -04004243 }
4244
Rex Zhu3495a102016-10-26 18:05:00 +08004245 return ret;
Alex Deuchera2e73f52015-04-20 17:09:27 -04004246}
4247
4248static u8 ci_get_vce_boot_level(struct amdgpu_device *adev)
4249{
4250 u8 i;
4251 u32 min_evclk = 30000; /* ??? */
4252 struct amdgpu_vce_clock_voltage_dependency_table *table =
4253 &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
4254
4255 for (i = 0; i < table->count; i++) {
4256 if (table->entries[i].evclk >= min_evclk)
4257 return i;
4258 }
4259
4260 return table->count - 1;
4261}
4262
4263static int ci_update_vce_dpm(struct amdgpu_device *adev,
4264 struct amdgpu_ps *amdgpu_new_state,
4265 struct amdgpu_ps *amdgpu_current_state)
4266{
4267 struct ci_power_info *pi = ci_get_pi(adev);
4268 int ret = 0;
4269 u32 tmp;
4270
4271 if (amdgpu_current_state->evclk != amdgpu_new_state->evclk) {
4272 if (amdgpu_new_state->evclk) {
Alex Deuchera2e73f52015-04-20 17:09:27 -04004273 pi->smc_state_table.VceBootLevel = ci_get_vce_boot_level(adev);
4274 tmp = RREG32_SMC(ixDPM_TABLE_475);
4275 tmp &= ~DPM_TABLE_475__VceBootLevel_MASK;
4276 tmp |= (pi->smc_state_table.VceBootLevel << DPM_TABLE_475__VceBootLevel__SHIFT);
4277 WREG32_SMC(ixDPM_TABLE_475, tmp);
4278
4279 ret = ci_enable_vce_dpm(adev, true);
4280 } else {
Rex Zhu415282b2016-10-26 17:05:30 +08004281 ret = ci_enable_vce_dpm(adev, false);
4282 if (ret)
4283 return ret;
Alex Deuchera2e73f52015-04-20 17:09:27 -04004284 }
4285 }
4286 return ret;
4287}
4288
4289#if 0
4290static int ci_update_samu_dpm(struct amdgpu_device *adev, bool gate)
4291{
4292 return ci_enable_samu_dpm(adev, gate);
4293}
4294
4295static int ci_update_acp_dpm(struct amdgpu_device *adev, bool gate)
4296{
4297 struct ci_power_info *pi = ci_get_pi(adev);
4298 u32 tmp;
4299
4300 if (!gate) {
4301 pi->smc_state_table.AcpBootLevel = 0;
4302
4303 tmp = RREG32_SMC(ixDPM_TABLE_475);
4304 tmp &= ~AcpBootLevel_MASK;
4305 tmp |= AcpBootLevel(pi->smc_state_table.AcpBootLevel);
4306 WREG32_SMC(ixDPM_TABLE_475, tmp);
4307 }
4308
4309 return ci_enable_acp_dpm(adev, !gate);
4310}
4311#endif
4312
4313static int ci_generate_dpm_level_enable_mask(struct amdgpu_device *adev,
4314 struct amdgpu_ps *amdgpu_state)
4315{
4316 struct ci_power_info *pi = ci_get_pi(adev);
4317 int ret;
4318
4319 ret = ci_trim_dpm_states(adev, amdgpu_state);
4320 if (ret)
4321 return ret;
4322
4323 pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
4324 ci_get_dpm_level_enable_mask_value(&pi->dpm_table.sclk_table);
4325 pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
4326 ci_get_dpm_level_enable_mask_value(&pi->dpm_table.mclk_table);
4327 pi->last_mclk_dpm_enable_mask =
4328 pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
4329 if (pi->uvd_enabled) {
4330 if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask & 1)
4331 pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
4332 }
4333 pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
4334 ci_get_dpm_level_enable_mask_value(&pi->dpm_table.pcie_speed_table);
4335
4336 return 0;
4337}
4338
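/*
 * Editorial note: returns the index of the lowest set bit; callers must
 * guarantee a non-zero mask or this loop never terminates.
 */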
4339static u32 ci_get_lowest_enabled_level(struct amdgpu_device *adev,
4340 u32 level_mask)
4341{
4342 u32 level = 0;
4343
4344 while ((level_mask & (1 << level)) == 0)
4345 level++;
4346
4347 return level;
4348}
4349
4350
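/*
 * Editorial note: forces the highest, lowest or automatic DPM level.
 * For "high" the level count is derived by shifting the enable mask;
 * after sending the force message the code polls
 * TARGET_AND_CURRENT_PROFILE_INDEX until the SMC reports the requested
 * sclk/mclk/PCIe index or the timeout expires.
 */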
Rex Zhucfa289f2017-09-06 15:27:59 +08004351static int ci_dpm_force_performance_level(void *handle,
Rex Zhue5d03ac2016-12-23 14:39:41 +08004352 enum amd_dpm_forced_level level)
Alex Deuchera2e73f52015-04-20 17:09:27 -04004353{
Rex Zhucfa289f2017-09-06 15:27:59 +08004354 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
Alex Deuchera2e73f52015-04-20 17:09:27 -04004355 struct ci_power_info *pi = ci_get_pi(adev);
4356 u32 tmp, levels, i;
4357 int ret;
4358
Rex Zhue5d03ac2016-12-23 14:39:41 +08004359 if (level == AMD_DPM_FORCED_LEVEL_HIGH) {
Alex Deuchera2e73f52015-04-20 17:09:27 -04004360 if ((!pi->pcie_dpm_key_disabled) &&
4361 pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
4362 levels = 0;
4363 tmp = pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
4364 while (tmp >>= 1)
4365 levels++;
4366 if (levels) {
4367				ret = ci_dpm_force_state_pcie(adev, levels);
4368 if (ret)
4369 return ret;
4370 for (i = 0; i < adev->usec_timeout; i++) {
4371 tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_1) &
4372 TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX_MASK) >>
4373 TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX__SHIFT;
4374 if (tmp == levels)
4375 break;
4376 udelay(1);
4377 }
4378 }
4379 }
4380 if ((!pi->sclk_dpm_key_disabled) &&
4381 pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
4382 levels = 0;
4383 tmp = pi->dpm_level_enable_mask.sclk_dpm_enable_mask;
4384 while (tmp >>= 1)
4385 levels++;
4386 if (levels) {
4387 ret = ci_dpm_force_state_sclk(adev, levels);
4388 if (ret)
4389 return ret;
4390 for (i = 0; i < adev->usec_timeout; i++) {
4391 tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
4392 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >>
4393 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT;
4394 if (tmp == levels)
4395 break;
4396 udelay(1);
4397 }
4398 }
4399 }
4400 if ((!pi->mclk_dpm_key_disabled) &&
4401 pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
4402 levels = 0;
4403 tmp = pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
4404 while (tmp >>= 1)
4405 levels++;
4406 if (levels) {
4407 ret = ci_dpm_force_state_mclk(adev, levels);
4408 if (ret)
4409 return ret;
4410 for (i = 0; i < adev->usec_timeout; i++) {
4411 tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
4412 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX_MASK) >>
4413 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX__SHIFT;
4414 if (tmp == levels)
4415 break;
4416 udelay(1);
4417 }
4418 }
4419 }
Rex Zhue5d03ac2016-12-23 14:39:41 +08004420 } else if (level == AMD_DPM_FORCED_LEVEL_LOW) {
Alex Deuchera2e73f52015-04-20 17:09:27 -04004421 if ((!pi->sclk_dpm_key_disabled) &&
4422 pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
4423 levels = ci_get_lowest_enabled_level(adev,
4424 pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
4425 ret = ci_dpm_force_state_sclk(adev, levels);
4426 if (ret)
4427 return ret;
4428 for (i = 0; i < adev->usec_timeout; i++) {
4429 tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
4430 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >>
4431 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT;
4432 if (tmp == levels)
4433 break;
4434 udelay(1);
4435 }
4436 }
4437 if ((!pi->mclk_dpm_key_disabled) &&
4438 pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
4439 levels = ci_get_lowest_enabled_level(adev,
4440 pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
4441 ret = ci_dpm_force_state_mclk(adev, levels);
4442 if (ret)
4443 return ret;
4444 for (i = 0; i < adev->usec_timeout; i++) {
4445 tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
4446 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX_MASK) >>
4447 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX__SHIFT;
4448 if (tmp == levels)
4449 break;
4450 udelay(1);
4451 }
4452 }
4453 if ((!pi->pcie_dpm_key_disabled) &&
4454 pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
4455 levels = ci_get_lowest_enabled_level(adev,
4456 pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
4457 ret = ci_dpm_force_state_pcie(adev, levels);
4458 if (ret)
4459 return ret;
4460 for (i = 0; i < adev->usec_timeout; i++) {
4461 tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_1) &
4462 TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX_MASK) >>
4463 TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX__SHIFT;
4464 if (tmp == levels)
4465 break;
4466 udelay(1);
4467 }
4468 }
Rex Zhue5d03ac2016-12-23 14:39:41 +08004469 } else if (level == AMD_DPM_FORCED_LEVEL_AUTO) {
Alex Deuchera2e73f52015-04-20 17:09:27 -04004470 if (!pi->pcie_dpm_key_disabled) {
4471 PPSMC_Result smc_result;
4472
4473 smc_result = amdgpu_ci_send_msg_to_smc(adev,
4474 PPSMC_MSG_PCIeDPM_UnForceLevel);
4475 if (smc_result != PPSMC_Result_OK)
4476 return -EINVAL;
4477 }
4478 ret = ci_upload_dpm_level_enable_mask(adev);
4479 if (ret)
4480 return ret;
4481 }
4482
4483 adev->pm.dpm.forced_level = level;
4484
4485 return 0;
4486}
4487
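/*
 * Editorial note: expands the "special" MC registers.  Each
 * MC_SEQ_MISC1 entry spawns shadow EMRS/MRS (and, for non-GDDR5,
 * MC_PMG_AUTO_CMD) entries, and MC_SEQ_RESERVE_M spawns an MRS1 entry,
 * all appended after the last copied register while staying within the
 * SMC register array bounds.
 */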
4488static int ci_set_mc_special_registers(struct amdgpu_device *adev,
4489 struct ci_mc_reg_table *table)
4490{
4491 u8 i, j, k;
4492 u32 temp_reg;
4493
4494 for (i = 0, j = table->last; i < table->last; i++) {
4495 if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4496 return -EINVAL;
4497		switch (table->mc_reg_address[i].s1) {
4498 case mmMC_SEQ_MISC1:
4499 temp_reg = RREG32(mmMC_PMG_CMD_EMRS);
4500 table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS;
4501 table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP;
4502 for (k = 0; k < table->num_entries; k++) {
4503 table->mc_reg_table_entry[k].mc_data[j] =
4504 ((temp_reg & 0xffff0000)) | ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
4505 }
4506 j++;
Ernst Sjöstrand47e87882017-11-19 18:52:45 +01004507
Alex Deuchera2e73f52015-04-20 17:09:27 -04004508 if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4509 return -EINVAL;
Alex Deuchera2e73f52015-04-20 17:09:27 -04004510 temp_reg = RREG32(mmMC_PMG_CMD_MRS);
4511 table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS;
4512 table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP;
4513 for (k = 0; k < table->num_entries; k++) {
4514 table->mc_reg_table_entry[k].mc_data[j] =
4515 (temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
Christian König770d13b2018-01-12 14:52:22 +01004516 if (adev->gmc.vram_type != AMDGPU_VRAM_TYPE_GDDR5)
Alex Deuchera2e73f52015-04-20 17:09:27 -04004517 table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
4518 }
4519 j++;
Alex Deuchera2e73f52015-04-20 17:09:27 -04004520
Christian König770d13b2018-01-12 14:52:22 +01004521 if (adev->gmc.vram_type != AMDGPU_VRAM_TYPE_GDDR5) {
Ernst Sjöstrand47e87882017-11-19 18:52:45 +01004522 if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4523 return -EINVAL;
Alex Deuchera2e73f52015-04-20 17:09:27 -04004524 table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
4525 table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
4526 for (k = 0; k < table->num_entries; k++) {
4527 table->mc_reg_table_entry[k].mc_data[j] =
4528 (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
4529 }
4530 j++;
Alex Deuchera2e73f52015-04-20 17:09:27 -04004531 }
4532 break;
4533 case mmMC_SEQ_RESERVE_M:
4534 temp_reg = RREG32(mmMC_PMG_CMD_MRS1);
4535 table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1;
4536 table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP;
4537 for (k = 0; k < table->num_entries; k++) {
4538 table->mc_reg_table_entry[k].mc_data[j] =
4539 (temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
4540 }
4541 j++;
Alex Deuchera2e73f52015-04-20 17:09:27 -04004542 break;
4543 default:
4544 break;
4545 }
4546
4547 }
4548
4549 table->last = j;
4550
4551 return 0;
4552}
4553
4554static bool ci_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
4555{
4556 bool result = true;
4557
4558	switch (in_reg) {
4559 case mmMC_SEQ_RAS_TIMING:
4560 *out_reg = mmMC_SEQ_RAS_TIMING_LP;
4561 break;
4562 case mmMC_SEQ_DLL_STBY:
4563 *out_reg = mmMC_SEQ_DLL_STBY_LP;
4564 break;
4565 case mmMC_SEQ_G5PDX_CMD0:
4566 *out_reg = mmMC_SEQ_G5PDX_CMD0_LP;
4567 break;
4568 case mmMC_SEQ_G5PDX_CMD1:
4569 *out_reg = mmMC_SEQ_G5PDX_CMD1_LP;
4570 break;
4571 case mmMC_SEQ_G5PDX_CTRL:
4572 *out_reg = mmMC_SEQ_G5PDX_CTRL_LP;
4573 break;
4574 case mmMC_SEQ_CAS_TIMING:
4575 *out_reg = mmMC_SEQ_CAS_TIMING_LP;
4576 break;
4577 case mmMC_SEQ_MISC_TIMING:
4578 *out_reg = mmMC_SEQ_MISC_TIMING_LP;
4579 break;
4580 case mmMC_SEQ_MISC_TIMING2:
4581 *out_reg = mmMC_SEQ_MISC_TIMING2_LP;
4582 break;
4583 case mmMC_SEQ_PMG_DVS_CMD:
4584 *out_reg = mmMC_SEQ_PMG_DVS_CMD_LP;
4585 break;
4586 case mmMC_SEQ_PMG_DVS_CTL:
4587 *out_reg = mmMC_SEQ_PMG_DVS_CTL_LP;
4588 break;
4589 case mmMC_SEQ_RD_CTL_D0:
4590 *out_reg = mmMC_SEQ_RD_CTL_D0_LP;
4591 break;
4592 case mmMC_SEQ_RD_CTL_D1:
4593 *out_reg = mmMC_SEQ_RD_CTL_D1_LP;
4594 break;
4595 case mmMC_SEQ_WR_CTL_D0:
4596 *out_reg = mmMC_SEQ_WR_CTL_D0_LP;
4597 break;
4598 case mmMC_SEQ_WR_CTL_D1:
4599 *out_reg = mmMC_SEQ_WR_CTL_D1_LP;
4600 break;
4601 case mmMC_PMG_CMD_EMRS:
4602 *out_reg = mmMC_SEQ_PMG_CMD_EMRS_LP;
4603 break;
4604 case mmMC_PMG_CMD_MRS:
4605 *out_reg = mmMC_SEQ_PMG_CMD_MRS_LP;
4606 break;
4607 case mmMC_PMG_CMD_MRS1:
4608 *out_reg = mmMC_SEQ_PMG_CMD_MRS1_LP;
4609 break;
4610 case mmMC_SEQ_PMG_TIMING:
4611 *out_reg = mmMC_SEQ_PMG_TIMING_LP;
4612 break;
4613 case mmMC_PMG_CMD_MRS2:
4614 *out_reg = mmMC_SEQ_PMG_CMD_MRS2_LP;
4615 break;
4616 case mmMC_SEQ_WR_CTL_2:
4617 *out_reg = mmMC_SEQ_WR_CTL_2_LP;
4618 break;
4619 default:
4620 result = false;
4621 break;
4622 }
4623
4624 return result;
4625}
4626
4627static void ci_set_valid_flag(struct ci_mc_reg_table *table)
4628{
4629 u8 i, j;
4630
4631 for (i = 0; i < table->last; i++) {
4632 for (j = 1; j < table->num_entries; j++) {
4633 if (table->mc_reg_table_entry[j-1].mc_data[i] !=
4634 table->mc_reg_table_entry[j].mc_data[i]) {
4635 table->valid_flag |= 1 << i;
4636 break;
4637 }
4638 }
4639 }
4640}
4641
4642static void ci_set_s0_mc_reg_index(struct ci_mc_reg_table *table)
4643{
4644 u32 i;
4645 u16 address;
4646
4647 for (i = 0; i < table->last; i++) {
4648 table->mc_reg_address[i].s0 =
4649 ci_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ?
4650 address : table->mc_reg_address[i].s1;
4651 }
4652}
4653
4654static int ci_copy_vbios_mc_reg_table(const struct atom_mc_reg_table *table,
4655 struct ci_mc_reg_table *ci_table)
4656{
4657 u8 i, j;
4658
4659 if (table->last > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4660 return -EINVAL;
4661 if (table->num_entries > MAX_AC_TIMING_ENTRIES)
4662 return -EINVAL;
4663
4664 for (i = 0; i < table->last; i++)
4665 ci_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
4666
4667 ci_table->last = table->last;
4668
4669 for (i = 0; i < table->num_entries; i++) {
4670 ci_table->mc_reg_table_entry[i].mclk_max =
4671 table->mc_reg_table_entry[i].mclk_max;
4672 for (j = 0; j < table->last; j++)
4673 ci_table->mc_reg_table_entry[i].mc_data[j] =
4674 table->mc_reg_table_entry[i].mc_data[j];
4675 }
4676 ci_table->num_entries = table->num_entries;
4677
4678 return 0;
4679}
4680
4681static int ci_register_patching_mc_seq(struct amdgpu_device *adev,
4682 struct ci_mc_reg_table *table)
4683{
4684 u8 i, k;
4685 u32 tmp;
4686 bool patch;
4687
4688 tmp = RREG32(mmMC_SEQ_MISC0);
4689	patch = ((tmp & 0x0000f00) == 0x300);
4690
4691 if (patch &&
4692 ((adev->pdev->device == 0x67B0) ||
4693 (adev->pdev->device == 0x67B1))) {
4694 for (i = 0; i < table->last; i++) {
4695 if (table->last >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4696 return -EINVAL;
4697 switch (table->mc_reg_address[i].s1) {
4698 case mmMC_SEQ_MISC1:
4699 for (k = 0; k < table->num_entries; k++) {
4700 if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
4701 (table->mc_reg_table_entry[k].mclk_max == 137500))
4702 table->mc_reg_table_entry[k].mc_data[i] =
4703 (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFF8) |
4704 0x00000007;
4705 }
4706 break;
4707 case mmMC_SEQ_WR_CTL_D0:
4708 for (k = 0; k < table->num_entries; k++) {
4709 if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
4710 (table->mc_reg_table_entry[k].mclk_max == 137500))
4711 table->mc_reg_table_entry[k].mc_data[i] =
4712 (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) |
4713 0x0000D0DD;
4714 }
4715 break;
4716 case mmMC_SEQ_WR_CTL_D1:
4717 for (k = 0; k < table->num_entries; k++) {
4718 if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
4719 (table->mc_reg_table_entry[k].mclk_max == 137500))
4720 table->mc_reg_table_entry[k].mc_data[i] =
4721 (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) |
4722 0x0000D0DD;
4723 }
4724 break;
4725 case mmMC_SEQ_WR_CTL_2:
4726 for (k = 0; k < table->num_entries; k++) {
4727 if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
4728 (table->mc_reg_table_entry[k].mclk_max == 137500))
4729 table->mc_reg_table_entry[k].mc_data[i] = 0;
4730 }
4731 break;
4732 case mmMC_SEQ_CAS_TIMING:
4733 for (k = 0; k < table->num_entries; k++) {
4734 if (table->mc_reg_table_entry[k].mclk_max == 125000)
4735 table->mc_reg_table_entry[k].mc_data[i] =
4736 (table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) |
4737 0x000C0140;
4738 else if (table->mc_reg_table_entry[k].mclk_max == 137500)
4739 table->mc_reg_table_entry[k].mc_data[i] =
4740 (table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) |
4741 0x000C0150;
4742 }
4743 break;
4744 case mmMC_SEQ_MISC_TIMING:
4745 for (k = 0; k < table->num_entries; k++) {
4746 if (table->mc_reg_table_entry[k].mclk_max == 125000)
4747 table->mc_reg_table_entry[k].mc_data[i] =
4748 (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) |
4749 0x00000030;
4750 else if (table->mc_reg_table_entry[k].mclk_max == 137500)
4751 table->mc_reg_table_entry[k].mc_data[i] =
4752 (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) |
4753 0x00000035;
4754 }
4755 break;
4756 default:
4757 break;
4758 }
4759 }
4760
4761 WREG32(mmMC_SEQ_IO_DEBUG_INDEX, 3);
4762 tmp = RREG32(mmMC_SEQ_IO_DEBUG_DATA);
4763 tmp = (tmp & 0xFFF8FFFF) | (1 << 16);
4764 WREG32(mmMC_SEQ_IO_DEBUG_INDEX, 3);
4765 WREG32(mmMC_SEQ_IO_DEBUG_DATA, tmp);
4766 }
4767
4768 return 0;
4769}
4770
4771static int ci_initialize_mc_reg_table(struct amdgpu_device *adev)
4772{
4773 struct ci_power_info *pi = ci_get_pi(adev);
4774 struct atom_mc_reg_table *table;
4775 struct ci_mc_reg_table *ci_table = &pi->mc_reg_table;
4776 u8 module_index = ci_get_memory_module_index(adev);
4777 int ret;
4778
4779 table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL);
4780 if (!table)
4781 return -ENOMEM;
4782
4783 WREG32(mmMC_SEQ_RAS_TIMING_LP, RREG32(mmMC_SEQ_RAS_TIMING));
4784 WREG32(mmMC_SEQ_CAS_TIMING_LP, RREG32(mmMC_SEQ_CAS_TIMING));
4785 WREG32(mmMC_SEQ_DLL_STBY_LP, RREG32(mmMC_SEQ_DLL_STBY));
4786 WREG32(mmMC_SEQ_G5PDX_CMD0_LP, RREG32(mmMC_SEQ_G5PDX_CMD0));
4787 WREG32(mmMC_SEQ_G5PDX_CMD1_LP, RREG32(mmMC_SEQ_G5PDX_CMD1));
4788 WREG32(mmMC_SEQ_G5PDX_CTRL_LP, RREG32(mmMC_SEQ_G5PDX_CTRL));
4789 WREG32(mmMC_SEQ_PMG_DVS_CMD_LP, RREG32(mmMC_SEQ_PMG_DVS_CMD));
4790 WREG32(mmMC_SEQ_PMG_DVS_CTL_LP, RREG32(mmMC_SEQ_PMG_DVS_CTL));
4791 WREG32(mmMC_SEQ_MISC_TIMING_LP, RREG32(mmMC_SEQ_MISC_TIMING));
4792 WREG32(mmMC_SEQ_MISC_TIMING2_LP, RREG32(mmMC_SEQ_MISC_TIMING2));
4793 WREG32(mmMC_SEQ_PMG_CMD_EMRS_LP, RREG32(mmMC_PMG_CMD_EMRS));
4794 WREG32(mmMC_SEQ_PMG_CMD_MRS_LP, RREG32(mmMC_PMG_CMD_MRS));
4795 WREG32(mmMC_SEQ_PMG_CMD_MRS1_LP, RREG32(mmMC_PMG_CMD_MRS1));
4796 WREG32(mmMC_SEQ_WR_CTL_D0_LP, RREG32(mmMC_SEQ_WR_CTL_D0));
4797 WREG32(mmMC_SEQ_WR_CTL_D1_LP, RREG32(mmMC_SEQ_WR_CTL_D1));
4798 WREG32(mmMC_SEQ_RD_CTL_D0_LP, RREG32(mmMC_SEQ_RD_CTL_D0));
4799 WREG32(mmMC_SEQ_RD_CTL_D1_LP, RREG32(mmMC_SEQ_RD_CTL_D1));
4800 WREG32(mmMC_SEQ_PMG_TIMING_LP, RREG32(mmMC_SEQ_PMG_TIMING));
4801 WREG32(mmMC_SEQ_PMG_CMD_MRS2_LP, RREG32(mmMC_PMG_CMD_MRS2));
4802 WREG32(mmMC_SEQ_WR_CTL_2_LP, RREG32(mmMC_SEQ_WR_CTL_2));
4803
4804 ret = amdgpu_atombios_init_mc_reg_table(adev, module_index, table);
4805 if (ret)
4806 goto init_mc_done;
4807
4808 ret = ci_copy_vbios_mc_reg_table(table, ci_table);
4809 if (ret)
4810 goto init_mc_done;
4811
4812 ci_set_s0_mc_reg_index(ci_table);
4813
4814 ret = ci_register_patching_mc_seq(adev, ci_table);
4815 if (ret)
4816 goto init_mc_done;
4817
4818 ret = ci_set_mc_special_registers(adev, ci_table);
4819 if (ret)
4820 goto init_mc_done;
4821
4822 ci_set_valid_flag(ci_table);
4823
4824init_mc_done:
4825 kfree(table);
4826
4827 return ret;
4828}
4829
4830static int ci_populate_mc_reg_addresses(struct amdgpu_device *adev,
4831 SMU7_Discrete_MCRegisters *mc_reg_table)
4832{
4833 struct ci_power_info *pi = ci_get_pi(adev);
4834 u32 i, j;
4835
4836 for (i = 0, j = 0; j < pi->mc_reg_table.last; j++) {
4837 if (pi->mc_reg_table.valid_flag & (1 << j)) {
4838 if (i >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4839 return -EINVAL;
4840 mc_reg_table->address[i].s0 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s0);
4841 mc_reg_table->address[i].s1 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s1);
4842 i++;
4843 }
4844 }
4845
4846 mc_reg_table->last = (u8)i;
4847
4848 return 0;
4849}
4850
4851static void ci_convert_mc_registers(const struct ci_mc_reg_entry *entry,
4852 SMU7_Discrete_MCRegisterSet *data,
4853 u32 num_entries, u32 valid_flag)
4854{
4855 u32 i, j;
4856
4857 for (i = 0, j = 0; j < num_entries; j++) {
4858 if (valid_flag & (1 << j)) {
4859 data->value[i] = cpu_to_be32(entry->mc_data[j]);
4860 i++;
4861 }
4862 }
4863}
4864
4865static void ci_convert_mc_reg_table_entry_to_smc(struct amdgpu_device *adev,
4866 const u32 memory_clock,
4867 SMU7_Discrete_MCRegisterSet *mc_reg_table_data)
4868{
4869 struct ci_power_info *pi = ci_get_pi(adev);
4870	u32 i;
4871
4872	for (i = 0; i < pi->mc_reg_table.num_entries; i++) {
4873 if (memory_clock <= pi->mc_reg_table.mc_reg_table_entry[i].mclk_max)
4874 break;
4875 }
4876
4877 if ((i == pi->mc_reg_table.num_entries) && (i > 0))
4878 --i;
4879
4880 ci_convert_mc_registers(&pi->mc_reg_table.mc_reg_table_entry[i],
4881 mc_reg_table_data, pi->mc_reg_table.last,
4882 pi->mc_reg_table.valid_flag);
4883}
4884
4885static void ci_convert_mc_reg_table_to_smc(struct amdgpu_device *adev,
4886 SMU7_Discrete_MCRegisters *mc_reg_table)
4887{
4888 struct ci_power_info *pi = ci_get_pi(adev);
4889 u32 i;
4890
4891 for (i = 0; i < pi->dpm_table.mclk_table.count; i++)
4892 ci_convert_mc_reg_table_entry_to_smc(adev,
4893 pi->dpm_table.mclk_table.dpm_levels[i].value,
4894 &mc_reg_table->data[i]);
4895}
4896
4897static int ci_populate_initial_mc_reg_table(struct amdgpu_device *adev)
4898{
4899 struct ci_power_info *pi = ci_get_pi(adev);
4900 int ret;
4901
4902 memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));
4903
4904 ret = ci_populate_mc_reg_addresses(adev, &pi->smc_mc_reg_table);
4905 if (ret)
4906 return ret;
4907 ci_convert_mc_reg_table_to_smc(adev, &pi->smc_mc_reg_table);
4908
4909 return amdgpu_ci_copy_bytes_to_smc(adev,
4910 pi->mc_reg_table_start,
4911 (u8 *)&pi->smc_mc_reg_table,
4912 sizeof(SMU7_Discrete_MCRegisters),
4913 pi->sram_end);
4914}
4915
4916static int ci_update_and_upload_mc_reg_table(struct amdgpu_device *adev)
4917{
4918 struct ci_power_info *pi = ci_get_pi(adev);
4919
4920 if (!(pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
4921 return 0;
4922
4923 memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));
4924
4925 ci_convert_mc_reg_table_to_smc(adev, &pi->smc_mc_reg_table);
4926
4927 return amdgpu_ci_copy_bytes_to_smc(adev,
4928 pi->mc_reg_table_start +
4929 offsetof(SMU7_Discrete_MCRegisters, data[0]),
4930 (u8 *)&pi->smc_mc_reg_table.data[0],
4931 sizeof(SMU7_Discrete_MCRegisterSet) *
4932 pi->dpm_table.mclk_table.count,
4933 pi->sram_end);
4934}
4935
4936static void ci_enable_voltage_control(struct amdgpu_device *adev)
4937{
4938 u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
4939
4940 tmp |= GENERAL_PWRMGT__VOLT_PWRMGT_EN_MASK;
4941 WREG32_SMC(ixGENERAL_PWRMGT, tmp);
4942}
4943
4944static enum amdgpu_pcie_gen ci_get_maximum_link_speed(struct amdgpu_device *adev,
4945 struct amdgpu_ps *amdgpu_state)
4946{
4947 struct ci_ps *state = ci_get_ps(amdgpu_state);
4948 int i;
4949 u16 pcie_speed, max_speed = 0;
4950
4951 for (i = 0; i < state->performance_level_count; i++) {
4952 pcie_speed = state->performance_levels[i].pcie_gen;
4953 if (max_speed < pcie_speed)
4954 max_speed = pcie_speed;
4955 }
4956
4957 return max_speed;
4958}
4959
4960static u16 ci_get_current_pcie_speed(struct amdgpu_device *adev)
4961{
4962 u32 speed_cntl = 0;
4963
4964 speed_cntl = RREG32_PCIE(ixPCIE_LC_SPEED_CNTL) &
4965 PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK;
4966 speed_cntl >>= PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
4967
4968 return (u16)speed_cntl;
4969}
4970
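/*
 * Editorial note: decodes LC_LINK_WIDTH_RD into a lane count.  The
 * hardware encoding is 1=x1, 2=x2, 3=x4, 4=x8; anything else
 * (including the x12 encoding 5) is treated as x16 here.
 */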
static int ci_get_current_pcie_lane_number(struct amdgpu_device *adev)
{
	u32 link_width = 0;

	link_width = RREG32_PCIE(ixPCIE_LC_LINK_WIDTH_CNTL) &
		PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK;
	link_width >>= PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;

	switch (link_width) {
	case 1:
		return 1;
	case 2:
		return 2;
	case 3:
		return 4;
	case 4:
		return 8;
	case 0:
	case 6:
	default:
		return 16;
	}
}

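/*
 * If the new state wants a faster PCIe link, request the upgrade from
 * the platform (ACPI PSPP) before switching states; a downgrade is only
 * flagged here and performed afterwards by
 * ci_notify_link_speed_change_after_state_change().
 */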
static void ci_request_link_speed_change_before_state_change(struct amdgpu_device *adev,
							     struct amdgpu_ps *amdgpu_new_state,
							     struct amdgpu_ps *amdgpu_current_state)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	enum amdgpu_pcie_gen target_link_speed =
		ci_get_maximum_link_speed(adev, amdgpu_new_state);
	enum amdgpu_pcie_gen current_link_speed;

	if (pi->force_pcie_gen == AMDGPU_PCIE_GEN_INVALID)
		current_link_speed = ci_get_maximum_link_speed(adev, amdgpu_current_state);
	else
		current_link_speed = pi->force_pcie_gen;

	pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
	pi->pspp_notify_required = false;
	if (target_link_speed > current_link_speed) {
		switch (target_link_speed) {
#ifdef CONFIG_ACPI
		case AMDGPU_PCIE_GEN3:
			if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN3, false) == 0)
				break;
			pi->force_pcie_gen = AMDGPU_PCIE_GEN2;
			if (current_link_speed == AMDGPU_PCIE_GEN2)
				break;
			/* fall through: retry the request at gen2 */
		case AMDGPU_PCIE_GEN2:
			if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN2, false) == 0)
				break;
			/* fall through */
#endif
		default:
			pi->force_pcie_gen = ci_get_current_pcie_speed(adev);
			break;
		}
	} else {
		if (target_link_speed < current_link_speed)
			pi->pspp_notify_required = true;
	}
}

static void ci_notify_link_speed_change_after_state_change(struct amdgpu_device *adev,
							   struct amdgpu_ps *amdgpu_new_state,
							   struct amdgpu_ps *amdgpu_current_state)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	enum amdgpu_pcie_gen target_link_speed =
		ci_get_maximum_link_speed(adev, amdgpu_new_state);
	u8 request;

	if (pi->pspp_notify_required) {
		if (target_link_speed == AMDGPU_PCIE_GEN3)
			request = PCIE_PERF_REQ_PECI_GEN3;
		else if (target_link_speed == AMDGPU_PCIE_GEN2)
			request = PCIE_PERF_REQ_PECI_GEN2;
		else
			request = PCIE_PERF_REQ_PECI_GEN1;

		if ((request == PCIE_PERF_REQ_PECI_GEN1) &&
		    (ci_get_current_pcie_speed(adev) > 0))
			return;

#ifdef CONFIG_ACPI
		amdgpu_acpi_pcie_performance_request(adev, request, false);
#endif
	}
}

static int ci_set_private_data_variables_based_on_pptable(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct amdgpu_clock_voltage_dependency_table *allowed_sclk_vddc_table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
	struct amdgpu_clock_voltage_dependency_table *allowed_mclk_vddc_table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
	struct amdgpu_clock_voltage_dependency_table *allowed_mclk_vddci_table =
		&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk;

	if (allowed_sclk_vddc_table == NULL)
		return -EINVAL;
	if (allowed_sclk_vddc_table->count < 1)
		return -EINVAL;
	if (allowed_mclk_vddc_table == NULL)
		return -EINVAL;
	if (allowed_mclk_vddc_table->count < 1)
		return -EINVAL;
	if (allowed_mclk_vddci_table == NULL)
		return -EINVAL;
	if (allowed_mclk_vddci_table->count < 1)
		return -EINVAL;

	pi->min_vddc_in_pp_table = allowed_sclk_vddc_table->entries[0].v;
	pi->max_vddc_in_pp_table =
		allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;

	pi->min_vddci_in_pp_table = allowed_mclk_vddci_table->entries[0].v;
	pi->max_vddci_in_pp_table =
		allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;

	adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk =
		allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
	adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk =
		allowed_mclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
	adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc =
		allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
	adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci =
		allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;

	return 0;
}

static void ci_patch_with_vddc_leakage(struct amdgpu_device *adev, u16 *vddc)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_leakage_voltage *leakage_table = &pi->vddc_leakage;
	u32 leakage_index;

	for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
		if (leakage_table->leakage_id[leakage_index] == *vddc) {
			*vddc = leakage_table->actual_voltage[leakage_index];
			break;
		}
	}
}

static void ci_patch_with_vddci_leakage(struct amdgpu_device *adev, u16 *vddci)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_leakage_voltage *leakage_table = &pi->vddci_leakage;
	u32 leakage_index;

	for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
		if (leakage_table->leakage_id[leakage_index] == *vddci) {
			*vddci = leakage_table->actual_voltage[leakage_index];
			break;
		}
	}
}

static void ci_patch_clock_voltage_dependency_table_with_vddc_leakage(struct amdgpu_device *adev,
								      struct amdgpu_clock_voltage_dependency_table *table)
{
	u32 i;

	if (table) {
		for (i = 0; i < table->count; i++)
			ci_patch_with_vddc_leakage(adev, &table->entries[i].v);
	}
}

static void ci_patch_clock_voltage_dependency_table_with_vddci_leakage(struct amdgpu_device *adev,
								       struct amdgpu_clock_voltage_dependency_table *table)
{
	u32 i;

	if (table) {
		for (i = 0; i < table->count; i++)
			ci_patch_with_vddci_leakage(adev, &table->entries[i].v);
	}
}

static void ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(struct amdgpu_device *adev,
									  struct amdgpu_vce_clock_voltage_dependency_table *table)
{
	u32 i;

	if (table) {
		for (i = 0; i < table->count; i++)
			ci_patch_with_vddc_leakage(adev, &table->entries[i].v);
	}
}

static void ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(struct amdgpu_device *adev,
									  struct amdgpu_uvd_clock_voltage_dependency_table *table)
{
	u32 i;

	if (table) {
		for (i = 0; i < table->count; i++)
			ci_patch_with_vddc_leakage(adev, &table->entries[i].v);
	}
}

static void ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(struct amdgpu_device *adev,
								   struct amdgpu_phase_shedding_limits_table *table)
{
	u32 i;

	if (table) {
		for (i = 0; i < table->count; i++)
			ci_patch_with_vddc_leakage(adev, &table->entries[i].voltage);
	}
}

static void ci_patch_clock_voltage_limits_with_vddc_leakage(struct amdgpu_device *adev,
							    struct amdgpu_clock_and_voltage_limits *table)
{
	if (table) {
		ci_patch_with_vddc_leakage(adev, (u16 *)&table->vddc);
		ci_patch_with_vddci_leakage(adev, (u16 *)&table->vddci);
	}
}

static void ci_patch_cac_leakage_table_with_vddc_leakage(struct amdgpu_device *adev,
							 struct amdgpu_cac_leakage_table *table)
{
	u32 i;

	if (table) {
		for (i = 0; i < table->count; i++)
			ci_patch_with_vddc_leakage(adev, &table->entries[i].vddc);
	}
}

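/*
 * Run every voltage table from the VBIOS through the leakage patching
 * helpers above so that leakage-encoded voltage IDs are replaced with
 * real voltages before the tables are used anywhere else.
 */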
static void ci_patch_dependency_tables_with_leakage(struct amdgpu_device *adev)
{
	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
								  &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk);
	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
								  &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk);
	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
								  &adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk);
	ci_patch_clock_voltage_dependency_table_with_vddci_leakage(adev,
								   &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk);
	ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(adev,
								      &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table);
	ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(adev,
								      &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table);
	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
								  &adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table);
	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
								  &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table);
	ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(adev,
							       &adev->pm.dpm.dyn_state.phase_shedding_limits_table);
	ci_patch_clock_voltage_limits_with_vddc_leakage(adev,
							&adev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
	ci_patch_clock_voltage_limits_with_vddc_leakage(adev,
							&adev->pm.dpm.dyn_state.max_clock_voltage_on_dc);
	ci_patch_cac_leakage_table_with_vddc_leakage(adev,
						     &adev->pm.dpm.dyn_state.cac_leakage_table);
}

static void ci_update_current_ps(struct amdgpu_device *adev,
				 struct amdgpu_ps *rps)
{
	struct ci_ps *new_ps = ci_get_ps(rps);
	struct ci_power_info *pi = ci_get_pi(adev);

	pi->current_rps = *rps;
	pi->current_ps = *new_ps;
	pi->current_rps.ps_priv = &pi->current_ps;
	adev->pm.dpm.current_ps = &pi->current_rps;
}

static void ci_update_requested_ps(struct amdgpu_device *adev,
				   struct amdgpu_ps *rps)
{
	struct ci_ps *new_ps = ci_get_ps(rps);
	struct ci_power_info *pi = ci_get_pi(adev);

	pi->requested_rps = *rps;
	pi->requested_ps = *new_ps;
	pi->requested_rps.ps_priv = &pi->requested_ps;
	adev->pm.dpm.requested_ps = &pi->requested_rps;
}

static int ci_dpm_pre_set_power_state(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ci_power_info *pi = ci_get_pi(adev);
	struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps;
	struct amdgpu_ps *new_ps = &requested_ps;

	ci_update_requested_ps(adev, new_ps);

	ci_apply_state_adjust_rules(adev, &pi->requested_rps);

	return 0;
}

static void ci_dpm_post_set_power_state(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ci_power_info *pi = ci_get_pi(adev);
	struct amdgpu_ps *new_ps = &pi->requested_rps;

	ci_update_current_ps(adev, new_ps);
}

static void ci_dpm_setup_asic(struct amdgpu_device *adev)
{
	ci_read_clock_registers(adev);
	ci_enable_acpi_power_management(adev);
	ci_init_sclk_t(adev);
}

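/*
 * Full DPM bring-up: construct the voltage and MC register tables,
 * upload firmware and the SMC state tables, start the SMC, then switch
 * on the individual features (ULV, deep sleep, DIDT, CAC, power
 * containment, thermal DPM) one by one, failing fast on any error.
 */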
static int ci_dpm_enable(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps;
	int ret;

	if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
		ci_enable_voltage_control(adev);
		ret = ci_construct_voltage_tables(adev);
		if (ret) {
			DRM_ERROR("ci_construct_voltage_tables failed\n");
			return ret;
		}
	}
	if (pi->caps_dynamic_ac_timing) {
		ret = ci_initialize_mc_reg_table(adev);
		if (ret)
			pi->caps_dynamic_ac_timing = false;
	}
	if (pi->dynamic_ss)
		ci_enable_spread_spectrum(adev, true);
	if (pi->thermal_protection)
		ci_enable_thermal_protection(adev, true);
	ci_program_sstp(adev);
	ci_enable_display_gap(adev);
	ci_program_vc(adev);
	ret = ci_upload_firmware(adev);
	if (ret) {
		DRM_ERROR("ci_upload_firmware failed\n");
		return ret;
	}
	ret = ci_process_firmware_header(adev);
	if (ret) {
		DRM_ERROR("ci_process_firmware_header failed\n");
		return ret;
	}
	ret = ci_initial_switch_from_arb_f0_to_f1(adev);
	if (ret) {
		DRM_ERROR("ci_initial_switch_from_arb_f0_to_f1 failed\n");
		return ret;
	}
	ret = ci_init_smc_table(adev);
	if (ret) {
		DRM_ERROR("ci_init_smc_table failed\n");
		return ret;
	}
	ret = ci_init_arb_table_index(adev);
	if (ret) {
		DRM_ERROR("ci_init_arb_table_index failed\n");
		return ret;
	}
	if (pi->caps_dynamic_ac_timing) {
		ret = ci_populate_initial_mc_reg_table(adev);
		if (ret) {
			DRM_ERROR("ci_populate_initial_mc_reg_table failed\n");
			return ret;
		}
	}
	ret = ci_populate_pm_base(adev);
	if (ret) {
		DRM_ERROR("ci_populate_pm_base failed\n");
		return ret;
	}
	ci_dpm_start_smc(adev);
	ci_enable_vr_hot_gpio_interrupt(adev);
	ret = ci_notify_smc_display_change(adev, false);
	if (ret) {
		DRM_ERROR("ci_notify_smc_display_change failed\n");
		return ret;
	}
	ci_enable_sclk_control(adev, true);
	ret = ci_enable_ulv(adev, true);
	if (ret) {
		DRM_ERROR("ci_enable_ulv failed\n");
		return ret;
	}
	ret = ci_enable_ds_master_switch(adev, true);
	if (ret) {
		DRM_ERROR("ci_enable_ds_master_switch failed\n");
		return ret;
	}
	ret = ci_start_dpm(adev);
	if (ret) {
		DRM_ERROR("ci_start_dpm failed\n");
		return ret;
	}
	ret = ci_enable_didt(adev, true);
	if (ret) {
		DRM_ERROR("ci_enable_didt failed\n");
		return ret;
	}
	ret = ci_enable_smc_cac(adev, true);
	if (ret) {
		DRM_ERROR("ci_enable_smc_cac failed\n");
		return ret;
	}
	ret = ci_enable_power_containment(adev, true);
	if (ret) {
		DRM_ERROR("ci_enable_power_containment failed\n");
		return ret;
	}

	ret = ci_power_control_set_level(adev);
	if (ret) {
		DRM_ERROR("ci_power_control_set_level failed\n");
		return ret;
	}

	ci_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, true);

	ret = ci_enable_thermal_based_sclk_dpm(adev, true);
	if (ret) {
		DRM_ERROR("ci_enable_thermal_based_sclk_dpm failed\n");
		return ret;
	}

	ci_thermal_start_thermal_controller(adev);

	ci_update_current_ps(adev, boot_ps);

	return 0;
}

static void ci_dpm_disable(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps;

	amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
		       AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
	amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
		       AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);

	ci_dpm_powergate_uvd(adev, true);

	if (!amdgpu_ci_is_smc_running(adev))
		return;

	ci_thermal_stop_thermal_controller(adev);

	if (pi->thermal_protection)
		ci_enable_thermal_protection(adev, false);
	ci_enable_power_containment(adev, false);
	ci_enable_smc_cac(adev, false);
	ci_enable_didt(adev, false);
	ci_enable_spread_spectrum(adev, false);
	ci_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, false);
	ci_stop_dpm(adev);
	ci_enable_ds_master_switch(adev, false);
	ci_enable_ulv(adev, false);
	ci_clear_vc(adev);
	ci_reset_to_default(adev);
	ci_dpm_stop_smc(adev);
	ci_force_switch_to_arb_f0(adev);
	ci_enable_thermal_based_sclk_dpm(adev, false);

	ci_update_current_ps(adev, boot_ps);
}

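/*
 * Switch to the requested power state: freeze SCLK/MCLK DPM, upload the
 * new levels, enable mask, VCE settings and MC registers, then unfreeze
 * and let the SMC move between the new levels. PCIe link-speed requests
 * are issued before or after the switch as decided earlier.
 */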
static int ci_dpm_set_power_state(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ci_power_info *pi = ci_get_pi(adev);
	struct amdgpu_ps *new_ps = &pi->requested_rps;
	struct amdgpu_ps *old_ps = &pi->current_rps;
	int ret;

	ci_find_dpm_states_clocks_in_dpm_table(adev, new_ps);
	if (pi->pcie_performance_request)
		ci_request_link_speed_change_before_state_change(adev, new_ps, old_ps);
	ret = ci_freeze_sclk_mclk_dpm(adev);
	if (ret) {
		DRM_ERROR("ci_freeze_sclk_mclk_dpm failed\n");
		return ret;
	}
	ret = ci_populate_and_upload_sclk_mclk_dpm_levels(adev, new_ps);
	if (ret) {
		DRM_ERROR("ci_populate_and_upload_sclk_mclk_dpm_levels failed\n");
		return ret;
	}
	ret = ci_generate_dpm_level_enable_mask(adev, new_ps);
	if (ret) {
		DRM_ERROR("ci_generate_dpm_level_enable_mask failed\n");
		return ret;
	}

	ret = ci_update_vce_dpm(adev, new_ps, old_ps);
	if (ret) {
		DRM_ERROR("ci_update_vce_dpm failed\n");
		return ret;
	}

	ret = ci_update_sclk_t(adev);
	if (ret) {
		DRM_ERROR("ci_update_sclk_t failed\n");
		return ret;
	}
	if (pi->caps_dynamic_ac_timing) {
		ret = ci_update_and_upload_mc_reg_table(adev);
		if (ret) {
			DRM_ERROR("ci_update_and_upload_mc_reg_table failed\n");
			return ret;
		}
	}
	ret = ci_program_memory_timing_parameters(adev);
	if (ret) {
		DRM_ERROR("ci_program_memory_timing_parameters failed\n");
		return ret;
	}
	ret = ci_unfreeze_sclk_mclk_dpm(adev);
	if (ret) {
		DRM_ERROR("ci_unfreeze_sclk_mclk_dpm failed\n");
		return ret;
	}
	ret = ci_upload_dpm_level_enable_mask(adev);
	if (ret) {
		DRM_ERROR("ci_upload_dpm_level_enable_mask failed\n");
		return ret;
	}
	if (pi->pcie_performance_request)
		ci_notify_link_speed_change_after_state_change(adev, new_ps, old_ps);

	return 0;
}

#if 0
static void ci_dpm_reset_asic(struct amdgpu_device *adev)
{
	ci_set_boot_state(adev);
}
#endif

static void ci_dpm_display_configuration_changed(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	ci_program_display_gap(adev);
}

union power_info {
	struct _ATOM_POWERPLAY_INFO info;
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
};

union pplib_clock_info {
	struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
	struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
	struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
	struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
	struct _ATOM_PPLIB_SI_CLOCK_INFO si;
	struct _ATOM_PPLIB_CI_CLOCK_INFO ci;
};

union pplib_power_state {
	struct _ATOM_PPLIB_STATE v1;
	struct _ATOM_PPLIB_STATE_V2 v2;
};

static void ci_parse_pplib_non_clock_info(struct amdgpu_device *adev,
					  struct amdgpu_ps *rps,
					  struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
					  u8 table_rev)
{
	rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
	rps->class = le16_to_cpu(non_clock_info->usClassification);
	rps->class2 = le16_to_cpu(non_clock_info->usClassification2);

	if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
		rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
		rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
	} else {
		rps->vclk = 0;
		rps->dclk = 0;
	}

	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
		adev->pm.dpm.boot_ps = rps;
	if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
		adev->pm.dpm.uvd_ps = rps;
}

static void ci_parse_pplib_clock_info(struct amdgpu_device *adev,
				      struct amdgpu_ps *rps, int index,
				      union pplib_clock_info *clock_info)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_ps *ps = ci_get_ps(rps);
	struct ci_pl *pl = &ps->performance_levels[index];

	ps->performance_level_count = index + 1;

	pl->sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
	pl->sclk |= clock_info->ci.ucEngineClockHigh << 16;
	pl->mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
	pl->mclk |= clock_info->ci.ucMemoryClockHigh << 16;

	pl->pcie_gen = amdgpu_get_pcie_gen_support(adev,
						   pi->sys_pcie_mask,
						   pi->vbios_boot_state.pcie_gen_bootup_value,
						   clock_info->ci.ucPCIEGen);
	pl->pcie_lane = amdgpu_get_pcie_lane_support(adev,
						     pi->vbios_boot_state.pcie_lane_bootup_value,
						     le16_to_cpu(clock_info->ci.usPCIELane));

	if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
		pi->acpi_pcie_gen = pl->pcie_gen;

	if (rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) {
		pi->ulv.supported = true;
		pi->ulv.pl = *pl;
		pi->ulv.cg_ulv_parameter = CISLANDS_CGULVPARAMETER_DFLT;
	}

	/* patch up boot state */
	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
		pl->mclk = pi->vbios_boot_state.mclk_bootup_value;
		pl->sclk = pi->vbios_boot_state.sclk_bootup_value;
		pl->pcie_gen = pi->vbios_boot_state.pcie_gen_bootup_value;
		pl->pcie_lane = pi->vbios_boot_state.pcie_lane_bootup_value;
	}

	switch (rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
	case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
		pi->use_pcie_powersaving_levels = true;
		if (pi->pcie_gen_powersaving.max < pl->pcie_gen)
			pi->pcie_gen_powersaving.max = pl->pcie_gen;
		if (pi->pcie_gen_powersaving.min > pl->pcie_gen)
			pi->pcie_gen_powersaving.min = pl->pcie_gen;
		if (pi->pcie_lane_powersaving.max < pl->pcie_lane)
			pi->pcie_lane_powersaving.max = pl->pcie_lane;
		if (pi->pcie_lane_powersaving.min > pl->pcie_lane)
			pi->pcie_lane_powersaving.min = pl->pcie_lane;
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
		pi->use_pcie_performance_levels = true;
		if (pi->pcie_gen_performance.max < pl->pcie_gen)
			pi->pcie_gen_performance.max = pl->pcie_gen;
		if (pi->pcie_gen_performance.min > pl->pcie_gen)
			pi->pcie_gen_performance.min = pl->pcie_gen;
		if (pi->pcie_lane_performance.max < pl->pcie_lane)
			pi->pcie_lane_performance.max = pl->pcie_lane;
		if (pi->pcie_lane_performance.min > pl->pcie_lane)
			pi->pcie_lane_performance.min = pl->pcie_lane;
		break;
	default:
		break;
	}
}

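/*
 * Walk the ATOM PowerPlay state array from the VBIOS, allocate one
 * amdgpu_ps (plus a ci_ps private struct) per entry, and parse the
 * clock and non-clock info for each DPM level; finally fill in the
 * clocks for the VCE states from the referenced clock-info entries.
 */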
static int ci_parse_power_table(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
	union pplib_power_state *power_state;
	int i, j, k, non_clock_array_index, clock_array_index;
	union pplib_clock_info *clock_info;
	struct _StateArray *state_array;
	struct _ClockInfoArray *clock_info_array;
	struct _NonClockInfoArray *non_clock_info_array;
	union power_info *power_info;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;
	u8 *power_state_offset;
	struct ci_ps *ps;

	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					   &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	amdgpu_add_thermal_controller(adev);

	state_array = (struct _StateArray *)
		(mode_info->atom_context->bios + data_offset +
		 le16_to_cpu(power_info->pplib.usStateArrayOffset));
	clock_info_array = (struct _ClockInfoArray *)
		(mode_info->atom_context->bios + data_offset +
		 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
	non_clock_info_array = (struct _NonClockInfoArray *)
		(mode_info->atom_context->bios + data_offset +
		 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));

	adev->pm.dpm.ps = kzalloc(sizeof(struct amdgpu_ps) *
				  state_array->ucNumEntries, GFP_KERNEL);
	if (!adev->pm.dpm.ps)
		return -ENOMEM;
	power_state_offset = (u8 *)state_array->states;
	for (i = 0; i < state_array->ucNumEntries; i++) {
		u8 *idx;
		power_state = (union pplib_power_state *)power_state_offset;
		non_clock_array_index = power_state->v2.nonClockInfoIndex;
		non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
			&non_clock_info_array->nonClockInfo[non_clock_array_index];
		ps = kzalloc(sizeof(struct ci_ps), GFP_KERNEL);
		if (ps == NULL) {
			kfree(adev->pm.dpm.ps);
			return -ENOMEM;
		}
		adev->pm.dpm.ps[i].ps_priv = ps;
		ci_parse_pplib_non_clock_info(adev, &adev->pm.dpm.ps[i],
					      non_clock_info,
					      non_clock_info_array->ucEntrySize);
		k = 0;
		idx = (u8 *)&power_state->v2.clockInfoIndex[0];
		for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
			clock_array_index = idx[j];
			if (clock_array_index >= clock_info_array->ucNumEntries)
				continue;
			if (k >= CISLANDS_MAX_HARDWARE_POWERLEVELS)
				break;
			clock_info = (union pplib_clock_info *)
				((u8 *)&clock_info_array->clockInfo[0] +
				 (clock_array_index * clock_info_array->ucEntrySize));
			ci_parse_pplib_clock_info(adev,
						  &adev->pm.dpm.ps[i], k,
						  clock_info);
			k++;
		}
		power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
	}
	adev->pm.dpm.num_ps = state_array->ucNumEntries;

	/* fill in the vce power states */
	for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
		u32 sclk, mclk;
		clock_array_index = adev->pm.dpm.vce_states[i].clk_idx;
		clock_info = (union pplib_clock_info *)
			&clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
		sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
		sclk |= clock_info->ci.ucEngineClockHigh << 16;
		mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
		mclk |= clock_info->ci.ucMemoryClockHigh << 16;
		adev->pm.dpm.vce_states[i].sclk = sclk;
		adev->pm.dpm.vce_states[i].mclk = mclk;
	}

	return 0;
}

static int ci_get_vbios_boot_values(struct amdgpu_device *adev,
				    struct ci_vbios_boot_state *boot_state)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
	ATOM_FIRMWARE_INFO_V2_2 *firmware_info;
	u8 frev, crev;
	u16 data_offset;

	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					  &frev, &crev, &data_offset)) {
		firmware_info =
			(ATOM_FIRMWARE_INFO_V2_2 *)(mode_info->atom_context->bios +
						    data_offset);
		boot_state->mvdd_bootup_value = le16_to_cpu(firmware_info->usBootUpMVDDCVoltage);
		boot_state->vddc_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCVoltage);
		boot_state->vddci_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCIVoltage);
		boot_state->pcie_gen_bootup_value = ci_get_current_pcie_speed(adev);
		boot_state->pcie_lane_bootup_value = ci_get_current_pcie_lane_number(adev);
		boot_state->sclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultEngineClock);
		boot_state->mclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultMemoryClock);

		return 0;
	}
	return -EINVAL;
}

static void ci_dpm_fini(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->pm.dpm.num_ps; i++) {
		kfree(adev->pm.dpm.ps[i].ps_priv);
	}
	kfree(adev->pm.dpm.ps);
	kfree(adev->pm.dpm.priv);
	kfree(adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries);
	amdgpu_free_extended_power_table(adev);
}

/**
 * ci_dpm_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */
static int ci_dpm_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_BONAIRE:
		if ((adev->pdev->revision == 0x80) ||
		    (adev->pdev->revision == 0x81) ||
		    (adev->pdev->device == 0x665f))
			chip_name = "bonaire_k";
		else
			chip_name = "bonaire";
		break;
	case CHIP_HAWAII:
		if (adev->pdev->revision == 0x80)
			chip_name = "hawaii_k";
		else
			chip_name = "hawaii";
		break;
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	default: BUG();
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
	err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->pm.fw);

out:
	if (err) {
		pr_err("cik_smc: Failed to load firmware \"%s\"\n", fw_name);
		release_firmware(adev->pm.fw);
		adev->pm.fw = NULL;
	}
	return err;
}

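/*
 * Software-side DPM initialization: allocate ci_power_info, read boot
 * values and tables from the VBIOS, parse the power states, and derive
 * the GPIO, voltage-control and thermal defaults the rest of the driver
 * relies on. Note the PCIe gen/lane min/max start out inverted on
 * purpose; ci_parse_pplib_clock_info() widens them per parsed level.
 */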
static int ci_dpm_init(struct amdgpu_device *adev)
{
	int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
	SMU7_Discrete_DpmTable *dpm_table;
	struct amdgpu_gpio_rec gpio;
	u16 data_offset, size;
	u8 frev, crev;
	struct ci_power_info *pi;
	int ret;

	pi = kzalloc(sizeof(struct ci_power_info), GFP_KERNEL);
	if (pi == NULL)
		return -ENOMEM;
	adev->pm.dpm.priv = pi;

	pi->sys_pcie_mask =
		(adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK) >>
		CAIL_PCIE_LINK_SPEED_SUPPORT_SHIFT;

	pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;

	pi->pcie_gen_performance.max = AMDGPU_PCIE_GEN1;
	pi->pcie_gen_performance.min = AMDGPU_PCIE_GEN3;
	pi->pcie_gen_powersaving.max = AMDGPU_PCIE_GEN1;
	pi->pcie_gen_powersaving.min = AMDGPU_PCIE_GEN3;

	pi->pcie_lane_performance.max = 0;
	pi->pcie_lane_performance.min = 16;
	pi->pcie_lane_powersaving.max = 0;
	pi->pcie_lane_powersaving.min = 16;

	ret = ci_get_vbios_boot_values(adev, &pi->vbios_boot_state);
	if (ret) {
		ci_dpm_fini(adev);
		return ret;
	}

	ret = amdgpu_get_platform_caps(adev);
	if (ret) {
		ci_dpm_fini(adev);
		return ret;
	}

	ret = amdgpu_parse_extended_power_table(adev);
	if (ret) {
		ci_dpm_fini(adev);
		return ret;
	}

	ret = ci_parse_power_table(adev);
	if (ret) {
		ci_dpm_fini(adev);
		return ret;
	}

	pi->dll_default_on = false;
	pi->sram_end = SMC_RAM_END;

	pi->activity_target[0] = CISLAND_TARGETACTIVITY_DFLT;
	pi->activity_target[1] = CISLAND_TARGETACTIVITY_DFLT;
	pi->activity_target[2] = CISLAND_TARGETACTIVITY_DFLT;
	pi->activity_target[3] = CISLAND_TARGETACTIVITY_DFLT;
	pi->activity_target[4] = CISLAND_TARGETACTIVITY_DFLT;
	pi->activity_target[5] = CISLAND_TARGETACTIVITY_DFLT;
	pi->activity_target[6] = CISLAND_TARGETACTIVITY_DFLT;
	pi->activity_target[7] = CISLAND_TARGETACTIVITY_DFLT;

	pi->mclk_activity_target = CISLAND_MCLK_TARGETACTIVITY_DFLT;

	pi->sclk_dpm_key_disabled = 0;
	pi->mclk_dpm_key_disabled = 0;
	pi->pcie_dpm_key_disabled = 0;
	pi->thermal_sclk_dpm_enabled = 0;

	if (amdgpu_pp_feature_mask & SCLK_DEEP_SLEEP_MASK)
		pi->caps_sclk_ds = true;
	else
		pi->caps_sclk_ds = false;

	pi->mclk_strobe_mode_threshold = 40000;
	pi->mclk_stutter_mode_threshold = 40000;
	pi->mclk_edc_enable_threshold = 40000;
	pi->mclk_edc_wr_enable_threshold = 40000;

	ci_initialize_powertune_defaults(adev);

	pi->caps_fps = false;

	pi->caps_sclk_throttle_low_notification = false;

	pi->caps_uvd_dpm = true;
	pi->caps_vce_dpm = true;

	ci_get_leakage_voltages(adev);
	ci_patch_dependency_tables_with_leakage(adev);
	ci_set_private_data_variables_based_on_pptable(adev);

	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
		kzalloc(4 * sizeof(struct amdgpu_clock_voltage_dependency_entry), GFP_KERNEL);
	if (!adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
		ci_dpm_fini(adev);
		return -ENOMEM;
	}
	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4;
	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;
	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000;
	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 720;
	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000;
	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 810;
	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000;
	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 900;

	adev->pm.dpm.dyn_state.mclk_sclk_ratio = 4;
	adev->pm.dpm.dyn_state.sclk_mclk_delta = 15000;
	adev->pm.dpm.dyn_state.vddc_vddci_delta = 200;

	adev->pm.dpm.dyn_state.valid_sclk_values.count = 0;
	adev->pm.dpm.dyn_state.valid_sclk_values.values = NULL;
	adev->pm.dpm.dyn_state.valid_mclk_values.count = 0;
	adev->pm.dpm.dyn_state.valid_mclk_values.values = NULL;

	if (adev->asic_type == CHIP_HAWAII) {
		pi->thermal_temp_setting.temperature_low = 94500;
		pi->thermal_temp_setting.temperature_high = 95000;
		pi->thermal_temp_setting.temperature_shutdown = 104000;
	} else {
		pi->thermal_temp_setting.temperature_low = 99500;
		pi->thermal_temp_setting.temperature_high = 100000;
		pi->thermal_temp_setting.temperature_shutdown = 104000;
	}

	pi->uvd_enabled = false;

	dpm_table = &pi->smc_state_table;

	gpio = amdgpu_atombios_lookup_gpio(adev, VDDC_VRHOT_GPIO_PINID);
	if (gpio.valid) {
		dpm_table->VRHotGpio = gpio.shift;
		adev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_REGULATOR_HOT;
	} else {
		dpm_table->VRHotGpio = CISLANDS_UNUSED_GPIO_PIN;
		adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_REGULATOR_HOT;
	}

	gpio = amdgpu_atombios_lookup_gpio(adev, PP_AC_DC_SWITCH_GPIO_PINID);
	if (gpio.valid) {
		dpm_table->AcDcGpio = gpio.shift;
		adev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_HARDWAREDC;
	} else {
		dpm_table->AcDcGpio = CISLANDS_UNUSED_GPIO_PIN;
		adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_HARDWAREDC;
	}

	gpio = amdgpu_atombios_lookup_gpio(adev, VDDC_PCC_GPIO_PINID);
	if (gpio.valid) {
		u32 tmp = RREG32_SMC(ixCNB_PWRMGT_CNTL);

		switch (gpio.shift) {
		case 0:
			tmp &= ~CNB_PWRMGT_CNTL__GNB_SLOW_MODE_MASK;
			tmp |= 1 << CNB_PWRMGT_CNTL__GNB_SLOW_MODE__SHIFT;
			break;
		case 1:
			tmp &= ~CNB_PWRMGT_CNTL__GNB_SLOW_MODE_MASK;
			tmp |= 2 << CNB_PWRMGT_CNTL__GNB_SLOW_MODE__SHIFT;
			break;
		case 2:
			tmp |= CNB_PWRMGT_CNTL__GNB_SLOW_MASK;
			break;
		case 3:
			tmp |= CNB_PWRMGT_CNTL__FORCE_NB_PS1_MASK;
			break;
		case 4:
			tmp |= CNB_PWRMGT_CNTL__DPM_ENABLED_MASK;
			break;
		default:
			DRM_INFO("Invalid PCC GPIO: %u!\n", gpio.shift);
			break;
		}
		WREG32_SMC(ixCNB_PWRMGT_CNTL, tmp);
	}

	pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_NONE;
	pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_NONE;
	pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_NONE;
	if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT))
		pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
	else if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
		pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;

	if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL) {
		if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
			pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
		else if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
			pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
		else
			adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL;
	}

	if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_MVDDCONTROL) {
		if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
			pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
		else if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2))
			pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
		else
			adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_MVDDCONTROL;
	}

	pi->vddc_phase_shed_control = true;

#if defined(CONFIG_ACPI)
	pi->pcie_performance_request =
		amdgpu_acpi_is_pcie_performance_request_supported(adev);
#else
	pi->pcie_performance_request = false;
#endif

	if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, &size,
					  &frev, &crev, &data_offset)) {
		pi->caps_sclk_ss_support = true;
		pi->caps_mclk_ss_support = true;
		pi->dynamic_ss = true;
	} else {
		pi->caps_sclk_ss_support = false;
		pi->caps_mclk_ss_support = false;
		pi->dynamic_ss = true;
	}

	if (adev->pm.int_thermal_type != THERMAL_TYPE_NONE)
		pi->thermal_protection = true;
	else
		pi->thermal_protection = false;

	pi->caps_dynamic_ac_timing = true;

	pi->uvd_power_gated = true;

	/* make sure dc limits are valid */
	if ((adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
	    (adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0))
		adev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
			adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;

	pi->fan_ctrl_is_in_default_mode = true;

	return 0;
}

static void
ci_dpm_debugfs_print_current_performance_level(void *handle,
					       struct seq_file *m)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ci_power_info *pi = ci_get_pi(adev);
	struct amdgpu_ps *rps = &pi->current_rps;
	u32 sclk = ci_get_average_sclk_freq(adev);
	u32 mclk = ci_get_average_mclk_freq(adev);
	u32 activity_percent = 50;
	int ret;

	ret = ci_read_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, AverageGraphicsA),
					&activity_percent);

	if (ret == 0) {
		activity_percent += 0x80;
		activity_percent >>= 8;
		activity_percent = activity_percent > 100 ? 100 : activity_percent;
	}

	seq_printf(m, "uvd %sabled\n", pi->uvd_power_gated ? "dis" : "en");
	seq_printf(m, "vce %sabled\n", rps->vce_active ? "en" : "dis");
	seq_printf(m, "power level avg sclk: %u mclk: %u\n",
		   sclk, mclk);
	seq_printf(m, "GPU load: %u %%\n", activity_percent);
}

static void ci_dpm_print_power_state(void *handle, void *current_ps)
{
	struct amdgpu_ps *rps = (struct amdgpu_ps *)current_ps;
	struct ci_ps *ps = ci_get_ps(rps);
	struct ci_pl *pl;
	int i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dpm_print_class_info(rps->class, rps->class2);
	amdgpu_dpm_print_cap_info(rps->caps);
	printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
	for (i = 0; i < ps->performance_level_count; i++) {
		pl = &ps->performance_levels[i];
		printk("\t\tpower level %d sclk: %u mclk: %u pcie gen: %u pcie lanes: %u\n",
		       i, pl->sclk, pl->mclk, pl->pcie_gen + 1, pl->pcie_lane);
	}
	amdgpu_dpm_print_ps_status(adev, rps);
}

static inline bool ci_are_power_levels_equal(const struct ci_pl *ci_cpl1,
					     const struct ci_pl *ci_cpl2)
{
	return ((ci_cpl1->mclk == ci_cpl2->mclk) &&
		(ci_cpl1->sclk == ci_cpl2->sclk) &&
		(ci_cpl1->pcie_gen == ci_cpl2->pcie_gen) &&
		(ci_cpl1->pcie_lane == ci_cpl2->pcie_lane));
}

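/*
 * Two states compare equal only if every performance level matches;
 * UVD/VCE clock requirements then act as the tie-breaker.
 */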
static int ci_check_state_equal(void *handle,
				void *current_ps,
				void *request_ps,
				bool *equal)
{
	struct ci_ps *ci_cps;
	struct ci_ps *ci_rps;
	int i;
	struct amdgpu_ps *cps = (struct amdgpu_ps *)current_ps;
	struct amdgpu_ps *rps = (struct amdgpu_ps *)request_ps;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev == NULL || cps == NULL || rps == NULL || equal == NULL)
		return -EINVAL;

	ci_cps = ci_get_ps(cps);
	ci_rps = ci_get_ps(rps);

	if (ci_cps == NULL) {
		*equal = false;
		return 0;
	}

	if (ci_cps->performance_level_count != ci_rps->performance_level_count) {
		*equal = false;
		return 0;
	}

	for (i = 0; i < ci_cps->performance_level_count; i++) {
		if (!ci_are_power_levels_equal(&(ci_cps->performance_levels[i]),
					       &(ci_rps->performance_levels[i]))) {
			*equal = false;
			return 0;
		}
	}

	/* If all performance levels are the same try to use the UVD clocks to break the tie. */
	*equal = ((cps->vclk == rps->vclk) && (cps->dclk == rps->dclk));
	*equal &= ((cps->evclk == rps->evclk) && (cps->ecclk == rps->ecclk));

	return 0;
}

static u32 ci_dpm_get_sclk(void *handle, bool low)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);

	if (low)
		return requested_state->performance_levels[0].sclk;
	else
		return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk;
}

static u32 ci_dpm_get_mclk(void *handle, bool low)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);

	if (low)
		return requested_state->performance_levels[0].mclk;
	else
		return requested_state->performance_levels[requested_state->performance_level_count - 1].mclk;
}

/* get temperature in millidegrees */
static int ci_dpm_get_temp(void *handle)
{
	u32 temp;
	int actual_temp = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	temp = (RREG32_SMC(ixCG_MULT_THERMAL_STATUS) & CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK) >>
		CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT;

	if (temp & 0x200)
		actual_temp = 255;
	else
		actual_temp = temp & 0x1ff;

	actual_temp = actual_temp * 1000;

	return actual_temp;
}

static int ci_set_temperature_range(struct amdgpu_device *adev)
{
	int ret;

	ret = ci_thermal_enable_alert(adev, false);
	if (ret)
		return ret;
	ret = ci_thermal_set_temperature_range(adev, CISLANDS_TEMP_RANGE_MIN,
					       CISLANDS_TEMP_RANGE_MAX);
	if (ret)
		return ret;
	ret = ci_thermal_enable_alert(adev, true);
	if (ret)
		return ret;
	return ret;
}

static int ci_dpm_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	ci_dpm_set_irq_funcs(adev);

	return 0;
}

static int ci_dpm_late_init(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!amdgpu_dpm)
		return 0;

	/* init the sysfs and debugfs files late */
	ret = amdgpu_pm_sysfs_init(adev);
	if (ret)
		return ret;

	ret = ci_set_temperature_range(adev);
	if (ret)
		return ret;

	return 0;
}

static int ci_dpm_sw_init(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	ret = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 230,
				&adev->pm.dpm.thermal.irq);
	if (ret)
		return ret;

	ret = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 231,
				&adev->pm.dpm.thermal.irq);
	if (ret)
		return ret;

	/* default to balanced state */
	adev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
	adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
	adev->pm.dpm.forced_level = AMD_DPM_FORCED_LEVEL_AUTO;
	adev->pm.default_sclk = adev->clock.default_sclk;
	adev->pm.default_mclk = adev->clock.default_mclk;
	adev->pm.current_sclk = adev->clock.default_sclk;
	adev->pm.current_mclk = adev->clock.default_mclk;
	adev->pm.int_thermal_type = THERMAL_TYPE_NONE;

	ret = ci_dpm_init_microcode(adev);
	if (ret)
		return ret;

	if (amdgpu_dpm == 0)
		return 0;

	INIT_WORK(&adev->pm.dpm.thermal.work, amdgpu_dpm_thermal_work_handler);
	mutex_lock(&adev->pm.mutex);
	ret = ci_dpm_init(adev);
	if (ret)
		goto dpm_failed;
	adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
	if (amdgpu_dpm == 1)
		amdgpu_pm_print_power_states(adev);
	mutex_unlock(&adev->pm.mutex);
	DRM_INFO("amdgpu: dpm initialized\n");

	return 0;

dpm_failed:
	ci_dpm_fini(adev);
	mutex_unlock(&adev->pm.mutex);
	DRM_ERROR("amdgpu: dpm initialization failed\n");
	return ret;
}

static int ci_dpm_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	flush_work(&adev->pm.dpm.thermal.work);

	mutex_lock(&adev->pm.mutex);
	ci_dpm_fini(adev);
	mutex_unlock(&adev->pm.mutex);

	release_firmware(adev->pm.fw);
	adev->pm.fw = NULL;

	return 0;
}

static int ci_dpm_hw_init(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!amdgpu_dpm) {
		ret = ci_upload_firmware(adev);
		if (ret) {
			DRM_ERROR("ci_upload_firmware failed\n");
			return ret;
		}
		ci_dpm_start_smc(adev);
		return 0;
	}

	mutex_lock(&adev->pm.mutex);
	ci_dpm_setup_asic(adev);
	ret = ci_dpm_enable(adev);
	if (ret)
		adev->pm.dpm_enabled = false;
	else
		adev->pm.dpm_enabled = true;
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

static int ci_dpm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->pm.dpm_enabled) {
		mutex_lock(&adev->pm.mutex);
		ci_dpm_disable(adev);
		mutex_unlock(&adev->pm.mutex);
	} else {
		ci_dpm_stop_smc(adev);
	}

	return 0;
}

static int ci_dpm_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->pm.dpm_enabled) {
		mutex_lock(&adev->pm.mutex);
		amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
			       AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
		amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
			       AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);
		adev->pm.dpm.last_user_state = adev->pm.dpm.user_state;
		adev->pm.dpm.last_state = adev->pm.dpm.state;
		adev->pm.dpm.user_state = POWER_STATE_TYPE_INTERNAL_BOOT;
		adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_BOOT;
		mutex_unlock(&adev->pm.mutex);
		amdgpu_pm_compute_clocks(adev);
	}

	return 0;
}

static int ci_dpm_resume(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->pm.dpm_enabled) {
		/* asic init will reset to the boot state */
		mutex_lock(&adev->pm.mutex);
		ci_dpm_setup_asic(adev);
		ret = ci_dpm_enable(adev);
		if (ret)
			adev->pm.dpm_enabled = false;
		else
			adev->pm.dpm_enabled = true;
		adev->pm.dpm.user_state = adev->pm.dpm.last_user_state;
		adev->pm.dpm.state = adev->pm.dpm.last_state;
		mutex_unlock(&adev->pm.mutex);
		if (adev->pm.dpm_enabled)
			amdgpu_pm_compute_clocks(adev);
	}
	return 0;
}

static bool ci_dpm_is_idle(void *handle)
{
	/* XXX */
	return true;
}

static int ci_dpm_wait_for_idle(void *handle)
{
	/* XXX */
	return 0;
}

static int ci_dpm_soft_reset(void *handle)
{
	return 0;
}

static int ci_dpm_set_interrupt_state(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      unsigned type,
				      enum amdgpu_interrupt_state state)
{
	u32 cg_thermal_int;

	switch (type) {
	case AMDGPU_THERMAL_IRQ_LOW_TO_HIGH:
		switch (state) {
		case AMDGPU_IRQ_STATE_DISABLE:
			cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
			cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
			WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
			break;
		case AMDGPU_IRQ_STATE_ENABLE:
			cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
			cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
			WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
			break;
		default:
			break;
		}
		break;

	case AMDGPU_THERMAL_IRQ_HIGH_TO_LOW:
		switch (state) {
		case AMDGPU_IRQ_STATE_DISABLE:
			cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
			cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
			WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
			break;
		case AMDGPU_IRQ_STATE_ENABLE:
			cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
			cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
			WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
			break;
		default:
			break;
		}
		break;

	default:
		break;
	}
	return 0;
}

static int ci_dpm_process_interrupt(struct amdgpu_device *adev,
				    struct amdgpu_irq_src *source,
				    struct amdgpu_iv_entry *entry)
{
	bool queue_thermal = false;

	if (entry == NULL)
		return -EINVAL;

	switch (entry->src_id) {
	case 230: /* thermal low to high */
		DRM_DEBUG("IH: thermal low to high\n");
		adev->pm.dpm.thermal.high_to_low = false;
		queue_thermal = true;
		break;
	case 231: /* thermal high to low */
		DRM_DEBUG("IH: thermal high to low\n");
		adev->pm.dpm.thermal.high_to_low = true;
		queue_thermal = true;
		break;
	default:
		break;
	}

	if (queue_thermal)
		schedule_work(&adev->pm.dpm.thermal.work);

	return 0;
}

static int ci_dpm_set_clockgating_state(void *handle,
					enum amd_clockgating_state state)
{
	return 0;
}

static int ci_dpm_set_powergating_state(void *handle,
					enum amd_powergating_state state)
{
	return 0;
}

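/*
 * Print one line per SCLK/MCLK/PCIe DPM level into buf, marking the
 * active level with '*'. The current SCLK/MCLK is queried from the SMC;
 * the first level at or above it is treated as active.
 */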
Rex Zhucfa289f2017-09-06 15:27:59 +08006532static int ci_dpm_print_clock_levels(void *handle,
Eric Huang19fbc432016-05-19 15:50:09 -04006533 enum pp_clock_type type, char *buf)
6534{
Rex Zhucfa289f2017-09-06 15:27:59 +08006535 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
Eric Huang19fbc432016-05-19 15:50:09 -04006536 struct ci_power_info *pi = ci_get_pi(adev);
6537 struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table;
6538 struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table;
6539 struct ci_single_dpm_table *pcie_table = &pi->dpm_table.pcie_speed_table;
6540
6541 int i, now, size = 0;
6542 uint32_t clock, pcie_speed;
6543
6544 switch (type) {
6545 case PP_SCLK:
6546 amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_API_GetSclkFrequency);
6547 clock = RREG32(mmSMC_MSG_ARG_0);
6548
6549 for (i = 0; i < sclk_table->count; i++) {
6550 if (clock > sclk_table->dpm_levels[i].value)
6551 continue;
6552 break;
6553 }
6554 now = i;
6555
6556 for (i = 0; i < sclk_table->count; i++)
6557 size += sprintf(buf + size, "%d: %uMhz %s\n",
6558 i, sclk_table->dpm_levels[i].value / 100,
6559 (i == now) ? "*" : "");
6560 break;
6561 case PP_MCLK:
6562 amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_API_GetMclkFrequency);
6563 clock = RREG32(mmSMC_MSG_ARG_0);
6564
6565 for (i = 0; i < mclk_table->count; i++) {
6566 if (clock > mclk_table->dpm_levels[i].value)
6567 continue;
6568 break;
6569 }
6570 now = i;
6571
6572 for (i = 0; i < mclk_table->count; i++)
6573 size += sprintf(buf + size, "%d: %uMhz %s\n",
6574 i, mclk_table->dpm_levels[i].value / 100,
6575 (i == now) ? "*" : "");
6576 break;
6577 case PP_PCIE:
6578 pcie_speed = ci_get_current_pcie_speed(adev);
6579 for (i = 0; i < pcie_table->count; i++) {
6580 if (pcie_speed != pcie_table->dpm_levels[i].value)
6581 continue;
6582 break;
6583 }
6584 now = i;
6585
6586 for (i = 0; i < pcie_table->count; i++)
6587 size += sprintf(buf + size, "%d: %s %s\n", i,
Evan Quan7413d2f2017-10-26 17:29:34 +08006588 (pcie_table->dpm_levels[i].value == 0) ? "2.5GT/s, x1" :
6589 (pcie_table->dpm_levels[i].value == 1) ? "5.0GT/s, x16" :
6590 (pcie_table->dpm_levels[i].value == 2) ? "8.0GT/s, x16" : "",
Eric Huang19fbc432016-05-19 15:50:09 -04006591 (i == now) ? "*" : "");
6592 break;
6593 default:
6594 break;
6595 }
6596
6597 return size;
6598}
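
/*
 * The "/ 100" above suggests the DPM tables store clocks in 10 kHz
 * units, so a level stored as 60000 prints as 600Mhz.  A hypothetical
 * read of the pp_dpm_sclk sysfs file backed by this hook would then
 * look like:
 *
 *	0: 300Mhz
 *	1: 600Mhz *
 *	2: 1000Mhz
 *
 * where '*' marks the first level at or above the frequency the SMC
 * reported via PPSMC_MSG_API_GetSclkFrequency.
 */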

static int ci_dpm_force_clock_level(void *handle,
				    enum pp_clock_type type, uint32_t mask)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ci_power_info *pi = ci_get_pi(adev);

	if (adev->pm.dpm.forced_level != AMD_DPM_FORCED_LEVEL_MANUAL)
		return -EINVAL;

	if (mask == 0)
		return -EINVAL;

	switch (type) {
	case PP_SCLK:
		if (!pi->sclk_dpm_key_disabled)
			amdgpu_ci_send_msg_to_smc_with_parameter(adev,
					PPSMC_MSG_SCLKDPM_SetEnabledMask,
					pi->dpm_level_enable_mask.sclk_dpm_enable_mask & mask);
		break;

	case PP_MCLK:
		if (!pi->mclk_dpm_key_disabled)
			amdgpu_ci_send_msg_to_smc_with_parameter(adev,
					PPSMC_MSG_MCLKDPM_SetEnabledMask,
					pi->dpm_level_enable_mask.mclk_dpm_enable_mask & mask);
		break;

	case PP_PCIE:
	{
		uint32_t tmp = mask & pi->dpm_level_enable_mask.pcie_dpm_enable_mask;

		if (!pi->pcie_dpm_key_disabled) {
			if (fls(tmp) != ffs(tmp))
				amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PCIeDPM_UnForceLevel);
			else
				amdgpu_ci_send_msg_to_smc_with_parameter(adev,
						PPSMC_MSG_PCIeDPM_ForceLevel,
						fls(tmp) - 1);
		}
		break;
	}
	default:
		break;
	}

	return 0;
}
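
/*
 * The mask argument is a bitmap of requested DPM levels, so mask = 0x5
 * asks for levels 0 and 2.  For SCLK/MCLK the bitmap (ANDed with the
 * driver's enabled-level mask) is handed straight to the SMC.  For
 * PCIe, fls(tmp) == ffs(tmp) holds exactly when a single bit is set, in
 * which case that one level (fls(tmp) - 1) is forced; with more than
 * one bit set, any previous force is released instead.  A hypothetical
 * caller forcing only PCIe level 2:
 *
 *	ci_dpm_force_clock_level(adev, PP_PCIE, 1 << 2);
 */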

static int ci_dpm_get_sclk_od(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_single_dpm_table *sclk_table = &(pi->dpm_table.sclk_table);
	struct ci_single_dpm_table *golden_sclk_table =
			&(pi->golden_dpm_table.sclk_table);
	int value;

	value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
			golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) *
			100 /
			golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;

	return value;
}
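
/*
 * Worked example of the overdrive readback above: with a golden (stock)
 * top sclk level of 100000 (1000 MHz in the presumed 10 kHz units) and
 * a current top level of 110000, this returns
 * (110000 - 100000) * 100 / 100000 = 10, i.e. a 10% overclock.
 */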

static int ci_dpm_set_sclk_od(void *handle, uint32_t value)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_ps *ps = ci_get_ps(adev->pm.dpm.requested_ps);
	struct ci_single_dpm_table *golden_sclk_table =
			&(pi->golden_dpm_table.sclk_table);

	if (value > 20)
		value = 20;

	ps->performance_levels[ps->performance_level_count - 1].sclk =
			golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value *
			value / 100 +
			golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;

	return 0;
}
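
/*
 * The inverse of ci_dpm_get_sclk_od(): value is an overdrive
 * percentage, clamped to 20, applied on top of the golden top level.
 * E.g. value = 10 with a golden top sclk of 100000 yields
 * 100000 * 10 / 100 + 100000 = 110000.  Only the requested power
 * state's highest performance level is touched, so the new clock
 * presumably takes effect on the next power-state transition.
 */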

static int ci_dpm_get_mclk_od(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_single_dpm_table *mclk_table = &(pi->dpm_table.mclk_table);
	struct ci_single_dpm_table *golden_mclk_table =
			&(pi->golden_dpm_table.mclk_table);
	int value;

	value = (mclk_table->dpm_levels[mclk_table->count - 1].value -
			golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) *
			100 /
			golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;

	return value;
}

static int ci_dpm_set_mclk_od(void *handle, uint32_t value)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_ps *ps = ci_get_ps(adev->pm.dpm.requested_ps);
	struct ci_single_dpm_table *golden_mclk_table =
			&(pi->golden_dpm_table.mclk_table);

	if (value > 20)
		value = 20;

	ps->performance_levels[ps->performance_level_count - 1].mclk =
			golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value *
			value / 100 +
			golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;

	return 0;
}
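
/*
 * The mclk overdrive pair above mirrors the sclk arithmetic exactly:
 * the same percentage formula and the same 20% clamp, applied to the
 * golden mclk table instead of the sclk one.
 */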

static int ci_dpm_read_sensor(void *handle, int idx,
			      void *value, int *size)
{
	u32 activity_percent = 50;
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* size must be at least 4 bytes for all sensors */
	if (*size < 4)
		return -EINVAL;

	switch (idx) {
	case AMDGPU_PP_SENSOR_GFX_SCLK:
		*((uint32_t *)value) = ci_get_average_sclk_freq(adev);
		*size = 4;
		return 0;
	case AMDGPU_PP_SENSOR_GFX_MCLK:
		*((uint32_t *)value) = ci_get_average_mclk_freq(adev);
		*size = 4;
		return 0;
	case AMDGPU_PP_SENSOR_GPU_TEMP:
		*((uint32_t *)value) = ci_dpm_get_temp(adev);
		*size = 4;
		return 0;
	case AMDGPU_PP_SENSOR_GPU_LOAD:
		ret = ci_read_smc_soft_register(adev,
						offsetof(SMU7_SoftRegisters,
							 AverageGraphicsA),
						&activity_percent);
		if (ret == 0) {
			activity_percent += 0x80;
			activity_percent >>= 8;
			activity_percent =
				activity_percent > 100 ? 100 : activity_percent;
		}
		*((uint32_t *)value) = activity_percent;
		*size = 4;
		return 0;
	default:
		return -EINVAL;
	}
}
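
/*
 * The GPU_LOAD path reads AverageGraphicsA from the SMC soft-register
 * area, which appears to be an 8.8 fixed-point activity value:
 * (raw + 0x80) >> 8 rounds it to the nearest integer percent, which is
 * then clamped to 100.  E.g. raw = 0x5f80 (95.5) rounds to 96.  A
 * hypothetical caller:
 *
 *	uint32_t load;
 *	int size = sizeof(load);
 *
 *	ci_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD, &load, &size);
 */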

const struct amd_ip_funcs ci_dpm_ip_funcs = {
	.name = "ci_dpm",
	.early_init = ci_dpm_early_init,
	.late_init = ci_dpm_late_init,
	.sw_init = ci_dpm_sw_init,
	.sw_fini = ci_dpm_sw_fini,
	.hw_init = ci_dpm_hw_init,
	.hw_fini = ci_dpm_hw_fini,
	.suspend = ci_dpm_suspend,
	.resume = ci_dpm_resume,
	.is_idle = ci_dpm_is_idle,
	.wait_for_idle = ci_dpm_wait_for_idle,
	.soft_reset = ci_dpm_soft_reset,
	.set_clockgating_state = ci_dpm_set_clockgating_state,
	.set_powergating_state = ci_dpm_set_powergating_state,
};

const struct amd_pm_funcs ci_dpm_funcs = {
	.pre_set_power_state = &ci_dpm_pre_set_power_state,
	.set_power_state = &ci_dpm_set_power_state,
	.post_set_power_state = &ci_dpm_post_set_power_state,
	.display_configuration_changed = &ci_dpm_display_configuration_changed,
	.get_sclk = &ci_dpm_get_sclk,
	.get_mclk = &ci_dpm_get_mclk,
	.print_power_state = &ci_dpm_print_power_state,
	.debugfs_print_current_performance_level = &ci_dpm_debugfs_print_current_performance_level,
	.force_performance_level = &ci_dpm_force_performance_level,
	.vblank_too_short = &ci_dpm_vblank_too_short,
	.powergate_uvd = &ci_dpm_powergate_uvd,
	.set_fan_control_mode = &ci_dpm_set_fan_control_mode,
	.get_fan_control_mode = &ci_dpm_get_fan_control_mode,
	.set_fan_speed_percent = &ci_dpm_set_fan_speed_percent,
	.get_fan_speed_percent = &ci_dpm_get_fan_speed_percent,
	.print_clock_levels = ci_dpm_print_clock_levels,
	.force_clock_level = ci_dpm_force_clock_level,
	.get_sclk_od = ci_dpm_get_sclk_od,
	.set_sclk_od = ci_dpm_set_sclk_od,
	.get_mclk_od = ci_dpm_get_mclk_od,
	.set_mclk_od = ci_dpm_set_mclk_od,
	.check_state_equal = ci_check_state_equal,
	.get_vce_clock_state = amdgpu_get_vce_clock_state,
	.read_sensor = ci_dpm_read_sensor,
};

static const struct amdgpu_irq_src_funcs ci_dpm_irq_funcs = {
	.set = ci_dpm_set_interrupt_state,
	.process = ci_dpm_process_interrupt,
};

static void ci_dpm_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST;
	adev->pm.dpm.thermal.irq.funcs = &ci_dpm_irq_funcs;
}