/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/fb.h>
#include <linux/delay.h>
#include <asm/div64.h>
#include "pp_acpi.h"
#include "pp_debug.h"
#include "ppatomctrl.h"
#include "atombios.h"
#include "pptable_v1_0.h"
#include "pppcielanes.h"
#include "amd_pcie_helpers.h"
#include "hardwaremanager.h"
#include "process_pptables_v1_0.h"
#include "cgs_common.h"

#include "smu7_common.h"

#include "hwmgr.h"
#include "smu7_hwmgr.h"
#include "smu7_powertune.h"
#include "smu7_dyn_defaults.h"
#include "smu7_thermal.h"
#include "smu7_clockpowergating.h"
#include "processpptables.h"

#define MC_CG_ARB_FREQ_F0           0x0a
#define MC_CG_ARB_FREQ_F1           0x0b
#define MC_CG_ARB_FREQ_F2           0x0c
#define MC_CG_ARB_FREQ_F3           0x0d

#define MC_CG_SEQ_DRAMCONF_S0       0x05
#define MC_CG_SEQ_DRAMCONF_S1       0x06
#define MC_CG_SEQ_YCLK_SUSPEND      0x04
#define MC_CG_SEQ_YCLK_RESUME       0x0a

#define SMC_CG_IND_START            0xc0030000
#define SMC_CG_IND_END              0xc0040000

#define VOLTAGE_SCALE               4
#define VOLTAGE_VID_OFFSET_SCALE1   625
#define VOLTAGE_VID_OFFSET_SCALE2   100
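/*
 * A worked example of the scaling above (a sketch inferred from how the
 * constants are used, not from a datasheet): one SVI2 VID step is
 * 625 / 100 = 6.25 mV, so a VID offset of 24 corresponds to 150 mV,
 * while VOLTAGE_SCALE converts millivolt table values into the 0.25 mV
 * units the SMC firmware expects (900 mV -> 3600).
 */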

#define MEM_FREQ_LOW_LATENCY        25000
#define MEM_FREQ_HIGH_LATENCY       80000

#define MEM_LATENCY_HIGH            45
#define MEM_LATENCY_LOW             35
#define MEM_LATENCY_ERR             0xFFFF

#define MC_SEQ_MISC0_GDDR5_SHIFT    28
#define MC_SEQ_MISC0_GDDR5_MASK     0xf0000000
#define MC_SEQ_MISC0_GDDR5_VALUE    5

#define PCIE_BUS_CLK                10000
#define TCLK                        (PCIE_BUS_CLK / 10)
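/*
 * Unit note (an inference from smu7_get_xclk() below rather than a
 * documented contract): clocks in this file are kept in 10 kHz units,
 * so PCIE_BUS_CLK == 10000 denotes 100 MHz and TCLK denotes 10 MHz,
 * the fixed XCLK used when MUX_TCLK_TO_XCLK is set.
 */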

/** Values for the CG_THERMAL_CTRL::DPM_EVENT_SRC field. */
enum DPM_EVENT_SRC {
	DPM_EVENT_SRC_ANALOG = 0,
	DPM_EVENT_SRC_EXTERNAL = 1,
	DPM_EVENT_SRC_DIGITAL = 2,
	DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,
	DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL = 4
};

static const unsigned long PhwVIslands_Magic = (unsigned long)(PHM_VIslands_Magic);

struct smu7_power_state *cast_phw_smu7_power_state(
				struct pp_hw_power_state *hw_ps)
{
	PP_ASSERT_WITH_CODE((PhwVIslands_Magic == hw_ps->magic),
				"Invalid Powerstate Type!",
				return NULL);

	return (struct smu7_power_state *)hw_ps;
}

const struct smu7_power_state *cast_const_phw_smu7_power_state(
				const struct pp_hw_power_state *hw_ps)
{
	PP_ASSERT_WITH_CODE((PhwVIslands_Magic == hw_ps->magic),
				"Invalid Powerstate Type!",
				return NULL);

	return (const struct smu7_power_state *)hw_ps;
}

/**
 * Find the MC microcode version and store it in the HwMgr struct
 *
 * @param hwmgr the address of the powerplay hardware manager.
 * @return always 0
 */
int smu7_get_mc_microcode_version(struct pp_hwmgr *hwmgr)
{
	cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX, 0x9F);

	hwmgr->microcode_version_info.MC = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA);

	return 0;
}

uint16_t smu7_get_current_pcie_speed(struct pp_hwmgr *hwmgr)
{
	uint32_t speedCntl = 0;

	/* mmPCIE_PORT_INDEX rename as mmPCIE_INDEX */
	speedCntl = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__PCIE,
			ixPCIE_LC_SPEED_CNTL);
	return((uint16_t)PHM_GET_FIELD(speedCntl,
			PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE));
}

int smu7_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr)
{
	uint32_t link_width;

	/* mmPCIE_PORT_INDEX rename as mmPCIE_INDEX */
	link_width = PHM_READ_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
			PCIE_LC_LINK_WIDTH_CNTL, LC_LINK_WIDTH_RD);

	PP_ASSERT_WITH_CODE((7 >= link_width),
			"Invalid PCIe lane width!", return 0);

	return decode_pcie_lane_width(link_width);
}
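/*
 * For reference (assuming the usual PCIe LC_LINK_WIDTH encoding; the
 * authoritative mapping lives in decode_pcie_lane_width()): field
 * values 0..6 encode link widths x0, x1, x2, x4, x8, x12 and x16,
 * which is why anything above 7 fails the sanity check above.
 */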

/**
 * Enable voltage control
 *
 * @param hwmgr the address of the powerplay hardware manager.
 * @return always 0
 */
int smu7_enable_smc_voltage_controller(struct pp_hwmgr *hwmgr)
{
	if (hwmgr->feature_mask & PP_SMC_VOLTAGE_CONTROL_MASK)
		smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Voltage_Cntl_Enable);

	return 0;
}

/**
 * Checks if we want to support voltage control
 *
 * @param hwmgr the address of the powerplay hardware manager.
 */
static bool smu7_voltage_control(const struct pp_hwmgr *hwmgr)
{
	const struct smu7_hwmgr *data =
			(const struct smu7_hwmgr *)(hwmgr->backend);

	return (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control);
}

/**
 * Enable voltage control
 *
 * @param hwmgr the address of the powerplay hardware manager.
 * @return always 0
 */
static int smu7_enable_voltage_control(struct pp_hwmgr *hwmgr)
{
	/* enable voltage control */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			GENERAL_PWRMGT, VOLT_PWRMGT_EN, 1);

	return 0;
}

static int phm_get_svi2_voltage_table_v0(pp_atomctrl_voltage_table *voltage_table,
		struct phm_clock_voltage_dependency_table *voltage_dependency_table
		)
{
	uint32_t i;

	PP_ASSERT_WITH_CODE((NULL != voltage_table),
			"Voltage Dependency Table empty.", return -EINVAL;);

	voltage_table->mask_low = 0;
	voltage_table->phase_delay = 0;
	voltage_table->count = voltage_dependency_table->count;

	for (i = 0; i < voltage_dependency_table->count; i++) {
		voltage_table->entries[i].value =
			voltage_dependency_table->entries[i].v;
		voltage_table->entries[i].smio_low = 0;
	}

	return 0;
}
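/*
 * A sketch of the transform above: a v0 dependency table of {clk, v}
 * pairs, say { {30000, 900}, {60000, 1100} } (10 kHz / mV units), is
 * flattened into the plain voltage table { 900, 1100 }, with SMIO and
 * phase-delay fields zeroed since SVI2 only needs the voltage levels.
 */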

/**
 * Create Voltage Tables.
 *
 * @param hwmgr the address of the powerplay hardware manager.
 * @return always 0
 */
static int smu7_construct_voltage_tables(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)hwmgr->pptable;
	int result = 0;
	uint32_t tmp;

	if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
		result = atomctrl_get_voltage_table_v3(hwmgr,
				VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT,
				&(data->mvdd_voltage_table));
		PP_ASSERT_WITH_CODE((0 == result),
				"Failed to retrieve MVDD table.",
				return result);
	} else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
		if (hwmgr->pp_table_version == PP_TABLE_V1)
			result = phm_get_svi2_mvdd_voltage_table(&(data->mvdd_voltage_table),
					table_info->vdd_dep_on_mclk);
		else if (hwmgr->pp_table_version == PP_TABLE_V0)
			result = phm_get_svi2_voltage_table_v0(&(data->mvdd_voltage_table),
					hwmgr->dyn_state.mvdd_dependency_on_mclk);

		PP_ASSERT_WITH_CODE((0 == result),
				"Failed to retrieve SVI2 MVDD table from dependency table.",
				return result;);
	}

	if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
		result = atomctrl_get_voltage_table_v3(hwmgr,
				VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT,
				&(data->vddci_voltage_table));
		PP_ASSERT_WITH_CODE((0 == result),
				"Failed to retrieve VDDCI table.",
				return result);
	} else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
		if (hwmgr->pp_table_version == PP_TABLE_V1)
			result = phm_get_svi2_vddci_voltage_table(&(data->vddci_voltage_table),
					table_info->vdd_dep_on_mclk);
		else if (hwmgr->pp_table_version == PP_TABLE_V0)
			result = phm_get_svi2_voltage_table_v0(&(data->vddci_voltage_table),
					hwmgr->dyn_state.vddci_dependency_on_mclk);
		PP_ASSERT_WITH_CODE((0 == result),
				"Failed to retrieve SVI2 VDDCI table from dependency table.",
				return result);
	}

	if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) {
		/* VDDGFX has only SVI2 voltage control */
		result = phm_get_svi2_vdd_voltage_table(&(data->vddgfx_voltage_table),
				table_info->vddgfx_lookup_table);
		PP_ASSERT_WITH_CODE((0 == result),
				"Failed to retrieve SVI2 VDDGFX table from lookup table.", return result;);
	}

	if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->voltage_control) {
		result = atomctrl_get_voltage_table_v3(hwmgr,
				VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT,
				&data->vddc_voltage_table);
		PP_ASSERT_WITH_CODE((0 == result),
				"Failed to retrieve VDDC table.", return result;);
	} else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
		if (hwmgr->pp_table_version == PP_TABLE_V0)
			result = phm_get_svi2_voltage_table_v0(&data->vddc_voltage_table,
					hwmgr->dyn_state.vddc_dependency_on_mclk);
		else if (hwmgr->pp_table_version == PP_TABLE_V1)
			result = phm_get_svi2_vdd_voltage_table(&(data->vddc_voltage_table),
					table_info->vddc_lookup_table);

		PP_ASSERT_WITH_CODE((0 == result),
				"Failed to retrieve SVI2 VDDC table from dependency table.", return result;);
	}

	tmp = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_VDDC);
	PP_ASSERT_WITH_CODE(
			(data->vddc_voltage_table.count <= tmp),
		"Too many voltage values for VDDC. Trimming to fit state table.",
			phm_trim_voltage_table_to_fit_state_table(tmp,
						&(data->vddc_voltage_table)));

	tmp = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_VDDGFX);
	PP_ASSERT_WITH_CODE(
			(data->vddgfx_voltage_table.count <= tmp),
		"Too many voltage values for VDDGFX. Trimming to fit state table.",
			phm_trim_voltage_table_to_fit_state_table(tmp,
						&(data->vddgfx_voltage_table)));

	tmp = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_VDDCI);
	PP_ASSERT_WITH_CODE(
			(data->vddci_voltage_table.count <= tmp),
		"Too many voltage values for VDDCI. Trimming to fit state table.",
			phm_trim_voltage_table_to_fit_state_table(tmp,
					&(data->vddci_voltage_table)));

	tmp = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_MVDD);
	PP_ASSERT_WITH_CODE(
			(data->mvdd_voltage_table.count <= tmp),
		"Too many voltage values for MVDD. Trimming to fit state table.",
			phm_trim_voltage_table_to_fit_state_table(tmp,
					&(data->mvdd_voltage_table)));

	return 0;
}

/**
 * Programs static screen detection parameters
 *
 * @param hwmgr the address of the powerplay hardware manager.
 * @return always 0
 */
static int smu7_program_static_screen_threshold_parameters(
							struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	/* Set static screen threshold unit */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD_UNIT,
			data->static_screen_threshold_unit);
	/* Set static screen threshold */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD,
			data->static_screen_threshold);

	return 0;
}

/**
 * Setup display gap for glitch free memory clock switching.
 *
 * @param hwmgr the address of the powerplay hardware manager.
 * @return always 0
 */
static int smu7_enable_display_gap(struct pp_hwmgr *hwmgr)
{
	uint32_t display_gap =
			cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
					ixCG_DISPLAY_GAP_CNTL);

	display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL,
			DISP_GAP, DISPLAY_GAP_IGNORE);

	display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL,
			DISP_GAP_MCHG, DISPLAY_GAP_VBLANK);

	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixCG_DISPLAY_GAP_CNTL, display_gap);

	return 0;
}

/**
 * Programs activity state transition voting clients
 *
 * @param hwmgr the address of the powerplay hardware manager.
 * @return always 0
 */
static int smu7_program_voting_clients(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	/* Clear reset for voting clients before enabling DPM */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 0);
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 0);

	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixCG_FREQ_TRAN_VOTING_0, data->voting_rights_clients0);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixCG_FREQ_TRAN_VOTING_1, data->voting_rights_clients1);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixCG_FREQ_TRAN_VOTING_2, data->voting_rights_clients2);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixCG_FREQ_TRAN_VOTING_3, data->voting_rights_clients3);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixCG_FREQ_TRAN_VOTING_4, data->voting_rights_clients4);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixCG_FREQ_TRAN_VOTING_5, data->voting_rights_clients5);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixCG_FREQ_TRAN_VOTING_6, data->voting_rights_clients6);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixCG_FREQ_TRAN_VOTING_7, data->voting_rights_clients7);

	return 0;
}

static int smu7_clear_voting_clients(struct pp_hwmgr *hwmgr)
{
	/* Reset voting clients before disabling DPM */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 1);
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 1);

	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixCG_FREQ_TRAN_VOTING_0, 0);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixCG_FREQ_TRAN_VOTING_1, 0);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixCG_FREQ_TRAN_VOTING_2, 0);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixCG_FREQ_TRAN_VOTING_3, 0);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixCG_FREQ_TRAN_VOTING_4, 0);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixCG_FREQ_TRAN_VOTING_5, 0);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixCG_FREQ_TRAN_VOTING_6, 0);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixCG_FREQ_TRAN_VOTING_7, 0);

	return 0;
}

/* Copy one arb setting to another and then switch the active set.
 * arb_src and arb_dest are each one of the MC_CG_ARB_FREQ_Fx constants.
 */
static int smu7_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr,
		uint32_t arb_src, uint32_t arb_dest)
{
	uint32_t mc_arb_dram_timing;
	uint32_t mc_arb_dram_timing2;
	uint32_t burst_time;
	uint32_t mc_cg_config;

	switch (arb_src) {
	case MC_CG_ARB_FREQ_F0:
		mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
		mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
		burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
		break;
	case MC_CG_ARB_FREQ_F1:
		mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1);
		mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1);
		burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1);
		break;
	default:
		return -EINVAL;
	}

	switch (arb_dest) {
	case MC_CG_ARB_FREQ_F0:
		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing);
		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
		PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0, burst_time);
		break;
	case MC_CG_ARB_FREQ_F1:
		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
		PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1, burst_time);
		break;
	default:
		return -EINVAL;
	}

	mc_cg_config = cgs_read_register(hwmgr->device, mmMC_CG_CONFIG);
	mc_cg_config |= 0x0000000F;
	cgs_write_register(hwmgr->device, mmMC_CG_CONFIG, mc_cg_config);
	PHM_WRITE_FIELD(hwmgr->device, MC_ARB_CG, CG_ARB_REQ, arb_dest);

	return 0;
}
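/*
 * A minimal usage sketch (mirroring the callers below): at DPM start
 * the F0 timings are cloned into F1 and F1 becomes the active set; on
 * teardown whichever set is active is copied back into F0:
 *
 *	smu7_copy_and_switch_arb_sets(hwmgr, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
 *	...
 *	smu7_copy_and_switch_arb_sets(hwmgr, active_set, MC_CG_ARB_FREQ_F0);
 */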

static int smu7_reset_to_default(struct pp_hwmgr *hwmgr)
{
	return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_ResetToDefaults);
}

/**
 * Initial switch from ARB F0->F1
 *
 * This function is to be called from the SetPowerState table.
 *
 * @param hwmgr the address of the powerplay hardware manager.
 * @return always 0
 */
static int smu7_initial_switch_from_arbf0_to_f1(struct pp_hwmgr *hwmgr)
{
	return smu7_copy_and_switch_arb_sets(hwmgr,
			MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
}

static int smu7_force_switch_to_arbf0(struct pp_hwmgr *hwmgr)
{
	uint32_t tmp;

	/* The currently active arb set is reported in bits 15:8 of SMC_SCRATCH9. */
	tmp = (cgs_read_ind_register(hwmgr->device,
			CGS_IND_REG__SMC, ixSMC_SCRATCH9) &
			0x0000ff00) >> 8;

	if (tmp == MC_CG_ARB_FREQ_F0)
		return 0;

	return smu7_copy_and_switch_arb_sets(hwmgr,
			tmp, MC_CG_ARB_FREQ_F0);
}

static int smu7_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	struct phm_ppt_v1_pcie_table *pcie_table = NULL;

	uint32_t i, max_entry;
	uint32_t tmp;

	PP_ASSERT_WITH_CODE((data->use_pcie_performance_levels ||
			data->use_pcie_power_saving_levels), "No pcie performance levels!",
			return -EINVAL);

	if (table_info != NULL)
		pcie_table = table_info->pcie_table;

	if (data->use_pcie_performance_levels &&
			!data->use_pcie_power_saving_levels) {
		data->pcie_gen_power_saving = data->pcie_gen_performance;
		data->pcie_lane_power_saving = data->pcie_lane_performance;
	} else if (!data->use_pcie_performance_levels &&
			data->use_pcie_power_saving_levels) {
		data->pcie_gen_performance = data->pcie_gen_power_saving;
		data->pcie_lane_performance = data->pcie_lane_power_saving;
	}
	tmp = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_LINK);
	phm_reset_single_dpm_table(&data->dpm_table.pcie_speed_table,
					tmp,
					MAX_REGULAR_DPM_NUMBER);

	if (pcie_table != NULL) {
		/* max_entry is used to make sure we reserve one PCIE level
		 * for the boot level (fix for A+A PSPP issue).
		 * If the PCIE table from the PPTable has a ULV entry plus
		 * 8 entries, ignore the last entry.
		 */
		max_entry = (tmp < pcie_table->count) ? tmp : pcie_table->count;
		for (i = 1; i < max_entry; i++) {
			phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, i - 1,
					get_pcie_gen_support(data->pcie_gen_cap,
							pcie_table->entries[i].gen_speed),
					get_pcie_lane_support(data->pcie_lane_cap,
							pcie_table->entries[i].lane_width));
		}
		data->dpm_table.pcie_speed_table.count = max_entry - 1;
		smum_update_smc_table(hwmgr, SMU_BIF_TABLE);
	} else {
		/* Hardcode Pcie Table */
		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 0,
				get_pcie_gen_support(data->pcie_gen_cap,
						PP_Min_PCIEGen),
				get_pcie_lane_support(data->pcie_lane_cap,
						PP_Max_PCIELane));
		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 1,
				get_pcie_gen_support(data->pcie_gen_cap,
						PP_Min_PCIEGen),
				get_pcie_lane_support(data->pcie_lane_cap,
						PP_Max_PCIELane));
		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 2,
				get_pcie_gen_support(data->pcie_gen_cap,
						PP_Max_PCIEGen),
				get_pcie_lane_support(data->pcie_lane_cap,
						PP_Max_PCIELane));
		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 3,
				get_pcie_gen_support(data->pcie_gen_cap,
						PP_Max_PCIEGen),
				get_pcie_lane_support(data->pcie_lane_cap,
						PP_Max_PCIELane));
		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 4,
				get_pcie_gen_support(data->pcie_gen_cap,
						PP_Max_PCIEGen),
				get_pcie_lane_support(data->pcie_lane_cap,
						PP_Max_PCIELane));
		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 5,
				get_pcie_gen_support(data->pcie_gen_cap,
						PP_Max_PCIEGen),
				get_pcie_lane_support(data->pcie_lane_cap,
						PP_Max_PCIELane));

		data->dpm_table.pcie_speed_table.count = 6;
	}
	/* Populate last level for boot PCIE level, but do not increment count. */
	phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table,
			data->dpm_table.pcie_speed_table.count,
			get_pcie_gen_support(data->pcie_gen_cap,
					PP_Min_PCIEGen),
			get_pcie_lane_support(data->pcie_lane_cap,
					PP_Max_PCIELane));

	return 0;
}
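/*
 * For orientation, the fallback above builds a six-level table: levels
 * 0-1 at the minimum supported gen and levels 2-5 at the maximum, all
 * at maximum lane width, plus one extra entry past count that serves
 * as the reserved boot level.
 */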

static int smu7_reset_dpm_tables(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	memset(&(data->dpm_table), 0x00, sizeof(data->dpm_table));

	phm_reset_single_dpm_table(
			&data->dpm_table.sclk_table,
			smum_get_mac_definition(hwmgr->smumgr,
					SMU_MAX_LEVELS_GRAPHICS),
			MAX_REGULAR_DPM_NUMBER);
	phm_reset_single_dpm_table(
			&data->dpm_table.mclk_table,
			smum_get_mac_definition(hwmgr->smumgr,
					SMU_MAX_LEVELS_MEMORY), MAX_REGULAR_DPM_NUMBER);

	phm_reset_single_dpm_table(
			&data->dpm_table.vddc_table,
			smum_get_mac_definition(hwmgr->smumgr,
					SMU_MAX_LEVELS_VDDC),
			MAX_REGULAR_DPM_NUMBER);
	phm_reset_single_dpm_table(
			&data->dpm_table.vddci_table,
			smum_get_mac_definition(hwmgr->smumgr,
					SMU_MAX_LEVELS_VDDCI), MAX_REGULAR_DPM_NUMBER);

	phm_reset_single_dpm_table(
			&data->dpm_table.mvdd_table,
			smum_get_mac_definition(hwmgr->smumgr,
					SMU_MAX_LEVELS_MVDD),
			MAX_REGULAR_DPM_NUMBER);
	return 0;
}

/*
 * This function initializes all DPM state tables
 * for SMU7 based on the dependency tables.
 * The dynamic state patching function will then trim these
 * state tables to the allowed range based
 * on the power policy or external client requests,
 * such as a UVD request, etc.
 */

static int smu7_setup_dpm_tables_v0(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_clock_voltage_dependency_table *allowed_vdd_sclk_table =
		hwmgr->dyn_state.vddc_dependency_on_sclk;
	struct phm_clock_voltage_dependency_table *allowed_vdd_mclk_table =
		hwmgr->dyn_state.vddc_dependency_on_mclk;
	struct phm_cac_leakage_table *std_voltage_table =
		hwmgr->dyn_state.cac_leakage_table;
	uint32_t i;

	PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table != NULL,
		"SCLK dependency table is missing. This table is mandatory", return -EINVAL);
	PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table->count >= 1,
		"SCLK dependency table is empty. This table is mandatory", return -EINVAL);

	PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL,
		"MCLK dependency table is missing. This table is mandatory", return -EINVAL);
	PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table->count >= 1,
		"MCLK dependency table is empty. This table is mandatory", return -EINVAL);

	/* Initialize Sclk DPM table based on allowed Sclk values */
	data->dpm_table.sclk_table.count = 0;

	for (i = 0; i < allowed_vdd_sclk_table->count; i++) {
		if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count-1].value !=
				allowed_vdd_sclk_table->entries[i].clk) {
			data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value =
				allowed_vdd_sclk_table->entries[i].clk;
			data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled = 1; /*(i==0) ? 1 : 0; to do */
			data->dpm_table.sclk_table.count++;
		}
	}

	PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL,
		"MCLK dependency table is missing. This table is mandatory", return -EINVAL);
	/* Initialize Mclk DPM table based on allowed Mclk values */
	data->dpm_table.mclk_table.count = 0;
	for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
		if (i == 0 || data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count-1].value !=
			allowed_vdd_mclk_table->entries[i].clk) {
			data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value =
				allowed_vdd_mclk_table->entries[i].clk;
			data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled = 1; /*(i==0) ? 1 : 0; */
			data->dpm_table.mclk_table.count++;
		}
	}

	/* Initialize Vddc DPM table based on allowed Vddc values, and populate corresponding std values. */
	for (i = 0; i < allowed_vdd_sclk_table->count; i++) {
		data->dpm_table.vddc_table.dpm_levels[i].value = allowed_vdd_sclk_table->entries[i].v;
		data->dpm_table.vddc_table.dpm_levels[i].param1 = std_voltage_table->entries[i].Leakage;
		/* param1 is for corresponding std voltage */
		data->dpm_table.vddc_table.dpm_levels[i].enabled = 1;
	}

	data->dpm_table.vddc_table.count = allowed_vdd_sclk_table->count;
	allowed_vdd_mclk_table = hwmgr->dyn_state.vddci_dependency_on_mclk;

	if (NULL != allowed_vdd_mclk_table) {
		/* Initialize Vddci DPM table based on allowed Mclk values */
		for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
			data->dpm_table.vddci_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
			data->dpm_table.vddci_table.dpm_levels[i].enabled = 1;
		}
		data->dpm_table.vddci_table.count = allowed_vdd_mclk_table->count;
	}

	allowed_vdd_mclk_table = hwmgr->dyn_state.mvdd_dependency_on_mclk;

	if (NULL != allowed_vdd_mclk_table) {
		/*
		 * Initialize MVDD DPM table based on allowed Mclk
		 * values
		 */
		for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
			data->dpm_table.mvdd_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
			data->dpm_table.mvdd_table.dpm_levels[i].enabled = 1;
		}
		data->dpm_table.mvdd_table.count = allowed_vdd_mclk_table->count;
	}

	return 0;
}
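/*
 * The dedup pattern above collapses consecutive duplicate clocks: a
 * dependency table listing { 300, 300, 600, 900, 900 } (MHz, for
 * illustration) yields the DPM levels { 300, 600, 900 }. Only adjacent
 * repeats are merged, which assumes the dependency tables arrive
 * sorted by clock.
 */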

static int smu7_setup_dpm_tables_v1(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	uint32_t i;

	struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table;
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;

	if (table_info == NULL)
		return -EINVAL;

	dep_sclk_table = table_info->vdd_dep_on_sclk;
	dep_mclk_table = table_info->vdd_dep_on_mclk;

	PP_ASSERT_WITH_CODE(dep_sclk_table != NULL,
			"SCLK dependency table is missing.",
			return -EINVAL);
	PP_ASSERT_WITH_CODE(dep_sclk_table->count >= 1,
			"SCLK dependency table count is 0.",
			return -EINVAL);

	PP_ASSERT_WITH_CODE(dep_mclk_table != NULL,
			"MCLK dependency table is missing.",
			return -EINVAL);
	PP_ASSERT_WITH_CODE(dep_mclk_table->count >= 1,
			"MCLK dependency table count is 0.",
			return -EINVAL);

	/* Initialize Sclk DPM table based on allowed Sclk values */
	data->dpm_table.sclk_table.count = 0;
	for (i = 0; i < dep_sclk_table->count; i++) {
		if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count - 1].value !=
						dep_sclk_table->entries[i].clk) {

			data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value =
					dep_sclk_table->entries[i].clk;

			data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled =
					(i == 0) ? true : false;
			data->dpm_table.sclk_table.count++;
		}
	}

	/* Initialize Mclk DPM table based on allowed Mclk values */
	data->dpm_table.mclk_table.count = 0;
	for (i = 0; i < dep_mclk_table->count; i++) {
		if (i == 0 || data->dpm_table.mclk_table.dpm_levels
				[data->dpm_table.mclk_table.count - 1].value !=
						dep_mclk_table->entries[i].clk) {
			data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value =
							dep_mclk_table->entries[i].clk;
			data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled =
							(i == 0) ? true : false;
			data->dpm_table.mclk_table.count++;
		}
	}

	return 0;
}

int smu7_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	smu7_reset_dpm_tables(hwmgr);

	if (hwmgr->pp_table_version == PP_TABLE_V1)
		smu7_setup_dpm_tables_v1(hwmgr);
	else if (hwmgr->pp_table_version == PP_TABLE_V0)
		smu7_setup_dpm_tables_v0(hwmgr);

	smu7_setup_default_pcie_table(hwmgr);

	/* save a copy of the default DPM table */
	memcpy(&(data->golden_dpm_table), &(data->dpm_table),
			sizeof(struct smu7_dpm_table));
	return 0;
}

uint32_t smu7_get_xclk(struct pp_hwmgr *hwmgr)
{
	uint32_t reference_clock, tmp;
	struct cgs_display_info info = {0};
	struct cgs_mode_info mode_info;

	info.mode_info = &mode_info;

	tmp = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK);

	if (tmp)
		return TCLK;

	cgs_get_active_displays_info(hwmgr->device, &info);
	reference_clock = mode_info.ref_clock;

	tmp = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_CLKPIN_CNTL, XTALIN_DIVIDE);

	if (0 != tmp)
		return reference_clock / 4;

	return reference_clock;
}
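/*
 * Worked example (illustrative numbers): if CGS reports a 100 MHz
 * display reference clock and XTALIN_DIVIDE is set, XCLK resolves to
 * 25 MHz; if MUX_TCLK_TO_XCLK is set instead, the fixed TCLK value is
 * returned without consulting the reference clock at all.
 */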

static int smu7_enable_vrhot_gpio_interrupt(struct pp_hwmgr *hwmgr)
{
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_RegulatorHot))
		return smum_send_msg_to_smc(hwmgr->smumgr,
				PPSMC_MSG_EnableVRHotGPIOInterrupt);

	return 0;
}

static int smu7_enable_sclk_control(struct pp_hwmgr *hwmgr)
{
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
			SCLK_PWRMGT_OFF, 0);
	return 0;
}

static int smu7_enable_ulv(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	if (data->ulv_supported)
		return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_EnableULV);

	return 0;
}

static int smu7_disable_ulv(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	if (data->ulv_supported)
		return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DisableULV);

	return 0;
}

static int smu7_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
{
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SclkDeepSleep)) {
		if (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_MASTER_DeepSleep_ON))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to enable Master Deep Sleep switch failed!",
					return -EINVAL);
	} else {
		if (smum_send_msg_to_smc(hwmgr->smumgr,
				PPSMC_MSG_MASTER_DeepSleep_OFF)) {
			PP_ASSERT_WITH_CODE(false,
					"Attempt to disable Master Deep Sleep switch failed!",
					return -EINVAL);
		}
	}

	return 0;
}

static int smu7_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
{
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SclkDeepSleep)) {
		if (smum_send_msg_to_smc(hwmgr->smumgr,
				PPSMC_MSG_MASTER_DeepSleep_OFF)) {
			PP_ASSERT_WITH_CODE(false,
					"Attempt to disable Master Deep Sleep switch failed!",
					return -EINVAL);
		}
	}

	return 0;
}

static int smu7_disable_handshake_uvd(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	uint32_t soft_register_value = 0;
	uint32_t handshake_disables_offset = data->soft_regs_start
				+ smum_get_offsetof(hwmgr->smumgr,
					SMU_SoftRegisters, HandshakeDisables);

	soft_register_value = cgs_read_ind_register(hwmgr->device,
				CGS_IND_REG__SMC, handshake_disables_offset);
	soft_register_value |= smum_get_mac_definition(hwmgr->smumgr,
					SMU_UVD_MCLK_HANDSHAKE_DISABLE);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			handshake_disables_offset, soft_register_value);
	return 0;
}

static int smu7_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	/* enable SCLK dpm */
	if (!data->sclk_dpm_key_disabled)
		PP_ASSERT_WITH_CODE(
		(0 == smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DPM_Enable)),
		"Failed to enable SCLK DPM during DPM Start Function!",
		return -EINVAL);

	/* enable MCLK dpm */
	if (0 == data->mclk_dpm_key_disabled) {
		if (!(hwmgr->feature_mask & PP_UVD_HANDSHAKE_MASK))
			smu7_disable_handshake_uvd(hwmgr);
		PP_ASSERT_WITH_CODE(
		(0 == smum_send_msg_to_smc(hwmgr->smumgr,
				PPSMC_MSG_MCLKDPM_Enable)),
		"Failed to enable MCLK DPM during DPM Start Function!",
		return -EINVAL);

		PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1);

		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x5);
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x5);
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x100005);
		udelay(10);
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x400005);
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x400005);
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x500005);
	}

	return 0;
}

static int smu7_start_dpm(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	/* enable general power management */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
			GLOBAL_PWRMGT_EN, 1);

	/* enable sclk deep sleep */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
			DYNAMIC_PM_EN, 1);

	/* prepare for PCIE DPM */
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			data->soft_regs_start +
			smum_get_offsetof(hwmgr->smumgr, SMU_SoftRegisters,
					VoltageChangeTimeout), 0x1000);
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
			SWRST_COMMAND_1, RESETLC, 0x0);

	PP_ASSERT_WITH_CODE(
			(0 == smum_send_msg_to_smc(hwmgr->smumgr,
					PPSMC_MSG_Voltage_Cntl_Enable)),
			"Failed to enable voltage DPM during DPM Start Function!",
			return -EINVAL);

	if (smu7_enable_sclk_mclk_dpm(hwmgr)) {
		printk(KERN_ERR "Failed to enable Sclk DPM and Mclk DPM!");
		return -EINVAL;
	}

	/* enable PCIE dpm */
	if (0 == data->pcie_dpm_key_disabled) {
		PP_ASSERT_WITH_CODE(
				(0 == smum_send_msg_to_smc(hwmgr->smumgr,
						PPSMC_MSG_PCIeDPM_Enable)),
				"Failed to enable pcie DPM during DPM Start Function!",
				return -EINVAL);
	}

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_Falcon_QuickTransition)) {
		PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(hwmgr->smumgr,
				PPSMC_MSG_EnableACDCGPIOInterrupt)),
				"Failed to enable AC DC GPIO Interrupt!",
				);
	}

	return 0;
}

static int smu7_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	/* disable SCLK dpm */
	if (!data->sclk_dpm_key_disabled)
		PP_ASSERT_WITH_CODE(
				(smum_send_msg_to_smc(hwmgr->smumgr,
						PPSMC_MSG_DPM_Disable) == 0),
				"Failed to disable SCLK DPM!",
				return -EINVAL);

	/* disable MCLK dpm */
	if (!data->mclk_dpm_key_disabled) {
		PP_ASSERT_WITH_CODE(
				(smum_send_msg_to_smc(hwmgr->smumgr,
						PPSMC_MSG_MCLKDPM_Disable) == 0),
				"Failed to disable MCLK DPM!",
				return -EINVAL);
	}

	return 0;
}

static int smu7_stop_dpm(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	/* disable general power management */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
			GLOBAL_PWRMGT_EN, 0);
	/* disable sclk deep sleep */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
			DYNAMIC_PM_EN, 0);

	/* disable PCIE dpm */
	if (!data->pcie_dpm_key_disabled) {
		PP_ASSERT_WITH_CODE(
				(smum_send_msg_to_smc(hwmgr->smumgr,
						PPSMC_MSG_PCIeDPM_Disable) == 0),
				"Failed to disable pcie DPM during DPM Stop Function!",
				return -EINVAL);
	}

	if (smu7_disable_sclk_mclk_dpm(hwmgr)) {
		printk(KERN_ERR "Failed to disable Sclk DPM and Mclk DPM!");
		return -EINVAL;
	}

	return 0;
}

static void smu7_set_dpm_event_sources(struct pp_hwmgr *hwmgr, uint32_t sources)
{
	bool protection;
	enum DPM_EVENT_SRC src;

	switch (sources) {
	default:
		printk(KERN_ERR "Unknown throttling event sources.");
		/* fall through */
	case 0:
		protection = false;
		/* src is unused */
		break;
	case (1 << PHM_AutoThrottleSource_Thermal):
		protection = true;
		src = DPM_EVENT_SRC_DIGITAL;
		break;
	case (1 << PHM_AutoThrottleSource_External):
		protection = true;
		src = DPM_EVENT_SRC_EXTERNAL;
		break;
	case (1 << PHM_AutoThrottleSource_External) |
			(1 << PHM_AutoThrottleSource_Thermal):
		protection = true;
		src = DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL;
		break;
	}
	/* Order matters - don't enable thermal protection for the wrong source. */
	if (protection) {
		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_CTRL,
				DPM_EVENT_SRC, src);
		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
				THERMAL_PROTECTION_DIS,
				!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
						PHM_PlatformCaps_ThermalController));
	} else
		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
				THERMAL_PROTECTION_DIS, 1);
}
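/*
 * Summary of the mapping above, for quick reference:
 *
 *	sources == 0		-> protection off, event source unused
 *	thermal only		-> DPM_EVENT_SRC_DIGITAL
 *	external only		-> DPM_EVENT_SRC_EXTERNAL
 *	thermal | external	-> DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL
 *
 * Unknown combinations log an error and fall through to the
 * protection-off case.
 */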

static int smu7_enable_auto_throttle_source(struct pp_hwmgr *hwmgr,
		PHM_AutoThrottleSource source)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	if (!(data->active_auto_throttle_sources & (1 << source))) {
		data->active_auto_throttle_sources |= 1 << source;
		smu7_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
	}
	return 0;
}

static int smu7_enable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
{
	return smu7_enable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
}

static int smu7_disable_auto_throttle_source(struct pp_hwmgr *hwmgr,
		PHM_AutoThrottleSource source)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	if (data->active_auto_throttle_sources & (1 << source)) {
		data->active_auto_throttle_sources &= ~(1 << source);
		smu7_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
	}
	return 0;
}

static int smu7_disable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
{
	return smu7_disable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
}

int smu7_pcie_performance_request(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	data->pcie_performance_request = true;

	return 0;
}

int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
	int tmp_result = 0;
	int result = 0;

	tmp_result = (!smum_is_dpm_running(hwmgr)) ? 0 : -1;
	PP_ASSERT_WITH_CODE(tmp_result == 0,
			"DPM is already running right now, no need to enable DPM!",
			return 0);

	if (smu7_voltage_control(hwmgr)) {
		tmp_result = smu7_enable_voltage_control(hwmgr);
		PP_ASSERT_WITH_CODE(tmp_result == 0,
				"Failed to enable voltage control!",
				result = tmp_result);

		tmp_result = smu7_construct_voltage_tables(hwmgr);
		PP_ASSERT_WITH_CODE((0 == tmp_result),
				"Failed to construct voltage tables!",
				result = tmp_result);
	}
	smum_initialize_mc_reg_table(hwmgr);

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_EngineSpreadSpectrumSupport))
		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
				GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 1);

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_ThermalController))
		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
				GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 0);

	tmp_result = smu7_program_static_screen_threshold_parameters(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to program static screen threshold parameters!",
			result = tmp_result);

	tmp_result = smu7_enable_display_gap(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to enable display gap!", result = tmp_result);

	tmp_result = smu7_program_voting_clients(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to program voting clients!", result = tmp_result);

	tmp_result = smum_process_firmware_header(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to process firmware header!", result = tmp_result);

	tmp_result = smu7_initial_switch_from_arbf0_to_f1(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to initialize switch from ArbF0 to F1!",
			result = tmp_result);

	result = smu7_setup_default_dpm_tables(hwmgr);
	PP_ASSERT_WITH_CODE(0 == result,
			"Failed to setup default DPM tables!", return result);

	tmp_result = smum_init_smc_table(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to initialize SMC table!", result = tmp_result);

	tmp_result = smu7_enable_vrhot_gpio_interrupt(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to enable VR hot GPIO interrupt!", result = tmp_result);

	smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_HasDisplay);

	tmp_result = smu7_enable_sclk_control(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to enable SCLK control!", result = tmp_result);

	tmp_result = smu7_enable_smc_voltage_controller(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to enable voltage control!", result = tmp_result);

	tmp_result = smu7_enable_ulv(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to enable ULV!", result = tmp_result);

	tmp_result = smu7_enable_deep_sleep_master_switch(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to enable deep sleep master switch!", result = tmp_result);

	tmp_result = smu7_enable_didt_config(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to enable DiDt config!", result = tmp_result);

	tmp_result = smu7_start_dpm(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to start DPM!", result = tmp_result);

	tmp_result = smu7_enable_smc_cac(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to enable SMC CAC!", result = tmp_result);

	tmp_result = smu7_enable_power_containment(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to enable power containment!", result = tmp_result);

	tmp_result = smu7_power_control_set_level(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to power control set level!", result = tmp_result);

	tmp_result = smu7_enable_thermal_auto_throttle(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to enable thermal auto throttle!", result = tmp_result);

	tmp_result = smu7_pcie_performance_request(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"pcie performance request failed!", result = tmp_result);

	return 0;
}

int smu7_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
	int tmp_result, result = 0;

	tmp_result = (smum_is_dpm_running(hwmgr)) ? 0 : -1;
	PP_ASSERT_WITH_CODE(tmp_result == 0,
			"DPM is not running right now, no need to disable DPM!",
			return 0);

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_ThermalController))
		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
				GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 1);

	tmp_result = smu7_disable_power_containment(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable power containment!", result = tmp_result);

	tmp_result = smu7_disable_smc_cac(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable SMC CAC!", result = tmp_result);

	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			CG_SPLL_SPREAD_SPECTRUM, SSEN, 0);
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 0);

	tmp_result = smu7_disable_thermal_auto_throttle(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable thermal auto throttle!", result = tmp_result);

	tmp_result = smu7_stop_dpm(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to stop DPM!", result = tmp_result);

	tmp_result = smu7_disable_deep_sleep_master_switch(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable deep sleep master switch!", result = tmp_result);

	tmp_result = smu7_disable_ulv(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable ULV!", result = tmp_result);

	tmp_result = smu7_clear_voting_clients(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to clear voting clients!", result = tmp_result);

	tmp_result = smu7_reset_to_default(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to reset to default!", result = tmp_result);

	tmp_result = smu7_force_switch_to_arbf0(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to force switch to ArbF0!", result = tmp_result);

	return result;
}

int smu7_reset_asic_tasks(struct pp_hwmgr *hwmgr)
{
	return 0;
}

static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);

	data->dll_default_on = false;
	data->mclk_dpm0_activity_target = 0xa;
	data->mclk_activity_target = SMU7_MCLK_TARGETACTIVITY_DFLT;
	data->vddc_vddgfx_delta = 300;
	data->static_screen_threshold = SMU7_STATICSCREENTHRESHOLD_DFLT;
	data->static_screen_threshold_unit = SMU7_STATICSCREENTHRESHOLDUNIT_DFLT;
	data->voting_rights_clients0 = SMU7_VOTINGRIGHTSCLIENTS_DFLT0;
	data->voting_rights_clients1 = SMU7_VOTINGRIGHTSCLIENTS_DFLT1;
	data->voting_rights_clients2 = SMU7_VOTINGRIGHTSCLIENTS_DFLT2;
	data->voting_rights_clients3 = SMU7_VOTINGRIGHTSCLIENTS_DFLT3;
	data->voting_rights_clients4 = SMU7_VOTINGRIGHTSCLIENTS_DFLT4;
	data->voting_rights_clients5 = SMU7_VOTINGRIGHTSCLIENTS_DFLT5;
	data->voting_rights_clients6 = SMU7_VOTINGRIGHTSCLIENTS_DFLT6;
	data->voting_rights_clients7 = SMU7_VOTINGRIGHTSCLIENTS_DFLT7;

	data->mclk_dpm_key_disabled = hwmgr->feature_mask & PP_MCLK_DPM_MASK ? false : true;
	data->sclk_dpm_key_disabled = hwmgr->feature_mask & PP_SCLK_DPM_MASK ? false : true;
	data->pcie_dpm_key_disabled = hwmgr->feature_mask & PP_PCIE_DPM_MASK ? false : true;

	/* need to set voltage control types before EVV patching */
	data->voltage_control = SMU7_VOLTAGE_CONTROL_NONE;
	data->vddci_control = SMU7_VOLTAGE_CONTROL_NONE;
	data->mvdd_control = SMU7_VOLTAGE_CONTROL_NONE;
	data->enable_tdc_limit_feature = true;
	data->enable_pkg_pwr_tracking_feature = true;
	data->force_pcie_gen = PP_PCIEGenInvalid;
	data->ulv_supported = hwmgr->feature_mask & PP_ULV_MASK ? true : false;

	data->fast_watermark_threshold = 100;
	if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
			VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
		data->voltage_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_ControlVDDGFX)) {
		if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
			VOLTAGE_TYPE_VDDGFX, VOLTAGE_OBJ_SVID2)) {
			data->vdd_gfx_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
		}
	}

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_EnableMVDDControl)) {
		if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
				VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
			data->mvdd_control = SMU7_VOLTAGE_CONTROL_BY_GPIO;
		else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
				VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2))
			data->mvdd_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
	}

	if (SMU7_VOLTAGE_CONTROL_NONE == data->vdd_gfx_control) {
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_ControlVDDGFX);
	}

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_ControlVDDCI)) {
		if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
				VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
			data->vddci_control = SMU7_VOLTAGE_CONTROL_BY_GPIO;
		else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
				VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
			data->vddci_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
	}

	if (data->mvdd_control == SMU7_VOLTAGE_CONTROL_NONE)
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_EnableMVDDControl);

	if (data->vddci_control == SMU7_VOLTAGE_CONTROL_NONE)
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_ControlVDDCI);

	if ((hwmgr->pp_table_version != PP_TABLE_V0)
		&& (table_info->cac_dtp_table->usClockStretchAmount != 0))
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_ClockStretcher);

	data->pcie_gen_performance.max = PP_PCIEGen1;
	data->pcie_gen_performance.min = PP_PCIEGen3;
	data->pcie_gen_power_saving.max = PP_PCIEGen1;
	data->pcie_gen_power_saving.min = PP_PCIEGen3;
	data->pcie_lane_performance.max = 0;
	data->pcie_lane_performance.min = 16;
	data->pcie_lane_power_saving.max = 0;
	data->pcie_lane_power_saving.min = 16;
}
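/*
 * Note on the seemingly inverted PCIe bounds above (an inference from
 * the min/max accumulation pattern, not documented behavior): the
 * ranges start at opposite extremes (e.g. lane min = 16, max = 0) so
 * that later per-state merging can only widen them monotonically.
 */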

/**
 * Get Leakage VDDC based on leakage ID.
 *
 * @param hwmgr the address of the powerplay hardware manager.
 * @return always 0
 */
static int smu7_get_evv_voltages(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	uint16_t vv_id;
	uint16_t vddc = 0;
	uint16_t vddgfx = 0;
	uint16_t i, j;
	uint32_t sclk = 0;
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)hwmgr->pptable;
	struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = NULL;

	if (table_info != NULL)
		sclk_table = table_info->vdd_dep_on_sclk;

	for (i = 0; i < SMU7_MAX_LEAKAGE_COUNT; i++) {
		vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;

		if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
			if (0 == phm_get_sclk_for_voltage_evv(hwmgr,
						table_info->vddgfx_lookup_table, vv_id, &sclk)) {
				if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
							PHM_PlatformCaps_ClockStretcher)) {
					for (j = 1; j < sclk_table->count; j++) {
						if (sclk_table->entries[j].clk == sclk &&
								sclk_table->entries[j].cks_enable == 0) {
							sclk += 5000;
							break;
						}
					}
				}
				if (0 == atomctrl_get_voltage_evv_on_sclk
						(hwmgr, VOLTAGE_TYPE_VDDGFX, sclk,
						 vv_id, &vddgfx)) {
					/* need to make sure vddgfx is less than 2V, or else it could burn the ASIC. */
					PP_ASSERT_WITH_CODE((vddgfx < 2000 && vddgfx != 0), "Invalid VDDGFX value!", return -EINVAL);

					/* the voltage should not be zero nor equal to the leakage ID */
					if (vddgfx != 0 && vddgfx != vv_id) {
						data->vddcgfx_leakage.actual_voltage[data->vddcgfx_leakage.count] = vddgfx;
						data->vddcgfx_leakage.leakage_id[data->vddcgfx_leakage.count] = vv_id;
						data->vddcgfx_leakage.count++;
					}
				} else {
					printk("Error retrieving EVV voltage value!\n");
				}
			}
		} else {
			if ((hwmgr->pp_table_version == PP_TABLE_V0)
				|| !phm_get_sclk_for_voltage_evv(hwmgr,
					table_info->vddc_lookup_table, vv_id, &sclk)) {
				if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
						PHM_PlatformCaps_ClockStretcher)) {
					for (j = 1; j < sclk_table->count; j++) {
						if (sclk_table->entries[j].clk == sclk &&
								sclk_table->entries[j].cks_enable == 0) {
							sclk += 5000;
							break;
						}
					}
				}

				if (phm_get_voltage_evv_on_sclk(hwmgr,
							VOLTAGE_TYPE_VDDC,
							sclk, vv_id, &vddc) == 0) {
					if (vddc >= 2000 || vddc == 0)
						return -EINVAL;
				} else {
					printk(KERN_WARNING "failed to retrieve EVV voltage!\n");
					continue;
				}

				/* the voltage should not be zero nor equal to the leakage ID */
				if (vddc != 0 && vddc != vv_id) {
					data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = (uint16_t)(vddc);
					data->vddc_leakage.leakage_id[data->vddc_leakage.count] = vv_id;
					data->vddc_leakage.count++;
				}
			}
		}
	}

	return 0;
}

/**
 * Change virtual leakage voltage to actual value.
 *
 * @param hwmgr the address of the powerplay hardware manager.
 * @param voltage pointer to the voltage to be patched
 * @param leakage_table pointer to the leakage table
 */
static void smu7_patch_ppt_v1_with_vdd_leakage(struct pp_hwmgr *hwmgr,
		uint16_t *voltage, struct smu7_leakage_voltage *leakage_table)
{
	uint32_t index;

	/* search for leakage voltage ID 0xff01 ~ 0xff08 */
	for (index = 0; index < leakage_table->count; index++) {
		/* if this voltage matches a leakage voltage ID,
		 * patch with the actual leakage voltage
		 */
		if (leakage_table->leakage_id[index] == *voltage) {
			*voltage = leakage_table->actual_voltage[index];
			break;
		}
	}

	if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
		printk(KERN_ERR "Voltage value looks like a Leakage ID but it's not patched\n");
}
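/*
 * Example of the patching above: virtual leakage IDs occupy
 * 0xff01..0xff08 (ATOM_VIRTUAL_VOLTAGE_ID0 onward), so an entry whose
 * voltage reads 0xff03 is replaced with the matching actual_voltage[]
 * recorded during the EVV pass; a value still above
 * ATOM_VIRTUAL_VOLTAGE_ID0 afterwards means no match was found, hence
 * the error message.
 */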
1554
1555/**
1556* Patch voltage lookup table by EVV leakages.
1557*
1558* @param hwmgr the address of the powerplay hardware manager.
1559* @param pointer to voltage lookup table
1560* @param pointer to leakage table
1561* @return always 0
1562*/
1563static int smu7_patch_lookup_table_with_leakage(struct pp_hwmgr *hwmgr,
1564 phm_ppt_v1_voltage_lookup_table *lookup_table,
1565 struct smu7_leakage_voltage *leakage_table)
1566{
1567 uint32_t i;
1568
1569 for (i = 0; i < lookup_table->count; i++)
1570 smu7_patch_ppt_v1_with_vdd_leakage(hwmgr,
1571 &lookup_table->entries[i].us_vdd, leakage_table);
1572
1573 return 0;
1574}
1575
1576static int smu7_patch_clock_voltage_limits_with_vddc_leakage(
1577 struct pp_hwmgr *hwmgr, struct smu7_leakage_voltage *leakage_table,
1578 uint16_t *vddc)
1579{
1580 struct phm_ppt_v1_information *table_info =
1581 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1582 smu7_patch_ppt_v1_with_vdd_leakage(hwmgr, (uint16_t *)vddc, leakage_table);
1583 hwmgr->dyn_state.max_clock_voltage_on_dc.vddc =
1584 table_info->max_clock_voltage_on_dc.vddc;
1585 return 0;
1586}
1587
1588static int smu7_patch_voltage_dependency_tables_with_lookup_table(
1589 struct pp_hwmgr *hwmgr)
1590{
1591 uint8_t entry_id;
1592 uint8_t voltage_id;
1593 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1594 struct phm_ppt_v1_information *table_info =
1595 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1596
1597 struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
1598 table_info->vdd_dep_on_sclk;
1599 struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table =
1600 table_info->vdd_dep_on_mclk;
1601 struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
1602 table_info->mm_dep_table;
1603
1604 if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
1605 for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) {
1606 voltage_id = sclk_table->entries[entry_id].vddInd;
1607 sclk_table->entries[entry_id].vddgfx =
1608 table_info->vddgfx_lookup_table->entries[voltage_id].us_vdd;
1609 }
1610 } else {
1611 for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) {
1612 voltage_id = sclk_table->entries[entry_id].vddInd;
1613 sclk_table->entries[entry_id].vddc =
1614 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
1615 }
1616 }
1617
1618 for (entry_id = 0; entry_id < mclk_table->count; ++entry_id) {
1619 voltage_id = mclk_table->entries[entry_id].vddInd;
1620 mclk_table->entries[entry_id].vddc =
1621 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
1622 }
1623
1624 for (entry_id = 0; entry_id < mm_table->count; ++entry_id) {
1625 voltage_id = mm_table->entries[entry_id].vddcInd;
1626 mm_table->entries[entry_id].vddc =
1627 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
1628 }
1629
1630 return 0;
1631
1632}
1633
1634static int phm_add_voltage(struct pp_hwmgr *hwmgr,
1635 phm_ppt_v1_voltage_lookup_table *look_up_table,
1636 phm_ppt_v1_voltage_lookup_record *record)
1637{
1638 uint32_t i;
1639
1640 PP_ASSERT_WITH_CODE((NULL != look_up_table),
1641 "Lookup Table is missing.", return -EINVAL);
1642 PP_ASSERT_WITH_CODE((0 != look_up_table->count),
1643 "Lookup Table is empty.", return -EINVAL);
1644
1645 i = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_VDDGFX);
1646 PP_ASSERT_WITH_CODE((i >= look_up_table->count),
1647 "Lookup Table is full.", return -EINVAL);
1648
1649 /* This is to avoid entering duplicate calculated records. */
1650 for (i = 0; i < look_up_table->count; i++) {
1651 if (look_up_table->entries[i].us_vdd == record->us_vdd) {
1652 if (look_up_table->entries[i].us_calculated == 1)
1653 return 0;
1654 break;
1655 }
1656 }
1657
1658 look_up_table->entries[i].us_calculated = 1;
1659 look_up_table->entries[i].us_vdd = record->us_vdd;
1660 look_up_table->entries[i].us_cac_low = record->us_cac_low;
1661 look_up_table->entries[i].us_cac_mid = record->us_cac_mid;
1662 look_up_table->entries[i].us_cac_high = record->us_cac_high;
1663 /* Only increment the count when we're appending, not replacing duplicate entry. */
1664 if (i == look_up_table->count)
1665 look_up_table->count++;
1666
1667 return 0;
1668}
1669
1670
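/* The 16-bit vdd_offset fields below encode signed deltas: bit 15 set marks
 * a negative offset, which is folded back by subtracting 0xFFFF after the
 * unsigned addition. E.g. (hypothetical values) vddgfx = 1000 with
 * vdd_offset = 0xFFF6 yields 1000 + 0xFFF6 - 0xFFFF = 991.
 */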
1671static int smu7_calc_voltage_dependency_tables(struct pp_hwmgr *hwmgr)
1672{
1673 uint8_t entry_id;
1674 struct phm_ppt_v1_voltage_lookup_record v_record;
1675 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1676 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
1677
1678 phm_ppt_v1_clock_voltage_dependency_table *sclk_table = pptable_info->vdd_dep_on_sclk;
1679 phm_ppt_v1_clock_voltage_dependency_table *mclk_table = pptable_info->vdd_dep_on_mclk;
1680
1681 if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
1682 for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) {
1683 if (sclk_table->entries[entry_id].vdd_offset & (1 << 15))
1684 v_record.us_vdd = sclk_table->entries[entry_id].vddgfx +
1685 sclk_table->entries[entry_id].vdd_offset - 0xFFFF;
1686 else
1687 v_record.us_vdd = sclk_table->entries[entry_id].vddgfx +
1688 sclk_table->entries[entry_id].vdd_offset;
1689
1690 sclk_table->entries[entry_id].vddc =
1691 v_record.us_cac_low = v_record.us_cac_mid =
1692 v_record.us_cac_high = v_record.us_vdd;
1693
1694 phm_add_voltage(hwmgr, pptable_info->vddc_lookup_table, &v_record);
1695 }
1696
1697 for (entry_id = 0; entry_id < mclk_table->count; ++entry_id) {
1698 if (mclk_table->entries[entry_id].vdd_offset & (1 << 15))
1699 v_record.us_vdd = mclk_table->entries[entry_id].vddc +
1700 mclk_table->entries[entry_id].vdd_offset - 0xFFFF;
1701 else
1702 v_record.us_vdd = mclk_table->entries[entry_id].vddc +
1703 mclk_table->entries[entry_id].vdd_offset;
1704
1705 mclk_table->entries[entry_id].vddgfx = v_record.us_cac_low =
1706 v_record.us_cac_mid = v_record.us_cac_high = v_record.us_vdd;
1707 phm_add_voltage(hwmgr, pptable_info->vddgfx_lookup_table, &v_record);
1708 }
1709 }
1710 return 0;
1711}
1712
1713static int smu7_calc_mm_voltage_dependency_table(struct pp_hwmgr *hwmgr)
1714{
1715 uint8_t entry_id;
1716 struct phm_ppt_v1_voltage_lookup_record v_record;
1717 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1718 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
1719 phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = pptable_info->mm_dep_table;
1720
1721 if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
1722 for (entry_id = 0; entry_id < mm_table->count; entry_id++) {
1723 if (mm_table->entries[entry_id].vddgfx_offset & (1 << 15))
1724 v_record.us_vdd = mm_table->entries[entry_id].vddc +
1725 mm_table->entries[entry_id].vddgfx_offset - 0xFFFF;
1726 else
1727 v_record.us_vdd = mm_table->entries[entry_id].vddc +
1728 mm_table->entries[entry_id].vddgfx_offset;
1729
1730 /* Add the calculated VDDGFX to the VDDGFX lookup table */
1731 mm_table->entries[entry_id].vddgfx = v_record.us_cac_low =
1732 v_record.us_cac_mid = v_record.us_cac_high = v_record.us_vdd;
1733 phm_add_voltage(hwmgr, pptable_info->vddgfx_lookup_table, &v_record);
1734 }
1735 }
1736 return 0;
1737}
1738
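/* In-place insertion sort, ordering entries by ascending us_vdd. Voltage
 * lookup tables hold only a handful of levels, so the O(n^2) cost is
 * irrelevant here.
 */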
1739static int smu7_sort_lookup_table(struct pp_hwmgr *hwmgr,
1740 struct phm_ppt_v1_voltage_lookup_table *lookup_table)
1741{
1742 uint32_t table_size, i, j;
1743 struct phm_ppt_v1_voltage_lookup_record tmp_voltage_lookup_record;
1744 table_size = lookup_table->count;
1745
1746 PP_ASSERT_WITH_CODE(0 != lookup_table->count,
1747 "Lookup table is empty", return -EINVAL);
1748
1749 /* Sorting voltages */
1750 for (i = 0; i < table_size - 1; i++) {
1751 for (j = i + 1; j > 0; j--) {
1752 if (lookup_table->entries[j].us_vdd <
1753 lookup_table->entries[j - 1].us_vdd) {
1754 tmp_voltage_lookup_record = lookup_table->entries[j - 1];
1755 lookup_table->entries[j - 1] = lookup_table->entries[j];
1756 lookup_table->entries[j] = tmp_voltage_lookup_record;
1757 }
1758 }
1759 }
1760
1761 return 0;
1762}
1763
1764static int smu7_complete_dependency_tables(struct pp_hwmgr *hwmgr)
1765{
1766 int result = 0;
1767 int tmp_result;
1768 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1769 struct phm_ppt_v1_information *table_info =
1770 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1771
1772 if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
1773 tmp_result = smu7_patch_lookup_table_with_leakage(hwmgr,
1774 table_info->vddgfx_lookup_table, &(data->vddcgfx_leakage));
1775 if (tmp_result != 0)
1776 result = tmp_result;
1777
1778 smu7_patch_ppt_v1_with_vdd_leakage(hwmgr,
1779 &table_info->max_clock_voltage_on_dc.vddgfx, &(data->vddcgfx_leakage));
1780 } else {
1781
1782 tmp_result = smu7_patch_lookup_table_with_leakage(hwmgr,
1783 table_info->vddc_lookup_table, &(data->vddc_leakage));
1784 if (tmp_result)
1785 result = tmp_result;
1786
1787 tmp_result = smu7_patch_clock_voltage_limits_with_vddc_leakage(hwmgr,
1788 &(data->vddc_leakage), &table_info->max_clock_voltage_on_dc.vddc);
1789 if (tmp_result)
1790 result = tmp_result;
1791 }
1792
1793 tmp_result = smu7_patch_voltage_dependency_tables_with_lookup_table(hwmgr);
1794 if (tmp_result)
1795 result = tmp_result;
1796
1797 tmp_result = smu7_calc_voltage_dependency_tables(hwmgr);
1798 if (tmp_result)
1799 result = tmp_result;
1800
1801 tmp_result = smu7_calc_mm_voltage_dependency_table(hwmgr);
1802 if (tmp_result)
1803 result = tmp_result;
1804
1805 tmp_result = smu7_sort_lookup_table(hwmgr, table_info->vddgfx_lookup_table);
1806 if (tmp_result)
1807 result = tmp_result;
1808
1809 tmp_result = smu7_sort_lookup_table(hwmgr, table_info->vddc_lookup_table);
1810 if (tmp_result)
1811 result = tmp_result;
1812
1813 return result;
1814}
1815
1816static int smu7_set_private_data_based_on_pptable_v1(struct pp_hwmgr *hwmgr)
1817{
1818 struct phm_ppt_v1_information *table_info =
1819 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1820
1821 struct phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table =
1822 table_info->vdd_dep_on_sclk;
1823 struct phm_ppt_v1_clock_voltage_dependency_table *allowed_mclk_vdd_table =
1824 table_info->vdd_dep_on_mclk;
1825
1826 PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table != NULL,
1827 "VDD dependency on SCLK table is missing.",
1828 return -EINVAL);
1829 PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1,
1830 "VDD dependency on SCLK table has to have is missing.",
1831 return -EINVAL);
1832
1833 PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table != NULL,
1834 "VDD dependency on MCLK table is missing",
1835 return -EINVAL);
1836 PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1,
1837 "VDD dependency on MCLK table has to have is missing.",
1838 return -EINVAL);
1839
1840 table_info->max_clock_voltage_on_ac.sclk =
1841 allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].clk;
1842 table_info->max_clock_voltage_on_ac.mclk =
1843 allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].clk;
1844 table_info->max_clock_voltage_on_ac.vddc =
1845 allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc;
1846 table_info->max_clock_voltage_on_ac.vddci =
1847 allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].vddci;
1848
1849 hwmgr->dyn_state.max_clock_voltage_on_ac.sclk = table_info->max_clock_voltage_on_ac.sclk;
1850 hwmgr->dyn_state.max_clock_voltage_on_ac.mclk = table_info->max_clock_voltage_on_ac.mclk;
1851 hwmgr->dyn_state.max_clock_voltage_on_ac.vddc = table_info->max_clock_voltage_on_ac.vddc;
1852 hwmgr->dyn_state.max_clock_voltage_on_ac.vddci = table_info->max_clock_voltage_on_ac.vddci;
1853
1854 return 0;
1855}
1856
1857int smu7_patch_voltage_workaround(struct pp_hwmgr *hwmgr)
1858{
1859 struct phm_ppt_v1_information *table_info =
1860 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1861 struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
1862 struct phm_ppt_v1_voltage_lookup_table *lookup_table;
1863 uint32_t i;
1864 uint32_t hw_revision, sub_vendor_id, sub_sys_id;
1865 struct cgs_system_info sys_info = {0};
1866
1867 if (table_info != NULL) {
1868 dep_mclk_table = table_info->vdd_dep_on_mclk;
1869 lookup_table = table_info->vddc_lookup_table;
1870 } else
1871 return 0;
1872
1873 sys_info.size = sizeof(struct cgs_system_info);
1874
1875 sys_info.info_id = CGS_SYSTEM_INFO_PCIE_REV;
1876 cgs_query_system_info(hwmgr->device, &sys_info);
1877 hw_revision = (uint32_t)sys_info.value;
1878
1879 sys_info.info_id = CGS_SYSTEM_INFO_PCIE_SUB_SYS_ID;
1880 cgs_query_system_info(hwmgr->device, &sys_info);
1881 sub_sys_id = (uint32_t)sys_info.value;
1882
1883 sys_info.info_id = CGS_SYSTEM_INFO_PCIE_SUB_SYS_VENDOR_ID;
1884 cgs_query_system_info(hwmgr->device, &sys_info);
1885 sub_vendor_id = (uint32_t)sys_info.value;
1886
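 /* A few Polaris10 (rev C7) boards, identified by PCI subsystem IDs,
  * need the top MCLK level to run at 1000 mV or more: if the current
  * top entry sits below that, repoint it at the first lookup entry
  * that is >= 1000 mV and not a leakage ID (< 0xff01).
  */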
1887 if (hwmgr->chip_id == CHIP_POLARIS10 && hw_revision == 0xC7 &&
1888 ((sub_sys_id == 0xb37 && sub_vendor_id == 0x1002) ||
1889 (sub_sys_id == 0x4a8 && sub_vendor_id == 0x1043) ||
1890 (sub_sys_id == 0x9480 && sub_vendor_id == 0x1682))) {
1891 if (lookup_table->entries[dep_mclk_table->entries[dep_mclk_table->count-1].vddInd].us_vdd >= 1000)
1892 return 0;
1893
1894 for (i = 0; i < lookup_table->count; i++) {
1895 if (lookup_table->entries[i].us_vdd < 0xff01 && lookup_table->entries[i].us_vdd >= 1000) {
1896 dep_mclk_table->entries[dep_mclk_table->count-1].vddInd = (uint8_t) i;
1897 return 0;
1898 }
1899 }
1900 }
1901 return 0;
1902}
1903
1904static int smu7_thermal_parameter_init(struct pp_hwmgr *hwmgr)
1905{
1906 struct pp_atomctrl_gpio_pin_assignment gpio_pin_assignment;
1907 uint32_t temp_reg;
1908 struct phm_ppt_v1_information *table_info =
1909 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1910
1911
1912 if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_PCC_GPIO_PINID, &gpio_pin_assignment)) {
1913 temp_reg = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL);
1914 switch (gpio_pin_assignment.uc_gpio_pin_bit_shift) {
1915 case 0:
1916 temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x1);
1917 break;
1918 case 1:
1919 temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x2);
1920 break;
1921 case 2:
1922 temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW, 0x1);
1923 break;
1924 case 3:
1925 temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, FORCE_NB_PS1, 0x1);
1926 break;
1927 case 4:
1928 temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, DPM_ENABLED, 0x1);
1929 break;
1930 default:
1931 PP_ASSERT_WITH_CODE(0,
1932 "Failed to setup PCC HW register! Wrong GPIO assigned for VDDC_PCC_GPIO_PINID!",
1933 );
1934 break;
1935 }
1936 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL, temp_reg);
1937 }
1938
1939 if (table_info == NULL)
1940 return 0;
1941
1942 if (table_info->cac_dtp_table->usDefaultTargetOperatingTemp != 0 &&
1943 hwmgr->thermal_controller.advanceFanControlParameters.ucFanControlMode) {
1944 hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMinLimit =
1945 (uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit;
1946
1947 hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMaxLimit =
1948 (uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM;
1949
1950 hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMStep = 1;
1951
1952 hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMaxLimit = 100;
1953
1954 hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMinLimit =
1955 (uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit;
1956
1957 hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMStep = 1;
1958
1959 table_info->cac_dtp_table->usDefaultTargetOperatingTemp = (table_info->cac_dtp_table->usDefaultTargetOperatingTemp >= 50) ?
1960 (table_info->cac_dtp_table->usDefaultTargetOperatingTemp - 50) : 0;
1961
1962 table_info->cac_dtp_table->usOperatingTempMaxLimit = table_info->cac_dtp_table->usDefaultTargetOperatingTemp;
1963 table_info->cac_dtp_table->usOperatingTempStep = 1;
1964 table_info->cac_dtp_table->usOperatingTempHyst = 1;
1965
1966 hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanPWM =
1967 hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM;
1968
1969 hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM =
1970 hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanRPM;
1971
1972 hwmgr->dyn_state.cac_dtp_table->usOperatingTempMinLimit =
1973 table_info->cac_dtp_table->usOperatingTempMinLimit;
1974
1975 hwmgr->dyn_state.cac_dtp_table->usOperatingTempMaxLimit =
1976 table_info->cac_dtp_table->usOperatingTempMaxLimit;
1977
1978 hwmgr->dyn_state.cac_dtp_table->usDefaultTargetOperatingTemp =
1979 table_info->cac_dtp_table->usDefaultTargetOperatingTemp;
1980
1981 hwmgr->dyn_state.cac_dtp_table->usOperatingTempStep =
1982 table_info->cac_dtp_table->usOperatingTempStep;
1983
1984 hwmgr->dyn_state.cac_dtp_table->usTargetOperatingTemp =
1985 table_info->cac_dtp_table->usTargetOperatingTemp;
1986 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
1987 PHM_PlatformCaps_ODFuzzyFanControlSupport);
1988 }
1989
1990 return 0;
1991}
1992
1993/**
1994 * Change virtual leakage voltage to actual value.
1995 *
1996 * @param hwmgr the address of the powerplay hardware manager.
1997 * @param voltage pointer to the voltage value to patch
1998 * @param leakage_table pointer to the leakage table
1999 */
2000static void smu7_patch_ppt_v0_with_vdd_leakage(struct pp_hwmgr *hwmgr,
2001 uint32_t *voltage, struct smu7_leakage_voltage *leakage_table)
2002{
2003 uint32_t index;
2004
2005 /* search for leakage voltage ID 0xff01 ~ 0xff08 */
2006 for (index = 0; index < leakage_table->count; index++) {
2007 /* if this voltage matches a leakage voltage ID */
2008 /* patch with actual leakage voltage */
2009 if (leakage_table->leakage_id[index] == *voltage) {
2010 *voltage = leakage_table->actual_voltage[index];
2011 break;
2012 }
2013 }
2014
2015 if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
2016 printk(KERN_ERR "Voltage value looks like a leakage ID but it's not patched\n");
2017}
2018
2019
2020static int smu7_patch_vddc(struct pp_hwmgr *hwmgr,
2021 struct phm_clock_voltage_dependency_table *tab)
2022{
2023 uint16_t i;
2024 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2025
2026 if (tab)
2027 for (i = 0; i < tab->count; i++)
2028 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2029 &data->vddc_leakage);
2030
2031 return 0;
2032}
2033
2034static int smu7_patch_vddci(struct pp_hwmgr *hwmgr,
2035 struct phm_clock_voltage_dependency_table *tab)
2036{
2037 uint16_t i;
2038 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2039
2040 if (tab)
2041 for (i = 0; i < tab->count; i++)
2042 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2043 &data->vddci_leakage);
2044
2045 return 0;
2046}
2047
2048static int smu7_patch_vce_vddc(struct pp_hwmgr *hwmgr,
2049 struct phm_vce_clock_voltage_dependency_table *tab)
2050{
2051 uint16_t i;
2052 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2053
2054 if (tab)
2055 for (i = 0; i < tab->count; i++)
2056 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2057 &data->vddc_leakage);
2058
2059 return 0;
2060}
2061
2062
2063static int smu7_patch_uvd_vddc(struct pp_hwmgr *hwmgr,
2064 struct phm_uvd_clock_voltage_dependency_table *tab)
2065{
2066 uint16_t i;
2067 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2068
2069 if (tab)
2070 for (i = 0; i < tab->count; i++)
2071 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2072 &data->vddc_leakage);
2073
2074 return 0;
2075}
2076
2077static int smu7_patch_vddc_shed_limit(struct pp_hwmgr *hwmgr,
2078 struct phm_phase_shedding_limits_table *tab)
2079{
2080 uint16_t i;
2081 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2082
2083 if (tab)
2084 for (i = 0; i < tab->count; i++)
2085 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].Voltage,
2086 &data->vddc_leakage);
2087
2088 return 0;
2089}
2090
2091static int smu7_patch_samu_vddc(struct pp_hwmgr *hwmgr,
2092 struct phm_samu_clock_voltage_dependency_table *tab)
2093{
2094 uint16_t i;
2095 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2096
2097 if (tab)
2098 for (i = 0; i < tab->count; i++)
2099 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2100 &data->vddc_leakage);
2101
2102 return 0;
2103}
2104
2105static int smu7_patch_acp_vddc(struct pp_hwmgr *hwmgr,
2106 struct phm_acp_clock_voltage_dependency_table *tab)
2107{
2108 uint16_t i;
2109 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2110
2111 if (tab)
2112 for (i = 0; i < tab->count; i++)
2113 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2114 &data->vddc_leakage);
2115
2116 return 0;
2117}
2118
2119static int smu7_patch_limits_vddc(struct pp_hwmgr *hwmgr,
2120 struct phm_clock_and_voltage_limits *tab)
2121{
2122 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2123
2124 if (tab) {
2125 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, (uint32_t *)&tab->vddc,
2126 &data->vddc_leakage);
2127 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, (uint32_t *)&tab->vddci,
2128 &data->vddci_leakage);
2129 }
2130
2131 return 0;
2132}
2133
2134static int smu7_patch_cac_vddc(struct pp_hwmgr *hwmgr, struct phm_cac_leakage_table *tab)
2135{
2136 uint32_t i;
2137 uint32_t vddc;
2138 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2139
2140 if (tab) {
2141 for (i = 0; i < tab->count; i++) {
2142 vddc = (uint32_t)(tab->entries[i].Vddc);
2143 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddc, &data->vddc_leakage);
2144 tab->entries[i].Vddc = (uint16_t)vddc;
2145 }
2146 }
2147
2148 return 0;
2149}
2150
2151static int smu7_patch_dependency_tables_with_leakage(struct pp_hwmgr *hwmgr)
2152{
2153 int tmp;
2154
2155 tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dependency_on_sclk);
2156 if (tmp)
2157 return -EINVAL;
2158
2159 tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dependency_on_mclk);
2160 if (tmp)
2161 return -EINVAL;
2162
2163 tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
2164 if (tmp)
2165 return -EINVAL;
2166
2167 tmp = smu7_patch_vddci(hwmgr, hwmgr->dyn_state.vddci_dependency_on_mclk);
2168 if (tmp)
2169 return -EINVAL;
2170
2171 tmp = smu7_patch_vce_vddc(hwmgr, hwmgr->dyn_state.vce_clock_voltage_dependency_table);
2172 if (tmp)
2173 return -EINVAL;
2174
2175 tmp = smu7_patch_uvd_vddc(hwmgr, hwmgr->dyn_state.uvd_clock_voltage_dependency_table);
2176 if (tmp)
2177 return -EINVAL;
2178
2179 tmp = smu7_patch_samu_vddc(hwmgr, hwmgr->dyn_state.samu_clock_voltage_dependency_table);
2180 if (tmp)
2181 return -EINVAL;
2182
2183 tmp = smu7_patch_acp_vddc(hwmgr, hwmgr->dyn_state.acp_clock_voltage_dependency_table);
2184 if (tmp)
2185 return -EINVAL;
2186
2187 tmp = smu7_patch_vddc_shed_limit(hwmgr, hwmgr->dyn_state.vddc_phase_shed_limits_table);
2188 if (tmp)
2189 return -EINVAL;
2190
2191 tmp = smu7_patch_limits_vddc(hwmgr, &hwmgr->dyn_state.max_clock_voltage_on_ac);
2192 if (tmp)
2193 return -EINVAL;
2194
2195 tmp = smu7_patch_limits_vddc(hwmgr, &hwmgr->dyn_state.max_clock_voltage_on_dc);
2196 if (tmp)
2197 return -EINVAL;
2198
2199 tmp = smu7_patch_cac_vddc(hwmgr, hwmgr->dyn_state.cac_leakage_table);
2200 if (tmp)
2201 return -EINVAL;
2202
2203 return 0;
2204}
2205
2206
2207static int smu7_set_private_data_based_on_pptable_v0(struct pp_hwmgr *hwmgr)
2208{
2209 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2210
2211 struct phm_clock_voltage_dependency_table *allowed_sclk_vddc_table = hwmgr->dyn_state.vddc_dependency_on_sclk;
2212 struct phm_clock_voltage_dependency_table *allowed_mclk_vddc_table = hwmgr->dyn_state.vddc_dependency_on_mclk;
2213 struct phm_clock_voltage_dependency_table *allowed_mclk_vddci_table = hwmgr->dyn_state.vddci_dependency_on_mclk;
2214
2215 PP_ASSERT_WITH_CODE(allowed_sclk_vddc_table != NULL,
2216 "VDDC dependency on SCLK table is missing. This table is mandatory\n", return -EINVAL);
2217 PP_ASSERT_WITH_CODE(allowed_sclk_vddc_table->count >= 1,
2218 "VDDC dependency on SCLK table has to have is missing. This table is mandatory\n", return -EINVAL);
2219
2220 PP_ASSERT_WITH_CODE(allowed_mclk_vddc_table != NULL,
2221 "VDDC dependency on MCLK table is missing. This table is mandatory\n", return -EINVAL);
2222 PP_ASSERT_WITH_CODE(allowed_mclk_vddc_table->count >= 1,
2223 "VDD dependency on MCLK table has to have is missing. This table is mandatory\n", return -EINVAL);
2224
2225 data->min_vddc_in_pptable = (uint16_t)allowed_sclk_vddc_table->entries[0].v;
2226 data->max_vddc_in_pptable = (uint16_t)allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
2227
2228 hwmgr->dyn_state.max_clock_voltage_on_ac.sclk =
2229 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
2230 hwmgr->dyn_state.max_clock_voltage_on_ac.mclk =
2231 allowed_mclk_vddc_table->entries[allowed_mclk_vddc_table->count - 1].clk;
2232 hwmgr->dyn_state.max_clock_voltage_on_ac.vddc =
2233 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
2234
2235 if (allowed_mclk_vddci_table != NULL && allowed_mclk_vddci_table->count >= 1) {
2236 data->min_vddci_in_pptable = (uint16_t)allowed_mclk_vddci_table->entries[0].v;
2237 data->max_vddci_in_pptable = (uint16_t)allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
2238 }
2239
2240 if (hwmgr->dyn_state.vddci_dependency_on_mclk != NULL && hwmgr->dyn_state.vddci_dependency_on_mclk->count > 1)
2241 hwmgr->dyn_state.max_clock_voltage_on_ac.vddci = hwmgr->dyn_state.vddci_dependency_on_mclk->entries[hwmgr->dyn_state.vddci_dependency_on_mclk->count - 1].v;
2242
2243 return 0;
2244}
2245
2246int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
2247{
2248 struct smu7_hwmgr *data;
2249 int result;
2250
2251 data = kzalloc(sizeof(struct smu7_hwmgr), GFP_KERNEL);
2252 if (data == NULL)
2253 return -ENOMEM;
2254
2255 hwmgr->backend = data;
2256
2257 smu7_patch_voltage_workaround(hwmgr);
2258 smu7_init_dpm_defaults(hwmgr);
2259
2260 /* Get leakage voltage based on leakage ID. */
2261 result = smu7_get_evv_voltages(hwmgr);
2262
2263 if (result) {
2264 printk("Get EVV Voltage Failed. Abort Driver loading!\n");
2265 return -EINVAL;
2266 }
2267
2268 if (hwmgr->pp_table_version == PP_TABLE_V1) {
2269 smu7_complete_dependency_tables(hwmgr);
2270 smu7_set_private_data_based_on_pptable_v1(hwmgr);
2271 } else if (hwmgr->pp_table_version == PP_TABLE_V0) {
2272 smu7_patch_dependency_tables_with_leakage(hwmgr);
2273 smu7_set_private_data_based_on_pptable_v0(hwmgr);
2274 }
2275
2276 /* Initialize Dynamic State Adjustment Rule Settings */
2277 result = phm_initializa_dynamic_state_adjustment_rule_settings(hwmgr);
2278
2279 if (0 == result) {
2280 struct cgs_system_info sys_info = {0};
2281
2282 data->is_tlu_enabled = false;
2283
2284 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
2285 SMU7_MAX_HARDWARE_POWERLEVELS;
2286 hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
2287 hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
2288
2289 sys_info.size = sizeof(struct cgs_system_info);
2290 sys_info.info_id = CGS_SYSTEM_INFO_PCIE_GEN_INFO;
2291 result = cgs_query_system_info(hwmgr->device, &sys_info);
2292 if (result)
2293 data->pcie_gen_cap = AMDGPU_DEFAULT_PCIE_GEN_MASK;
2294 else
2295 data->pcie_gen_cap = (uint32_t)sys_info.value;
2296 if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
2297 data->pcie_spc_cap = 20;
2298 sys_info.size = sizeof(struct cgs_system_info);
2299 sys_info.info_id = CGS_SYSTEM_INFO_PCIE_MLW;
2300 result = cgs_query_system_info(hwmgr->device, &sys_info);
2301 if (result)
2302 data->pcie_lane_cap = AMDGPU_DEFAULT_PCIE_MLW_MASK;
2303 else
2304 data->pcie_lane_cap = (uint32_t)sys_info.value;
2305
2306 hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */
2307 /* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5 MHz (500 in 10 kHz units). */
2308 hwmgr->platform_descriptor.clockStep.engineClock = 500;
2309 hwmgr->platform_descriptor.clockStep.memoryClock = 500;
2310 smu7_thermal_parameter_init(hwmgr);
2311 } else {
2312 /* Ignore the return value here; we are cleaning up after a failure. */
2313 phm_hwmgr_backend_fini(hwmgr);
2314 }
2315
2316 return 0;
2317}
2318
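/* "Highest" below means the most significant set bit of each DPM enable
 * mask: the while loops compute floor(log2(mask)), the index of the top
 * enabled level, which is then forced through the SMC message interface.
 */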
2319static int smu7_force_dpm_highest(struct pp_hwmgr *hwmgr)
2320{
2321 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2322 uint32_t level, tmp;
2323
2324 if (!data->pcie_dpm_key_disabled) {
2325 if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) {
2326 level = 0;
2327 tmp = data->dpm_level_enable_mask.pcie_dpm_enable_mask;
2328 while (tmp >>= 1)
2329 level++;
2330
2331 if (level)
2332 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
2333 PPSMC_MSG_PCIeDPM_ForceLevel, level);
2334 }
2335 }
2336
2337 if (!data->sclk_dpm_key_disabled) {
2338 if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
2339 level = 0;
2340 tmp = data->dpm_level_enable_mask.sclk_dpm_enable_mask;
2341 while (tmp >>= 1)
2342 level++;
2343
2344 if (level)
2345 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
2346 PPSMC_MSG_SCLKDPM_SetEnabledMask,
2347 (1 << level));
2348 }
2349 }
2350
2351 if (!data->mclk_dpm_key_disabled) {
2352 if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
2353 level = 0;
2354 tmp = data->dpm_level_enable_mask.mclk_dpm_enable_mask;
2355 while (tmp >>= 1)
2356 level++;
2357
2358 if (level)
2359 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
2360 PPSMC_MSG_MCLKDPM_SetEnabledMask,
2361 (1 << level));
2362 }
2363 }
2364
2365 return 0;
2366}
2367
2368static int smu7_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr)
2369{
2370 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2371
2372 if (hwmgr->pp_table_version == PP_TABLE_V1)
2373 phm_apply_dal_min_voltage_request(hwmgr);
2374 /* TODO: do the same for v0 (Iceland and CI) */
2375
2376 if (!data->sclk_dpm_key_disabled) {
2377 if (data->dpm_level_enable_mask.sclk_dpm_enable_mask)
2378 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
2379 PPSMC_MSG_SCLKDPM_SetEnabledMask,
2380 data->dpm_level_enable_mask.sclk_dpm_enable_mask);
2381 }
2382
2383 if (!data->mclk_dpm_key_disabled) {
2384 if (data->dpm_level_enable_mask.mclk_dpm_enable_mask)
2385 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
2386 PPSMC_MSG_MCLKDPM_SetEnabledMask,
2387 data->dpm_level_enable_mask.mclk_dpm_enable_mask);
2388 }
2389
2390 return 0;
2391}
2392
2393static int smu7_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
2394{
2395 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2396
2397 if (!smum_is_dpm_running(hwmgr))
2398 return -EINVAL;
2399
2400 if (!data->pcie_dpm_key_disabled) {
2401 smum_send_msg_to_smc(hwmgr->smumgr,
2402 PPSMC_MSG_PCIeDPM_UnForceLevel);
2403 }
2404
2405 return smu7_upload_dpm_level_enable_mask(hwmgr);
2406}
2407
2408static int smu7_force_dpm_lowest(struct pp_hwmgr *hwmgr)
2409{
2410 struct smu7_hwmgr *data =
2411 (struct smu7_hwmgr *)(hwmgr->backend);
2412 uint32_t level;
2413
2414 if (!data->sclk_dpm_key_disabled)
2415 if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
2416 level = phm_get_lowest_enabled_level(hwmgr,
2417 data->dpm_level_enable_mask.sclk_dpm_enable_mask);
2418 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
2419 PPSMC_MSG_SCLKDPM_SetEnabledMask,
2420 (1 << level));
2421
2422 }
2423
2424 if (!data->mclk_dpm_key_disabled) {
2425 if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
2426 level = phm_get_lowest_enabled_level(hwmgr,
2427 data->dpm_level_enable_mask.mclk_dpm_enable_mask);
2428 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
2429 PPSMC_MSG_MCLKDPM_SetEnabledMask,
2430 (1 << level));
2431 }
2432 }
2433
2434 if (!data->pcie_dpm_key_disabled) {
2435 if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) {
2436 level = phm_get_lowest_enabled_level(hwmgr,
2437 data->dpm_level_enable_mask.pcie_dpm_enable_mask);
2438 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
2439 PPSMC_MSG_PCIeDPM_ForceLevel,
2440 (level));
2441 }
2442 }
2443
2444 return 0;
2445}
2446
2447static int smu7_force_dpm_level(struct pp_hwmgr *hwmgr,
2448 enum amd_dpm_forced_level level)
2449{
2450 int ret = 0;
2451
2452 switch (level) {
2453 case AMD_DPM_FORCED_LEVEL_HIGH:
2454 ret = smu7_force_dpm_highest(hwmgr);
2455 if (ret)
2456 return ret;
2457 break;
2458 case AMD_DPM_FORCED_LEVEL_LOW:
2459 ret = smu7_force_dpm_lowest(hwmgr);
2460 if (ret)
2461 return ret;
2462 break;
2463 case AMD_DPM_FORCED_LEVEL_AUTO:
2464 ret = smu7_unforce_dpm_levels(hwmgr);
2465 if (ret)
2466 return ret;
2467 break;
2468 default:
2469 break;
2470 }
2471
2472 hwmgr->dpm_level = level;
2473
2474 return ret;
2475}
2476
2477static int smu7_get_power_state_size(struct pp_hwmgr *hwmgr)
2478{
2479 return sizeof(struct smu7_power_state);
2480}
2481
2482
2483static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
2484 struct pp_power_state *request_ps,
2485 const struct pp_power_state *current_ps)
2486{
2487
2488 struct smu7_power_state *smu7_ps =
2489 cast_phw_smu7_power_state(&request_ps->hardware);
2490 uint32_t sclk;
2491 uint32_t mclk;
2492 struct PP_Clocks minimum_clocks = {0};
2493 bool disable_mclk_switching;
2494 bool disable_mclk_switching_for_frame_lock;
2495 struct cgs_display_info info = {0};
2496 const struct phm_clock_and_voltage_limits *max_limits;
2497 uint32_t i;
2498 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2499 struct phm_ppt_v1_information *table_info =
2500 (struct phm_ppt_v1_information *)(hwmgr->pptable);
2501 int32_t count;
2502 int32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0;
2503
2504 data->battery_state = (PP_StateUILabel_Battery ==
2505 request_ps->classification.ui_label);
2506
2507 PP_ASSERT_WITH_CODE(smu7_ps->performance_level_count == 2,
2508 "VI should always have 2 performance levels",
2509 );
2510
2511 max_limits = (PP_PowerSource_AC == hwmgr->power_source) ?
2512 &(hwmgr->dyn_state.max_clock_voltage_on_ac) :
2513 &(hwmgr->dyn_state.max_clock_voltage_on_dc);
2514
2515 /* Cap clock DPM tables at DC MAX if it is in DC. */
2516 if (PP_PowerSource_DC == hwmgr->power_source) {
2517 for (i = 0; i < smu7_ps->performance_level_count; i++) {
2518 if (smu7_ps->performance_levels[i].memory_clock > max_limits->mclk)
2519 smu7_ps->performance_levels[i].memory_clock = max_limits->mclk;
2520 if (smu7_ps->performance_levels[i].engine_clock > max_limits->sclk)
2521 smu7_ps->performance_levels[i].engine_clock = max_limits->sclk;
2522 }
2523 }
2524
2525 smu7_ps->vce_clks.evclk = hwmgr->vce_arbiter.evclk;
2526 smu7_ps->vce_clks.ecclk = hwmgr->vce_arbiter.ecclk;
2527
2528 cgs_get_active_displays_info(hwmgr->device, &info);
2529
2530 /* TODO: result = PHM_CheckVBlankTime(hwmgr, &vblankTooShort); */
2531
2532 minimum_clocks.engineClock = hwmgr->display_config.min_core_set_clock;
2533 minimum_clocks.memoryClock = hwmgr->display_config.min_mem_set_clock;
2534
2535 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2536 PHM_PlatformCaps_StablePState)) {
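 /* Stable P-state pins the GPU at 75% of the AC sclk limit, snapped
  * down to the nearest sclk dependency table entry (or the lowest
  * entry if none is below it), and at the maximum mclk.
  */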
2537 max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac);
2538 stable_pstate_sclk = (max_limits->sclk * 75) / 100;
2539
2540 for (count = table_info->vdd_dep_on_sclk->count - 1;
2541 count >= 0; count--) {
2542 if (stable_pstate_sclk >=
2543 table_info->vdd_dep_on_sclk->entries[count].clk) {
2544 stable_pstate_sclk =
2545 table_info->vdd_dep_on_sclk->entries[count].clk;
2546 break;
2547 }
2548 }
2549
2550 if (count < 0)
2551 stable_pstate_sclk = table_info->vdd_dep_on_sclk->entries[0].clk;
2552
2553 stable_pstate_mclk = max_limits->mclk;
2554
2555 minimum_clocks.engineClock = stable_pstate_sclk;
2556 minimum_clocks.memoryClock = stable_pstate_mclk;
2557 }
2558
2559 if (minimum_clocks.engineClock < hwmgr->gfx_arbiter.sclk)
2560 minimum_clocks.engineClock = hwmgr->gfx_arbiter.sclk;
2561
2562 if (minimum_clocks.memoryClock < hwmgr->gfx_arbiter.mclk)
2563 minimum_clocks.memoryClock = hwmgr->gfx_arbiter.mclk;
2564
2565 smu7_ps->sclk_threshold = hwmgr->gfx_arbiter.sclk_threshold;
2566
2567 if (0 != hwmgr->gfx_arbiter.sclk_over_drive) {
2568 PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.sclk_over_drive <=
2569 hwmgr->platform_descriptor.overdriveLimit.engineClock),
2570 "Overdrive sclk exceeds limit",
2571 hwmgr->gfx_arbiter.sclk_over_drive =
2572 hwmgr->platform_descriptor.overdriveLimit.engineClock);
2573
2574 if (hwmgr->gfx_arbiter.sclk_over_drive >= hwmgr->gfx_arbiter.sclk)
2575 smu7_ps->performance_levels[1].engine_clock =
2576 hwmgr->gfx_arbiter.sclk_over_drive;
2577 }
2578
2579 if (0 != hwmgr->gfx_arbiter.mclk_over_drive) {
2580 PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.mclk_over_drive <=
2581 hwmgr->platform_descriptor.overdriveLimit.memoryClock),
2582 "Overdrive mclk exceeds limit",
2583 hwmgr->gfx_arbiter.mclk_over_drive =
2584 hwmgr->platform_descriptor.overdriveLimit.memoryClock);
2585
2586 if (hwmgr->gfx_arbiter.mclk_over_drive >= hwmgr->gfx_arbiter.mclk)
2587 smu7_ps->performance_levels[1].memory_clock =
2588 hwmgr->gfx_arbiter.mclk_over_drive;
2589 }
2590
2591 disable_mclk_switching_for_frame_lock = phm_cap_enabled(
2592 hwmgr->platform_descriptor.platformCaps,
2593 PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
2594
2595
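 /* Memory clock switching glitches the screen unless it can be hidden
  * in vblank, so it is disabled outright with more than one active
  * display or when frame lock requires a constant memory clock.
  */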
2596 disable_mclk_switching = (1 < info.display_count) ||
2597 disable_mclk_switching_for_frame_lock;
2598
2599 sclk = smu7_ps->performance_levels[0].engine_clock;
2600 mclk = smu7_ps->performance_levels[0].memory_clock;
2601
2602 if (disable_mclk_switching)
2603 mclk = smu7_ps->performance_levels
2604 [smu7_ps->performance_level_count - 1].memory_clock;
2605
2606 if (sclk < minimum_clocks.engineClock)
2607 sclk = (minimum_clocks.engineClock > max_limits->sclk) ?
2608 max_limits->sclk : minimum_clocks.engineClock;
2609
2610 if (mclk < minimum_clocks.memoryClock)
2611 mclk = (minimum_clocks.memoryClock > max_limits->mclk) ?
2612 max_limits->mclk : minimum_clocks.memoryClock;
2613
2614 smu7_ps->performance_levels[0].engine_clock = sclk;
2615 smu7_ps->performance_levels[0].memory_clock = mclk;
2616
2617 smu7_ps->performance_levels[1].engine_clock =
2618 (smu7_ps->performance_levels[1].engine_clock >=
2619 smu7_ps->performance_levels[0].engine_clock) ?
2620 smu7_ps->performance_levels[1].engine_clock :
2621 smu7_ps->performance_levels[0].engine_clock;
2622
2623 if (disable_mclk_switching) {
2624 if (mclk < smu7_ps->performance_levels[1].memory_clock)
2625 mclk = smu7_ps->performance_levels[1].memory_clock;
2626
2627 smu7_ps->performance_levels[0].memory_clock = mclk;
2628 smu7_ps->performance_levels[1].memory_clock = mclk;
2629 } else {
2630 if (smu7_ps->performance_levels[1].memory_clock <
2631 smu7_ps->performance_levels[0].memory_clock)
2632 smu7_ps->performance_levels[1].memory_clock =
2633 smu7_ps->performance_levels[0].memory_clock;
2634 }
2635
2636 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2637 PHM_PlatformCaps_StablePState)) {
2638 for (i = 0; i < smu7_ps->performance_level_count; i++) {
2639 smu7_ps->performance_levels[i].engine_clock = stable_pstate_sclk;
2640 smu7_ps->performance_levels[i].memory_clock = stable_pstate_mclk;
2641 smu7_ps->performance_levels[i].pcie_gen = data->pcie_gen_performance.max;
2642 smu7_ps->performance_levels[i].pcie_lane = data->pcie_lane_performance.max;
2643 }
2644 }
2645 return 0;
2646}
2647
2648
2649static int smu7_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
2650{
2651 struct pp_power_state *ps;
2652 struct smu7_power_state *smu7_ps;
2653
2654 if (hwmgr == NULL)
2655 return -EINVAL;
2656
2657 ps = hwmgr->request_ps;
2658
2659 if (ps == NULL)
2660 return -EINVAL;
2661
2662 smu7_ps = cast_phw_smu7_power_state(&ps->hardware);
2663
2664 if (low)
2665 return smu7_ps->performance_levels[0].memory_clock;
2666 else
2667 return smu7_ps->performance_levels
2668 [smu7_ps->performance_level_count-1].memory_clock;
2669}
2670
2671static int smu7_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
2672{
2673 struct pp_power_state *ps;
2674 struct smu7_power_state *smu7_ps;
2675
2676 if (hwmgr == NULL)
2677 return -EINVAL;
2678
2679 ps = hwmgr->request_ps;
2680
2681 if (ps == NULL)
2682 return -EINVAL;
2683
2684 smu7_ps = cast_phw_smu7_power_state(&ps->hardware);
2685
2686 if (low)
2687 return smu7_ps->performance_levels[0].engine_clock;
2688 else
2689 return smu7_ps->performance_levels
2690 [smu7_ps->performance_level_count-1].engine_clock;
2691}
2692
2693static int smu7_dpm_patch_boot_state(struct pp_hwmgr *hwmgr,
2694 struct pp_hw_power_state *hw_ps)
2695{
2696 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2697 struct smu7_power_state *ps = (struct smu7_power_state *)hw_ps;
2698 ATOM_FIRMWARE_INFO_V2_2 *fw_info;
2699 uint16_t size;
2700 uint8_t frev, crev;
2701 int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
2702
2703 /* First retrieve the Boot clocks and VDDC from the firmware info table.
2704 * We assume here that fw_info is unchanged if this call fails.
2705 */
2706 fw_info = (ATOM_FIRMWARE_INFO_V2_2 *)cgs_atom_get_data_table(
2707 hwmgr->device, index,
2708 &size, &frev, &crev);
2709 if (!fw_info)
2710 /* During a test, there is no firmware info table. */
2711 return 0;
2712
2713 /* Patch the state. */
2714 data->vbios_boot_state.sclk_bootup_value =
2715 le32_to_cpu(fw_info->ulDefaultEngineClock);
2716 data->vbios_boot_state.mclk_bootup_value =
2717 le32_to_cpu(fw_info->ulDefaultMemoryClock);
2718 data->vbios_boot_state.mvdd_bootup_value =
2719 le16_to_cpu(fw_info->usBootUpMVDDCVoltage);
2720 data->vbios_boot_state.vddc_bootup_value =
2721 le16_to_cpu(fw_info->usBootUpVDDCVoltage);
2722 data->vbios_boot_state.vddci_bootup_value =
2723 le16_to_cpu(fw_info->usBootUpVDDCIVoltage);
2724 data->vbios_boot_state.pcie_gen_bootup_value =
2725 smu7_get_current_pcie_speed(hwmgr);
2726
2727 data->vbios_boot_state.pcie_lane_bootup_value =
2728 (uint16_t)smu7_get_current_pcie_lane_number(hwmgr);
2729
2730 /* set boot power state */
2731 ps->performance_levels[0].memory_clock = data->vbios_boot_state.mclk_bootup_value;
2732 ps->performance_levels[0].engine_clock = data->vbios_boot_state.sclk_bootup_value;
2733 ps->performance_levels[0].pcie_gen = data->vbios_boot_state.pcie_gen_bootup_value;
2734 ps->performance_levels[0].pcie_lane = data->vbios_boot_state.pcie_lane_bootup_value;
2735
2736 return 0;
2737}
2738
2739static int smu7_get_number_of_powerplay_table_entries(struct pp_hwmgr *hwmgr)
2740{
2741 int result;
2742 unsigned long ret = 0;
2743
2744 if (hwmgr->pp_table_version == PP_TABLE_V0) {
2745 result = pp_tables_get_num_of_entries(hwmgr, &ret);
2746 return result ? 0 : ret;
2747 } else if (hwmgr->pp_table_version == PP_TABLE_V1) {
2748 result = get_number_of_powerplay_table_entries_v1_0(hwmgr);
2749 return result;
2750 }
2751 return 0;
2752}
2753
2754static int smu7_get_pp_table_entry_callback_func_v1(struct pp_hwmgr *hwmgr,
2755 void *state, struct pp_power_state *power_state,
2756 void *pp_table, uint32_t classification_flag)
2757{
2758 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2759 struct smu7_power_state *smu7_power_state =
2760 (struct smu7_power_state *)(&(power_state->hardware));
2761 struct smu7_performance_level *performance_level;
2762 ATOM_Tonga_State *state_entry = (ATOM_Tonga_State *)state;
2763 ATOM_Tonga_POWERPLAYTABLE *powerplay_table =
2764 (ATOM_Tonga_POWERPLAYTABLE *)pp_table;
2765 PPTable_Generic_SubTable_Header *sclk_dep_table =
2766 (PPTable_Generic_SubTable_Header *)
2767 (((unsigned long)powerplay_table) +
2768 le16_to_cpu(powerplay_table->usSclkDependencyTableOffset));
2769
2770 ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table =
2771 (ATOM_Tonga_MCLK_Dependency_Table *)
2772 (((unsigned long)powerplay_table) +
2773 le16_to_cpu(powerplay_table->usMclkDependencyTableOffset));
2774
2775 /* The following fields are not initialized here: id, orderedList, allStatesList */
2776 power_state->classification.ui_label =
2777 (le16_to_cpu(state_entry->usClassification) &
2778 ATOM_PPLIB_CLASSIFICATION_UI_MASK) >>
2779 ATOM_PPLIB_CLASSIFICATION_UI_SHIFT;
2780 power_state->classification.flags = classification_flag;
2781 /* NOTE: There is a classification2 flag in BIOS that is not being used right now */
2782
2783 power_state->classification.temporary_state = false;
2784 power_state->classification.to_be_deleted = false;
2785
2786 power_state->validation.disallowOnDC =
2787 (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) &
2788 ATOM_Tonga_DISALLOW_ON_DC));
2789
2790 power_state->pcie.lanes = 0;
2791
2792 power_state->display.disableFrameModulation = false;
2793 power_state->display.limitRefreshrate = false;
2794 power_state->display.enableVariBright =
2795 (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) &
2796 ATOM_Tonga_ENABLE_VARIBRIGHT));
2797
2798 power_state->validation.supportedPowerLevels = 0;
2799 power_state->uvd_clocks.VCLK = 0;
2800 power_state->uvd_clocks.DCLK = 0;
2801 power_state->temperatures.min = 0;
2802 power_state->temperatures.max = 0;
2803
2804 performance_level = &(smu7_power_state->performance_levels
2805 [smu7_power_state->performance_level_count++]);
2806
2807 PP_ASSERT_WITH_CODE(
2808 (smu7_power_state->performance_level_count < smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_GRAPHICS)),
2809 "Performance levels exceeds SMC limit!",
2810 return -EINVAL);
2811
2812 PP_ASSERT_WITH_CODE(
2813 (smu7_power_state->performance_level_count <=
2814 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels),
2815 "Performance levels exceeds Driver limit!",
2816 return -EINVAL);
2817
2818 /* Performance levels are arranged from low to high. */
2819 performance_level->memory_clock = mclk_dep_table->entries
2820 [state_entry->ucMemoryClockIndexLow].ulMclk;
2821 if (sclk_dep_table->ucRevId == 0)
2822 performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries
2823 [state_entry->ucEngineClockIndexLow].ulSclk;
2824 else if (sclk_dep_table->ucRevId == 1)
2825 performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries
2826 [state_entry->ucEngineClockIndexLow].ulSclk;
2827 performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
2828 state_entry->ucPCIEGenLow);
2829 performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
2830 state_entry->ucPCIELaneLow);
2831
2832 performance_level = &(smu7_power_state->performance_levels
2833 [smu7_power_state->performance_level_count++]);
2834 performance_level->memory_clock = mclk_dep_table->entries
2835 [state_entry->ucMemoryClockIndexHigh].ulMclk;
2836
2837 if (sclk_dep_table->ucRevId == 0)
2838 performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries
2839 [state_entry->ucEngineClockIndexHigh].ulSclk;
2840 else if (sclk_dep_table->ucRevId == 1)
2841 performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries
2842 [state_entry->ucEngineClockIndexHigh].ulSclk;
2843
2844 performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
2845 state_entry->ucPCIEGenHigh);
2846 performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
2847 state_entry->ucPCIELaneHigh);
2848
2849 return 0;
2850}
2851
2852static int smu7_get_pp_table_entry_v1(struct pp_hwmgr *hwmgr,
2853 unsigned long entry_index, struct pp_power_state *state)
2854{
2855 int result;
2856 struct smu7_power_state *ps;
2857 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2858 struct phm_ppt_v1_information *table_info =
2859 (struct phm_ppt_v1_information *)(hwmgr->pptable);
2860 struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
2861 table_info->vdd_dep_on_mclk;
2862
2863 state->hardware.magic = PHM_VIslands_Magic;
2864
2865 ps = (struct smu7_power_state *)(&state->hardware);
2866
2867 result = get_powerplay_table_entry_v1_0(hwmgr, entry_index, state,
2868 smu7_get_pp_table_entry_callback_func_v1);
2869
2870 /* This is the earliest point at which we have both the dependency
2871 * tables and the VBIOS boot state, since get_powerplay_table_entry_v1_0
2872 * also retrieves the boot state. If there is only one VDDCI/MCLK level,
2873 * check that it matches the VBIOS boot values. */
2874 if (dep_mclk_table != NULL && dep_mclk_table->count == 1) {
2875 if (dep_mclk_table->entries[0].clk !=
2876 data->vbios_boot_state.mclk_bootup_value)
2877 printk(KERN_ERR "Single MCLK entry VDDCI/MCLK dependency table "
2878 "does not match VBIOS boot MCLK level");
2879 if (dep_mclk_table->entries[0].vddci !=
2880 data->vbios_boot_state.vddci_bootup_value)
2881 printk(KERN_ERR "Single VDDCI entry VDDCI/MCLK dependency table "
2882 "does not match VBIOS boot VDDCI level");
2883 }
2884
2885 /* set DC compatible flag if this state supports DC */
2886 if (!state->validation.disallowOnDC)
2887 ps->dc_compatible = true;
2888
2889 if (state->classification.flags & PP_StateClassificationFlag_ACPI)
2890 data->acpi_pcie_gen = ps->performance_levels[0].pcie_gen;
2891
2892 ps->uvd_clks.vclk = state->uvd_clocks.VCLK;
2893 ps->uvd_clks.dclk = state->uvd_clocks.DCLK;
2894
2895 if (!result) {
2896 uint32_t i;
2897
2898 switch (state->classification.ui_label) {
2899 case PP_StateUILabel_Performance:
2900 data->use_pcie_performance_levels = true;
2901 for (i = 0; i < ps->performance_level_count; i++) {
2902 if (data->pcie_gen_performance.max <
2903 ps->performance_levels[i].pcie_gen)
2904 data->pcie_gen_performance.max =
2905 ps->performance_levels[i].pcie_gen;
2906
2907 if (data->pcie_gen_performance.min >
2908 ps->performance_levels[i].pcie_gen)
2909 data->pcie_gen_performance.min =
2910 ps->performance_levels[i].pcie_gen;
2911
2912 if (data->pcie_lane_performance.max <
2913 ps->performance_levels[i].pcie_lane)
2914 data->pcie_lane_performance.max =
2915 ps->performance_levels[i].pcie_lane;
2916 if (data->pcie_lane_performance.min >
2917 ps->performance_levels[i].pcie_lane)
2918 data->pcie_lane_performance.min =
2919 ps->performance_levels[i].pcie_lane;
2920 }
2921 break;
2922 case PP_StateUILabel_Battery:
2923 data->use_pcie_power_saving_levels = true;
2924
2925 for (i = 0; i < ps->performance_level_count; i++) {
2926 if (data->pcie_gen_power_saving.max <
2927 ps->performance_levels[i].pcie_gen)
2928 data->pcie_gen_power_saving.max =
2929 ps->performance_levels[i].pcie_gen;
2930
2931 if (data->pcie_gen_power_saving.min >
2932 ps->performance_levels[i].pcie_gen)
2933 data->pcie_gen_power_saving.min =
2934 ps->performance_levels[i].pcie_gen;
2935
2936 if (data->pcie_lane_power_saving.max <
2937 ps->performance_levels[i].pcie_lane)
2938 data->pcie_lane_power_saving.max =
2939 ps->performance_levels[i].pcie_lane;
2940
2941 if (data->pcie_lane_power_saving.min >
2942 ps->performance_levels[i].pcie_lane)
2943 data->pcie_lane_power_saving.min =
2944 ps->performance_levels[i].pcie_lane;
2945 }
2946 break;
2947 default:
2948 break;
2949 }
2950 }
2951 return 0;
2952}
2953
2954static int smu7_get_pp_table_entry_callback_func_v0(struct pp_hwmgr *hwmgr,
2955 struct pp_hw_power_state *power_state,
2956 unsigned int index, const void *clock_info)
2957{
2958 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2959 struct smu7_power_state *ps = cast_phw_smu7_power_state(power_state);
2960 const ATOM_PPLIB_CI_CLOCK_INFO *visland_clk_info = clock_info;
2961 struct smu7_performance_level *performance_level;
2962 uint32_t engine_clock, memory_clock;
2963 uint16_t pcie_gen_from_bios;
2964
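 /* The BIOS stores each clock split into a 16-bit low word and an
  * 8-bit high byte; reassemble them into one value in 10 kHz units.
  */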
2965 engine_clock = visland_clk_info->ucEngineClockHigh << 16 | visland_clk_info->usEngineClockLow;
2966 memory_clock = visland_clk_info->ucMemoryClockHigh << 16 | visland_clk_info->usMemoryClockLow;
2967
2968 if (!(data->mc_micro_code_feature & DISABLE_MC_LOADMICROCODE) && memory_clock > data->highest_mclk)
2969 data->highest_mclk = memory_clock;
2970
2971 performance_level = &(ps->performance_levels
2972 [ps->performance_level_count++]);
2973
2974 PP_ASSERT_WITH_CODE(
2975 (ps->performance_level_count < smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_GRAPHICS)),
2976 "Performance levels exceeds SMC limit!",
2977 return -EINVAL);
2978
2979 PP_ASSERT_WITH_CODE(
2980 (ps->performance_level_count <=
2981 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels),
2982 "Performance levels exceeds Driver limit!",
2983 return -EINVAL);
2984
2985 /* Performance levels are arranged from low to high. */
2986 performance_level->memory_clock = memory_clock;
2987 performance_level->engine_clock = engine_clock;
2988
2989 pcie_gen_from_bios = visland_clk_info->ucPCIEGen;
2990
2991 performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap, pcie_gen_from_bios);
2992 performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap, visland_clk_info->usPCIELane);
2993
2994 return 0;
2995}
2996
2997static int smu7_get_pp_table_entry_v0(struct pp_hwmgr *hwmgr,
2998 unsigned long entry_index, struct pp_power_state *state)
2999{
3000 int result;
3001 struct smu7_power_state *ps;
3002 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3003 struct phm_clock_voltage_dependency_table *dep_mclk_table =
3004 hwmgr->dyn_state.vddci_dependency_on_mclk;
3005
3006 memset(&state->hardware, 0x00, sizeof(struct pp_hw_power_state));
3007
3008 state->hardware.magic = PHM_VIslands_Magic;
3009
3010 ps = (struct smu7_power_state *)(&state->hardware);
3011
3012 result = pp_tables_get_entry(hwmgr, entry_index, state,
3013 smu7_get_pp_table_entry_callback_func_v0);
3014
3015 /*
3016 * This is the earliest point at which we have both the
3017 * dependency table and the VBIOS boot state, since
3018 * pp_tables_get_entry retrieves the VBIOS boot state.
3019 * If there is only one VDDCI/MCLK level, check that it
3020 * matches the VBIOS boot values.
3021 */
3022 if (dep_mclk_table != NULL && dep_mclk_table->count == 1) {
3023 if (dep_mclk_table->entries[0].clk !=
3024 data->vbios_boot_state.mclk_bootup_value)
3025 printk(KERN_ERR "Single MCLK entry VDDCI/MCLK dependency table "
3026 "does not match VBIOS boot MCLK level");
3027 if (dep_mclk_table->entries[0].v !=
3028 data->vbios_boot_state.vddci_bootup_value)
3029 printk(KERN_ERR "Single VDDCI entry VDDCI/MCLK dependency table "
3030 "does not match VBIOS boot VDDCI level");
3031 }
3032
3033 /* set DC compatible flag if this state supports DC */
3034 if (!state->validation.disallowOnDC)
3035 ps->dc_compatible = true;
3036
3037 if (state->classification.flags & PP_StateClassificationFlag_ACPI)
3038 data->acpi_pcie_gen = ps->performance_levels[0].pcie_gen;
3039
3040 ps->uvd_clks.vclk = state->uvd_clocks.VCLK;
3041 ps->uvd_clks.dclk = state->uvd_clocks.DCLK;
3042
3043 if (!result) {
3044 uint32_t i;
3045
3046 switch (state->classification.ui_label) {
3047 case PP_StateUILabel_Performance:
3048 data->use_pcie_performance_levels = true;
3049
3050 for (i = 0; i < ps->performance_level_count; i++) {
3051 if (data->pcie_gen_performance.max <
3052 ps->performance_levels[i].pcie_gen)
3053 data->pcie_gen_performance.max =
3054 ps->performance_levels[i].pcie_gen;
3055
3056 if (data->pcie_gen_performance.min >
3057 ps->performance_levels[i].pcie_gen)
3058 data->pcie_gen_performance.min =
3059 ps->performance_levels[i].pcie_gen;
3060
3061 if (data->pcie_lane_performance.max <
3062 ps->performance_levels[i].pcie_lane)
3063 data->pcie_lane_performance.max =
3064 ps->performance_levels[i].pcie_lane;
3065
3066 if (data->pcie_lane_performance.min >
3067 ps->performance_levels[i].pcie_lane)
3068 data->pcie_lane_performance.min =
3069 ps->performance_levels[i].pcie_lane;
3070 }
3071 break;
3072 case PP_StateUILabel_Battery:
3073 data->use_pcie_power_saving_levels = true;
3074
3075 for (i = 0; i < ps->performance_level_count; i++) {
3076 if (data->pcie_gen_power_saving.max <
3077 ps->performance_levels[i].pcie_gen)
3078 data->pcie_gen_power_saving.max =
3079 ps->performance_levels[i].pcie_gen;
3080
3081 if (data->pcie_gen_power_saving.min >
3082 ps->performance_levels[i].pcie_gen)
3083 data->pcie_gen_power_saving.min =
3084 ps->performance_levels[i].pcie_gen;
3085
3086 if (data->pcie_lane_power_saving.max <
3087 ps->performance_levels[i].pcie_lane)
3088 data->pcie_lane_power_saving.max =
3089 ps->performance_levels[i].pcie_lane;
3090
3091 if (data->pcie_lane_power_saving.min >
3092 ps->performance_levels[i].pcie_lane)
3093 data->pcie_lane_power_saving.min =
3094 ps->performance_levels[i].pcie_lane;
3095 }
3096 break;
3097 default:
3098 break;
3099 }
3100 }
3101 return 0;
3102}
3103
3104static int smu7_get_pp_table_entry(struct pp_hwmgr *hwmgr,
3105 unsigned long entry_index, struct pp_power_state *state)
3106{
3107 if (hwmgr->pp_table_version == PP_TABLE_V0)
3108 return smu7_get_pp_table_entry_v0(hwmgr, entry_index, state);
3109 else if (hwmgr->pp_table_version == PP_TABLE_V1)
3110 return smu7_get_pp_table_entry_v1(hwmgr, entry_index, state);
3111
3112 return 0;
3113}
3114
3115static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx, int32_t *value)
3116{
3117 uint32_t sclk, mclk, activity_percent;
3118 uint32_t offset;
3119 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3120
3121 switch (idx) {
3122 case AMDGPU_PP_SENSOR_GFX_SCLK:
3123 smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency);
3124 sclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
3125 *value = sclk;
3126 return 0;
3127 case AMDGPU_PP_SENSOR_GFX_MCLK:
3128 smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency);
3129 mclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
3130 *value = mclk;
3131 return 0;
3132 case AMDGPU_PP_SENSOR_GPU_LOAD:
3133 offset = data->soft_regs_start + smum_get_offsetof(hwmgr->smumgr,
3134 SMU_SoftRegisters,
3135 AverageGraphicsActivity);
3136
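 /* AverageGraphicsActivity is an 8.8 fixed-point percentage: adding
  * 0x80 rounds to nearest before the 8-bit shift, and the result is
  * clamped to 100 below.
  */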
3137 activity_percent = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset);
3138 activity_percent += 0x80;
3139 activity_percent >>= 8;
3140 *value = activity_percent > 100 ? 100 : activity_percent;
3141 return 0;
3142 case AMDGPU_PP_SENSOR_GPU_TEMP:
3143 *value = smu7_thermal_get_temperature(hwmgr);
3144 return 0;
3145 case AMDGPU_PP_SENSOR_UVD_POWER:
3146 *value = data->uvd_power_gated ? 0 : 1;
3147 return 0;
3148 case AMDGPU_PP_SENSOR_VCE_POWER:
3149 *value = data->vce_power_gated ? 0 : 1;
3150 return 0;
3151 default:
3152 return -EINVAL;
3153 }
3154}
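
/* Usage sketch (hypothetical caller, error handling elided):
 *
 *	int32_t sclk_10khz;
 *
 *	if (!smu7_read_sensor(hwmgr, AMDGPU_PP_SENSOR_GFX_SCLK, &sclk_10khz))
 *		pr_info("sclk: %d (10 kHz units)\n", sclk_10khz);
 */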
3155
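/* Decide which DPM tables the SMC must re-upload for the new state:
 * the OD_UPDATE flags mark requested clocks that fall outside the current
 * dpm_table (overdrive), while the UPDATE flags mark levels whose values
 * changed, e.g. the DeepSleep divider or mclk when the display count changes.
 */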
3156static int smu7_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input)
3157{
3158 const struct phm_set_power_state_input *states =
3159 (const struct phm_set_power_state_input *)input;
3160 const struct smu7_power_state *smu7_ps =
3161 cast_const_phw_smu7_power_state(states->pnew_state);
3162 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3163 struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
3164 uint32_t sclk = smu7_ps->performance_levels
3165 [smu7_ps->performance_level_count - 1].engine_clock;
3166 struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
3167 uint32_t mclk = smu7_ps->performance_levels
3168 [smu7_ps->performance_level_count - 1].memory_clock;
3169 struct PP_Clocks min_clocks = {0};
3170 uint32_t i;
3171 struct cgs_display_info info = {0};
3172
3173 data->need_update_smu7_dpm_table = 0;
3174
3175 for (i = 0; i < sclk_table->count; i++) {
3176 if (sclk == sclk_table->dpm_levels[i].value)
3177 break;
3178 }
3179
3180 if (i >= sclk_table->count)
3181 data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
3182 else {
3183 /* TODO: Check SCLK in DAL's minimum clocks
3184 * in case DeepSleep divider update is required.
3185 */
3186 if (data->display_timing.min_clock_in_sr != min_clocks.engineClockInSR &&
3187 (min_clocks.engineClockInSR >= SMU7_MINIMUM_ENGINE_CLOCK ||
3188 data->display_timing.min_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK))
3189 data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
3190 }
3191
3192 for (i = 0; i < mclk_table->count; i++) {
3193 if (mclk == mclk_table->dpm_levels[i].value)
3194 break;
3195 }
3196
3197 if (i >= mclk_table->count)
3198 data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
3199
3200 cgs_get_active_displays_info(hwmgr->device, &info);
3201
3202 if (data->display_timing.num_existing_displays != info.display_count)
3203 data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
3204
3205 return 0;
3206}
3207
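/*
 * Note on the flags set above (informational): DPMTABLE_OD_UPDATE_SCLK /
 * _MCLK are raised when the requested top-level clock no longer matches
 * any entry in the DPM table (i.e. overdrive moved it), while
 * DPMTABLE_UPDATE_SCLK / _MCLK cover softer cases such as a DeepSleep
 * divider change or a change in the number of active displays; the
 * freeze/populate/unfreeze helpers below key off these bits.
 */
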
static uint16_t smu7_get_maximum_link_speed(struct pp_hwmgr *hwmgr,
		const struct smu7_power_state *smu7_ps)
{
	uint32_t i;
	uint32_t sclk, max_sclk = 0;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_dpm_table *dpm_table = &data->dpm_table;

	for (i = 0; i < smu7_ps->performance_level_count; i++) {
		sclk = smu7_ps->performance_levels[i].engine_clock;
		if (max_sclk < sclk)
			max_sclk = sclk;
	}

	for (i = 0; i < dpm_table->sclk_table.count; i++) {
		if (dpm_table->sclk_table.dpm_levels[i].value == max_sclk)
			return (uint16_t) ((i >= dpm_table->pcie_speed_table.count) ?
					dpm_table->pcie_speed_table.dpm_levels
					[dpm_table->pcie_speed_table.count - 1].value :
					dpm_table->pcie_speed_table.dpm_levels[i].value);
	}

	return 0;
}

static int smu7_request_link_speed_change_before_state_change(
		struct pp_hwmgr *hwmgr, const void *input)
{
	const struct phm_set_power_state_input *states =
			(const struct phm_set_power_state_input *)input;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	const struct smu7_power_state *smu7_nps =
			cast_const_phw_smu7_power_state(states->pnew_state);
	const struct smu7_power_state *polaris10_cps =
			cast_const_phw_smu7_power_state(states->pcurrent_state);

	uint16_t target_link_speed = smu7_get_maximum_link_speed(hwmgr, smu7_nps);
	uint16_t current_link_speed;

	if (data->force_pcie_gen == PP_PCIEGenInvalid)
		current_link_speed = smu7_get_maximum_link_speed(hwmgr, polaris10_cps);
	else
		current_link_speed = data->force_pcie_gen;

	data->force_pcie_gen = PP_PCIEGenInvalid;
	data->pspp_notify_required = false;

	if (target_link_speed > current_link_speed) {
		switch (target_link_speed) {
		case PP_PCIEGen3:
			if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN3, false))
				break;
			data->force_pcie_gen = PP_PCIEGen2;
			if (current_link_speed == PP_PCIEGen2)
				break;
			/* Gen3 was refused and the link is below Gen2:
			 * fall through and try the Gen2 request instead.
			 */
		case PP_PCIEGen2:
			if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN2, false))
				break;
			/* fall through: Gen2 was refused as well, settle
			 * for whatever speed the link currently runs at.
			 */
		default:
			data->force_pcie_gen = smu7_get_current_pcie_speed(hwmgr);
			break;
		}
	} else {
		if (target_link_speed < current_link_speed)
			data->pspp_notify_required = true;
	}

	return 0;
}

static int smu7_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	if (0 == data->need_update_smu7_dpm_table)
		return 0;

	if ((0 == data->sclk_dpm_key_disabled) &&
		(data->need_update_smu7_dpm_table &
			(DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
		PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
				"Trying to freeze SCLK DPM when DPM is disabled",
				);
		PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
				PPSMC_MSG_SCLKDPM_FreezeLevel),
				"Failed to freeze SCLK DPM during FreezeSclkMclkDPM Function!",
				return -EINVAL);
	}

	if ((0 == data->mclk_dpm_key_disabled) &&
		(data->need_update_smu7_dpm_table &
			DPMTABLE_OD_UPDATE_MCLK)) {
		PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
				"Trying to freeze MCLK DPM when DPM is disabled",
				);
		PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
				PPSMC_MSG_MCLKDPM_FreezeLevel),
				"Failed to freeze MCLK DPM during FreezeSclkMclkDPM Function!",
				return -EINVAL);
	}

	return 0;
}

static int smu7_populate_and_upload_sclk_mclk_dpm_levels(
		struct pp_hwmgr *hwmgr, const void *input)
{
	int result = 0;
	const struct phm_set_power_state_input *states =
			(const struct phm_set_power_state_input *)input;
	const struct smu7_power_state *smu7_ps =
			cast_const_phw_smu7_power_state(states->pnew_state);
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	uint32_t sclk = smu7_ps->performance_levels
			[smu7_ps->performance_level_count - 1].engine_clock;
	uint32_t mclk = smu7_ps->performance_levels
			[smu7_ps->performance_level_count - 1].memory_clock;
	struct smu7_dpm_table *dpm_table = &data->dpm_table;

	struct smu7_dpm_table *golden_dpm_table = &data->golden_dpm_table;
	uint32_t dpm_count, clock_percent;
	uint32_t i;

	if (0 == data->need_update_smu7_dpm_table)
		return 0;

	if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) {
		dpm_table->sclk_table.dpm_levels
				[dpm_table->sclk_table.count - 1].value = sclk;

		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinACSupport) ||
		    phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinDCSupport)) {
			/* Need to do calculation based on the golden DPM table
			 * as the Heatmap GPU Clock axis is also based on the default values
			 */
			PP_ASSERT_WITH_CODE(
				(golden_dpm_table->sclk_table.dpm_levels
					[golden_dpm_table->sclk_table.count - 1].value != 0),
				"Divide by 0!",
				return -EINVAL);
			dpm_count = dpm_table->sclk_table.count < 2 ? 0 : dpm_table->sclk_table.count - 2;

			for (i = dpm_count; i > 1; i--) {
				if (sclk > golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value) {
					clock_percent =
						((sclk
						- golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value
						) * 100)
						/ golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value;

					dpm_table->sclk_table.dpm_levels[i].value =
							golden_dpm_table->sclk_table.dpm_levels[i].value +
							(golden_dpm_table->sclk_table.dpm_levels[i].value *
								clock_percent) / 100;

				} else if (golden_dpm_table->sclk_table.dpm_levels[dpm_table->sclk_table.count-1].value > sclk) {
					clock_percent =
						((golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count - 1].value
						- sclk) * 100)
						/ golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value;

					dpm_table->sclk_table.dpm_levels[i].value =
							golden_dpm_table->sclk_table.dpm_levels[i].value -
							(golden_dpm_table->sclk_table.dpm_levels[i].value *
								clock_percent) / 100;
				} else
					dpm_table->sclk_table.dpm_levels[i].value =
							golden_dpm_table->sclk_table.dpm_levels[i].value;
			}
		}
	}

	if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK) {
		dpm_table->mclk_table.dpm_levels
				[dpm_table->mclk_table.count - 1].value = mclk;

		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinACSupport) ||
		    phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinDCSupport)) {

			PP_ASSERT_WITH_CODE(
				(golden_dpm_table->mclk_table.dpm_levels
					[golden_dpm_table->mclk_table.count-1].value != 0),
				"Divide by 0!",
				return -EINVAL);
			dpm_count = dpm_table->mclk_table.count < 2 ? 0 : dpm_table->mclk_table.count - 2;
			for (i = dpm_count; i > 1; i--) {
				if (golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value < mclk) {
					clock_percent = ((mclk -
						golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value) * 100)
						/ golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value;

					dpm_table->mclk_table.dpm_levels[i].value =
							golden_dpm_table->mclk_table.dpm_levels[i].value +
							(golden_dpm_table->mclk_table.dpm_levels[i].value *
								clock_percent) / 100;

				} else if (golden_dpm_table->mclk_table.dpm_levels[dpm_table->mclk_table.count-1].value > mclk) {
					clock_percent = (
						(golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value - mclk)
						* 100)
						/ golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value;

					dpm_table->mclk_table.dpm_levels[i].value =
							golden_dpm_table->mclk_table.dpm_levels[i].value -
							(golden_dpm_table->mclk_table.dpm_levels[i].value *
								clock_percent) / 100;
				} else
					dpm_table->mclk_table.dpm_levels[i].value =
							golden_dpm_table->mclk_table.dpm_levels[i].value;
			}
		}
	}

	if (data->need_update_smu7_dpm_table &
			(DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) {
		result = smum_populate_all_graphic_levels(hwmgr);
		PP_ASSERT_WITH_CODE((0 == result),
				"Failed to populate SCLK during PopulateNewDPMClocksStates Function!",
				return result);
	}

	if (data->need_update_smu7_dpm_table &
			(DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) {
		/* populate MCLK dpm table to SMU7 */
		result = smum_populate_all_memory_levels(hwmgr);
		PP_ASSERT_WITH_CODE((0 == result),
				"Failed to populate MCLK during PopulateNewDPMClocksStates Function!",
				return result);
	}

	return result;
}

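/*
 * Worked example for the overdrive rescaling above (informational):
 * with a golden top SCLK of 100000 (1 GHz in 10 kHz units) and a user
 * requested top of 110000, clock_percent = (110000 - 100000) * 100 /
 * 100000 = 10, so an intermediate golden level of 80000 is re-plotted
 * as 80000 + 80000 * 10 / 100 = 88000; the intermediate levels keep
 * the shape of the default DPM curve instead of only moving the top.
 */
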
static int smu7_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
		struct smu7_single_dpm_table *dpm_table,
		uint32_t low_limit, uint32_t high_limit)
{
	uint32_t i;

	for (i = 0; i < dpm_table->count; i++) {
		if ((dpm_table->dpm_levels[i].value < low_limit)
			|| (dpm_table->dpm_levels[i].value > high_limit))
			dpm_table->dpm_levels[i].enabled = false;
		else
			dpm_table->dpm_levels[i].enabled = true;
	}

	return 0;
}

static int smu7_trim_dpm_states(struct pp_hwmgr *hwmgr,
		const struct smu7_power_state *smu7_ps)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	uint32_t high_limit_count;

	PP_ASSERT_WITH_CODE((smu7_ps->performance_level_count >= 1),
			"power state did not have any performance level",
			return -EINVAL);

	high_limit_count = (1 == smu7_ps->performance_level_count) ? 0 : 1;

	smu7_trim_single_dpm_states(hwmgr,
			&(data->dpm_table.sclk_table),
			smu7_ps->performance_levels[0].engine_clock,
			smu7_ps->performance_levels[high_limit_count].engine_clock);

	smu7_trim_single_dpm_states(hwmgr,
			&(data->dpm_table.mclk_table),
			smu7_ps->performance_levels[0].memory_clock,
			smu7_ps->performance_levels[high_limit_count].memory_clock);

	return 0;
}

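/*
 * Informational note: for a state with a single performance level,
 * high_limit_count above is 0, so low and high limit collapse to the
 * same clock and only DPM entries exactly matching that level stay
 * enabled; everything else drops out of the enable masks built below.
 */
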
static int smu7_generate_dpm_level_enable_mask(
		struct pp_hwmgr *hwmgr, const void *input)
{
	int result;
	const struct phm_set_power_state_input *states =
			(const struct phm_set_power_state_input *)input;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	const struct smu7_power_state *smu7_ps =
			cast_const_phw_smu7_power_state(states->pnew_state);

	result = smu7_trim_dpm_states(hwmgr, smu7_ps);
	if (result)
		return result;

	data->dpm_level_enable_mask.sclk_dpm_enable_mask =
			phm_get_dpm_level_enable_mask_value(&data->dpm_table.sclk_table);
	data->dpm_level_enable_mask.mclk_dpm_enable_mask =
			phm_get_dpm_level_enable_mask_value(&data->dpm_table.mclk_table);
	data->dpm_level_enable_mask.pcie_dpm_enable_mask =
			phm_get_dpm_level_enable_mask_value(&data->dpm_table.pcie_speed_table);

	return 0;
}

static int smu7_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	if (0 == data->need_update_smu7_dpm_table)
		return 0;

	if ((0 == data->sclk_dpm_key_disabled) &&
		(data->need_update_smu7_dpm_table &
			(DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {

		PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
				"Trying to Unfreeze SCLK DPM when DPM is disabled",
				);
		PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
				PPSMC_MSG_SCLKDPM_UnfreezeLevel),
				"Failed to unfreeze SCLK DPM during UnFreezeSclkMclkDPM Function!",
				return -EINVAL);
	}

	if ((0 == data->mclk_dpm_key_disabled) &&
		(data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {

		PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
				"Trying to Unfreeze MCLK DPM when DPM is disabled",
				);
		/* Use the MCLK unfreeze message here: this branch pairs with
		 * the PPSMC_MSG_MCLKDPM_FreezeLevel sent in the freeze path.
		 */
		PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
				PPSMC_MSG_MCLKDPM_UnfreezeLevel),
				"Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!",
				return -EINVAL);
	}

	data->need_update_smu7_dpm_table = 0;

	return 0;
}

static int smu7_notify_link_speed_change_after_state_change(
		struct pp_hwmgr *hwmgr, const void *input)
{
	const struct phm_set_power_state_input *states =
			(const struct phm_set_power_state_input *)input;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	const struct smu7_power_state *smu7_ps =
			cast_const_phw_smu7_power_state(states->pnew_state);
	uint16_t target_link_speed = smu7_get_maximum_link_speed(hwmgr, smu7_ps);
	uint8_t request;

	if (data->pspp_notify_required) {
		if (target_link_speed == PP_PCIEGen3)
			request = PCIE_PERF_REQ_GEN3;
		else if (target_link_speed == PP_PCIEGen2)
			request = PCIE_PERF_REQ_GEN2;
		else
			request = PCIE_PERF_REQ_GEN1;

		if (request == PCIE_PERF_REQ_GEN1 &&
				smu7_get_current_pcie_speed(hwmgr) > 0)
			return 0;

		if (acpi_pcie_perf_request(hwmgr->device, request, false)) {
			if (PP_PCIEGen2 == target_link_speed)
				printk("PSPP request to switch to Gen2 from Gen3 Failed!\n");
			else
				printk("PSPP request to switch to Gen1 from Gen2 Failed!\n");
		}
	}

	return 0;
}

static int smu7_notify_smc_display(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	if (hwmgr->feature_mask & PP_VBI_TIME_SUPPORT_MASK)
		smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
				(PPSMC_Msg)PPSMC_MSG_SetVBITimeout, data->frame_time_x2);
	return (smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_HasDisplay) == 0) ? 0 : -EINVAL;
}

static int smu7_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
{
	int tmp_result, result = 0;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	tmp_result = smu7_find_dpm_states_clocks_in_dpm_table(hwmgr, input);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to find DPM states clocks in DPM table!",
			result = tmp_result);

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PCIEPerformanceRequest)) {
		tmp_result =
			smu7_request_link_speed_change_before_state_change(hwmgr, input);
		PP_ASSERT_WITH_CODE((0 == tmp_result),
				"Failed to request link speed change before state change!",
				result = tmp_result);
	}

	tmp_result = smu7_freeze_sclk_mclk_dpm(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to freeze SCLK MCLK DPM!", result = tmp_result);

	tmp_result = smu7_populate_and_upload_sclk_mclk_dpm_levels(hwmgr, input);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to populate and upload SCLK MCLK DPM levels!",
			result = tmp_result);

	tmp_result = smu7_generate_dpm_level_enable_mask(hwmgr, input);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to generate DPM level enabled mask!",
			result = tmp_result);

	tmp_result = smum_update_sclk_threshold(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to update SCLK threshold!",
			result = tmp_result);

	tmp_result = smu7_notify_smc_display(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to notify smc display settings!",
			result = tmp_result);

	tmp_result = smu7_unfreeze_sclk_mclk_dpm(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to unfreeze SCLK MCLK DPM!",
			result = tmp_result);

	tmp_result = smu7_upload_dpm_level_enable_mask(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to upload DPM level enabled mask!",
			result = tmp_result);

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PCIEPerformanceRequest)) {
		tmp_result =
			smu7_notify_link_speed_change_after_state_change(hwmgr, input);
		PP_ASSERT_WITH_CODE((0 == tmp_result),
				"Failed to notify link speed change after state change!",
				result = tmp_result);
	}
	data->apply_optimized_settings = false;
	return result;
}

static int smu7_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_pwm)
{
	hwmgr->thermal_controller.
		advanceFanControlParameters.usMaxFanPWM = us_max_fan_pwm;

	if (phm_is_hw_access_blocked(hwmgr))
		return 0;

	return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
			PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm);
}

int smu7_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display)
{
	PPSMC_Msg msg = has_display ? (PPSMC_Msg)PPSMC_HasDisplay : (PPSMC_Msg)PPSMC_NoDisplay;

	return (smum_send_msg_to_smc(hwmgr->smumgr, msg) == 0) ? 0 : -1;
}

int smu7_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr)
{
	uint32_t num_active_displays = 0;
	struct cgs_display_info info = {0};

	info.mode_info = NULL;
	cgs_get_active_displays_info(hwmgr->device, &info);

	num_active_displays = info.display_count;

	if (num_active_displays > 1 && !hwmgr->display_config.multi_monitor_in_sync)
		smu7_notify_smc_display_change(hwmgr, false);

	return 0;
}

/**
 * Programs the display gap
 *
 * @param hwmgr  the address of the powerplay hardware manager.
 * @return   always OK
 */
int smu7_program_display_gap(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	uint32_t num_active_displays = 0;
	uint32_t display_gap = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL);
	uint32_t display_gap2;
	uint32_t pre_vbi_time_in_us;
	uint32_t frame_time_in_us;
	uint32_t ref_clock;
	uint32_t refresh_rate = 0;
	struct cgs_display_info info = {0};
	struct cgs_mode_info mode_info;

	info.mode_info = &mode_info;

	cgs_get_active_displays_info(hwmgr->device, &info);
	num_active_displays = info.display_count;

	display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL, DISP_GAP, (num_active_displays > 0) ? DISPLAY_GAP_VBLANK_OR_WM : DISPLAY_GAP_IGNORE);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL, display_gap);

	ref_clock = mode_info.ref_clock;
	refresh_rate = mode_info.refresh_rate;

	if (0 == refresh_rate)
		refresh_rate = 60;

	frame_time_in_us = 1000000 / refresh_rate;

	pre_vbi_time_in_us = frame_time_in_us - 200 - mode_info.vblank_time_us;
	data->frame_time_x2 = frame_time_in_us * 2 / 100;

	display_gap2 = pre_vbi_time_in_us * (ref_clock / 100);

	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL2, display_gap2);

	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			data->soft_regs_start + smum_get_offsetof(hwmgr->smumgr,
					SMU_SoftRegisters,
					PreVBlankGap), 0x64);

	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			data->soft_regs_start + smum_get_offsetof(hwmgr->smumgr,
					SMU_SoftRegisters,
					VBlankTimeout),
			(frame_time_in_us - pre_vbi_time_in_us));

	return 0;
}

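/*
 * Worked example for the timing math above (informational): at the
 * 60 Hz fallback refresh rate, frame_time_in_us = 1000000 / 60 = 16666;
 * with, say, a 500 us vblank reported by the mode info,
 * pre_vbi_time_in_us = 16666 - 200 - 500 = 15966, where the 200 us
 * term appears to act as a fixed guard margin before vertical blank.
 * frame_time_x2 = 16666 * 2 / 100 = 333, i.e. twice the frame time
 * expressed in 100 us units for the SMC.
 */
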
int smu7_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
{
	return smu7_program_display_gap(hwmgr);
}

/**
 * Set maximum target operating fan output RPM
 *
 * @param hwmgr:  the address of the powerplay hardware manager.
 * @param us_max_fan_rpm:  max operating fan RPM value.
 * @return   The response that came from the SMC.
 */
static int smu7_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_rpm)
{
	hwmgr->thermal_controller.
		advanceFanControlParameters.usMaxFanRPM = us_max_fan_rpm;

	if (phm_is_hw_access_blocked(hwmgr))
		return 0;

	return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
			PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm);
}

int smu7_register_internal_thermal_interrupt(struct pp_hwmgr *hwmgr,
		const void *thermal_interrupt_info)
{
	return 0;
}

bool smu7_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	bool is_update_required = false;
	struct cgs_display_info info = {0, 0, NULL};

	cgs_get_active_displays_info(hwmgr->device, &info);

	if (data->display_timing.num_existing_displays != info.display_count)
		is_update_required = true;

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
		if (data->display_timing.min_clock_in_sr != hwmgr->display_config.min_core_set_clock_in_sr &&
			(data->display_timing.min_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK ||
			hwmgr->display_config.min_core_set_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK))
			is_update_required = true;
	}
	return is_update_required;
}

static inline bool smu7_are_power_levels_equal(const struct smu7_performance_level *pl1,
		const struct smu7_performance_level *pl2)
{
	return ((pl1->memory_clock == pl2->memory_clock) &&
		(pl1->engine_clock == pl2->engine_clock) &&
		(pl1->pcie_gen == pl2->pcie_gen) &&
		(pl1->pcie_lane == pl2->pcie_lane));
}

int smu7_check_states_equal(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *pstate1, const struct pp_hw_power_state *pstate2, bool *equal)
{
	const struct smu7_power_state *psa;
	const struct smu7_power_state *psb;
	int i;

	if (pstate1 == NULL || pstate2 == NULL || equal == NULL)
		return -EINVAL;

	/* Cast only after the NULL checks above: the cast helpers
	 * dereference hw_ps->magic to validate the power-state type.
	 */
	psa = cast_const_phw_smu7_power_state(pstate1);
	psb = cast_const_phw_smu7_power_state(pstate2);

	/* If the two states don't even have the same number of performance levels
	 * they cannot be the same state.
	 */
	if (psa->performance_level_count != psb->performance_level_count) {
		*equal = false;
		return 0;
	}

	for (i = 0; i < psa->performance_level_count; i++) {
		if (!smu7_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) {
			/* If we have found even one performance level pair that is different the states are different. */
			*equal = false;
			return 0;
		}
	}

	/* If all performance levels are the same try to use the UVD clocks to break the tie. */
	*equal = ((psa->uvd_clks.vclk == psb->uvd_clks.vclk) && (psa->uvd_clks.dclk == psb->uvd_clks.dclk));
	*equal &= ((psa->vce_clks.evclk == psb->vce_clks.evclk) && (psa->vce_clks.ecclk == psb->vce_clks.ecclk));
	*equal &= (psa->sclk_threshold == psb->sclk_threshold);

	return 0;
}

int smu7_upload_mc_firmware(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	uint32_t vbios_version;
	uint32_t tmp;

	/* Read MC indirect register offset 0x9F bits [3:0] to see
	 * if VBIOS has already loaded a full version of MC ucode
	 * or not.
	 */

	smu7_get_mc_microcode_version(hwmgr);
	vbios_version = hwmgr->microcode_version_info.MC & 0xf;

	data->need_long_memory_training = false;

	cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX,
			ixMC_IO_DEBUG_UP_13);
	tmp = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA);

	if (tmp & (1 << 23)) {
		data->mem_latency_high = MEM_LATENCY_HIGH;
		data->mem_latency_low = MEM_LATENCY_LOW;
	} else {
		data->mem_latency_high = 330;
		data->mem_latency_low = 330;
	}

	return 0;
}

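/*
 * Informational note on the probe above: bit 23 of MC_IO_DEBUG_UP_13
 * appears to flag memory parts that can use the tighter
 * MEM_LATENCY_HIGH / MEM_LATENCY_LOW values; otherwise a conservative
 * 330 is used for both. vbios_version is read for the ucode check
 * described in the comment but is not otherwise consumed here.
 */
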
static int smu7_read_clock_registers(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	data->clock_registers.vCG_SPLL_FUNC_CNTL =
		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL);
	data->clock_registers.vCG_SPLL_FUNC_CNTL_2 =
		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_2);
	data->clock_registers.vCG_SPLL_FUNC_CNTL_3 =
		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_3);
	data->clock_registers.vCG_SPLL_FUNC_CNTL_4 =
		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_4);
	data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM =
		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM);
	data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2 =
		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM_2);
	data->clock_registers.vDLL_CNTL =
		cgs_read_register(hwmgr->device, mmDLL_CNTL);
	data->clock_registers.vMCLK_PWRMGT_CNTL =
		cgs_read_register(hwmgr->device, mmMCLK_PWRMGT_CNTL);
	data->clock_registers.vMPLL_AD_FUNC_CNTL =
		cgs_read_register(hwmgr->device, mmMPLL_AD_FUNC_CNTL);
	data->clock_registers.vMPLL_DQ_FUNC_CNTL =
		cgs_read_register(hwmgr->device, mmMPLL_DQ_FUNC_CNTL);
	data->clock_registers.vMPLL_FUNC_CNTL =
		cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL);
	data->clock_registers.vMPLL_FUNC_CNTL_1 =
		cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_1);
	data->clock_registers.vMPLL_FUNC_CNTL_2 =
		cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_2);
	data->clock_registers.vMPLL_SS1 =
		cgs_read_register(hwmgr->device, mmMPLL_SS1);
	data->clock_registers.vMPLL_SS2 =
		cgs_read_register(hwmgr->device, mmMPLL_SS2);
	return 0;
}

/**
 * Find out if memory is GDDR5.
 *
 * @param hwmgr  the address of the powerplay hardware manager.
 * @return   always 0
 */
static int smu7_get_memory_type(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	uint32_t temp;

	temp = cgs_read_register(hwmgr->device, mmMC_SEQ_MISC0);

	data->is_memory_gddr5 = (MC_SEQ_MISC0_GDDR5_VALUE ==
			((temp & MC_SEQ_MISC0_GDDR5_MASK) >>
			 MC_SEQ_MISC0_GDDR5_SHIFT));

	return 0;
}

/**
 * Enables Dynamic Power Management by SMC
 *
 * @param hwmgr  the address of the powerplay hardware manager.
 * @return   always 0
 */
static int smu7_enable_acpi_power_management(struct pp_hwmgr *hwmgr)
{
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			GENERAL_PWRMGT, STATIC_PM_EN, 1);

	return 0;
}

/**
 * Initialize PowerGating States for different engines
 *
 * @param hwmgr  the address of the powerplay hardware manager.
 * @return   always 0
 */
static int smu7_init_power_gate_state(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	data->uvd_power_gated = false;
	data->vce_power_gated = false;
	data->samu_power_gated = false;

	return 0;
}

static int smu7_init_sclk_threshold(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	data->low_sclk_interrupt_threshold = 0;
	return 0;
}

int smu7_setup_asic_task(struct pp_hwmgr *hwmgr)
{
	int tmp_result, result = 0;

	smu7_upload_mc_firmware(hwmgr);

	tmp_result = smu7_read_clock_registers(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to read clock registers!", result = tmp_result);

	tmp_result = smu7_get_memory_type(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to get memory type!", result = tmp_result);

	tmp_result = smu7_enable_acpi_power_management(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to enable ACPI power management!", result = tmp_result);

	tmp_result = smu7_init_power_gate_state(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to init power gate state!", result = tmp_result);

	tmp_result = smu7_get_mc_microcode_version(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to get MC microcode version!", result = tmp_result);

	tmp_result = smu7_init_sclk_threshold(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to init sclk threshold!", result = tmp_result);

	return result;
}

static int smu7_force_clock_level(struct pp_hwmgr *hwmgr,
		enum pp_clock_type type, uint32_t mask)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
		return -EINVAL;

	switch (type) {
	case PP_SCLK:
		if (!data->sclk_dpm_key_disabled)
			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
					PPSMC_MSG_SCLKDPM_SetEnabledMask,
					data->dpm_level_enable_mask.sclk_dpm_enable_mask & mask);
		break;
	case PP_MCLK:
		if (!data->mclk_dpm_key_disabled)
			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
					PPSMC_MSG_MCLKDPM_SetEnabledMask,
					data->dpm_level_enable_mask.mclk_dpm_enable_mask & mask);
		break;
	case PP_PCIE:
	{
		uint32_t tmp = mask & data->dpm_level_enable_mask.pcie_dpm_enable_mask;
		uint32_t level = 0;

		while (tmp >>= 1)
			level++;

		if (!data->pcie_dpm_key_disabled)
			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
					PPSMC_MSG_PCIeDPM_ForceLevel,
					level);
		break;
	}
	default:
		break;
	}

	return 0;
}

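/*
 * Informational example for the PP_PCIE branch above: the force-level
 * message takes a single level index rather than a mask, so the highest
 * set bit of the requested mask is used. A mask of 0x6 (levels 1 and 2)
 * walks tmp 6 -> 3 -> 1 -> 0, leaving level = 2, and that index is what
 * gets sent with PPSMC_MSG_PCIeDPM_ForceLevel.
 */
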
static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
		enum pp_clock_type type, char *buf)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
	struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
	struct smu7_single_dpm_table *pcie_table = &(data->dpm_table.pcie_speed_table);
	int i, now, size = 0;
	uint32_t clock, pcie_speed;

	switch (type) {
	case PP_SCLK:
		smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency);
		clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);

		for (i = 0; i < sclk_table->count; i++) {
			if (clock > sclk_table->dpm_levels[i].value)
				continue;
			break;
		}
		now = i;

		for (i = 0; i < sclk_table->count; i++)
			size += sprintf(buf + size, "%d: %uMhz %s\n",
					i, sclk_table->dpm_levels[i].value / 100,
					(i == now) ? "*" : "");
		break;
	case PP_MCLK:
		smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency);
		clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);

		for (i = 0; i < mclk_table->count; i++) {
			if (clock > mclk_table->dpm_levels[i].value)
				continue;
			break;
		}
		now = i;

		for (i = 0; i < mclk_table->count; i++)
			size += sprintf(buf + size, "%d: %uMhz %s\n",
					i, mclk_table->dpm_levels[i].value / 100,
					(i == now) ? "*" : "");
		break;
	case PP_PCIE:
		pcie_speed = smu7_get_current_pcie_speed(hwmgr);
		for (i = 0; i < pcie_table->count; i++) {
			if (pcie_speed != pcie_table->dpm_levels[i].value)
				continue;
			break;
		}
		now = i;

		for (i = 0; i < pcie_table->count; i++)
			size += sprintf(buf + size, "%d: %s %s\n", i,
					(pcie_table->dpm_levels[i].value == 0) ? "2.5GB, x8" :
					(pcie_table->dpm_levels[i].value == 1) ? "5.0GB, x16" :
					(pcie_table->dpm_levels[i].value == 2) ? "8.0GB, x16" : "",
					(i == now) ? "*" : "");
		break;
	default:
		break;
	}
	return size;
}

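/*
 * Informational example of the sysfs output produced above: DPM clock
 * values are stored in 10 kHz units, so an SCLK level of 30000 prints
 * as "0: 300Mhz", and the level the hardware currently runs at is
 * marked with a trailing "*".
 */
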
static int smu7_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
{
	if (mode) {
		/* stop auto-manage */
		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_MicrocodeFanControl))
			smu7_fan_ctrl_stop_smc_fan_control(hwmgr);
		smu7_fan_ctrl_set_static_mode(hwmgr, mode);
	} else
		/* restart auto-manage */
		smu7_fan_ctrl_reset_fan_speed_to_default(hwmgr);

	return 0;
}

static int smu7_get_fan_control_mode(struct pp_hwmgr *hwmgr)
{
	if (hwmgr->fan_ctrl_is_in_default_mode)
		return hwmgr->fan_ctrl_default_mode;
	else
		return PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
				CG_FDO_CTRL2, FDO_PWM_MODE);
}

static int smu7_get_sclk_od(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
	struct smu7_single_dpm_table *golden_sclk_table =
			&(data->golden_dpm_table.sclk_table);
	int value;

	value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
			golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) *
			100 /
			golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;

	return value;
}

static int smu7_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_single_dpm_table *golden_sclk_table =
			&(data->golden_dpm_table.sclk_table);
	struct pp_power_state *ps;
	struct smu7_power_state *smu7_ps;

	if (value > 20)
		value = 20;

	ps = hwmgr->request_ps;

	if (ps == NULL)
		return -EINVAL;

	smu7_ps = cast_phw_smu7_power_state(&ps->hardware);

	smu7_ps->performance_levels[smu7_ps->performance_level_count - 1].engine_clock =
			golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value *
			value / 100 +
			golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;

	return 0;
}

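/*
 * Worked example for the overdrive setters (informational): value is a
 * percentage clamped to 20, so with a golden top SCLK of 100000
 * (1 GHz in 10 kHz units) and value = 10, the requested state's top
 * level becomes 100000 * 10 / 100 + 100000 = 110000 (1.1 GHz); the
 * matching get_*_od helpers invert this to recover the percentage.
 */
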
static int smu7_get_mclk_od(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
	struct smu7_single_dpm_table *golden_mclk_table =
			&(data->golden_dpm_table.mclk_table);
	int value;

	value = (mclk_table->dpm_levels[mclk_table->count - 1].value -
			golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) *
			100 /
			golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;

	return value;
}

static int smu7_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_single_dpm_table *golden_mclk_table =
			&(data->golden_dpm_table.mclk_table);
	struct pp_power_state *ps;
	struct smu7_power_state *smu7_ps;

	if (value > 20)
		value = 20;

	ps = hwmgr->request_ps;

	if (ps == NULL)
		return -EINVAL;

	smu7_ps = cast_phw_smu7_power_state(&ps->hardware);

	smu7_ps->performance_levels[smu7_ps->performance_level_count - 1].memory_clock =
			golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value *
			value / 100 +
			golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;

	return 0;
}

static int smu7_get_sclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
{
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)hwmgr->pptable;
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table;
	int i;

	if (table_info == NULL)
		return -EINVAL;

	dep_sclk_table = table_info->vdd_dep_on_sclk;

	for (i = 0; i < dep_sclk_table->count; i++) {
		clocks->clock[i] = dep_sclk_table->entries[i].clk;
		clocks->count++;
	}
	return 0;
}

static uint32_t smu7_get_mem_latency(struct pp_hwmgr *hwmgr, uint32_t clk)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	if (clk >= MEM_FREQ_LOW_LATENCY && clk < MEM_FREQ_HIGH_LATENCY)
		return data->mem_latency_high;
	else if (clk >= MEM_FREQ_HIGH_LATENCY)
		return data->mem_latency_low;
	else
		return MEM_LATENCY_ERR;
}

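/*
 * Informational example: a memory clock of 30000 (300 MHz in 10 kHz
 * units) falls in [MEM_FREQ_LOW_LATENCY, MEM_FREQ_HIGH_LATENCY) and so
 * reports data->mem_latency_high; clocks at or above
 * MEM_FREQ_HIGH_LATENCY report mem_latency_low, and anything slower
 * returns MEM_LATENCY_ERR.
 */
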
static int smu7_get_mclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
{
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)hwmgr->pptable;
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
	int i;

	if (table_info == NULL)
		return -EINVAL;

	dep_mclk_table = table_info->vdd_dep_on_mclk;

	for (i = 0; i < dep_mclk_table->count; i++) {
		clocks->clock[i] = dep_mclk_table->entries[i].clk;
		clocks->latency[i] = smu7_get_mem_latency(hwmgr,
				dep_mclk_table->entries[i].clk);
		clocks->count++;
	}
	return 0;
}

static int smu7_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type,
		struct amd_pp_clocks *clocks)
{
	switch (type) {
	case amd_pp_sys_clock:
		smu7_get_sclks(hwmgr, clocks);
		break;
	case amd_pp_mem_clock:
		smu7_get_mclks(hwmgr, clocks);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
	.backend_init = smu7_hwmgr_backend_init,
	.backend_fini = phm_hwmgr_backend_fini,
	.asic_setup = smu7_setup_asic_task,
	.dynamic_state_management_enable = smu7_enable_dpm_tasks,
	.apply_state_adjust_rules = smu7_apply_state_adjust_rules,
	.force_dpm_level = smu7_force_dpm_level,
	.power_state_set = smu7_set_power_state_tasks,
	.get_power_state_size = smu7_get_power_state_size,
	.get_mclk = smu7_dpm_get_mclk,
	.get_sclk = smu7_dpm_get_sclk,
	.patch_boot_state = smu7_dpm_patch_boot_state,
	.get_pp_table_entry = smu7_get_pp_table_entry,
	.get_num_of_pp_table_entries = smu7_get_number_of_powerplay_table_entries,
	.powerdown_uvd = smu7_powerdown_uvd,
	.powergate_uvd = smu7_powergate_uvd,
	.powergate_vce = smu7_powergate_vce,
	.disable_clock_power_gating = smu7_disable_clock_power_gating,
	.update_clock_gatings = smu7_update_clock_gatings,
	.notify_smc_display_config_after_ps_adjustment = smu7_notify_smc_display_config_after_ps_adjustment,
	.display_config_changed = smu7_display_configuration_changed_task,
	.set_max_fan_pwm_output = smu7_set_max_fan_pwm_output,
	.set_max_fan_rpm_output = smu7_set_max_fan_rpm_output,
	.get_temperature = smu7_thermal_get_temperature,
	.stop_thermal_controller = smu7_thermal_stop_thermal_controller,
	.get_fan_speed_info = smu7_fan_ctrl_get_fan_speed_info,
	.get_fan_speed_percent = smu7_fan_ctrl_get_fan_speed_percent,
	.set_fan_speed_percent = smu7_fan_ctrl_set_fan_speed_percent,
	.reset_fan_speed_to_default = smu7_fan_ctrl_reset_fan_speed_to_default,
	.get_fan_speed_rpm = smu7_fan_ctrl_get_fan_speed_rpm,
	.set_fan_speed_rpm = smu7_fan_ctrl_set_fan_speed_rpm,
	.uninitialize_thermal_controller = smu7_thermal_ctrl_uninitialize_thermal_controller,
	.register_internal_thermal_interrupt = smu7_register_internal_thermal_interrupt,
	.check_smc_update_required_for_display_configuration = smu7_check_smc_update_required_for_display_configuration,
	.check_states_equal = smu7_check_states_equal,
	.set_fan_control_mode = smu7_set_fan_control_mode,
	.get_fan_control_mode = smu7_get_fan_control_mode,
	.force_clock_level = smu7_force_clock_level,
	.print_clock_levels = smu7_print_clock_levels,
	.enable_per_cu_power_gating = smu7_enable_per_cu_power_gating,
	.get_sclk_od = smu7_get_sclk_od,
	.set_sclk_od = smu7_set_sclk_od,
	.get_mclk_od = smu7_get_mclk_od,
	.set_mclk_od = smu7_set_mclk_od,
	.get_clock_by_type = smu7_get_clock_by_type,
	.read_sensor = smu7_read_sensor,
};

uint8_t smu7_get_sleep_divider_id_from_clock(uint32_t clock,
		uint32_t clock_insr)
{
	uint8_t i;
	uint32_t temp;
	uint32_t min = max(clock_insr, (uint32_t)SMU7_MINIMUM_ENGINE_CLOCK);

	PP_ASSERT_WITH_CODE((clock >= min),
			"Engine clock can't satisfy stutter requirement!",
			return 0);
	for (i = SMU7_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) {
		temp = clock >> i;

		if (temp >= min || i == 0)
			break;
	}
	return i;
}

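/*
 * Worked example (informational): the loop above picks the largest
 * divider id i for which clock >> i still meets the stutter-mode
 * minimum. With min = 5000 and clock = 80000, clock >> 4 = 5000, so
 * i = 4 is returned (assuming SMU7_MAX_DEEPSLEEP_DIVIDER_ID >= 4);
 * a clock below min trips the assert and returns divider id 0.
 */
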
int smu7_hwmgr_init(struct pp_hwmgr *hwmgr)
{
	int ret = 0;

	hwmgr->hwmgr_func = &smu7_hwmgr_funcs;
	if (hwmgr->pp_table_version == PP_TABLE_V0)
		hwmgr->pptable_func = &pptable_funcs;
	else if (hwmgr->pp_table_version == PP_TABLE_V1)
		hwmgr->pptable_func = &pptable_v1_0_funcs;

	pp_smu7_thermal_initialize(hwmgr);
	return ret;
}
