blob: 131b4733b112807e664255b5b69cae11381fe7de [file] [log] [blame]
Alex Deucheraaa36a92015-04-20 17:31:14 -04001/*
2 * Copyright 2014 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include <linux/firmware.h>
25#include <linux/seq_file.h>
26#include "drmP.h"
27#include "amdgpu.h"
28#include "amdgpu_pm.h"
29#include "amdgpu_atombios.h"
30#include "vid.h"
31#include "vi_dpm.h"
32#include "amdgpu_dpm.h"
33#include "cz_dpm.h"
34#include "cz_ppsmc.h"
35#include "atom.h"
36
37#include "smu/smu_8_0_d.h"
38#include "smu/smu_8_0_sh_mask.h"
39#include "gca/gfx_8_0_d.h"
40#include "gca/gfx_8_0_sh_mask.h"
41#include "gmc/gmc_8_1_d.h"
42#include "bif/bif_5_1_d.h"
43#include "gfx_v8_0.h"
44
Sonny Jiang564ea792015-05-12 16:13:35 -040045static void cz_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate);
46
Alex Deucheraaa36a92015-04-20 17:31:14 -040047static struct cz_ps *cz_get_ps(struct amdgpu_ps *rps)
48{
49 struct cz_ps *ps = rps->ps_priv;
50
51 return ps;
52}
53
54static struct cz_power_info *cz_get_pi(struct amdgpu_device *adev)
55{
56 struct cz_power_info *pi = adev->pm.dpm.priv;
57
58 return pi;
59}
60
/*
 * Convert an 8-bit voltage index from the BIOS tables to an absolute
 * voltage value.  The mapping is linear: 6200 at index 0, decreasing
 * by 25 per index step.  @adev is unused but kept for symmetry with
 * the other conversion helpers.
 */
static uint16_t cz_convert_8bit_index_to_voltage(struct amdgpu_device *adev,
						 uint16_t voltage)
{
	return 6200 - voltage * 25;
}
68
69static void cz_construct_max_power_limits_table(struct amdgpu_device *adev,
70 struct amdgpu_clock_and_voltage_limits *table)
71{
72 struct cz_power_info *pi = cz_get_pi(adev);
73 struct amdgpu_clock_voltage_dependency_table *dep_table =
74 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
75
76 if (dep_table->count > 0) {
77 table->sclk = dep_table->entries[dep_table->count - 1].clk;
78 table->vddc = cz_convert_8bit_index_to_voltage(adev,
79 dep_table->entries[dep_table->count - 1].v);
80 }
81
82 table->mclk = pi->sys_info.nbp_memory_clock[0];
83
84}
85
/*
 * Overlay of the ATOM IntegratedSystemInfo table revisions; which
 * member is valid depends on the table's crev.  Only v1.9 (info_9)
 * is accepted by cz_parse_sys_info_table().
 */
union igp_info {
	struct _ATOM_INTEGRATED_SYSTEM_INFO info;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_9 info_9;
};
92
93static int cz_parse_sys_info_table(struct amdgpu_device *adev)
94{
95 struct cz_power_info *pi = cz_get_pi(adev);
96 struct amdgpu_mode_info *mode_info = &adev->mode_info;
97 int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
98 union igp_info *igp_info;
99 u8 frev, crev;
100 u16 data_offset;
101 int i = 0;
102
103 if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
104 &frev, &crev, &data_offset)) {
105 igp_info = (union igp_info *)(mode_info->atom_context->bios +
106 data_offset);
107
108 if (crev != 9) {
109 DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev);
110 return -EINVAL;
111 }
112 pi->sys_info.bootup_sclk =
113 le32_to_cpu(igp_info->info_9.ulBootUpEngineClock);
114 pi->sys_info.bootup_uma_clk =
115 le32_to_cpu(igp_info->info_9.ulBootUpUMAClock);
116 pi->sys_info.dentist_vco_freq =
117 le32_to_cpu(igp_info->info_9.ulDentistVCOFreq);
118 pi->sys_info.bootup_nb_voltage_index =
119 le16_to_cpu(igp_info->info_9.usBootUpNBVoltage);
120
121 if (igp_info->info_9.ucHtcTmpLmt == 0)
122 pi->sys_info.htc_tmp_lmt = 203;
123 else
124 pi->sys_info.htc_tmp_lmt = igp_info->info_9.ucHtcTmpLmt;
125
126 if (igp_info->info_9.ucHtcHystLmt == 0)
127 pi->sys_info.htc_hyst_lmt = 5;
128 else
129 pi->sys_info.htc_hyst_lmt = igp_info->info_9.ucHtcHystLmt;
130
131 if (pi->sys_info.htc_tmp_lmt <= pi->sys_info.htc_hyst_lmt) {
132 DRM_ERROR("The htcTmpLmt should be larger than htcHystLmt.\n");
133 return -EINVAL;
134 }
135
136 if (le32_to_cpu(igp_info->info_9.ulSystemConfig) & (1 << 3) &&
137 pi->enable_nb_ps_policy)
138 pi->sys_info.nb_dpm_enable = true;
139 else
140 pi->sys_info.nb_dpm_enable = false;
141
142 for (i = 0; i < CZ_NUM_NBPSTATES; i++) {
143 if (i < CZ_NUM_NBPMEMORY_CLOCK)
144 pi->sys_info.nbp_memory_clock[i] =
145 le32_to_cpu(igp_info->info_9.ulNbpStateMemclkFreq[i]);
146 pi->sys_info.nbp_n_clock[i] =
147 le32_to_cpu(igp_info->info_9.ulNbpStateNClkFreq[i]);
148 }
149
150 for (i = 0; i < CZ_MAX_DISPLAY_CLOCK_LEVEL; i++)
151 pi->sys_info.display_clock[i] =
152 le32_to_cpu(igp_info->info_9.sDispClkVoltageMapping[i].ulMaximumSupportedCLK);
153
154 for (i = 0; i < CZ_NUM_NBPSTATES; i++)
155 pi->sys_info.nbp_voltage_index[i] =
156 le32_to_cpu(igp_info->info_9.usNBPStateVoltage[i]);
157
158 if (le32_to_cpu(igp_info->info_9.ulGPUCapInfo) &
159 SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS)
160 pi->caps_enable_dfs_bypass = true;
161
162 pi->sys_info.uma_channel_number =
163 igp_info->info_9.ucUMAChannelNumber;
164
165 cz_construct_max_power_limits_table(adev,
166 &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
167 }
168
169 return 0;
170}
171
172static void cz_patch_voltage_values(struct amdgpu_device *adev)
173{
174 int i;
175 struct amdgpu_uvd_clock_voltage_dependency_table *uvd_table =
176 &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
177 struct amdgpu_vce_clock_voltage_dependency_table *vce_table =
178 &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
179 struct amdgpu_clock_voltage_dependency_table *acp_table =
180 &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
181
182 if (uvd_table->count) {
183 for (i = 0; i < uvd_table->count; i++)
184 uvd_table->entries[i].v =
185 cz_convert_8bit_index_to_voltage(adev,
186 uvd_table->entries[i].v);
187 }
188
189 if (vce_table->count) {
190 for (i = 0; i < vce_table->count; i++)
191 vce_table->entries[i].v =
192 cz_convert_8bit_index_to_voltage(adev,
193 vce_table->entries[i].v);
194 }
195
196 if (acp_table->count) {
197 for (i = 0; i < acp_table->count; i++)
198 acp_table->entries[i].v =
199 cz_convert_8bit_index_to_voltage(adev,
200 acp_table->entries[i].v);
201 }
202
203}
204
205static void cz_construct_boot_state(struct amdgpu_device *adev)
206{
207 struct cz_power_info *pi = cz_get_pi(adev);
208
209 pi->boot_pl.sclk = pi->sys_info.bootup_sclk;
210 pi->boot_pl.vddc_index = pi->sys_info.bootup_nb_voltage_index;
211 pi->boot_pl.ds_divider_index = 0;
212 pi->boot_pl.ss_divider_index = 0;
213 pi->boot_pl.allow_gnb_slow = 1;
214 pi->boot_pl.force_nbp_state = 0;
215 pi->boot_pl.display_wm = 0;
216 pi->boot_pl.vce_wm = 0;
217
218}
219
220static void cz_patch_boot_state(struct amdgpu_device *adev,
221 struct cz_ps *ps)
222{
223 struct cz_power_info *pi = cz_get_pi(adev);
224
225 ps->num_levels = 1;
226 ps->levels[0] = pi->boot_pl;
227}
228
/*
 * Overlay of the per-ASIC PPLib clock-info layouts; CZ only ever
 * reads the carrizo member (see cz_parse_pplib_clock_info()).
 */
union pplib_clock_info {
	struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
	struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
	struct _ATOM_PPLIB_CZ_CLOCK_INFO carrizo;
};
234
235static void cz_parse_pplib_clock_info(struct amdgpu_device *adev,
236 struct amdgpu_ps *rps, int index,
237 union pplib_clock_info *clock_info)
238{
239 struct cz_power_info *pi = cz_get_pi(adev);
240 struct cz_ps *ps = cz_get_ps(rps);
241 struct cz_pl *pl = &ps->levels[index];
242 struct amdgpu_clock_voltage_dependency_table *table =
243 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
244
245 pl->sclk = table->entries[clock_info->carrizo.index].clk;
246 pl->vddc_index = table->entries[clock_info->carrizo.index].v;
247
248 ps->num_levels = index + 1;
249
250 if (pi->caps_sclk_ds) {
251 pl->ds_divider_index = 5;
252 pl->ss_divider_index = 5;
253 }
254
255}
256
257static void cz_parse_pplib_non_clock_info(struct amdgpu_device *adev,
258 struct amdgpu_ps *rps,
259 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
260 u8 table_rev)
261{
262 struct cz_ps *ps = cz_get_ps(rps);
263
264 rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
265 rps->class = le16_to_cpu(non_clock_info->usClassification);
266 rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
267
268 if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
269 rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
270 rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
271 } else {
272 rps->vclk = 0;
273 rps->dclk = 0;
274 }
275
276 if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
277 adev->pm.dpm.boot_ps = rps;
278 cz_patch_boot_state(adev, ps);
279 }
280 if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
281 adev->pm.dpm.uvd_ps = rps;
282
283}
284
/*
 * Overlay of the PowerPlay table revisions; cz_parse_power_table()
 * only dereferences the common pplib member.
 */
union power_info {
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
	struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
	struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
};

/*
 * Overlay of the PPLib state entry layouts; CZ uses the v2 layout
 * (variable-length clockInfoIndex array).
 */
union pplib_power_state {
	struct _ATOM_PPLIB_STATE v1;
	struct _ATOM_PPLIB_STATE_V2 v2;
};
297
298static int cz_parse_power_table(struct amdgpu_device *adev)
299{
300 struct amdgpu_mode_info *mode_info = &adev->mode_info;
301 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
302 union pplib_power_state *power_state;
303 int i, j, k, non_clock_array_index, clock_array_index;
304 union pplib_clock_info *clock_info;
305 struct _StateArray *state_array;
306 struct _ClockInfoArray *clock_info_array;
307 struct _NonClockInfoArray *non_clock_info_array;
308 union power_info *power_info;
309 int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
310 u16 data_offset;
311 u8 frev, crev;
312 u8 *power_state_offset;
313 struct cz_ps *ps;
314
315 if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
316 &frev, &crev, &data_offset))
317 return -EINVAL;
318 power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
319
320 state_array = (struct _StateArray *)
321 (mode_info->atom_context->bios + data_offset +
322 le16_to_cpu(power_info->pplib.usStateArrayOffset));
323 clock_info_array = (struct _ClockInfoArray *)
324 (mode_info->atom_context->bios + data_offset +
325 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
326 non_clock_info_array = (struct _NonClockInfoArray *)
327 (mode_info->atom_context->bios + data_offset +
328 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
329
330 adev->pm.dpm.ps = kzalloc(sizeof(struct amdgpu_ps) *
331 state_array->ucNumEntries, GFP_KERNEL);
332
333 if (!adev->pm.dpm.ps)
334 return -ENOMEM;
335
336 power_state_offset = (u8 *)state_array->states;
337 adev->pm.dpm.platform_caps =
338 le32_to_cpu(power_info->pplib.ulPlatformCaps);
339 adev->pm.dpm.backbias_response_time =
340 le16_to_cpu(power_info->pplib.usBackbiasTime);
341 adev->pm.dpm.voltage_response_time =
342 le16_to_cpu(power_info->pplib.usVoltageTime);
343
344 for (i = 0; i < state_array->ucNumEntries; i++) {
345 power_state = (union pplib_power_state *)power_state_offset;
346 non_clock_array_index = power_state->v2.nonClockInfoIndex;
347 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
348 &non_clock_info_array->nonClockInfo[non_clock_array_index];
349
350 ps = kzalloc(sizeof(struct cz_ps), GFP_KERNEL);
351 if (ps == NULL) {
352 kfree(adev->pm.dpm.ps);
353 return -ENOMEM;
354 }
355
356 adev->pm.dpm.ps[i].ps_priv = ps;
357 k = 0;
358 for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
359 clock_array_index = power_state->v2.clockInfoIndex[j];
360 if (clock_array_index >= clock_info_array->ucNumEntries)
361 continue;
362 if (k >= CZ_MAX_HARDWARE_POWERLEVELS)
363 break;
364 clock_info = (union pplib_clock_info *)
365 &clock_info_array->clockInfo[clock_array_index *
366 clock_info_array->ucEntrySize];
367 cz_parse_pplib_clock_info(adev, &adev->pm.dpm.ps[i],
368 k, clock_info);
369 k++;
370 }
371 cz_parse_pplib_non_clock_info(adev, &adev->pm.dpm.ps[i],
372 non_clock_info,
373 non_clock_info_array->ucEntrySize);
374 power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
375 }
376 adev->pm.dpm.num_ps = state_array->ucNumEntries;
377
378 return 0;
379}
380
381static int cz_process_firmware_header(struct amdgpu_device *adev)
382{
383 struct cz_power_info *pi = cz_get_pi(adev);
384 u32 tmp;
385 int ret;
386
387 ret = cz_read_smc_sram_dword(adev, SMU8_FIRMWARE_HEADER_LOCATION +
388 offsetof(struct SMU8_Firmware_Header,
389 DpmTable),
390 &tmp, pi->sram_end);
391
392 if (ret == 0)
393 pi->dpm_table_start = tmp;
394
395 return ret;
396}
397
/*
 * Allocate and populate the CZ power-management context: driver
 * defaults, BIOS system info, the power-state table and the SMU
 * firmware header.
 *
 * NOTE(review): error returns leave partially-initialized state
 * behind (pi is already hung off adev->pm.dpm.priv); cleanup relies
 * on the caller invoking cz_dpm_fini() on failure, which
 * cz_dpm_sw_init() does -- confirm for any new caller.
 */
static int cz_dpm_init(struct amdgpu_device *adev)
{
	struct cz_power_info *pi;
	int ret, i;

	pi = kzalloc(sizeof(struct cz_power_info), GFP_KERNEL);
	if (NULL == pi)
		return -ENOMEM;

	adev->pm.dpm.priv = pi;

	ret = amdgpu_get_platform_caps(adev);
	if (ret)
		return ret;

	ret = amdgpu_parse_extended_power_table(adev);
	if (ret)
		return ret;

	pi->sram_end = SMC_RAM_END;

	/* set up DPM defaults */
	for (i = 0; i < CZ_MAX_HARDWARE_POWERLEVELS; i++)
		pi->active_target[i] = CZ_AT_DFLT;

	pi->mgcg_cgtt_local0 = 0x0;
	pi->mgcg_cgtt_local1 = 0x0;
	pi->clock_slow_down_step = 25000;
	pi->skip_clock_slow_down = 1;
	pi->enable_nb_ps_policy = 1;
	pi->caps_power_containment = true;
	pi->caps_cac = true;
	/* DIDT (di/dt throttling) is off by default; the ramping caps
	 * below only take effect if didt_enabled is flipped on. */
	pi->didt_enabled = false;
	if (pi->didt_enabled) {
		pi->caps_sq_ramping = true;
		pi->caps_db_ramping = true;
		pi->caps_td_ramping = true;
		pi->caps_tcp_ramping = true;
	}
	pi->caps_sclk_ds = true;
	pi->voting_clients = 0x00c00033;
	pi->auto_thermal_throttling_enabled = true;
	pi->bapm_enabled = false;
	pi->disable_nb_ps3_in_battery = false;
	pi->voltage_drop_threshold = 0;
	pi->caps_sclk_throttle_low_notification = false;
	pi->gfx_pg_threshold = 500;
	pi->caps_fps = true;
	/* uvd: powergating only if the device advertises support */
	pi->caps_uvd_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_UVD) ? true : false;
	pi->caps_uvd_dpm = true;
	/* vce */
	pi->caps_vce_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_VCE) ? true : false;
	pi->caps_vce_dpm = true;
	/* acp */
	pi->caps_acp_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_ACP) ? true : false;
	pi->caps_acp_dpm = true;

	pi->caps_stable_power_state = false;
	pi->nb_dpm_enabled_by_driver = true;
	pi->nb_dpm_enabled = false;
	pi->caps_voltage_island = false;
	/* flags which indicate need to upload pptable */
	pi->need_pptable_upload = true;

	ret = cz_parse_sys_info_table(adev);
	if (ret)
		return ret;

	cz_patch_voltage_values(adev);
	cz_construct_boot_state(adev);

	ret = cz_parse_power_table(adev);
	if (ret)
		return ret;

	ret = cz_process_firmware_header(adev);
	if (ret)
		return ret;

	pi->dpm_enabled = true;
	pi->uvd_dynamic_pg = false;

	return 0;
}
483
484static void cz_dpm_fini(struct amdgpu_device *adev)
485{
486 int i;
487
488 for (i = 0; i < adev->pm.dpm.num_ps; i++)
489 kfree(adev->pm.dpm.ps[i].ps_priv);
490
491 kfree(adev->pm.dpm.ps);
492 kfree(adev->pm.dpm.priv);
493 amdgpu_free_extended_power_table(adev);
494}
495
496static void
497cz_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
498 struct seq_file *m)
499{
500 struct amdgpu_clock_voltage_dependency_table *table =
501 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
502 u32 current_index =
503 (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
504 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >>
505 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT;
506 u32 sclk, tmp;
507 u16 vddc;
508
509 if (current_index >= NUM_SCLK_LEVELS) {
510 seq_printf(m, "invalid dpm profile %d\n", current_index);
511 } else {
512 sclk = table->entries[current_index].clk;
513 tmp = (RREG32_SMC(ixSMU_VOLTAGE_STATUS) &
514 SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL_MASK) >>
515 SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL__SHIFT;
516 vddc = cz_convert_8bit_index_to_voltage(adev, (u16)tmp);
517 seq_printf(m, "power level %d sclk: %u vddc: %u\n",
518 current_index, sclk, vddc);
519 }
520}
521
/* Log a power state (class, caps, UVD clocks, and every sclk level)
 * via DRM_INFO for debugging. */
static void cz_dpm_print_power_state(struct amdgpu_device *adev,
				     struct amdgpu_ps *rps)
{
	int i;
	struct cz_ps *ps = cz_get_ps(rps);

	amdgpu_dpm_print_class_info(rps->class, rps->class2);
	amdgpu_dpm_print_cap_info(rps->caps);

	DRM_INFO("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
	for (i = 0; i < ps->num_levels; i++) {
		struct cz_pl *pl = &ps->levels[i];

		/* vddc_index is converted back to an absolute voltage */
		DRM_INFO("\t\tpower level %d sclk: %u vddc: %u\n",
			 i, pl->sclk,
			 cz_convert_8bit_index_to_voltage(adev, pl->vddc_index));
	}

	amdgpu_dpm_print_ps_status(adev, rps);
}
542
543static void cz_dpm_set_funcs(struct amdgpu_device *adev);
544
/* IP-block early_init: install the CZ DPM function table. */
static int cz_dpm_early_init(void *handle)
{
	struct amdgpu_device *adev = handle;

	cz_dpm_set_funcs(adev);

	return 0;
}
553
Sonny Jiang564ea792015-05-12 16:13:35 -0400554
yanyang15fc3aee2015-05-22 14:39:35 -0400555static int cz_dpm_late_init(void *handle)
Sonny Jiang564ea792015-05-12 16:13:35 -0400556{
yanyang15fc3aee2015-05-22 14:39:35 -0400557 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
558
Sonny Jiang564ea792015-05-12 16:13:35 -0400559 /* powerdown unused blocks for now */
560 cz_dpm_powergate_uvd(adev, true);
561
562 return 0;
563}
564
yanyang15fc3aee2015-05-22 14:39:35 -0400565static int cz_dpm_sw_init(void *handle)
Alex Deucheraaa36a92015-04-20 17:31:14 -0400566{
yanyang15fc3aee2015-05-22 14:39:35 -0400567 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
Alex Deucheraaa36a92015-04-20 17:31:14 -0400568 int ret = 0;
569 /* fix me to add thermal support TODO */
570
571 /* default to balanced state */
572 adev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
573 adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
574 adev->pm.dpm.forced_level = AMDGPU_DPM_FORCED_LEVEL_AUTO;
575 adev->pm.default_sclk = adev->clock.default_sclk;
576 adev->pm.default_mclk = adev->clock.default_mclk;
577 adev->pm.current_sclk = adev->clock.default_sclk;
578 adev->pm.current_mclk = adev->clock.default_mclk;
579 adev->pm.int_thermal_type = THERMAL_TYPE_NONE;
580
581 if (amdgpu_dpm == 0)
582 return 0;
583
584 mutex_lock(&adev->pm.mutex);
585 ret = cz_dpm_init(adev);
586 if (ret)
587 goto dpm_init_failed;
588
589 adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
590 if (amdgpu_dpm == 1)
591 amdgpu_pm_print_power_states(adev);
592
593 ret = amdgpu_pm_sysfs_init(adev);
594 if (ret)
595 goto dpm_init_failed;
596
597 mutex_unlock(&adev->pm.mutex);
598 DRM_INFO("amdgpu: dpm initialized\n");
599
600 return 0;
601
602dpm_init_failed:
603 cz_dpm_fini(adev);
604 mutex_unlock(&adev->pm.mutex);
605 DRM_ERROR("amdgpu: dpm initialization failed\n");
606
607 return ret;
608}
609
/* IP-block sw_fini: remove the pm sysfs entries, then free the DPM
 * state, all under the pm mutex. */
static int cz_dpm_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	mutex_lock(&adev->pm.mutex);
	/* sysfs first, so userspace can no longer reach the dpm state */
	amdgpu_pm_sysfs_fini(adev);
	cz_dpm_fini(adev);
	mutex_unlock(&adev->pm.mutex);

	return 0;
}
621
622static void cz_reset_ap_mask(struct amdgpu_device *adev)
623{
624 struct cz_power_info *pi = cz_get_pi(adev);
625
626 pi->active_process_mask = 0;
627
628}
629
/* Thin wrapper: the SMU layer performs the actual pptable download. */
static int cz_dpm_download_pptable_from_smu(struct amdgpu_device *adev,
					    void **table)
{
	return cz_smu_download_pptable(adev, table);
}
639
640static int cz_dpm_upload_pptable_to_smu(struct amdgpu_device *adev)
641{
642 struct cz_power_info *pi = cz_get_pi(adev);
643 struct SMU8_Fusion_ClkTable *clock_table;
644 struct atom_clock_dividers dividers;
645 void *table = NULL;
646 uint8_t i = 0;
647 int ret = 0;
648
649 struct amdgpu_clock_voltage_dependency_table *vddc_table =
650 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
651 struct amdgpu_clock_voltage_dependency_table *vddgfx_table =
652 &adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk;
653 struct amdgpu_uvd_clock_voltage_dependency_table *uvd_table =
654 &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
655 struct amdgpu_vce_clock_voltage_dependency_table *vce_table =
656 &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
657 struct amdgpu_clock_voltage_dependency_table *acp_table =
658 &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
659
660 if (!pi->need_pptable_upload)
661 return 0;
662
663 ret = cz_dpm_download_pptable_from_smu(adev, &table);
664 if (ret) {
665 DRM_ERROR("amdgpu: Failed to get power play table from SMU!\n");
666 return -EINVAL;
667 }
668
669 clock_table = (struct SMU8_Fusion_ClkTable *)table;
670 /* patch clock table */
671 if (vddc_table->count > CZ_MAX_HARDWARE_POWERLEVELS ||
672 vddgfx_table->count > CZ_MAX_HARDWARE_POWERLEVELS ||
673 uvd_table->count > CZ_MAX_HARDWARE_POWERLEVELS ||
674 vce_table->count > CZ_MAX_HARDWARE_POWERLEVELS ||
675 acp_table->count > CZ_MAX_HARDWARE_POWERLEVELS) {
676 DRM_ERROR("amdgpu: Invalid Clock Voltage Dependency Table!\n");
677 return -EINVAL;
678 }
679
680 for (i = 0; i < CZ_MAX_HARDWARE_POWERLEVELS; i++) {
681
682 /* vddc sclk */
683 clock_table->SclkBreakdownTable.ClkLevel[i].GnbVid =
684 (i < vddc_table->count) ? (uint8_t)vddc_table->entries[i].v : 0;
685 clock_table->SclkBreakdownTable.ClkLevel[i].Frequency =
686 (i < vddc_table->count) ? vddc_table->entries[i].clk : 0;
687 ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
688 clock_table->SclkBreakdownTable.ClkLevel[i].Frequency,
689 false, &dividers);
690 if (ret)
691 return ret;
692 clock_table->SclkBreakdownTable.ClkLevel[i].DfsDid =
693 (uint8_t)dividers.post_divider;
694
695 /* vddgfx sclk */
696 clock_table->SclkBreakdownTable.ClkLevel[i].GfxVid =
697 (i < vddgfx_table->count) ? (uint8_t)vddgfx_table->entries[i].v : 0;
698
699 /* acp breakdown */
700 clock_table->AclkBreakdownTable.ClkLevel[i].GfxVid =
701 (i < acp_table->count) ? (uint8_t)acp_table->entries[i].v : 0;
702 clock_table->AclkBreakdownTable.ClkLevel[i].Frequency =
703 (i < acp_table->count) ? acp_table->entries[i].clk : 0;
704 ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
705 clock_table->SclkBreakdownTable.ClkLevel[i].Frequency,
706 false, &dividers);
707 if (ret)
708 return ret;
709 clock_table->AclkBreakdownTable.ClkLevel[i].DfsDid =
710 (uint8_t)dividers.post_divider;
711
712 /* uvd breakdown */
713 clock_table->VclkBreakdownTable.ClkLevel[i].GfxVid =
714 (i < uvd_table->count) ? (uint8_t)uvd_table->entries[i].v : 0;
715 clock_table->VclkBreakdownTable.ClkLevel[i].Frequency =
716 (i < uvd_table->count) ? uvd_table->entries[i].vclk : 0;
717 ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
718 clock_table->VclkBreakdownTable.ClkLevel[i].Frequency,
719 false, &dividers);
720 if (ret)
721 return ret;
722 clock_table->VclkBreakdownTable.ClkLevel[i].DfsDid =
723 (uint8_t)dividers.post_divider;
724
725 clock_table->DclkBreakdownTable.ClkLevel[i].GfxVid =
726 (i < uvd_table->count) ? (uint8_t)uvd_table->entries[i].v : 0;
727 clock_table->DclkBreakdownTable.ClkLevel[i].Frequency =
728 (i < uvd_table->count) ? uvd_table->entries[i].dclk : 0;
729 ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
730 clock_table->DclkBreakdownTable.ClkLevel[i].Frequency,
731 false, &dividers);
732 if (ret)
733 return ret;
734 clock_table->DclkBreakdownTable.ClkLevel[i].DfsDid =
735 (uint8_t)dividers.post_divider;
736
737 /* vce breakdown */
738 clock_table->EclkBreakdownTable.ClkLevel[i].GfxVid =
739 (i < vce_table->count) ? (uint8_t)vce_table->entries[i].v : 0;
740 clock_table->EclkBreakdownTable.ClkLevel[i].Frequency =
741 (i < vce_table->count) ? vce_table->entries[i].ecclk : 0;
742 ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
743 clock_table->EclkBreakdownTable.ClkLevel[i].Frequency,
744 false, &dividers);
745 if (ret)
746 return ret;
747 clock_table->EclkBreakdownTable.ClkLevel[i].DfsDid =
748 (uint8_t)dividers.post_divider;
749 }
750
751 /* its time to upload to SMU */
752 ret = cz_smu_upload_pptable(adev);
753 if (ret) {
754 DRM_ERROR("amdgpu: Failed to put power play table to SMU!\n");
755 return ret;
756 }
757
758 return 0;
759}
760
761static void cz_init_sclk_limit(struct amdgpu_device *adev)
762{
763 struct cz_power_info *pi = cz_get_pi(adev);
764 struct amdgpu_clock_voltage_dependency_table *table =
765 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
766 uint32_t clock = 0, level;
767
768 if (!table || !table->count) {
769 DRM_ERROR("Invalid Voltage Dependency table.\n");
770 return;
771 }
772
773 pi->sclk_dpm.soft_min_clk = 0;
774 pi->sclk_dpm.hard_min_clk = 0;
775 cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxSclkLevel);
776 level = cz_get_argument(adev);
777 if (level < table->count)
778 clock = table->entries[level].clk;
779 else {
780 DRM_ERROR("Invalid SLCK Voltage Dependency table entry.\n");
781 clock = table->entries[table->count - 1].clk;
782 }
783
784 pi->sclk_dpm.soft_max_clk = clock;
785 pi->sclk_dpm.hard_max_clk = clock;
786
787}
788
789static void cz_init_uvd_limit(struct amdgpu_device *adev)
790{
791 struct cz_power_info *pi = cz_get_pi(adev);
792 struct amdgpu_uvd_clock_voltage_dependency_table *table =
793 &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
794 uint32_t clock = 0, level;
795
796 if (!table || !table->count) {
797 DRM_ERROR("Invalid Voltage Dependency table.\n");
798 return;
799 }
800
801 pi->uvd_dpm.soft_min_clk = 0;
802 pi->uvd_dpm.hard_min_clk = 0;
803 cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxUvdLevel);
804 level = cz_get_argument(adev);
805 if (level < table->count)
806 clock = table->entries[level].vclk;
807 else {
808 DRM_ERROR("Invalid UVD Voltage Dependency table entry.\n");
809 clock = table->entries[table->count - 1].vclk;
810 }
811
812 pi->uvd_dpm.soft_max_clk = clock;
813 pi->uvd_dpm.hard_max_clk = clock;
814
815}
816
817static void cz_init_vce_limit(struct amdgpu_device *adev)
818{
819 struct cz_power_info *pi = cz_get_pi(adev);
820 struct amdgpu_vce_clock_voltage_dependency_table *table =
821 &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
822 uint32_t clock = 0, level;
823
824 if (!table || !table->count) {
825 DRM_ERROR("Invalid Voltage Dependency table.\n");
826 return;
827 }
828
829 pi->vce_dpm.soft_min_clk = 0;
830 pi->vce_dpm.hard_min_clk = 0;
831 cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxEclkLevel);
832 level = cz_get_argument(adev);
833 if (level < table->count)
834 clock = table->entries[level].evclk;
835 else {
836 /* future BIOS would fix this error */
837 DRM_ERROR("Invalid VCE Voltage Dependency table entry.\n");
838 clock = table->entries[table->count - 1].evclk;
839 }
840
841 pi->vce_dpm.soft_max_clk = clock;
842 pi->vce_dpm.hard_max_clk = clock;
843
844}
845
846static void cz_init_acp_limit(struct amdgpu_device *adev)
847{
848 struct cz_power_info *pi = cz_get_pi(adev);
849 struct amdgpu_clock_voltage_dependency_table *table =
850 &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
851 uint32_t clock = 0, level;
852
853 if (!table || !table->count) {
854 DRM_ERROR("Invalid Voltage Dependency table.\n");
855 return;
856 }
857
858 pi->acp_dpm.soft_min_clk = 0;
859 pi->acp_dpm.hard_min_clk = 0;
860 cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxAclkLevel);
861 level = cz_get_argument(adev);
862 if (level < table->count)
863 clock = table->entries[level].clk;
864 else {
865 DRM_ERROR("Invalid ACP Voltage Dependency table entry.\n");
866 clock = table->entries[table->count - 1].clk;
867 }
868
869 pi->acp_dpm.soft_max_clk = clock;
870 pi->acp_dpm.hard_max_clk = clock;
871
872}
873
874static void cz_init_pg_state(struct amdgpu_device *adev)
875{
876 struct cz_power_info *pi = cz_get_pi(adev);
877
878 pi->uvd_power_gated = false;
879 pi->vce_power_gated = false;
880 pi->acp_power_gated = false;
881
882}
883
884static void cz_init_sclk_threshold(struct amdgpu_device *adev)
885{
886 struct cz_power_info *pi = cz_get_pi(adev);
887
888 pi->low_sclk_interrupt_threshold = 0;
889
890}
891
/* One-shot ASIC-side DPM setup: push the pptable to the SMU, then
 * snapshot the per-engine clock limits and reset soft state. */
static void cz_dpm_setup_asic(struct amdgpu_device *adev)
{
	cz_reset_ap_mask(adev);
	cz_dpm_upload_pptable_to_smu(adev);

	/* cache the SMU-reported min/max levels for each engine */
	cz_init_sclk_limit(adev);
	cz_init_uvd_limit(adev);
	cz_init_vce_limit(adev);
	cz_init_acp_limit(adev);

	cz_init_pg_state(adev);
	cz_init_sclk_threshold(adev);
}
904
905static bool cz_check_smu_feature(struct amdgpu_device *adev,
906 uint32_t feature)
907{
908 uint32_t smu_feature = 0;
909 int ret;
910
911 ret = cz_send_msg_to_smc_with_parameter(adev,
912 PPSMC_MSG_GetFeatureStatus, 0);
913 if (ret) {
914 DRM_ERROR("Failed to get SMU features from SMC.\n");
915 return false;
916 } else {
917 smu_feature = cz_get_argument(adev);
918 if (feature & smu_feature)
919 return true;
920 }
921
922 return false;
923}
924
925static bool cz_check_for_dpm_enabled(struct amdgpu_device *adev)
926{
927 if (cz_check_smu_feature(adev,
928 SMU_EnabledFeatureScoreboard_SclkDpmOn))
929 return true;
930
931 return false;
932}
933
/* Grant the default client set (PPCZ_VOTINGRIGHTSCLIENTS_DFLT0) the
 * right to vote on sclk frequency transitions. */
static void cz_program_voting_clients(struct amdgpu_device *adev)
{
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, PPCZ_VOTINGRIGHTSCLIENTS_DFLT0);
}
938
/* Revoke all sclk frequency-transition voting rights. */
static void cz_clear_voting_clients(struct amdgpu_device *adev)
{
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0);
}
943
944static int cz_start_dpm(struct amdgpu_device *adev)
945{
946 int ret = 0;
947
948 if (amdgpu_dpm) {
949 ret = cz_send_msg_to_smc_with_parameter(adev,
950 PPSMC_MSG_EnableAllSmuFeatures, SCLK_DPM_MASK);
951 if (ret) {
952 DRM_ERROR("SMU feature: SCLK_DPM enable failed\n");
953 return -EINVAL;
954 }
955 }
956
957 return 0;
958}
959
960static int cz_stop_dpm(struct amdgpu_device *adev)
961{
962 int ret = 0;
963
964 if (amdgpu_dpm && adev->pm.dpm_enabled) {
965 ret = cz_send_msg_to_smc_with_parameter(adev,
966 PPSMC_MSG_DisableAllSmuFeatures, SCLK_DPM_MASK);
967 if (ret) {
968 DRM_ERROR("SMU feature: SCLK_DPM disable failed\n");
969 return -EINVAL;
970 }
971 }
972
973 return 0;
974}
975
976static uint32_t cz_get_sclk_level(struct amdgpu_device *adev,
977 uint32_t clock, uint16_t msg)
978{
979 int i = 0;
980 struct amdgpu_clock_voltage_dependency_table *table =
981 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
982
983 switch (msg) {
984 case PPSMC_MSG_SetSclkSoftMin:
985 case PPSMC_MSG_SetSclkHardMin:
986 for (i = 0; i < table->count; i++)
987 if (clock <= table->entries[i].clk)
988 break;
989 if (i == table->count)
990 i = table->count - 1;
991 break;
992 case PPSMC_MSG_SetSclkSoftMax:
993 case PPSMC_MSG_SetSclkHardMax:
994 for (i = table->count - 1; i >= 0; i--)
995 if (clock >= table->entries[i].clk)
996 break;
997 if (i < 0)
998 i = 0;
999 break;
1000 default:
1001 break;
1002 }
1003
1004 return i;
1005}
1006
1007static int cz_program_bootup_state(struct amdgpu_device *adev)
1008{
1009 struct cz_power_info *pi = cz_get_pi(adev);
1010 uint32_t soft_min_clk = 0;
1011 uint32_t soft_max_clk = 0;
1012 int ret = 0;
1013
1014 pi->sclk_dpm.soft_min_clk = pi->sys_info.bootup_sclk;
1015 pi->sclk_dpm.soft_max_clk = pi->sys_info.bootup_sclk;
1016
1017 soft_min_clk = cz_get_sclk_level(adev,
1018 pi->sclk_dpm.soft_min_clk,
1019 PPSMC_MSG_SetSclkSoftMin);
1020 soft_max_clk = cz_get_sclk_level(adev,
1021 pi->sclk_dpm.soft_max_clk,
1022 PPSMC_MSG_SetSclkSoftMax);
1023
1024 ret = cz_send_msg_to_smc_with_parameter(adev,
1025 PPSMC_MSG_SetSclkSoftMin, soft_min_clk);
1026 if (ret)
1027 return -EINVAL;
1028
1029 ret = cz_send_msg_to_smc_with_parameter(adev,
1030 PPSMC_MSG_SetSclkSoftMax, soft_max_clk);
1031 if (ret)
1032 return -EINVAL;
1033
1034 return 0;
1035}
1036
/* TODO: stub — should disable clock/power gating before DIDT programming;
 * currently a no-op that always reports success.
 */
static int cz_disable_cgpg(struct amdgpu_device *adev)
{
	return 0;
}
1042
/* TODO: stub — should re-enable clock/power gating after DIDT programming;
 * currently a no-op that always reports success.
 */
static int cz_enable_cgpg(struct amdgpu_device *adev)
{
	return 0;
}
1048
/* TODO: stub — should program the power-tune (di/dt) config registers;
 * currently a no-op that always reports success.
 */
static int cz_program_pt_config_registers(struct amdgpu_device *adev)
{
	return 0;
}
1054
1055static void cz_do_enable_didt(struct amdgpu_device *adev, bool enable)
1056{
1057 struct cz_power_info *pi = cz_get_pi(adev);
1058 uint32_t reg = 0;
1059
1060 if (pi->caps_sq_ramping) {
1061 reg = RREG32_DIDT(ixDIDT_SQ_CTRL0);
1062 if (enable)
1063 reg = REG_SET_FIELD(reg, DIDT_SQ_CTRL0, DIDT_CTRL_EN, 1);
1064 else
1065 reg = REG_SET_FIELD(reg, DIDT_SQ_CTRL0, DIDT_CTRL_EN, 0);
1066 WREG32_DIDT(ixDIDT_SQ_CTRL0, reg);
1067 }
1068 if (pi->caps_db_ramping) {
1069 reg = RREG32_DIDT(ixDIDT_DB_CTRL0);
1070 if (enable)
1071 reg = REG_SET_FIELD(reg, DIDT_DB_CTRL0, DIDT_CTRL_EN, 1);
1072 else
1073 reg = REG_SET_FIELD(reg, DIDT_DB_CTRL0, DIDT_CTRL_EN, 0);
1074 WREG32_DIDT(ixDIDT_DB_CTRL0, reg);
1075 }
1076 if (pi->caps_td_ramping) {
1077 reg = RREG32_DIDT(ixDIDT_TD_CTRL0);
1078 if (enable)
1079 reg = REG_SET_FIELD(reg, DIDT_TD_CTRL0, DIDT_CTRL_EN, 1);
1080 else
1081 reg = REG_SET_FIELD(reg, DIDT_TD_CTRL0, DIDT_CTRL_EN, 0);
1082 WREG32_DIDT(ixDIDT_TD_CTRL0, reg);
1083 }
1084 if (pi->caps_tcp_ramping) {
1085 reg = RREG32_DIDT(ixDIDT_TCP_CTRL0);
1086 if (enable)
1087 reg = REG_SET_FIELD(reg, DIDT_SQ_CTRL0, DIDT_CTRL_EN, 1);
1088 else
1089 reg = REG_SET_FIELD(reg, DIDT_SQ_CTRL0, DIDT_CTRL_EN, 0);
1090 WREG32_DIDT(ixDIDT_TCP_CTRL0, reg);
1091 }
1092
1093}
1094
1095static int cz_enable_didt(struct amdgpu_device *adev, bool enable)
1096{
1097 struct cz_power_info *pi = cz_get_pi(adev);
1098 int ret;
1099
1100 if (pi->caps_sq_ramping || pi->caps_db_ramping ||
1101 pi->caps_td_ramping || pi->caps_tcp_ramping) {
1102 if (adev->gfx.gfx_current_status != AMDGPU_GFX_SAFE_MODE) {
1103 ret = cz_disable_cgpg(adev);
1104 if (ret) {
1105 DRM_ERROR("Pre Di/Dt disable cg/pg failed\n");
1106 return -EINVAL;
1107 }
1108 adev->gfx.gfx_current_status = AMDGPU_GFX_SAFE_MODE;
1109 }
1110
1111 ret = cz_program_pt_config_registers(adev);
1112 if (ret) {
1113 DRM_ERROR("Di/Dt config failed\n");
1114 return -EINVAL;
1115 }
1116 cz_do_enable_didt(adev, enable);
1117
1118 if (adev->gfx.gfx_current_status == AMDGPU_GFX_SAFE_MODE) {
1119 ret = cz_enable_cgpg(adev);
1120 if (ret) {
1121 DRM_ERROR("Post Di/Dt enable cg/pg failed\n");
1122 return -EINVAL;
1123 }
1124 adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;
1125 }
1126 }
1127
1128 return 0;
1129}
1130
/* TODO: stub — should reset the ACP (audio co-processor) boot DPM level;
 * currently a no-op.
 */
static void cz_reset_acp_boot_level(struct amdgpu_device *adev)
{
}
1135
1136static void cz_update_current_ps(struct amdgpu_device *adev,
1137 struct amdgpu_ps *rps)
1138{
1139 struct cz_power_info *pi = cz_get_pi(adev);
1140 struct cz_ps *ps = cz_get_ps(rps);
1141
1142 pi->current_ps = *ps;
1143 pi->current_rps = *rps;
1144 pi->current_rps.ps_priv = ps;
1145
1146}
1147
1148static void cz_update_requested_ps(struct amdgpu_device *adev,
1149 struct amdgpu_ps *rps)
1150{
1151 struct cz_power_info *pi = cz_get_pi(adev);
1152 struct cz_ps *ps = cz_get_ps(rps);
1153
1154 pi->requested_ps = *ps;
1155 pi->requested_rps = *rps;
1156 pi->requested_rps.ps_priv = ps;
1157
1158}
1159
1160/* PP arbiter support needed TODO */
1161static void cz_apply_state_adjust_rules(struct amdgpu_device *adev,
1162 struct amdgpu_ps *new_rps,
1163 struct amdgpu_ps *old_rps)
1164{
1165 struct cz_ps *ps = cz_get_ps(new_rps);
1166 struct cz_power_info *pi = cz_get_pi(adev);
1167 struct amdgpu_clock_and_voltage_limits *limits =
1168 &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
1169 /* 10kHz memory clock */
1170 uint32_t mclk = 0;
1171
1172 ps->force_high = false;
1173 ps->need_dfs_bypass = true;
1174 pi->video_start = new_rps->dclk || new_rps->vclk ||
1175 new_rps->evclk || new_rps->ecclk;
1176
1177 if ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) ==
1178 ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
1179 pi->battery_state = true;
1180 else
1181 pi->battery_state = false;
1182
1183 if (pi->caps_stable_power_state)
1184 mclk = limits->mclk;
1185
1186 if (mclk > pi->sys_info.nbp_memory_clock[CZ_NUM_NBPMEMORY_CLOCK - 1])
1187 ps->force_high = true;
1188
1189}
1190
1191static int cz_dpm_enable(struct amdgpu_device *adev)
1192{
1193 int ret = 0;
1194
1195 /* renable will hang up SMU, so check first */
1196 if (cz_check_for_dpm_enabled(adev))
1197 return -EINVAL;
1198
1199 cz_program_voting_clients(adev);
1200
1201 ret = cz_start_dpm(adev);
1202 if (ret) {
1203 DRM_ERROR("Carrizo DPM enable failed\n");
1204 return -EINVAL;
1205 }
1206
1207 ret = cz_program_bootup_state(adev);
1208 if (ret) {
1209 DRM_ERROR("Carrizo bootup state program failed\n");
1210 return -EINVAL;
1211 }
1212
1213 ret = cz_enable_didt(adev, true);
1214 if (ret) {
1215 DRM_ERROR("Carrizo enable di/dt failed\n");
1216 return -EINVAL;
1217 }
1218
1219 cz_reset_acp_boot_level(adev);
1220
1221 cz_update_current_ps(adev, adev->pm.dpm.boot_ps);
1222
1223 return 0;
1224}
1225
yanyang15fc3aee2015-05-22 14:39:35 -04001226static int cz_dpm_hw_init(void *handle)
Alex Deucheraaa36a92015-04-20 17:31:14 -04001227{
yanyang15fc3aee2015-05-22 14:39:35 -04001228 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
Sonny Jiang46651cc2015-04-30 17:12:14 -04001229 int ret = 0;
Alex Deucheraaa36a92015-04-20 17:31:14 -04001230
1231 mutex_lock(&adev->pm.mutex);
1232
Alex Deucher05188312015-06-09 17:32:53 -04001233 /* smu init only needs to be called at startup, not resume.
1234 * It should be in sw_init, but requires the fw info gathered
1235 * in sw_init from other IP modules.
1236 */
Alex Deucheraaa36a92015-04-20 17:31:14 -04001237 ret = cz_smu_init(adev);
1238 if (ret) {
1239 DRM_ERROR("amdgpu: smc initialization failed\n");
1240 mutex_unlock(&adev->pm.mutex);
1241 return ret;
1242 }
1243
1244 /* do the actual fw loading */
1245 ret = cz_smu_start(adev);
1246 if (ret) {
1247 DRM_ERROR("amdgpu: smc start failed\n");
1248 mutex_unlock(&adev->pm.mutex);
1249 return ret;
1250 }
1251
Sonny Jiang46651cc2015-04-30 17:12:14 -04001252 if (!amdgpu_dpm) {
1253 adev->pm.dpm_enabled = false;
1254 mutex_unlock(&adev->pm.mutex);
1255 return ret;
1256 }
1257
Alex Deucheraaa36a92015-04-20 17:31:14 -04001258 /* cz dpm setup asic */
1259 cz_dpm_setup_asic(adev);
1260
1261 /* cz dpm enable */
1262 ret = cz_dpm_enable(adev);
1263 if (ret)
1264 adev->pm.dpm_enabled = false;
1265 else
1266 adev->pm.dpm_enabled = true;
1267
1268 mutex_unlock(&adev->pm.mutex);
1269
1270 return 0;
1271}
1272
1273static int cz_dpm_disable(struct amdgpu_device *adev)
1274{
1275 int ret = 0;
1276
1277 if (!cz_check_for_dpm_enabled(adev))
1278 return -EINVAL;
1279
1280 ret = cz_enable_didt(adev, false);
1281 if (ret) {
1282 DRM_ERROR("Carrizo disable di/dt failed\n");
1283 return -EINVAL;
1284 }
1285
Sonny Jiang564ea792015-05-12 16:13:35 -04001286 /* powerup blocks */
1287 cz_dpm_powergate_uvd(adev, false);
1288
Alex Deucheraaa36a92015-04-20 17:31:14 -04001289 cz_clear_voting_clients(adev);
1290 cz_stop_dpm(adev);
1291 cz_update_current_ps(adev, adev->pm.dpm.boot_ps);
1292
1293 return 0;
1294}
1295
/*
 * hw_fini callback: tear the SMU down, and if DPM was running, disable
 * it and reset the bookkeeping power states to the boot state.  Runs
 * under pm.mutex; returns the cz_dpm_disable() result (0 otherwise).
 */
static int cz_dpm_hw_fini(void *handle)
{
	int ret = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	mutex_lock(&adev->pm.mutex);

	/* smu fini only needs to be called at teardown, not suspend.
	 * It should be in sw_fini, but we put it here for symmetry
	 * with smu init.
	 */
	cz_smu_fini(adev);

	if (adev->pm.dpm_enabled) {
		ret = cz_dpm_disable(adev);

		/* fall back to the boot state for bookkeeping */
		adev->pm.dpm.current_ps =
			adev->pm.dpm.requested_ps =
			adev->pm.dpm.boot_ps;
	}

	adev->pm.dpm_enabled = false;

	mutex_unlock(&adev->pm.mutex);

	return ret;
}
1323
yanyang15fc3aee2015-05-22 14:39:35 -04001324static int cz_dpm_suspend(void *handle)
Alex Deucheraaa36a92015-04-20 17:31:14 -04001325{
1326 int ret = 0;
yanyang15fc3aee2015-05-22 14:39:35 -04001327 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
Alex Deucheraaa36a92015-04-20 17:31:14 -04001328
1329 if (adev->pm.dpm_enabled) {
1330 mutex_lock(&adev->pm.mutex);
1331
1332 ret = cz_dpm_disable(adev);
Alex Deucheraaa36a92015-04-20 17:31:14 -04001333
1334 adev->pm.dpm.current_ps =
1335 adev->pm.dpm.requested_ps =
1336 adev->pm.dpm.boot_ps;
1337
1338 mutex_unlock(&adev->pm.mutex);
1339 }
1340
Alex Deucher10457452015-04-30 11:42:54 -04001341 return ret;
Alex Deucheraaa36a92015-04-20 17:31:14 -04001342}
1343
yanyang15fc3aee2015-05-22 14:39:35 -04001344static int cz_dpm_resume(void *handle)
Alex Deucheraaa36a92015-04-20 17:31:14 -04001345{
1346 int ret = 0;
yanyang15fc3aee2015-05-22 14:39:35 -04001347 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
Alex Deucheraaa36a92015-04-20 17:31:14 -04001348
1349 mutex_lock(&adev->pm.mutex);
Alex Deucheraaa36a92015-04-20 17:31:14 -04001350
1351 /* do the actual fw loading */
1352 ret = cz_smu_start(adev);
1353 if (ret) {
1354 DRM_ERROR("amdgpu: smc start failed\n");
1355 mutex_unlock(&adev->pm.mutex);
1356 return ret;
1357 }
1358
Sonny Jiang46651cc2015-04-30 17:12:14 -04001359 if (!amdgpu_dpm) {
1360 adev->pm.dpm_enabled = false;
1361 mutex_unlock(&adev->pm.mutex);
1362 return ret;
1363 }
1364
Alex Deucheraaa36a92015-04-20 17:31:14 -04001365 /* cz dpm setup asic */
1366 cz_dpm_setup_asic(adev);
1367
1368 /* cz dpm enable */
1369 ret = cz_dpm_enable(adev);
1370 if (ret)
1371 adev->pm.dpm_enabled = false;
1372 else
1373 adev->pm.dpm_enabled = true;
1374
1375 mutex_unlock(&adev->pm.mutex);
1376 /* upon resume, re-compute the clocks */
1377 if (adev->pm.dpm_enabled)
1378 amdgpu_pm_compute_clocks(adev);
1379
1380 return 0;
1381}
1382
/* amd_ip_funcs hook: Carrizo DPM has no clockgating control of its own,
 * so this accepts any state and reports success.
 */
static int cz_dpm_set_clockgating_state(void *handle,
					enum amd_clockgating_state state)
{
	return 0;
}
1388
/* amd_ip_funcs hook: Carrizo DPM has no powergating control of its own,
 * so this accepts any state and reports success.
 */
static int cz_dpm_set_powergating_state(void *handle,
					enum amd_powergating_state state)
{
	return 0;
}
1394
1395/* borrowed from KV, need future unify */
1396static int cz_dpm_get_temperature(struct amdgpu_device *adev)
1397{
1398 int actual_temp = 0;
1399 uint32_t temp = RREG32_SMC(0xC0300E0C);
1400
1401 if (temp)
1402 actual_temp = 1000 * ((temp / 8) - 49);
1403
1404 return actual_temp;
1405}
1406
1407static int cz_dpm_pre_set_power_state(struct amdgpu_device *adev)
1408{
1409 struct cz_power_info *pi = cz_get_pi(adev);
1410 struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps;
1411 struct amdgpu_ps *new_ps = &requested_ps;
1412
1413 cz_update_requested_ps(adev, new_ps);
1414 cz_apply_state_adjust_rules(adev, &pi->requested_rps,
1415 &pi->current_rps);
1416
1417 return 0;
1418}
1419
1420static int cz_dpm_update_sclk_limit(struct amdgpu_device *adev)
1421{
1422 struct cz_power_info *pi = cz_get_pi(adev);
1423 struct amdgpu_clock_and_voltage_limits *limits =
1424 &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
1425 uint32_t clock, stable_ps_clock = 0;
1426
1427 clock = pi->sclk_dpm.soft_min_clk;
1428
1429 if (pi->caps_stable_power_state) {
1430 stable_ps_clock = limits->sclk * 75 / 100;
1431 if (clock < stable_ps_clock)
1432 clock = stable_ps_clock;
1433 }
1434
1435 if (clock != pi->sclk_dpm.soft_min_clk) {
1436 pi->sclk_dpm.soft_min_clk = clock;
1437 cz_send_msg_to_smc_with_parameter(adev,
1438 PPSMC_MSG_SetSclkSoftMin,
1439 cz_get_sclk_level(adev, clock,
1440 PPSMC_MSG_SetSclkSoftMin));
1441 }
1442
1443 if (pi->caps_stable_power_state &&
1444 pi->sclk_dpm.soft_max_clk != clock) {
1445 pi->sclk_dpm.soft_max_clk = clock;
1446 cz_send_msg_to_smc_with_parameter(adev,
1447 PPSMC_MSG_SetSclkSoftMax,
1448 cz_get_sclk_level(adev, clock,
1449 PPSMC_MSG_SetSclkSoftMax));
1450 } else {
1451 cz_send_msg_to_smc_with_parameter(adev,
1452 PPSMC_MSG_SetSclkSoftMax,
1453 cz_get_sclk_level(adev,
1454 pi->sclk_dpm.soft_max_clk,
1455 PPSMC_MSG_SetSclkSoftMax));
1456 }
1457
1458 return 0;
1459}
1460
1461static int cz_dpm_set_deep_sleep_sclk_threshold(struct amdgpu_device *adev)
1462{
1463 int ret = 0;
1464 struct cz_power_info *pi = cz_get_pi(adev);
1465
1466 if (pi->caps_sclk_ds) {
1467 cz_send_msg_to_smc_with_parameter(adev,
1468 PPSMC_MSG_SetMinDeepSleepSclk,
1469 CZ_MIN_DEEP_SLEEP_SCLK);
1470 }
1471
1472 return ret;
1473}
1474
1475/* ?? without dal support, is this still needed in setpowerstate list*/
1476static int cz_dpm_set_watermark_threshold(struct amdgpu_device *adev)
1477{
1478 int ret = 0;
1479 struct cz_power_info *pi = cz_get_pi(adev);
1480
1481 cz_send_msg_to_smc_with_parameter(adev,
1482 PPSMC_MSG_SetWatermarkFrequency,
1483 pi->sclk_dpm.soft_max_clk);
1484
1485 return ret;
1486}
1487
1488static int cz_dpm_enable_nbdpm(struct amdgpu_device *adev)
1489{
1490 int ret = 0;
1491 struct cz_power_info *pi = cz_get_pi(adev);
1492
1493 /* also depend on dal NBPStateDisableRequired */
1494 if (pi->nb_dpm_enabled_by_driver && !pi->nb_dpm_enabled) {
1495 ret = cz_send_msg_to_smc_with_parameter(adev,
1496 PPSMC_MSG_EnableAllSmuFeatures,
1497 NB_DPM_MASK);
1498 if (ret) {
1499 DRM_ERROR("amdgpu: nb dpm enable failed\n");
1500 return ret;
1501 }
1502 pi->nb_dpm_enabled = true;
1503 }
1504
1505 return ret;
1506}
1507
1508static void cz_dpm_nbdpm_lm_pstate_enable(struct amdgpu_device *adev,
1509 bool enable)
1510{
1511 if (enable)
1512 cz_send_msg_to_smc(adev, PPSMC_MSG_EnableLowMemoryPstate);
1513 else
1514 cz_send_msg_to_smc(adev, PPSMC_MSG_DisableLowMemoryPstate);
1515
1516}
1517
1518static int cz_dpm_update_low_memory_pstate(struct amdgpu_device *adev)
1519{
1520 int ret = 0;
1521 struct cz_power_info *pi = cz_get_pi(adev);
1522 struct cz_ps *ps = &pi->requested_ps;
1523
1524 if (pi->sys_info.nb_dpm_enable) {
1525 if (ps->force_high)
1526 cz_dpm_nbdpm_lm_pstate_enable(adev, true);
1527 else
1528 cz_dpm_nbdpm_lm_pstate_enable(adev, false);
1529 }
1530
1531 return ret;
1532}
1533
/* with dpm enabled */
/*
 * set_power_state hook: push every derived limit for the requested
 * state down to the SMU, in dependency order.
 */
static int cz_dpm_set_power_state(struct amdgpu_device *adev)
{
	cz_dpm_update_sclk_limit(adev);
	cz_dpm_set_deep_sleep_sclk_threshold(adev);
	cz_dpm_set_watermark_threshold(adev);
	cz_dpm_enable_nbdpm(adev);
	cz_dpm_update_low_memory_pstate(adev);

	return 0;
}
1547
1548static void cz_dpm_post_set_power_state(struct amdgpu_device *adev)
1549{
1550 struct cz_power_info *pi = cz_get_pi(adev);
1551 struct amdgpu_ps *ps = &pi->requested_rps;
1552
1553 cz_update_current_ps(adev, ps);
1554
1555}
1556
1557static int cz_dpm_force_highest(struct amdgpu_device *adev)
1558{
1559 struct cz_power_info *pi = cz_get_pi(adev);
1560 int ret = 0;
1561
1562 if (pi->sclk_dpm.soft_min_clk != pi->sclk_dpm.soft_max_clk) {
1563 pi->sclk_dpm.soft_min_clk =
1564 pi->sclk_dpm.soft_max_clk;
1565 ret = cz_send_msg_to_smc_with_parameter(adev,
1566 PPSMC_MSG_SetSclkSoftMin,
1567 cz_get_sclk_level(adev,
1568 pi->sclk_dpm.soft_min_clk,
1569 PPSMC_MSG_SetSclkSoftMin));
1570 if (ret)
1571 return ret;
1572 }
1573
1574 return ret;
1575}
1576
1577static int cz_dpm_force_lowest(struct amdgpu_device *adev)
1578{
1579 struct cz_power_info *pi = cz_get_pi(adev);
1580 int ret = 0;
1581
1582 if (pi->sclk_dpm.soft_max_clk != pi->sclk_dpm.soft_min_clk) {
1583 pi->sclk_dpm.soft_max_clk = pi->sclk_dpm.soft_min_clk;
1584 ret = cz_send_msg_to_smc_with_parameter(adev,
1585 PPSMC_MSG_SetSclkSoftMax,
1586 cz_get_sclk_level(adev,
1587 pi->sclk_dpm.soft_max_clk,
1588 PPSMC_MSG_SetSclkSoftMax));
1589 if (ret)
1590 return ret;
1591 }
1592
1593 return ret;
1594}
1595
/*
 * Return the number of sclk DPM levels the SMU exposes, fetching and
 * caching the value on first use.
 *
 * NOTE(review): the overflow path returns -EINVAL through a uint32_t
 * return type, so callers see a huge positive value rather than a
 * negative error; cz_dpm_unforce_dpm_levels() relies on its own range
 * check to absorb that — confirm before changing either side.
 */
static uint32_t cz_dpm_get_max_sclk_level(struct amdgpu_device *adev)
{
	struct cz_power_info *pi = cz_get_pi(adev);

	/* lazily query the SMU and cache the answer */
	if (!pi->max_sclk_level) {
		cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxSclkLevel);
		pi->max_sclk_level = cz_get_argument(adev) + 1;
	}

	if (pi->max_sclk_level > CZ_MAX_HARDWARE_POWERLEVELS) {
		DRM_ERROR("Invalid max sclk level!\n");
		return -EINVAL;
	}

	return pi->max_sclk_level;
}
1612
1613static int cz_dpm_unforce_dpm_levels(struct amdgpu_device *adev)
1614{
1615 struct cz_power_info *pi = cz_get_pi(adev);
1616 struct amdgpu_clock_voltage_dependency_table *dep_table =
1617 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
1618 uint32_t level = 0;
1619 int ret = 0;
1620
1621 pi->sclk_dpm.soft_min_clk = dep_table->entries[0].clk;
1622 level = cz_dpm_get_max_sclk_level(adev) - 1;
1623 if (level < dep_table->count)
1624 pi->sclk_dpm.soft_max_clk = dep_table->entries[level].clk;
1625 else
1626 pi->sclk_dpm.soft_max_clk =
1627 dep_table->entries[dep_table->count - 1].clk;
1628
1629 /* get min/max sclk soft value
1630 * notify SMU to execute */
1631 ret = cz_send_msg_to_smc_with_parameter(adev,
1632 PPSMC_MSG_SetSclkSoftMin,
1633 cz_get_sclk_level(adev,
1634 pi->sclk_dpm.soft_min_clk,
1635 PPSMC_MSG_SetSclkSoftMin));
1636 if (ret)
1637 return ret;
1638
1639 ret = cz_send_msg_to_smc_with_parameter(adev,
1640 PPSMC_MSG_SetSclkSoftMax,
1641 cz_get_sclk_level(adev,
1642 pi->sclk_dpm.soft_max_clk,
1643 PPSMC_MSG_SetSclkSoftMax));
1644 if (ret)
1645 return ret;
1646
1647 DRM_INFO("DPM unforce state min=%d, max=%d.\n",
1648 pi->sclk_dpm.soft_min_clk,
1649 pi->sclk_dpm.soft_max_clk);
1650
1651 return 0;
1652}
1653
1654static int cz_dpm_force_dpm_level(struct amdgpu_device *adev,
1655 enum amdgpu_dpm_forced_level level)
1656{
1657 int ret = 0;
1658
1659 switch (level) {
1660 case AMDGPU_DPM_FORCED_LEVEL_HIGH:
1661 ret = cz_dpm_force_highest(adev);
1662 if (ret)
1663 return ret;
1664 break;
1665 case AMDGPU_DPM_FORCED_LEVEL_LOW:
1666 ret = cz_dpm_force_lowest(adev);
1667 if (ret)
1668 return ret;
1669 break;
1670 case AMDGPU_DPM_FORCED_LEVEL_AUTO:
1671 ret = cz_dpm_unforce_dpm_levels(adev);
1672 if (ret)
1673 return ret;
1674 break;
1675 default:
1676 break;
1677 }
1678
1679 return ret;
1680}
1681
/* fix me, display configuration change lists here
 * mostly dal related*/
/* Stub — display-configuration handling is not implemented yet. */
static void cz_dpm_display_configuration_changed(struct amdgpu_device *adev)
{
}
1687
1688static uint32_t cz_dpm_get_sclk(struct amdgpu_device *adev, bool low)
1689{
1690 struct cz_power_info *pi = cz_get_pi(adev);
1691 struct cz_ps *requested_state = cz_get_ps(&pi->requested_rps);
1692
1693 if (low)
1694 return requested_state->levels[0].sclk;
1695 else
1696 return requested_state->levels[requested_state->num_levels - 1].sclk;
1697
1698}
1699
1700static uint32_t cz_dpm_get_mclk(struct amdgpu_device *adev, bool low)
1701{
1702 struct cz_power_info *pi = cz_get_pi(adev);
1703
1704 return pi->sys_info.bootup_uma_clk;
1705}
1706
Sonny Jiang564ea792015-05-12 16:13:35 -04001707static int cz_enable_uvd_dpm(struct amdgpu_device *adev, bool enable)
1708{
1709 struct cz_power_info *pi = cz_get_pi(adev);
1710 int ret = 0;
1711
1712 if (enable && pi->caps_uvd_dpm ) {
1713 pi->dpm_flags |= DPMFlags_UVD_Enabled;
1714 DRM_DEBUG("UVD DPM Enabled.\n");
1715
1716 ret = cz_send_msg_to_smc_with_parameter(adev,
1717 PPSMC_MSG_EnableAllSmuFeatures, UVD_DPM_MASK);
1718 } else {
1719 pi->dpm_flags &= ~DPMFlags_UVD_Enabled;
1720 DRM_DEBUG("UVD DPM Stopped\n");
1721
1722 ret = cz_send_msg_to_smc_with_parameter(adev,
1723 PPSMC_MSG_DisableAllSmuFeatures, UVD_DPM_MASK);
1724 }
1725
1726 return ret;
1727}
1728
1729static int cz_update_uvd_dpm(struct amdgpu_device *adev, bool gate)
1730{
1731 return cz_enable_uvd_dpm(adev, !gate);
1732}
1733
1734
/*
 * Gate or ungate the UVD block.  The ordering is deliberate: on gate,
 * clockgating is lifted first so the block can be shut down cleanly,
 * then UVD DPM is stopped and finally the SMU cuts power; on ungate the
 * sequence runs in reverse.  Idempotent — repeated calls with the same
 * @gate value return immediately.
 */
static void cz_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate)
{
	struct cz_power_info *pi = cz_get_pi(adev);
	int ret;

	/* already in the requested gating state — nothing to do */
	if (pi->uvd_power_gated == gate)
		return;

	pi->uvd_power_gated = gate;

	if (gate) {
		if (pi->caps_uvd_pg) {
			/* disable clockgating so we can properly shut down the block */
			ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
							   AMD_CG_STATE_UNGATE);
			/* shutdown the UVD block */
			ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
							   AMD_PG_STATE_GATE);
			/* XXX: check for errors */
		}
		cz_update_uvd_dpm(adev, gate);
		if (pi->caps_uvd_pg)
			/* power off the UVD block */
			cz_send_msg_to_smc(adev, PPSMC_MSG_UVDPowerOFF);
	} else {
		if (pi->caps_uvd_pg) {
			/* power on the UVD block */
			if (pi->uvd_dynamic_pg)
				cz_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_UVDPowerON, 1);
			else
				cz_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_UVDPowerON, 0);
			/* re-init the UVD block */
			ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
							   AMD_PG_STATE_UNGATE);
			/* enable clockgating. hw will dynamically gate/ungate clocks on the fly */
			ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
							   AMD_CG_STATE_GATE);
			/* XXX: check for errors */
		}
		cz_update_uvd_dpm(adev, gate);
	}
}
1777
/* IP-block callback table registering the Carrizo DPM block with the
 * amdgpu core; unimplemented hooks stay NULL.
 */
const struct amd_ip_funcs cz_dpm_ip_funcs = {
	.early_init = cz_dpm_early_init,
	.late_init = cz_dpm_late_init,
	.sw_init = cz_dpm_sw_init,
	.sw_fini = cz_dpm_sw_fini,
	.hw_init = cz_dpm_hw_init,
	.hw_fini = cz_dpm_hw_fini,
	.suspend = cz_dpm_suspend,
	.resume = cz_dpm_resume,
	.is_idle = NULL,
	.wait_for_idle = NULL,
	.soft_reset = NULL,
	.print_status = NULL,
	.set_clockgating_state = cz_dpm_set_clockgating_state,
	.set_powergating_state = cz_dpm_set_powergating_state,
};
1794
/* DPM callback table wired into adev->pm.funcs by cz_dpm_set_funcs(). */
static const struct amdgpu_dpm_funcs cz_dpm_funcs = {
	.get_temperature = cz_dpm_get_temperature,
	.pre_set_power_state = cz_dpm_pre_set_power_state,
	.set_power_state = cz_dpm_set_power_state,
	.post_set_power_state = cz_dpm_post_set_power_state,
	.display_configuration_changed = cz_dpm_display_configuration_changed,
	.get_sclk = cz_dpm_get_sclk,
	.get_mclk = cz_dpm_get_mclk,
	.print_power_state = cz_dpm_print_power_state,
	.debugfs_print_current_performance_level =
			cz_dpm_debugfs_print_current_performance_level,
	.force_performance_level = cz_dpm_force_dpm_level,
	.vblank_too_short = NULL,
	.powergate_uvd = cz_dpm_powergate_uvd,
};
1810
1811static void cz_dpm_set_funcs(struct amdgpu_device *adev)
1812{
1813 if (NULL == adev->pm.funcs)
1814 adev->pm.funcs = &cz_dpm_funcs;
1815}