/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_i2c.h"
#include "amdgpu_dpm.h"
#include "atom.h"

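/*
 * Decode a power state's UI class and internal classification flags and
 * print them to the kernel log.
 */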
void amdgpu_dpm_print_class_info(u32 class, u32 class2)
{
	const char *s;

	switch (class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
	case ATOM_PPLIB_CLASSIFICATION_UI_NONE:
	default:
		s = "none";
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
		s = "battery";
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
		s = "balanced";
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
		s = "performance";
		break;
	}
	printk("\tui class: %s\n", s);
	printk("\tinternal class:");
	if (((class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 0) &&
	    (class2 == 0))
		pr_cont(" none");
	else {
		if (class & ATOM_PPLIB_CLASSIFICATION_BOOT)
			pr_cont(" boot");
		if (class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
			pr_cont(" thermal");
		if (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE)
			pr_cont(" limited_pwr");
		if (class & ATOM_PPLIB_CLASSIFICATION_REST)
			pr_cont(" rest");
		if (class & ATOM_PPLIB_CLASSIFICATION_FORCED)
			pr_cont(" forced");
		if (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
			pr_cont(" 3d_perf");
		if (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE)
			pr_cont(" ovrdrv");
		if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
			pr_cont(" uvd");
		if (class & ATOM_PPLIB_CLASSIFICATION_3DLOW)
			pr_cont(" 3d_low");
		if (class & ATOM_PPLIB_CLASSIFICATION_ACPI)
			pr_cont(" acpi");
		if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
			pr_cont(" uvd_hd2");
		if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
			pr_cont(" uvd_hd");
		if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
			pr_cont(" uvd_sd");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2)
			pr_cont(" limited_pwr2");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
			pr_cont(" ulv");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
			pr_cont(" uvd_mvc");
	}
	pr_cont("\n");
}

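/*
 * Print the capability flags of a power state (single display only,
 * video playback, not allowed on DC power).
 */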
void amdgpu_dpm_print_cap_info(u32 caps)
{
	printk("\tcaps:");
	if (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
		pr_cont(" single_disp");
	if (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK)
		pr_cont(" video");
	if (caps & ATOM_PPLIB_DISALLOW_ON_DC)
		pr_cont(" no_dc");
	pr_cont("\n");
}

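/*
 * Print whether rps is the (c)urrent, (r)equested and/or (b)oot
 * power state.
 */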
void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
				struct amdgpu_ps *rps)
{
	printk("\tstatus:");
	if (rps == adev->pm.dpm.current_ps)
		pr_cont(" c");
	if (rps == adev->pm.dpm.requested_ps)
		pr_cont(" r");
	if (rps == adev->pm.dpm.boot_ps)
		pr_cont(" b");
	pr_cont("\n");
}

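/*
 * Refresh the active display bookkeeping: record a bitmask and count of
 * the currently enabled CRTCs in adev->pm.dpm.new_active_crtcs and
 * adev->pm.dpm.new_active_crtc_count.
 */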
void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	adev->pm.dpm.new_active_crtcs = 0;
	adev->pm.dpm.new_active_crtc_count = 0;
	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc,
				    &ddev->mode_config.crtc_list, head) {
			amdgpu_crtc = to_amdgpu_crtc(crtc);
			if (amdgpu_crtc->enabled) {
				adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id);
				adev->pm.dpm.new_active_crtc_count++;
			}
		}
	}
}

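/*
 * Return the vblank period of the first active display, in microseconds.
 * If no display is active, return 0xffffffff so callers treat the vblank
 * time as unlimited.
 */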
u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;
	u32 vblank_in_pixels;
	u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */

	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
			amdgpu_crtc = to_amdgpu_crtc(crtc);
			if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
				vblank_in_pixels =
					amdgpu_crtc->hw_mode.crtc_htotal *
					(amdgpu_crtc->hw_mode.crtc_vblank_end -
					 amdgpu_crtc->hw_mode.crtc_vdisplay +
					 (amdgpu_crtc->v_border * 2));

				vblank_time_us = vblank_in_pixels * 1000 / amdgpu_crtc->hw_mode.clock;
				break;
			}
		}
	}

	return vblank_time_us;
}

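/*
 * Return the vertical refresh rate (in Hz) of the first active display,
 * or 0 if no display is active.
 */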
u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;
	u32 vrefresh = 0;

	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
			amdgpu_crtc = to_amdgpu_crtc(crtc);
			if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
				vrefresh = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
				break;
			}
		}
	}

	return vrefresh;
}

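/*
 * Helper shared by the per-ASIC dpm code: split i_c = (i * r_c) / 100
 * into *u and *p such that i_c is approximately *p * 4^*u (b_c counts
 * the bits of i_c above bit p_b). The register-level meaning of u and p
 * is defined by the callers.
 */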
void amdgpu_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
			      u32 *p, u32 *u)
{
	u32 b_c = 0;
	u32 i_c;
	u32 tmp;

	i_c = (i * r_c) / 100;
	tmp = i_c >> p_b;

	while (tmp) {
		b_c++;
		tmp >>= 1;
	}

	*u = (b_c + 1) / 2;
	*p = i_c / (1 << (2 * (*u)));
}

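/*
 * Compute a pair of thresholds (*tl, *th) around a target t from a
 * hysteresis-style percentage h and a high/low frequency pair (fh, fl).
 * Returns -EINVAL if either frequency is zero or if fl > fh; the exact
 * tuning semantics belong to the per-ASIC callers of this helper.
 */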
int amdgpu_calculate_at(u32 t, u32 h, u32 fh, u32 fl, u32 *tl, u32 *th)
{
	u32 k, a, ah, al;
	u32 t1;

	if ((fl == 0) || (fh == 0) || (fl > fh))
		return -EINVAL;

	k = (100 * fh) / fl;
	t1 = (t * (k - 100));
	a = (1000 * (100 * h + t1)) / (10000 + (t1 / 100));
	a = (a + 5) / 10;
	ah = ((a * t) + 5000) / 10000;
	al = a - ah;

	*th = t - ah;
	*tl = t + al;

	return 0;
}

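/*
 * True if the classification flags mark this power state as one of the
 * UVD (video decode) states.
 */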
bool amdgpu_is_uvd_state(u32 class, u32 class2)
{
	if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
		return true;
	if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
		return true;
	if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
		return true;
	if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
		return true;
	if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
		return true;
	return false;
}

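/*
 * Whether the thermal controller is an on-die sensor the driver handles
 * itself. Combo parts with both an external chip and an internal sensor
 * need special handling and report false here.
 */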
bool amdgpu_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor)
{
	switch (sensor) {
	case THERMAL_TYPE_RV6XX:
	case THERMAL_TYPE_RV770:
	case THERMAL_TYPE_EVERGREEN:
	case THERMAL_TYPE_SUMO:
	case THERMAL_TYPE_NI:
	case THERMAL_TYPE_SI:
	case THERMAL_TYPE_CI:
	case THERMAL_TYPE_KV:
		return true;
	case THERMAL_TYPE_ADT7473_WITH_INTERNAL:
	case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
		return false; /* need special handling */
	case THERMAL_TYPE_NONE:
	case THERMAL_TYPE_EXTERNAL:
	case THERMAL_TYPE_EXTERNAL_GPIO:
	default:
		return false;
	}
}

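/*
 * The PowerPlayInfo data table has gone through several revisions that
 * share a single master-table index; these unions let the parser address
 * whichever layout the vBIOS actually carries.
 */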
union power_info {
	struct _ATOM_POWERPLAY_INFO info;
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
	struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
	struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
};

union fan_info {
	struct _ATOM_PPLIB_FANTABLE fan;
	struct _ATOM_PPLIB_FANTABLE2 fan2;
	struct _ATOM_PPLIB_FANTABLE3 fan3;
};

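/*
 * Convert an ATOM clock/voltage dependency table into the driver's
 * native format. Allocates amdgpu_table->entries, which is freed later
 * through amdgpu_free_extended_power_table().
 */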
static int amdgpu_parse_clk_voltage_dep_table(struct amdgpu_clock_voltage_dependency_table *amdgpu_table,
					      ATOM_PPLIB_Clock_Voltage_Dependency_Table *atom_table)
{
	u32 size = atom_table->ucNumEntries *
		sizeof(struct amdgpu_clock_voltage_dependency_entry);
	int i;
	ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry;

	amdgpu_table->entries = kzalloc(size, GFP_KERNEL);
	if (!amdgpu_table->entries)
		return -ENOMEM;

	entry = &atom_table->entries[0];
	for (i = 0; i < atom_table->ucNumEntries; i++) {
		amdgpu_table->entries[i].clk = le16_to_cpu(entry->usClockLow) |
			(entry->ucClockHigh << 16);
		amdgpu_table->entries[i].v = le16_to_cpu(entry->usVoltage);
		entry = (ATOM_PPLIB_Clock_Voltage_Dependency_Record *)
			((u8 *)entry + sizeof(ATOM_PPLIB_Clock_Voltage_Dependency_Record));
	}
	amdgpu_table->count = atom_table->ucNumEntries;

	return 0;
}

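/*
 * Cache the global PowerPlay platform caps and the back-bias/voltage
 * response times from the vBIOS PowerPlayInfo table.
 */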
int amdgpu_get_platform_caps(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	union power_info *power_info;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;

	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					   &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	adev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
	adev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
	adev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);

	return 0;
}

/* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8 24
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V9 26

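/*
 * Parse the optional PowerPlay sub-tables out of the vBIOS: fan table,
 * clock/voltage dependency and phase shedding limits, CAC leakage data,
 * and the extended-header tables (VCE, UVD, SAMU, PPM, ACP, PowerTune,
 * SCLK/VDDGFX), gated on the table size/revision the vBIOS advertises.
 */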
int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	union power_info *power_info;
	union fan_info *fan_info;
	ATOM_PPLIB_Clock_Voltage_Dependency_Table *dep_table;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;
	int ret, i;

	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					   &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	/* fan table */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
		if (power_info->pplib3.usFanTableOffset) {
			fan_info = (union fan_info *)(mode_info->atom_context->bios + data_offset +
						      le16_to_cpu(power_info->pplib3.usFanTableOffset));
			adev->pm.dpm.fan.t_hyst = fan_info->fan.ucTHyst;
			adev->pm.dpm.fan.t_min = le16_to_cpu(fan_info->fan.usTMin);
			adev->pm.dpm.fan.t_med = le16_to_cpu(fan_info->fan.usTMed);
			adev->pm.dpm.fan.t_high = le16_to_cpu(fan_info->fan.usTHigh);
			adev->pm.dpm.fan.pwm_min = le16_to_cpu(fan_info->fan.usPWMMin);
			adev->pm.dpm.fan.pwm_med = le16_to_cpu(fan_info->fan.usPWMMed);
			adev->pm.dpm.fan.pwm_high = le16_to_cpu(fan_info->fan.usPWMHigh);
			if (fan_info->fan.ucFanTableFormat >= 2)
				adev->pm.dpm.fan.t_max = le16_to_cpu(fan_info->fan2.usTMax);
			else
				adev->pm.dpm.fan.t_max = 10900;
			adev->pm.dpm.fan.cycle_delay = 100000;
			if (fan_info->fan.ucFanTableFormat >= 3) {
				adev->pm.dpm.fan.control_mode = fan_info->fan3.ucFanControlMode;
				adev->pm.dpm.fan.default_max_fan_pwm =
					le16_to_cpu(fan_info->fan3.usFanPWMMax);
				adev->pm.dpm.fan.default_fan_output_sensitivity = 4836;
				adev->pm.dpm.fan.fan_output_sensitivity =
					le16_to_cpu(fan_info->fan3.usFanOutputSensitivity);
			}
			adev->pm.dpm.fan.ucode_fan_control = true;
		}
	}

	/* clock dependency tables, shedding tables */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE4)) {
		if (power_info->pplib4.usVddcDependencyOnSCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usVddciDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usVddcDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usMvddDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usMaxClockVoltageOnDCOffset) {
			ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v =
				(ATOM_PPLIB_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usMaxClockVoltageOnDCOffset));
			if (clk_v->ucNumEntries) {
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk =
					le16_to_cpu(clk_v->entries[0].usSclkLow) |
					(clk_v->entries[0].ucSclkHigh << 16);
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk =
					le16_to_cpu(clk_v->entries[0].usMclkLow) |
					(clk_v->entries[0].ucMclkHigh << 16);
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc =
					le16_to_cpu(clk_v->entries[0].usVddc);
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddci =
					le16_to_cpu(clk_v->entries[0].usVddci);
			}
		}
		if (power_info->pplib4.usVddcPhaseShedLimitsTableOffset) {
			ATOM_PPLIB_PhaseSheddingLimits_Table *psl =
				(ATOM_PPLIB_PhaseSheddingLimits_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcPhaseShedLimitsTableOffset));
			ATOM_PPLIB_PhaseSheddingLimits_Record *entry;

			adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries =
				kcalloc(psl->ucNumEntries,
					sizeof(struct amdgpu_phase_shedding_limits_entry),
					GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}

			entry = &psl->entries[0];
			for (i = 0; i < psl->ucNumEntries; i++) {
				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk =
					le16_to_cpu(entry->usSclkLow) | (entry->ucSclkHigh << 16);
				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk =
					le16_to_cpu(entry->usMclkLow) | (entry->ucMclkHigh << 16);
				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_PhaseSheddingLimits_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_PhaseSheddingLimits_Record));
			}
			adev->pm.dpm.dyn_state.phase_shedding_limits_table.count =
				psl->ucNumEntries;
		}
	}

	/* cac data */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE5)) {
		adev->pm.dpm.tdp_limit = le32_to_cpu(power_info->pplib5.ulTDPLimit);
		adev->pm.dpm.near_tdp_limit = le32_to_cpu(power_info->pplib5.ulNearTDPLimit);
		adev->pm.dpm.near_tdp_limit_adjusted = adev->pm.dpm.near_tdp_limit;
		adev->pm.dpm.tdp_od_limit = le16_to_cpu(power_info->pplib5.usTDPODLimit);
		if (adev->pm.dpm.tdp_od_limit)
			adev->pm.dpm.power_control = true;
		else
			adev->pm.dpm.power_control = false;
		adev->pm.dpm.tdp_adjustment = 0;
		adev->pm.dpm.sq_ramping_threshold = le32_to_cpu(power_info->pplib5.ulSQRampingThreshold);
		adev->pm.dpm.cac_leakage = le32_to_cpu(power_info->pplib5.ulCACLeakage);
		adev->pm.dpm.load_line_slope = le16_to_cpu(power_info->pplib5.usLoadLineSlope);
		if (power_info->pplib5.usCACLeakageTableOffset) {
			ATOM_PPLIB_CAC_Leakage_Table *cac_table =
				(ATOM_PPLIB_CAC_Leakage_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib5.usCACLeakageTableOffset));
			ATOM_PPLIB_CAC_Leakage_Record *entry;
			u32 size = cac_table->ucNumEntries * sizeof(struct amdgpu_cac_leakage_table);
			adev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			entry = &cac_table->entries[0];
			for (i = 0; i < cac_table->ucNumEntries; i++) {
				if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1 =
						le16_to_cpu(entry->usVddc1);
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2 =
						le16_to_cpu(entry->usVddc2);
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3 =
						le16_to_cpu(entry->usVddc3);
				} else {
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc =
						le16_to_cpu(entry->usVddc);
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage =
						le32_to_cpu(entry->ulLeakageValue);
				}
				entry = (ATOM_PPLIB_CAC_Leakage_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_CAC_Leakage_Record));
			}
			adev->pm.dpm.dyn_state.cac_leakage_table.count = cac_table->ucNumEntries;
		}
	}

	/* ext tables */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
		ATOM_PPLIB_EXTENDEDHEADER *ext_hdr = (ATOM_PPLIB_EXTENDEDHEADER *)
			(mode_info->atom_context->bios + data_offset +
			 le16_to_cpu(power_info->pplib3.usExtendendedHeaderOffset));
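		/* vce clock voltage limits and vce states */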
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2) &&
		    ext_hdr->usVCETableOffset) {
			VCEClockInfoArray *array = (VCEClockInfoArray *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1);
			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
				 1 + array->ucNumEntries * sizeof(VCEClockInfo));
			ATOM_PPLIB_VCE_State_Table *states =
				(ATOM_PPLIB_VCE_State_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
				 1 + (array->ucNumEntries * sizeof(VCEClockInfo)) +
				 1 + (limits->numEntries * sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record)));
			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *entry;
			ATOM_PPLIB_VCE_State_Record *state_entry;
			VCEClockInfo *vce_clk;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_vce_clock_voltage_dependency_entry);
			adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			state_entry = &states->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				vce_clk = (VCEClockInfo *)
					((u8 *)&array->entries[0] +
					 (entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk =
					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].ecclk =
					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record));
			}
			adev->pm.dpm.num_of_vce_states =
				states->numEntries > AMD_MAX_VCE_LEVELS ?
				AMD_MAX_VCE_LEVELS : states->numEntries;
			for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
				vce_clk = (VCEClockInfo *)
					((u8 *)&array->entries[0] +
					 (state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
				adev->pm.dpm.vce_states[i].evclk =
					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
				adev->pm.dpm.vce_states[i].ecclk =
					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
				adev->pm.dpm.vce_states[i].clk_idx =
					state_entry->ucClockInfoIndex & 0x3f;
				adev->pm.dpm.vce_states[i].pstate =
					(state_entry->ucClockInfoIndex & 0xc0) >> 6;
				state_entry = (ATOM_PPLIB_VCE_State_Record *)
					((u8 *)state_entry + sizeof(ATOM_PPLIB_VCE_State_Record));
			}
		}
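		/* uvd clock voltage limit table */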
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3) &&
		    ext_hdr->usUVDTableOffset) {
			UVDClockInfoArray *array = (UVDClockInfoArray *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1);
			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1 +
				 1 + (array->ucNumEntries * sizeof(UVDClockInfo)));
			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_uvd_clock_voltage_dependency_entry);
			adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				UVDClockInfo *uvd_clk = (UVDClockInfo *)
					((u8 *)&array->entries[0] +
					 (entry->ucUVDClockInfoIndex * sizeof(UVDClockInfo)));
				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].vclk =
					le16_to_cpu(uvd_clk->usVClkLow) | (uvd_clk->ucVClkHigh << 16);
				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk =
					le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16);
				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record));
			}
		}
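		/* samu clock voltage limit table */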
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4) &&
		    ext_hdr->usSAMUTableOffset) {
			ATOM_PPLIB_SAMClk_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_SAMClk_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usSAMUTableOffset) + 1);
			ATOM_PPLIB_SAMClk_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_clock_voltage_dependency_entry);
			adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].clk =
					le16_to_cpu(entry->usSAMClockLow) | (entry->ucSAMClockHigh << 16);
				adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_SAMClk_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_SAMClk_Voltage_Limit_Record));
			}
		}
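		/* ppm (platform power management) table */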
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) &&
		    ext_hdr->usPPMTableOffset) {
			ATOM_PPLIB_PPM_Table *ppm = (ATOM_PPLIB_PPM_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usPPMTableOffset));
			adev->pm.dpm.dyn_state.ppm_table =
				kzalloc(sizeof(struct amdgpu_ppm_table), GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.ppm_table) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign;
			adev->pm.dpm.dyn_state.ppm_table->cpu_core_number =
				le16_to_cpu(ppm->usCpuCoreNumber);
			adev->pm.dpm.dyn_state.ppm_table->platform_tdp =
				le32_to_cpu(ppm->ulPlatformTDP);
			adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdp =
				le32_to_cpu(ppm->ulSmallACPlatformTDP);
			adev->pm.dpm.dyn_state.ppm_table->platform_tdc =
				le32_to_cpu(ppm->ulPlatformTDC);
			adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdc =
				le32_to_cpu(ppm->ulSmallACPlatformTDC);
			adev->pm.dpm.dyn_state.ppm_table->apu_tdp =
				le32_to_cpu(ppm->ulApuTDP);
			adev->pm.dpm.dyn_state.ppm_table->dgpu_tdp =
				le32_to_cpu(ppm->ulDGpuTDP);
			adev->pm.dpm.dyn_state.ppm_table->dgpu_ulv_power =
				le32_to_cpu(ppm->ulDGpuUlvPower);
			adev->pm.dpm.dyn_state.ppm_table->tj_max =
				le32_to_cpu(ppm->ulTjmax);
		}
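		/* acp clock voltage limit table */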
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6) &&
		    ext_hdr->usACPTableOffset) {
			ATOM_PPLIB_ACPClk_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_ACPClk_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usACPTableOffset) + 1);
			ATOM_PPLIB_ACPClk_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_clock_voltage_dependency_entry);
			adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].clk =
					le16_to_cpu(entry->usACPClockLow) | (entry->ucACPClockHigh << 16);
				adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_ACPClk_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_ACPClk_Voltage_Limit_Record));
			}
		}
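		/* powertune (cac tdp) table */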
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7) &&
		    ext_hdr->usPowerTuneTableOffset) {
			u8 rev = *(u8 *)(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
			ATOM_PowerTune_Table *pt;
			adev->pm.dpm.dyn_state.cac_tdp_table =
				kzalloc(sizeof(struct amdgpu_cac_tdp_table), GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.cac_tdp_table) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			if (rev > 0) {
				ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *)
					(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
				adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit =
					ppt->usMaximumPowerDeliveryLimit;
				pt = &ppt->power_tune_table;
			} else {
				ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *)
					(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
				adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = 255;
				pt = &ppt->power_tune_table;
			}
			adev->pm.dpm.dyn_state.cac_tdp_table->tdp = le16_to_cpu(pt->usTDP);
			adev->pm.dpm.dyn_state.cac_tdp_table->configurable_tdp =
				le16_to_cpu(pt->usConfigurableTDP);
			adev->pm.dpm.dyn_state.cac_tdp_table->tdc = le16_to_cpu(pt->usTDC);
			adev->pm.dpm.dyn_state.cac_tdp_table->battery_power_limit =
				le16_to_cpu(pt->usBatteryPowerLimit);
			adev->pm.dpm.dyn_state.cac_tdp_table->small_power_limit =
				le16_to_cpu(pt->usSmallPowerLimit);
			adev->pm.dpm.dyn_state.cac_tdp_table->low_cac_leakage =
				le16_to_cpu(pt->usLowCACLeakage);
			adev->pm.dpm.dyn_state.cac_tdp_table->high_cac_leakage =
				le16_to_cpu(pt->usHighCACLeakage);
		}
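		/* sclk/vddgfx dependency table */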
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8) &&
		    ext_hdr->usSclkVddgfxTableOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usSclkVddgfxTableOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(
					&adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk,
					dep_table);
			if (ret) {
				kfree(adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk.entries);
				return ret;
			}
		}
	}

	return 0;
}

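/*
 * Free everything amdgpu_parse_extended_power_table() and
 * amdgpu_parse_clk_voltage_dep_table() allocated.
 */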
void amdgpu_free_extended_power_table(struct amdgpu_device *adev)
{
	struct amdgpu_dpm_dynamic_state *dyn_state = &adev->pm.dpm.dyn_state;

	kfree(dyn_state->vddc_dependency_on_sclk.entries);
	kfree(dyn_state->vddci_dependency_on_mclk.entries);
	kfree(dyn_state->vddc_dependency_on_mclk.entries);
	kfree(dyn_state->mvdd_dependency_on_mclk.entries);
	kfree(dyn_state->cac_leakage_table.entries);
	kfree(dyn_state->phase_shedding_limits_table.entries);
	kfree(dyn_state->ppm_table);
	kfree(dyn_state->cac_tdp_table);
	kfree(dyn_state->vce_clock_voltage_dependency_table.entries);
	kfree(dyn_state->uvd_clock_voltage_dependency_table.entries);
	kfree(dyn_state->samu_clock_voltage_dependency_table.entries);
	kfree(dyn_state->acp_clock_voltage_dependency_table.entries);
	kfree(dyn_state->vddgfx_dependency_on_sclk.entries);
}

static const char *pp_lib_thermal_controller_names[] = {
	"NONE",
	"lm63",
	"adm1032",
	"adm1030",
	"max6649",
	"lm64",
	"f75375",
	"RV6xx",
	"RV770",
	"adt7473",
	"NONE",
	"External GPIO",
	"Evergreen",
	"emc2103",
	"Sumo",
	"Northern Islands",
	"Southern Islands",
	"lm96163",
	"Sea Islands",
	"Kaveri/Kabini",
};

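/*
 * Record the board's thermal controller setup from the PowerPlayInfo
 * table: fan parameters, internal sensor type, and, for recognized
 * external chips, instantiate the matching i2c device on the bus the
 * vBIOS points at.
 */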
void amdgpu_add_thermal_controller(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	ATOM_PPLIB_POWERPLAYTABLE *power_table;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	ATOM_PPLIB_THERMALCONTROLLER *controller;
	struct amdgpu_i2c_bus_rec i2c_bus;
	u16 data_offset;
	u8 frev, crev;

	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					   &frev, &crev, &data_offset))
		return;
	power_table = (ATOM_PPLIB_POWERPLAYTABLE *)
		(mode_info->atom_context->bios + data_offset);
	controller = &power_table->sThermalController;

	/* add the i2c bus for thermal/fan chip */
	if (controller->ucType > 0) {
		if (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN)
			adev->pm.no_fan = true;
		adev->pm.fan_pulses_per_revolution =
			controller->ucFanParameters & ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK;
		if (adev->pm.fan_pulses_per_revolution) {
			adev->pm.fan_min_rpm = controller->ucFanMinRPM;
			adev->pm.fan_max_rpm = controller->ucFanMaxRPM;
		}
		if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_RV6XX;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_RV770;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SUMO) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_SUMO;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_NISLANDS) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_NI;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SISLANDS) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_SI;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_CISLANDS) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_CI;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_KAVERI) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_KV;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) {
			DRM_INFO("External GPIO thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL_GPIO;
		} else if (controller->ucType ==
			   ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) {
			DRM_INFO("ADT7473 with internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_ADT7473_WITH_INTERNAL;
		} else if (controller->ucType ==
			   ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) {
			DRM_INFO("EMC2103 with internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EMC2103_WITH_INTERNAL;
		} else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) {
			DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
				 pp_lib_thermal_controller_names[controller->ucType],
				 controller->ucI2cAddress >> 1,
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL;
			i2c_bus = amdgpu_atombios_lookup_i2c_gpio(adev, controller->ucI2cLine);
			adev->pm.i2c_bus = amdgpu_i2c_lookup(adev, &i2c_bus);
			if (adev->pm.i2c_bus) {
				struct i2c_board_info info = { };
				const char *name = pp_lib_thermal_controller_names[controller->ucType];
				info.addr = controller->ucI2cAddress >> 1;
				strlcpy(info.type, name, sizeof(info.type));
				i2c_new_device(&adev->pm.i2c_bus->adapter, &info);
			}
		} else {
			DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n",
				 controller->ucType,
				 controller->ucI2cAddress >> 1,
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
		}
	}
}

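/*
 * Pick a supported PCIE gen: an explicitly configured asic_gen wins;
 * otherwise fall back from default_gen to the fastest speed present in
 * sys_mask (DRM_PCIE_SPEED_* flags).
 */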
enum amdgpu_pcie_gen amdgpu_get_pcie_gen_support(struct amdgpu_device *adev,
						 u32 sys_mask,
						 enum amdgpu_pcie_gen asic_gen,
						 enum amdgpu_pcie_gen default_gen)
{
	switch (asic_gen) {
	case AMDGPU_PCIE_GEN1:
		return AMDGPU_PCIE_GEN1;
	case AMDGPU_PCIE_GEN2:
		return AMDGPU_PCIE_GEN2;
	case AMDGPU_PCIE_GEN3:
		return AMDGPU_PCIE_GEN3;
	default:
		if ((sys_mask & DRM_PCIE_SPEED_80) && (default_gen == AMDGPU_PCIE_GEN3))
			return AMDGPU_PCIE_GEN3;
		else if ((sys_mask & DRM_PCIE_SPEED_50) && (default_gen == AMDGPU_PCIE_GEN2))
			return AMDGPU_PCIE_GEN2;
		else
			return AMDGPU_PCIE_GEN1;
	}
	return AMDGPU_PCIE_GEN1;
}

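/*
 * Sanitize a PCIE lane count: pass through the standard widths
 * (1/2/4/8/12/16) and fall back to default_lanes otherwise.
 */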
u16 amdgpu_get_pcie_lane_support(struct amdgpu_device *adev,
				 u16 asic_lanes,
				 u16 default_lanes)
{
	switch (asic_lanes) {
	case 0:
	default:
		return default_lanes;
	case 1:
		return 1;
	case 2:
		return 2;
	case 4:
		return 4;
	case 8:
		return 8;
	case 12:
		return 12;
	case 16:
		return 16;
	}
}

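/*
 * Map a PCIE lane count to its compact 0-6 encoding; widths other than
 * 1/2/4/8/12/16 encode as 0.
 */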
u8 amdgpu_encode_pci_lane_width(u32 lanes)
{
	u8 encoded_lanes[] = { 0, 1, 2, 0, 3, 0, 0, 0, 4, 0, 0, 0, 5, 0, 0, 0, 6 };

	if (lanes > 16)
		return 0;

	return encoded_lanes[lanes];
}

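/*
 * Return one of the VCE clock states parsed from the vBIOS, or NULL if
 * idx is out of range. The handle is an amdgpu_device pointer passed as
 * void *.
 */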
struct amd_vce_state*
amdgpu_get_vce_clock_state(void *handle, u32 idx)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (idx < adev->pm.dpm.num_of_vce_states)
		return &adev->pm.dpm.vce_states[idx];

	return NULL;
}