/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include "drmP.h"
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_i2c.h"
#include "amdgpu_dpm.h"
#include "atom.h"

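/*
 * Debug helpers: decode the ATOM_PPLIB classification and capability bits
 * of a power state into a human-readable dump.
 */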
void amdgpu_dpm_print_class_info(u32 class, u32 class2)
{
	printk("\tui class: ");
	switch (class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
	case ATOM_PPLIB_CLASSIFICATION_UI_NONE:
	default:
		printk("none\n");
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
		printk("battery\n");
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
		printk("balanced\n");
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
		printk("performance\n");
		break;
	}
	printk("\tinternal class: ");
	if (((class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 0) &&
	    (class2 == 0))
		printk("none");
	else {
		if (class & ATOM_PPLIB_CLASSIFICATION_BOOT)
			printk("boot ");
		if (class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
			printk("thermal ");
		if (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE)
			printk("limited_pwr ");
		if (class & ATOM_PPLIB_CLASSIFICATION_REST)
			printk("rest ");
		if (class & ATOM_PPLIB_CLASSIFICATION_FORCED)
			printk("forced ");
		if (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
			printk("3d_perf ");
		if (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE)
			printk("ovrdrv ");
		if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
			printk("uvd ");
		if (class & ATOM_PPLIB_CLASSIFICATION_3DLOW)
			printk("3d_low ");
		if (class & ATOM_PPLIB_CLASSIFICATION_ACPI)
			printk("acpi ");
		if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
			printk("uvd_hd2 ");
		if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
			printk("uvd_hd ");
		if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
			printk("uvd_sd ");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2)
			printk("limited_pwr2 ");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
			printk("ulv ");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
			printk("uvd_mvc ");
	}
	printk("\n");
}

void amdgpu_dpm_print_cap_info(u32 caps)
{
	printk("\tcaps: ");
	if (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
		printk("single_disp ");
	if (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK)
		printk("video ");
	if (caps & ATOM_PPLIB_DISALLOW_ON_DC)
		printk("no_dc ");
	printk("\n");
}

void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
				struct amdgpu_ps *rps)
{
	printk("\tstatus: ");
	if (rps == adev->pm.dpm.current_ps)
		printk("c ");
	if (rps == adev->pm.dpm.requested_ps)
		printk("r ");
	if (rps == adev->pm.dpm.boot_ps)
		printk("b ");
	printk("\n");
}

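/*
 * Length of the vertical blanking interval, in microseconds, of the first
 * enabled CRTC:
 *
 *   vblank_in_pixels = crtc_htotal *
 *                      (crtc_vblank_end - crtc_vdisplay + 2 * v_border)
 *   vblank_time_us   = vblank_in_pixels * 1000 / mode clock (in kHz)
 *
 * Returns 0xffffffff when no display is enabled, i.e. an effectively
 * unlimited blanking window.
 */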
u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;
	u32 vblank_in_pixels;
	u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */

	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
			amdgpu_crtc = to_amdgpu_crtc(crtc);
			if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
				vblank_in_pixels =
					amdgpu_crtc->hw_mode.crtc_htotal *
					(amdgpu_crtc->hw_mode.crtc_vblank_end -
					 amdgpu_crtc->hw_mode.crtc_vdisplay +
					 (amdgpu_crtc->v_border * 2));

				vblank_time_us = vblank_in_pixels * 1000 / amdgpu_crtc->hw_mode.clock;
				break;
			}
		}
	}

	return vblank_time_us;
}

u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;
	u32 vrefresh = 0;

	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
			amdgpu_crtc = to_amdgpu_crtc(crtc);
			if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
				vrefresh = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
				break;
			}
		}
	}

	return vrefresh;
}

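/*
 * Split i_c = i * r_c / 100 into the (p, u) fixed-point pair used by the
 * dpm code: b_c is the bit length of i_c >> p_b, u = (b_c + 1) / 2, and
 * p = i_c >> (2 * u), so that p fits in roughly p_b bits.
 */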
void amdgpu_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
			      u32 *p, u32 *u)
{
	u32 b_c = 0;
	u32 i_c;
	u32 tmp;

	i_c = (i * r_c) / 100;
	tmp = i_c >> p_b;

	while (tmp) {
		b_c++;
		tmp >>= 1;
	}

	*u = (b_c + 1) / 2;
	*p = i_c / (1 << (2 * (*u)));
}

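/*
 * Compute the low and high switch thresholds (tl, th) around a target
 * activity t for a frequency pair (fl, fh) with hysteresis h.  All values
 * are fixed point, scaled by 100; returns -EINVAL if either frequency is
 * zero or fl > fh.
 */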
int amdgpu_calculate_at(u32 t, u32 h, u32 fh, u32 fl, u32 *tl, u32 *th)
{
	u32 k, a, ah, al;
	u32 t1;

	if ((fl == 0) || (fh == 0) || (fl > fh))
		return -EINVAL;

	k = (100 * fh) / fl;
	t1 = (t * (k - 100));
	a = (1000 * (100 * h + t1)) / (10000 + (t1 / 100));
	a = (a + 5) / 10;
	ah = ((a * t) + 5000) / 10000;
	al = a - ah;

	*th = t - ah;
	*tl = t + al;

	return 0;
}

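/* A state is a UVD state if any of the UVD/HD/SD/MVC classification bits are set. */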
bool amdgpu_is_uvd_state(u32 class, u32 class2)
{
	if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
		return true;
	if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
		return true;
	if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
		return true;
	if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
		return true;
	if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
		return true;
	return false;
}

bool amdgpu_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor)
{
	switch (sensor) {
	case THERMAL_TYPE_RV6XX:
	case THERMAL_TYPE_RV770:
	case THERMAL_TYPE_EVERGREEN:
	case THERMAL_TYPE_SUMO:
	case THERMAL_TYPE_NI:
	case THERMAL_TYPE_SI:
	case THERMAL_TYPE_CI:
	case THERMAL_TYPE_KV:
		return true;
	case THERMAL_TYPE_ADT7473_WITH_INTERNAL:
	case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
		return false; /* need special handling */
	case THERMAL_TYPE_NONE:
	case THERMAL_TYPE_EXTERNAL:
	case THERMAL_TYPE_EXTERNAL_GPIO:
	default:
		return false;
	}
}

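/*
 * All PPLib table revisions live at the same offset in the VBIOS; these
 * unions let the parser overlay the revision-specific layouts and pick
 * fields based on the table size/revision actually present.
 */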
union power_info {
	struct _ATOM_POWERPLAY_INFO info;
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
	struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
	struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
};

union fan_info {
	struct _ATOM_PPLIB_FANTABLE fan;
	struct _ATOM_PPLIB_FANTABLE2 fan2;
	struct _ATOM_PPLIB_FANTABLE3 fan3;
};

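/*
 * Copy an ATOM clock/voltage dependency table into the driver's own
 * representation.  Clocks are stored as a split 24-bit little-endian
 * value (usClockLow | ucClockHigh << 16, presumably in the usual ATOM
 * 10 kHz units); voltages are 16 bits.
 */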
static int amdgpu_parse_clk_voltage_dep_table(struct amdgpu_clock_voltage_dependency_table *amdgpu_table,
					      ATOM_PPLIB_Clock_Voltage_Dependency_Table *atom_table)
{
	u32 size = atom_table->ucNumEntries *
		sizeof(struct amdgpu_clock_voltage_dependency_entry);
	int i;
	ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry;

	amdgpu_table->entries = kzalloc(size, GFP_KERNEL);
	if (!amdgpu_table->entries)
		return -ENOMEM;

	entry = &atom_table->entries[0];
	for (i = 0; i < atom_table->ucNumEntries; i++) {
		amdgpu_table->entries[i].clk = le16_to_cpu(entry->usClockLow) |
			(entry->ucClockHigh << 16);
		amdgpu_table->entries[i].v = le16_to_cpu(entry->usVoltage);
		entry = (ATOM_PPLIB_Clock_Voltage_Dependency_Record *)
			((u8 *)entry + sizeof(ATOM_PPLIB_Clock_Voltage_Dependency_Record));
	}
	amdgpu_table->count = atom_table->ucNumEntries;

	return 0;
}

int amdgpu_get_platform_caps(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	union power_info *power_info;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;

	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					   &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	adev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
	adev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
	adev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);

	return 0;
}

/* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8 24
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V9 26

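/*
 * Pull the optional PPLib tables (fan, clock/voltage dependencies, CAC,
 * VCE/UVD/SAMU/ACP limits, PowerTune, vddgfx) out of the VBIOS.  Which
 * tables exist is gated by usTableSize for the base revisions, and by the
 * extended header size for everything reached via usExtendendedHeaderOffset
 * (the misspelling is the actual field name in the ATOM headers).
 */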
int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	union power_info *power_info;
	union fan_info *fan_info;
	ATOM_PPLIB_Clock_Voltage_Dependency_Table *dep_table;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;
	int ret, i;

	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					   &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	/* fan table */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
		if (power_info->pplib3.usFanTableOffset) {
			fan_info = (union fan_info *)(mode_info->atom_context->bios + data_offset +
						      le16_to_cpu(power_info->pplib3.usFanTableOffset));
			adev->pm.dpm.fan.t_hyst = fan_info->fan.ucTHyst;
			adev->pm.dpm.fan.t_min = le16_to_cpu(fan_info->fan.usTMin);
			adev->pm.dpm.fan.t_med = le16_to_cpu(fan_info->fan.usTMed);
			adev->pm.dpm.fan.t_high = le16_to_cpu(fan_info->fan.usTHigh);
			adev->pm.dpm.fan.pwm_min = le16_to_cpu(fan_info->fan.usPWMMin);
			adev->pm.dpm.fan.pwm_med = le16_to_cpu(fan_info->fan.usPWMMed);
			adev->pm.dpm.fan.pwm_high = le16_to_cpu(fan_info->fan.usPWMHigh);
			if (fan_info->fan.ucFanTableFormat >= 2)
				adev->pm.dpm.fan.t_max = le16_to_cpu(fan_info->fan2.usTMax);
			else
				adev->pm.dpm.fan.t_max = 10900;
			adev->pm.dpm.fan.cycle_delay = 100000;
			if (fan_info->fan.ucFanTableFormat >= 3) {
				adev->pm.dpm.fan.control_mode = fan_info->fan3.ucFanControlMode;
				adev->pm.dpm.fan.default_max_fan_pwm =
					le16_to_cpu(fan_info->fan3.usFanPWMMax);
				adev->pm.dpm.fan.default_fan_output_sensitivity = 4836;
				adev->pm.dpm.fan.fan_output_sensitivity =
					le16_to_cpu(fan_info->fan3.usFanOutputSensitivity);
			}
			adev->pm.dpm.fan.ucode_fan_control = true;
		}
	}

	/* clock dependency tables, shedding tables */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE4)) {
		if (power_info->pplib4.usVddcDependencyOnSCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usVddciDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usVddcDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usMvddDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usMaxClockVoltageOnDCOffset) {
			ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v =
				(ATOM_PPLIB_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usMaxClockVoltageOnDCOffset));
			if (clk_v->ucNumEntries) {
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk =
					le16_to_cpu(clk_v->entries[0].usSclkLow) |
					(clk_v->entries[0].ucSclkHigh << 16);
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk =
					le16_to_cpu(clk_v->entries[0].usMclkLow) |
					(clk_v->entries[0].ucMclkHigh << 16);
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc =
					le16_to_cpu(clk_v->entries[0].usVddc);
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddci =
					le16_to_cpu(clk_v->entries[0].usVddci);
			}
		}
		if (power_info->pplib4.usVddcPhaseShedLimitsTableOffset) {
			ATOM_PPLIB_PhaseSheddingLimits_Table *psl =
				(ATOM_PPLIB_PhaseSheddingLimits_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcPhaseShedLimitsTableOffset));
			ATOM_PPLIB_PhaseSheddingLimits_Record *entry;

			adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries =
				kzalloc(psl->ucNumEntries *
					sizeof(struct amdgpu_phase_shedding_limits_entry),
					GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}

			entry = &psl->entries[0];
			for (i = 0; i < psl->ucNumEntries; i++) {
				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk =
					le16_to_cpu(entry->usSclkLow) | (entry->ucSclkHigh << 16);
				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk =
					le16_to_cpu(entry->usMclkLow) | (entry->ucMclkHigh << 16);
				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_PhaseSheddingLimits_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_PhaseSheddingLimits_Record));
			}
			adev->pm.dpm.dyn_state.phase_shedding_limits_table.count =
				psl->ucNumEntries;
		}
	}

	/* cac data */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE5)) {
		adev->pm.dpm.tdp_limit = le32_to_cpu(power_info->pplib5.ulTDPLimit);
		adev->pm.dpm.near_tdp_limit = le32_to_cpu(power_info->pplib5.ulNearTDPLimit);
		adev->pm.dpm.near_tdp_limit_adjusted = adev->pm.dpm.near_tdp_limit;
		adev->pm.dpm.tdp_od_limit = le16_to_cpu(power_info->pplib5.usTDPODLimit);
		if (adev->pm.dpm.tdp_od_limit)
			adev->pm.dpm.power_control = true;
		else
			adev->pm.dpm.power_control = false;
		adev->pm.dpm.tdp_adjustment = 0;
		adev->pm.dpm.sq_ramping_threshold = le32_to_cpu(power_info->pplib5.ulSQRampingThreshold);
		adev->pm.dpm.cac_leakage = le32_to_cpu(power_info->pplib5.ulCACLeakage);
		adev->pm.dpm.load_line_slope = le16_to_cpu(power_info->pplib5.usLoadLineSlope);
		if (power_info->pplib5.usCACLeakageTableOffset) {
			ATOM_PPLIB_CAC_Leakage_Table *cac_table =
				(ATOM_PPLIB_CAC_Leakage_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib5.usCACLeakageTableOffset));
			ATOM_PPLIB_CAC_Leakage_Record *entry;
			u32 size = cac_table->ucNumEntries * sizeof(struct amdgpu_cac_leakage_table);
			adev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			entry = &cac_table->entries[0];
			for (i = 0; i < cac_table->ucNumEntries; i++) {
				if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1 =
						le16_to_cpu(entry->usVddc1);
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2 =
						le16_to_cpu(entry->usVddc2);
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3 =
						le16_to_cpu(entry->usVddc3);
				} else {
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc =
						le16_to_cpu(entry->usVddc);
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage =
						le32_to_cpu(entry->ulLeakageValue);
				}
				entry = (ATOM_PPLIB_CAC_Leakage_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_CAC_Leakage_Record));
			}
			adev->pm.dpm.dyn_state.cac_leakage_table.count = cac_table->ucNumEntries;
		}
	}

	/* ext tables */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
		ATOM_PPLIB_EXTENDEDHEADER *ext_hdr = (ATOM_PPLIB_EXTENDEDHEADER *)
			(mode_info->atom_context->bios + data_offset +
			 le16_to_cpu(power_info->pplib3.usExtendendedHeaderOffset));
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2) &&
		    ext_hdr->usVCETableOffset) {
			VCEClockInfoArray *array = (VCEClockInfoArray *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1);
			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
				 1 + array->ucNumEntries * sizeof(VCEClockInfo));
			ATOM_PPLIB_VCE_State_Table *states =
				(ATOM_PPLIB_VCE_State_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
				 1 + (array->ucNumEntries * sizeof(VCEClockInfo)) +
				 1 + (limits->numEntries * sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record)));
			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *entry;
			ATOM_PPLIB_VCE_State_Record *state_entry;
			VCEClockInfo *vce_clk;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_vce_clock_voltage_dependency_entry);
			adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			state_entry = &states->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				vce_clk = (VCEClockInfo *)
					((u8 *)&array->entries[0] +
					 (entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk =
					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].ecclk =
					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record));
			}
			adev->pm.dpm.num_of_vce_states =
				states->numEntries > AMD_MAX_VCE_LEVELS ?
				AMD_MAX_VCE_LEVELS : states->numEntries;
			for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
				vce_clk = (VCEClockInfo *)
					((u8 *)&array->entries[0] +
					 (state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
				adev->pm.dpm.vce_states[i].evclk =
					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
				adev->pm.dpm.vce_states[i].ecclk =
					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
				adev->pm.dpm.vce_states[i].clk_idx =
					state_entry->ucClockInfoIndex & 0x3f;
				adev->pm.dpm.vce_states[i].pstate =
					(state_entry->ucClockInfoIndex & 0xc0) >> 6;
				state_entry = (ATOM_PPLIB_VCE_State_Record *)
					((u8 *)state_entry + sizeof(ATOM_PPLIB_VCE_State_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3) &&
		    ext_hdr->usUVDTableOffset) {
			UVDClockInfoArray *array = (UVDClockInfoArray *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1);
			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1 +
				 1 + (array->ucNumEntries * sizeof(UVDClockInfo)));
			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_uvd_clock_voltage_dependency_entry);
			adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				UVDClockInfo *uvd_clk = (UVDClockInfo *)
					((u8 *)&array->entries[0] +
					 (entry->ucUVDClockInfoIndex * sizeof(UVDClockInfo)));
				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].vclk =
					le16_to_cpu(uvd_clk->usVClkLow) | (uvd_clk->ucVClkHigh << 16);
				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk =
					le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16);
				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4) &&
		    ext_hdr->usSAMUTableOffset) {
			ATOM_PPLIB_SAMClk_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_SAMClk_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usSAMUTableOffset) + 1);
			ATOM_PPLIB_SAMClk_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_clock_voltage_dependency_entry);
			adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].clk =
					le16_to_cpu(entry->usSAMClockLow) | (entry->ucSAMClockHigh << 16);
				adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_SAMClk_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_SAMClk_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) &&
		    ext_hdr->usPPMTableOffset) {
			ATOM_PPLIB_PPM_Table *ppm = (ATOM_PPLIB_PPM_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usPPMTableOffset));
			adev->pm.dpm.dyn_state.ppm_table =
				kzalloc(sizeof(struct amdgpu_ppm_table), GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.ppm_table) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign;
			adev->pm.dpm.dyn_state.ppm_table->cpu_core_number =
				le16_to_cpu(ppm->usCpuCoreNumber);
			adev->pm.dpm.dyn_state.ppm_table->platform_tdp =
				le32_to_cpu(ppm->ulPlatformTDP);
			adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdp =
				le32_to_cpu(ppm->ulSmallACPlatformTDP);
			adev->pm.dpm.dyn_state.ppm_table->platform_tdc =
				le32_to_cpu(ppm->ulPlatformTDC);
			adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdc =
				le32_to_cpu(ppm->ulSmallACPlatformTDC);
			adev->pm.dpm.dyn_state.ppm_table->apu_tdp =
				le32_to_cpu(ppm->ulApuTDP);
			adev->pm.dpm.dyn_state.ppm_table->dgpu_tdp =
				le32_to_cpu(ppm->ulDGpuTDP);
			adev->pm.dpm.dyn_state.ppm_table->dgpu_ulv_power =
				le32_to_cpu(ppm->ulDGpuUlvPower);
			adev->pm.dpm.dyn_state.ppm_table->tj_max =
				le32_to_cpu(ppm->ulTjmax);
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6) &&
		    ext_hdr->usACPTableOffset) {
			ATOM_PPLIB_ACPClk_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_ACPClk_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usACPTableOffset) + 1);
			ATOM_PPLIB_ACPClk_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_clock_voltage_dependency_entry);
			adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].clk =
					le16_to_cpu(entry->usACPClockLow) | (entry->ucACPClockHigh << 16);
				adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_ACPClk_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_ACPClk_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7) &&
		    ext_hdr->usPowerTuneTableOffset) {
			u8 rev = *(u8 *)(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
			ATOM_PowerTune_Table *pt;
			adev->pm.dpm.dyn_state.cac_tdp_table =
				kzalloc(sizeof(struct amdgpu_cac_tdp_table), GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.cac_tdp_table) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			if (rev > 0) {
				ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *)
					(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
				adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit =
					ppt->usMaximumPowerDeliveryLimit;
				pt = &ppt->power_tune_table;
			} else {
				ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *)
					(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
				adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = 255;
				pt = &ppt->power_tune_table;
			}
			adev->pm.dpm.dyn_state.cac_tdp_table->tdp = le16_to_cpu(pt->usTDP);
			adev->pm.dpm.dyn_state.cac_tdp_table->configurable_tdp =
				le16_to_cpu(pt->usConfigurableTDP);
			adev->pm.dpm.dyn_state.cac_tdp_table->tdc = le16_to_cpu(pt->usTDC);
			adev->pm.dpm.dyn_state.cac_tdp_table->battery_power_limit =
				le16_to_cpu(pt->usBatteryPowerLimit);
			adev->pm.dpm.dyn_state.cac_tdp_table->small_power_limit =
				le16_to_cpu(pt->usSmallPowerLimit);
			adev->pm.dpm.dyn_state.cac_tdp_table->low_cac_leakage =
				le16_to_cpu(pt->usLowCACLeakage);
			adev->pm.dpm.dyn_state.cac_tdp_table->high_cac_leakage =
				le16_to_cpu(pt->usHighCACLeakage);
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8) &&
		    ext_hdr->usSclkVddgfxTableOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usSclkVddgfxTableOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(
					&adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk,
					dep_table);
			if (ret) {
				kfree(adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk.entries);
				return ret;
			}
		}
	}

	return 0;
}

void amdgpu_free_extended_power_table(struct amdgpu_device *adev)
{
	struct amdgpu_dpm_dynamic_state *dyn_state = &adev->pm.dpm.dyn_state;

	kfree(dyn_state->vddc_dependency_on_sclk.entries);
	kfree(dyn_state->vddci_dependency_on_mclk.entries);
	kfree(dyn_state->vddc_dependency_on_mclk.entries);
	kfree(dyn_state->mvdd_dependency_on_mclk.entries);
	kfree(dyn_state->cac_leakage_table.entries);
	kfree(dyn_state->phase_shedding_limits_table.entries);
	kfree(dyn_state->ppm_table);
	kfree(dyn_state->cac_tdp_table);
	kfree(dyn_state->vce_clock_voltage_dependency_table.entries);
	kfree(dyn_state->uvd_clock_voltage_dependency_table.entries);
	kfree(dyn_state->samu_clock_voltage_dependency_table.entries);
	kfree(dyn_state->acp_clock_voltage_dependency_table.entries);
	kfree(dyn_state->vddgfx_dependency_on_sclk.entries);
}

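/* Indexed by the ATOM_PP_THERMALCONTROLLER_* type codes. */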
static const char *pp_lib_thermal_controller_names[] = {
	"NONE",
	"lm63",
	"adm1032",
	"adm1030",
	"max6649",
	"lm64",
	"f75375",
	"RV6xx",
	"RV770",
	"adt7473",
	"NONE",
	"External GPIO",
	"Evergreen",
	"emc2103",
	"Sumo",
	"Northern Islands",
	"Southern Islands",
	"lm96163",
	"Sea Islands",
	"Kaveri/Kabini",
};

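/*
 * Record the thermal controller described in the PowerPlay table: note the
 * fan parameters, set adev->pm.int_thermal_type for on-die sensors and, for
 * recognized external chips, register an i2c device on the indicated bus.
 */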
void amdgpu_add_thermal_controller(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	ATOM_PPLIB_POWERPLAYTABLE *power_table;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	ATOM_PPLIB_THERMALCONTROLLER *controller;
	struct amdgpu_i2c_bus_rec i2c_bus;
	u16 data_offset;
	u8 frev, crev;

	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					   &frev, &crev, &data_offset))
		return;
	power_table = (ATOM_PPLIB_POWERPLAYTABLE *)
		(mode_info->atom_context->bios + data_offset);
	controller = &power_table->sThermalController;

	/* add the i2c bus for thermal/fan chip */
	if (controller->ucType > 0) {
		if (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN)
			adev->pm.no_fan = true;
		adev->pm.fan_pulses_per_revolution =
			controller->ucFanParameters & ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK;
		if (adev->pm.fan_pulses_per_revolution) {
			adev->pm.fan_min_rpm = controller->ucFanMinRPM;
			adev->pm.fan_max_rpm = controller->ucFanMaxRPM;
		}
		if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_RV6XX;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_RV770;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SUMO) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_SUMO;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_NISLANDS) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_NI;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SISLANDS) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_SI;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_CISLANDS) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_CI;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_KAVERI) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_KV;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) {
			DRM_INFO("External GPIO thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL_GPIO;
		} else if (controller->ucType ==
			   ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) {
			DRM_INFO("ADT7473 with internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_ADT7473_WITH_INTERNAL;
		} else if (controller->ucType ==
			   ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) {
			DRM_INFO("EMC2103 with internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EMC2103_WITH_INTERNAL;
		} else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) {
			DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
				 pp_lib_thermal_controller_names[controller->ucType],
				 controller->ucI2cAddress >> 1,
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL;
			i2c_bus = amdgpu_atombios_lookup_i2c_gpio(adev, controller->ucI2cLine);
			adev->pm.i2c_bus = amdgpu_i2c_lookup(adev, &i2c_bus);
			if (adev->pm.i2c_bus) {
				struct i2c_board_info info = { };
				const char *name = pp_lib_thermal_controller_names[controller->ucType];
				info.addr = controller->ucI2cAddress >> 1;
				strlcpy(info.type, name, sizeof(info.type));
				i2c_new_device(&adev->pm.i2c_bus->adapter, &info);
			}
		} else {
			DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n",
				 controller->ucType,
				 controller->ucI2cAddress >> 1,
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
		}
	}
}

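/*
 * Pick a PCIE gen: an explicit asic_gen wins; otherwise fall back from
 * default_gen to whatever speeds the system mask actually advertises.
 */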
enum amdgpu_pcie_gen amdgpu_get_pcie_gen_support(struct amdgpu_device *adev,
						 u32 sys_mask,
						 enum amdgpu_pcie_gen asic_gen,
						 enum amdgpu_pcie_gen default_gen)
{
	switch (asic_gen) {
	case AMDGPU_PCIE_GEN1:
		return AMDGPU_PCIE_GEN1;
	case AMDGPU_PCIE_GEN2:
		return AMDGPU_PCIE_GEN2;
	case AMDGPU_PCIE_GEN3:
		return AMDGPU_PCIE_GEN3;
	default:
		if ((sys_mask & DRM_PCIE_SPEED_80) && (default_gen == AMDGPU_PCIE_GEN3))
			return AMDGPU_PCIE_GEN3;
		else if ((sys_mask & DRM_PCIE_SPEED_50) && (default_gen == AMDGPU_PCIE_GEN2))
			return AMDGPU_PCIE_GEN2;
		else
			return AMDGPU_PCIE_GEN1;
	}
	return AMDGPU_PCIE_GEN1;
}

u16 amdgpu_get_pcie_lane_support(struct amdgpu_device *adev,
				 u16 asic_lanes,
				 u16 default_lanes)
{
	switch (asic_lanes) {
	case 0:
	default:
		return default_lanes;
	case 1:
		return 1;
	case 2:
		return 2;
	case 4:
		return 4;
	case 8:
		return 8;
	case 12:
		return 12;
	case 16:
		return 16;
	}
}

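/*
 * Encode a PCIE lane count into the bitfield form consumed by the ATOM
 * tables: 1 -> 1, 2 -> 2, 4 -> 3, 8 -> 4, 12 -> 5, 16 -> 6, anything
 * else -> 0.
 */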
u8 amdgpu_encode_pci_lane_width(u32 lanes)
{
	u8 encoded_lanes[] = { 0, 1, 2, 0, 3, 0, 0, 0, 4, 0, 0, 0, 5, 0, 0, 0, 6 };

	if (lanes > 16)
		return 0;

	return encoded_lanes[lanes];
}

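/* Return the idx-th parsed VCE clock state, or NULL if idx is out of range. */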
struct amd_vce_state*
amdgpu_get_vce_clock_state(struct amdgpu_device *adev, unsigned idx)
{
	if (idx < adev->pm.dpm.num_of_vce_states)
		return &adev->pm.dpm.vce_states[idx];

	return NULL;
}