blob: f35893c195314ac02f81a0f9b8853cf91471ea6b [file] [log] [blame]
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001/*
2 * Permission is hereby granted, free of charge, to any person obtaining a
3 * copy of this software and associated documentation files (the "Software"),
4 * to deal in the Software without restriction, including without limitation
5 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
6 * and/or sell copies of the Software, and to permit persons to whom the
7 * Software is furnished to do so, subject to the following conditions:
8 *
9 * The above copyright notice and this permission notice shall be included in
10 * all copies or substantial portions of the Software.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
13 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
15 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
16 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
17 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
18 * OTHER DEALINGS IN THE SOFTWARE.
19 *
20 * Authors: Rafał Miłecki <zajec5@gmail.com>
21 * Alex Deucher <alexdeucher@gmail.com>
22 */
23#include <drm/drmP.h>
24#include "amdgpu.h"
25#include "amdgpu_drv.h"
26#include "amdgpu_pm.h"
27#include "amdgpu_dpm.h"
28#include "atom.h"
29#include <linux/power_supply.h>
30#include <linux/hwmon.h>
31#include <linux/hwmon-sysfs.h>
32
Rex Zhu1b5708f2015-11-10 18:25:24 -050033#include "amd_powerplay.h"
34
Alex Deucherd38ceaf2015-04-20 16:55:21 -040035static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev);
36
Huang Ruia8503b12017-01-05 19:17:13 +080037static const struct cg_flag_name clocks[] = {
38 {AMD_CG_SUPPORT_GFX_MGCG, "Graphics Medium Grain Clock Gating"},
39 {AMD_CG_SUPPORT_GFX_MGLS, "Graphics Medium Grain memory Light Sleep"},
40 {AMD_CG_SUPPORT_GFX_CGCG, "Graphics Coarse Grain Clock Gating"},
41 {AMD_CG_SUPPORT_GFX_CGLS, "Graphics Coarse Grain memory Light Sleep"},
42 {AMD_CG_SUPPORT_GFX_CGTS, "Graphics Coarse Grain Tree Shader Light Sleep"},
43 {AMD_CG_SUPPORT_GFX_CGTS_LS, "Graphics Coarse Grain Tree Shader Light Sleep"},
44 {AMD_CG_SUPPORT_GFX_CP_LS, "Graphics Command Processor Light Sleep"},
45 {AMD_CG_SUPPORT_GFX_RLC_LS, "Graphics Run List Controller Light Sleep"},
46 {AMD_CG_SUPPORT_MC_LS, "Memory Controller Light Sleep"},
47 {AMD_CG_SUPPORT_MC_MGCG, "Memory Controller Medium Grain Clock Gating"},
48 {AMD_CG_SUPPORT_SDMA_LS, "System Direct Memory Access Light Sleep"},
49 {AMD_CG_SUPPORT_SDMA_MGCG, "System Direct Memory Access Medium Grain Clock Gating"},
50 {AMD_CG_SUPPORT_BIF_LS, "Bus Interface Light Sleep"},
51 {AMD_CG_SUPPORT_UVD_MGCG, "Unified Video Decoder Medium Grain Clock Gating"},
52 {AMD_CG_SUPPORT_VCE_MGCG, "Video Compression Engine Medium Grain Clock Gating"},
53 {AMD_CG_SUPPORT_HDP_LS, "Host Data Path Light Sleep"},
54 {AMD_CG_SUPPORT_HDP_MGCG, "Host Data Path Medium Grain Clock Gating"},
55 {AMD_CG_SUPPORT_ROM_MGCG, "Rom Medium Grain Clock Gating"},
56 {0, NULL},
57};
58
Alex Deucherd38ceaf2015-04-20 16:55:21 -040059void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
60{
Jammy Zhoue61710c2015-11-10 18:31:08 -050061 if (adev->pp_enabled)
Rex Zhu1b5708f2015-11-10 18:25:24 -050062 /* TODO */
63 return;
64
Alex Deucherd38ceaf2015-04-20 16:55:21 -040065 if (adev->pm.dpm_enabled) {
66 mutex_lock(&adev->pm.mutex);
67 if (power_supply_is_system_supplied() > 0)
68 adev->pm.dpm.ac_power = true;
69 else
70 adev->pm.dpm.ac_power = false;
71 if (adev->pm.funcs->enable_bapm)
72 amdgpu_dpm_enable_bapm(adev, adev->pm.dpm.ac_power);
73 mutex_unlock(&adev->pm.mutex);
74 }
75}
76
77static ssize_t amdgpu_get_dpm_state(struct device *dev,
78 struct device_attribute *attr,
79 char *buf)
80{
81 struct drm_device *ddev = dev_get_drvdata(dev);
82 struct amdgpu_device *adev = ddev->dev_private;
Rex Zhu1b5708f2015-11-10 18:25:24 -050083 enum amd_pm_state_type pm;
84
Jammy Zhoue61710c2015-11-10 18:31:08 -050085 if (adev->pp_enabled) {
Rex Zhu1b5708f2015-11-10 18:25:24 -050086 pm = amdgpu_dpm_get_current_power_state(adev);
87 } else
88 pm = adev->pm.dpm.user_state;
Alex Deucherd38ceaf2015-04-20 16:55:21 -040089
90 return snprintf(buf, PAGE_SIZE, "%s\n",
91 (pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
92 (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
93}
94
95static ssize_t amdgpu_set_dpm_state(struct device *dev,
96 struct device_attribute *attr,
97 const char *buf,
98 size_t count)
99{
100 struct drm_device *ddev = dev_get_drvdata(dev);
101 struct amdgpu_device *adev = ddev->dev_private;
Rex Zhu1b5708f2015-11-10 18:25:24 -0500102 enum amd_pm_state_type state;
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400103
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400104 if (strncmp("battery", buf, strlen("battery")) == 0)
Rex Zhu1b5708f2015-11-10 18:25:24 -0500105 state = POWER_STATE_TYPE_BATTERY;
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400106 else if (strncmp("balanced", buf, strlen("balanced")) == 0)
Rex Zhu1b5708f2015-11-10 18:25:24 -0500107 state = POWER_STATE_TYPE_BALANCED;
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400108 else if (strncmp("performance", buf, strlen("performance")) == 0)
Rex Zhu1b5708f2015-11-10 18:25:24 -0500109 state = POWER_STATE_TYPE_PERFORMANCE;
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400110 else {
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400111 count = -EINVAL;
112 goto fail;
113 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400114
Jammy Zhoue61710c2015-11-10 18:31:08 -0500115 if (adev->pp_enabled) {
Rex Zhu1b5708f2015-11-10 18:25:24 -0500116 amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_ENABLE_USER_STATE, &state, NULL);
117 } else {
118 mutex_lock(&adev->pm.mutex);
119 adev->pm.dpm.user_state = state;
120 mutex_unlock(&adev->pm.mutex);
121
122 /* Can't set dpm state when the card is off */
123 if (!(adev->flags & AMD_IS_PX) ||
124 (ddev->switch_power_state == DRM_SWITCH_POWER_ON))
125 amdgpu_pm_compute_clocks(adev);
126 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400127fail:
128 return count;
129}
130
131static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev,
Rex Zhu1b5708f2015-11-10 18:25:24 -0500132 struct device_attribute *attr,
133 char *buf)
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400134{
135 struct drm_device *ddev = dev_get_drvdata(dev);
136 struct amdgpu_device *adev = ddev->dev_private;
Rex Zhue5d03ac2016-12-23 14:39:41 +0800137 enum amd_dpm_forced_level level;
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400138
Alex Deucher0c67df42016-02-19 15:30:15 -0500139 if ((adev->flags & AMD_IS_PX) &&
140 (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
141 return snprintf(buf, PAGE_SIZE, "off\n");
142
Rex Zhue5d03ac2016-12-23 14:39:41 +0800143 level = amdgpu_dpm_get_performance_level(adev);
144 return snprintf(buf, PAGE_SIZE, "%s\n",
Rex Zhu570272d2017-01-06 13:32:49 +0800145 (level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
146 (level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
147 (level == AMD_DPM_FORCED_LEVEL_HIGH) ? "high" :
148 (level == AMD_DPM_FORCED_LEVEL_MANUAL) ? "manual" :
149 (level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) ? "profile_standard" :
150 (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) ? "profile_min_sclk" :
151 (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) ? "profile_min_mclk" :
152 (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) ? "profile_peak" :
153 "unknown");
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400154}
155
/* sysfs store for power_dpm_force_performance_level.
 * Parses a level keyword ("low", "high", "auto", "manual",
 * "profile_exit", "profile_standard", "profile_min_sclk",
 * "profile_min_mclk", "profile_peak") and forces the corresponding DPM
 * performance level via powerplay or the legacy dpm path.
 * Returns @count on success, -EINVAL on bad input, thermal override,
 * or backend failure.
 */
static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
						       struct device_attribute *attr,
						       const char *buf,
						       size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	enum amd_dpm_forced_level level;
	enum amd_dpm_forced_level current_level;
	int ret = 0;

	/* Can't force performance level when the card is off */
	if ((adev->flags & AMD_IS_PX) &&
	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
		return -EINVAL;

	current_level = amdgpu_dpm_get_performance_level(adev);

	/* Prefix matching: each token is compared over its own full length,
	 * so trailing characters (e.g. the sysfs newline) are tolerated. */
	if (strncmp("low", buf, strlen("low")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_LOW;
	} else if (strncmp("high", buf, strlen("high")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_HIGH;
	} else if (strncmp("auto", buf, strlen("auto")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_AUTO;
	} else if (strncmp("manual", buf, strlen("manual")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_MANUAL;
	} else if (strncmp("profile_exit", buf, strlen("profile_exit")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_EXIT;
	} else if (strncmp("profile_standard", buf, strlen("profile_standard")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD;
	} else if (strncmp("profile_min_sclk", buf, strlen("profile_min_sclk")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK;
	} else if (strncmp("profile_min_mclk", buf, strlen("profile_min_mclk")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK;
	} else if (strncmp("profile_peak", buf, strlen("profile_peak")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
	} else {
		count = -EINVAL;
		goto fail;
	}

	/* Already at the requested level: nothing to do */
	if (current_level == level)
		return count;

	if (adev->pp_enabled)
		amdgpu_dpm_force_performance_level(adev, level);
	else {
		mutex_lock(&adev->pm.mutex);
		/* thermal throttling owns the level; refuse user overrides */
		if (adev->pm.dpm.thermal_active) {
			count = -EINVAL;
			mutex_unlock(&adev->pm.mutex);
			goto fail;
		}
		ret = amdgpu_dpm_force_performance_level(adev, level);
		if (ret)
			count = -EINVAL;
		else
			adev->pm.dpm.forced_level = level;
		mutex_unlock(&adev->pm.mutex);
	}

fail:
	return count;
}
220
Eric Huangf3898ea2015-12-11 16:24:34 -0500221static ssize_t amdgpu_get_pp_num_states(struct device *dev,
222 struct device_attribute *attr,
223 char *buf)
224{
225 struct drm_device *ddev = dev_get_drvdata(dev);
226 struct amdgpu_device *adev = ddev->dev_private;
227 struct pp_states_info data;
228 int i, buf_len;
229
230 if (adev->pp_enabled)
231 amdgpu_dpm_get_pp_num_states(adev, &data);
232
233 buf_len = snprintf(buf, PAGE_SIZE, "states: %d\n", data.nums);
234 for (i = 0; i < data.nums; i++)
235 buf_len += snprintf(buf + buf_len, PAGE_SIZE, "%d %s\n", i,
236 (data.states[i] == POWER_STATE_TYPE_INTERNAL_BOOT) ? "boot" :
237 (data.states[i] == POWER_STATE_TYPE_BATTERY) ? "battery" :
238 (data.states[i] == POWER_STATE_TYPE_BALANCED) ? "balanced" :
239 (data.states[i] == POWER_STATE_TYPE_PERFORMANCE) ? "performance" : "default");
240
241 return buf_len;
242}
243
244static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
245 struct device_attribute *attr,
246 char *buf)
247{
248 struct drm_device *ddev = dev_get_drvdata(dev);
249 struct amdgpu_device *adev = ddev->dev_private;
250 struct pp_states_info data;
251 enum amd_pm_state_type pm = 0;
252 int i = 0;
253
254 if (adev->pp_enabled) {
255
256 pm = amdgpu_dpm_get_current_power_state(adev);
257 amdgpu_dpm_get_pp_num_states(adev, &data);
258
259 for (i = 0; i < data.nums; i++) {
260 if (pm == data.states[i])
261 break;
262 }
263
264 if (i == data.nums)
265 i = -EINVAL;
266 }
267
268 return snprintf(buf, PAGE_SIZE, "%d\n", i);
269}
270
271static ssize_t amdgpu_get_pp_force_state(struct device *dev,
272 struct device_attribute *attr,
273 char *buf)
274{
275 struct drm_device *ddev = dev_get_drvdata(dev);
276 struct amdgpu_device *adev = ddev->dev_private;
277 struct pp_states_info data;
278 enum amd_pm_state_type pm = 0;
279 int i;
280
281 if (adev->pp_force_state_enabled && adev->pp_enabled) {
282 pm = amdgpu_dpm_get_current_power_state(adev);
283 amdgpu_dpm_get_pp_num_states(adev, &data);
284
285 for (i = 0; i < data.nums; i++) {
286 if (pm == data.states[i])
287 break;
288 }
289
290 if (i == data.nums)
291 i = -EINVAL;
292
293 return snprintf(buf, PAGE_SIZE, "%d\n", i);
294
295 } else
296 return snprintf(buf, PAGE_SIZE, "\n");
297}
298
299static ssize_t amdgpu_set_pp_force_state(struct device *dev,
300 struct device_attribute *attr,
301 const char *buf,
302 size_t count)
303{
304 struct drm_device *ddev = dev_get_drvdata(dev);
305 struct amdgpu_device *adev = ddev->dev_private;
306 enum amd_pm_state_type state = 0;
Dan Carpenter041bf022016-06-16 11:30:23 +0300307 unsigned long idx;
Eric Huangf3898ea2015-12-11 16:24:34 -0500308 int ret;
309
310 if (strlen(buf) == 1)
311 adev->pp_force_state_enabled = false;
Dan Carpenter041bf022016-06-16 11:30:23 +0300312 else if (adev->pp_enabled) {
313 struct pp_states_info data;
Eric Huangf3898ea2015-12-11 16:24:34 -0500314
Dan Carpenter041bf022016-06-16 11:30:23 +0300315 ret = kstrtoul(buf, 0, &idx);
316 if (ret || idx >= ARRAY_SIZE(data.states)) {
Eric Huangf3898ea2015-12-11 16:24:34 -0500317 count = -EINVAL;
318 goto fail;
319 }
320
Dan Carpenter041bf022016-06-16 11:30:23 +0300321 amdgpu_dpm_get_pp_num_states(adev, &data);
322 state = data.states[idx];
323 /* only set user selected power states */
324 if (state != POWER_STATE_TYPE_INTERNAL_BOOT &&
325 state != POWER_STATE_TYPE_DEFAULT) {
326 amdgpu_dpm_dispatch_task(adev,
327 AMD_PP_EVENT_ENABLE_USER_STATE, &state, NULL);
328 adev->pp_force_state_enabled = true;
Eric Huangf3898ea2015-12-11 16:24:34 -0500329 }
330 }
331fail:
332 return count;
333}
334
335static ssize_t amdgpu_get_pp_table(struct device *dev,
336 struct device_attribute *attr,
337 char *buf)
338{
339 struct drm_device *ddev = dev_get_drvdata(dev);
340 struct amdgpu_device *adev = ddev->dev_private;
341 char *table = NULL;
Eric Huang1684d3b2016-07-28 17:25:01 -0400342 int size;
Eric Huangf3898ea2015-12-11 16:24:34 -0500343
344 if (adev->pp_enabled)
345 size = amdgpu_dpm_get_pp_table(adev, &table);
346 else
347 return 0;
348
349 if (size >= PAGE_SIZE)
350 size = PAGE_SIZE - 1;
351
Eric Huang1684d3b2016-07-28 17:25:01 -0400352 memcpy(buf, table, size);
Eric Huangf3898ea2015-12-11 16:24:34 -0500353
354 return size;
355}
356
357static ssize_t amdgpu_set_pp_table(struct device *dev,
358 struct device_attribute *attr,
359 const char *buf,
360 size_t count)
361{
362 struct drm_device *ddev = dev_get_drvdata(dev);
363 struct amdgpu_device *adev = ddev->dev_private;
364
365 if (adev->pp_enabled)
366 amdgpu_dpm_set_pp_table(adev, buf, count);
367
368 return count;
369}
370
371static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
372 struct device_attribute *attr,
373 char *buf)
374{
375 struct drm_device *ddev = dev_get_drvdata(dev);
376 struct amdgpu_device *adev = ddev->dev_private;
377 ssize_t size = 0;
378
379 if (adev->pp_enabled)
380 size = amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf);
Eric Huangc85e2992016-05-19 15:41:25 -0400381 else if (adev->pm.funcs->print_clock_levels)
382 size = adev->pm.funcs->print_clock_levels(adev, PP_SCLK, buf);
Eric Huangf3898ea2015-12-11 16:24:34 -0500383
384 return size;
385}
386
387static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
388 struct device_attribute *attr,
389 const char *buf,
390 size_t count)
391{
392 struct drm_device *ddev = dev_get_drvdata(dev);
393 struct amdgpu_device *adev = ddev->dev_private;
394 int ret;
395 long level;
Eric Huang56327082016-04-12 14:57:23 -0400396 uint32_t i, mask = 0;
397 char sub_str[2];
Eric Huangf3898ea2015-12-11 16:24:34 -0500398
Eric Huang14b33072016-06-14 15:08:22 -0400399 for (i = 0; i < strlen(buf); i++) {
400 if (*(buf + i) == '\n')
401 continue;
Eric Huang56327082016-04-12 14:57:23 -0400402 sub_str[0] = *(buf + i);
403 sub_str[1] = '\0';
404 ret = kstrtol(sub_str, 0, &level);
Eric Huangf3898ea2015-12-11 16:24:34 -0500405
Eric Huang56327082016-04-12 14:57:23 -0400406 if (ret) {
407 count = -EINVAL;
408 goto fail;
409 }
410 mask |= 1 << level;
Eric Huangf3898ea2015-12-11 16:24:34 -0500411 }
412
413 if (adev->pp_enabled)
Eric Huang56327082016-04-12 14:57:23 -0400414 amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask);
Eric Huangc85e2992016-05-19 15:41:25 -0400415 else if (adev->pm.funcs->force_clock_level)
416 adev->pm.funcs->force_clock_level(adev, PP_SCLK, mask);
Eric Huangf3898ea2015-12-11 16:24:34 -0500417fail:
418 return count;
419}
420
421static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
422 struct device_attribute *attr,
423 char *buf)
424{
425 struct drm_device *ddev = dev_get_drvdata(dev);
426 struct amdgpu_device *adev = ddev->dev_private;
427 ssize_t size = 0;
428
429 if (adev->pp_enabled)
430 size = amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf);
Eric Huangc85e2992016-05-19 15:41:25 -0400431 else if (adev->pm.funcs->print_clock_levels)
432 size = adev->pm.funcs->print_clock_levels(adev, PP_MCLK, buf);
Eric Huangf3898ea2015-12-11 16:24:34 -0500433
434 return size;
435}
436
437static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
438 struct device_attribute *attr,
439 const char *buf,
440 size_t count)
441{
442 struct drm_device *ddev = dev_get_drvdata(dev);
443 struct amdgpu_device *adev = ddev->dev_private;
444 int ret;
445 long level;
Eric Huang56327082016-04-12 14:57:23 -0400446 uint32_t i, mask = 0;
447 char sub_str[2];
Eric Huangf3898ea2015-12-11 16:24:34 -0500448
Eric Huang14b33072016-06-14 15:08:22 -0400449 for (i = 0; i < strlen(buf); i++) {
450 if (*(buf + i) == '\n')
451 continue;
Eric Huang56327082016-04-12 14:57:23 -0400452 sub_str[0] = *(buf + i);
453 sub_str[1] = '\0';
454 ret = kstrtol(sub_str, 0, &level);
Eric Huangf3898ea2015-12-11 16:24:34 -0500455
Eric Huang56327082016-04-12 14:57:23 -0400456 if (ret) {
457 count = -EINVAL;
458 goto fail;
459 }
460 mask |= 1 << level;
Eric Huangf3898ea2015-12-11 16:24:34 -0500461 }
462
463 if (adev->pp_enabled)
Eric Huang56327082016-04-12 14:57:23 -0400464 amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask);
Eric Huangc85e2992016-05-19 15:41:25 -0400465 else if (adev->pm.funcs->force_clock_level)
466 adev->pm.funcs->force_clock_level(adev, PP_MCLK, mask);
Eric Huangf3898ea2015-12-11 16:24:34 -0500467fail:
468 return count;
469}
470
471static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
472 struct device_attribute *attr,
473 char *buf)
474{
475 struct drm_device *ddev = dev_get_drvdata(dev);
476 struct amdgpu_device *adev = ddev->dev_private;
477 ssize_t size = 0;
478
479 if (adev->pp_enabled)
480 size = amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf);
Eric Huangc85e2992016-05-19 15:41:25 -0400481 else if (adev->pm.funcs->print_clock_levels)
482 size = adev->pm.funcs->print_clock_levels(adev, PP_PCIE, buf);
Eric Huangf3898ea2015-12-11 16:24:34 -0500483
484 return size;
485}
486
487static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
488 struct device_attribute *attr,
489 const char *buf,
490 size_t count)
491{
492 struct drm_device *ddev = dev_get_drvdata(dev);
493 struct amdgpu_device *adev = ddev->dev_private;
494 int ret;
495 long level;
Eric Huang56327082016-04-12 14:57:23 -0400496 uint32_t i, mask = 0;
497 char sub_str[2];
Eric Huangf3898ea2015-12-11 16:24:34 -0500498
Eric Huang14b33072016-06-14 15:08:22 -0400499 for (i = 0; i < strlen(buf); i++) {
500 if (*(buf + i) == '\n')
501 continue;
Eric Huang56327082016-04-12 14:57:23 -0400502 sub_str[0] = *(buf + i);
503 sub_str[1] = '\0';
504 ret = kstrtol(sub_str, 0, &level);
Eric Huangf3898ea2015-12-11 16:24:34 -0500505
Eric Huang56327082016-04-12 14:57:23 -0400506 if (ret) {
507 count = -EINVAL;
508 goto fail;
509 }
510 mask |= 1 << level;
Eric Huangf3898ea2015-12-11 16:24:34 -0500511 }
512
513 if (adev->pp_enabled)
Eric Huang56327082016-04-12 14:57:23 -0400514 amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask);
Eric Huangc85e2992016-05-19 15:41:25 -0400515 else if (adev->pm.funcs->force_clock_level)
516 adev->pm.funcs->force_clock_level(adev, PP_PCIE, mask);
Eric Huangf3898ea2015-12-11 16:24:34 -0500517fail:
518 return count;
519}
520
Eric Huang428bafa2016-05-12 14:51:21 -0400521static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
522 struct device_attribute *attr,
523 char *buf)
524{
525 struct drm_device *ddev = dev_get_drvdata(dev);
526 struct amdgpu_device *adev = ddev->dev_private;
527 uint32_t value = 0;
528
529 if (adev->pp_enabled)
530 value = amdgpu_dpm_get_sclk_od(adev);
Eric Huang8b2e5742016-05-19 15:46:10 -0400531 else if (adev->pm.funcs->get_sclk_od)
532 value = adev->pm.funcs->get_sclk_od(adev);
Eric Huang428bafa2016-05-12 14:51:21 -0400533
534 return snprintf(buf, PAGE_SIZE, "%d\n", value);
535}
536
537static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
538 struct device_attribute *attr,
539 const char *buf,
540 size_t count)
541{
542 struct drm_device *ddev = dev_get_drvdata(dev);
543 struct amdgpu_device *adev = ddev->dev_private;
544 int ret;
545 long int value;
546
547 ret = kstrtol(buf, 0, &value);
548
549 if (ret) {
550 count = -EINVAL;
551 goto fail;
552 }
553
Eric Huang8b2e5742016-05-19 15:46:10 -0400554 if (adev->pp_enabled) {
Eric Huang428bafa2016-05-12 14:51:21 -0400555 amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
Eric Huang8b2e5742016-05-19 15:46:10 -0400556 amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_READJUST_POWER_STATE, NULL, NULL);
557 } else if (adev->pm.funcs->set_sclk_od) {
558 adev->pm.funcs->set_sclk_od(adev, (uint32_t)value);
559 adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
560 amdgpu_pm_compute_clocks(adev);
561 }
Eric Huang428bafa2016-05-12 14:51:21 -0400562
563fail:
564 return count;
565}
566
Eric Huangf2bdc052016-05-24 15:11:17 -0400567static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
568 struct device_attribute *attr,
569 char *buf)
570{
571 struct drm_device *ddev = dev_get_drvdata(dev);
572 struct amdgpu_device *adev = ddev->dev_private;
573 uint32_t value = 0;
574
575 if (adev->pp_enabled)
576 value = amdgpu_dpm_get_mclk_od(adev);
577 else if (adev->pm.funcs->get_mclk_od)
578 value = adev->pm.funcs->get_mclk_od(adev);
579
580 return snprintf(buf, PAGE_SIZE, "%d\n", value);
581}
582
583static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
584 struct device_attribute *attr,
585 const char *buf,
586 size_t count)
587{
588 struct drm_device *ddev = dev_get_drvdata(dev);
589 struct amdgpu_device *adev = ddev->dev_private;
590 int ret;
591 long int value;
592
593 ret = kstrtol(buf, 0, &value);
594
595 if (ret) {
596 count = -EINVAL;
597 goto fail;
598 }
599
600 if (adev->pp_enabled) {
601 amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
602 amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_READJUST_POWER_STATE, NULL, NULL);
603 } else if (adev->pm.funcs->set_mclk_od) {
604 adev->pm.funcs->set_mclk_od(adev, (uint32_t)value);
605 adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
606 amdgpu_pm_compute_clocks(adev);
607 }
608
609fail:
610 return count;
611}
612
/* sysfs attributes exposed on the DRM device: DPM state / performance
 * level controls plus the powerplay (pp_*) state, table, clock-level and
 * overdrive interfaces.  Writable files are root-only (S_IWUSR). */
static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, amdgpu_get_dpm_state, amdgpu_set_dpm_state);
static DEVICE_ATTR(power_dpm_force_performance_level, S_IRUGO | S_IWUSR,
		amdgpu_get_dpm_forced_performance_level,
		amdgpu_set_dpm_forced_performance_level);
static DEVICE_ATTR(pp_num_states, S_IRUGO, amdgpu_get_pp_num_states, NULL);
static DEVICE_ATTR(pp_cur_state, S_IRUGO, amdgpu_get_pp_cur_state, NULL);
static DEVICE_ATTR(pp_force_state, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_force_state,
		amdgpu_set_pp_force_state);
static DEVICE_ATTR(pp_table, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_table,
		amdgpu_set_pp_table);
static DEVICE_ATTR(pp_dpm_sclk, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_dpm_sclk,
		amdgpu_set_pp_dpm_sclk);
static DEVICE_ATTR(pp_dpm_mclk, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_dpm_mclk,
		amdgpu_set_pp_dpm_mclk);
static DEVICE_ATTR(pp_dpm_pcie, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_dpm_pcie,
		amdgpu_set_pp_dpm_pcie);
static DEVICE_ATTR(pp_sclk_od, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_sclk_od,
		amdgpu_set_pp_sclk_od);
static DEVICE_ATTR(pp_mclk_od, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_mclk_od,
		amdgpu_set_pp_mclk_od);
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400640
641static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
642 struct device_attribute *attr,
643 char *buf)
644{
645 struct amdgpu_device *adev = dev_get_drvdata(dev);
Alex Deucher0c67df42016-02-19 15:30:15 -0500646 struct drm_device *ddev = adev->ddev;
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400647 int temp;
648
Alex Deucher0c67df42016-02-19 15:30:15 -0500649 /* Can't get temperature when the card is off */
650 if ((adev->flags & AMD_IS_PX) &&
651 (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
652 return -EINVAL;
653
Jammy Zhoue61710c2015-11-10 18:31:08 -0500654 if (!adev->pp_enabled && !adev->pm.funcs->get_temperature)
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400655 temp = 0;
Rex Zhu8804b8d2015-11-10 18:29:11 -0500656 else
657 temp = amdgpu_dpm_get_temperature(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400658
659 return snprintf(buf, PAGE_SIZE, "%d\n", temp);
660}
661
662static ssize_t amdgpu_hwmon_show_temp_thresh(struct device *dev,
663 struct device_attribute *attr,
664 char *buf)
665{
666 struct amdgpu_device *adev = dev_get_drvdata(dev);
667 int hyst = to_sensor_dev_attr(attr)->index;
668 int temp;
669
670 if (hyst)
671 temp = adev->pm.dpm.thermal.min_temp;
672 else
673 temp = adev->pm.dpm.thermal.max_temp;
674
675 return snprintf(buf, PAGE_SIZE, "%d\n", temp);
676}
677
678static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
679 struct device_attribute *attr,
680 char *buf)
681{
682 struct amdgpu_device *adev = dev_get_drvdata(dev);
683 u32 pwm_mode = 0;
684
Jammy Zhoue61710c2015-11-10 18:31:08 -0500685 if (!adev->pp_enabled && !adev->pm.funcs->get_fan_control_mode)
Rex Zhu8804b8d2015-11-10 18:29:11 -0500686 return -EINVAL;
687
688 pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400689
690 /* never 0 (full-speed), fuse or smc-controlled always */
691 return sprintf(buf, "%i\n", pwm_mode == FDO_PWM_MODE_STATIC ? 1 : 2);
692}
693
694static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
695 struct device_attribute *attr,
696 const char *buf,
697 size_t count)
698{
699 struct amdgpu_device *adev = dev_get_drvdata(dev);
700 int err;
701 int value;
702
Jammy Zhoue61710c2015-11-10 18:31:08 -0500703 if (!adev->pp_enabled && !adev->pm.funcs->set_fan_control_mode)
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400704 return -EINVAL;
705
706 err = kstrtoint(buf, 10, &value);
707 if (err)
708 return err;
709
710 switch (value) {
711 case 1: /* manual, percent-based */
712 amdgpu_dpm_set_fan_control_mode(adev, FDO_PWM_MODE_STATIC);
713 break;
714 default: /* disable */
715 amdgpu_dpm_set_fan_control_mode(adev, 0);
716 break;
717 }
718
719 return count;
720}
721
/* hwmon pwm1_min: minimum accepted PWM duty value (always 0). */
static ssize_t amdgpu_hwmon_get_pwm1_min(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	return sprintf(buf, "%i\n", 0);
}
728
/* hwmon pwm1_max: maximum accepted PWM duty value (always 255). */
static ssize_t amdgpu_hwmon_get_pwm1_max(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	return sprintf(buf, "%i\n", 255);
}
735
736static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
737 struct device_attribute *attr,
738 const char *buf, size_t count)
739{
740 struct amdgpu_device *adev = dev_get_drvdata(dev);
741 int err;
742 u32 value;
743
744 err = kstrtou32(buf, 10, &value);
745 if (err)
746 return err;
747
748 value = (value * 100) / 255;
749
750 err = amdgpu_dpm_set_fan_speed_percent(adev, value);
751 if (err)
752 return err;
753
754 return count;
755}
756
757static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
758 struct device_attribute *attr,
759 char *buf)
760{
761 struct amdgpu_device *adev = dev_get_drvdata(dev);
762 int err;
763 u32 speed;
764
765 err = amdgpu_dpm_get_fan_speed_percent(adev, &speed);
766 if (err)
767 return err;
768
769 speed = (speed * 255) / 100;
770
771 return sprintf(buf, "%i\n", speed);
772}
773
Grazvydas Ignotas81c15142016-10-29 23:28:59 +0300774static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
775 struct device_attribute *attr,
776 char *buf)
777{
778 struct amdgpu_device *adev = dev_get_drvdata(dev);
779 int err;
780 u32 speed;
781
782 err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
783 if (err)
784 return err;
785
786 return sprintf(buf, "%i\n", speed);
787}
788
/* hwmon sensor attributes: GPU temperature (+ crit/hyst thresholds from
 * the dpm thermal limits), PWM fan control, and fan RPM readback.
 * Visibility is filtered per-device by hwmon_attributes_visible(). */
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 1);
static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1, amdgpu_hwmon_set_pwm1, 0);
static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1_enable, amdgpu_hwmon_set_pwm1_enable, 0);
static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, amdgpu_hwmon_get_pwm1_min, NULL, 0);
static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, amdgpu_hwmon_get_pwm1_max, NULL, 0);
static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, amdgpu_hwmon_get_fan1_input, NULL, 0);

/* All hwmon attributes; NULL-terminated for sysfs. */
static struct attribute *hwmon_attributes[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp1_crit.dev_attr.attr,
	&sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
	&sensor_dev_attr_pwm1.dev_attr.attr,
	&sensor_dev_attr_pwm1_enable.dev_attr.attr,
	&sensor_dev_attr_pwm1_min.dev_attr.attr,
	&sensor_dev_attr_pwm1_max.dev_attr.attr,
	&sensor_dev_attr_fan1_input.dev_attr.attr,
	NULL
};
809
/* sysfs is_visible callback for the hwmon attribute group: returns the
 * effective mode for @attr on this device, or 0 to hide it entirely.
 * Ordering matters: the dpm_enabled check runs first, then powerplay
 * devices get everything, and only the legacy path filters further.
 */
static umode_t hwmon_attributes_visible(struct kobject *kobj,
					struct attribute *attr, int index)
{
	struct device *dev = kobj_to_dev(kobj);
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	umode_t effective_mode = attr->mode;

	/* Skip limit attributes if DPM is not enabled */
	if (!adev->pm.dpm_enabled &&
	    (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
		return 0;

	/* powerplay exposes the full set; no per-callback filtering below */
	if (adev->pp_enabled)
		return effective_mode;

	/* Skip fan attributes if fan is not present */
	if (adev->pm.no_fan &&
	    (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
		return 0;

	/* mask fan attributes if we have no bindings for this asic to expose */
	if ((!adev->pm.funcs->get_fan_speed_percent &&
	     attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
	    (!adev->pm.funcs->get_fan_control_mode &&
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
		effective_mode &= ~S_IRUGO;

	if ((!adev->pm.funcs->set_fan_speed_percent &&
	     attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
	    (!adev->pm.funcs->set_fan_control_mode &&
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
		effective_mode &= ~S_IWUSR;

	/* hide max/min values if we can't both query and manage the fan */
	if ((!adev->pm.funcs->set_fan_speed_percent &&
	     !adev->pm.funcs->get_fan_speed_percent) &&
	    (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
		return 0;

	/* requires powerplay (legacy path reaches here only when !pp_enabled) */
	if (attr == &sensor_dev_attr_fan1_input.dev_attr.attr)
		return 0;

	return effective_mode;
}
864
/* Attribute group with per-device visibility filtering. */
static const struct attribute_group hwmon_attrgroup = {
	.attrs = hwmon_attributes,
	.is_visible = hwmon_attributes_visible,
};
869
/* NULL-terminated list of groups passed to hwmon registration. */
static const struct attribute_group *hwmon_groups[] = {
	&hwmon_attrgroup,
	NULL
};
874
875void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
876{
877 struct amdgpu_device *adev =
878 container_of(work, struct amdgpu_device,
879 pm.dpm.thermal.work);
880 /* switch to the thermal state */
Rex Zhu3a2c7882015-08-25 15:57:43 +0800881 enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;
Alex Deucherd38ceaf2015-04-20 16:55:21 -0400882
883 if (!adev->pm.dpm_enabled)
884 return;
885
886 if (adev->pm.funcs->get_temperature) {
887 int temp = amdgpu_dpm_get_temperature(adev);
888
889 if (temp < adev->pm.dpm.thermal.min_temp)
890 /* switch back the user state */
891 dpm_state = adev->pm.dpm.user_state;
892 } else {
893 if (adev->pm.dpm.thermal.high_to_low)
894 /* switch back the user state */
895 dpm_state = adev->pm.dpm.user_state;
896 }
897 mutex_lock(&adev->pm.mutex);
898 if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
899 adev->pm.dpm.thermal_active = true;
900 else
901 adev->pm.dpm.thermal_active = false;
902 adev->pm.dpm.state = dpm_state;
903 mutex_unlock(&adev->pm.mutex);
904
905 amdgpu_pm_compute_clocks(adev);
906}
907
/*
 * amdgpu_dpm_pick_power_state - select the best matching power state
 * @adev: amdgpu device
 * @dpm_state: requested power state class
 *
 * Scans the power-state table for a state matching @dpm_state under the
 * current display configuration.  If no state matches, @dpm_state is
 * downgraded along a fixed fallback chain (e.g. UVD SD -> UVD HD ->
 * performance) and the search restarts.  Returns NULL only if even the
 * fallback chain yields nothing.
 */
static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev,
						     enum amd_pm_state_type dpm_state)
{
	int i;
	struct amdgpu_ps *ps;
	u32 ui_class;
	/* single-display configs may use states restricted to one head */
	bool single_display = (adev->pm.dpm.new_active_crtc_count < 2) ?
		true : false;

	/* check if the vblank period is too short to adjust the mclk */
	if (single_display && adev->pm.funcs->vblank_too_short) {
		if (amdgpu_dpm_vblank_too_short(adev))
			single_display = false;
	}

	/* certain older asics have a separare 3D performance state,
	 * so try that first if the user selected performance
	 */
	if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
		dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
	/* balanced states don't exist at the moment */
	if (dpm_state == POWER_STATE_TYPE_BALANCED)
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;

restart_search:
	/* Pick the best power state based on current conditions */
	for (i = 0; i < adev->pm.dpm.num_ps; i++) {
		ps = &adev->pm.dpm.ps[i];
		ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
		switch (dpm_state) {
		/* user states */
		case POWER_STATE_TYPE_BATTERY:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_BALANCED:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_PERFORMANCE:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		/* internal states */
		case POWER_STATE_TYPE_INTERNAL_UVD:
			/* a cached generic UVD state takes priority if present */
			if (adev->pm.dpm.uvd_ps)
				return adev->pm.dpm.uvd_ps;
			else
				break;
		case POWER_STATE_TYPE_INTERNAL_UVD_SD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_BOOT:
			return adev->pm.dpm.boot_ps;
		case POWER_STATE_TYPE_INTERNAL_THERMAL:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ACPI:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ULV:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_3DPERF:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
				return ps;
			break;
		default:
			break;
		}
	}
	/* use a fallback state if we didn't match */
	switch (dpm_state) {
	case POWER_STATE_TYPE_INTERNAL_UVD_SD:
		dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_UVD_HD:
	case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
	case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
		if (adev->pm.dpm.uvd_ps) {
			return adev->pm.dpm.uvd_ps;
		} else {
			dpm_state = POWER_STATE_TYPE_PERFORMANCE;
			goto restart_search;
		}
	case POWER_STATE_TYPE_INTERNAL_THERMAL:
		dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_ACPI:
		dpm_state = POWER_STATE_TYPE_BATTERY;
		goto restart_search;
	case POWER_STATE_TYPE_BATTERY:
	case POWER_STATE_TYPE_BALANCED:
	case POWER_STATE_TYPE_INTERNAL_3DPERF:
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;
		goto restart_search;
	default:
		break;
	}

	return NULL;
}
1041
/*
 * amdgpu_dpm_change_power_state_locked - re-evaluate and apply the power state
 * @adev: amdgpu device
 *
 * Picks the best power state for the current conditions and programs it
 * unless it is equal to the current one.  Despite the name, this takes
 * adev->pm.mutex itself only indirectly via its callers' convention;
 * it is invoked from amdgpu_pm_compute_clocks() with pm.mutex held.
 */
static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
{
	struct amdgpu_ps *ps;
	enum amd_pm_state_type dpm_state;
	int ret;
	bool equal;

	/* if dpm init failed */
	if (!adev->pm.dpm_enabled)
		return;

	if (adev->pm.dpm.user_state != adev->pm.dpm.state) {
		/* add other state override checks here */
		if ((!adev->pm.dpm.thermal_active) &&
		    (!adev->pm.dpm.uvd_active))
			adev->pm.dpm.state = adev->pm.dpm.user_state;
	}
	dpm_state = adev->pm.dpm.state;

	ps = amdgpu_dpm_pick_power_state(adev, dpm_state);
	if (ps)
		adev->pm.dpm.requested_ps = ps;
	else
		return;

	/* amdgpu_dpm=1 enables verbose state-transition logging */
	if (amdgpu_dpm == 1) {
		printk("switching from power state:\n");
		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);
		printk("switching to power state:\n");
		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps);
	}

	/* update whether vce is active */
	ps->vce_active = adev->pm.dpm.vce_active;

	amdgpu_dpm_display_configuration_changed(adev);

	ret = amdgpu_dpm_pre_set_power_state(adev);
	if (ret)
		return;

	/* NOTE(review): "amgdpu" is the helper's actual (typo'd) declared
	 * name elsewhere in the driver; renaming it here would break the
	 * build.  On error, assume the states differ and switch anyway. */
	if ((0 != amgdpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps, adev->pm.dpm.requested_ps, &equal)))
		equal = false;

	if (equal)
		return;

	amdgpu_dpm_set_power_state(adev);
	amdgpu_dpm_post_set_power_state(adev);

	adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
	adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;

	if (adev->pm.funcs->force_performance_level) {
		if (adev->pm.dpm.thermal_active) {
			enum amd_dpm_forced_level level = adev->pm.dpm.forced_level;
			/* force low perf level for thermal */
			amdgpu_dpm_force_performance_level(adev, AMD_DPM_FORCED_LEVEL_LOW);
			/* save the user's level */
			adev->pm.dpm.forced_level = level;
		} else {
			/* otherwise, user selected level */
			amdgpu_dpm_force_performance_level(adev, adev->pm.dpm.forced_level);
		}
	}
}
1108
1109void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
1110{
Tom St Denise95a14a2016-07-28 09:40:07 -04001111 if (adev->pp_enabled || adev->pm.funcs->powergate_uvd) {
1112 /* enable/disable UVD */
1113 mutex_lock(&adev->pm.mutex);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001114 amdgpu_dpm_powergate_uvd(adev, !enable);
Tom St Denise95a14a2016-07-28 09:40:07 -04001115 mutex_unlock(&adev->pm.mutex);
1116 } else {
1117 if (enable) {
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001118 mutex_lock(&adev->pm.mutex);
Tom St Denise95a14a2016-07-28 09:40:07 -04001119 adev->pm.dpm.uvd_active = true;
1120 adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001121 mutex_unlock(&adev->pm.mutex);
1122 } else {
Tom St Denise95a14a2016-07-28 09:40:07 -04001123 mutex_lock(&adev->pm.mutex);
1124 adev->pm.dpm.uvd_active = false;
1125 mutex_unlock(&adev->pm.mutex);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001126 }
Tom St Denise95a14a2016-07-28 09:40:07 -04001127 amdgpu_pm_compute_clocks(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001128 }
1129}
1130
1131void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
1132{
Tom St Denise95a14a2016-07-28 09:40:07 -04001133 if (adev->pp_enabled || adev->pm.funcs->powergate_vce) {
1134 /* enable/disable VCE */
1135 mutex_lock(&adev->pm.mutex);
Sonny Jiangb7a07762015-05-28 15:47:53 -04001136 amdgpu_dpm_powergate_vce(adev, !enable);
Tom St Denise95a14a2016-07-28 09:40:07 -04001137 mutex_unlock(&adev->pm.mutex);
1138 } else {
1139 if (enable) {
Sonny Jiangb7a07762015-05-28 15:47:53 -04001140 mutex_lock(&adev->pm.mutex);
Tom St Denise95a14a2016-07-28 09:40:07 -04001141 adev->pm.dpm.vce_active = true;
1142 /* XXX select vce level based on ring/task */
Rex Zhu0d8de7c2016-10-12 15:13:29 +08001143 adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
Sonny Jiangb7a07762015-05-28 15:47:53 -04001144 mutex_unlock(&adev->pm.mutex);
1145 } else {
Tom St Denise95a14a2016-07-28 09:40:07 -04001146 mutex_lock(&adev->pm.mutex);
1147 adev->pm.dpm.vce_active = false;
1148 mutex_unlock(&adev->pm.mutex);
Sonny Jiangb7a07762015-05-28 15:47:53 -04001149 }
Tom St Denise95a14a2016-07-28 09:40:07 -04001150 amdgpu_pm_compute_clocks(adev);
Sonny Jiangb7a07762015-05-28 15:47:53 -04001151 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001152}
1153
1154void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
1155{
1156 int i;
1157
Jammy Zhoue61710c2015-11-10 18:31:08 -05001158 if (adev->pp_enabled)
Rex Zhu1b5708f2015-11-10 18:25:24 -05001159 /* TO DO */
1160 return;
1161
1162 for (i = 0; i < adev->pm.dpm.num_ps; i++)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001163 amdgpu_dpm_print_power_state(adev, &adev->pm.dpm.ps[i]);
Rex Zhu1b5708f2015-11-10 18:25:24 -05001164
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001165}
1166
1167int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
1168{
1169 int ret;
1170
Alex Deucherc86f5ebf2015-10-23 10:45:14 -04001171 if (adev->pm.sysfs_initialized)
1172 return 0;
1173
Jammy Zhoue61710c2015-11-10 18:31:08 -05001174 if (!adev->pp_enabled) {
Rex Zhu1b5708f2015-11-10 18:25:24 -05001175 if (adev->pm.funcs->get_temperature == NULL)
1176 return 0;
1177 }
1178
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001179 adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
1180 DRIVER_NAME, adev,
1181 hwmon_groups);
1182 if (IS_ERR(adev->pm.int_hwmon_dev)) {
1183 ret = PTR_ERR(adev->pm.int_hwmon_dev);
1184 dev_err(adev->dev,
1185 "Unable to register hwmon device: %d\n", ret);
1186 return ret;
1187 }
1188
1189 ret = device_create_file(adev->dev, &dev_attr_power_dpm_state);
1190 if (ret) {
1191 DRM_ERROR("failed to create device file for dpm state\n");
1192 return ret;
1193 }
1194 ret = device_create_file(adev->dev, &dev_attr_power_dpm_force_performance_level);
1195 if (ret) {
1196 DRM_ERROR("failed to create device file for dpm state\n");
1197 return ret;
1198 }
Eric Huangf3898ea2015-12-11 16:24:34 -05001199
1200 if (adev->pp_enabled) {
1201 ret = device_create_file(adev->dev, &dev_attr_pp_num_states);
1202 if (ret) {
1203 DRM_ERROR("failed to create device file pp_num_states\n");
1204 return ret;
1205 }
1206 ret = device_create_file(adev->dev, &dev_attr_pp_cur_state);
1207 if (ret) {
1208 DRM_ERROR("failed to create device file pp_cur_state\n");
1209 return ret;
1210 }
1211 ret = device_create_file(adev->dev, &dev_attr_pp_force_state);
1212 if (ret) {
1213 DRM_ERROR("failed to create device file pp_force_state\n");
1214 return ret;
1215 }
1216 ret = device_create_file(adev->dev, &dev_attr_pp_table);
1217 if (ret) {
1218 DRM_ERROR("failed to create device file pp_table\n");
1219 return ret;
1220 }
Eric Huangf3898ea2015-12-11 16:24:34 -05001221 }
Eric Huangc85e2992016-05-19 15:41:25 -04001222
1223 ret = device_create_file(adev->dev, &dev_attr_pp_dpm_sclk);
1224 if (ret) {
1225 DRM_ERROR("failed to create device file pp_dpm_sclk\n");
1226 return ret;
1227 }
1228 ret = device_create_file(adev->dev, &dev_attr_pp_dpm_mclk);
1229 if (ret) {
1230 DRM_ERROR("failed to create device file pp_dpm_mclk\n");
1231 return ret;
1232 }
1233 ret = device_create_file(adev->dev, &dev_attr_pp_dpm_pcie);
1234 if (ret) {
1235 DRM_ERROR("failed to create device file pp_dpm_pcie\n");
1236 return ret;
1237 }
Eric Huang8b2e5742016-05-19 15:46:10 -04001238 ret = device_create_file(adev->dev, &dev_attr_pp_sclk_od);
1239 if (ret) {
1240 DRM_ERROR("failed to create device file pp_sclk_od\n");
1241 return ret;
1242 }
Eric Huangf2bdc052016-05-24 15:11:17 -04001243 ret = device_create_file(adev->dev, &dev_attr_pp_mclk_od);
1244 if (ret) {
1245 DRM_ERROR("failed to create device file pp_mclk_od\n");
1246 return ret;
1247 }
Eric Huangc85e2992016-05-19 15:41:25 -04001248
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001249 ret = amdgpu_debugfs_pm_init(adev);
1250 if (ret) {
1251 DRM_ERROR("Failed to register debugfs file for dpm!\n");
1252 return ret;
1253 }
1254
Alex Deucherc86f5ebf2015-10-23 10:45:14 -04001255 adev->pm.sysfs_initialized = true;
1256
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001257 return 0;
1258}
1259
/*
 * amdgpu_pm_sysfs_fini - tear down the pm sysfs/hwmon interfaces
 * @adev: amdgpu device
 *
 * Mirror of amdgpu_pm_sysfs_init(); device_remove_file() is safe to call
 * even for files whose creation failed partway through init.
 */
void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
{
	if (adev->pm.int_hwmon_dev)
		hwmon_device_unregister(adev->pm.int_hwmon_dev);
	device_remove_file(adev->dev, &dev_attr_power_dpm_state);
	device_remove_file(adev->dev, &dev_attr_power_dpm_force_performance_level);
	/* pp_* state files are only created when powerplay is enabled */
	if (adev->pp_enabled) {
		device_remove_file(adev->dev, &dev_attr_pp_num_states);
		device_remove_file(adev->dev, &dev_attr_pp_cur_state);
		device_remove_file(adev->dev, &dev_attr_pp_force_state);
		device_remove_file(adev->dev, &dev_attr_pp_table);
	}
	device_remove_file(adev->dev, &dev_attr_pp_dpm_sclk);
	device_remove_file(adev->dev, &dev_attr_pp_dpm_mclk);
	device_remove_file(adev->dev, &dev_attr_pp_dpm_pcie);
	device_remove_file(adev->dev, &dev_attr_pp_sclk_od);
	device_remove_file(adev->dev, &dev_attr_pp_mclk_od);
}
1278
/*
 * amdgpu_pm_compute_clocks - re-evaluate clocks for the current workload
 * @adev: amdgpu device
 *
 * Updates display bandwidth, drains all active rings' fences, then either
 * dispatches a display-config-change task to powerplay or (legacy path)
 * recounts active CRTCs, refreshes AC/battery status, and switches the
 * power state under pm.mutex.
 */
void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;
	int i = 0;

	if (!adev->pm.dpm_enabled)
		return;

	amdgpu_display_bandwidth_update(adev);

	/* wait for outstanding work on each ready ring before reclocking */
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (ring && ring->ready)
			amdgpu_fence_wait_empty(ring);
	}

	if (adev->pp_enabled) {
		amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_DISPLAY_CONFIG_CHANGE, NULL, NULL);
	} else {
		mutex_lock(&adev->pm.mutex);
		adev->pm.dpm.new_active_crtcs = 0;
		adev->pm.dpm.new_active_crtc_count = 0;
		if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
			list_for_each_entry(crtc,
					    &ddev->mode_config.crtc_list, head) {
				amdgpu_crtc = to_amdgpu_crtc(crtc);
				if (crtc->enabled) {
					adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id);
					adev->pm.dpm.new_active_crtc_count++;
				}
			}
		}
		/* update battery/ac status */
		if (power_supply_is_system_supplied() > 0)
			adev->pm.dpm.ac_power = true;
		else
			adev->pm.dpm.ac_power = false;

		amdgpu_dpm_change_power_state_locked(adev);

		mutex_unlock(&adev->pm.mutex);
	}
}
1324
1325/*
1326 * Debugfs info
1327 */
1328#if defined(CONFIG_DEBUG_FS)
1329
/*
 * amdgpu_debugfs_pm_info_pp - dump powerplay sensor readings to debugfs
 * @m: seq_file to print into
 * @adev: amdgpu device
 *
 * Prints clocks, voltages, temperature, load, and UVD/VCE state via the
 * powerplay read_sensor interface.  Each sensor read is best-effort: a
 * line is only printed when the read succeeds.  Returns -EINVAL if the
 * read_sensor hook is missing.
 */
static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *adev)
{
	int32_t value;

	/* sanity check PP is enabled */
	if (!(adev->powerplay.pp_funcs &&
	      adev->powerplay.pp_funcs->read_sensor))
		return -EINVAL;

	/* GPU Clocks */
	seq_printf(m, "GFX Clocks and Power:\n");
	/* clock sensors divide by 100 to print MHz — presumably the
	 * readings are in 10 kHz units; confirm against the pp backend */
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK, &value))
		seq_printf(m, "\t%u MHz (MCLK)\n", value/100);
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, &value))
		seq_printf(m, "\t%u MHz (SCLK)\n", value/100);
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX, &value))
		seq_printf(m, "\t%u mV (VDDGFX)\n", value);
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, &value))
		seq_printf(m, "\t%u mV (VDDNB)\n", value);
	seq_printf(m, "\n");

	/* GPU Temp */
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP, &value))
		seq_printf(m, "GPU Temperature: %u C\n", value/1000);

	/* GPU Load */
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD, &value))
		seq_printf(m, "GPU Load: %u %%\n", value);
	seq_printf(m, "\n");

	/* UVD clocks: power sensor of 0 means the block is gated off */
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, &value)) {
		if (!value) {
			seq_printf(m, "UVD: Disabled\n");
		} else {
			seq_printf(m, "UVD: Enabled\n");
			if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, &value))
				seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
			if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, &value))
				seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
		}
	}
	seq_printf(m, "\n");

	/* VCE clocks */
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, &value)) {
		if (!value) {
			seq_printf(m, "VCE: Disabled\n");
		} else {
			seq_printf(m, "VCE: Enabled\n");
			if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, &value))
				seq_printf(m, "\t%u MHz (ECCLK)\n", value/100);
		}
	}

	return 0;
}
1387
Huang Ruia8503b12017-01-05 19:17:13 +08001388static void amdgpu_parse_cg_state(struct seq_file *m, u32 flags)
1389{
1390 int i;
1391
1392 for (i = 0; clocks[i].flag; i++)
1393 seq_printf(m, "\t%s: %s\n", clocks[i].name,
1394 (flags & clocks[i].flag) ? "On" : "Off");
1395}
1396
/*
 * amdgpu_debugfs_pm_info - debugfs show callback for amdgpu_pm_info
 *
 * Prints the clockgating flag mask, then delegates: powerplay parts go
 * through amdgpu_debugfs_pm_info_pp(), legacy DPM parts use the ASIC's
 * debugfs_print_current_performance_level hook (under pm.mutex).  A
 * powered-off PX ASIC is reported without touching the hardware.
 */
static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_device *ddev = adev->ddev;
	u32 flags = 0;

	amdgpu_get_clockgating_state(adev, &flags);
	seq_printf(m, "Clock Gating Flags Mask: 0x%x\n", flags);
	amdgpu_parse_cg_state(m, flags);
	seq_printf(m, "\n");

	if (!adev->pm.dpm_enabled) {
		seq_printf(m, "dpm not enabled\n");
		return 0;
	}
	/* don't poke a PX (switchable-graphics) ASIC that is powered down */
	if ((adev->flags & AMD_IS_PX) &&
	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
		seq_printf(m, "PX asic powered off\n");
	} else if (adev->pp_enabled) {
		return amdgpu_debugfs_pm_info_pp(m, adev);
	} else {
		mutex_lock(&adev->pm.mutex);
		if (adev->pm.funcs->debugfs_print_current_performance_level)
			adev->pm.funcs->debugfs_print_current_performance_level(adev, m);
		else
			seq_printf(m, "Debugfs support not implemented for this asic\n");
		mutex_unlock(&adev->pm.mutex);
	}

	return 0;
}
1430
/* debugfs entries registered by amdgpu_debugfs_pm_init() */
static const struct drm_info_list amdgpu_pm_info_list[] = {
	{"amdgpu_pm_info", amdgpu_debugfs_pm_info, 0, NULL},
};
1434#endif
1435
/*
 * amdgpu_debugfs_pm_init - register the pm debugfs entries
 * @adev: amdgpu device
 *
 * Returns 0 (and does nothing) when debugfs is compiled out.
 */
static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_pm_info_list,
					ARRAY_SIZE(amdgpu_pm_info_list));
#else
	return 0;
#endif
}