/*
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Rafał Miłecki <zajec5@gmail.com>
 *          Alex Deucher <alexdeucher@gmail.com>
 */
#include "drmP.h"
#include "radeon.h"

#define RADEON_IDLE_LOOP_MS 100
#define RADEON_RECLOCK_DELAY_MS 200

static void radeon_pm_check_limits(struct radeon_device *rdev);
static void radeon_pm_set_clocks_locked(struct radeon_device *rdev);
static void radeon_pm_set_clocks(struct radeon_device *rdev);
static void radeon_pm_reclock_work_handler(struct work_struct *work);
static void radeon_pm_idle_work_handler(struct work_struct *work);
static int radeon_debugfs_pm_init(struct radeon_device *rdev);

static const char *pm_state_names[4] = {
	"PM_STATE_DISABLED",
	"PM_STATE_MINIMUM",
	"PM_STATE_PAUSED",
	"PM_STATE_ACTIVE"
};

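/*
 * Log the power states parsed from the video BIOS. Clock values are
 * stored in units of 10 kHz (hence the "* 10" below); IGP parts use
 * system memory, so only the engine clock is printed for them.
 */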
static void radeon_print_power_mode_info(struct radeon_device *rdev)
{
	int i, j;
	bool is_default;

	DRM_INFO("%d Power State(s)\n", rdev->pm.num_power_states);
	for (i = 0; i < rdev->pm.num_power_states; i++) {
		if (rdev->pm.default_power_state == &rdev->pm.power_state[i])
			is_default = true;
		else
			is_default = false;
		DRM_INFO("State %d %s\n", i, is_default ? "(default)" : "");
		if ((rdev->flags & RADEON_IS_PCIE) && !(rdev->flags & RADEON_IS_IGP))
			DRM_INFO("\t%d PCIE Lanes\n", rdev->pm.power_state[i].non_clock_info.pcie_lanes);
		DRM_INFO("\t%d Clock Mode(s)\n", rdev->pm.power_state[i].num_clock_modes);
		for (j = 0; j < rdev->pm.power_state[i].num_clock_modes; j++) {
			if (rdev->flags & RADEON_IS_IGP)
				DRM_INFO("\t\t%d engine: %d\n",
					 j,
					 rdev->pm.power_state[i].clock_info[j].sclk * 10);
			else
				DRM_INFO("\t\t%d engine/memory: %d/%d\n",
					 j,
					 rdev->pm.power_state[i].clock_info[j].sclk * 10,
					 rdev->pm.power_state[i].clock_info[j].mclk * 10);
		}
	}
}

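/*
 * One-time setup: start disabled, parse the power tables from the
 * ATOM or COMBIOS ROM when a BIOS is present, register the debugfs
 * file and the two work items, and move to PM_STATE_PAUSED when the
 * radeon_dynpm module option requests dynamic reclocking.
 */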
int radeon_pm_init(struct radeon_device *rdev)
{
	rdev->pm.state = PM_STATE_DISABLED;
	rdev->pm.planned_action = PM_ACTION_NONE;
	rdev->pm.downclocked = false;
	rdev->pm.vblank_callback = false;

	if (rdev->bios) {
		if (rdev->is_atom_bios)
			radeon_atombios_get_power_modes(rdev);
		else
			radeon_combios_get_power_modes(rdev);
		radeon_print_power_mode_info(rdev);
	}

	radeon_pm_check_limits(rdev);

	if (radeon_debugfs_pm_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for PM!\n");
	}

	INIT_WORK(&rdev->pm.reclock_work, radeon_pm_reclock_work_handler);
	INIT_DELAYED_WORK(&rdev->pm.idle_work, radeon_pm_idle_work_handler);

	if (radeon_dynpm != -1 && radeon_dynpm) {
		rdev->pm.state = PM_STATE_PAUSED;
		DRM_INFO("radeon: dynamic power management enabled\n");
	}

	DRM_INFO("radeon: power management initialized\n");

	return 0;
}

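/*
 * Compute the lowest clocks we are willing to program. The offset of
 * 5000 is in the 10 kHz units used throughout this file, i.e. the
 * floor sits 50 MHz below the BIOS default engine/memory clocks.
 */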
static void radeon_pm_check_limits(struct radeon_device *rdev)
{
	rdev->pm.min_gpu_engine_clock = rdev->clock.default_sclk - 5000;
	rdev->pm.min_gpu_memory_clock = rdev->clock.default_mclk - 5000;
}

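/*
 * Re-evaluate the PM state whenever the display configuration changes.
 * With two or more active CRTCs dynamic reclocking is paused and the
 * clocks forced back up (a reclock can only be synchronized with one
 * display's vblank); with exactly one it is (re)armed; with none the
 * GPU is parked at minimum clocks. Takes pm.mutex and may block up to
 * 300 ms waiting for a pending upclock to complete.
 */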
void radeon_pm_compute_clocks(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_connector *connector;
	struct radeon_crtc *radeon_crtc;
	int count = 0;

	if (rdev->pm.state == PM_STATE_DISABLED)
		return;

	mutex_lock(&rdev->pm.mutex);

	rdev->pm.active_crtcs = 0;
	list_for_each_entry(connector,
		&ddev->mode_config.connector_list, head) {
		if (connector->encoder &&
		    connector->dpms != DRM_MODE_DPMS_OFF) {
			radeon_crtc = to_radeon_crtc(connector->encoder->crtc);
			rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id);
			++count;
		}
	}

	if (count > 1) {
		if (rdev->pm.state == PM_STATE_ACTIVE) {
			wait_queue_head_t wait;
			init_waitqueue_head(&wait);

			cancel_delayed_work(&rdev->pm.idle_work);

			rdev->pm.state = PM_STATE_PAUSED;
			rdev->pm.planned_action = PM_ACTION_UPCLOCK;
			rdev->pm.vblank_callback = true;

			mutex_unlock(&rdev->pm.mutex);

			wait_event_timeout(wait, !rdev->pm.downclocked,
				msecs_to_jiffies(300));
			if (!rdev->pm.downclocked)
				radeon_pm_set_clocks(rdev);

			DRM_DEBUG("radeon: dynamic power management deactivated\n");
		} else {
			mutex_unlock(&rdev->pm.mutex);
		}
	} else if (count == 1) {
		rdev->pm.min_mode_engine_clock = rdev->pm.min_gpu_engine_clock;
		rdev->pm.min_mode_memory_clock = rdev->pm.min_gpu_memory_clock;
		/* TODO: Increase clocks if needed for current mode */

		if (rdev->pm.state == PM_STATE_MINIMUM) {
			rdev->pm.state = PM_STATE_ACTIVE;
			rdev->pm.planned_action = PM_ACTION_UPCLOCK;
			radeon_pm_set_clocks_locked(rdev);

			queue_delayed_work(rdev->wq, &rdev->pm.idle_work,
				msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
		}
		else if (rdev->pm.state == PM_STATE_PAUSED) {
			rdev->pm.state = PM_STATE_ACTIVE;
			queue_delayed_work(rdev->wq, &rdev->pm.idle_work,
				msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
			DRM_DEBUG("radeon: dynamic power management activated\n");
		}

		mutex_unlock(&rdev->pm.mutex);
	}
	else { /* count == 0 */
		if (rdev->pm.state != PM_STATE_MINIMUM) {
			cancel_delayed_work(&rdev->pm.idle_work);

			rdev->pm.state = PM_STATE_MINIMUM;
			rdev->pm.planned_action = PM_ACTION_MINIMUM;
			radeon_pm_set_clocks_locked(rdev);
		}

		mutex_unlock(&rdev->pm.mutex);
	}
}

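/*
 * Apply whatever pm.planned_action requests; the caller must hold
 * pm.mutex. Note that only the engine clock (sclk) is reprogrammed
 * here; memory reclocking is not implemented at this point.
 */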
static void radeon_pm_set_clocks_locked(struct radeon_device *rdev)
{
	/*radeon_fence_wait_last(rdev);*/
	switch (rdev->pm.planned_action) {
	case PM_ACTION_UPCLOCK:
		radeon_set_engine_clock(rdev, rdev->clock.default_sclk);
		rdev->pm.downclocked = false;
		break;
	case PM_ACTION_DOWNCLOCK:
		radeon_set_engine_clock(rdev,
			rdev->pm.min_mode_engine_clock);
		rdev->pm.downclocked = true;
		break;
	case PM_ACTION_MINIMUM:
		radeon_set_engine_clock(rdev,
			rdev->pm.min_gpu_engine_clock);
		break;
	case PM_ACTION_NONE:
		DRM_ERROR("%s: PM_ACTION_NONE\n", __func__);
		break;
	}

	rdev->pm.planned_action = PM_ACTION_NONE;
}

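/*
 * Perform a planned reclock from process context. The CP mutex is
 * held across the switch so no new commands reach the ring while the
 * engine clock changes, and any vblank references taken by the idle
 * worker are dropped again first.
 */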
static void radeon_pm_set_clocks(struct radeon_device *rdev)
{
	mutex_lock(&rdev->pm.mutex);
	/* new VBLANK irq may come before handling previous one */
	if (rdev->pm.vblank_callback) {
		mutex_lock(&rdev->cp.mutex);
		if (rdev->pm.req_vblank & (1 << 0)) {
			rdev->pm.req_vblank &= ~(1 << 0);
			drm_vblank_put(rdev->ddev, 0);
		}
		if (rdev->pm.req_vblank & (1 << 1)) {
			rdev->pm.req_vblank &= ~(1 << 1);
			drm_vblank_put(rdev->ddev, 1);
		}
		rdev->pm.vblank_callback = false;
		radeon_pm_set_clocks_locked(rdev);
		mutex_unlock(&rdev->cp.mutex);
	}
	mutex_unlock(&rdev->pm.mutex);
}

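/*
 * Deferred-work wrapper around radeon_pm_set_clocks() so a reclock
 * can be requested from contexts that must not sleep.
 */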
static void radeon_pm_reclock_work_handler(struct work_struct *work)
{
	struct radeon_device *rdev;
	rdev = container_of(work, struct radeon_device,
		pm.reclock_work);
	radeon_pm_set_clocks(rdev);
}

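/*
 * Runs every RADEON_IDLE_LOOP_MS while dynamic PM is active. The
 * heuristic: three or more still-emitted fences means the GPU is busy
 * and should run at full clocks, zero means it is idle and may be
 * downclocked. A decision only takes effect once it has survived
 * RADEON_RECLOCK_DELAY_MS; then vblank references are taken and
 * pm.vblank_callback arms the actual switch at the next vblank.
 */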
static void radeon_pm_idle_work_handler(struct work_struct *work)
{
	struct radeon_device *rdev;
	rdev = container_of(work, struct radeon_device,
		pm.idle_work.work);

	mutex_lock(&rdev->pm.mutex);
	if (rdev->pm.state == PM_STATE_ACTIVE &&
	    !rdev->pm.vblank_callback) {
		unsigned long irq_flags;
		int not_processed = 0;

		read_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
		if (!list_empty(&rdev->fence_drv.emited)) {
			struct list_head *ptr;
			list_for_each(ptr, &rdev->fence_drv.emited) {
				/* count up to 3, that's enough info */
				if (++not_processed >= 3)
					break;
			}
		}
		read_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);

		if (not_processed >= 3) { /* should upclock */
			if (rdev->pm.planned_action == PM_ACTION_DOWNCLOCK) {
				rdev->pm.planned_action = PM_ACTION_NONE;
			} else if (rdev->pm.planned_action == PM_ACTION_NONE &&
				   rdev->pm.downclocked) {
				rdev->pm.planned_action =
					PM_ACTION_UPCLOCK;
				rdev->pm.action_timeout = jiffies +
					msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
			}
		} else if (not_processed == 0) { /* should downclock */
			if (rdev->pm.planned_action == PM_ACTION_UPCLOCK) {
				rdev->pm.planned_action = PM_ACTION_NONE;
			} else if (rdev->pm.planned_action == PM_ACTION_NONE &&
				   !rdev->pm.downclocked) {
				rdev->pm.planned_action =
					PM_ACTION_DOWNCLOCK;
				rdev->pm.action_timeout = jiffies +
					msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
			}
		}

		if (rdev->pm.planned_action != PM_ACTION_NONE &&
		    jiffies > rdev->pm.action_timeout) {
			if (rdev->pm.active_crtcs & (1 << 0)) {
				rdev->pm.req_vblank |= (1 << 0);
				drm_vblank_get(rdev->ddev, 0);
			}
			if (rdev->pm.active_crtcs & (1 << 1)) {
				rdev->pm.req_vblank |= (1 << 1);
				drm_vblank_get(rdev->ddev, 1);
			}
			rdev->pm.vblank_callback = true;
		}
	}
	mutex_unlock(&rdev->pm.mutex);

	queue_delayed_work(rdev->wq, &rdev->pm.idle_work,
		msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;

	seq_printf(m, "state: %s\n", pm_state_names[rdev->pm.state]);
	seq_printf(m, "default engine clock: %u0 kHz\n", rdev->clock.default_sclk);
	seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
	seq_printf(m, "default memory clock: %u0 kHz\n", rdev->clock.default_mclk);
	if (rdev->asic->get_memory_clock)
		seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));

	return 0;
}

static struct drm_info_list radeon_pm_info_list[] = {
	{"radeon_pm_info", radeon_debugfs_pm_info, 0, NULL},
};
#endif

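/* Register the debugfs file above; a no-op without CONFIG_DEBUG_FS. */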
static int radeon_debugfs_pm_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_pm_info_list, ARRAY_SIZE(radeon_pm_info_list));
#else
	return 0;
#endif
}