/*
2 * ladder.c - the residency ladder algorithm
3 *
4 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
5 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
6 * Copyright (C) 2004, 2005 Dominik Brodowski <linux@brodo.de>
7 *
8 * (C) 2006-2007 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
9 * Shaohua Li <shaohua.li@intel.com>
10 * Adam Belay <abelay@novell.com>
11 *
12 * This code is licenced under the GPL.
13 */
14
15#include <linux/kernel.h>
16#include <linux/cpuidle.h>
#include <linux/pm_qos_params.h>
#include <linux/moduleparam.h>
19#include <linux/jiffies.h>
20
21#include <asm/io.h>
22#include <asm/uaccess.h>
23
24#define PROMOTION_COUNT 4
25#define DEMOTION_COUNT 1
26
/*
 * Per-C-state bookkeeping for the ladder governor.
 *
 * threshold holds the configured trip points, stats the running tallies
 * that are compared against them on every state selection.
 */
struct ladder_device_state {
	struct {
		u32 promotion_count;	/* consecutive qualifying residencies needed to promote */
		u32 demotion_count;	/* consecutive qualifying residencies needed to demote */
		u32 promotion_time;	/* residency above this considers promotion (derived from exit_latency; presumably usecs — confirm against cpuidle core) */
		u32 demotion_time;	/* residency below this considers demotion */
	} threshold;
	struct {
		int promotion_count;	/* current run of long residencies; reset on demotion or state change */
		int demotion_count;	/* current run of short residencies; reset on promotion or state change */
	} stats;
};
39
/* Per-CPU governor instance: one bookkeeping entry per idle state. */
struct ladder_device {
	struct ladder_device_state states[CPUIDLE_STATE_MAX];
	int last_state_idx;	/* index of the state selected on the previous idle entry */
};

static DEFINE_PER_CPU(struct ladder_device, ladder_devices);
46
47/**
48 * ladder_do_selection - prepares private data for a state change
49 * @ldev: the ladder device
50 * @old_idx: the current state index
51 * @new_idx: the new target state index
52 */
53static inline void ladder_do_selection(struct ladder_device *ldev,
54 int old_idx, int new_idx)
55{
56 ldev->states[old_idx].stats.promotion_count = 0;
57 ldev->states[old_idx].stats.demotion_count = 0;
58 ldev->last_state_idx = new_idx;
59}
60
61/**
62 * ladder_select_state - selects the next state to enter
63 * @dev: the CPU
64 */
65static int ladder_select_state(struct cpuidle_device *dev)
66{
67 struct ladder_device *ldev = &__get_cpu_var(ladder_devices);
68 struct ladder_device_state *last_state;
69 int last_residency, last_idx = ldev->last_state_idx;
venkatesh.pallipadi@intel.coma2bd9202008-07-30 19:21:42 -070070 int latency_req = pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY);
Len Brown4f86d3a2007-10-03 18:58:00 -040071
72 if (unlikely(!ldev))
73 return 0;
74
venkatesh.pallipadi@intel.coma2bd9202008-07-30 19:21:42 -070075 /* Special case when user has set very strict latency requirement */
76 if (unlikely(latency_req == 0)) {
77 ladder_do_selection(ldev, last_idx, 0);
78 return 0;
79 }
80
Len Brown4f86d3a2007-10-03 18:58:00 -040081 last_state = &ldev->states[last_idx];
82
83 if (dev->states[last_idx].flags & CPUIDLE_FLAG_TIME_VALID)
84 last_residency = cpuidle_get_last_residency(dev) - dev->states[last_idx].exit_latency;
85 else
86 last_residency = last_state->threshold.promotion_time + 1;
87
88 /* consider promotion */
89 if (last_idx < dev->state_count - 1 &&
90 last_residency > last_state->threshold.promotion_time &&
venkatesh.pallipadi@intel.coma2bd9202008-07-30 19:21:42 -070091 dev->states[last_idx + 1].exit_latency <= latency_req) {
Len Brown4f86d3a2007-10-03 18:58:00 -040092 last_state->stats.promotion_count++;
93 last_state->stats.demotion_count = 0;
94 if (last_state->stats.promotion_count >= last_state->threshold.promotion_count) {
95 ladder_do_selection(ldev, last_idx, last_idx + 1);
96 return last_idx + 1;
97 }
98 }
99
100 /* consider demotion */
venkatesh.pallipadi@intel.coma2bd9202008-07-30 19:21:42 -0700101 if (last_idx > CPUIDLE_DRIVER_STATE_START &&
Len Brown4f86d3a2007-10-03 18:58:00 -0400102 last_residency < last_state->threshold.demotion_time) {
103 last_state->stats.demotion_count++;
104 last_state->stats.promotion_count = 0;
105 if (last_state->stats.demotion_count >= last_state->threshold.demotion_count) {
106 ladder_do_selection(ldev, last_idx, last_idx - 1);
107 return last_idx - 1;
108 }
109 }
110
111 /* otherwise remain at the current state */
112 return last_idx;
113}
114
115/**
116 * ladder_enable_device - setup for the governor
117 * @dev: the CPU
118 */
119static int ladder_enable_device(struct cpuidle_device *dev)
120{
121 int i;
122 struct ladder_device *ldev = &per_cpu(ladder_devices, dev->cpu);
123 struct ladder_device_state *lstate;
124 struct cpuidle_state *state;
125
venkatesh.pallipadi@intel.coma2bd9202008-07-30 19:21:42 -0700126 ldev->last_state_idx = CPUIDLE_DRIVER_STATE_START;
Len Brown4f86d3a2007-10-03 18:58:00 -0400127
128 for (i = 0; i < dev->state_count; i++) {
129 state = &dev->states[i];
130 lstate = &ldev->states[i];
131
132 lstate->stats.promotion_count = 0;
133 lstate->stats.demotion_count = 0;
134
135 lstate->threshold.promotion_count = PROMOTION_COUNT;
136 lstate->threshold.demotion_count = DEMOTION_COUNT;
137
138 if (i < dev->state_count - 1)
139 lstate->threshold.promotion_time = state->exit_latency;
140 if (i > 0)
141 lstate->threshold.demotion_time = state->exit_latency;
142 }
143
144 return 0;
145}
146
/*
 * Governor descriptor handed to the cpuidle core.  rating is the
 * relative priority used when the core picks a default governor —
 * presumably higher wins; confirm against cpuidle_register_governor.
 */
static struct cpuidle_governor ladder_governor = {
	.name =		"ladder",
	.rating =	10,
	.enable =	ladder_enable_device,
	.select =	ladder_select_state,
	.owner =	THIS_MODULE,
};
154
155/**
156 * init_ladder - initializes the governor
157 */
158static int __init init_ladder(void)
159{
160 return cpuidle_register_governor(&ladder_governor);
161}
162
163/**
164 * exit_ladder - exits the governor
165 */
166static void __exit exit_ladder(void)
167{
168 cpuidle_unregister_governor(&ladder_governor);
169}
170
/* Module glue: GPL-licensed; register on load, unregister on unload. */
MODULE_LICENSE("GPL");
module_init(init_ladder);
module_exit(exit_ladder);