/*
 * menu.c - the menu idle governor
 *
 * Copyright (C) 2006-2007 Adam Belay <abelay@novell.com>
 * Copyright (C) 2009 Intel Corporation
 * Author:
 *        Arjan van de Ven <arjan@linux.intel.com>
 *
 * This code is licensed under the GPL version 2 as described
 * in the COPYING file that accompanies the Linux Kernel.
 */

#include <linux/kernel.h>
#include <linux/cpuidle.h>
#include <linux/pm_qos.h>
#include <linux/time.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/sched.h>
#include <linux/math64.h>
#include <linux/module.h>

#define BUCKETS 12
#define INTERVALS 8
#define RESOLUTION 1024
#define DECAY 8
#define MAX_INTERESTING 50000
#define STDDEV_THRESH 400

/* 60 * 60 > STDDEV_THRESH * INTERVALS, i.e. 3600 > 400 * 8 = 3200 */
#define MAX_DEVIATION 60

static DEFINE_PER_CPU(struct hrtimer, menu_hrtimer);
static DEFINE_PER_CPU(int, hrtimer_status);
/* menu hrtimer mode */
enum {MENU_HRTIMER_STOP, MENU_HRTIMER_REPEAT, MENU_HRTIMER_GENERAL};

/*
 * Concepts and ideas behind the menu governor
 *
 * For the menu governor, there are 3 decision factors for picking a C
 * state:
 * 1) Energy break even point
 * 2) Performance impact
 * 3) Latency tolerance (from pmqos infrastructure)
 * These three factors are treated independently.
 *
 * Energy break even point
 * -----------------------
 * C state entry and exit have an energy cost, and a certain amount of time in
 * the C state is required to actually break even on this cost. CPUIDLE
 * provides us this duration in the "target_residency" field. So all that we
 * need is a good prediction of how long we'll be idle. Like the traditional
 * menu governor, we start with the actual known "next timer event" time.
 *
 * Since there are other sources of wakeups (interrupts for example) than
 * the next timer event, this estimation is rather optimistic. To get a
 * more realistic estimate, a correction factor is applied to the estimate,
 * that is based on historic behavior. For example, if in the past the actual
 * duration always was 50% of the next timer tick, the correction factor will
 * be 0.5.
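 *
 * As a worked illustration of the fixed-point math used below: the factor
 * is stored scaled by RESOLUTION * DECAY (1024 * 8 = 8192), so a factor of
 * 0.5 is stored as 4096 and the prediction becomes
 *	predicted_us = expected_us * 4096 / 8192
 * i.e. half of the next-timer estimate.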
 *
 * menu uses a running average for this correction factor, however it uses a
 * set of factors, not just a single factor. This stems from the realization
 * that the ratio is dependent on the order of magnitude of the expected
 * duration; if we expect 500 milliseconds of idle time the likelihood of
 * getting an interrupt very early is much higher than if we expect 50 micro
 * seconds of idle time. A second independent factor that has a big impact on
 * the actual factor is whether there is (disk) IO outstanding or not.
 * (as a special twist, we consider every sleep longer than 50 milliseconds
 * as perfect; there are no power gains for sleeping longer than this)
 *
 * For these two reasons we keep an array of 12 independent factors, that gets
 * indexed based on the magnitude of the expected duration as well as the
 * "is IO outstanding" property.
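 * (With BUCKETS = 12 and the layout of which_bucket() below, buckets 0-5
 * cover six decades of expected duration with no IO pending, and buckets
 * 6-11 cover the same decades with IO pending.)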
 *
 * Repeatable-interval-detector
 * ----------------------------
 * There are some cases where "next timer" is a completely unusable predictor:
 * Those cases where the interval is fixed, for example due to hardware
 * interrupt mitigation, but also due to fixed transfer rate devices such as
 * mice.
 * For this, we use a different predictor: We track the duration of the last 8
 * intervals and if the standard deviation of these 8 intervals is below a
 * threshold value, we use the average of these intervals as prediction.
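 * For example, eight recorded intervals of roughly 4000 microseconds with
 * a small spread make this detector predict about 4000 microseconds of
 * idle time, regardless of when the next timer event is due.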
 *
 * Limiting Performance Impact
 * ---------------------------
 * C states, especially those with large exit latencies, can have a real
 * noticeable impact on workloads, which is not acceptable for most sysadmins,
 * and in addition, less performance has a power price of its own.
 *
 * As a general rule of thumb, menu assumes that the following heuristic
 * holds:
 *     The busier the system, the less impact of C states is acceptable
 *
 * This rule-of-thumb is implemented using a performance-multiplier:
 * If the exit latency times the performance multiplier is longer than
 * the predicted duration, the C state is not considered a candidate
 * for selection due to a too high performance impact. So the higher
 * this multiplier is, the longer we need to be idle to pick a deep C
 * state, and thus the less likely a busy CPU will hit such a deep
 * C state.
 *
 * Two factors are used in determining this multiplier:
 * twenty points are added for each unit of "per cpu load average" we have,
 * and ten points are added for each process that is waiting for IO on this
 * CPU (these values are experimentally determined, and match the arithmetic
 * in performance_multiplier() below).
 *
 * The load average factor gives a longer term (few seconds) input to the
 * decision, while the iowait value gives a cpu local instantaneous input.
 * The iowait factor may look low, but realize that this is also already
 * represented in the system load average.
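 *
 * As a worked example: with a per cpu load average of 1.00 (get_loadavg()
 * returns 10) and one task waiting for IO on this CPU, the multiplier is
 * 1 + 2 * 10 + 10 * 1 = 31, so a C state is only a candidate if its exit
 * latency fits 31 times into the predicted idle duration.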
 *
 */

/*
 * If the expected C-state residency is at least this long, it is
 * worthwhile to exit from a shallow C-state and re-enter a deeper one.
 */
static unsigned int perfect_cstate_ms __read_mostly = 30;
module_param(perfect_cstate_ms, uint, 0000);

struct menu_device {
	int		last_state_idx;
	int		needs_update;

	unsigned int	expected_us;
	u64		predicted_us;
	unsigned int	exit_us;
	unsigned int	bucket;
	u64		correction_factor[BUCKETS];
	u32		intervals[INTERVALS];
	int		interval_ptr;
};


#define LOAD_INT(x) ((x) >> FSHIFT)
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)

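/*
 * Convert the fixed-point per-cpu load into "load points": ten points
 * per unit of load average, so e.g. a load of 1.50 yields 15.
 */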
static int get_loadavg(void)
{
	unsigned long this = this_cpu_load();

	return LOAD_INT(this) * 10 + LOAD_FRAC(this) / 10;
}

static inline int which_bucket(unsigned int duration)
{
	int bucket = 0;

	/*
	 * We keep two groups of stats; one with IO pending, one
	 * without.
	 * This allows us to calculate
	 * E(duration)|iowait
	 */
	if (nr_iowait_cpu(smp_processor_id()))
		bucket = BUCKETS/2;

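	/* index into the group by the decade of the expected duration */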
	if (duration < 10)
		return bucket;
	if (duration < 100)
		return bucket + 1;
	if (duration < 1000)
		return bucket + 2;
	if (duration < 10000)
		return bucket + 3;
	if (duration < 100000)
		return bucket + 4;
	return bucket + 5;
}

/*
 * Return a multiplier for the exit latency that is intended
 * to take performance requirements into account.
 * The more performance critical we estimate the system
 * to be, the higher this multiplier, and thus the higher
 * the barrier to go to an expensive C state.
 */
static inline int performance_multiplier(void)
{
	int mult = 1;

	/* for higher loadavg, we are more reluctant */
	mult += 2 * get_loadavg();

	/* for each task waiting for IO on this CPU, add 10 points */
	mult += 10 * nr_iowait_cpu(smp_processor_id());

	return mult;
}

static DEFINE_PER_CPU(struct menu_device, menu_devices);

static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev);

/* This implements DIV_ROUND_CLOSEST but avoids 64 bit division */
static u64 div_round64(u64 dividend, u32 divisor)
{
	return div_u64(dividend + (divisor / 2), divisor);
}

/* Cancel the hrtimer if it has not triggered yet */
void menu_hrtimer_cancel(void)
{
	int cpu = smp_processor_id();
	struct hrtimer *hrtmr = &per_cpu(menu_hrtimer, cpu);

	/* The timer has not fired yet */
	if (per_cpu(hrtimer_status, cpu)) {
		hrtimer_cancel(hrtmr);
		per_cpu(hrtimer_status, cpu) = MENU_HRTIMER_STOP;
	}
}
EXPORT_SYMBOL_GPL(menu_hrtimer_cancel);

/* Callback for when the hrtimer fires */
static enum hrtimer_restart menu_hrtimer_notify(struct hrtimer *hrtimer)
{
	int cpu = smp_processor_id();
	struct menu_device *data = &per_cpu(menu_devices, cpu);

	/*
	 * In the general case the expected residency is much larger than
	 * the deepest C-state's target residency, yet the prediction logic
	 * still predicted a small residency; if this timer fires, the
	 * prediction history is clearly broken, so reset the correction
	 * factor.
	 */
	if (per_cpu(hrtimer_status, cpu) == MENU_HRTIMER_GENERAL)
		data->correction_factor[data->bucket] = RESOLUTION * DECAY;

	per_cpu(hrtimer_status, cpu) = MENU_HRTIMER_STOP;

	return HRTIMER_NORESTART;
}

/*
 * Try detecting repeating patterns by keeping track of the last 8
 * intervals, and checking if the standard deviation of that set
 * of points is below a threshold. If it is... then use the
 * average of these 8 points as the estimated value.
 */
static u32 get_typical_interval(struct menu_device *data)
{
	int i = 0, divisor = 0;
	uint64_t max = 0, avg = 0, stddev = 0;
	int64_t thresh = LLONG_MAX; /* Discard outliers above this value. */
	unsigned int ret = 0;

again:

	/* first calculate the average of past intervals */
	max = avg = divisor = stddev = 0;
	for (i = 0; i < INTERVALS; i++) {
		int64_t value = data->intervals[i];
		if (value <= thresh) {
			avg += value;
			divisor++;
			if (value > max)
				max = value;
		}
	}
	do_div(avg, divisor);

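	/* then the (population) standard deviation of those same samples */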
	for (i = 0; i < INTERVALS; i++) {
		int64_t value = data->intervals[i];
		if (value <= thresh) {
			int64_t diff = value - avg;
			stddev += diff * diff;
		}
	}
	do_div(stddev, divisor);
	stddev = int_sqrt(stddev);
	/*
	 * If we have outliers to the upside in our distribution, discard
	 * those by setting the threshold to exclude these outliers, then
	 * calculate the average and standard deviation again. Once we get
	 * down to the bottom 3/4 of our samples, stop excluding samples.
	 *
	 * This can deal with workloads that have long pauses interspersed
	 * with sporadic activity with a bunch of short pauses.
	 *
	 * A typical interval is declared either when the standard deviation
	 * is small in absolute terms (<= 20 usec) or small compared to the
	 * average (avg > 6 * stddev) while at least 3/4 of the samples are
	 * still included.
	 */
	if (((avg > stddev * 6) && (divisor * 4 >= INTERVALS * 3))
							|| stddev <= 20) {
		data->predicted_us = avg;
		ret = 1;
		return ret;

	} else if ((divisor * 4) > INTERVALS * 3) {
		/* Exclude the max interval */
		thresh = max - 1;
		goto again;
	}

	return ret;
}

/**
 * menu_select - selects the next idle state to enter
 * @drv: cpuidle driver containing state data
 * @dev: the CPU
 */
static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
	struct menu_device *data = &__get_cpu_var(menu_devices);
	int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
	int i;
	int multiplier;
	struct timespec t;
	int repeat = 0, low_predicted = 0;
	int cpu = smp_processor_id();
	struct hrtimer *hrtmr = &per_cpu(menu_hrtimer, cpu);

	if (data->needs_update) {
		menu_update(drv, dev);
		data->needs_update = 0;
	}

	data->last_state_idx = 0;
	data->exit_us = 0;

	/* Special case when user has set very strict latency requirement */
	if (unlikely(latency_req == 0))
		return 0;

	/* determine the expected residency time, round up */
	t = ktime_to_timespec(tick_nohz_get_sleep_length());
	data->expected_us =
		t.tv_sec * USEC_PER_SEC + t.tv_nsec / NSEC_PER_USEC;
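	/* this is the "next timer event" estimate from the comment above */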

	data->bucket = which_bucket(data->expected_us);

	multiplier = performance_multiplier();

	/*
	 * if the correction factor is 0 (eg first time init or cpu hotplug
	 * etc), we actually want to start out with a unity factor.
	 */
	if (data->correction_factor[data->bucket] == 0)
		data->correction_factor[data->bucket] = RESOLUTION * DECAY;

	/* Make sure to round up for half microseconds */
	data->predicted_us = div_round64(data->expected_us * data->correction_factor[data->bucket],
					 RESOLUTION * DECAY);

	repeat = get_typical_interval(data);

	/*
	 * We want to default to C1 (hlt), not to busy polling
	 * unless the timer is happening really really soon.
	 */
	if (data->expected_us > 5 &&
	    !drv->states[CPUIDLE_DRIVER_STATE_START].disabled &&
	    dev->states_usage[CPUIDLE_DRIVER_STATE_START].disable == 0)
		data->last_state_idx = CPUIDLE_DRIVER_STATE_START;

	/*
	 * Find the idle state with the lowest power while satisfying
	 * our constraints.
	 */
	for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) {
		struct cpuidle_state *s = &drv->states[i];
		struct cpuidle_state_usage *su = &dev->states_usage[i];

		if (s->disabled || su->disable)
			continue;
		if (s->target_residency > data->predicted_us) {
			low_predicted = 1;
			continue;
		}
		if (s->exit_latency > latency_req)
			continue;
		if (s->exit_latency * multiplier > data->predicted_us)
			continue;

		data->last_state_idx = i;
		data->exit_us = s->exit_latency;
	}

	/* the deepest C-state was not chosen: predicted residency too low */
	if (low_predicted) {
		unsigned int timer_us = 0;
		unsigned int perfect_us = 0;

		/*
		 * Set a timer to detect whether this sleep is much
		 * longer than repeat mode predicted. If the timer
		 * triggers, the code will evaluate whether to put
		 * the CPU into a deeper C-state.
		 * The timer is cancelled on CPU wakeup.
		 */
		timer_us = 2 * (data->predicted_us + MAX_DEVIATION);
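		/* double the predicted residency, plus MAX_DEVIATION of slack */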

		perfect_us = perfect_cstate_ms * 1000;

		if (repeat && (4 * timer_us < data->expected_us)) {
			RCU_NONIDLE(hrtimer_start(hrtmr,
				ns_to_ktime(1000 * timer_us),
				HRTIMER_MODE_REL_PINNED));
			/* In the repeat case, the menu hrtimer is started */
			per_cpu(hrtimer_status, cpu) = MENU_HRTIMER_REPEAT;
		} else if (perfect_us < data->expected_us) {
			/*
			 * The next timer is long. This could be because
			 * we did not make a useful prediction.
			 * In that case, it makes sense to re-enter
			 * into a deeper C-state after some time.
			 */
			RCU_NONIDLE(hrtimer_start(hrtmr,
				ns_to_ktime(1000 * timer_us),
				HRTIMER_MODE_REL_PINNED));
			/* In the general case, the menu hrtimer is started */
			per_cpu(hrtimer_status, cpu) = MENU_HRTIMER_GENERAL;
		}
	}

	return data->last_state_idx;
}

/**
 * menu_reflect - records that data structures need update
 * @dev: the CPU
 * @index: the index of the state actually entered
 *
 * NOTE: it's important to be fast here because this operation will add to
 * the overall exit latency.
 */
static void menu_reflect(struct cpuidle_device *dev, int index)
{
	struct menu_device *data = &__get_cpu_var(menu_devices);
	data->last_state_idx = index;
	if (index >= 0)
		data->needs_update = 1;
}

/**
 * menu_update - attempts to guess what happened after entry
 * @drv: cpuidle driver containing state data
 * @dev: the CPU
 */
static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
	struct menu_device *data = &__get_cpu_var(menu_devices);
	int last_idx = data->last_state_idx;
	unsigned int last_idle_us = cpuidle_get_last_residency(dev);
	struct cpuidle_state *target = &drv->states[last_idx];
	unsigned int measured_us;
	u64 new_factor;

	/*
	 * Ugh, this idle state doesn't support residency measurements, so we
	 * are basically lost in the dark. As a compromise, assume we slept
	 * for the whole expected time.
	 */
	if (unlikely(!(target->flags & CPUIDLE_FLAG_TIME_VALID)))
		last_idle_us = data->expected_us;

	measured_us = last_idle_us;

	/*
	 * We correct for the exit latency; we are assuming here that the
	 * exit latency happens after the event that we're interested in.
	 */
	if (measured_us > data->exit_us)
		measured_us -= data->exit_us;

	/* update our correction ratio */
	new_factor = data->correction_factor[data->bucket]
			* (DECAY - 1) / DECAY;

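	/*
	 * with DECAY == 8, 7/8 of the old factor is kept and the new
	 * observation below contributes roughly the remaining 1/8
	 */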
	if (data->expected_us > 0 && measured_us < MAX_INTERESTING)
		new_factor += RESOLUTION * measured_us / data->expected_us;
	else
		/*
		 * we were idle so long that we count it as a perfect
		 * prediction
		 */
		new_factor += RESOLUTION;

	/*
	 * We don't want 0 as factor; we always want at least
	 * a tiny bit of estimated time.
	 */
	if (new_factor == 0)
		new_factor = 1;

	data->correction_factor[data->bucket] = new_factor;

	/* update the repeating-pattern data */
	data->intervals[data->interval_ptr++] = last_idle_us;
	if (data->interval_ptr >= INTERVALS)
		data->interval_ptr = 0;
}

/**
 * menu_enable_device - scans a CPU's states and does setup
 * @drv: cpuidle driver
 * @dev: the CPU
 */
static int menu_enable_device(struct cpuidle_driver *drv,
				struct cpuidle_device *dev)
{
	struct menu_device *data = &per_cpu(menu_devices, dev->cpu);
	struct hrtimer *t = &per_cpu(menu_hrtimer, dev->cpu);

	hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	t->function = menu_hrtimer_notify;

	memset(data, 0, sizeof(struct menu_device));

	return 0;
}

static struct cpuidle_governor menu_governor = {
	.name =		"menu",
	.rating =	20,
	.enable =	menu_enable_device,
	.select =	menu_select,
	.reflect =	menu_reflect,
	.owner =	THIS_MODULE,
};

/**
 * init_menu - initializes the governor
 */
static int __init init_menu(void)
{
	return cpuidle_register_governor(&menu_governor);
}

/**
 * exit_menu - exits the governor
 */
static void __exit exit_menu(void)
{
	cpuidle_unregister_governor(&menu_governor);
}

MODULE_LICENSE("GPL");
module_init(init_menu);
module_exit(exit_menu);