blob: f1df59f59a37710e4c0370e993ae786f2bddbc4b [file] [log] [blame]
/*
 * menu.c - the menu idle governor
 *
 * Copyright (C) 2006-2007 Adam Belay <abelay@novell.com>
 *
 * This code is licenced under the GPL.
 */
8
#include <linux/kernel.h>
#include <linux/cpuidle.h>
#include <linux/pm_qos_params.h>
#include <linux/time.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
16
/* Slack (in us) allowed when deciding whether we woke up "early" (see
 * menu_reflect()): a wakeup within BREAK_FUZZ of the expected time still
 * counts as having slept the full expected period. */
#define BREAK_FUZZ	4	/* 4 us */
/* Weight (in percent) of the previous prediction when blending it with the
 * newest measurement into a running average (see menu_select()). */
#define PRED_HISTORY_PCT	50

/* Per-CPU governor state, kept in the menu_devices per-cpu variable. */
struct menu_device {
	int		last_state_idx;		/* index of the state picked by the last menu_select() */

	unsigned int	expected_us;		/* sleep length predicted by tick_nohz_get_sleep_length() */
	unsigned int	predicted_us;		/* running weighted average of current_predicted_us */
	unsigned int	current_predicted_us;	/* latest raw prediction: max of measured idle sums */
	unsigned int	last_measured_us;	/* cumulative idle time recorded at the last early wakeup */
	unsigned int	elapsed_us;		/* idle time accumulated since the last early wakeup */
};

static DEFINE_PER_CPU(struct menu_device, menu_devices);
31
32/**
33 * menu_select - selects the next idle state to enter
34 * @dev: the CPU
35 */
36static int menu_select(struct cpuidle_device *dev)
37{
38 struct menu_device *data = &__get_cpu_var(menu_devices);
venkatesh.pallipadi@intel.coma2bd92022008-07-30 19:21:42 -070039 int latency_req = pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY);
Len Brown4f86d3a2007-10-03 18:58:00 -040040 int i;
41
venkatesh.pallipadi@intel.coma2bd92022008-07-30 19:21:42 -070042 /* Special case when user has set very strict latency requirement */
43 if (unlikely(latency_req == 0)) {
44 data->last_state_idx = 0;
45 return 0;
46 }
47
Len Brown4f86d3a2007-10-03 18:58:00 -040048 /* determine the expected residency time */
49 data->expected_us =
50 (u32) ktime_to_ns(tick_nohz_get_sleep_length()) / 1000;
51
Pallipadi, Venkatesh816bb612008-12-30 14:46:02 -080052 /* Recalculate predicted_us based on prediction_history_pct */
53 data->predicted_us *= PRED_HISTORY_PCT;
54 data->predicted_us += (100 - PRED_HISTORY_PCT) *
55 data->current_predicted_us;
56 data->predicted_us /= 100;
57
Len Brown4f86d3a2007-10-03 18:58:00 -040058 /* find the deepest idle state that satisfies our constraints */
venkatesh.pallipadi@intel.coma2bd92022008-07-30 19:21:42 -070059 for (i = CPUIDLE_DRIVER_STATE_START + 1; i < dev->state_count; i++) {
Len Brown4f86d3a2007-10-03 18:58:00 -040060 struct cpuidle_state *s = &dev->states[i];
61
62 if (s->target_residency > data->expected_us)
63 break;
64 if (s->target_residency > data->predicted_us)
65 break;
venkatesh.pallipadi@intel.coma2bd92022008-07-30 19:21:42 -070066 if (s->exit_latency > latency_req)
Len Brown4f86d3a2007-10-03 18:58:00 -040067 break;
68 }
69
70 data->last_state_idx = i - 1;
71 return i - 1;
72}
73
/**
 * menu_reflect - attempts to guess what happened after entry
 * @dev: the CPU
 *
 * NOTE: it's important to be fast here because this operation will add to
 * the overall exit latency.
 */
static void menu_reflect(struct cpuidle_device *dev)
{
	struct menu_device *data = &__get_cpu_var(menu_devices);
	int last_idx = data->last_state_idx;
	unsigned int last_idle_us = cpuidle_get_last_residency(dev);
	struct cpuidle_state *target = &dev->states[last_idx];
	unsigned int measured_us;

	/*
	 * Ugh, this idle state doesn't support residency measurements, so we
	 * are basically lost in the dark.  As a compromise, assume we slept
	 * for one full standard timer tick.  However, be aware that this
	 * could potentially result in a suboptimal state transition.
	 */
	if (unlikely(!(target->flags & CPUIDLE_FLAG_TIME_VALID)))
		last_idle_us = USEC_PER_SEC / HZ;

	/*
	 * measured_us and elapsed_us are the cumulative idle time, since the
	 * last time we were woken out of idle by an interrupt.
	 * The comparison detects unsigned wrap-around of the addition; on
	 * overflow, saturate to the maximum value (-1 as unsigned int).
	 */
	if (data->elapsed_us <= data->elapsed_us + last_idle_us)
		measured_us = data->elapsed_us + last_idle_us;
	else
		measured_us = -1;

	/* Predict time until next break event */
	data->current_predicted_us = max(measured_us, data->last_measured_us);

	/*
	 * If we woke up noticeably earlier (beyond BREAK_FUZZ slack) than the
	 * expected sleep length minus the state's exit latency, treat this as
	 * a genuine break event: record the measurement and restart the
	 * accumulator.  Otherwise keep accumulating idle time.
	 * NOTE(review): expected_us is unsigned while exit_latency may exceed
	 * it, so the subtraction can wrap large — presumably intentional as a
	 * "never an early wake" sentinel; confirm.
	 */
	if (last_idle_us + BREAK_FUZZ <
		data->expected_us - target->exit_latency) {
		data->last_measured_us = measured_us;
		data->elapsed_us = 0;
	} else {
		data->elapsed_us = measured_us;
	}
}
118
119/**
120 * menu_enable_device - scans a CPU's states and does setup
121 * @dev: the CPU
122 */
123static int menu_enable_device(struct cpuidle_device *dev)
124{
125 struct menu_device *data = &per_cpu(menu_devices, dev->cpu);
126
127 memset(data, 0, sizeof(struct menu_device));
128
129 return 0;
130}
131
/* Registration record handed to the cpuidle core; the enable/select/reflect
 * hooks above implement the governor.  NOTE(review): rating 20 presumably
 * outranks the simpler ladder governor — confirm against its rating. */
static struct cpuidle_governor menu_governor = {
	.name =		"menu",
	.rating =	20,
	.enable =	menu_enable_device,
	.select =	menu_select,
	.reflect =	menu_reflect,
	.owner =	THIS_MODULE,
};
140
141/**
142 * init_menu - initializes the governor
143 */
144static int __init init_menu(void)
145{
146 return cpuidle_register_governor(&menu_governor);
147}
148
/**
 * exit_menu - exits the governor
 *
 * Unhooks the menu governor from the cpuidle core on module unload.
 */
static void __exit exit_menu(void)
{
	cpuidle_unregister_governor(&menu_governor);
}
156
/* Standard module metadata and entry/exit hooks. */
MODULE_LICENSE("GPL");
module_init(init_menu);
module_exit(exit_menu);