/*
 *  drivers/cpufreq/cpufreq_ondemand.c
 *
 *  Copyright (C)  2001 Russell King
 *            (C)  2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *                      Jun Nakajima <jun.nakajima@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/cpu.h>
#include <linux/jiffies.h>
#include <linux/kernel_stat.h>
#include <linux/mutex.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/input.h>
#include <linux/workqueue.h>
#include <linux/slab.h>

/*
 * dbs is used in this file as a short form for demand-based switching.
 * It helps to keep the variable names smaller and simpler.
 */

#define DEF_FREQUENCY_DOWN_DIFFERENTIAL		(10)
#define DEF_FREQUENCY_UP_THRESHOLD		(80)
#define DEF_SAMPLING_DOWN_FACTOR		(1)
#define MAX_SAMPLING_DOWN_FACTOR		(100000)
#define MICRO_FREQUENCY_DOWN_DIFFERENTIAL	(3)
#define MICRO_FREQUENCY_UP_THRESHOLD		(95)
#define MICRO_FREQUENCY_MIN_SAMPLE_RATE		(10000)
#define MIN_FREQUENCY_UP_THRESHOLD		(11)
#define MAX_FREQUENCY_UP_THRESHOLD		(100)
#define MIN_FREQUENCY_DOWN_DIFFERENTIAL		(1)

/*
 * The polling frequency of this governor depends on the capability of
 * the processor. The default polling frequency is 1000 times the transition
 * latency of the processor. The governor will work on any processor with
 * transition latency <= 10 ms, using an appropriate sampling rate.
 * For CPUs with transition latency > 10 ms (mostly drivers with
 * CPUFREQ_ETERNAL), this governor will not work.
 * All times here are in us (microseconds).
 */
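/*
 * Example (illustrative numbers): a CPU with a 10 us transition latency
 * gets a default sampling rate of 10 us * LATENCY_MULTIPLIER = 10,000 us
 * (10 ms), clamped so that it is never lower than min_sampling_rate.
 */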
#define MIN_SAMPLING_RATE_RATIO			(2)

static unsigned int min_sampling_rate;

#define LATENCY_MULTIPLIER			(1000)
#define MIN_LATENCY_MULTIPLIER			(100)
#define TRANSITION_LATENCY_LIMIT		(10 * 1000 * 1000)

#define POWERSAVE_BIAS_MAXLEVEL			(1000)
#define POWERSAVE_BIAS_MINLEVEL			(-1000)

static void do_dbs_timer(struct work_struct *work);
static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
				unsigned int event);

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
static
#endif
struct cpufreq_governor cpufreq_gov_ondemand = {
	.name			= "ondemand",
	.governor		= cpufreq_governor_dbs,
	.max_transition_latency	= TRANSITION_LATENCY_LIMIT,
	.owner			= THIS_MODULE,
};

/* Sampling types */
enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE};

struct cpu_dbs_info_s {
	cputime64_t prev_cpu_idle;
	cputime64_t prev_cpu_iowait;
	cputime64_t prev_cpu_wall;
	cputime64_t prev_cpu_nice;
	struct cpufreq_policy *cur_policy;
	struct delayed_work work;
	struct cpufreq_frequency_table *freq_table;
	unsigned int freq_lo;
	unsigned int freq_lo_jiffies;
	unsigned int freq_hi_jiffies;
	unsigned int rate_mult;
	unsigned int prev_load;
	unsigned int max_load;
	int cpu;
	unsigned int sample_type:1;
	/*
	 * percpu mutex that serializes governor limit change with
	 * do_dbs_timer invocation. We do not want do_dbs_timer to run
	 * when user is changing the governor or limits.
	 */
	struct mutex timer_mutex;
};
static DEFINE_PER_CPU(struct cpu_dbs_info_s, od_cpu_dbs_info);

static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info);
static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info);

static unsigned int dbs_enable;	/* number of CPUs using this policy */

/*
 * dbs_mutex protects dbs_enable in governor start/stop.
 */
static DEFINE_MUTEX(dbs_mutex);

static struct workqueue_struct *input_wq;

struct dbs_work_struct {
	struct work_struct work;
	unsigned int cpu;
};

static DEFINE_PER_CPU(struct dbs_work_struct, dbs_refresh_work);

static struct dbs_tuners {
	unsigned int sampling_rate;
	unsigned int up_threshold;
	unsigned int up_threshold_multi_core;
	unsigned int down_differential;
	unsigned int down_differential_multi_core;
	unsigned int optimal_freq;
	unsigned int up_threshold_any_cpu_load;
	unsigned int sync_freq;
	unsigned int ignore_nice;
	unsigned int sampling_down_factor;
	int          powersave_bias;
	unsigned int io_is_busy;
} dbs_tuners_ins = {
	.up_threshold_multi_core = DEF_FREQUENCY_UP_THRESHOLD,
	.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
	.sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
	.down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL,
	.down_differential_multi_core = MICRO_FREQUENCY_DOWN_DIFFERENTIAL,
	.up_threshold_any_cpu_load = DEF_FREQUENCY_UP_THRESHOLD,
	.ignore_nice = 0,
	.powersave_bias = 0,
	.sync_freq = 0,
	.optimal_freq = 0,
};

static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
{
	u64 idle_time;
	u64 cur_wall_time;
	u64 busy_time;

	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());

	busy_time  = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];

	idle_time = cur_wall_time - busy_time;
	if (wall)
		*wall = jiffies_to_usecs(cur_wall_time);

	return jiffies_to_usecs(idle_time);
}

static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
{
	u64 idle_time = get_cpu_idle_time_us(cpu, NULL);

	if (idle_time == -1ULL)
		return get_cpu_idle_time_jiffy(cpu, wall);
	else
		idle_time += get_cpu_iowait_time_us(cpu, wall);

	return idle_time;
}

static inline cputime64_t get_cpu_iowait_time(unsigned int cpu, cputime64_t *wall)
{
	u64 iowait_time = get_cpu_iowait_time_us(cpu, wall);

	if (iowait_time == -1ULL)
		return 0;

	return iowait_time;
}

/*
 * Find the right frequency to be set now, with powersave_bias on.
 * Returns the freq_hi to be used right now and sets freq_hi_jiffies,
 * freq_lo, and freq_lo_jiffies in the per-CPU area for averaging freqs.
 */
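/*
 * Worked example (illustrative numbers): with powersave_bias = 100 (10%),
 * a 2000 MHz request is reduced to an 1800 MHz average target. If the
 * frequency table only offers 1700 MHz and 1900 MHz around that target,
 * the governor alternates between them, spending
 * jiffies_hi = jiffies_total * (1800 - 1700) / (1900 - 1700) of each
 * sampling period at 1900 MHz, so the time-weighted average is ~1800 MHz.
 */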
static unsigned int powersave_bias_target(struct cpufreq_policy *policy,
					  unsigned int freq_next,
					  unsigned int relation)
{
	unsigned int freq_req, freq_avg;
	unsigned int freq_hi, freq_lo;
	unsigned int index = 0;
	unsigned int jiffies_total, jiffies_hi, jiffies_lo;
	int freq_reduc;
	struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
						   policy->cpu);

	if (!dbs_info->freq_table) {
		dbs_info->freq_lo = 0;
		dbs_info->freq_lo_jiffies = 0;
		return freq_next;
	}

	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next,
			relation, &index);
	freq_req = dbs_info->freq_table[index].frequency;
	freq_reduc = freq_req * dbs_tuners_ins.powersave_bias / 1000;
	freq_avg = freq_req - freq_reduc;

	/* Find freq bounds for freq_avg in freq_table */
	index = 0;
	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
			CPUFREQ_RELATION_H, &index);
	freq_lo = dbs_info->freq_table[index].frequency;
	index = 0;
	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
			CPUFREQ_RELATION_L, &index);
	freq_hi = dbs_info->freq_table[index].frequency;

	/* Find out how long we have to be in hi and lo freqs */
	if (freq_hi == freq_lo) {
		dbs_info->freq_lo = 0;
		dbs_info->freq_lo_jiffies = 0;
		return freq_lo;
	}
	jiffies_total = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
	jiffies_hi = (freq_avg - freq_lo) * jiffies_total;
	jiffies_hi += ((freq_hi - freq_lo) / 2);
	jiffies_hi /= (freq_hi - freq_lo);
	jiffies_lo = jiffies_total - jiffies_hi;
	dbs_info->freq_lo = freq_lo;
	dbs_info->freq_lo_jiffies = jiffies_lo;
	dbs_info->freq_hi_jiffies = jiffies_hi;
	return freq_hi;
}

static int ondemand_powersave_bias_setspeed(struct cpufreq_policy *policy,
					    struct cpufreq_policy *altpolicy,
					    int level)
{
	if (level == POWERSAVE_BIAS_MAXLEVEL) {
		/* maximum powersave; set to lowest frequency */
		__cpufreq_driver_target(policy,
			(altpolicy) ? altpolicy->min : policy->min,
			CPUFREQ_RELATION_L);
		return 1;
	} else if (level == POWERSAVE_BIAS_MINLEVEL) {
		/* minimum powersave; set to highest frequency */
		__cpufreq_driver_target(policy,
			(altpolicy) ? altpolicy->max : policy->max,
			CPUFREQ_RELATION_H);
		return 1;
	}
	return 0;
}

static void ondemand_powersave_bias_init_cpu(int cpu)
{
	struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
	dbs_info->freq_table = cpufreq_frequency_get_table(cpu);
	dbs_info->freq_lo = 0;
}

static void ondemand_powersave_bias_init(void)
{
	int i;
	for_each_online_cpu(i) {
		ondemand_powersave_bias_init_cpu(i);
	}
}

/************************** sysfs interface ************************/

static ssize_t show_sampling_rate_min(struct kobject *kobj,
				      struct attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", min_sampling_rate);
}

define_one_global_ro(sampling_rate_min);

/* cpufreq_ondemand Governor Tunables */
#define show_one(file_name, object)					\
static ssize_t show_##file_name						\
(struct kobject *kobj, struct attribute *attr, char *buf)		\
{									\
	return sprintf(buf, "%u\n", dbs_tuners_ins.object);		\
}
show_one(sampling_rate, sampling_rate);
show_one(io_is_busy, io_is_busy);
show_one(up_threshold, up_threshold);
show_one(up_threshold_multi_core, up_threshold_multi_core);
show_one(down_differential, down_differential);
show_one(sampling_down_factor, sampling_down_factor);
show_one(ignore_nice_load, ignore_nice);
show_one(optimal_freq, optimal_freq);
show_one(up_threshold_any_cpu_load, up_threshold_any_cpu_load);
show_one(sync_freq, sync_freq);

static ssize_t show_powersave_bias
(struct kobject *kobj, struct attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", dbs_tuners_ins.powersave_bias);
}

/**
 * update_sampling_rate - update sampling rate effective immediately if needed.
 * @new_rate: new sampling rate
 *
 * If the new rate is smaller than the old, simply updating
 * dbs_tuners_ins.sampling_rate might not be appropriate. For example, if the
 * original sampling_rate was 1 second and the requested new sampling rate is
 * 10 ms because the user needs an immediate reaction from the ondemand
 * governor, but is not sure whether a higher frequency will be required,
 * then the governor may change the sampling rate too late; up to 1 second
 * later. Thus, if we are reducing the sampling rate, we need to make the
 * new value effective immediately.
 */
static void update_sampling_rate(unsigned int new_rate)
{
	int cpu;

	dbs_tuners_ins.sampling_rate = new_rate
				     = max(new_rate, min_sampling_rate);

	for_each_online_cpu(cpu) {
		struct cpufreq_policy *policy;
		struct cpu_dbs_info_s *dbs_info;
		unsigned long next_sampling, appointed_at;

		policy = cpufreq_cpu_get(cpu);
		if (!policy)
			continue;
		dbs_info = &per_cpu(od_cpu_dbs_info, policy->cpu);
		cpufreq_cpu_put(policy);

		mutex_lock(&dbs_info->timer_mutex);

		if (!delayed_work_pending(&dbs_info->work)) {
			mutex_unlock(&dbs_info->timer_mutex);
			continue;
		}

		next_sampling = jiffies + usecs_to_jiffies(new_rate);
		appointed_at = dbs_info->work.timer.expires;


		if (time_before(next_sampling, appointed_at)) {

			mutex_unlock(&dbs_info->timer_mutex);
			cancel_delayed_work_sync(&dbs_info->work);
			mutex_lock(&dbs_info->timer_mutex);

			schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work,
						 usecs_to_jiffies(new_rate));

		}
		mutex_unlock(&dbs_info->timer_mutex);
	}
}

static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b,
				   const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;
	update_sampling_rate(input);
	return count;
}

static ssize_t store_sync_freq(struct kobject *a, struct attribute *b,
			       const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;
	dbs_tuners_ins.sync_freq = input;
	return count;
}

static ssize_t store_io_is_busy(struct kobject *a, struct attribute *b,
				const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;
	dbs_tuners_ins.io_is_busy = !!input;
	return count;
}

static ssize_t store_optimal_freq(struct kobject *a, struct attribute *b,
				  const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;
	dbs_tuners_ins.optimal_freq = input;
	return count;
}

static ssize_t store_up_threshold(struct kobject *a, struct attribute *b,
				  const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
			input < MIN_FREQUENCY_UP_THRESHOLD) {
		return -EINVAL;
	}
	dbs_tuners_ins.up_threshold = input;
	return count;
}

static ssize_t store_up_threshold_multi_core(struct kobject *a,
			struct attribute *b, const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
			input < MIN_FREQUENCY_UP_THRESHOLD) {
		return -EINVAL;
	}
	dbs_tuners_ins.up_threshold_multi_core = input;
	return count;
}

static ssize_t store_up_threshold_any_cpu_load(struct kobject *a,
			struct attribute *b, const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
			input < MIN_FREQUENCY_UP_THRESHOLD) {
		return -EINVAL;
	}
	dbs_tuners_ins.up_threshold_any_cpu_load = input;
	return count;
}

static ssize_t store_down_differential(struct kobject *a, struct attribute *b,
				       const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1 || input >= dbs_tuners_ins.up_threshold ||
			input < MIN_FREQUENCY_DOWN_DIFFERENTIAL) {
		return -EINVAL;
	}

	dbs_tuners_ins.down_differential = input;

	return count;
}

static ssize_t store_sampling_down_factor(struct kobject *a,
			struct attribute *b, const char *buf, size_t count)
{
	unsigned int input, j;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
		return -EINVAL;
	dbs_tuners_ins.sampling_down_factor = input;

	/* Reset down sampling multiplier in case it was active */
	for_each_online_cpu(j) {
		struct cpu_dbs_info_s *dbs_info;
		dbs_info = &per_cpu(od_cpu_dbs_info, j);
		dbs_info->rate_mult = 1;
	}
	return count;
}

static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
				      const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	unsigned int j;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	if (input > 1)
		input = 1;

	if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
		return count;
	}
	dbs_tuners_ins.ignore_nice = input;

	/* we need to re-evaluate prev_cpu_idle */
	for_each_online_cpu(j) {
		struct cpu_dbs_info_s *dbs_info;
		dbs_info = &per_cpu(od_cpu_dbs_info, j);
		dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
						&dbs_info->prev_cpu_wall);
		if (dbs_tuners_ins.ignore_nice)
			dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];

	}
	return count;
}

static ssize_t store_powersave_bias(struct kobject *a, struct attribute *b,
				    const char *buf, size_t count)
{
	int input  = 0;
	int bypass = 0;
	int ret, cpu, reenable_timer, j;
	struct cpu_dbs_info_s *dbs_info;

	struct cpumask cpus_timer_done;
	cpumask_clear(&cpus_timer_done);

	ret = sscanf(buf, "%d", &input);

	if (ret != 1)
		return -EINVAL;

	if (input >= POWERSAVE_BIAS_MAXLEVEL) {
		input  = POWERSAVE_BIAS_MAXLEVEL;
		bypass = 1;
	} else if (input <= POWERSAVE_BIAS_MINLEVEL) {
		input  = POWERSAVE_BIAS_MINLEVEL;
		bypass = 1;
	}

	if (input == dbs_tuners_ins.powersave_bias) {
		/* no change */
		return count;
	}

	reenable_timer = ((dbs_tuners_ins.powersave_bias ==
				POWERSAVE_BIAS_MAXLEVEL) ||
				(dbs_tuners_ins.powersave_bias ==
				POWERSAVE_BIAS_MINLEVEL));

	dbs_tuners_ins.powersave_bias = input;
	if (!bypass) {
		if (reenable_timer) {
			/* reinstate dbs timer */
			for_each_online_cpu(cpu) {
				if (lock_policy_rwsem_write(cpu) < 0)
					continue;

				dbs_info = &per_cpu(od_cpu_dbs_info, cpu);

				for_each_cpu(j, &cpus_timer_done) {
					if (!dbs_info->cur_policy) {
						pr_err("Dbs policy is NULL\n");
						goto skip_this_cpu;
					}
					if (cpumask_test_cpu(j, dbs_info->
							cur_policy->cpus))
						goto skip_this_cpu;
				}

				cpumask_set_cpu(cpu, &cpus_timer_done);
				if (dbs_info->cur_policy) {
					/* restart dbs timer */
					dbs_timer_init(dbs_info);
				}
skip_this_cpu:
				unlock_policy_rwsem_write(cpu);
			}
		}
		ondemand_powersave_bias_init();
	} else {
		/* running at maximum or minimum frequencies; cancel
		   dbs timer as periodic load sampling is not necessary */
		for_each_online_cpu(cpu) {
			if (lock_policy_rwsem_write(cpu) < 0)
				continue;

			dbs_info = &per_cpu(od_cpu_dbs_info, cpu);

			for_each_cpu(j, &cpus_timer_done) {
				if (!dbs_info->cur_policy) {
					pr_err("Dbs policy is NULL\n");
					goto skip_this_cpu_bypass;
				}
				if (cpumask_test_cpu(j, dbs_info->
						cur_policy->cpus))
					goto skip_this_cpu_bypass;
			}

			cpumask_set_cpu(cpu, &cpus_timer_done);

			if (dbs_info->cur_policy) {
				/* cpu using ondemand, cancel dbs timer */
				mutex_lock(&dbs_info->timer_mutex);
				dbs_timer_exit(dbs_info);

				ondemand_powersave_bias_setspeed(
					dbs_info->cur_policy,
					NULL,
					input);

				mutex_unlock(&dbs_info->timer_mutex);
			}
skip_this_cpu_bypass:
			unlock_policy_rwsem_write(cpu);
		}
	}

	return count;
}

define_one_global_rw(sampling_rate);
define_one_global_rw(io_is_busy);
define_one_global_rw(up_threshold);
define_one_global_rw(down_differential);
define_one_global_rw(sampling_down_factor);
define_one_global_rw(ignore_nice_load);
define_one_global_rw(powersave_bias);
define_one_global_rw(up_threshold_multi_core);
define_one_global_rw(optimal_freq);
define_one_global_rw(up_threshold_any_cpu_load);
define_one_global_rw(sync_freq);

static struct attribute *dbs_attributes[] = {
	&sampling_rate_min.attr,
	&sampling_rate.attr,
	&up_threshold.attr,
	&down_differential.attr,
	&sampling_down_factor.attr,
	&ignore_nice_load.attr,
	&powersave_bias.attr,
	&io_is_busy.attr,
	&up_threshold_multi_core.attr,
	&optimal_freq.attr,
	&up_threshold_any_cpu_load.attr,
	&sync_freq.attr,
	NULL
};

static struct attribute_group dbs_attr_group = {
	.attrs = dbs_attributes,
	.name = "ondemand",
};

/************************** sysfs end ************************/

static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq)
{
	if (dbs_tuners_ins.powersave_bias)
		freq = powersave_bias_target(p, freq, CPUFREQ_RELATION_H);
	else if (p->cur == p->max)
		return;

	__cpufreq_driver_target(p, freq, dbs_tuners_ins.powersave_bias ?
			CPUFREQ_RELATION_L : CPUFREQ_RELATION_H);
}

static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
{
	/* Extrapolated load of this CPU */
	unsigned int load_at_max_freq = 0;
	unsigned int max_load_freq;
	/* Current load across this CPU */
	unsigned int cur_load = 0;
	unsigned int max_load_other_cpu = 0;
	struct cpufreq_policy *policy;
	unsigned int j;

	this_dbs_info->freq_lo = 0;
	policy = this_dbs_info->cur_policy;

	/*
	 * Every sampling_rate we check whether the current idle time is less
	 * than 20% (default); if it is, we try to increase the frequency.
	 * Every sampling_rate we also look for the lowest frequency which can
	 * sustain the load while keeping idle time over 30%. If such a
	 * frequency exists, we try to decrease to this frequency.
	 *
	 * Any frequency increase takes the CPU to the maximum frequency.
	 * Frequency reduction happens in minimum steps of
	 * 5% (default) of the current frequency.
	 */
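	/*
	 * Example with the default tunables (illustrative numbers):
	 * up_threshold = 80, so a CPU running at 1000 MHz is ramped to
	 * policy->max as soon as its load-weighted frequency
	 * (cur_load * freq_avg) exceeds 80 * 1000 MHz, i.e. roughly 80%
	 * busy over the last sampling period when freq_avg is close to
	 * the current frequency.
	 */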

	/* Get Absolute Load - in terms of freq */
	max_load_freq = 0;

	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_info_s *j_dbs_info;
		cputime64_t cur_wall_time, cur_idle_time, cur_iowait_time;
		unsigned int idle_time, wall_time, iowait_time;
		unsigned int load_freq;
		int freq_avg;

		j_dbs_info = &per_cpu(od_cpu_dbs_info, j);

		cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);
		cur_iowait_time = get_cpu_iowait_time(j, &cur_wall_time);

		wall_time = (unsigned int)
			(cur_wall_time - j_dbs_info->prev_cpu_wall);
		j_dbs_info->prev_cpu_wall = cur_wall_time;

		idle_time = (unsigned int)
			(cur_idle_time - j_dbs_info->prev_cpu_idle);
		j_dbs_info->prev_cpu_idle = cur_idle_time;

		iowait_time = (unsigned int)
			(cur_iowait_time - j_dbs_info->prev_cpu_iowait);
		j_dbs_info->prev_cpu_iowait = cur_iowait_time;

		if (dbs_tuners_ins.ignore_nice) {
			u64 cur_nice;
			unsigned long cur_nice_jiffies;

			cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] -
					 j_dbs_info->prev_cpu_nice;
			/*
			 * Assumption: nice time between sampling periods will
			 * be less than 2^32 jiffies for 32 bit sys
			 */
			cur_nice_jiffies = (unsigned long)
					cputime64_to_jiffies64(cur_nice);

			j_dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
			idle_time += jiffies_to_usecs(cur_nice_jiffies);
		}

		/*
		 * For the purpose of ondemand, waiting for disk IO is an
		 * indication that you're performance critical, and not that
		 * the system is actually idle. So subtract the iowait time
		 * from the cpu idle time.
		 */

		if (dbs_tuners_ins.io_is_busy && idle_time >= iowait_time)
			idle_time -= iowait_time;

		if (unlikely(!wall_time || wall_time < idle_time))
			continue;

		cur_load = 100 * (wall_time - idle_time) / wall_time;
		j_dbs_info->max_load  = max(cur_load, j_dbs_info->prev_load);
		j_dbs_info->prev_load = cur_load;
		freq_avg = __cpufreq_driver_getavg(policy, j);
		if (freq_avg <= 0)
			freq_avg = policy->cur;

		load_freq = cur_load * freq_avg;
		if (load_freq > max_load_freq)
			max_load_freq = load_freq;
	}

	for_each_online_cpu(j) {
		struct cpu_dbs_info_s *j_dbs_info;
		j_dbs_info = &per_cpu(od_cpu_dbs_info, j);

		if (j == policy->cpu)
			continue;

		if (max_load_other_cpu < j_dbs_info->max_load)
			max_load_other_cpu = j_dbs_info->max_load;
		/*
		 * The other CPU could be running at a higher frequency
		 * but may not have completed its sampling_down_factor.
		 * In that case consider the other CPU loaded so that
		 * a frequency imbalance does not occur.
		 */

		if ((j_dbs_info->cur_policy != NULL)
			&& (j_dbs_info->cur_policy->cur ==
					j_dbs_info->cur_policy->max)) {

			if (policy->cur >= dbs_tuners_ins.optimal_freq)
				max_load_other_cpu =
				dbs_tuners_ins.up_threshold_any_cpu_load;
		}
	}

	/* calculate the scaled load across CPU */
	load_at_max_freq = (cur_load * policy->cur)/policy->cpuinfo.max_freq;

	cpufreq_notify_utilization(policy, load_at_max_freq);
	/* Check for frequency increase */
	if (max_load_freq > dbs_tuners_ins.up_threshold * policy->cur) {
		/* If switching to max speed, apply sampling_down_factor */
		if (policy->cur < policy->max)
			this_dbs_info->rate_mult =
				dbs_tuners_ins.sampling_down_factor;
		dbs_freq_increase(policy, policy->max);
		return;
	}

	if (num_online_cpus() > 1) {

		if (max_load_other_cpu >
				dbs_tuners_ins.up_threshold_any_cpu_load) {
			if (policy->cur < dbs_tuners_ins.sync_freq)
				dbs_freq_increase(policy,
						dbs_tuners_ins.sync_freq);
			return;
		}

		if (max_load_freq > dbs_tuners_ins.up_threshold_multi_core *
								policy->cur) {
			if (policy->cur < dbs_tuners_ins.optimal_freq)
				dbs_freq_increase(policy,
						dbs_tuners_ins.optimal_freq);
			return;
		}
	}

	/* Check for frequency decrease */
	/* if we cannot reduce the frequency anymore, break out early */
	if (policy->cur == policy->min)
		return;

	/*
	 * The optimal frequency is the lowest frequency that can support the
	 * current CPU usage without triggering the up policy. To be safe, we
	 * focus 10 points under the threshold.
	 */
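	/*
	 * Example with the default tunables (illustrative numbers):
	 * up_threshold = 80 and down_differential = 10, so
	 * freq_next = max_load_freq / 70 picks the lowest frequency at
	 * which the measured load would stay below roughly 70%, keeping
	 * the 10-point safety margin described above.
	 */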
	if (max_load_freq <
	    (dbs_tuners_ins.up_threshold - dbs_tuners_ins.down_differential) *
	     policy->cur) {
		unsigned int freq_next;
		freq_next = max_load_freq /
				(dbs_tuners_ins.up_threshold -
				 dbs_tuners_ins.down_differential);

		/* No longer fully busy, reset rate_mult */
		this_dbs_info->rate_mult = 1;

		if (freq_next < policy->min)
			freq_next = policy->min;

		if (num_online_cpus() > 1) {
			if (max_load_other_cpu >
				(dbs_tuners_ins.up_threshold_multi_core -
				dbs_tuners_ins.down_differential) &&
				freq_next < dbs_tuners_ins.sync_freq)
				freq_next = dbs_tuners_ins.sync_freq;

			if (max_load_freq >
				 (dbs_tuners_ins.up_threshold_multi_core -
				  dbs_tuners_ins.down_differential_multi_core) *
				  policy->cur)
				freq_next = dbs_tuners_ins.optimal_freq;

		}
		if (!dbs_tuners_ins.powersave_bias) {
			__cpufreq_driver_target(policy, freq_next,
					CPUFREQ_RELATION_L);
		} else {
			int freq = powersave_bias_target(policy, freq_next,
					CPUFREQ_RELATION_L);
			__cpufreq_driver_target(policy, freq,
				CPUFREQ_RELATION_L);
		}
	}
}

static void do_dbs_timer(struct work_struct *work)
{
	struct cpu_dbs_info_s *dbs_info =
		container_of(work, struct cpu_dbs_info_s, work.work);
	unsigned int cpu = dbs_info->cpu;
	int sample_type = dbs_info->sample_type;

	int delay;

	mutex_lock(&dbs_info->timer_mutex);

	/* Common NORMAL_SAMPLE setup */
	dbs_info->sample_type = DBS_NORMAL_SAMPLE;
	if (!dbs_tuners_ins.powersave_bias ||
	    sample_type == DBS_NORMAL_SAMPLE) {
		dbs_check_cpu(dbs_info);
		if (dbs_info->freq_lo) {
			/* Setup timer for SUB_SAMPLE */
			dbs_info->sample_type = DBS_SUB_SAMPLE;
			delay = dbs_info->freq_hi_jiffies;
		} else {
			/* We want all CPUs to do sampling nearly on
			 * same jiffy
			 */
			delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate
				* dbs_info->rate_mult);

			if (num_online_cpus() > 1)
				delay -= jiffies % delay;
		}
	} else {
		__cpufreq_driver_target(dbs_info->cur_policy,
			dbs_info->freq_lo, CPUFREQ_RELATION_H);
		delay = dbs_info->freq_lo_jiffies;
	}
	schedule_delayed_work_on(cpu, &dbs_info->work, delay);
	mutex_unlock(&dbs_info->timer_mutex);
}

static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
{
	/* We want all CPUs to do sampling nearly on same jiffy */
	int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);

	if (num_online_cpus() > 1)
		delay -= jiffies % delay;

	dbs_info->sample_type = DBS_NORMAL_SAMPLE;
	INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
	schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work, delay);
}

static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
{
	cancel_delayed_work_sync(&dbs_info->work);
}

/*
 * Not all CPUs want IO time to be accounted as busy; this depends on how
 * efficient idling at a higher frequency/voltage is.
 * Pavel Machek says this is not so for various generations of AMD and old
 * Intel systems.
 * Mike Chan (android.com) says this is also not true for ARM.
 * Because of this, whitelist specific known (series of) CPUs by default, and
 * leave all others up to the user.
 */
static int should_io_be_busy(void)
{
#if defined(CONFIG_X86)
	/*
	 * For Intel, Core 2 (model 15) and later have an efficient idle.
	 */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
	    boot_cpu_data.x86 == 6 &&
	    boot_cpu_data.x86_model >= 15)
		return 1;
#endif
	return 0;
}

static void dbs_refresh_callback(struct work_struct *work)
{
	struct cpufreq_policy *policy;
	struct cpu_dbs_info_s *this_dbs_info;
	struct dbs_work_struct *dbs_work;
	unsigned int cpu;

	dbs_work = container_of(work, struct dbs_work_struct, work);
	cpu = dbs_work->cpu;

	get_online_cpus();

	if (lock_policy_rwsem_write(cpu) < 0)
		goto bail_acq_sema_failed;

	this_dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
	policy = this_dbs_info->cur_policy;
	if (!policy) {
		/* CPU not using ondemand governor */
		goto bail_incorrect_governor;
	}

	if (policy->cur < policy->max) {
		/*
		 * Arch specific cpufreq driver may fail.
		 * Don't update governor frequency upon failure.
		 */
		if (__cpufreq_driver_target(policy, policy->max,
					CPUFREQ_RELATION_L) >= 0)
			policy->cur = policy->max;

		this_dbs_info->prev_cpu_idle = get_cpu_idle_time(cpu,
				&this_dbs_info->prev_cpu_wall);
	}

bail_incorrect_governor:
	unlock_policy_rwsem_write(cpu);

bail_acq_sema_failed:
	put_online_cpus();
	return;
}

static void dbs_input_event(struct input_handle *handle, unsigned int type,
		unsigned int code, int value)
{
	int i;

	if ((dbs_tuners_ins.powersave_bias == POWERSAVE_BIAS_MAXLEVEL) ||
		(dbs_tuners_ins.powersave_bias == POWERSAVE_BIAS_MINLEVEL)) {
		/* nothing to do */
		return;
	}

	for_each_online_cpu(i)
		queue_work_on(i, input_wq, &per_cpu(dbs_refresh_work, i).work);
}

static int dbs_input_connect(struct input_handler *handler,
		struct input_dev *dev, const struct input_device_id *id)
{
	struct input_handle *handle;
	int error;

	handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL);
	if (!handle)
		return -ENOMEM;

	handle->dev = dev;
	handle->handler = handler;
	handle->name = "cpufreq";

	error = input_register_handle(handle);
	if (error)
		goto err2;

	error = input_open_device(handle);
	if (error)
		goto err1;

	return 0;
err1:
	input_unregister_handle(handle);
err2:
	kfree(handle);
	return error;
}

static void dbs_input_disconnect(struct input_handle *handle)
{
	input_close_device(handle);
	input_unregister_handle(handle);
	kfree(handle);
}

static const struct input_device_id dbs_ids[] = {
	{ .driver_info = 1 },
	{ },
};

static struct input_handler dbs_input_handler = {
	.event		= dbs_input_event,
	.connect	= dbs_input_connect,
	.disconnect	= dbs_input_disconnect,
	.name		= "cpufreq_ond",
	.id_table	= dbs_ids,
};

static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
				   unsigned int event)
{
	unsigned int cpu = policy->cpu;
	struct cpu_dbs_info_s *this_dbs_info;
	unsigned int j;
	int rc;

	this_dbs_info = &per_cpu(od_cpu_dbs_info, cpu);

	switch (event) {
	case CPUFREQ_GOV_START:
		if ((!cpu_online(cpu)) || (!policy->cur))
			return -EINVAL;

		mutex_lock(&dbs_mutex);

		dbs_enable++;
		for_each_cpu(j, policy->cpus) {
			struct cpu_dbs_info_s *j_dbs_info;
			j_dbs_info = &per_cpu(od_cpu_dbs_info, j);
			j_dbs_info->cur_policy = policy;

			j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
						&j_dbs_info->prev_cpu_wall);
			if (dbs_tuners_ins.ignore_nice)
				j_dbs_info->prev_cpu_nice =
						kcpustat_cpu(j).cpustat[CPUTIME_NICE];
		}
		this_dbs_info->cpu = cpu;
		this_dbs_info->rate_mult = 1;
		ondemand_powersave_bias_init_cpu(cpu);
		/*
		 * Start the timerschedule work, when this governor
		 * is used for first time
		 */
		if (dbs_enable == 1) {
			unsigned int latency;

			rc = sysfs_create_group(cpufreq_global_kobject,
						&dbs_attr_group);
			if (rc) {
				mutex_unlock(&dbs_mutex);
				return rc;
			}

			/* policy latency is in nS. Convert it to uS first */
			latency = policy->cpuinfo.transition_latency / 1000;
			if (latency == 0)
				latency = 1;
			/* Bring kernel and HW constraints together */
			min_sampling_rate = max(min_sampling_rate,
					MIN_LATENCY_MULTIPLIER * latency);
			dbs_tuners_ins.sampling_rate =
				max(min_sampling_rate,
				    latency * LATENCY_MULTIPLIER);
			dbs_tuners_ins.io_is_busy = should_io_be_busy();

			if (dbs_tuners_ins.optimal_freq == 0)
				dbs_tuners_ins.optimal_freq = policy->min;

			if (dbs_tuners_ins.sync_freq == 0)
				dbs_tuners_ins.sync_freq = policy->min;
		}
		if (!cpu)
			rc = input_register_handler(&dbs_input_handler);
		mutex_unlock(&dbs_mutex);


		if (!ondemand_powersave_bias_setspeed(
					this_dbs_info->cur_policy,
					NULL,
					dbs_tuners_ins.powersave_bias))
			dbs_timer_init(this_dbs_info);
		break;

	case CPUFREQ_GOV_STOP:
		dbs_timer_exit(this_dbs_info);

		mutex_lock(&dbs_mutex);
		dbs_enable--;
		/* If device is being removed, policy is no longer
		 * valid. */
		this_dbs_info->cur_policy = NULL;
		if (!cpu)
			input_unregister_handler(&dbs_input_handler);
		mutex_unlock(&dbs_mutex);
		if (!dbs_enable)
			sysfs_remove_group(cpufreq_global_kobject,
					   &dbs_attr_group);

		break;

	case CPUFREQ_GOV_LIMITS:
		mutex_lock(&this_dbs_info->timer_mutex);
		if (policy->max < this_dbs_info->cur_policy->cur)
			__cpufreq_driver_target(this_dbs_info->cur_policy,
				policy->max, CPUFREQ_RELATION_H);
		else if (policy->min > this_dbs_info->cur_policy->cur)
			__cpufreq_driver_target(this_dbs_info->cur_policy,
				policy->min, CPUFREQ_RELATION_L);
		else if (dbs_tuners_ins.powersave_bias != 0)
			ondemand_powersave_bias_setspeed(
				this_dbs_info->cur_policy,
				policy,
				dbs_tuners_ins.powersave_bias);
		mutex_unlock(&this_dbs_info->timer_mutex);
		break;
	}
	return 0;
}

static int __init cpufreq_gov_dbs_init(void)
{
	u64 idle_time;
	unsigned int i;
	int cpu = get_cpu();

	idle_time = get_cpu_idle_time_us(cpu, NULL);
	put_cpu();
	if (idle_time != -1ULL) {
		/* Idle micro accounting is supported. Use finer thresholds */
		dbs_tuners_ins.up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
		dbs_tuners_ins.down_differential =
					MICRO_FREQUENCY_DOWN_DIFFERENTIAL;
		/*
		 * In nohz/micro accounting case we set the minimum frequency
		 * not depending on HZ, but fixed (very low). The deferred
		 * timer might skip some samples if idle/sleeping as needed.
		 */
		min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE;
	} else {
		/* For correct statistics, we need 10 ticks for each measure */
		min_sampling_rate =
			MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10);
	}

	input_wq = create_workqueue("iewq");
	if (!input_wq) {
		printk(KERN_ERR "Failed to create iewq workqueue\n");
		return -EFAULT;
	}
	for_each_possible_cpu(i) {
		struct cpu_dbs_info_s *this_dbs_info =
			&per_cpu(od_cpu_dbs_info, i);
		struct dbs_work_struct *dbs_work =
			&per_cpu(dbs_refresh_work, i);

		mutex_init(&this_dbs_info->timer_mutex);
		INIT_WORK(&dbs_work->work, dbs_refresh_callback);
		dbs_work->cpu = i;
	}

	return cpufreq_register_governor(&cpufreq_gov_ondemand);
}

static void __exit cpufreq_gov_dbs_exit(void)
{
	unsigned int i;

	cpufreq_unregister_governor(&cpufreq_gov_ondemand);
	for_each_possible_cpu(i) {
		struct cpu_dbs_info_s *this_dbs_info =
			&per_cpu(od_cpu_dbs_info, i);
		mutex_destroy(&this_dbs_info->timer_mutex);
	}
	destroy_workqueue(input_wq);
}


MODULE_AUTHOR("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>");
MODULE_AUTHOR("Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>");
MODULE_DESCRIPTION("'cpufreq_ondemand' - A dynamic cpufreq governor for "
	"Low Latency Frequency Transition capable processors");
MODULE_LICENSE("GPL");

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
fs_initcall(cpufreq_gov_dbs_init);
#else
module_init(cpufreq_gov_dbs_init);
#endif
module_exit(cpufreq_gov_dbs_exit);